query            stringlengths    9 – 9.05k
document         stringlengths    10 – 222k
metadata         dict
negatives        sequencelengths  30 – 30
negative_scores  sequencelengths  30 – 30
document_score   stringlengths    4 – 10
document_rank    stringclasses    2 values
Computes the embedding for the given wide_ftrs_sp_idx and wide_ftrs_sp_val. A SparseTensor is created from wide_ftrs_sp_idx and wide_ftrs_sp_val and then multiplied with the feature weights to obtain an embedding for each document.
def _compute_embedding_score_per_record(self, wide_ftrs_idx_with_value_per_record):
    # Split idx and val back
    wide_ftrs_sp_idx, wide_ftrs_sp_val = tf.split(wide_ftrs_idx_with_value_per_record, 2, axis=-1)
    wide_ftrs_sp_idx = tf.cast(wide_ftrs_sp_idx, dtype=tf.int64)

    # Transformation
    shape = tf.ones(shape=[tf.shape(wide_ftrs_sp_idx)[0], 1], dtype=tf.int64) * self._num_wide_sp
    valid_wide_ftrs_idx_mask = tf.cast(tf.not_equal(wide_ftrs_sp_idx, self._padding_idx), tf.float32)
    wide_ftrs_sp_idx = tf.expand_dims(wide_ftrs_sp_idx, -1)

    # Get sparse feature vector v where v[ftr_idx_i] = ftr_val_i and v[other] = 0
    wide_ftrs_sp = sparse_tensor_merge(wide_ftrs_sp_idx, wide_ftrs_sp_val * valid_wide_ftrs_idx_mask, shape)

    # Feature weights
    bias = self.ftrs_weight[0]

    # Compute embedding
    embedding = tf.sparse.matmul(wide_ftrs_sp, self.ftrs_weight) + bias
    return embedding
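Here sparse_tensor_merge and ftrs_weight are defined elsewhere in the class. As an illustration of the same idea, a minimal NumPy sketch; the sizes, weights, and feature indices below are assumptions made up for the example:

import numpy as np

num_wide_sp, emb_size, padding_idx = 8, 4, 0       # assumed vocabulary size, embedding size, padding index
weights = np.random.rand(num_wide_sp, emb_size)    # plays the role of self.ftrs_weight
bias = weights[0]                                  # the method above reuses row 0 as the bias

ftr_idx = np.array([2, 5, 0])                      # sparse feature indices for one document (0 = padding)
ftr_val = np.array([1.0, 0.5, 3.0])                # their values

mask = (ftr_idx != padding_idx).astype(float)      # zero out padded slots
sparse_vec = np.zeros(num_wide_sp)
np.add.at(sparse_vec, ftr_idx, ftr_val * mask)     # v[ftr_idx_i] = ftr_val_i, v[elsewhere] = 0

embedding = sparse_vec @ weights + bias            # same sparse-vector-times-weights plus bias as above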
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n num_wide_sp: int,\n wide_ftrs_sp_idx: tf.Tensor,\n sp_emb_size: int,\n wide_ftrs_sp_val: tf.Tensor = None,\n padding_idx: int = 0,\n initializer=tf.contrib.layers.xavier_initializer()):\n wide_ftrs_sp_idx = tf.cast(wide_ftrs_sp_idx, dtype=tf.float32)\n if wide_ftrs_sp_val is None: # Default to 1 if values unspecified\n wide_ftrs_sp_val = tf.ones(tf.shape(wide_ftrs_sp_idx), dtype=tf.float32)\n\n self._num_wide_sp = num_wide_sp\n self._padding_idx = padding_idx\n\n with tf.variable_scope('wide', reuse=tf.AUTO_REUSE):\n # Feature weights\n self.ftrs_weight = tf.get_variable('wide_ftrs_sp_weight',\n shape=[num_wide_sp, sp_emb_size],\n initializer=initializer,\n trainable=True)\n\n # A hack to combine idx and val so that we can process them together in `tf.map_fn` later\n # Shape=[batch_size, max_group_size, max_wide_ftrs_sp_size*2], max_wide_ftrs_size is the maximum number of\n # sparse wide features in a document in the batch\n wide_ftrs_sp_idx_with_value = tf.concat([wide_ftrs_sp_idx, wide_ftrs_sp_val], axis=-1)\n\n # Compute embedding sample-wise\n self.embedding = tf.map_fn(self._compute_embedding_score_per_record, wide_ftrs_sp_idx_with_value,\n dtype=tf.float32)", "def SparseEmbedding(data=None, weight=None, input_dim=_Null, output_dim=_Null, dtype=_Null, out=None, name=None, **kwargs):\n return (0,)", "def forward(self, doc_weights):\n\n doc_probs = F.softmax(doc_weights,dim=1)\n\n # shape: [batch_size, n_topics, 1]\n unsqueezed_doc_probs = doc_probs.unsqueeze(2)\n\n # shape: [1, n_topics, embedding_dim]\n unsqueezed_topic_vectors = self.topic_vectors.unsqueeze(0)\n\n # linear combination of topic vectors weighted by probabilities,\n # shape: [batch_size, embedding_dim]\n doc_vectors = (unsqueezed_doc_probs*unsqueezed_topic_vectors).sum(1)\n\n return doc_vectors", "def init_embeddings(self, weight, words):\n # wrap in tensor\n if isinstance(weight, list):\n weight = torch.Tensor(weight).float()\n if isinstance(weight, np.ndarray):\n weight = torch.from_numpy(weight).float()\n # check embedding size\n if weight.size(1) != self.embedding_dim:\n raise ValueError(\"Mismatched embedding dim {} for model \"\n \"with dim {}\".format(weight.size(1),\n self.embedding_dim))\n\n self_idxs, other_idxs = [], []\n for other_idx, word in enumerate(words):\n try:\n self_idxs.append(self.d.s2i[word])\n other_idxs.append(other_idx)\n except KeyError:\n pass\n\n other_idxs = torch.LongTensor(other_idxs)\n self_idxs = torch.LongTensor(self_idxs)\n self.weight.data[self_idxs] = weight[other_idxs]", "def add_word_embedding_op(self):\n if self.pos:\n print(\"adding pos embeddings\")\n with tf.variable_scope(\"pos\"):\n _pos_embeddings = tf.Variable(self.pos_embeddings,\n name=\"la_pos_embeddings\",\n dtype=tf.float32, trainable=False)\n pos_embeddings = tf.nn.embedding_lookup(_pos_embeddings, self.pos_ids,\n name=\"pos_embeddings\")\n self.pos_vecs = pos_embeddings\n print(\"adding word_embeddings\")\n with tf.variable_scope(\"words\"):\n _word_embeddings = tf.Variable(self.embeddings, name=\"_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids,\n name=\"word_embeddings\")\n if self.use_window:\n print(\"Concatenating word vectors of context words\")\n word_embeddings_sl = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sl,\n name=\"word_embeddings_sl\")\n word_embeddings_sr = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sr,\n name=\"word_embeddings_sr\")\n word_embeddings = 
tf.concat([word_embeddings_sr, word_embeddings,\n word_embeddings_sl], axis=-1)\n if self.use_char_embeddings:\n print(\"adding CNN for char embeddings\")\n with tf.variable_scope(\"chars\"):\n _char_embeddings = tf.get_variable(name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.char_count, \n self.c_dim_input])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings, \n self.char_ids, \n name=\"char_embeddings\")\n s = char_embeddings.shape\n # the shape of our char_embeddings is now (batch_size, max number of words\n # in each sentence, max number of chars in each word, self.c_dim )\n char_filter = tf.get_variable(\"char_filter\", dtype=tf.float32,\n shape=[self.c_filter_width, \n self.c_filter_height,\n self.c_dim_input,\n self.c_dim_output])\n print(\"adding 2d convolution layer\")\n char_conv_layer = tf.nn.conv2d(char_embeddings, char_filter, \n strides=[1, 1, 1, 1], \n padding=\"SAME\")\n char_conv_layer = tf.nn.tanh(char_conv_layer)\n print(\"adding 2d pooling layer\")\n char_conv_layer = tf.layers.max_pooling2d(char_conv_layer, \n 1, \n strides=1)\n char_output = tf.reshape(char_conv_layer, shape=[-1, self.max_len, \n self.max_word_length*\n self.c_dim_output])\n word_embeddings = tf.concat([word_embeddings, char_output], axis=-1)\n if self.pos and self.concat_pos:\n print(\"concatenating pos with word_embeddings\")\n word_embeddings = tf.concat([word_embeddings, pos_embeddings], axis=-1)\n self.word_embeddings = word_embeddings\n if self.use_additional and self.hybrid:\n print(\"using additional embeddings\")\n _word_embeddings_2 = tf.Variable(self.additional_embeddings,\n name=\"two_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings_2 = tf.nn.embedding_lookup(_word_embeddings_2,\n self.word_ids,\n name=\"two_word_embeddings\")\n self.word_embeddings_2 = word_embeddings_2", "def get_WS(w2v):\n # get set of MAX_NGRAM-grams in text\n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines ]\n ngrams_in_data = set()\n for words in raw:\n for ngram in tweet_to_ngrams(words):\n ngrams_in_data.add(ngram)\n\n # load sentiment features from model\n clf_pipe = pickle.load(open(CLF_FNAME, 'rb')) # model\n\n vect = clf_pipe.best_estimator_.named_steps['vect']\n clf = clf_pipe.best_estimator_.named_steps['clf']\n\n features_to_sent_idx = vect.vocabulary_ # map from model features to sentiment index\n # currently, sentiment = 2 * (count_pos / (count_pos + count_neg)) - 1\n sentiments = clf.feature_count_[1,:] / np.sum(clf.feature_count_, axis=0) # in [0,1]\n sentiments = 2 * sentiments - 1 # rescale to [-1,1]\n\n features_to_sent = {feat: sentiments[idx] for (feat,idx) in features_to_sent_idx.items()}\n\n # build WS and ngram_idx_map for each MAX_NGRAM-gram in the text\n k = len(next(iter(w2v.values()))) # dimension of embedding\n WS = np.zeros(shape=(len(ngrams_in_data) + 1, k + MAX_NGRAM), dtype='float32')\n ngram_idx_map = {}\n\n index = 1 # first row is left 0, for padding in the cnn. This is also neutral sentiment.\n # For Vader Sentiment analysis\n# vader_analyzer = SentimentIntensityAnalyzer()\n\n\n for ngram in ngrams_in_data:\n ngram_idx_map[ngram] = index\n\n # set word embedding, note that unknown words already randomized in load_embedding \n words = ngram.split(' ')\n WS[index,:k] = w2v[words[-1]] # embedding of last word\n\n # set sentiment embedding\n for n in range(MAX_NGRAM): # for 1, 2, ... 
length ngrams\n sub_ngram = ' '.join(words[-1 - n:]) \n\n # Naive Bayes Sentiment feature --------------------------------\n sent = features_to_sent.get(sub_ngram, 0.0) # default to neutral 0\n # --------------------------------------------------------------\n\n# # TextBlob sentiment feature -----------------------------------\n# sent = TextBlob(sub_ngram).sentiment.polarity\n# # --------------------------------------------------------------\n\n# # Vader sentiment feature -------------------------------------\n# sent = vader_analyzer.polarity_scores(sub_ngram)['compound']\n# # -------------------------------------------------------------\n WS[index,k+n] = sent\n\n index += 1\n\n return WS, ngram_idx_map", "def embedding_model(\n n_factors: int = 50,\n window: int = 5,\n min_count: int = 1,\n learning_rate: float = 0.05,\n negative_samples: int = 10,\n negative_exponent: float = 0.75,\n workers: int = 4,\n n_iterations: int = 10,\n batch_size: int = 10000,\n skip_gram: int = 0,\n) -> Word2Vec:\n logger.info(\"Defining Embedding Neural Network model.\")\n model = Word2Vec(\n vector_size=n_factors,\n window=window,\n min_count=min_count,\n alpha=learning_rate,\n negative=negative_samples,\n ns_exponent=negative_exponent,\n workers=workers,\n epochs=n_iterations,\n batch_words=batch_size,\n sg=skip_gram,\n compute_loss=True,\n )\n return model", "def embedding(\n input,\n weight,\n padding_idx=None,\n max_norm=None,\n norm_type=2.0,\n scale_grad_by_freq=False,\n sparse=False,\n):\n\n assert sparse is False, \"Not support sparse=True yet!\"\n if padding_idx is not None:\n if padding_idx > 0:\n assert padding_idx < weight.size(\n 0\n ), \"Padding_idx must be within num_embeddings\"\n elif padding_idx < 0:\n assert padding_idx >= -weight.size(\n 0\n ), \"Padding_idx must be within num_embeddings\"\n padding_idx = weight.size(0) + padding_idx\n\n if max_norm is not None:\n with flow.no_grad():\n weight = flow._C.embedding_renorm_(weight, input, max_norm, norm_type)\n\n if padding_idx is None and not scale_grad_by_freq:\n return flow._C.gather(weight, input, axis=0)\n else:\n return flow._C.embedding(weight, input, padding_idx, scale_grad_by_freq)", "def embedding(inputs,\n vocab_dim,\n embedding_dim,\n reuse,\n validate_indices=False,\n w_init=tf.random_uniform_initializer(-1., 1.),\n trainable=True,\n normalize=False,\n vocab_freqs=None,\n name=\"Embedding\"):\n\n input_shape = util.get_input_shape(inputs)\n assert len(input_shape) == 2, \"Input Tensor shape must be 2-D\"\n\n with tf.variable_scope(name, reuse=reuse):\n with tf.device('/cpu:0'):\n W = tf.get_variable(\n \"W\", shape=[vocab_dim, embedding_dim], initializer=w_init, trainable=trainable)\n if normalize:\n assert vocab_freqs is not None\n vocab_freqs = tf.constant(vocab_freqs, dtype=tf.float32, shape=(vocab_dim, 1))\n W = _normalize(W, vocab_freqs)\n\n output = tf.cast(inputs, tf.int32)\n output = tf.nn.embedding_lookup(W, output, validate_indices=validate_indices)\n\n shape = [-1] + output.get_shape().as_list()[1:3] + [1]\n # seq_length = util.retrieve_seq_length(tf.reshape(inputs, shape))\n\n return output", "def embed_data(\n self,\n data: Dict[str, tf.SparseTensor]\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n\n batch_shape = tf.shape(data[\"t\"])[:-1]\n flat_data = nest.map_structure(batches.flatten_batch, data)\n flat_data = nest.map_structure(batches.sparse_fill_empty_rows, flat_data)\n\n context_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.context_features))\n 
context_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), context_embeddings)\n\n sequential_embeddings = (\n self.embedding.provide_embeddings_to_forward_fn(\n flat_data, feature_types=self._config.sequential_features))\n sequential_embeddings = nest.map_structure(\n batches.get_unflatten_batch_fn(batch_shape), sequential_embeddings)\n\n dt = tf.divide(tf.cast(data[\"dt\"], dtype=tf.float32), 5400.)\n t = tf.divide(tf.cast(data[\"t\"], dtype=tf.float32), 5400.)\n dt_log = tf.log(dt + 1.)\n\n embedding_dict = sequential_embeddings.copy()\n embedding_dict.update(context_embeddings)\n embedding_dict[\"dt_s\"] = tf.matmul(dt_log, self.w_dt)\n combined_embedding = self._combine_embeddings_for_input(embedding_dict)\n inputs = combined_embedding\n if self._config.get(\"apply_bias\", False):\n inputs = inputs + tf.get_variable(\n \"_\".join([self._config.embedding_type, \"final_bias\"]),\n shape=[self.get_total_embedding_size()],\n initializer=tf.zeros_initializer)\n time_vect = t\n\n return inputs, time_vect", "def make_embedding_matrix(docs, size, min_count = 5, window = 5, n_iter = 5, savename = None, workers = 3):\n\n print('Starting the embedding generation')\n t0 = time.time()\n model = gensim.models.Word2Vec(docs, min_count=min_count, window = window,\n size = size, iter = n_iter, workers = workers)\n t1 = time.time()\n print('All done, total time %s' % (t1-t0))\n \n if savename is not None:\n model.save(savename)\n \n return model", "def project_embedding(self):\n w = self.feature_embedding.weight.data\n d = w.size(-1) - 1\n narrowed = w.narrow(-1, 1, d)\n tmp = 1 + torch.sum(torch.pow(narrowed, 2), dim=-1, keepdim=True)\n tmp.sqrt_()\n w.narrow(-1, 0, 1).copy_(tmp)\n return w # can be delete?", "def _compute_span_pair_embeddings(self,\n top_span_embeddings: torch.FloatTensor,\n antecedent_embeddings: torch.FloatTensor,\n genre_embedding: torch.FloatTensor,\n trigger_same_type_agreement_embeddings: torch.FloatTensor,\n realies_same_type_agreement_embeddings: torch.FloatTensor,\n antecedent_offsets: torch.FloatTensor):\n\n # Shape: (1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = self._distance_embedding(\n util.bucket_values(antecedent_offsets,\n num_total_buckets=self._num_distance_buckets))\n\n # Shape: (1, 1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)\n\n expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), # batch_size\n antecedent_embeddings.size(1), # num_spans_to_keep\n antecedent_embeddings.size(2), # max_antecedents\n antecedent_distance_embeddings.size(-1)) # embedding_size\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)\n antecedent_genre_embeddings = genre_embedding.view(1, 1, 1, -1).expand_as(antecedent_distance_embeddings)\n feature_embeddings = self._dropout(torch.cat(\n [antecedent_genre_embeddings, realies_same_type_agreement_embeddings, trigger_same_type_agreement_embeddings],-1))\n # ], -1))\n\n # [antecedent_distance_embeddings, antecedent_genre_embeddings, trigger_same_type_agreement_embeddings,\n # realies_same_type_agreement_embeddings], -1))\n # feature_embeddings = self._dropout(torch.cat(\n # [antecedent_genre_embeddings, trigger_same_type_agreement_embeddings], -1\n # ))\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n target_embeddings = 
top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n span_pair_embeddings = torch.cat([target_embeddings,\n antecedent_embeddings,\n antecedent_embeddings * target_embeddings,\n feature_embeddings], -1)\n return span_pair_embeddings", "def build_sense_embedding(target_sense_to_id, word_freq, EMBEDDING_DIM):\r\n res = {}\r\n wordvecs = load_glove(EMBEDDING_DIM)\r\n \r\n for target_sense_list in target_sense_to_id:\r\n for key, _ in target_sense_list.items():\r\n sense_vector = np.zeros(EMBEDDING_DIM)\r\n senses = key.split(',')\r\n n = 0\r\n for sensekey in senses:\r\n #print(sensekey) \r\n if '/' in sensekey:\r\n continue\r\n sense_synset = sc2ss(sensekey)\r\n if sense_synset:\r\n sense_vector += build_sense_vector(sense_synset, word_freq, wordvecs)\r\n n += 1\r\n if n != 0:\r\n res[key] = sense_vector/n\r\n return res", "def tf_word2vec(sentences, vocab, epochs, learning_rate, num_sampled,\n window_size, batch_size, embed_size, tensorboard):\n vocab_size = len(vocab)\n\n # Clears the default graph stack and resets the global default graph;\n # this line is crucial if we want to re-run the class in interactive\n # environment such as jupyter notebook\n tf.reset_default_graph()\n\n # when building out tensorflow's computation graph, it's a good practice to\n # group nodes/operations that have similar purposes together using name_scope;\n # this additional step will give us nicer graph representation in Tensorboard,\n # which is tool that gives us nice graphical representation of the computation\n # graph we have defined\n with tf.name_scope('data'):\n # for target_words:\n # we will use it with tensorflow's loss later, and the function requires rank 2\n # input, that's why there's an extra dimension in the shape\n center_words = tf.placeholder(tf.int32, shape = [batch_size], name = 'center_words')\n target_words = tf.placeholder(tf.int32, shape = [batch_size, 1], name = 'target_words')\n\n with tf.name_scope('embedding_matrix'):\n # the actual word vectors\n embed_matrix = tf.Variable(\n tf.random_uniform([vocab_size, embed_size], -1.0, 1.0), name = 'embed_matrix')\n\n with tf.name_scope('loss'):\n # input -> hidden layer\n embed = tf.nn.embedding_lookup(embed_matrix, center_words, name = 'embed')\n\n # hidden layer -> output layer's weights\n stddev = 1.0 / embed_size ** 0.5\n output_weight = tf.Variable(\n tf.truncated_normal([vocab_size, embed_size], stddev = stddev), name = 'output_weight')\n\n output_bias = tf.Variable(tf.zeros([vocab_size]), name = 'output_bias')\n\n # hidden layer -> output layer + sampled softmax loss\n total_loss = tf.reduce_mean(tf.nn.sampled_softmax_loss( # tf.nn.nce_loss(\n weights = output_weight, biases = output_bias,\n labels = target_words, inputs = embed,\n num_sampled = num_sampled, num_classes = vocab_size), name = 'loss')\n\n # create a summary scalar that reports the loss\n tf.summary.scalar('total_loss', total_loss)\n summary_op = tf.summary.merge_all()\n\n optimizer = tf.train.AdagradOptimizer(learning_rate)\n train_step = optimizer.minimize(total_loss)\n init = tf.global_variables_initializer()\n\n # batch_iters = len(data) // batch_size\n with tf.Session() as sess:\n sess.run(init)\n\n # record the average loss in the last skip_step steps\n history = []\n writer = tf.summary.FileWriter(tensorboard, sess.graph)\n for epoch in trange(epochs):\n iterator = generate_sample(sentences, vocab, window = window_size)\n batch_gen = get_batch(iterator, batch_size)\n\n # 
for _ in range(batch_iters):\n # try:\n centers, targets = next(batch_gen)\n feed_dict = {center_words: centers, target_words: targets}\n _, loss, summary = sess.run([train_step, total_loss, summary_op], feed_dict)\n\n writer.add_summary(summary, epoch)\n history.append(loss)\n\n writer.close()\n word_vectors = sess.run(embed_matrix)\n\n return word_vectors, history", "def get_pretrained_embeddings(source_vocab,embed_df):\r\n \r\n num_tokens = len(source_vocab)\r\n embedding_dim = embed_df.shape[1]\r\n weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32)\r\n \r\n for idx in range(num_tokens):\r\n token = source_vocab.lookup_index(idx)\r\n if token in embed_df.index:\r\n weights[idx,:] = embed_df.loc[token]\r\n else:\r\n weights[idx,:] = np.random.randn(1,embedding_dim)\r\n \r\n embed_tensor = torch.FloatTensor(weights)\r\n return embed_tensor", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+2, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32') # padding vector\n i = 1\n for word in vocab:\n \tif word_vecs.has_key(word):\n \tW[i] = word_vecs[word]\n \tword_idx_map[word] = i\n \ti += 1\n else:\n \tword_idx_map[word] = vocab_size+1\n W[vocab_size+1] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def hybrid_forward(self, F, words, wordsmask, subwords, subwordsmask):\n #pylint: disable=arguments-differ\n wordsmask = F.expand_dims(wordsmask, axis=-1)\n embeddings = F.broadcast_mul(self.embedding(words), wordsmask)\n subword_embeddings = self.subword_embedding(subwords, subwordsmask)\n return embeddings + subword_embeddings", "def vectorize(tokens_list, feature_fns, min_freq, vocab=None):\n ###TODO\n \n features = []\n feature_freq = {}\n vocabulary = {}\n \n # 2 case : for vocab\n # case 1: \n if (vocab == None):\n \n for doc in tokens_list: \n #print('doc#=%d tokens=%s'%(i,doc)) \n data = featurize(doc,feature_fns)\n #print('data=',data)\n \n for feature in data: \n if feature[1] > 0 : \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n if feature[0] not in vocabulary.keys() :\n vocabulary.setdefault(feature[0], None) \n \n features.append(data)\n \n # sort vocab according to features (alphabetical order)\n vacab_list = sorted(feature_freq.keys(), key =lambda x: x,reverse=False)\n \n for colIndex,term in enumerate(vacab_list) :\n #print('colIndex = %d, term = %s'%(colIndex,term))\n vocabulary[term] = colIndex\n\n else: # case 2 \n \n # vocab already present\n #print('Vocab already present')\n vocabulary = vocab.copy() \n \n \n for doc in tokens_list: \n data = featurize(doc,feature_fns) \n \n test_data = [] \n for feature in data: \n # only take feature present in vocab \n if feature[0] in vocabulary.keys():\n #print('feature = ',feature) \n if feature[1] > 0 : \n test_data.append(feature) \n if feature[0] not in feature_freq.keys():\n feature_freq.setdefault(feature[0],1) \n else :\n feature_freq[feature[0]] += 1\n \n #print('test_data = ',len(test_data)) \n features.append(test_data)\n #test_data.clear()\n #print('features = ',features)\n \n \n # build a csr_matrix \n row = []\n col = []\n data = [] \n \n for docID,feat_list in enumerate(features) :\n for term in feat_list:\n if (feature_freq[term[0]] >= min_freq): # (zero values are not stored)\n \n row.append(docID)\n col.append(vocabulary[term[0]])\n data.append(term[1])\n \n #print('row =',row)\n #print('col =',col)\n #print('data=',data)\n \n X = 
csr_matrix((data, (row, col)), shape=(len(features), len(vocabulary)), dtype=np.int64)\n \n #print('X ->')\n #print(X.toarray())\n #print(' size of X = ',X.get_shape())\n \n return(X, vocabulary)", "def _fit_spectral_embedding(self, K, vect_len_ts):\n MAX_ITER = 500\n ABSTOL = 1e-2\n RELTOL = 1e-4\n \n self.vect_len_ts = vect_len_ts\n \n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n \n if self.verbose:\n print('{:6} | {:7} | {:7} | {:7} | {:7} | {:12}\\n'.\n format('iter','r norm', 'eps pri', 's norm', 'eps dual', 'objective'))\n \n # block differentiation matrix\n D = self._generate_block_differentiation_matrix(vect_len_ts)\n \n # vect containing respective lengths of Y and W\n vect_size_tv = vect_len_ts - 1\n \n #number of samples\n nsamples = vect_len_ts.sum()\n \n # kappa: proximal parameter\n kappa = self.reg_param/self.admm_rho\n \n # constants for stopping tolerance\n c_prim = np.sqrt(self.n_clusters*vect_len_ts.sum())\n c_dual = np.sqrt(self.n_clusters*vect_size_tv.sum())\n \n print('obj = {:10.2f}'.format(self._objective_function(K)))\n\n \n for k in range(MAX_ITER):\n \n #pdb.set_trace()\n Wold = np.copy(self.W)\n #Hold = np.copy(self.H)\n Yold = np.copy(self.Y)\n rho = self.admm_rho\n def Hstep_cost_function(H):\n \"\"\"\n cost function used by pymanopt to solve the stiefel manifold problem\n ..math: \\min_H - trace(H^\\top*K*H)/T + \\frac{\\rho}{2} \\|H^\\top D - W + Y \\|_F^2\n ..math: s.t. H^\\top H = I\n ..math: T (number of samples)\n \"\"\" \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost\n \n def egrad(H):\n U = Wold - Yold\n grad_e = -2*(K@H)/nsamples + rho*D@((H.T@D - U).T)\n return grad_e\n \n # ================ H-Update =============\n manifold = Stiefel(vect_len_ts.sum(), self.n_clusters)\n problem = Problem(manifold=manifold, cost=Hstep_cost_function, verbosity = 0)\n #solver = SteepestDescent(maxtime=float('inf'),mingradnorm=1e-8, \n # minstepsize=1e-16, maxiter = self.max_iter) #instantiation pymanopt\n #solver = TrustRegions()\n solver = ConjugateGradient()\n self.H = np.asarray(solver.solve(problem))\n #print('norm variation of H = {}'.format(norm(Hold-self.H, 'fro')))\n #print('H : variation on obj = {}'.format(-np.trace(Hold.T@K@Hold)/nsamples + np.trace(self.H.T@[email protected])/nsamples))\n \n #================= W - update =============\n H_top_times_D = (self.H.T)@D\n target_ell = H_top_times_D + self.Y\n for t in range(self.W.shape[1]):\n self.W[:,t] = self._soft_shrinkage(target_ell[:,t], kappa)\n #print(norm(self.W[:,t] - target_ell[:,t]))\n #print(norm(self.W[:,t] - Wold[:,t]))\n \n #print('norm variation of W = {}'.format(norm(Wold-self.W, 'fro')))\n #print('W: variation on obj = {}'.format(self._columwise_norm(Wold, 1) - self._columwise_norm(self.W, 1)))\n \n \n #================ Y -Updates =============\n self.Y = np.copy(self.Y) + (H_top_times_D - self.W)\n #print('norm variation of Y = {}'.format(norm(Yold-self.Y, 'fro')))\n #print('obj = {:10.2f}'.format(self._objective_function(K)))\n # ============ history ====================\n h['objval'][k] = self._objective_function(K)\n h['r_norm'][k] = norm(H_top_times_D - self.W, 'fro')\n h['s_norm'][k] = self.admm_rho*norm(D@(Wold - self.W).transpose(), 'fro')\n norm_Htop_D = 
norm(H_top_times_D, 'fro')\n norm_D_Ytop = self.admm_rho*norm([email protected](), 'fro') \n h['eps_pri'][k] = c_prim*ABSTOL + RELTOL*np.max([norm(self.W, 'fro'), norm_Htop_D])\n h['eps_dual'][k] = c_dual*ABSTOL + RELTOL*norm_D_Ytop\n \n # verbose\n if self.verbose:\n print('{:6} | {:5.3f} | {:5.3f} | {:5.3f} | {:5.3} | {:10.2f}\\n'.format(k, h['r_norm'][k], h['eps_pri'][k],\n h['s_norm'][k], h['eps_dual'][k], h['objval'][k]))\n \n # check convergence\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n \n self.history = h\n return self", "def source_embedding_fairseq(self):\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.params[\"feature.dim\"], self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def _build(self, ids):\n # Construct embeddings.\n if self._existing_vocab is None:\n if self.EMBEDDINGS not in self._initializers:\n self._initializers[self.EMBEDDINGS] = basic.create_linear_initializer(\n self._vocab_size)\n self._embeddings = tf.get_variable(\n \"embeddings\",\n shape=[self._vocab_size, self._embed_dim],\n dtype=tf.float32,\n initializer=self._initializers[self.EMBEDDINGS],\n partitioner=self._partitioners.get(self.EMBEDDINGS, None),\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n else:\n self._embeddings = tf.get_variable(\n \"embeddings\",\n dtype=tf.float32,\n initializer=self._existing_vocab,\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n\n # Lookup embeddings\n return tf.nn.embedding_lookup(\n self._embeddings, ids, name=\"embedding_lookup\")", "def add_word_embeddings_op(self):\n with tf.variable_scope(\"words\"):\n if self.config.embeddings is None:\n self.logger.info(\"WARNING: randomly initializing word vectors\")\n _word_embeddings = tf.get_variable(\n name=\"_word_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nwords, self.config.dim_word])\n else:\n _word_embeddings = tf.Variable(\n self.config.embeddings,\n name=\"_word_embeddings\",\n dtype=tf.float32,\n trainable=self.config.train_embeddings)\n\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids, name=\"word_embeddings\")\n\n with tf.variable_scope(\"chars\"):\n if self.config.use_chars:\n # get char embeddings matrix\n _char_embeddings = tf.get_variable(\n name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nchars, self.config.dim_char])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings,\n self.char_ids, name=\"char_embeddings\")\n\n # put the time dimension on axis=1\n s = tf.shape(char_embeddings)\n char_embeddings = tf.reshape(char_embeddings,\n shape=[s[0]*s[1], s[-2], self.config.dim_char])\n word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])\n\n # bi lstm on chars\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n _output = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, char_embeddings,\n sequence_length=word_lengths, dtype=tf.float32)\n\n # read and concat output\n _, ((_, output_fw), (_, output_bw)) = _output\n output = tf.concat([output_fw, output_bw], axis=-1)\n\n # shape = (batch size, max sentence length, char hidden size)\n output = tf.reshape(output,\n shape=[s[0], s[1], 2*self.config.hidden_size_char])\n word_embeddings = tf.concat([word_embeddings, output], axis=-1)\n\n self.word_embeddings = 
tf.nn.dropout(word_embeddings, self.dropout)", "def train_word_vectors(self,docs):\n \n \n #may need to remove interpunction too?\n print('Building tfidf vectorizer')\n \n self.tfidf = TfidfVectorizer(**self.tfidf_params)\n \n self.tfidf.fit(docs) \n \n if self.savename is not None:\n with open(self.savename + '_tfidf.obj','wb') as f:\n pickle.dump(self.tfidf,f) \n print('Done training tfidf vectorizer')", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(vocab)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size, k), dtype='float32')\n i = 0\n for word in vocab:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n # W[0] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def tfidf(docs):\n vocab = {}\n df = {}\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in set(terms):\n if len(term) > 0:\n if term not in vocab:\n vocab[term] = count # (index, df)\n df[term] = 1\n count += 1\n else:\n df[term] += 1\n num_docs = len(docs)\n scores = []\n for i in range(0, num_docs):\n scores.append({})\n\n for index in range(0, num_docs):\n terms = re.split(regex, docs[index])\n for term, tf in collections.Counter(terms).most_common():\n if len(term) > 0:\n term_index = vocab[term]\n score = float(tf) * np.log(float(num_docs) / float(df[term]))\n if score > 0.0:\n scores[index][term_index] = score\n\n i_list = []\n j_list = []\n data = []\n\n for i in range(0, num_docs):\n for j, score in scores[i].iteritems():\n i_list.append(i)\n j_list.append(j)\n data.append(score)\n\n matrix = sp.csr_matrix((data, (i_list, j_list)), shape=(num_docs, len(vocab)))\n reverse_map = {v: k for k, v in vocab.iteritems()}\n return matrix, reverse_map.values()", "def word_embeddings(self, feed_seq_batch):\n # compute seq_length\n self.feed_per_seq_length = self.get_per_seq_length(feed_seq_batch)\n\n # compute embedding\n with 
tf.variable_scope(\"word_embedding\"):\n seq_embedding = tf.get_variable(\n name=\"seq_embedding\",\n shape=[self.config.vocab_size, self.config.embed_dim],\n initializer=self.initializer)\n word_embeddings = tf.nn.embedding_lookup(seq_embedding,\n feed_seq_batch)\n if self.train_mode:\n word_embeddings = tf.nn.dropout(\n word_embeddings, keep_prob=self.feed_general_droprate)\n word_embeddings = tf.nn.tanh(word_embeddings)\n\n return word_embeddings", "def get_W(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size + 1, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map" ]
[ "0.74690807", "0.5747046", "0.5677438", "0.56014305", "0.5536787", "0.55277413", "0.5525836", "0.5523767", "0.55162793", "0.5460928", "0.5435253", "0.54200006", "0.53970104", "0.5396664", "0.53539515", "0.53538656", "0.5292858", "0.5253519", "0.52486753", "0.52143943", "0.5204133", "0.52005213", "0.51924676", "0.5178131", "0.5169487", "0.5159528", "0.51541835", "0.5127514", "0.51269877", "0.5110968" ]
0.7925619
0
Returns grid of wrapped class if it exists, otherwise None.
def grid(self):
    if hasattr(self.cls, "grid"):
        return self.cls.grid
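For context, a hypothetical wrapper the method could be attached to; Wrapper and WithGrid are invented for the illustration:

class WithGrid:
    grid = [[0, 0], [0, 0]]

class Wrapper:
    def __init__(self, cls):
        self.cls = cls                 # the wrapped class

    def grid(self):
        if hasattr(self.cls, "grid"):
            return self.cls.grid

print(Wrapper(WithGrid).grid())        # [[0, 0], [0, 0]]
print(Wrapper(int).grid())             # None, because int has no "grid" attribute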
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_grid(self):\r\n return self.grid", "def getGrid(self):\n\n\t\treturn self._grid", "def get_grid( self ):\n\n return self.__grid", "def get_grid(self):\n return self._grid", "def getGrid(self):\n\n\t\t\treturn self._logic.getGrid()", "def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None", "def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None", "def process_grid(self, grid: Grid) -> Grid:", "def grid(self):\n return self._grid", "def grid(self):\n return self.__grid", "def getclsfromcontainer(cls, container):\n for i in range(len(clslist)):\n if container == clslist[i]().getcontainer():\n return clslist[i]\n return None", "def getGrid(self):\n\n return self.board", "def getContainerfromCls(cls, clsname):\n for i in range(len(clslist)):\n if clsname == clslist[i]:\n return clslist[i]().getcontainer()\n return None", "def get_class(self, grp, class_type=\"NXcollection\"):\n coll = [grp[name] for name in grp\n if isinstance(grp[name], h5py.Group) and\n self.get_attr(grp[name], \"NX_class\") == class_type]\n return coll", "def find(self, cls):\r\n for currentClass in self._classesToCheck(cls):\r\n if currentClass in self.config:\r\n return self.config[currentClass]\r\n else:\r\n return None", "def get_grid(grid_url, raw=False):\n fid = parse_grid_id_args(None, grid_url)\n response = v2.grids.content(fid)\n parsed_content = response.json()\n\n if raw:\n return parsed_content\n return Grid(parsed_content, fid)", "def get_square_by_class(self, square_class, from_square=None):\r\n start_index = 0\r\n if from_square is not None:\r\n # don't start at the begining\r\n for i in range(0, len(self.squares)):\r\n if self.squares[i] == from_square:\r\n start_index = i\r\n break\r\n\r\n while True:\r\n if issubclass(self.squares[start_index].__class__, square_class):\r\n return self.squares[start_index]\r\n start_index += 1\r\n if start_index >= len(self.squares):\r\n start_index = 0\r\n\r\n raise SquareNotFound", "def grid_stat_wrapper():\n\n conf = metplus_config()\n return GridStatWrapper(conf, None)", "def find_class(self):\n stack = inspect.stack()\n frame = stack[1][0]\n return frame.f_locals.get('self', None)", "def getone(self, Cl):\n for object in self.ginfo.sprites():\n if isinstance(object, Cl):\n return object\n else:\n return None", "def getContainingGrid( self, point ):\n idx = self.indxHash( point );\n return self.mGrids[idx];", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def _class(self):\n return self.__class", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_game_cell(self, row, col):\n try:\n return GameCell.objects.get(game=self, row=row, col=col)\n except GameCell.DoesNotExist:\n return None", "def 
_class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def corner_grid(self):\n\n if self.pixel_ref == 'corner':\n return self\n else:\n # shift the grid\n x0y0 = ((self.x0 - self.dx / 2.), (self.y0 - self.dy / 2.))\n args = dict(nxny=(self.nx, self.ny), dxdy=(self.dx, self.dy),\n proj=self.proj, pixel_ref='corner', x0y0=x0y0)\n return Grid(**args)" ]
[ "0.6043967", "0.5944432", "0.592956", "0.59174585", "0.5895752", "0.5777044", "0.5777044", "0.56115645", "0.55932224", "0.5587703", "0.5494381", "0.5486588", "0.54390824", "0.53928554", "0.5361043", "0.53522295", "0.53305113", "0.52599454", "0.5226078", "0.5182626", "0.5150893", "0.5105808", "0.50835377", "0.5061093", "0.5033227", "0.5033227", "0.5033227", "0.50289553", "0.50226223", "0.5022271" ]
0.70849097
0
Clean up a series name by removing any . and _ characters, along with any trailing hyphens. It is basically equivalent to replacing all _ and . with a space.
def CleanSerieName(series_name):
    try:
        series_name = re.sub("(\D)\.(?!\s)(\D)", "\\1 \\2", series_name)
        series_name = re.sub("(\d)\.(\d{4})", "\\1 \\2", series_name)  # if it ends in a year then don't keep the dot
        series_name = re.sub("(\D)\.(?!\s)", "\\1 ", series_name)
        series_name = re.sub("\.(?!\s)(\D)", " \\1", series_name)
        series_name = series_name.replace("_", " ")
        series_name = re.sub("-$", "", series_name)
        words = [x.strip() for x in series_name.split()]
        tempword = []
        for word in words:
            if not word.isupper():
                word = capwords(word)
            tempword.append(word)
        new_series_name = " ".join(tempword)
        return new_series_name.strip()
    except TypeError:
        log.debug("CleanSerieName: There is no SerieName to clean")
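The function assumes re, string.capwords, and a module-level log object are available. A quick usage sketch with made-up titles; the expected outputs follow from the substitutions above:

import re
from string import capwords

print(CleanSerieName("the.walking_dead-"))   # "The Walking Dead"
print(CleanSerieName("NCIS.Los_Angeles"))    # "NCIS Los Angeles" (all-caps words are left untouched)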
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sanitize_name(name: str) -> str:\n return re.sub(r\"[^A-Za-z0-9_-]\", \"-\", name)[0:128]", "def sanitize_name(name):\n # For now just change dashes to underscores. Fix this more in the future\n return name.replace(\"-\", \"_\")", "def clean_episode_title(filename):\n new_str = filename.replace('_', ' ').replace('-', ' ')\n return re.sub(r'\\s+', ' ', new_str).strip()", "def remove_extra(name):\n return re.sub(r\"-[\\S\\s]*\", \"\", re.sub(r\"\\([\\w\\W]*\\)\", \"\", name))", "def cleanup(name):\n cleaned_name = name.rstrip(\".\")\n return cleaned_name", "def normalize_name(name):\n return PUNCT_RE.sub('-', name.lower()).strip('-')", "def _normalize_show_name(name):\n\tname = name.casefold()\n\tname = re.sub(\"[^a-z0-9]\", \" \", name)\n\tname = re.sub(\"_\", \" \", name)\n\tname = re.sub(\"season \\d( part \\d)?\", \" \", name)\n\tname = re.sub(\"\\s+\", \" \", name)\n\treturn name", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n return name", "def to_safe_name(name: str) -> str:\n return regex_replace(r'\\-|\\.|:', \"\", name.replace(' ', '_'))", "def _clean_workflow_name(name: str) -> str:\n return REGEX_CHARS_TO_REPLACE.sub(\"-\", name).strip(\"-\")", "def namify(text):\n return slugify(text).replace('-','_')", "def _clean_filename(name):\n return re.sub(\"[^\\\\w .]\", \"\", name)", "def scrub_underscore_suffix(filename):\n scrubbed = re.sub(r\"_[^_]+\\.\", \".\", filename)\n return scrubbed", "def normalize_package_name(_s: str) -> str:\n return _s.replace('_', '-').lower()", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def sanitize_luxcore_name(string):\r\n return re.sub(\"[^_0-9a-zA-Z]+\", \"__\", string)", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def sanitize(mystr):\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)", "def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()", "def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name", "def sanitize(name):\n return re.sub(\"\\\\W|^(?=\\\\d)\", \"_\", name)", "def _sanitize_field_name(self, field_name):\n field_name = field_name.replace(self._field_prefix, '')\n return field_name.replace('.', '_')", "def sanitize_title(title):\n # Discard everything after the colon\n title = title.split(':')[0]\n title.replace('.', '')\n return title.lower()", "def descorize(s):\n return s.replace(\"_\", \" \")", "def sanitize(name):\n return re.sub('\\\\W|^(?=\\\\d)', '_', name)", "def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x", "def filter_underscores(self, string):\n newstring = string.replace('_', '-')\n return newstring", "def sn(string):\n\n return re.sub('[^A-Za-z0-9_.\\\\-/]', '.', string)", "def unmangle_measurement_name(measurement_name):\n measurement_name = measurement_name.replace('_sp_', ' ')\n measurement_name = measurement_name.replace('_dsh_', '-')\n return measurement_name", "def trimname(name):\n while name[-1].isdigit():\n name = name[:-1]\n if name[-1] == '_':\n name = name[:-1]\n return name" ]
[ "0.74064714", "0.7253388", "0.7148532", "0.71050465", "0.70775956", "0.7051443", "0.69524163", "0.69253844", "0.6901493", "0.6870551", "0.6868943", "0.68440765", "0.68252516", "0.6810167", "0.68019074", "0.6784888", "0.6768763", "0.6760372", "0.67535406", "0.67399555", "0.6685387", "0.6679929", "0.66587347", "0.6654267", "0.6647998", "0.66274565", "0.661536", "0.66030294", "0.6598479", "0.65878624" ]
0.799551
0
Return how strong the match is; currently 15 is the best possible match. This function gives the flexibility to change which attribute is most important for matching, or even to let the user set their own preference. release is the filename as it appears in the results from the websites used. If the source matches, the score is increased by 8; if the quality matches, by 4; if the codec matches, by 2; if the release group matches, by 1.
def scoreMatch(release, wanted):
    score = int(0)
    if 'source' in release.keys() and 'source' in wanted.keys():
        if release['source'] == wanted['source']:
            score += 8
    if 'quality' in release.keys() and 'quality' in wanted.keys():
        if release['quality'] == wanted['quality']:
            score += 4
        elif wanted['quality'] == '720p' and release['quality'] == '1080p':
            score += 4
        elif wanted['quality'] == '1080p' and release['quality'] == '720p':
            score += 4
    if 'codec' in release.keys() and 'codec' in wanted.keys():
        if release['codec'] == wanted['codec']:
            score += 2
    if 'releasegrp' in release.keys() and 'releasegrp' in wanted.keys():
        if release['releasegrp'] == wanted['releasegrp']:
            score += 1
    return score
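A hypothetical release/wanted pair to show the scoring; the dict values are invented for the example:

release = {'source': 'WEB-DL', 'quality': '720p', 'codec': 'x264', 'releasegrp': 'GRP'}
wanted  = {'source': 'WEB-DL', 'quality': '720p', 'codec': 'x264', 'releasegrp': 'GRP'}
print(scoreMatch(release, wanted))              # 8 + 4 + 2 + 1 = 15, the best possible match

wanted_1080p = dict(wanted, quality='1080p')
print(scoreMatch(release, wanted_1080p))        # still 15: a 720p release also satisfies a 1080p wish, and vice versa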
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_score(self):\n return self._match_score", "def find_best_match(fpl_teams: List[str], team: str) -> Tuple[str, int]:\n best_ratio = 0.0\n best_match = None\n for t in fpl_teams:\n if fuzz.partial_ratio(t, team) > best_ratio:\n best_ratio = fuzz.partial_ratio(t, team)\n best_match = t\n print(f\"Best match {best_match}/{team}, score {best_ratio}\")\n return best_match, best_ratio", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score", "def best_match(self, available_mtypes, header_mtypes):\n weighted_matches = self._best_weighted_matches(available_mtypes,\n header_mtypes)\n return weighted_matches[0][4]", "def get_current_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? .*? (.*?) .*? .*? 
\\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating", "def getMatchScore(self) -> str:\n score = self.__getDataField(\"score\")\n # some matches do not have a score\n if not score:\n return None\n\n # change scome characters in the score to make it easier afterwards\n return score.strip().replace(\"–\", \"-\")", "def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)", "def PredictMatch(yourName, tgtName, teamModels, db):\n\n teamTgtPipe, teamYourPipe = QueryTeamData(tgtName, yourName, db)\n featureCoefTgt, probTgt = PredictOp(teamTgtPipe, teamYourPipe, tgtName, teamModels)\n featureCoefYour, probYour = PredictOp(teamYourPipe, teamTgtPipe, yourName, teamModels)\n odds = round(probYour / (probYour + probTgt), 2)\n\n # In featureCoefYour, you want INCREASE those with POSTIVE COEF, DECREASE those with NEGATIVE COEF\n # In featureCoefTgt, you want to do the opposite\n\n # reverse both the sign of the coef, and '_op' in features so as to be the same with featureCoefYour\n featureCoefTgt['coef'] = - featureCoefTgt['coef']\n featureCoefTgt.features = [ii[:-3] if \"_op\" in ii else ii + '_op' for ii in featureCoefTgt.features]\n\n # Combine only the most important 10 features\n # featureBoth = featureCoefTgt[11:].append(featureCoefYour[11:])\n\n # Combine only all the most important features\n featureBoth = featureCoefTgt.append(featureCoefYour)\n\n # get action recommendations\n # Somehow the pandas here uses a deprecated para cols, instaed of the new one subset\n #featureBoth.drop_duplicates(subset = 'features', take_last = True, inplace = True)\n featureBoth.drop_duplicates(cols = 'features', take_last = True, inplace = True)\n actions, featureImprove = GetActions(featureBoth)\n Imp = 0.1\n oddsNew = ImprovedScore(tgtName, yourName, teamModels, featureImprove, teamTgtPipe, teamYourPipe, Imp)\n\n return odds, oddsNew, actions", "def _best_matching_movie(movie):\n return (Levenshtein.distance(movie_name, movie.get('title').lower()),\n (0 - movie.get('votes', 0)),\n )", "def quality(self):\n return self.plays * self.number", "def effective_priority (self):\n return self.priority if self.match.is_wildcarded else (1<<16) + 1", "def evaluate_match_quality(matches, thresh=.7):\n marr = np.zeros((len(matches), 2))\n quality = np.zeros((len(matches), 1))\n for i in range(len(matches)):\n marr[i,0] = matches[i][0].distance\n marr[i,1] = matches[i][1].distance\n quality[i] = marr[i, 0] < thresh*marr[i, 1]", "def get_dbot_score(verdict):\n if verdict == 'Malicious':\n return 3\n elif verdict == 'Suspicious':\n return 2\n elif verdict == 'Benign' or verdict == 'Redirector':\n return 1\n else:\n return 0", "def _get_similarity_score(self, dict1, dict2):\n try:\n majorScoreDeterminer1 = ['primaryGenreId']\n majorScoreDeterminer2 = ['genreIds']\n Score = 0 # Base Score\n for items in majorScoreDeterminer2:\n\n for item1 in self._get_app_param_info(dict1, resultCount=1, resultKey=items):\n if item1 in self._get_app_param_info(dict2, resultCount=1, resultKey=items):\n if Score == 0: # Add 50% base score for this category.\n Score += 2 * .5\n Score += 2 * .5 / len(self._get_app_param_info(dict1, resultCount=1, resultKey=items))\n\n for items in majorScoreDeterminer1:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n 
self._get_app_param_info(dict1, resultCount=1, resultKey=items)):\n Score += (3 / len(majorScoreDeterminer1))\n\n nameMatchScore = difflib.SequenceMatcher(None,\n self._get_app_param_info(dict1, resultCount=1,\n resultKey='trackName'),\n self._get_app_param_info(dict2, resultCount=1,\n resultKey='trackName')).ratio()\n Score += nameMatchScore\n\n minorScoreDeterminer = ['isGameCenterEnabled', 'languageCodesISO2A', 'contentAdvisoryRating', 'artistId',\n 'formattedPrice']\n\n for items in minorScoreDeterminer:\n if items == \"formattedPrice\":\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n else:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)):\n Score += (4 / (len(minorScoreDeterminer)))\n Score = round(Score, 1)\n log_str = \"id\" + str(self._get_app_param_info(dict2, resultCount=1, resultKey='trackId')) + \" - \" + str(\n self._get_app_param_info(dict2, resultCount=1, resultKey='trackName')) + \"\\tScore: \" + str(Score)\n except AssertionError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n else:\n return log_str", "def quality_rating(PR):\n \n if PR <= 85:\n quality = \"poor\"\n elif PR < 90:\n quality = \"mediocre\"\n elif PR < 95:\n quality = \"good\"\n else:\n quality = \"great\"\n\n return quality", "def max_score(self):\n return self.raw_possible", "def _get_lip_best(self) -> float:\n pass", "def scoring(self):\n pass", "def main():\n long = give_long()\n short = give_short()\n similarity1 = find_similarity(long, short)\n print('The best match is '+similarity1+'.')", "def get_score(self, red_score, blue_score):\n if red_score < blue_score:\n return 0\n elif red_score > blue_score:\n return 1\n else:\n return 0.5", "def calculate_score(sorted_dice, choice):\n score = NO_MATCH_VALUE()\n str_d = \"\".join(map(str, sorted_dice))\n if choice in range(1, 6+1):\n score = sorted_dice.count(choice) * choice\n elif choice == REC_THREE_OF_A_KIND() and re.search(r'([\\d])\\1{2}', str_d):\n score = sum(sorted_dice)\n elif choice == REC_FOUR_OF_A_KIND() and re.search(r'([\\d])\\1{3}', str_d):\n score = sum(sorted_dice)\n elif choice == REC_FULL_HOUSE():\n if re.search(r'([\\d])\\1{2}([\\d])\\2', str_d) or re.search(r'([\\d])\\1([\\d])\\2{2}', str_d):\n score = FULL_HOUSE_VALUE()\n elif choice == REC_S_STRAIGHT() and re.search(r'(1234|2345|3456|12234|23345|34456|12334|23445|34556)', str_d):\n score = SMALL_STRAIGHT_VALUE()\n 
elif choice == REC_L_STRAIGHT() and re.match(r'(12345|23456)', str_d):\n score = LARGE_STRAIGHT_VALUE()\n elif choice == REC_YAHTZEE() and re.search(r'([\\d])\\1{4}', str_d):\n score = FIRST_YAHTZEE_VALUE()\n elif choice == REC_CHANCE():\n score = sum(sorted_dice)\n return score", "def heuristic_2_reflection(game, player) -> float:\n\n reflection_available_factor = get_reflection_available_factor(game, player)\n\n return float(reflection_available_factor)", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n heuristics_options = {\n \"heuristic_1_center\": heuristic_1_center,\n \"heuristic_2_reflection\": heuristic_2_reflection,\n \"heuristic_3_partition\": heuristic_3_partition,\n \"heuristic_combined_1_2\": heuristic_combined_1_2,\n \"heuristic_combined_1_3\": heuristic_combined_1_3,\n \"heuristic_combined_2_3\": heuristic_combined_2_3,\n \"heuristic_combined_1_2_3\": heuristic_combined_1_2_3,\n \"heuristic_combined_1_2_3_with_improve_score\": heuristic_combined_1_2_3_with_improve_score\n }\n\n return heuristics_options[\"heuristic_combined_1_2_3_with_improve_score\"](game, player)", "def event_details_score_moe(self, g, w):\n thr = self.threat_designation_score(g, w)\n fln = self.filename_score(g, w)\n pth = self.path_score(g, w)\n hsh = self.hash_score(g, w)\n\n file_scores = [fln, pth, hsh]\n file_scores = list(filter(lambda x: x is not None, file_scores))\n\n if not file_scores:\n return thr\n elif thr is None:\n coeffs = [0.5, 1 / float(len(file_scores))]\n return coeffs[1] * (sum(file_scores))\n else:\n coeffs = [0.5, 1 / float(2.0 * len(file_scores))]\n return coeffs[0] * thr + coeffs[1] * (sum(file_scores))", "def get_best_match(self, list):\n raise NotImplementedError", "def compute_scores(self):\n if self.num_classes == 2:\n score_1 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold=0.5,\n )[1]\n\n score_2 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold_ratio=0.5,\n )[1]\n\n score_3 = self.competition_metric(\n impact_threshold=0.5,\n )[1]\n else:\n score_1 = self.detection_metric(threshold=0.1)\n score_2 = self.detection_metric(threshold=0.25)\n score_3 = self.detection_metric(threshold=0.5)\n\n return score_1, score_2, score_3", "def get_review_result(genus, matches):\n\n # Split the matches term, where the first term is the number of genes matched and the second term\n # is the total number of matches possible. 
Then assign a term for the review of results\n sequenced_num = matches.split('/')\n success = 'successful'\n intermediate = 'intermediate'\n not_success = 'unsuccessful'\n\n if len(sequenced_num) > 1 & len(sequenced_num) < 3:\n matched = int(sequenced_num[0])\n total = int(sequenced_num[1])\n\n if 'Escherichia' in genus: # Escherichia has a total of 53 matches\n if total - matched <= 1:\n return success\n elif total - matched <= 5:\n return intermediate\n else:\n return not_success\n elif 'Listeria' in genus: # Listeria has a total of 50 matches\n if total - matched <= 1:\n return success\n elif total - matched <= 5:\n return intermediate\n else:\n return not_success\n elif 'Salmonella' in genus: # Salmonella has a total of 50 matches\n if total - matched <= 1:\n return success\n elif total - matched <= 5:\n return intermediate\n else:\n return not_success\n return \"error\"", "def goals_ratio_to_num_matches(self, team_id, num_matches=1):\n # {{{\n if type(num_matches) is not int or num_matches == 0:\n num_matches = 1\n # this is fastest selecting to compared with concat and append\n # %timeit matches[(matches[\"HID\"] == team_id) | (matches[\"AID\"] == team_id)].sort_index()\n # 1.21 ms ± 14.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n # %timeit pd.concat([matches[matches[\"HID\"] == team_id], matches[matches[\"AID\"] == team_id]]).sort_index()\n # 3.26 ms ± 62.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n # %timeit matches[matches[\"HID\"]==team_id].append(matches[matches[\"AID\"]==team_id]).sort_index()\n # 3.31 ms ± 75.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n matches_containing_team = self.matches[(self.matches[\"HID\"] == team_id) |\n (self.matches[\"AID\"] == team_id)].sort_index()[-1-num_matches:-1]\n\n goals_conceded, goals_scored = np.nan, np.nan\n if not matches_containing_team.empty:\n goals_conceded = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['ASC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['HSC'].sum()\n goals_scored = matches_containing_team[matches_containing_team[\"HID\"] == team_id]['HSC'].sum() + \\\n matches_containing_team[matches_containing_team[\"AID\"] == team_id]['ASC'].sum()\n\n return goals_scored / goals_conceded if goals_conceded != 0 else goals_scored / (goals_conceded + 1)" ]
[ "0.6655822", "0.60846305", "0.6081943", "0.60700995", "0.60530263", "0.6026354", "0.5944691", "0.5893109", "0.5852491", "0.58279943", "0.58237416", "0.5798685", "0.5770047", "0.576194", "0.5688892", "0.5686409", "0.5685571", "0.5665665", "0.56466246", "0.5630043", "0.5586035", "0.5578033", "0.5563272", "0.55343294", "0.55279285", "0.55273074", "0.55240935", "0.5518012", "0.55158526", "0.5510129" ]
0.70330405
0
This function calculates the BMI of a person based on the height in meters and mass in kilograms that they provided
def calculate_bmi(mass = 56, height = 1.5):
    BMI = mass / (height**2)
    return BMI
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bmi(weight, height):\n return weight / height ** 2", "def calculate_bmi(height, weight):\n m_weight = 703 * weight\n m_height = height**2\n bmi = m_weight / m_height\n return bmi", "def calculate_bmi(cls, height_cm, weight_kg):\n if height_cm is None or weight_kg is None:\n return None\n # handle string data\n if type(height_cm) is str or type(weight_kg) is str:\n try:\n height_cm, weight_kg = float(height_cm), float(weight_kg)\n except ValueError:\n return None\n except TypeError:\n return None\n # convert height to meter from centimeter\n height_m = height_cm / 100.0\n # clculate and return bmi value\n return round(weight_kg / (height_m ** 2), 2)", "def bmi(self):\n if self.height == 0:\n raise ValueError(\"Height can't be zero\")\n return self.weight / (self.height * self.height)", "def calculate_bmi(self):\n\n # If not all the data is available, return 0, otherwise the result\n # of the calculation below breaks django's template filters\n if not self.weight or not self.height:\n return 0\n\n weight = self.weight if self.use_metric else AbstractWeight(self.weight, 'lb').kg\n return weight / pow(self.height / decimal.Decimal(100), 2)", "def count_bmi(data):\n height = data['height']\n weight = data['weight']\n ft = math.floor(height)\n inch = weight - ft\n inch = inch / 10\n height = ft * 0.3048 + inch * 0.0254\n return round(weight / (height * height), 2)", "def calc_carbon_herb(height, diameter = 1, age = 1):\n \"\"\"This includes habits: perennial, annual, bulb, climber, biennial\\\n annual/biennial, perennial climber, annual/perennial, corm, annual climber\"\"\"\n \n #convert to imperial\n height /= 3.281 #feet\n diameter /= 2.54 #inches\n \n #print(height, diameter)\n \n #calculate green weight of herb: (above-ground weight) * 1.2\n green_weight = ( diameter**2 * height) * 1.2\n \n #dry weight: average tree is 72.5 dry matter \n dry_weight = 0.725 * green_weight\n \n #weight of carbon: 50% of tree dry weight\n c_weight = 0.5 * dry_weight\n \n #weight of CO2 sequestered\n co2_weight = 3.67 * c_weight\n \n return co2_weight/2.205/1 #convert from lbs to kg, divide by age", "def bmi_calculate(self):\r\n try:\r\n manager_obj = BMI_Manager(self.data)\r\n manager_obj.bmi_calculation()\r\n except Exception as ex:\r\n print(\"Exception in bmi_calculate function\")", "def interact():\n weight = int(input(\"What is your weight in kgs? \"))\n height = int(input(\"What is your height in centimeters? \"))\n print(\"Your BMI is: \", bmi(weight, height/100))", "def hern_bulge_mass(r,b):\n rb = r/b\n return ((rb*rb)/(2*(1+rb)**2.))", "def calculate_basal_metabolic_rate(self, formula=1):\n factor = 5 if self.gender == self.GENDER_MALE else -161\n weight = self.weight if self.use_metric else AbstractWeight(self.weight, 'lb').kg\n\n try:\n rate = ((10 * weight) # in kg\n + (decimal.Decimal(6.25) * self.height) # in cm\n - (5 * self.age) # in years\n + factor)\n # Any of the entries is missing\n except TypeError:\n rate = 0\n\n return decimal.Decimal(str(rate)).quantize(TWOPLACES)", "def bmi_magic(data):\n data['BMI'] = np.nan\n for i, row in data.iterrows():\n data.at[i, \"Height\"] = height_conversion(data.at[i, \"Height\"])\n data.at[i, \"Weight\"] = weight_conversion(data.at[i, \"Weight\"])\n data.at[i, \"BMI\"] = calculate_bmi(data.at[i, \"Height\"], data.at[i, \"Weight\"])\n return data", "def main():\n height = raw_input(\"What is your height (FEET INCHES)? \")\n height = int(height.split(\" \")[0]) * 12 + int(height.split(\" \")[1])\n weight = int(raw_input(\"What is your weight (lbs)? 
\"))\n\n w_part = weight * 720\n h_part = math.sqrt(height)\n\n bmi = w_part / h_part\n\n print \"Being %s inches tall and weighing %s pounds, your BMI is: %s\" % \\\n (height, weight, bmi)\n if bmi in range(19, 26):\n print \"This is considered healthy.\"\n else:\n print \"This is considered unhealthy.\"", "def testClinicalPatientBMI(self):\n attr = self.session.create_visit_attr()\n\n self.util.floatTypeTest(self, attr, \"bmi\")\n\n self.util.floatPropertyTest(self, attr, \"bmi\")", "def performBMItests():\r\n feet = 5\r\n inches = 60\r\n\r\n expectedBMIa = 18.4\r\n poundsA = 92\r\n expectedBMIb = 18.5\r\n poundsB = 92.5\r\n expectedBMIc = 24.9\r\n poundsC = 124.5\r\n expectedBMId = 25.0\r\n poundsD = 125.0\r\n expectedBMIe = 29.9\r\n poundsE = 149.5\r\n expectedBMIf = 30.0\r\n poundsF = 150.0\r\n\r\n expectedResulta = \"Underweight\"\r\n expectedResultb = \"Normal\"\r\n expectedResultc = \"Normal\"\r\n expectedResultd = \"Overweight\"\r\n expectedResulte = \"Overweight\"\r\n expectedResultf = \"Obese\"\r\n\r\n BMIa, resultA = calculateBMI(feet, inches, poundsA)\r\n BMIb, resultB = calculateBMI(feet, inches, poundsB)\r\n BMIc, resultC = calculateBMI(feet, inches, poundsC)\r\n BMId, resultD = calculateBMI(feet, inches, poundsD)\r\n BMIe, resultE = calculateBMI(feet, inches, poundsE)\r\n BMIf, resultF = calculateBMI(feet, inches, poundsF)\r\n\r\n print(\"\")\r\n print(\"\")\r\n bmi_test(expectedBMIa, BMIa, expectedResulta, resultA)\r\n bmi_test(expectedBMIb, BMIb, expectedResultb, resultB)\r\n bmi_test(expectedBMIc, BMIc, expectedResultc, resultC)\r\n bmi_test(expectedBMId, BMId, expectedResultd, resultD)\r\n bmi_test(expectedBMIe, BMIe, expectedResulte, resultE)\r\n bmi_test(expectedBMIf, BMIf, expectedResultf, resultF)\r\n print(\"\")\r\n print(\"\")", "def print_bmi_string(self):\n bmi = self.bmi()\n bmi_str = f\"{self.name} you have a BMI of {bmi:0.2f}.\\n\\n\"\n if bmi < 18.5:\n bmi_str += \"UNDERWEIGHT:\\n Increase your weight.\"\n elif bmi < 25:\n bmi_str += \"NORMAL WEIGHT:\\n Keep your weight that way.\"\n elif bmi < 30:\n bmi_str += \"OVERWEIGHT:\\n Try to lose weight.\"\n else:\n bmi_str += \"OBESITY:\\n Let yourself be assisted by a specialist.\"\n return bmi_str", "def guess_molecular_weight(self, i_seq):\n map_values = self._map_values.get(\"mFo\", None)\n if (map_values is None) : return None\n height = map_values[i_seq]\n mean_carbon = flex.mean(self.carbon_fo_values)\n assert (mean_carbon > 0)\n return 6 * height / mean_carbon", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def bmi_categories():\n # Variable to ensure PEP8 convention pass (amount of characters in line)\n your_bmi = \"your BMI Category is \"\n # Result to user in age below 18\n if int(age) < 18:\n \"\"\"\n If age of user is below 18\n \"\"\"\n if float(bmi) <= 18.5:\n print(f\"{name.capitalize()} {your_bmi}underweight\")\n elif float(bmi) >= 18.5 and float(bmi) <= 24.9:\n print(f\"{name.capitalize()} {your_bmi}normal\")\n elif float(bmi) >= 25 and float(bmi) <= 29.9:\n print(f\"{name.capitalize()} {your_bmi}overweight\")\n else:\n print(f\"{name.capitalize()} {your_bmi}obesity\")\n # Else result to user in age is over 18\n else:\n \"\"\"\n If age of user is equal or over 18 - adults\n \"\"\"\n if float(bmi) <= 16:\n print(f\"{name.capitalize()} {your_bmi}Severe Thinness\")\n elif float(bmi) >= 16 and float(bmi) <= 17:\n print(f\"{name.capitalize()} {your_bmi}Moderate Thinness\")\n elif float(bmi) 
>= 17 and float(bmi) <= 18.5:\n print(f\"{name.capitalize()} {your_bmi}Mild Thinness\")\n elif float(bmi) >= 18.5 and float(bmi) <= 25:\n print(f\"{name.capitalize()} {your_bmi}Normal\")\n elif float(bmi) >= 25 and float(bmi) <= 30:\n print(f\"{name.capitalize()} {your_bmi}Overweight\")\n elif float(bmi) >= 30 and float(bmi) <= 35:\n print(f\"{name.capitalize()} {your_bmi}Obese Class I\")\n elif float(bmi) >= 35 and float(bmi) <= 40:\n print(f\"{name.capitalize()} {your_bmi}Obese Class II\")\n else:\n print(f\"{name.capitalize()} {your_bmi}Obese Class III\")", "def get_mi_char_heights(freq_matrix, mi):\n result = freq_matrix * mi\n result[isnan(result)] = 0\n return result", "def BMI(self, value):\n if self.reg.N:\n self.reg.PC += value", "def mass(self) -> Mass:\n return self.weight", "def gibbs_(dict_, T):\n\tdST = dict_['S298'] + CpT(dict_, T)[0]\n\t\t\n\tdHT = dict_['dH298'] + CpT(dict_, T)[1]/1000\n\t\t\n\treturn (dHT - T*dST/1000)", "def calc_carbon_tree(height, diameter=25, age = 10):\n \"\"\"Height in meter, diameter in cm, age in years\"\"\"\n \"\"\"This includes habits: Tree, Bamboo\"\"\"\n \n #convert to imperial\n height = height/3.281 #feet\n diameter = diameter/2.54 #inches\n \n #calculate green weight of tree: (above-ground weight) * 1.2\n if diameter < 11:\n green_weight = (0.25 * diameter**2 * height) * 1.2\n else:\n green_weight = (0.15 * diameter**2 * height) * 1.2\n \n #dry weight: average tree is 72.5 dry matter \n dry_weight = 0.725 * green_weight\n \n #weight of carbon: 50% of tree dry weight\n c_weight = 0.5 * dry_weight\n \n #weight of CO2 sequestered\n co2_weight = 3.67 * c_weight\n \n return co2_weight/2.205/age #convert from lbs to kg and divide by age", "def getWeight(self) -> float:\n ...", "def grains(self):\n grain_weight = self.mass * kilograms_to_grains\n return grain_weight", "def find_bpm(duration, numbeats):\n dur = duration / 60\n bpm = numbeats / dur\n print('bpm calculated')\n\n if bpm > 300:\n raise TypeError(\"TOOOOOOO HIGH\")\n\n # except ValueError:\n # pass\n # else:\n # logging.info('Calculated BPM: %s', bpm)\n\n return bpm", "def describe_batery(self):\n print(f\"This car has a {self.batery_size}-kWh batery.\")", "def get_bpmf(self, kB=0.001987204134799235, temperature=300.0):\n if len(self._meaningful_energies) == 0:\n return 0.\n\n beta = 1. / temperature / kB\n V_0 = 1661.\n\n nr_samples = self.get_number_translations()\n energies = -beta * self._meaningful_energies\n e_max = energies.max()\n exp_mean = np.exp(energies - e_max).sum() / nr_samples\n\n bpmf = -temperature * kB * (np.log(exp_mean) + e_max)\n\n V_binding = self.get_box_volume()\n correction = -temperature * kB * np.log(V_binding / V_0 / 8 / np.pi**2)\n return bpmf + correction", "def mamajek08_Prot_age(t, BmV, unit='Gyr'):\n return barnes07_Prot_age(t, BmV, unit=unit, a=0.407, b=0.325, c=0.495, n=0.566)" ]
[ "0.83242905", "0.806568", "0.764164", "0.76323813", "0.75619763", "0.7452656", "0.68758476", "0.66670614", "0.65086615", "0.6396343", "0.63882107", "0.62999606", "0.6258748", "0.6254665", "0.61130226", "0.5970074", "0.58918816", "0.5685218", "0.5593287", "0.55461025", "0.554602", "0.5530306", "0.5505567", "0.54922503", "0.54198986", "0.5408683", "0.5363905", "0.5361663", "0.53612983", "0.5339138" ]
0.8592544
0
Test case for team_template_folders_change_stream_get Create a change stream.
def test_team_template_folders_change_stream_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_change_stream_post(self):\n pass", "def test_workflows_change_stream_get(self):\n pass", "def portal_template_folders_change_stream_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_change_stream_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_change_stream_get_with_http_info(**kwargs)\n return data", "def portal_template_folders_change_stream_get_with_http_info(self, **kwargs):\n\n all_params = ['options']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_change_stream_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/change-stream'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'options' in params:\n query_params['options'] = params['options']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='file',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_get_stream(self):\n pass", "def test_workflows_change_stream_post(self):\n pass", "def test_data_source_soaps_change_stream_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def create_stream(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def portal_template_folders_change_stream_post(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_change_stream_post_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_change_stream_post_with_http_info(**kwargs)\n return data", "def portal_template_folders_change_stream_post_with_http_info(self, **kwargs):\n\n all_params = ['options']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_change_stream_post\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/change-stream'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n if 'options' in params:\n form_params.append(('options', params['options']))\n\n body_params = None\n\n # HTTP 
header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='file',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def stream_created(self,stream):\n pass", "def test_stream_message_retention_days_on_stream_creation(self) -> None:\n admin = self.example_user(\"iago\")\n\n streams_raw: List[StreamDict] = [\n {\n \"name\": \"new_stream\",\n \"message_retention_days\": 10,\n \"is_web_public\": False,\n }\n ]\n with self.assertRaisesRegex(JsonableError, \"Must be an organization owner\"):\n list_to_streams(streams_raw, admin, autocreate=True)\n\n streams_raw = [\n {\n \"name\": \"new_stream\",\n \"message_retention_days\": -1,\n \"is_web_public\": False,\n }\n ]\n with self.assertRaisesRegex(JsonableError, \"Must be an organization owner\"):\n list_to_streams(streams_raw, admin, autocreate=True)\n\n streams_raw = [\n {\n \"name\": \"new_stream\",\n \"message_retention_days\": None,\n \"is_web_public\": False,\n }\n ]\n result = list_to_streams(streams_raw, admin, autocreate=True)\n self.assert_length(result[0], 0)\n self.assert_length(result[1], 1)\n self.assertEqual(result[1][0].name, \"new_stream\")\n self.assertEqual(result[1][0].message_retention_days, None)\n\n owner = self.example_user(\"desdemona\")\n realm = owner.realm\n streams_raw = [\n {\n \"name\": \"new_stream1\",\n \"message_retention_days\": 10,\n \"is_web_public\": False,\n },\n {\n \"name\": \"new_stream2\",\n \"message_retention_days\": -1,\n \"is_web_public\": False,\n },\n {\n \"name\": \"new_stream3\",\n \"is_web_public\": False,\n },\n ]\n\n do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=admin)\n with self.assertRaisesRegex(\n JsonableError, \"Available on Zulip Cloud Standard. 
Upgrade to access.\"\n ):\n list_to_streams(streams_raw, owner, autocreate=True)\n\n do_change_realm_plan_type(realm, Realm.PLAN_TYPE_SELF_HOSTED, acting_user=admin)\n result = list_to_streams(streams_raw, owner, autocreate=True)\n self.assert_length(result[0], 0)\n self.assert_length(result[1], 3)\n self.assertEqual(result[1][0].name, \"new_stream1\")\n self.assertEqual(result[1][0].message_retention_days, 10)\n self.assertEqual(result[1][1].name, \"new_stream2\")\n self.assertEqual(result[1][1].message_retention_days, -1)\n self.assertEqual(result[1][2].name, \"new_stream3\")\n self.assertEqual(result[1][2].message_retention_days, None)", "def test_team_template_folders_id_get(self):\n pass", "def portal_templates_change_stream_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_templates_change_stream_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_templates_change_stream_get_with_http_info(**kwargs)\n return data", "def create_subworkflow_file(self, workflow: Workflow, props: PropertySet):", "def testDataStreams(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 1)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, [''])\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 0)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, [])\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertEqual(file_entry.number_of_data_streams, 2)\n\n data_stream_names = []\n for data_stream in file_entry.data_streams:\n data_stream_names.append(data_stream.name)\n\n self.assertEqual(data_stream_names, ['', 'rsrc'])", "def test_splits_streams(self):\n username = 'darth-vader'\n user = create_profile(username)\n\n now = timezone.now()\n streams = [\n {\n 'author': user,\n 'airs_on': now.replace(year=(now.year + 1)),\n 'ends_on': now.replace(hour=(now.hour - 1)),\n 'title': 'Future Stream',\n 'added_on': now\n },\n {\n 'author': user,\n 'airs_on': now.replace(year=(now.year - 1)),\n 'ends_on': now.replace(hour=(now.hour - 1)),\n 'title': 'Previous Stream',\n 'added_on': now\n }\n\n ]\n create_streams(streams)\n\n url = reverse('main_app:user', args=(username,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n future_streams = response.context['future_streams']\n previous_streams = response.context['previous_streams']\n\n self.assertTrue(len(future_streams))\n self.assertTrue(len(previous_streams))\n self.assertEqual(future_streams[0].title, 'Future Stream')\n 
self.assertEqual(previous_streams[0].title, 'Previous Stream')", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def portal_templates_change_stream_get_with_http_info(self, **kwargs):\n\n all_params = ['options']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_templates_change_stream_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplates/change-stream'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'options' in params:\n query_params['options'] = params['options']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='file',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def testGetDataStreams(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 1)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=25,\n location='/a_directory/a_resourcefork', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n data_streams = file_entry._GetDataStreams()\n self.assertEqual(len(data_streams), 2)", "def test_finds_live_stream(self):\n username = 'darth-vader'\n user = create_profile(username)\n\n now = timezone.now()\n streams = [\n {\n 'author': user,\n 'airs_on': now.replace(hour=(now.hour - 1)),\n 'ends_on': now.replace(hour=(now.hour + 1)),\n 'title': 'Live Stream',\n 'added_on': now\n },\n ]\n create_streams(streams)\n\n url = reverse('main_app:user', args=(username,))\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.context['live_stream'])\n self.assertEqual(response.context['live_stream'].title, 'Live Stream')", "def test_stream_publish(self):\n pass", "def create_workflow_file(self, workflow: Workflow, props: PropertySet):", "def test_access_stream(self) -> None:\n # Create a private stream for which Hamlet is the only subscriber.\n hamlet = self.example_user(\"hamlet\")\n\n stream_name = \"new_private_stream\"\n 
self.login_user(hamlet)\n self.common_subscribe_to_streams(hamlet, [stream_name], invite_only=True)\n stream = get_stream(stream_name, hamlet.realm)\n\n othello = self.example_user(\"othello\")\n\n # Nobody can access a stream that doesn't exist\n with self.assertRaisesRegex(JsonableError, \"Invalid stream ID\"):\n access_stream_by_id(hamlet, 501232)\n with self.assertRaisesRegex(JsonableError, \"Invalid stream name 'invalid stream'\"):\n access_stream_by_name(hamlet, \"invalid stream\")\n\n # Hamlet can access the private stream\n (stream_ret, sub_ret) = access_stream_by_id(hamlet, stream.id)\n self.assertEqual(stream.id, stream_ret.id)\n assert sub_ret is not None\n self.assertEqual(sub_ret.recipient.type_id, stream.id)\n (stream_ret2, sub_ret2) = access_stream_by_name(hamlet, stream.name)\n self.assertEqual(stream_ret.id, stream_ret2.id)\n self.assertEqual(sub_ret, sub_ret2)\n\n # Othello cannot access the private stream\n with self.assertRaisesRegex(JsonableError, \"Invalid stream ID\"):\n access_stream_by_id(othello, stream.id)\n with self.assertRaisesRegex(JsonableError, \"Invalid stream name 'new_private_stream'\"):\n access_stream_by_name(othello, stream.name)\n\n # Both Othello and Hamlet can access a public stream that only\n # Hamlet is subscribed to in this realm\n public_stream_name = \"public_stream\"\n self.common_subscribe_to_streams(hamlet, [public_stream_name], invite_only=False)\n public_stream = get_stream(public_stream_name, hamlet.realm)\n access_stream_by_id(othello, public_stream.id)\n access_stream_by_name(othello, public_stream.name)\n access_stream_by_id(hamlet, public_stream.id)\n access_stream_by_name(hamlet, public_stream.name)\n\n # Nobody can access a public stream in another realm\n mit_realm = get_realm(\"zephyr\")\n mit_stream = ensure_stream(mit_realm, \"mit_stream\", invite_only=False, acting_user=None)\n sipbtest = self.mit_user(\"sipbtest\")\n with self.assertRaisesRegex(JsonableError, \"Invalid stream ID\"):\n access_stream_by_id(hamlet, mit_stream.id)\n with self.assertRaisesRegex(JsonableError, \"Invalid stream name 'mit_stream'\"):\n access_stream_by_name(hamlet, mit_stream.name)\n with self.assertRaisesRegex(JsonableError, \"Invalid stream ID\"):\n access_stream_by_id(sipbtest, stream.id)\n with self.assertRaisesRegex(JsonableError, \"Invalid stream name 'new_private_stream'\"):\n access_stream_by_name(sipbtest, stream.name)\n\n # MIT realm users cannot access even public streams in their realm\n with self.assertRaisesRegex(JsonableError, \"Invalid stream ID\"):\n access_stream_by_id(sipbtest, mit_stream.id)\n with self.assertRaisesRegex(JsonableError, \"Invalid stream name 'mit_stream'\"):\n access_stream_by_name(sipbtest, mit_stream.name)\n\n # But they can access streams they are subscribed to\n self.common_subscribe_to_streams(sipbtest, [mit_stream.name], subdomain=\"zephyr\")\n access_stream_by_id(sipbtest, mit_stream.id)\n access_stream_by_name(sipbtest, mit_stream.name)", "def getStream(outstream):\n\n if isinstance(outstream,list):\n # return second element (should be an open handle)\n if outstream[1] is None:\n # open handle if needed\n create_parent_dir(outstream[0])\n outstream[1]=open(outstream[0],'a')\n return outstream[1]\n else:\n return outstream", "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass" ]
[ "0.755507", "0.7026903", "0.66827625", "0.62135065", "0.59119457", "0.58844477", "0.58758533", "0.5808115", "0.5783344", "0.5732833", "0.56628746", "0.5575189", "0.54996955", "0.54252094", "0.5422915", "0.54228514", "0.5402587", "0.534042", "0.5337802", "0.53215986", "0.5318711", "0.53115743", "0.5267184", "0.5259941", "0.5249996", "0.51733327", "0.516844", "0.5166919", "0.5162474", "0.5136616" ]
0.85309476
0
Test case for team_template_folders_change_stream_post Create a change stream.
def test_team_template_folders_change_stream_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_change_stream_get(self):\n pass", "def test_workflows_change_stream_post(self):\n pass", "def portal_template_folders_change_stream_post(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_change_stream_post_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_change_stream_post_with_http_info(**kwargs)\n return data", "def test_team_template_folders_post(self):\n pass", "def portal_template_folders_change_stream_post_with_http_info(self, **kwargs):\n\n all_params = ['options']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_change_stream_post\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/change-stream'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n if 'options' in params:\n form_params.append(('options', params['options']))\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='file',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_workflows_change_stream_get(self):\n pass", "def stream_created(self,stream):\n pass", "def test_data_source_soaps_change_stream_post(self):\n pass", "def test_stream_publish(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def portal_templates_change_stream_post(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_templates_change_stream_post_with_http_info(**kwargs)\n else:\n (data) = self.portal_templates_change_stream_post_with_http_info(**kwargs)\n return data", "def test_stream_message_retention_days_on_stream_creation(self) -> None:\n admin = self.example_user(\"iago\")\n\n streams_raw: List[StreamDict] = [\n {\n \"name\": \"new_stream\",\n \"message_retention_days\": 10,\n \"is_web_public\": False,\n }\n ]\n with self.assertRaisesRegex(JsonableError, \"Must be an organization owner\"):\n list_to_streams(streams_raw, admin, autocreate=True)\n\n streams_raw = [\n {\n \"name\": \"new_stream\",\n \"message_retention_days\": -1,\n \"is_web_public\": False,\n }\n ]\n with self.assertRaisesRegex(JsonableError, \"Must be an organization owner\"):\n 
list_to_streams(streams_raw, admin, autocreate=True)\n\n streams_raw = [\n {\n \"name\": \"new_stream\",\n \"message_retention_days\": None,\n \"is_web_public\": False,\n }\n ]\n result = list_to_streams(streams_raw, admin, autocreate=True)\n self.assert_length(result[0], 0)\n self.assert_length(result[1], 1)\n self.assertEqual(result[1][0].name, \"new_stream\")\n self.assertEqual(result[1][0].message_retention_days, None)\n\n owner = self.example_user(\"desdemona\")\n realm = owner.realm\n streams_raw = [\n {\n \"name\": \"new_stream1\",\n \"message_retention_days\": 10,\n \"is_web_public\": False,\n },\n {\n \"name\": \"new_stream2\",\n \"message_retention_days\": -1,\n \"is_web_public\": False,\n },\n {\n \"name\": \"new_stream3\",\n \"is_web_public\": False,\n },\n ]\n\n do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=admin)\n with self.assertRaisesRegex(\n JsonableError, \"Available on Zulip Cloud Standard. Upgrade to access.\"\n ):\n list_to_streams(streams_raw, owner, autocreate=True)\n\n do_change_realm_plan_type(realm, Realm.PLAN_TYPE_SELF_HOSTED, acting_user=admin)\n result = list_to_streams(streams_raw, owner, autocreate=True)\n self.assert_length(result[0], 0)\n self.assert_length(result[1], 3)\n self.assertEqual(result[1][0].name, \"new_stream1\")\n self.assertEqual(result[1][0].message_retention_days, 10)\n self.assertEqual(result[1][1].name, \"new_stream2\")\n self.assertEqual(result[1][1].message_retention_days, -1)\n self.assertEqual(result[1][2].name, \"new_stream3\")\n self.assertEqual(result[1][2].message_retention_days, None)", "def create_subworkflow_file(self, workflow: Workflow, props: PropertySet):", "def portal_template_folders_change_stream_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_change_stream_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_change_stream_get_with_http_info(**kwargs)\n return data", "def test_meeting_live_stream_update(self):\n pass", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def test_delete_stream(self):\n pass", "def create_stream(self):\n pass", "def test_splits_streams(self):\n username = 'darth-vader'\n user = create_profile(username)\n\n now = timezone.now()\n streams = [\n {\n 'author': user,\n 'airs_on': now.replace(year=(now.year + 1)),\n 'ends_on': now.replace(hour=(now.hour - 1)),\n 'title': 'Future Stream',\n 'added_on': now\n },\n {\n 'author': user,\n 'airs_on': now.replace(year=(now.year - 1)),\n 'ends_on': now.replace(hour=(now.hour - 1)),\n 'title': 'Previous Stream',\n 'added_on': now\n }\n\n ]\n create_streams(streams)\n\n url = reverse('main_app:user', args=(username,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n future_streams = response.context['future_streams']\n previous_streams = response.context['previous_streams']\n\n self.assertTrue(len(future_streams))\n self.assertTrue(len(previous_streams))\n self.assertEqual(future_streams[0].title, 'Future Stream')\n self.assertEqual(previous_streams[0].title, 'Previous Stream')", "def test_create_folder(self):\n test = Server()\n inputs = [['create_folder','oook'],['create_folder','oook']]\n response = ['folder created','Folder already exists. 
Try with another folder name']\n res = []\n for val in inputs:\n res.append(test.create_folder(val))\n self.assertListEqual(res, response)", "def create_workflow_file(self, workflow: Workflow, props: PropertySet):", "def test_successful_subscriptions_notifies_stream(self) -> None:\n invitee = self.example_user(\"iago\")\n invitee_full_name = \"Iago\"\n\n current_stream = self.get_streams(invitee)[0]\n invite_streams = self.make_random_stream_names([current_stream])[:1]\n\n notifications_stream = get_stream(current_stream, self.test_realm)\n self.test_realm.notifications_stream_id = notifications_stream.id\n self.test_realm.save()\n\n self.common_subscribe_to_streams(\n invitee,\n invite_streams,\n extra_post_data=dict(\n announce=\"true\",\n principals=orjson.dumps([self.user_profile.id]).decode(),\n ),\n )\n target_stream = get_stream(invite_streams[0], self.test_realm)\n\n msg = self.get_second_to_last_message()\n self.assertEqual(msg.recipient.type, Recipient.STREAM)\n self.assertEqual(msg.recipient.type_id, notifications_stream.id)\n self.assertEqual(msg.sender_id, self.notification_bot(self.test_realm).id)\n expected_msg = (\n f\"@_**{invitee_full_name}|{invitee.id}** created a new stream #**{invite_streams[0]}**.\"\n )\n self.assertEqual(msg.content, expected_msg)\n\n msg = self.get_last_message()\n self.assertEqual(msg.recipient.type, Recipient.STREAM)\n self.assertEqual(msg.recipient.type_id, target_stream.id)\n self.assertEqual(msg.sender_id, self.notification_bot(self.test_realm).id)\n expected_msg = (\n f\"**Public** stream created by @_**{invitee_full_name}|{invitee.id}**. **Description:**\\n\"\n \"```` quote\\n*No description.*\\n````\"\n )\n self.assertEqual(msg.content, expected_msg)", "def test_invalid_stream_rename(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n stream = self.subscribe(user_profile, \"stream_name1\")\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n # Check for empty name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"\"})\n self.assert_json_error(result, \"Stream name can't be empty!\")\n # Check for long name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"a\" * 61})\n self.assert_json_error(result, \"Stream name too long (limit: 60 characters).\")\n # Check for Cc characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\n\\rname\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")\n # Check for Cn characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\uFFFEame\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")", "def test_register_stream(self):\n pass", "def process_IN_CREATE(self, event):\n try:\n if self.checks(event):\n if ListenerContainer.is_syncing and not event.pathname[:2] == '.#':\n if event.dir:\n ListenerContainer.add_watch(event.pathname)\n ListenerContainer.client.mkdir(event.pathname)\n else:\n count = 0\n while True: # This may have an unending loop\n try:\n ListenerContainer.client.upload(event.pathname)\n break\n except SocketError or error_reply:\n reset()\n else:\n timer = now()\n if event.dir:\n ListenerContainer.add_watch(event.pathname)\n x = [timer, 'MKDIR', event.pathname]\n else:\n x = [timer, 'UPLOAD', event.pathname]\n ListenerContainer.sync_db.quick_push(x)\n except:\n reset()", "def create_streams(streams):\n for stream 
in streams:\n Stream.objects.create(**stream)", "def team_members_change_stream_post(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_change_stream_post_with_http_info(**kwargs)\n else:\n (data) = self.team_members_change_stream_post_with_http_info(**kwargs)\n return data" ]
[ "0.772903", "0.73171365", "0.68941355", "0.62274915", "0.6097967", "0.60745305", "0.60315573", "0.6004187", "0.5965138", "0.5764315", "0.57167226", "0.5641095", "0.5500242", "0.54977274", "0.54891986", "0.54516405", "0.53646666", "0.53120434", "0.530664", "0.5295484", "0.5292626", "0.52829903", "0.5271348", "0.5256404", "0.52446467", "0.5243058", "0.5228189", "0.5220588", "0.5194712", "0.51562375" ]
0.8841244
0
Test case for team_template_folders_count_get Count instances of the model matched by where from the data source.
def test_team_template_folders_count_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass", "def test_workflows_id_templates_count_get(self):\n pass", "def test_count(self):\n\n command = Command()\n modellist = command.get_modellist()\n for model_name, count in modellist:\n # taking model class by it's name\n model = ContentType.objects.get(model=model_name).model_class()\n # testing we've counted objects in this model right\n self.assert_count(model, count)", "def do_count(self, *args):\n count = 0\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n else:\n ''' Get a list of specified instances '''\n for key, obj in storage.all().items():\n key = key.split('.')\n if key[0] == args[0]:\n count += 1\n print(count)", "def group_nested_object_count(request, model, group_uuid):\n num_of_objects = model.objects.filter(group_id=group_uuid).count()\n return num_of_objects", "def test_workflows_count_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def object_count(request, model):\n active_tool_session_id = request.session[\"active_tool_session_id\"]\n num_of_objects = model.objects.filter(\n tool_session_id=active_tool_session_id\n ).count()\n return num_of_objects", "def test_team_template_folders_id_team_get(self):\n pass", "def count(args):\n path = os.path.abspath(args.path)\n total = 0\n\n if args.recursive:\n if os.path.exists(args.path):\n for item in os.listdir(path):\n little_path = os.path.join(path, item)\n if os.path.isfile(little_path):\n total += parse_file_count(little_path, args)\n else:\n total += count(little_path)\n else:\n print(\"EROARE: <\" + args.path +\n \"> invalid, nu putem ajunge acolo\")\n else:\n if os.path.isfile(args.path):\n total += parse_file_count(args.path, args)\n else:\n print(\"EROARE: <\" + args.pattern +\n \"> invalid, nu este fisier\")\n return total", "def count():", "def do_count(self, args):\n args = shlex.split(args)\n if len(args) < 1:\n return\n _nb_objects = 0\n items = storage.all()\n for key in items:\n if items[key].__class__.__name__ == args[0]:\n _nb_objects += 1\n print(_nb_objects)", "def test_data_source_soaps_id_dynamic_datas_count_get(self):\n pass", "def test_properties_count_group_by_group_by_get(self):\n pass", "def count(self, cls=None):\n return len(self.all(cls))", "def test_properties_count_group_by_group_by_and_sub_group_by_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count(self, args):\n counter = 0\n lists = args.split()\n\n if lists[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n objects = storage.all()\n for key in objects:\n name = key.split('.')\n if name[0] == lists[0]:\n counter += 1\n print(counter)", "def team_members_id_team_template_folders_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected 
keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def count(self):\n\n raise NotImplementedError", "def test_team_template_folders_id_head(self):\n pass", "def get_PM_filter_by_counts(request):\n import_file_id = request.GET.get('import_file_id', '')\n\n matched = BuildingSnapshot.objects.filter(\n import_file__pk=import_file_id,\n source_type__in=[2, 3],\n children__isnull=False\n ).count()\n unmatched = BuildingSnapshot.objects.filter(\n import_file__pk=import_file_id,\n source_type__in=[2, 3],\n children__isnull=True\n ).count()\n return {\n 'status': 'success',\n 'matched': matched,\n 'unmatched': unmatched,\n }" ]
[ "0.812971", "0.7428567", "0.7044275", "0.66002345", "0.6333074", "0.6258393", "0.6168583", "0.6167181", "0.6113796", "0.6025949", "0.59740585", "0.59104866", "0.59100795", "0.58996695", "0.58489484", "0.58410746", "0.58345574", "0.58324665", "0.58177435", "0.58169466", "0.58120257", "0.5738905", "0.5738905", "0.5738905", "0.5738905", "0.57345784", "0.5734534", "0.57324535", "0.5731164", "0.57212216" ]
0.8291441
0
Test case for team_template_folders_find_one_get Find first instance of the model matched by filter from the data source.
def test_team_template_folders_find_one_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_workflows_find_one_get(self):\n pass", "def test_data_source_soaps_find_one_get(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def portal_template_folders_find_one_get_with_http_info(self, **kwargs):\n\n all_params = ['filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_find_one_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/findOne'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def test_find_multi_one(self):\n result = Project.objects.find(['project', 'ThisFails'])\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.project)", "def test_team_template_folders_change_stream_get(self):\n pass", "def find_one():\n fmter.tpl._straightline(\"one document\", 100)\n result = users.find_one({})\n print(type(result))\n ppt(result)\n \n fmter.tpl._straightline(\"none result\", 100)\n result = users.find_one({\"_id\": 100})\n print(type(result))\n ppt(result)", "def portal_template_folders_find_one_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_find_one_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_find_one_get_with_http_info(**kwargs)\n return data", "def test_team_template_folders_id_patch(self):\n pass", "def find_exact(self, **kwargs):\n results = list(self.find(**kwargs))\n if len(results) == 1:\n return results[0]\n return None", "def first(self, **kwargs):\n return self.find(**kwargs).first()", "async def get_one(self, where):\n\n pass", "def find_first(cls, **kwargs):\n return 
cls.query.filter_by(**kwargs).first()", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_get_finder(self):\n user_new = self.make_user('user_new')\n self.make_assignment(self.category, user_new, self.role_finder)\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 2)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.category.sodar_uuid)\n )\n self.assertEqual(\n response_data[1]['sodar_uuid'], str(self.project.sodar_uuid)\n )\n self.assertEqual(\n response_data[1],\n {\n 'title': self.project.title,\n 'sodar_uuid': str(self.project.sodar_uuid),\n },\n )", "def find_by_name(folder, name):\n # return only the first entity...\n for entity in find_by(folder, lambda e: e.name == name):\n return entity", "def test_xml_template_get_by_id(self):\n xmlTemplateExpected = XmlTemplate.objects.get(id=1)\n self.assertEqual(XmlTemplate.get_by_id(1), xmlTemplateExpected)", "def test_get_own_template_as_user_returns_template(self):\n mock_request = create_mock_request(user=self.user1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)", "def test_single_get(self, mock_helio_finder, mock_exists):\n static_path = self.finder.find('path/to/component/component.ext')\n self.assertEqual(static_path, 'MOCK_BASE_DIR/path/to/component/static/component.ext')", "def find_cached(self, *args, **kwargs) -> t.Union[FoldersModel, FolderModel]:\n return self.find(*args, **kwargs)", "def find_one(cls, dataset_id):\n return super(cls, cls).find_one({DATASET_ID: dataset_id})" ]
[ "0.6741028", "0.66696125", "0.6659063", "0.6548872", "0.6494747", "0.63703674", "0.63639253", "0.6355142", "0.62466127", "0.5951043", "0.5894491", "0.5852349", "0.5748025", "0.5667436", "0.5662029", "0.56452614", "0.55575687", "0.55110466", "0.55069876", "0.5503777", "0.5471547", "0.54489726", "0.538149", "0.53739333", "0.5372044", "0.535296", "0.5352563", "0.534029", "0.53213763", "0.53082114" ]
0.8466202
0
Test case for team_template_folders_get Find all instances of the model matched by filter from the data source.
def test_team_template_folders_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_team_template_folders_count_get(self):\n pass", "def test_team_template_folders_change_stream_get(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def test_team_template_folders_post(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_workflows_id_templates_get(self):\n pass", "def test_get_result_directories(self):\n pass", "def test_get_all_as_staff_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.staff_user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_get_activity_templates(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_get_all_as_superuser_returns_all_templates(self):\n mock_request = create_mock_request(user=self.superuser)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 3)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def get_queryset(self):\n queryset = File.objects.filter(folder__name=self.folder_name)\n return queryset", "def test_get_all_as_user_returns_accessible_templates(self):\n mock_request = create_mock_request(user=self.user)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 2)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def test_get_all(self, mock_helio_finder, mock_exists):\n static_paths = self.finder.find('path/to/component/component.ext', all=True)\n self.assertEqual(static_paths, ['MOCK_BASE_DIR/path/to/component/static/component.ext',\n 'MOCK_BASE_DIR_2/path/to/component/static/component.ext'])" ]
[ "0.7745991", "0.7599349", "0.7545926", "0.7328151", "0.7193395", "0.70863736", "0.69541967", "0.679594", "0.6721732", "0.6666562", "0.66541135", "0.65030825", "0.6460915", "0.64054143", "0.63062704", "0.62567776", "0.6240918", "0.60979414", "0.59302366", "0.58679736", "0.5838807", "0.5808496", "0.57946205", "0.5763122", "0.57107157", "0.5688925", "0.56585974", "0.5638195", "0.5592347", "0.5571065" ]
0.791822
0
Test case for team_template_folders_id_children_count_get Counts children of TeamTemplateFolder.
def test_team_template_folders_id_children_count_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def portal_template_folders_id_children_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_count_get(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def portal_template_folders_id_children_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_children_post(self):\n pass", "def portal_template_folders_count_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_count_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_count_get_with_http_info(**kwargs)\n return data", "def _count_children(self, item):\n return len(self.tree.get_children(item))", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def GetChildrenCount(self, item, recursively=True):\r\n\r\n return item.GetChildrenCount(recursively)", "def team_members_id_team_template_folders_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_count_get_with_http_info(id, **kwargs)\n return data", "def GetChildrenCount(self, recursively=True):\r\n\r\n 
count = len(self._children)\r\n \r\n if not recursively:\r\n return count\r\n\r\n total = count\r\n\r\n for n in xrange(count):\r\n total += self._children[n].GetChildrenCount()\r\n \r\n return total", "def children_count(self):\n return len(self._children_ids)", "def team_members_id_team_template_folders_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def __getChildrenCount(self, rootId):\n root = self.vc.findViewById(rootId)\n if root:\n return len(root.children())\n else:\n printLog(self.threadName + '[__getChildrenCount] parent view not found.', logging.ERROR)\n return 0", "def child_count(self):\n\t\treturn len(self._children)", "def children_num(self,p):\n counter = 0\n for child in self.children(p):\n counter += 1\n return counter", "def get_num_children(self):\n return len(self.children)", "def portal_template_folders_count_get_with_http_info(self, **kwargs):\n\n all_params = ['where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/count'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n 
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def _children_count(self):\n cnt = 0\n if self.left:\n cnt += 1\n if self.right:\n cnt += 1\n return cnt", "def get_child_entries_count(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetChildEntriesCount', self.handle)", "def n_subdir(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_dir(recursive=False):\n n += 1\n return n", "def leaf_count(self) -> int:\n if self.children == []:\n return 1\n else:\n return sum([x.leaf_count() for x in self.children])", "def test_child_count(self):\n self.shell.onecmd(\"create %s/something ''\" % (self.tests_path))\n self.shell.onecmd(\"create %s/something/else ''\" % (self.tests_path))\n self.shell.onecmd(\"create %s/something/else/entirely ''\" % (self.tests_path))\n self.shell.onecmd(\"create %s/something/else/entirely/child ''\" % (self.tests_path))\n self.shell.onecmd(\"child_count %s/something\" % (self.tests_path))\n expected_output = u\"%s/something/else: 2\\n\" % (self.tests_path)\n self.assertEqual(expected_output, self.output.getvalue())", "def leaf_count(t: Tree) -> int:\n if t.children == []:\n return 1\n else:\n return sum([leaf_count(child) for child in t.children])", "def get_children_count(cur, node):\n sql = \"\"\"\n SELECT\n COUNT(*)\n FROM\n nodes\n WHERE\n parent=%s;\n \"\"\"\n cur.execute(sql, (str(node), ))\n result = cur.fetchone()\n return result['count']", "def test_workflows_id_templates_count_get(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def childCount(self):\n if self.__child is not None:\n return len(self.__child)\n return self._expectedChildCount()", "def count(args):\n path = os.path.abspath(args.path)\n total = 0\n\n if args.recursive:\n if os.path.exists(args.path):\n for item in os.listdir(path):\n little_path = os.path.join(path, item)\n if os.path.isfile(little_path):\n total += parse_file_count(little_path, args)\n else:\n total += count(little_path)\n else:\n print(\"EROARE: <\" + args.path +\n \"> invalid, nu putem ajunge acolo\")\n else:\n if os.path.isfile(args.path):\n total += parse_file_count(args.path, args)\n else:\n print(\"EROARE: <\" + args.pattern +\n \"> invalid, nu este fisier\")\n return total" ]
[ "0.76291406", "0.74576056", "0.7453799", "0.73170596", "0.7294022", "0.6993843", "0.6614962", "0.66143423", "0.65480965", "0.6540002", "0.65165895", "0.6505344", "0.64972985", "0.6445665", "0.637142", "0.6343745", "0.62351745", "0.6225197", "0.62086076", "0.6185676", "0.6160662", "0.6159241", "0.6134905", "0.61035514", "0.6077381", "0.6069097", "0.60095495", "0.59608966", "0.5927636", "0.58994055" ]
0.89132285
0
Test case for team_template_folders_id_children_fk_delete Delete a related item by id for children.
def test_team_template_folders_id_children_fk_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def portal_template_folders_id_children_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_delete(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def portal_template_folders_id_children_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_workflows_id_templates_fk_delete(self):\n pass", "def delete(self, tree_path):\n\t\traise NotImplementedError", "def delete_parent(sender, instance, **kwargs):\n ItemRelation.objects.filter(child_id=instance.item_id).delete()", "def team_members_id_team_template_folders_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_children_get(self):\n pass", "def 
test_remove_childless_on_delete_tree(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"teabaggers\")\n assert delete_tree.size == tree_size - 1", "def test_remove_childless_on_delete_tree2(delete_tree):\n delete_tree.remove(\"teabaggers\")\n assert delete_tree.contains(\"teabaggers\") is False", "def delete_child(self, model):\n assert isinstance(model, self.model_class) # it's a homogeneous collection\n m_id = str(model.get_id())\n assert m_id != None # needs a real id or cid\n assert m_id in self._models\n model._mark_deleted()\n del self._models[m_id]", "def test_delete_item_using_delete(self):\n pass", "def test_delete_child(self):\r\n # Create 2 children of main course.\r\n resp_1 = self.create_xblock(display_name='child 1', category='chapter')\r\n resp_2 = self.create_xblock(display_name='child 2', category='chapter')\r\n chapter1_usage_key = self.response_usage_key(resp_1)\r\n chapter2_usage_key = self.response_usage_key(resp_2)\r\n\r\n course = self.get_item_from_modulestore(self.usage_key)\r\n self.assertIn(chapter1_usage_key, course.children)\r\n self.assertIn(chapter2_usage_key, course.children)\r\n\r\n # Remove one child from the course.\r\n resp = self.client.ajax_post(\r\n self.course_update_url,\r\n data={'children': [unicode(chapter2_usage_key)]}\r\n )\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # Verify that the child is removed.\r\n course = self.get_item_from_modulestore(self.usage_key)\r\n self.assertNotIn(chapter1_usage_key, course.children)\r\n self.assertIn(chapter2_usage_key, course.children)", "def portal_template_folders_id_templates_rel_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def portal_template_folders_id_children_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def _test_delete_mm_document(create_using_pid1=True):\n\n parent, child = _choose_endpoints_and_do_request(\n (client, json_headers, \"DELETE\"),\n (parent_pid, parent_pid_type, child_pid, child_pid_type),\n payload,\n create_using_pid1=create_using_pid1,\n )\n\n _assert_record_relations(parent, expected={\"relations\": {}})\n _assert_record_relations(child, expected={\"relations\": {}})", "def team_members_id_team_template_folders_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when 
calling `team_members_id_team_template_folders_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_id_templates_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def test_00_menu_deletion(self):\r\n cr, uid, Menus = self.cr, self.uid, self.Menus\r\n\r\n # Generic trick necessary for search() calls to avoid hidden menus \r\n ctx = {'ir.ui.menu.full_list': True}\r\n\r\n root_id = Menus.create(cr, uid, {'name': 'Test root'})\r\n child1_id = Menus.create(cr, uid, {'name': 'Test child 1', 'parent_id': root_id})\r\n child2_id = Menus.create(cr, uid, {'name': 'Test child 2', 'parent_id': root_id})\r\n child21_id = Menus.create(cr, uid, {'name': 'Test child 2-1', 'parent_id': child2_id})\r\n\r\n all_ids = [root_id, child1_id, child2_id, child21_id]\r\n\r\n # delete and check that direct children are promoted to top-level\r\n # cfr. explanation in menu.unlink()\r\n Menus.unlink(cr, uid, [root_id])\r\n\r\n remaining_ids = Menus.search(cr, uid, [('id', 'in', all_ids)], order=\"id\", context=ctx)\r\n self.assertEqual([child1_id, child2_id, child21_id], remaining_ids)\r\n\r\n orphan_ids = Menus.search(cr, uid, [('id', 'in', all_ids), ('parent_id', '=', False)], order=\"id\", context=ctx)\r\n self.assertEqual([child1_id, child2_id], orphan_ids)", "def remove_child(self, child_id):\r\n self.children = [ c for c in self.children if c.id!= child_id ]", "def test_remove_middle_child_on_delete_tree(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"teabag\")\n assert delete_tree.size == tree_size - 1", "def delete_entry(\n self,\n the_id: str,\n recursive: bool = False) -> None:\n\n if the_id in self._subtypes:\n # handle all the children\n children = self.subtypes[the_id]\n if children is not None and len(children) > 0:\n if not recursive:\n raise ValueError(\n 'LabelSchema entry for id {} has children. 
Either move children to a '\n 'different parent, or make recursive=True to delete all children.'.format(the_id))\n the_children = children.copy() # unsafe to loop over a changing list\n for entry in the_children:\n self.delete_entry(entry, recursive=True)\n # now, all the children have been deleted.\n del self._subtypes[the_id]\n # remove the entry from the parent's subtypes list\n parent_id = self.get_parent(the_id)\n self.subtypes[parent_id].remove(parent_id)\n # remove entry from labels\n del self._labels[the_id]\n del self._parent_types[the_id]", "def DeleteChildren(self, item):\r\n\r\n self._dirty = True # do this first so stuff below doesn't cause flicker\r\n\r\n self.ChildrenClosing(item)\r\n item.DeleteChildren(self)", "def test_remove_middle_child_on_delete_tree2(delete_tree):\n delete_tree.remove(\"teabag\")\n assert delete_tree.contains(\"teabag\") is False", "def test_user_delete_reddit_child(self):\n with app.app_context():\n with self.client:\n self.create_user()\n self.login_user()\n # link a reddit child model to test\n reddit_usr = Reddit_User(username=\"reddit_child\",\n refresh_token=\"notactuallyatoken\",\n user_id=current_user.id)\n db.session.add(reddit_usr)\n db.session.commit()\n # Make sure the reddit child exists\n reddit_usr = Reddit_User.query.first()\n self.assertFalse(reddit_usr is None)\n self.assertTrue(reddit_usr.user_id == current_user.id)\n # user delete\n response = self.client.post(url_for(\"account_delete\"),\n data = {'password': \"password\"})\n user = User.query.first()\n self.assertTrue(user is None)\n reddit_usr = Reddit_User.query.first()\n self.assertTrue(reddit_usr is None)\n return True", "def team_members_id_image_folders_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_image_folders_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_image_folders_fk_delete_with_http_info(id, fk, **kwargs)\n return data" ]
[ "0.74967825", "0.73387957", "0.7176434", "0.71450096", "0.70276856", "0.69281375", "0.68056697", "0.6547637", "0.64695954", "0.6458936", "0.64097804", "0.63891387", "0.62271225", "0.6220188", "0.6212821", "0.62127", "0.6198733", "0.6152488", "0.6083034", "0.60765576", "0.60492444", "0.59937686", "0.59825224", "0.59014755", "0.588867", "0.5869347", "0.5862347", "0.58463734", "0.5845767", "0.58204496" ]
0.8815056
0
Test case for team_template_folders_id_children_fk_get Find a related item by id for children.
def test_team_template_folders_id_children_fk_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_children_get(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def portal_template_folders_id_children_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def portal_template_folders_id_children_fk_get_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_get`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def portal_template_folders_id_children_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_put\" % key\n 
)\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_id_children_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_get_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_find_one_get(self):\n pass", "def test_workflows_id_templates_fk_get(self):\n pass", "def portal_template_folders_id_children_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del 
header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[PortalTemplateFolder]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_exists_get(self):\n pass", "def portal_template_folders_id_children_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_id_children_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_team_template_folders_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = 
self.team_members_id_team_template_folders_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_get_children_category(self):\n children = self.category.get_children()\n self.assertEqual(children[0], self.project)", "def google_get_children(request):\n\n file_id = request.data['id']\n file_name = request.data['name']\n file_is_folder = request.data['is_folder']\n\n # if the length of children is greater than 0, this file must have already been used\n # in an api request so it already has all the children.\n # There is no other way to populate the children variable other than to make\n # an api request to this endpoint.\n try:\n if len(request.data['children']) > 0:\n return JsonResponse(request.data)\n\n except KeyError:\n pass\n\n responseData = {\n 'id': file_id,\n 'toggled': False,\n 'active': True,\n 'name': file_name,\n }\n # This line now checks if it has been previously determined a folder before making any google api calls.\n if file_is_folder or google.is_folder(google.get_metadata(file_id)):\n children = google.children_search(file_id)\n responseData['is_folder'] = True\n responseData['children'] = filterGChildrenResponse(children)\n\n return JsonResponse(responseData)", "def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ...", "def test_get_children(self):\n c1 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c2 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c3 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c4 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c5 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n c6 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n\n self.hiarc_collections.add_child_to_collection(c1.key, c2.key)\n self.hiarc_collections.add_child_to_collection(c1.key, c3.key)\n self.hiarc_collections.add_child_to_collection(c1.key, c4.key)\n self.hiarc_collections.add_child_to_collection(c2.key, c5.key)\n self.hiarc_collections.add_child_to_collection(c4.key, c6.key)\n\n children = self.hiarc_collections.get_collection_children(c1.key)\n assert len(children) == 3\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c2)), None) is not None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c3)), None) is not None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c4)), None) is not None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c5)), None) is None\n assert next((c for c in children if self.hiarc_util.compare_dict_to_entity(\n c, c6)), None) is None", "def team_members_id_team_template_folders_fk_get_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise 
ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_get`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_team_template_folders_fk_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TeamTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)", "def test_get_children_project(self):\n children = self.project.get_children()\n self.assertEqual(children.count(), 0)", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)" ]
[ "0.80349183", "0.70777005", "0.69962776", "0.6920054", "0.6870119", "0.67686564", "0.67260855", "0.6719751", "0.655585", "0.62000024", "0.5972196", "0.5950351", "0.59449637", "0.592893", "0.5920968", "0.59056914", "0.58841914", "0.58657634", "0.5813402", "0.5782501", "0.5755878", "0.57441425", "0.56980336", "0.55961955", "0.55933654", "0.5569702", "0.5559805", "0.55374163", "0.5533122", "0.55055857" ]
0.8680469
0
Test case for team_template_folders_id_children_fk_put Update a related item by id for children.
def test_team_template_folders_id_children_fk_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def portal_template_folders_id_children_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def portal_template_folders_id_children_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_workflows_id_templates_fk_put(self):\n pass", "def team_members_id_team_template_folders_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_children_count_get(self):\n pass", "def 
test_with_children(self):\n parent = RouteFactory.create(url='/old-branch/')\n child = ChildRouteFactory.create(slug='leaf', parent=parent)\n new_url = '/new-branch/'\n\n with self.assertNumQueries(1):\n # UPDATE \"routes_route\"\n # SET \"url\" = CONCAT('/new-branch/', SUBSTRING(\"routes_route\".\"url\", 13))\n # WHERE \"routes_route\".\"url\"::text LIKE '/old-branch/%'\n parent.move_to(new_url, move_children=True)\n\n # Because the branch object was available, we'd expect it to update.\n self.assertEqual(parent.url, new_url)\n # ...but it's impractical to expect all in-memory objects to update.\n self.assertEqual(child.url, '/old-branch/leaf/')\n # Once refreshed from the db, however, leaf should have updated.\n child.refresh_from_db()\n self.assertEqual(child.url, '/new-branch/leaf/')", "def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def portal_template_folders_id_templates_rel_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def team_members_id_team_template_folders_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_team_template_folders_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # 
Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TeamTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_do_insert_child(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n\r\n _error_code, _msg = DUT.do_insert(revision_id=1, parent_id=1)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\r\n \"RAMSTK SUCCESS: Adding one or more items to the RAMSTK Program \"\r\n \"database.\")\r\n assert DUT.last_id == 4\r\n\r\n DUT.do_delete(DUT.last_id)", "def update_children(page, prefix=None, walked_pages=None):\n walked_pages = walked_pages if walked_pages else []\n # Perfix used to determine the children path.\n page_segment = page.path\n if prefix:\n page_segment = prefix + '/' + page_segment\n page.real_path = page_segment\n page.save()\n for child in page.children.all():\n # Update the real path for this descendant.\n if child in walked_pages:\n # This page has been processed before, ignore.\n continue\n walked_pages.append(child)\n update_children(child, page_segment, walked_pages)\n return True", "def team_members_id_image_folders_rel_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_image_folders_rel_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_image_folders_rel_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", "def test_update_child_category(self):\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')", "def test_team_template_folders_id_parent_get(self):\n pass", "def team_members_id_image_folders_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_image_folders_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_image_folders_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def team_members_id_team_image_folders_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_image_folders_fk_put_with_http_info(id, fk, 
**kwargs)\n else:\n (data) = self.team_members_id_team_image_folders_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def portal_template_folders_id_templates_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def updateChildren(parentId, deletingParent=False):\n Dashboard.objects(parent=parentId).update(set__hasParentChanged=True)\n if deletingParent:\n Dashboard.objects(parent=parentId).update(unset__parent=1)" ]
[ "0.7230349", "0.71743786", "0.713646", "0.7025978", "0.6966624", "0.6745306", "0.6649519", "0.6372774", "0.62732536", "0.6084119", "0.60431314", "0.59393644", "0.59389395", "0.5871714", "0.57597464", "0.5710098", "0.56835604", "0.5659726", "0.5648161", "0.561432", "0.55963826", "0.55958486", "0.55821973", "0.557301", "0.556531", "0.5534598", "0.550289", "0.54573584", "0.5398222", "0.5382721" ]
0.8613742
0
Test case for team_template_folders_id_children_get Queries children of TeamTemplateFolder.
def test_team_template_folders_id_children_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def portal_template_folders_id_children_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_get_with_http_info(id, **kwargs)\n return data", "def portal_template_folders_id_children_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[PortalTemplateFolder]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def portal_template_folders_id_children_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def portal_template_folders_id_children_post_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise 
TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_id_children_fk_get_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_get`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n 
header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_get_children_project(self):\n children = self.project.get_children()\n self.assertEqual(children.count(), 0)", "def portal_template_folders_id_children_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_id_exists_get(self):\n pass", "def google_get_children(request):\n\n file_id = request.data['id']\n file_name = request.data['name']\n file_is_folder = request.data['is_folder']\n\n # if the length of children is greater than 0, this file must have already been used\n # in an api request so it already has all the children.\n # There is no other way to populate the children variable other than to make\n # an api request to this endpoint.\n try:\n if len(request.data['children']) > 0:\n return JsonResponse(request.data)\n\n except KeyError:\n pass\n\n responseData = {\n 'id': file_id,\n 'toggled': False,\n 'active': True,\n 'name': file_name,\n }\n # This line now checks if it has been previously determined a folder before making any google api calls.\n if file_is_folder or google.is_folder(google.get_metadata(file_id)):\n children = google.children_search(file_id)\n responseData['is_folder'] = True\n responseData['children'] = filterGChildrenResponse(children)\n\n return JsonResponse(responseData)", "def portal_template_folders_id_children_post(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_post_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_post_with_http_info(id, **kwargs)\n return data", "def portal_template_folders_id_children_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not 
header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def get_folder_children(self, folder_id):\n results = (self.drive_api.files().list(\n fields=\"files(id, name, mimeType)\",\n q=\"'{}' in parents\".format(folder_id)\n ).execute())\n return results.get(\"files\", [])", "def GetChildren(self, *args, **kwargs):\n pass", "def portal_template_folders_id_children_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_templates_post(self):\n pass", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def 
test_get_children_category(self):\n children = self.category.get_children()\n self.assertEqual(children[0], self.project)", "def portal_template_folders_id_children_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)" ]
[ "0.8076144", "0.7571682", "0.75399965", "0.6990369", "0.6945252", "0.6776617", "0.6618336", "0.6605643", "0.6496574", "0.6462159", "0.6442438", "0.6423893", "0.6389917", "0.6339748", "0.63042325", "0.6284398", "0.61475986", "0.608658", "0.60053587", "0.59573615", "0.5838571", "0.5834793", "0.5823457", "0.58058554", "0.57818294", "0.57341796", "0.5720312", "0.57187086", "0.5704503", "0.5682495" ]
0.8509956
0
Test case for team_template_folders_id_children_post Creates a new instance in children of this model.
def test_team_template_folders_id_children_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_team_template_folders_post(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def portal_template_folders_id_children_post(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_post_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_post_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_id_children_count_get(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def portal_template_folders_id_children_post_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_post`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_id_children_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def create_child(self):\n raise NotImplementedError", "def create_new_child(self,instance):\n\t\tnew_child = self.tree.makeTree(self.tree.root, self.tree)\n\t\tnew_child.utility.increment_counts(instance)\n\t\tself.tree.children.append(new_child)", "def test_team_template_folders_id_put(self):\n pass", "def 
create_children(self):\n actionCount = len(self.availableActions)\n self.children = [None] * actionCount\n\n # Split creation into multiple threads if this is the master node.\n if self.level == 0 and USE_THREADS:\n threads = [None] * actionCount\n for idx in range(actionCount):\n threads[idx] = threading.Thread(target=create_child, args=(self, idx))\n threads[idx].start()\n for t in threads:\n t.join()\n else:\n for idx in range(actionCount):\n create_child(self, idx)\n # Stop making child branches if the most recent child branch already found lethal.\n if self.children[idx].get_max_win_strength() == WIN_VALUE:\n self.children = self.children[:idx+1]\n break", "def create_child(self, **kw):\n m = self.model_class.create(**kw)\n self.add(m)\n return m", "def portal_template_folders_post(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_post_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_post_with_http_info(**kwargs)\n return data", "def manage_children(request, pk):\n\n parent = get_object_or_404(models.Parent, id=pk)\n\n if request.method == 'POST':\n formset = forms.ChildrenFormset(request.POST, instance=parent)\n if formset.is_valid():\n formset.save()\n return redirect(reverse('nestedforms:manage_children', kwargs={\"pk\": parent.id}))\n else:\n formset = forms.ChildrenFormset(instance=parent)\n\n return render(request, 'manage_children.html', {\n 'parent': parent,\n 'children_formset': formset})", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_change_stream_post(self):\n pass", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def portal_template_folders_id_children_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 
'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_create_parent(self):\n response = self.client.post(self.url, data=json.dumps(self.payload), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n self.assertEqual(Parent.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload['email']).exists())\n user_id = User.objects.get(username=self.payload['email']).id\n self.assertTrue(Parent.objects.filter(user_id=user_id).exists())", "def _create(self, name: str, parent_id: str) -> CreateFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.create\n request_obj: CreateFolderRequestModel = endpoint.load_request(\n name=name, parent_id=parent_id\n )\n response: CreateFolderResponseModel = endpoint.perform_request(\n http=self.auth.http, request_obj=request_obj\n )\n return response", "def create_hierarchy(self):\n\t\tpass", "def test_workflows_id_templates_post(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):\n for _ in (post_media_dir,):\n _path = path.split(_(instance, \"\", prepend=prepend))[0]\n try:\n mkdir(_path)\n except FileExistsError:\n pass\n except FileNotFoundError:\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _campaign = instance.campaign\n _ministry = _campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.name\n raise AttributeError(e)\n\n # NOTE: this is infinitely recursive if `prepend` does not lead to correct directory\n create_news_post_dir(instance, prepend=prepend)" ]
[ "0.7611713", "0.6998816", "0.68596876", "0.68441486", "0.6784264", "0.67721206", "0.6636559", "0.622771", "0.6163549", "0.6042707", "0.6032051", "0.5965675", "0.5883691", "0.5852412", "0.5784356", "0.5633116", "0.56085575", "0.5578633", "0.5532604", "0.55291253", "0.54448336", "0.5441727", "0.54323524", "0.5356782", "0.534848", "0.53468716", "0.5334006", "0.5319567", "0.5286813", "0.52641207" ]
0.84299403
0
Test case for team_template_folders_id_delete Delete a model instance by {{id}} from the data source.
def test_team_template_folders_id_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_workflows_id_templates_fk_delete(self):\n pass", "def delete(self, _id):", "def test_delete_activity_template(self):\n pass", "def test_delete_team(self):\n pass", "def test_delete_namespaced_template(self):\n pass", "def _delete(self, id: str) -> DeleteFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.delete\n response: DeleteFolderResponseModel = endpoint.perform_request(http=self.auth.http, id=id)\n return response", "def test_delete_run(self):\n pass", "def test_delete_device_template(self):\n pass", "def test_workflows_id_delete(self):\n pass", "def test_xml_template_delete(self):\n XmlTemplate.delete_by_id(1)\n self.assertEqual(XmlTemplate.objects.count(), 1)\n self.assertFalse(XmlTemplate.objects.filter(pk=1).exists())", "def test_variablepresentations_id_delete(self):\n pass", "def test_datatransformationsetups_id_delete(self):\n pass", "def test_coupledmodels_id_delete(self):\n pass", "def delete(self, cls, id):\n pass", "def test_teams_delete_team_v1(self):\n pass", "def test_delete(self):\n pass", "def test_delete_case(self):\n pass", "def test_variables_id_delete(self):\n pass", "def delete(self, id):\n raise NotImplementedError", "def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])", "def test_delete_findings(upload, test_id):\n check_delete()\n upload.test_delete_findings(test_id)", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def test_handle_delete(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"12345\"\n test_user = User(\"userid\")\n test_user.github_id = \"1234\"\n team.add_team_lead(\"1234\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (f\"Team brs deleted\", 200))\n self.db.delete.assert_called_once_with(Team, \"12345\")\n self.gh.org_delete_team.assert_called_once_with(int(\"12345\"))", "def delete(self, template_id: str):\n url = API_PATH[\"flairtemplatedelete\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url, data={\"flair_template_id\": template_id})", "def test_delete_device_by_id(self):\n pass", "def test_delete_team_member(self):\n pass", "def delete_model(self, request, instance):\n pass", "def DELETE(self):\n ids = self.context.objectIds()\n self.context.manage_delObjects(ids)\n self.context.createTemplate()\n return self.request.response.setStatus(200)" ]
[ "0.8241988", "0.7741044", "0.709706", "0.69458854", "0.6785604", "0.6664069", "0.6638059", "0.66359246", "0.6572012", "0.6536794", "0.6530212", "0.6513618", "0.6498237", "0.6491295", "0.649003", "0.6482968", "0.6470163", "0.64554095", "0.6455347", "0.6439067", "0.6428669", "0.6387809", "0.6377268", "0.6370539", "0.6351905", "0.6342345", "0.6331193", "0.6313651", "0.6300566", "0.626585" ]
0.8541751
0
Test case for team_template_folders_id_exists_get Check whether a model instance exists in the data source.
def test_team_template_folders_id_exists_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_exists_true(self):\n self.assertTrue(SampleTemplate.exists(self.test_study.id))", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_exists_true(self):\n self.assertTrue(Sample.exists(self.sample_id, self.sample_template))", "def test_workflows_id_exists_get(self):\n pass", "def test_data_source_soaps_id_exists_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def exist(self):", "def exists(self):\n try:\n select_template(self.get_paths())\n return True\n except TemplateDoesNotExist:\n return False", "def test_exists_true(self):\n self.assertTrue(PrepSample.exists(self.sample_id, self.prep_template))", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_exists_false(self):\n self.assertFalse(Sample.exists('Not_a_Sample', self.sample_template))", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def exists(self):\n return True", "def exists(self):\n return True", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def exists(self):\n return Path(self.model_dir).exists()", "def test_exists(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n BaseSample.exists('SKM7.640188', SampleTemplate(1))", "def exists(self, obj):\n return False", "def check_get_query_result_if_exists(self, model, *args, **kwargs):\n # Allows dynamic get querysets\n queryset = _get_queryset(model)\n\n try:\n # Put the args and kwargs in the filter for filtering\n exists = queryset.get(*args, **kwargs)\n return True\n except queryset.model.DoesNotExist as e:\n # If queryset does not exist. Return False\n return False", "def check_model_exists(ckpt):\n expected_data = ckpt + \".data-00000-of-00001\"\n return os.path.exists(expected_data)", "def test_team_template_folders_id_patch(self):\n pass", "def exists(path):\n return get_instance(path).exists(path)" ]
[ "0.7228429", "0.7048265", "0.69274354", "0.6834718", "0.6802283", "0.6684083", "0.66372013", "0.65680605", "0.6373904", "0.63232", "0.6322095", "0.63205624", "0.6253189", "0.61801267", "0.61632395", "0.6132301", "0.6129887", "0.6092639", "0.6070359", "0.60247386", "0.6021102", "0.6021102", "0.6015461", "0.6008077", "0.59834915", "0.59748155", "0.5973477", "0.5955209", "0.5952993", "0.5885278" ]
0.8405338
0
Test case for team_template_folders_id_get Find a model instance by {{id}} from the data source.
def test_team_template_folders_id_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_workflows_id_templates_get(self):\n pass", "def test_workflows_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_xml_template_get_by_id(self):\n xmlTemplateExpected = XmlTemplate.objects.get(id=1)\n self.assertEqual(XmlTemplate.get_by_id(1), xmlTemplateExpected)", "def test_team_template_folders_id_templates_post(self):\n pass", "def get_object(id):", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def _get_template_by_id(self, template_id):\n raise NotImplementedError()", "def get(self, _id):", "def test_team_template_folders_change_stream_get(self):\n pass", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_team_template_folders_count_get(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_get_case_by_id(self):\n pass" ]
[ "0.7867408", "0.77084565", "0.7705121", "0.7619714", "0.7559673", "0.71020705", "0.69662166", "0.69017416", "0.67775816", "0.67593014", "0.668215", "0.6330275", "0.6270727", "0.62321335", "0.6192319", "0.61470085", "0.6133207", "0.5968212", "0.59066397", "0.59013623", "0.58498764", "0.5754588", "0.56507635", "0.56483763", "0.56009054", "0.55979455", "0.55895895", "0.5585329", "0.55603594", "0.5500261" ]
0.80761766
0
Test case for team_template_folders_id_head Check whether a model instance exists in the data source.
def test_team_template_folders_id_head(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_count_get(self):\n pass", "def test_exists_true(self):\n self.assertTrue(SampleTemplate.exists(self.test_study.id))", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def test_exists_true(self):\n self.assertTrue(PrepTemplate.exists(1))", "def test_team_template_folders_post(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_exists_true(self):\n self.assertTrue(Sample.exists(self.sample_id, self.sample_template))", "def exists(self):\n try:\n select_template(self.get_paths())\n return True\n except TemplateDoesNotExist:\n return False", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_team_template_folders_id_children_post(self):\n pass", "def test_workflows_id_exists_get(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass" ]
[ "0.80098855", "0.7260974", "0.7205708", "0.70972705", "0.69902855", "0.6882515", "0.68041307", "0.66723204", "0.647952", "0.64639044", "0.6451148", "0.6378563", "0.63705957", "0.6277258", "0.620578", "0.6189444", "0.6182183", "0.6142092", "0.60492706", "0.5957383", "0.5850607", "0.58407235", "0.58217025", "0.5798582", "0.57684606", "0.57337934", "0.5684423", "0.56645435", "0.56622195", "0.5658644" ]
0.76032066
1
Test case for team_template_folders_id_parent_get Fetches belongsTo relation parent.
def test_team_template_folders_id_parent_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def get_parent(self):\n if not self._parent:\n self._parent = yield self.parent_resource.get(self.parent_id)\n\n raise Return(self._parent)", "def get_parent(self):\n parent_id = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"parentId\", None)\n if parent_id is None:\n return None\n else:\n return DSSProjectFolder(self.client, parent_id)", "def test_team_template_folders_id_children_get(self):\n pass", "def get_parent(self) :\n return self.parent", "def find_parent(self):\n pass", "def get_parent(self, **kwargs):\n from canvasapi.course import Course\n from canvasapi.group import Group\n\n response = self._requester.request(\n \"GET\",\n \"{}s/{}\".format(self.parent_type, self.parent_id),\n _kwargs=combine_kwargs(**kwargs),\n )\n\n if self.parent_type == \"group\":\n return Group(self._requester, response.json())\n elif self.parent_type == \"course\":\n return Course(self._requester, response.json())", "def get_parent(self, **kwargs):\n from canvasapi.course import Course\n from canvasapi.group import Group\n\n response = self._requester.request(\n \"GET\",\n \"{}s/{}\".format(self.parent_type, self.parent_id),\n _kwargs=combine_kwargs(**kwargs),\n )\n\n if self.parent_type == \"group\":\n return Group(self._requester, response.json())\n elif self.parent_type == \"course\":\n return Course(self._requester, response.json())", "def test_team_template_folders_id_get(self):\n pass", "def parent(self, parent_object, limit_parent_language=True):\n return self.all().parent(parent_object, limit_parent_language)", "def parent(self):\n address = self.parent_address\n try:\n parent = Page.objects.get(address=address)\n except Page.DoesNotExist:\n parent = None\n\n return parent", "def get_parent_id(self):\n return self._parent_id", "def _get_parent_record(self) -> Link:\n rel = \"parent\"\n href = self.api_endpoint\n return Link(href=href, rel=rel)", "def portal_template_folders_id_parent_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'refresh']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_parent_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_parent_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/parent'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'refresh' in params:\n query_params['refresh'] = params['refresh']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 
'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def get_parent_object_from_request(self, request):\n resolved = resolve(request.path_info)\n if 'object_id' in resolved.kwargs:\n return self.parent_model.objects.get(pk=resolved.kwargs['object_id'])\n return None", "def find_parent_of(self, *args):\n return _ida_hexrays.citem_t_find_parent_of(self, *args)", "def get_parent(self, parent_infos, root_object):\n if parent_infos and len(parent_infos) == 2:\n name = parent_infos[0]\n uuid = parent_infos[1]\n\n singular_name = Utils.get_singular_name(name)\n parent = self.get_instance(singular_name)\n parent.id = uuid\n\n try:\n (parent, connection) = parent.fetch()\n except Exception, ex:\n Printer.raise_error(\"Failed fetching parent %s with uuid %s\\n%s\" % (name, uuid, ex))\n\n return parent\n\n return root_object", "def get_parent(self, id_) -> str:\n return list(self._nodes[id_]['parents'].keys())[0]", "def portal_template_folders_id_parent_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_parent_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_parent_get_with_http_info(id, **kwargs)\n return data", "def test_get_parent_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_all_children(root.id)\n role = children[-1]\n parent = role_middleware.get_parent(role.id)\n print(parent.name, parent.id)", "def test_get_parents_project(self):\n self.assertEqual(list(self.project.get_parents()), [self.category])", "def get_parent(self):\n return self.parent", "def get_parent(self):\n return self.parent", "def get_parent(self):\n return self.parent", "def get_parent(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('parents')] # Gets all columns of the table\n parent_fields = {}\n for key, value in fields.items(): # Filters incorrect args\n if key in existing_fields:\n parent_fields[key] = value\n additional_fields = ['student_UID', 'student_key'] # Additional fields that could be passed in args\n student_fields = {}\n for key, value in fields.items(): # Filters student fields from args\n if key in additional_fields:\n if key == 'student_UID':\n student_fields['UID'] = value\n else:\n student_fields[key] = value\n student = None if len(student_fields) == 0 else Students.get_or_none(**student_fields) # Gets a student\n if student is not None:\n query = Parents.select().where(student.parent_id == Parents.id).filter(**parent_fields)\n parents = [i for i in query] # Selects a parent of a student and checks requirements\n else:\n parents = [i for i in Parents.select().filter(**parent_fields)] # Selects 
a parent of a student and checks requirements\n # Expect single value if search by unique fields, list if search by non-unique fields\n return parents if len(parents) > 1 else parents[0] if len(parents) == 1 else None", "def _get_parent_key(self):\n parent_kind = metadata_api.GetParentKind(self)\n if parent_kind:\n parent_key_field = metadata_api.GetParentKeyField(self)\n parent_key_name = getattr(self, parent_key_field, None)\n if parent_key_name:\n return ndb.Key(parent_kind, parent_key_name)\n return None", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None" ]
[ "0.6964574", "0.6613799", "0.6521798", "0.64490104", "0.6403035", "0.6270911", "0.62071526", "0.6174531", "0.6174531", "0.61695886", "0.6150622", "0.6112547", "0.6101429", "0.6071851", "0.6070446", "0.6058362", "0.6058362", "0.6058362", "0.6050428", "0.6038977", "0.6032884", "0.6026623", "0.6025255", "0.6018558", "0.60072213", "0.60072213", "0.60072213", "0.59780926", "0.59386414", "0.59209484" ]
0.8288951
0
Test case for team_template_folders_id_put Replace attributes for a model instance and persist it into the data source.
def test_team_template_folders_id_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_post(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_workflows_id_put(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def init_entity_test(ktrack_instance, tmpdir, entity, folders, files):\n # we need to mock project root to something\n entity_type, entity_id = entity[\"type\"], entity[\"id\"]\n\n # change project root\n mock_routes = copy.deepcopy(\n template_manager._data_routes\n ) # we take default routes and adjust what we need\n mock_routes[\"project_root\"] = tmpdir.dirname\n\n # store old route length, so we can check if correct number of paths was created\n old_registered_paths = ktrack_instance.find(\"path_entry\", [])\n old_len = len(old_registered_paths)\n\n with patch.object(template_manager, \"_data_routes\", mock_routes) as mock_yml_data:\n folder_manager.init_entity(entity_type, entity_id)\n\n # now verify that folders are created\n for folder in folders:\n folder = folder.format(project_root=tmpdir.dirname)\n assert os.path.exists(folder)\n\n for file_path in files:\n f = file_path.format(project_root=tmpdir.dirname)\n assert os.path.exists(f)\n\n # verify that folders are registered in database\n registered_paths = ktrack_instance.find(\"path_entry\", [])\n new_len = len(registered_paths)\n assert new_len == old_len + len(folders) + len(files)", "def test_team_template_folders_change_stream_post(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_datatransformationsetups_id_put(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_update_team(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_put(self):\n\n url = reverse('file')\n\n data = {\n 'shard_id': self.shard1.id,\n 'link_id': \"b8866161-0b1f-4a8e-acde-07047313ec8f\",\n 'parent_datastore_id': str(self.test_datastore_obj.id),\n 'chunk_count': 1,\n 'size': 512,\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertIn('file_id', response.data)\n self.assertIn('file_transfer_id', response.data)\n self.assertIn('file_transfer_secret_key', response.data)", "def test_db_creating_put(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 
'time': '23:58:59'\n }\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n\n with mock.patch('notification.models.Notification.update') as notification_update:\n notification_update.return_value = False\n\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def test_xml_template_set(self):\n xmlTemplateResult = XmlTemplate.objects.get(id=1)\n xmlTemplateResult.set(\"newTemplate\", '''<?xml >\n <project name=\"newTemplate\">\n </project> ''',)\n self.assertEqual(xmlTemplateResult.template_name, \"newTemplate\")\n self.assertEqual(xmlTemplateResult.template_content, '''<?xml >\n <project name=\"newTemplate\">\n </project> ''')", "def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)", "def test_coupledmodels_id_put(self):\n pass" ]
[ "0.7504428", "0.7104182", "0.70466805", "0.65424544", "0.63857955", "0.6227054", "0.60847133", "0.6040423", "0.6029789", "0.59896743", "0.5904628", "0.57987356", "0.57302064", "0.5700448", "0.5645764", "0.5642856", "0.5634773", "0.5634771", "0.5626497", "0.5549034", "0.55149114", "0.5475933", "0.54689187", "0.54667276", "0.5453471", "0.5434628", "0.5428373", "0.5407456", "0.5400261", "0.5369141" ]
0.7899408
0
Test case for team_template_folders_id_replace_post Replace attributes for a model instance and persist it into the data source.
def test_team_template_folders_id_replace_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_post(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_workflows_id_replace_post(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_update_activity_template(self):\n pass", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_workflows_id_templates_post(self):\n pass", "def test_xml_template_set(self):\n xmlTemplateResult = XmlTemplate.objects.get(id=1)\n xmlTemplateResult.set(\"newTemplate\", '''<?xml >\n <project name=\"newTemplate\">\n </project> ''',)\n self.assertEqual(xmlTemplateResult.template_name, \"newTemplate\")\n self.assertEqual(xmlTemplateResult.template_content, '''<?xml >\n <project name=\"newTemplate\">\n </project> ''')", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_team_template_folders_change_stream_post(self):\n pass", "def test_data_source_soaps_id_replace_post(self):\n pass", "def test_update_attribute_data(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def init_entity_test(ktrack_instance, tmpdir, entity, folders, files):\n # we need to mock project root to something\n entity_type, entity_id = entity[\"type\"], entity[\"id\"]\n\n # change project root\n mock_routes = copy.deepcopy(\n template_manager._data_routes\n ) # we take default routes and adjust what we need\n mock_routes[\"project_root\"] = tmpdir.dirname\n\n # store old route length, so we can check if correct number of paths was created\n old_registered_paths = ktrack_instance.find(\"path_entry\", [])\n old_len = len(old_registered_paths)\n\n with patch.object(template_manager, \"_data_routes\", mock_routes) as mock_yml_data:\n folder_manager.init_entity(entity_type, entity_id)\n\n # now verify that folders are created\n for folder in folders:\n folder = folder.format(project_root=tmpdir.dirname)\n assert os.path.exists(folder)\n\n for file_path in files:\n f = file_path.format(project_root=tmpdir.dirname)\n assert os.path.exists(f)\n\n # verify that folders are registered in database\n registered_paths = ktrack_instance.find(\"path_entry\", [])\n new_len = len(registered_paths)\n assert new_len == old_len + len(folders) + len(files)", "def test_update_template_registration(self):\n pass", "def test_prep_new_data(self):\n pass", "def _persist(self):\n trunk.set(self.uuid, self.json)", "def test_distillery_saved(self, mock_template):\n distillery = Distillery.objects.get_by_natural_key(\n 'elasticsearch.test_index.test_docs')\n distillery.save()\n self.assertEqual(mock_template.call_count, 1)", "def test_team_template_folders_id_team_get(self):\n pass", "def test_update_team(self):\n pass", "def save(self, *args, 
**kwargs):\n if self.pk is None:\n if not self.name.startswith(TEMPLATE_PREFIX):\n self.name = f'{TEMPLATE_PREFIX}{self.name}'\n super(Template, self).save(*args, **kwargs)" ]
[ "0.66780967", "0.6648374", "0.63940597", "0.622652", "0.61064726", "0.60301924", "0.5866185", "0.5818852", "0.57047206", "0.54621875", "0.5419091", "0.53817344", "0.537763", "0.5370623", "0.53631043", "0.53486377", "0.5335189", "0.531689", "0.53012997", "0.52785236", "0.52387077", "0.52098197", "0.52067494", "0.5202714", "0.51723844", "0.51398194", "0.51244575", "0.51152456", "0.51071835", "0.50704134" ]
0.74417293
0
Test case for team_template_folders_id_team_get Fetches belongsTo relation team.
def test_team_template_folders_id_team_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def team_members_id_team_template_folders_fk_get_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_get`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_team_template_folders_fk_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TeamTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_patch(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_workflows_id_templates_fk_get(self):\n pass", "def team_members_id_team_template_folders_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_head(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def get_team(self, team_id):\n 
try:\n return CourseTeam.objects.get(team_id=team_id)\n except CourseTeam.DoesNotExist:\n raise Http404 # lint-amnesty, pylint: disable=raise-missing-from", "def test_team_template_folders_id_put(self):\n pass", "def test_retrieve_team(self):\n pass", "def test_team_template_folders_change_stream_get(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_basketballteams_id_get(self):\n pass", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def find_by_id(self, team, params={}, **options):\n path = \"/teams/%s\" % (team)\n return self.client.get(path, params, **options)", "def test_workflows_id_templates_get(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_count_get(self):\n pass" ]
[ "0.78049576", "0.74248075", "0.7267805", "0.71177256", "0.7034635", "0.68907964", "0.68329453", "0.6746703", "0.6577737", "0.60531944", "0.6046749", "0.6015794", "0.5990343", "0.598311", "0.59649336", "0.58300275", "0.57651395", "0.57589376", "0.5682502", "0.5672387", "0.56507635", "0.56144756", "0.5575942", "0.5563592", "0.5541807", "0.54972786", "0.5489234", "0.5459677", "0.5417835", "0.5412422" ]
document_score: 0.78695655
document_rank: 0
query: Test case for team_template_folders_id_templates_count_get Counts templates of TeamTemplateFolder.
document: def test_team_template_folders_id_templates_count_get(self): pass
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_count_get(self):\n pass", "def test_workflows_id_templates_count_get(self):\n pass", "def test_team_template_folders_id_children_count_get(self):\n pass", "def team_members_id_team_template_folders_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_count_get_with_http_info(id, **kwargs)\n return data", "def portal_template_folders_count_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_count_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_count_get_with_http_info(**kwargs)\n return data", "def portal_template_folders_id_templates_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_count_get_with_http_info(id, **kwargs)\n return data", "def team_members_id_team_template_folders_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_id_templates_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" 
to method portal_template_folders_id_templates_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_templates_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/templates/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_count_get_with_http_info(self, **kwargs):\n\n all_params = ['where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/count'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def team_members_id_templates_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return 
self.team_members_id_templates_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_templates_count_get_with_http_info(id, **kwargs)\n return data", "def team_members_id_templates_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_templates_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_templates_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/templates/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_templates_post(self):\n pass", "def team_members_id_uploaded_templates_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_uploaded_templates_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_uploaded_templates_count_get_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_id_exists_get(self):\n pass", "def team_members_id_team_templates_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_templates_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_templates_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templates/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = 
params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def team_members_id_uploaded_templates_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_uploaded_templates_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_uploaded_templates_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/uploadedTemplates/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_templates_count_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_templates_count_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_templates_count_get_with_http_info(**kwargs)\n return data", "def team_members_id_team_templates_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_templates_count_get_with_http_info(id, **kwargs)\n else:\n (data) = 
self.team_members_id_team_templates_count_get_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def portal_template_folders_id_children_count_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)\n return data", "def portal_templates_count_get_with_http_info(self, **kwargs):\n\n all_params = ['where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_templates_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplates/count'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_head(self):\n pass", "def query_count(self,\n template: Union[list, str, pd.Series, pd.DataFrame, None] = None,\n title: Optional[str] = None,\n keyword: Union[str, list, None] = None,\n mongoquery: Union[str, dict, None] = None,\n ) -> pd.DataFrame:\n # Set data based on arguments\n data = {'all': 'true'} \n data = {}\n \n # Manage query field and rest_url\n if keyword is not None:\n rest_url = '/rest/data/query/keyword/'\n if mongoquery is not None:\n raise ValueError('keyword and mongoquery cannot both be given')\n data['query'] = keyword\n \n elif mongoquery is not None:\n rest_url = '/rest/data/query/'\n if not isinstance(mongoquery, str):\n data['query'] = json.dumps(mongoquery)\n else:\n data['query'] = mongoquery\n else:\n rest_url = '/rest/data/query/'\n data['query'] = '{}'\n \n # Manage template \n if template is not None:\n data['templates'] = []\n \n # Handle DataFrames\n if isinstance(template, pd.DataFrame):\n templates = template\n for template_id in template.id.values:\n data['templates'].append({\"id\":template_id})\n else:\n for t in aslist(template):\n templates = []\n if not isinstance(t, pd.Series):\n t = self.get_template(title=t)\n \n data['templates'].append({\"id\":t.id})\n templates.append(t)\n templates = pd.DataFrame(templates) \n \n data['templates'] = 
json.dumps(data['templates'])\n else:\n templates = self.get_templates()\n\n # Manage title\n if title is not None:\n data['title'] = title\n\n # Get response\n response = self.post(rest_url, data=data)\n response_json = response.json()\n\n return response_json['count']", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def testGetTemplatesLength(self):\n self.assertEqual(len(self.service.templates), 12)", "def portal_template_folders_id_children_count_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'where']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_children_count_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_children_count_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/children/count'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'where' in params:\n query_params['where'] = params['where']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_patch(self):\n pass" ]
[ "0.79925585", "0.75923383", "0.73165613", "0.722862", "0.7195381", "0.7187491", "0.70838785", "0.6988885", "0.6731244", "0.67306924", "0.6324026", "0.63047403", "0.62745947", "0.6207618", "0.61564934", "0.61472076", "0.6124894", "0.6027852", "0.60278064", "0.5964219", "0.595364", "0.5927918", "0.5858723", "0.58511794", "0.5756946", "0.56456023", "0.5644982", "0.56403154", "0.5613165", "0.5576625" ]
document_score: 0.88226944
document_rank: 0
query: Test case for team_template_folders_id_templates_fk_delete Delete a related item by id for templates.
document: def test_team_template_folders_id_templates_fk_delete(self): pass
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_delete(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_workflows_id_templates_fk_delete(self):\n pass", "def portal_template_folders_id_templates_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def delete_template(_request, template_id):\n template = models.Template.get_by_id(int(template_id))\n template.delete()\n\n url = urlresolvers.reverse('views.admin.list_templates')\n return http.HttpResponseRedirect(url)", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def portal_template_folders_id_templates_rel_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_team_template_folders_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_templates_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_templates_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_templates_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_delete_multiple_templates_success(self):\n template_id_1 = util.MOCK_UUID_1\n template_id_2 = util.MOCK_UUID_2\n\n rv = TEST_CLIENT.post(\n \"/templates/deletetemplates\", json=[template_id_1, template_id_2]\n )\n result = rv.json()\n\n expected = {\"message\": \"Successfully removed templates\"}\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)", "def team_members_id_uploaded_templates_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_uploaded_templates_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_uploaded_templates_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_team_templates_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_templates_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_templates_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_templates_rel_fk_delete(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)\n return data", "def delete(self, template_id: str):\n url = API_PATH[\"flairtemplatedelete\"].format(subreddit=self.subreddit)\n 
self.subreddit._reddit.post(url, data={\"flair_template_id\": template_id})", "def test_delete_template_success(self):\n template_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.delete(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = {\"message\": \"Template deleted\"}\n self.assertDictEqual(expected, result)", "def test_xml_template_delete(self):\n XmlTemplate.delete_by_id(1)\n self.assertEqual(XmlTemplate.objects.count(), 1)\n self.assertFalse(XmlTemplate.objects.filter(pk=1).exists())", "def test_delete_namespaced_template(self):\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def portal_template_folders_id_templates_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_templates_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_templates_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_templates_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/templates/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def delete_template(self):\n try:\n os.remove(self.path)\n except Exception:\n pass", "def test_delete_activity_template(self):\n pass", "def delete(self, _id):", "def test_delete_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.delete(self.fixture.user1_template, request=mock_request)", "def test_delete_template_subscription(self):\n pass", "def team_members_id_team_template_folders_fk_delete_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in 
all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_delete\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_delete`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_team_template_folders_fk_delete`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_commentary_view_delete(self):\n \n test_response = self.client.get('/papers/commentary/1/delete')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('object' in test_response.context) \n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'confirm_delete.html')", "def DELETE(self):\n ids = self.context.objectIds()\n self.context.manage_delObjects(ids)\n self.context.createTemplate()\n return self.request.response.setStatus(200)", "def team_members_id_team_template_folders_delete(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_delete_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_delete_with_http_info(id, **kwargs)\n return data" ]
[ "0.79190964", "0.7877314", "0.7691019", "0.6831963", "0.6803464", "0.66661274", "0.6630532", "0.6619362", "0.65896314", "0.65800816", "0.6549002", "0.6506355", "0.6483962", "0.64786184", "0.6470037", "0.6384263", "0.63710374", "0.63246095", "0.6287143", "0.6272284", "0.62361395", "0.6228641", "0.61777246", "0.61328626", "0.6100166", "0.60977805", "0.60928035", "0.60681975", "0.6012696", "0.5989767" ]
document_score: 0.8669718
document_rank: 0
query: Test case for team_template_folders_id_templates_fk_get Find a related item by id for templates.
document: def test_team_template_folders_id_templates_fk_get(self): pass
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_workflows_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_workflows_id_templates_get(self):\n pass", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def portal_template_folders_id_templates_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_get(self):\n pass", "def team_members_id_team_template_folders_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def _get_template_by_id(self, template_id):\n raise NotImplementedError()", "def portal_template_folders_id_templates_fk_get_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_templates_fk_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_templates_fk_get`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_templates_fk_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/templates/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return 
self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Template',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_head(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def team_members_id_tweak_template_folders_nk_templates_fk_get(self, id, id2, nk, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_tweak_template_folders_nk_templates_fk_get_with_http_info(id, id2, nk, fk, **kwargs)\n else:\n (data) = self.team_members_id_tweak_template_folders_nk_templates_fk_get_with_http_info(id, id2, nk, fk, **kwargs)\n return data", "def test_team_template_folders_id_patch(self):\n pass", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_xml_template_get_by_id(self):\n xmlTemplateExpected = XmlTemplate.objects.get(id=1)\n self.assertEqual(XmlTemplate.get_by_id(1), xmlTemplateExpected)", "def team_members_id_uploaded_templates_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_uploaded_templates_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_uploaded_templates_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_templates_fk_get(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_templates_fk_get_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_templates_fk_get_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_team_template_folders_fk_get_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_get`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_team_template_folders_fk_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 
'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TeamTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def team_members_id_tweak_template_folders_get(self, id, id2, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_tweak_template_folders_get_with_http_info(id, id2, **kwargs)\n else:\n (data) = self.team_members_id_tweak_template_folders_get_with_http_info(id, id2, **kwargs)\n return data", "def test_team_template_folders_id_children_fk_delete(self):\n pass" ]
[ "0.74701315", "0.7434965", "0.73949075", "0.7077682", "0.7034495", "0.70042926", "0.68805444", "0.67343676", "0.6631117", "0.6597772", "0.6347506", "0.6252539", "0.62216115", "0.61500025", "0.5987843", "0.5977853", "0.5967109", "0.5960177", "0.59008634", "0.58921754", "0.58916414", "0.58876926", "0.5866265", "0.5808831", "0.578065", "0.57748383", "0.5734253", "0.5682903", "0.5641808", "0.56315356" ]
document_score: 0.8420041
document_rank: 0
query: Test case for team_template_folders_id_templates_fk_put Update a related item by id for templates.
document: def test_team_template_folders_id_templates_fk_put(self): pass
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def portal_template_folders_id_templates_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def portal_template_folders_id_templates_rel_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_templates_get(self):\n pass", "def team_members_id_team_template_folders_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_patch(self):\n pass", "def team_members_id_templates_rel_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def portal_template_folders_id_templates_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_templates_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_templates_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_templates_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/templates/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 
'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Template',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def team_members_id_templates_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_templates_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_templates_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def team_members_id_team_template_folders_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `team_members_id_team_template_folders_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TeamTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", 
"def test_workflows_id_templates_fk_get(self):\n pass", "def team_members_id_team_templates_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_templates_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_team_templates_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def team_members_id_uploaded_templates_fk_put(self, id, fk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_uploaded_templates_fk_put_with_http_info(id, fk, **kwargs)\n else:\n (data) = self.team_members_id_uploaded_templates_fk_put_with_http_info(id, fk, **kwargs)\n return data", "def test_workflows_id_templates_fk_delete(self):\n pass", "def test_update_template_success(self):\n template_id = util.MOCK_UUID_1\n template_name = \"template-3\"\n\n rv = TEST_CLIENT.patch(\n f\"/templates/{template_id}\", json={\"name\": template_name}\n )\n result = rv.json()\n\n expected = {\n \"uuid\": template_id,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": util.MOCK_UUID_1,\n \"deploymentId\": None,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def portal_template_folders_id_templates_rel_fk_put_with_http_info(self, id, fk, **kwargs):\n\n all_params = ['id', 'fk', 'data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_templates_rel_fk_put\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_templates_rel_fk_put`\")\n # verify the required parameter 'fk' is set\n if ('fk' not in params) or (params['fk'] is None):\n raise ValueError(\"Missing the required parameter `fk` when calling `portal_template_folders_id_templates_rel_fk_put`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/templates/rel/{fk}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'fk' in params:\n path_params['fk'] = params['fk']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'PUT',\n 
path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplate',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_post(self):\n pass" ]
[ "0.75875634", "0.7442952", "0.7431759", "0.68305796", "0.68276864", "0.68187314", "0.6519196", "0.6214955", "0.6187192", "0.6171082", "0.6146838", "0.60781616", "0.5988694", "0.59180284", "0.5909078", "0.58887845", "0.5862075", "0.5821459", "0.5818591", "0.57954454", "0.5780002", "0.5759692", "0.57452136", "0.57424945", "0.5702652", "0.57012635", "0.5689528", "0.5650767", "0.56393826", "0.5623971" ]
document_score: 0.84282964
document_rank: 0
query: Test case for team_template_folders_id_templates_get Queries templates of TeamTemplateFolder.
document: def test_team_template_folders_id_templates_get(self): pass
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def test_team_template_folders_id_exists_get(self):\n pass", "def test_team_template_folders_find_one_get(self):\n pass", "def test_team_template_folders_id_children_get(self):\n pass", "def test_team_template_folders_id_templates_post(self):\n pass", "def test_workflows_id_templates_get(self):\n pass", "def test_team_template_folders_id_parent_get(self):\n pass", "def portal_template_folders_id_templates_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_templates_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_templates_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}/templates'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Template]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_count_get(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def team_members_id_team_template_folders_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_get_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_get_with_http_info(id, **kwargs)\n return data", "def team_members_id_team_template_folders_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_team_template_folders_get\" % key\n )\n params[key] = val\n del 
params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_team_template_folders_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/team/templateFolders'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[TeamTemplateFolder]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def team_members_id_tweak_template_folders_get(self, id, id2, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_tweak_template_folders_get_with_http_info(id, id2, **kwargs)\n else:\n (data) = self.team_members_id_tweak_template_folders_get_with_http_info(id, id2, **kwargs)\n return data", "def portal_template_folders_id_templates_get(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_get_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_get_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_id_children_fk_get(self):\n pass", "def test_get_all_as_superuser_returns_all_templates(self):\n mock_request = create_mock_request(user=self.superuser)\n templates = template_api.get_all(request=mock_request)\n self.assertEqual(templates.count(), 3)\n self.assertTrue(self.fixture.user1_template in list(templates))\n self.assertTrue(self.fixture.user2_template in list(templates))\n self.assertTrue(self.fixture.global_template in list(templates))", "def get_templates(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/templates\").json()", "def test_get_any_template_as_superuser_returns_template(self):\n mock_request = create_mock_request(user=self.superuser1)\n template = template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user1_template)\n template = template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.user2_template)\n template = template_api.get_by_id(\n self.fixture.global_template.id, request=mock_request\n )\n self.assertEqual(template, self.fixture.global_template)", "def team_members_id_tweak_template_folders_nk_templates_get_with_http_info(self, id, id2, nk, 
**kwargs):\n\n all_params = ['id', 'id2', 'nk', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_tweak_template_folders_nk_templates_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_tweak_template_folders_nk_templates_get`\")\n # verify the required parameter 'id2' is set\n if ('id2' not in params) or (params['id2'] is None):\n raise ValueError(\"Missing the required parameter `id2` when calling `team_members_id_tweak_template_folders_nk_templates_get`\")\n # verify the required parameter 'nk' is set\n if ('nk' not in params) or (params['nk'] is None):\n raise ValueError(\"Missing the required parameter `nk` when calling `team_members_id_tweak_template_folders_nk_templates_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/tweakTemplateFolders/{nk}/templates'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'id2' in params:\n path_params['id'] = params['id2']\n if 'nk' in params:\n path_params['nk'] = params['nk']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Template]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def portal_template_folders_get(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_get_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_get_with_http_info(**kwargs)\n return data", "def team_members_id_tweak_template_folders_get_with_http_info(self, id, id2, **kwargs):\n\n all_params = ['id', 'id2', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method team_members_id_tweak_template_folders_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `team_members_id_tweak_template_folders_get`\")\n # verify the required parameter 'id2' 
is set\n if ('id2' not in params) or (params['id2'] is None):\n raise ValueError(\"Missing the required parameter `id2` when calling `team_members_id_tweak_template_folders_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/TeamMembers/{id}/tweakTemplateFolders'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'id2' in params:\n path_params['id'] = params['id2']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[TeamTemplateFolder]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def team_members_id_tweak_template_folders_nk_templates_get(self, id, id2, nk, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_tweak_template_folders_nk_templates_get_with_http_info(id, id2, nk, **kwargs)\n else:\n (data) = self.team_members_id_tweak_template_folders_nk_templates_get_with_http_info(id, id2, nk, **kwargs)\n return data", "def portal_template_folders_id_get_with_http_info(self, id, **kwargs):\n\n all_params = ['id', 'filter']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method portal_template_folders_id_get\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `portal_template_folders_id_get`\")\n\n\n collection_formats = {}\n\n resource_path = '/PortalTemplateFolders/{id}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n\n query_params = {}\n if 'filter' in params:\n query_params['filter'] = params['filter']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])\n\n # Authentication setting\n auth_settings = ['access_token']\n\n return self.api_client.call_api(resource_path, 'GET',\n 
path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PortalTemplateFolder',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n collection_formats=collection_formats)", "def test_team_template_folders_change_stream_get(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass" ]
[ "0.7672202", "0.7596844", "0.7596276", "0.7415448", "0.7369875", "0.72484344", "0.69025093", "0.68312633", "0.67781764", "0.6753093", "0.6541428", "0.646129", "0.63168097", "0.63137025", "0.6275176", "0.6272056", "0.6232344", "0.6222934", "0.6220438", "0.6214882", "0.6123346", "0.6116056", "0.6068337", "0.60040534", "0.5996367", "0.5991352", "0.5971444", "0.5958976", "0.5943888", "0.59420234" ]
0.85034555
0
Test case for team_template_folders_id_templates_post Creates a new instance in templates of this model.
def test_team_template_folders_id_templates_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_post(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_workflows_id_templates_post(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_templates_get(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def portal_template_folders_post(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_post_with_http_info(**kwargs)\n else:\n (data) = self.portal_template_folders_post_with_http_info(**kwargs)\n return data", "def team_members_id_team_template_folders_post(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.team_members_id_team_template_folders_post_with_http_info(id, **kwargs)\n else:\n (data) = self.team_members_id_team_template_folders_post_with_http_info(id, **kwargs)\n return data", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_create_template_with_experiment_id_success(self):\n template_name = \"template-3\"\n experiment_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.post(\n \"/templates\",\n json={\n \"name\": template_name,\n \"experimentId\": experiment_id,\n },\n )\n result = rv.json()\n\n expected = {\n \"uuid\": mock.ANY,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_1,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n },\n {\n \"uuid\": util.MOCK_UUID_4,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [util.MOCK_UUID_1],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n },\n ],\n \"experimentId\": experiment_id,\n \"deploymentId\": None,\n \"createdAt\": mock.ANY,\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def create_templates(self):\n for name, params in list_registered_templates():\n if self['templates'].filter(theme=self, name=name).count() == 0:\n self['templates'].create(theme=self, name=name)", "def test_team_template_folders_id_team_get(self):\n pass", "def test_team_template_folders_id_templates_count_get(self):\n pass", "def test_post_list_with_template(self):\n template_story = create_story(title=\"Test Template Story\",\n summary=\"Test Template Story Summary\", \n byline=\"Test Template Story Byline\", status=\"published\",\n language=\"en\")\n post_data = {\n 'title': \"Test Story\",\n 'summary': \"Test Summary\",\n 'byline': \"Test Byline\",\n 'status': \"draft\",\n 'language': \"en\",\n 'template_story': template_story.story_id\n }\n self.assertEqual(Story.objects.count(), 1)\n self.api_client.client.login(username=self.username, password=self.password)\n response = self.api_client.post('/api/0.1/stories/',\n format='json', data=post_data)\n self.assertHttpCreated(response)\n self.assertEqual(Story.objects.count(), 2)\n returned_story_id = response['location'].split('/')[-2]\n created_story = Story.objects.get(story_id=returned_story_id)\n self.assertEqual(created_story.title, post_data['title'])\n self.assertEqual(created_story.summary, post_data['summary'])\n 
self.assertEqual(created_story.byline, post_data['byline'])\n self.assertEqual(created_story.status, post_data['status'])\n self.assertEqual(created_story.get_languages(), [post_data['language']])\n self.assertEqual(created_story.author, self.user)\n self.assertEqual(created_story.template_story, template_story)", "def test_team_template_folders_id_delete(self):\n pass", "def test_team_template_folders_change_stream_post(self):\n pass", "def post(self):\n self.reqparse.add_argument('templateName', type=str, required=True)\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n\n template = db.Template.find_one(template_name=args['templateName'])\n if template:\n return self.make_response('Template already exists, update the existing template instead', HTTP.CONFLICT)\n\n template = Template()\n template.template_name = args['templateName']\n template.template = args['template']\n\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.create', actor=session['user'].username, data=args)\n\n return self.make_response('Template {} has been created'.format(template.template_name), HTTP.CREATED)", "def post(request):\n # load request json\n try:\n request_content = json.loads(request.body)\n except JSONDecodeError as e:\n return failed(status=1000001)\n\n # validate request data\n schema = SCHEMA.copy()\n schema['required'] = ['name', 'image_path']\n validate_result, msg = utils.validate_json(data=request_content, schema=schema)\n if validate_result != 0:\n return failed(status=1000001, msg=msg)\n\n # create new vm template\n new_obj = VmTemplate(**request_content)\n\n # save objects\n try:\n new_obj.save()\n except IntegrityError as e:\n\n return failed(status=1001001, msg=str(e.__cause__))\n\n # return data\n data = new_obj.__dict__\n data.pop('_state')\n return success(data=data)", "def test_add_template(self):\n\n widget = part.Part(self.api, pk=10000)\n\n n = len(widget.getTestTemplates())\n\n part.PartTestTemplate.create(self.api, {\n 'part': widget.pk,\n 'test_name': f\"Test_Name_{n}\",\n 'description': 'A test or something',\n 'required': True,\n })\n\n self.assertEqual(len(widget.getTestTemplates()), n + 1)", "def test_create_template_with_deployment_id_success(self):\n template_name = \"template-3\"\n deployment_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.post(\n \"/templates\",\n json={\n \"name\": template_name,\n \"deploymentId\": deployment_id,\n },\n )\n result = rv.json()\n\n expected = {\n \"uuid\": mock.ANY,\n \"name\": template_name,\n \"tasks\": [\n {\n \"uuid\": util.MOCK_UUID_2,\n \"task_id\": util.MOCK_UUID_1,\n \"dependencies\": [],\n \"position_x\": 0.0,\n \"position_y\": 0.0,\n }\n ],\n \"experimentId\": None,\n \"deploymentId\": deployment_id,\n \"createdAt\": mock.ANY,\n \"updatedAt\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def portal_template_folders_id_templates_post(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.portal_template_folders_id_templates_post_with_http_info(id, **kwargs)\n else:\n (data) = self.portal_template_folders_id_templates_post_with_http_info(id, **kwargs)\n return data", "def test_team_template_folders_id_get(self):\n pass", "def test_team_template_folders_id_head(self):\n pass", "def test_team_template_folders_get(self):\n pass", "def post(self, request, pk: int = None):\n if not pk:\n template_id = request.POST.get('template_id')\n template = 
Template.objects.get(pk=int(template_id))\n _pk = TemplateVersion.objects.create(template=template, test_data={}).pk\n else:\n _pk = TemplateVersion.objects.duplicate(pk)\n template = TemplateVersion.objects.get(pk=pk).template\n return JsonResponse({'id': _pk, 'template_id': template.pk})" ]
[ "0.7337337", "0.721465", "0.7060718", "0.6698532", "0.6563004", "0.65542066", "0.6341067", "0.62356204", "0.62096906", "0.6188248", "0.59654826", "0.594923", "0.5935483", "0.58273405", "0.58084404", "0.57673085", "0.5725541", "0.57208335", "0.57106185", "0.57081985", "0.568146", "0.56719446", "0.56679595", "0.5661843", "0.5654229", "0.5619203", "0.5575924", "0.5516645", "0.54967153", "0.5471444" ]
0.8269024
0
Test case for team_template_folders_post Create a new instance of the model and persist it into the data source.
def test_team_template_folders_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_team_template_folders_id_templates_post(self):\n pass", "def test_team_template_folders_id_children_post(self):\n pass", "def test_team_template_folders_id_templates_fk_put(self):\n pass", "def test_team_template_folders_id_children_fk_put(self):\n pass", "def test_team_template_folders_id_put(self):\n pass", "def test_team_template_folders_id_replace_post(self):\n pass", "def test_team_template_folders_change_stream_post(self):\n pass", "def init_entity_test(ktrack_instance, tmpdir, entity, folders, files):\n # we need to mock project root to something\n entity_type, entity_id = entity[\"type\"], entity[\"id\"]\n\n # change project root\n mock_routes = copy.deepcopy(\n template_manager._data_routes\n ) # we take default routes and adjust what we need\n mock_routes[\"project_root\"] = tmpdir.dirname\n\n # store old route length, so we can check if correct number of paths was created\n old_registered_paths = ktrack_instance.find(\"path_entry\", [])\n old_len = len(old_registered_paths)\n\n with patch.object(template_manager, \"_data_routes\", mock_routes) as mock_yml_data:\n folder_manager.init_entity(entity_type, entity_id)\n\n # now verify that folders are created\n for folder in folders:\n folder = folder.format(project_root=tmpdir.dirname)\n assert os.path.exists(folder)\n\n for file_path in files:\n f = file_path.format(project_root=tmpdir.dirname)\n assert os.path.exists(f)\n\n # verify that folders are registered in database\n registered_paths = ktrack_instance.find(\"path_entry\", [])\n new_len = len(registered_paths)\n assert new_len == old_len + len(folders) + len(files)", "def test_team_template_folders_id_templates_fk_delete(self):\n pass", "def test_workflows_id_templates_post(self):\n pass", "def test_team_template_folders_id_children_fk_delete(self):\n pass", "def test_create(self):\n pass", "def test_team_template_folders_id_delete(self):\n pass", "def test_team_template_folders_id_patch(self):\n pass", "def test_team_template_folders_id_templates_fk_get(self):\n pass", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def test_teams_create(self):\n pass", "def create_models( self ):", "def test_team_template_folders_get(self):\n pass", "def setUpClass(cls):\n clean_db() # delete all objets created by another tests\n # create a content object\n cls.object = TestModel.objects.create(name=\"TestObject\")", "def test_workflows_id_templates_fk_put(self):\n pass", "def test_create_team(self):\n pass", "def test_handle_create_as_team_lead(self, mock_uuid):\r\n mock_uuid.uuid4.return_value = \"1\"\r\n team = Team(\"GTID\", \"team-name\", \"name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n project = Project(\"GTID\", [\"repo-link\"])\r\n project_attach = [project.get_attachment()]\r\n with self.app.app_context():\r\n resp, code = \\\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user)\r\n expect = {'attachments': project_attach}\r\n self.assertDictEqual(resp, expect)\r\n self.assertEqual(code, 200)\r\n self.mock_facade.query.assert_called_once_with(Team,\r\n [(\"github_team_name\",\r\n \"team-name\")])\r\n self.mock_facade.store.assert_called_once_with(project)", "def test_create_activity_template(self):\n pass", "def setUpTestData(cls):\n cls.post = PostFactory()", "def setUpTestData(cls):\n cls.board = 
Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "def create(self):\n ...", "def test_create_run(self):\n pass", "def perform_create(self, serializer):\n team = get_object_or_404(models.Team, pk=self.kwargs.get('pk'))\n\n return serializer.save(team=team)", "def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)" ]
[ "0.7511183", "0.708873", "0.70515764", "0.6798132", "0.67463607", "0.6450196", "0.6295251", "0.61858845", "0.6110003", "0.60111576", "0.5994621", "0.5928331", "0.59159493", "0.58993393", "0.5871875", "0.58599263", "0.5834365", "0.58327764", "0.58270866", "0.5795094", "0.57877773", "0.5767814", "0.57556", "0.57324374", "0.5713777", "0.5697575", "0.56946373", "0.56492275", "0.56433", "0.56164426" ]
0.77011627
0
Ensure md5 checksums match, STATUS_CRIT
def test_check_md5_crit_md5sum_mismatch(self, mock_generate_md5): jdata = b'{"/etc/swift/object.ring.gz": ' \ b'"6b4f3a0ef3731f18291ecd053ce0d9b6", ' \ b'"/etc/swift/account.ring.gz": ' \ b'"93fc4ae496a7343362ebf13988a137e7", ' \ b'"/etc/swift/container.ring.gz": ' \ b'"0ea1ec9585ef644ce2b5c5b1dced4128"}' pmock_jdata = PropertyMock(return_value=jdata) mock_generate_md5.return_value = 'xxxx' with patch('urllib.request.urlopen') as mock_urlopen: mock_urlopen.return_value = MagicMock(read=pmock_jdata) result = check_md5('.') mock_urlopen.assert_called_with('.ringmd5') expected_result = [(STATUS_CRIT, 'Ringfile /etc/swift/{}.ring.gz ' 'MD5 sum mismatch'.format(name)) for name in ('object', 'account', 'container')] self.assertEqual(result, expected_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_md5sum(_setup_str, src_host, src_pfn):\n\n error = PilotErrors()\n\n _cmd = '%suberftp %s \"quote cksm md5sum 0 -1 %s\"' % (_setup_str, src_host, src_pfn)\n estat, coutp = commands.getstatusoutput(_cmd)\n tolog('md5 uberftp done <%s> (%s): %s' % (_cmd, estat, coutp))\n\n if estat != 0:\n check_syserr(estat, coutp)\n if coutp.find('not understood') >= 0:\n tolog('!!WARNING!!2999!! MD5 unsupported by the server')\n return error.ERR_FAILEDMD5, coutp\n try:\n tmp0 = coutp.split('\\n')[-1]\n fmd5usm = tmp0.split()[1]\n # split removes also the trailing \"\\r\" that uberftp returns, no fmd5sum.strip()\n except:\n tolog('!!WARNING!!2999!! Unable to parse MD5')\n fmd5usm = ''\n return 0, fmd5usm", "def _check_md5(self):\n\n self.log.info('-' * 80)\n self.log.info('Check md5 sum')\n\n self.log.info(self._ref_value)\n self.log.info(self._output_file)\n\n code, out = cmd_exec(['md5sum', self._output_file], shell=False, log=self.log)\n if code:\n self.log.error(out)\n return False\n self.log.info(out)\n\n md5sum, _ = out.split(' ')\n\n self.log.info(f'reference md5: {self._ref_value}')\n self.log.info(f'actual md5: {md5sum}')\n\n if self._ref_value != md5sum:\n return False\n\n return True", "def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)", "def check_md5(filename, stored_md5):\n computed_md5 = _get_file_md5(filename)\n if stored_md5 != computed_md5:\n print (\"MD5 checksum of filename\", filename, \"failed. Expected MD5 was\", stored_md5,\n \"but computed MD5 was\", computed_md5, '\\n',\n \"Please check if the data has been downloaded correctly or if the upstream data has changed.\")", "def test_check_md5_ok(self, mock_generate_md5):\n jdata = b'{\"/etc/swift/object.ring.gz\": ' \\\n b'\"6b4f3a0ef3731f18291ecd053ce0d9b6\", ' \\\n b'\"/etc/swift/account.ring.gz\": ' \\\n b'\"6b4f3a0ef3731f18291ecd053ce0d9b6\", ' \\\n b'\"/etc/swift/container.ring.gz\": ' \\\n b'\"6b4f3a0ef3731f18291ecd053ce0d9b6\"}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_generate_md5.return_value = '6b4f3a0ef3731f18291ecd053ce0d9b6'\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_md5('.')\n mock_urlopen.assert_called_with('.ringmd5')\n self.assertEqual(result,\n [(STATUS_OK, 'OK')])", "def _check_final_md5(self, key, file_name):\r\n fp = open(file_name, 'r')\r\n if key.bucket.connection.debug >= 1:\r\n print 'Checking md5 against etag.'\r\n hex_md5 = key.compute_md5(fp)[0]\r\n if hex_md5 != key.etag.strip('\"\\''):\r\n file_name = fp.name\r\n fp.close()\r\n os.unlink(file_name)\r\n raise ResumableDownloadException(\r\n 'File changed during download: md5 signature doesn\\'t match '\r\n 'etag (incorrect downloaded file deleted)',\r\n ResumableTransferDisposition.ABORT)", "def checkmd5sum():\n print('Generating MD5 Checksum')\n md5 = md5get(upgrade_file)\n if not md5sum:\n print(\"You have not provided an MD5 checksum to check against\")\n print(\"\")\n print(\"You can find what the MD5 checksum of your package should be at https://a10networks.com/support/axseries/software-downloads\")\n print(\"\")\n cont = input(\"Would you like to continue anyway? 
Y/N [N]\")\n if cont.lower() == 'y' or cont.lower() == 'yes':\n print(\"Continuing at user request\")\n print(\"\")\n else:\n print(\"Exiting at user request\")\n exit(1)\n\n print(\"**************************MD5 Checksum of upgrade file**************************\")\n print(\"Upgrade filename: \" + upgrade_file)\n print(\"Upgrade MD5 Checksum: \" + md5)\n print(\"********************************************************************************\")\n print(\"\")\n print(\"It is suggested that you manually verify the MD5 Checksum against the A10 published checksum before proceeding\")\n cont = input(\"Do you wish to continue? Y/N [N]\")\n print(cont)\n if cont.lower() == 'y' or cont.lower() == 'yes':\n print(\"Continuing\")\n elif cont.lower() != 'y' and cont.lower() != 'yes':\n print(\"Exiting\")\n exit(1)\n\n elif md5sum != md5:\n print(\"************************************ERROR***********************************\")\n print(\"****************************************************************************\")\n print(\"MD5 provided: \" + md5sum + \" does not match the calculated MD5 of the upgrade file: \" + md5 + \".\")\n print(\"\")\n print(\"Please verify the correct MD5 Checksum is being provided\")\n print(\"If the MD5 Checksum provided matches that of the one listed at https://www.a10networks.com/support/axseries/software-downloads, please re-download the upgrade file\")\n print(\"Exiting...\")\n exit(1)\n elif md5sum == md5:\n print(\"MD5 Checksum provided and MD5 of the upgrade file match, proceeding\")", "def _check_final_md5(self, key, etag):\r\n if key.bucket.connection.debug >= 1:\r\n print 'Checking md5 against etag.'\r\n if key.md5 != etag.strip('\"\\''):\r\n # Call key.open_read() before attempting to delete the\r\n # (incorrect-content) key, so we perform that request on a\r\n # different HTTP connection. 
This is neededb because httplib\r\n # will return a \"Response not ready\" error if you try to perform\r\n # a second transaction on the connection.\r\n key.open_read()\r\n key.close()\r\n key.delete()\r\n raise ResumableUploadException(\r\n 'File changed during upload: md5 signature doesn\\'t match etag '\r\n '(incorrect uploaded object deleted)',\r\n ResumableTransferDisposition.ABORT)", "def md5check(fname, md5fname):\n\tmd5fh = open(md5fname, \"r\")\n\treturn (md5sum(fname) == md5fh.readline())", "def test_check_md5_unknown_valueerror2(self, mock_urlopen):\n jdata = PropertyMock(return_value=b'X')\n mock_urlopen.return_value = MagicMock(read=jdata)\n result = check_md5('.')\n mock_urlopen.assert_called_with('.ringmd5')\n self.assertEqual(result,\n [(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def test_check_md5_unknown_ioerror(self, mock_generate_md5):\n jdata = b'{\"/etc/swift/object.ring.gz\": ' \\\n b'\"6b4f3a0ef3731f18291ecd053ce0d9b6\", ' \\\n b'\"/etc/swift/account.ring.gz\": ' \\\n b'\"93fc4ae496a7343362ebf13988a137e7\", ' \\\n b'\"/etc/swift/container.ring.gz\": ' \\\n b'\"0ea1ec9585ef644ce2b5c5b1dced4128\"}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_generate_md5.side_effect = IOError()\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_md5('.')\n mock_urlopen.assert_called_with('.ringmd5')\n expected_result = [(STATUS_UNKNOWN,\n \"Can't open ringfile \"\n \"/etc/swift/{}.ring.gz\".format(name))\n for name in ('object', 'account', 'container')]\n self.assertEqual(result, expected_result)", "def MD5(self) -> _n_0_t_3[_n_0_t_9]:", "def check(self) -> bool:\n return self.check_sum() == self.__md5_sum", "def test_md5sum(self, changes_file):\n for file in changes_file['Files']:\n log.debug('Checking md5sum of %s' % file['name'])\n filename = os.path.join(pylons.config['debexpo.upload.incoming'], file['name'])\n if not os.path.isfile(filename):\n raise OSError(\"Missing file %s in incoming\" % (file['name']))\n sum = md5sum(filename)\n\n if sum != file['md5sum']:\n log.critical('%s != %s' % (sum, file['md5sum']))\n raise OSError(\"MD5 sum mismatch in file %s: %s != %s\" % (file['name'], sum, file['md5sum']))\n\n return True", "def check_md5sum(file1: str, file2: str) -> bool:\n return get_md5_hash(file1) == get_md5_hash(file2)", "def test_checksum(size1, size2, lines, tmpdir):\n fp = tmpdir.join(\"temp-data.txt\").strpath\n data = \"\\n\".join(lines)\n with open(fp, 'w') as f:\n f.write(data)\n exp = hashlib.new(\"md5\", data.encode(\"utf-8\")).hexdigest()\n res1 = checksum(fp, size1)\n res2 = checksum(fp, size2)\n assert exp == res1\n assert res1 == res2\n assert res2 == exp", "def check_pack_checksums():\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n for row in c.execute(\"SELECT lower(hex(sum)) FROM packs\"):\n checksum = row[0]\n res = s3.get_object(Bucket=BUCKET, Key=f\"{checksum}.pack\")\n body = res[\"Body\"]\n h = blake3.blake3()\n for chunk in iter(lambda: body.read(4096), b\"\"):\n h.update(chunk)\n\n c = h.hexdigest()\n if c != checksum:\n raise ValueError(\"pack {checksum}: checksum {c} does not match\")", "def check_md5(file1, file2):\r\n with open(file1, \"rb\") as f1:\r\n h1 = hashlib.md5(f1.read()).digest()\r\n with open(file2, \"rb\") as f2:\r\n h2 = hashlib.md5(f2.read()).digest()\r\n return h1 == h2", "def manifest_with_invalid_md5_values_helper(error_log):\n valid_md5 = '\"1596f493ba9ec53023fca640fb69bd3b\"' # pragma: allowlist secret\n assert valid_md5 
not in error_log\n\n short_md5 = '\"1596f493ba9ec53023fca640fb69bd3\"' # pragma: allowlist secret\n long_md5 = '\"d9a68f3d5d9ce03f8a08f509242472234\"' # pragma: allowlist secret\n md5_with_non_hexadecimal = (\n '\"5J1bf75c48761b2e755adc1340e5a9259\"' # pragma: allowlist secret\n )\n short_base64_encoded_md5 = '\"aGVsbG8=\"'\n assert short_md5 in error_log\n assert long_md5 in error_log\n assert md5_with_non_hexadecimal in error_log\n assert short_base64_encoded_md5 in error_log", "def verify_sum(file_path, md5_sum):\n file_md5_sum = generate_sum(file_path)\n return (file_md5_sum == md5_sum)", "def test_check_md5_unknown_valueerror1(self, mock_urlopen):\n base_url = 'asdfasdf'\n url = '{}ringmd5'.format(base_url)\n mock_urlopen.side_effect = ValueError(Mock(return_value=''))\n result = check_md5(base_url)\n mock_urlopen.assert_called_with(url)\n self.assertEqual(result,\n [(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def test_digest_matches_standard_library_md5(self):\n test_string = \"a short test string\"\n standard_md5 = hashlib.md5()\n md5 = PersistableMD5()\n\n standard_md5.update(test_string)\n md5.update(test_string)\n\n self.assertEqual(md5.digest(), standard_md5.digest())", "def check_md5_enabled():\n if not CONF.md5_enabled:\n raise ValueError('MD5 support is disabled, and support '\n 'will be removed in a 2024 version of '\n 'Ironic.')", "def CheckMd5(filename, md5filename):\n try:\n hasher = hashlib.md5()\n with open(filename) as check_file:\n with open(md5filename) as golden_file:\n for chunk in iter(lambda: check_file.read(128*hasher.block_size), ''):\n hasher.update(chunk)\n md5_contents = golden_file.read()\n if md5_contents:\n golden_digest_and_more = md5_contents.split(' ')\n if golden_digest_and_more:\n return golden_digest_and_more[0] == hasher.hexdigest()\n logging.warning('MD5 checksum match failed for %s', filename)\n return False\n except IOError:\n logging.warning('MD5 hasher read failed for %s', filename)\n return False", "def checksumFile(filename):\n return md5File(filename)", "def test_check_md5_unknown_urlerror(self, mock_urlopen):\n base_url = 'http://localhost:6000/recon/'\n url = '{}ringmd5'.format(base_url)\n error = 'connection refused'\n mock_urlopen.side_effect = (urllib\n .error\n .URLError(Mock(return_value=error)))\n result = check_md5(base_url)\n self.assertEqual(result,\n [(STATUS_UNKNOWN,\n \"Can't open url: {}\".format(url))])", "def verify_checksum(path):\n rc = True\n for f in os.listdir(path):\n if f.endswith('.md5'):\n cwd = os.getcwd()\n os.chdir(path)\n with open(os.devnull, \"w\") as fnull:\n try:\n subprocess.check_call(['md5sum', '-c', f], # pylint: disable=not-callable\n stdout=fnull, stderr=fnull)\n LOG.info(\"Checksum file is included and validated.\")\n except Exception as e:\n LOG.exception(e)\n rc = False\n finally:\n os.chdir(cwd)\n return rc\n LOG.info(\"Checksum file is not included, skipping validation.\")\n return rc", "def _check_hash(self, text):\n old = self.header.get(\"sha1sum\", None)\n if old is None:\n raise crexc.ChecksumError(\"sha1sum is missing in \" + repr(self.basename))\n if self._get_checksum(text) != self.header[\"sha1sum\"]:\n raise crexc.ChecksumError(\"sha1sum mismatch in \" + repr(self.basename))", "def test_MD5signature(self) -> None:\n for algorithm, expected in {\n 'md5': ('698d51a19d8a121ce581499d7b701668',\n 'bcbe3365e6ac95ea2c0343a2395834dd'),\n 'sha1': ('6216f8a75fd5bb3d5f22b6f9958cdede3fc086c2',\n '1c6637a8f2e1f75e06ff9984894d6bd16a3a36a9'),\n 'sha256': 
('f6e0a1e2ac41945a9aa7ff8a8aaa0cebc12a3bcc981a929ad5cf810a090e11ae',\n '9b871512327c09ce91dd649b3f96a63b7408ef267c8cc5710114e629730cb61f'),\n }.items():\n # if the current platform does not support the algorithm we're looking at,\n # skip the test steps for that algorithm, but display a warning to the user\n if algorithm not in ALLOWED_HASH_FORMATS:\n warnings.warn(\"Missing hash algorithm {} on this platform, cannot test with it\".format(algorithm), ResourceWarning)\n else:\n s = hash_signature('111', hash_format=algorithm)\n assert expected[0] == s, s\n\n s = hash_signature('222', hash_format=algorithm)\n assert expected[1] == s, s", "def crack_md5(cand_len, b_values):\n global s, K # `s` and `K` are global\n\n slv = z3.Solver()\n \n inp = [z3.BitVec(f'inp_{i}', 32) for i in range(16)]\n\n add_inp_constraint(cand_len, inp, slv)\n\n # MD5 implementation using symbolic variables.\n a0 = 0x67452301 # A\n b0 = 0xefcdab89 # B\n c0 = 0x98badcfe # C\n d0 = 0x10325476 # D\n\n A, B, C, D = a0, b0, c0, d0\n \n for i in range(64):\n if 0 <= i and i <= 15:\n F = (B & C) | (~B & D)\n g = i\n elif 16 <= i and i <= 31:\n F = (D & B) | (~D & C)\n g = (5*i + 1) % 16\n elif 32 <= i and i <= 47:\n F = B ^ C ^ D\n g = (3*i + 5) % 16\n elif 48 <= i <= 63:\n F = C ^ (B | ~D)\n g = (7*i) % 16\n\n F &= 0xFFFFFFFF\n F = (F + A + K[i] + inp[g]) & 0xFFFFFFFF \n A = D\n D = C\n C = B\n\n # NOTE: rol DOES NOT WORK! WE HAVE TO USE z3's `RotateLeft`.\n B = (B + z3.RotateLeft(F, s[i])) & 0xFFFFFFFF\n\n slv.add(B & 0x3FF == b_values[i])\n\n \n # Check for solutions\n def to_ascii(x):\n return chr(x & 0xFF) + chr((x >> 8) & 0xFF) + chr((x >> 16) & 0xFF) + chr(x >> 24)\n\n while slv.check() == z3.sat:\n mdl = slv.model()\n\n print('[+] Solution FOUND!')\n \n flag = ''\n for i, j in enumerate(inp):\n yy = mdl.evaluate(j).as_long() \n print(f'[+] {i:2d} ~~> {yy:08X} ~~> {repr(to_ascii(yy))}')\n flag += to_ascii(yy)\n\n flag = flag[:cand_len]\n\n print('[+] FLAG IS: hxp{%s}' % flag)\n return 1\n else:\n print('[+] Cannot find satisfiable solution :\\\\')\n return -1" ]
[ "0.75136995", "0.73375994", "0.73223966", "0.7005259", "0.68892246", "0.68501675", "0.68256193", "0.6749249", "0.6720844", "0.66895443", "0.66667277", "0.66560763", "0.66402364", "0.6631763", "0.66275257", "0.6622764", "0.65499777", "0.6546595", "0.65142083", "0.6500251", "0.6486221", "0.6474626", "0.64376354", "0.6382731", "0.63726056", "0.6359634", "0.63477457", "0.63333905", "0.63246274", "0.62733936" ]
0.74742234
1
Replication lag over CRIT threshold, STATUS_CRIT
def test_check_replication_crit_lag(self, mock_timestamp): base_url = 'http://localhost:6000/recon/' jdata = b'{"replication_last": 1493299546.629282, ' \ b'"replication_stats": {"no_change": 0, "rsync": 0, ' \ b'"success": 0, "failure": 0, "attempted": 0, "ts_repl": 0, ' \ b'"remove": 0, "remote_merge": 0, "diff_capped": 0, ' \ b'"start": 1493299546.621624, "hashmatch": 0, "diff": 0, ' \ b'"empty": 0}, "replication_time": 0.0076580047607421875}' pmock_jdata = PropertyMock(return_value=jdata) mock_timestamp.return_value = (MagicMock(days=0, seconds=12), 0) with patch('urllib.request.urlopen') as mock_urlopen: mock_urlopen.return_value = MagicMock(read=pmock_jdata) result = check_replication(base_url, [4, 10, 4, 10]) self.assertEqual(result, [(STATUS_CRIT, "'{}' replication lag is " "12 seconds".format(repl)) for repl in ('account', 'object', 'container')])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_day_plus_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=2, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"172805 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_warn_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_WARN,\n \"'{}' replication lag is \"\n \"5 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 12)\n with patch('urllib.request.urlopen') as mock_urlopen:\n 
mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT, \"12 replication failures\")])", "def check_opcounters(con, host, warning, critical,perf_data):\n warning=warning or 10000\n critical=critical or 15000\n\n data=get_server_status(con)\n err1,delta_opcounters=get_opcounters(data,'opcounters',host) \n err2,delta_opcounters_repl=get_opcounters(data,'opcountersRepl',host)\n if err1==0 and err2==0:\n delta=[(x+y) for x,y in zip(delta_opcounters ,delta_opcounters_repl) ]\n delta[0]=delta_opcounters[0]#only the time delta shouldn't be summarized\n per_minute_delta=[int(x/delta[0]*60) for x in delta[1:]]\n message=\"Test succeeded , old values missing\"\n message= \"Opcounters: total=%d,insert=%d,query=%d,update=%d,delete=%d,getmore=%d,command=%d\" % tuple(per_minute_delta)\n message+=performance_data(perf_data,([(per_minute_delta[0],\"total\",warning,critical),(per_minute_delta[1],\"insert\"),\n (per_minute_delta[2],\"query\"), (per_minute_delta[3],\"update\"),(per_minute_delta[5],\"delete\"),\n (per_minute_delta[5],\"getmore\"),(per_minute_delta[6],\"command\")]))\n return check_levels(per_minute_delta[0],warning,critical,message)\n else :\n return exit_with_general_critical(\"problem reading data from temp file\")", "def maintenance_cut(df):\n full_maintenance_duration = (\n (df.maintenance_duration + df.maintenance_cycle)/3600\n )\n remainder = df.timestamp % full_maintenance_duration\n \n df['maintenance_cut'] = (\n remainder > (df.maintenance_duration/3600)\n )\n \n return None", "def test_timeout_with_crud_failures(self):\n\n # Local methods to validate vb_seqno\n\n def compare_vb_stat(stat_1, stat_2, vb, comparison=\"!=\"):\n keys_to_check = [\"high_seqno\", \"high_completed_seqno\"]\n result = True\n for key in keys_to_check:\n if vb in stat_1.keys():\n if stat_1[vb][\"uuid\"] != stat_2[vb][\"uuid\"]:\n self.log_failure(\"Mismatch in vb-%s UUID. %s != %s\"\n % (vb, stat_1[vb][\"uuid\"],\n stat_2[vb][\"uuid\"]))\n if comparison == \"!=\":\n if stat_1[vb][key] != stat_2[vb][key]:\n result = False\n self.log.warning(\n \"Mismatch in vb-%s stat %s. %s != %s\"\n % (vb, key, stat_1[vb][key], stat_2[vb][key]))\n elif stat_1[vb][key] == stat_2[vb][key]:\n result = False\n self.log.warning(\"Stat not updated for vb-%s stat %s. 
\"\n \"%s == %s\"\n % (vb, key,\n stat_1[vb][key], stat_2[vb][key]))\n return result\n\n def validate_vb_seqno_stats():\n \"\"\"\n :return retry_validation: Boolean denoting to retry validation\n \"\"\"\n retry_validation = False\n vb_info[\"post_timeout\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n for tem_vb_num in range(self.cluster.vbuckets):\n tem_vb_num = str(tem_vb_num)\n if tem_vb_num not in affected_vbs:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log_failure(\"Unaffected vb-%s stat\" % tem_vb_num)\n elif int(tem_vb_num) in target_nodes_vbuckets[\"active\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"active\", tem_vb_num))\n elif int(tem_vb_num) in target_nodes_vbuckets[\"replica\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num, comparison=\"==\") is False:\n retry_validation = True\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"replica\", tem_vb_num))\n return retry_validation\n\n shell_conn = dict()\n cbstat_obj = dict()\n error_sim = dict()\n target_nodes_vbuckets = dict()\n vb_info = dict()\n tasks = dict()\n doc_gen = dict()\n affected_vbs = list()\n\n target_nodes_vbuckets[\"active\"] = []\n target_nodes_vbuckets[\"replica\"] = []\n vb_info[\"init\"] = dict()\n vb_info[\"post_timeout\"] = dict()\n vb_info[\"afterCrud\"] = dict()\n\n # Override crud_batch_size to minimum value for testing\n self.crud_batch_size = 5\n self.key = \"test_collections\"\n self.sdk_timeout = 3\n\n # Select target vbucket type to load_docs\n target_vb_type = \"replica\"\n if self.simulate_error == CouchbaseError.STOP_PERSISTENCE \\\n and self.durability_level \\\n == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:\n target_vb_type = \"active\"\n\n # Create required scope/collection for successful CRUD operation\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n self.collection_name = self.bucket_util.get_random_name()\n self.log.info(\"Creating scope::collection %s::%s\"\n % (self.scope_name, self.collection_name))\n self.create_scope_collection()\n\n # Load docs into created collection\n self.log.info(\"Loading data into created collection\")\n load_gen = doc_generator(self.key, 0, self.num_items)\n task = self.task.async_load_gen_docs(\n self.cluster, self.bucket, load_gen, \"create\", 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=200, process_concurrency=8,\n timeout_secs=60)\n self.task_manager.get_task_result(task)\n if self.subdoc_test:\n load_gen = sub_doc_generator(self.key, 0, self.num_items/2)\n task = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket,\n load_gen, Bucket_Op.SubDocOps.INSERT,\n timeout_secs=self.sdk_timeout,\n compression=self.sdk_compression,\n path_create=True,\n batch_size=100,\n process_concurrency=8,\n durability=self.durability_level,\n scope=self.scope_name, collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool)\n self.task_manager.get_task_result(task)\n\n self.bucket.scopes[self.scope_name].collections[\n self.collection_name].num_items = self.num_items\n\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n for node in target_nodes:\n 
shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n target_nodes_vbuckets[\"active\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"active\")\n target_nodes_vbuckets[\"replica\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"replica\")\n vb_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])\n\n curr_time = int(time.time())\n expected_timeout = curr_time + self.sdk_timeout\n\n if target_vb_type == \"active\":\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"replica\"])))\n else:\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"active\"])))\n\n # Create required doc_generators\n doc_gen[\"create\"] = doc_generator(self.key, self.num_items,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"delete\"] = doc_generator(self.key, 0,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"read\"] = doc_generator(\n self.key, int(self.num_items/3),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"update\"] = doc_generator(\n self.key, int(self.num_items/2),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Create required subdoc generators\n doc_gen[\"insert\"] = sub_doc_generator(\n self.key, int(self.num_items/2), self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"upsert\"] = sub_doc_generator_for_edit(\n self.key, 0, self.crud_batch_size,\n template_index=1,\n target_vbucket=target_vbs)\n doc_gen[\"remove\"] = sub_doc_generator(\n self.key, 0, self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Perform specified action\n for node in target_nodes:\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n self.sleep(5, \"Wait for error_simulation to take effect\")\n\n ops_to_perform = [Bucket_Op.DocOps.CREATE, Bucket_Op.DocOps.UPDATE,\n Bucket_Op.DocOps.READ, Bucket_Op.DocOps.DELETE]\n if self.subdoc_test:\n ops_to_perform = [Bucket_Op.SubDocOps.INSERT,\n Bucket_Op.SubDocOps.UPSERT,\n Bucket_Op.SubDocOps.REMOVE]\n\n for op_type in ops_to_perform:\n self.log.info(\"Starting doc op %s\" % op_type)\n if op_type in Bucket_Op.DOC_OPS:\n tasks[op_type] = self.task.async_load_gen_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n suppress_error_table=True,\n print_ops_rate=False,\n skip_read_on_error=True)\n else:\n tasks[op_type] = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n path_create=True,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n print_ops_rate=False)\n\n self.task.jython_task_manager.get_task_result(tasks[op_type])\n\n # Validate task failures\n if op_type == Bucket_Op.DocOps.READ:\n # Validation for read task\n if len(tasks[op_type].fail.keys()) != 0:\n self.log_failure(\"Read failed for few docs: %s\"\n % tasks[op_type].fail.keys())\n else:\n # Validation of CRUDs - Update / Create / Delete\n for doc_id, crud_result in tasks[op_type].fail.items():\n vb_num 
= self.bucket_util.get_vbucket_num_for_key(\n doc_id, self.cluster.vbuckets)\n if SDKException.DurabilityAmbiguousException \\\n not in str(crud_result[\"error\"]):\n self.log_failure(\n \"Invalid exception for doc %s, vb %s: %s\"\n % (doc_id, vb_num, crud_result))\n\n # Revert the specified error scenario\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Check whether the timeout triggered properly\n if int(time.time()) < expected_timeout:\n self.log_failure(\"Timed-out before expected time\")\n\n for op_type in ops_to_perform:\n if op_type == Bucket_Op.DocOps.READ:\n continue\n while doc_gen[op_type].has_next():\n doc_id, _ = doc_gen[op_type].next()\n affected_vbs.append(\n str(self.bucket_util.get_vbucket_num_for_key(\n doc_id,\n self.cluster.vbuckets)))\n\n affected_vbs = list(set(affected_vbs))\n # Fetch latest stats and validate the seq_nos are not updated\n for node in target_nodes:\n retry_count = 0\n max_retry = 3\n while retry_count < max_retry:\n self.log.info(\"Trying to validate vbseq_no stats: %d\"\n % (retry_count+1))\n retry_count += 1\n retry_required = validate_vb_seqno_stats()\n if not retry_required:\n break\n self.sleep(5, \"Sleep for vbseq_no stats to update\")\n else:\n # This will be exited only if `break` condition is not met\n self.log_failure(\"validate_vb_seqno_stats verification failed\")\n\n self.validate_test_failure()\n\n # Get SDK Client from client_pool\n sdk_client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n # Doc error validation\n for op_type in ops_to_perform:\n task = tasks[op_type]\n\n if self.nodes_init == 1 \\\n and op_type != Bucket_Op.DocOps.READ \\\n and len(task.fail.keys()) != (doc_gen[op_type].end\n - doc_gen[op_type].start):\n self.log_failure(\"Failed keys %d are less than expected %d\"\n % (len(task.fail.keys()),\n (doc_gen[op_type].end\n - doc_gen[op_type].start)))\n\n # Create table objects for display\n table_view = TableView(self.log.error)\n ambiguous_table_view = TableView(self.log.info)\n table_view.set_headers([\"Key\", \"vBucket\", \"Exception\"])\n ambiguous_table_view.set_headers([\"Key\", \"vBucket\"])\n\n # Iterate failed keys for validation\n for doc_key, doc_info in task.fail.items():\n vb_for_key = self.bucket_util.get_vbucket_num_for_key(doc_key)\n\n if SDKException.DurabilityAmbiguousException \\\n not in str(doc_info[\"error\"]):\n table_view.add_row([doc_key, vb_for_key,\n doc_info[\"error\"]])\n\n ambiguous_table_view.add_row([doc_key, str(vb_for_key)])\n if op_type not in Bucket_Op.SUB_DOC_OPS:\n retry_success = \\\n self.durability_helper.retry_for_ambiguous_exception(\n sdk_client, op_type, doc_key, doc_info)\n if not retry_success:\n self.log_failure(\"%s failed in retry for %s\"\n % (op_type, doc_key))\n\n # Display the tables (if any errors)\n table_view.display(\"Unexpected exception during %s\" % op_type)\n ambiguous_table_view.display(\"D_Ambiguous exception during %s\"\n % op_type)\n\n # Release the acquired client\n self.sdk_client_pool.release_client(sdk_client)\n\n # Verify doc count after expected CRUD failure\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest stats and validate the values are updated\n for node in target_nodes:\n vb_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n if 
vb_info[\"init\"][node.ip] == vb_info[\"afterCrud\"][node.ip]:\n self.log_failure(\"vBucket seq_no stats not updated\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()", "def test_check_replication_crit_null_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), -1)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT,\n \"replication failures counter is NULL \"\n \"(check syslog)\")])", "def test_connectionLostBackoffDelayDoubles(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.reactor.advance(self.pm.threshold - 1) #9s\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n # process dies within the threshold and should not restart immediately\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay * 2)", "def LocalUpdate(self):\n\n # Get current timestamp in miliseconds from unix epoch\n t = int(time.time() * 1000)\n\n # Number of times refill has occured\n lstrefil = self.status['timestamp'] - (60000 - self.status['refillIn'])\n nrefil = (t - lstrefil) / 60000.0\n\n if nrefil > 1:\n self.status['tokensLeft'] += self.status['refillRate'] * \\\n int(nrefil)\n\n if self.status['tokensLeft'] > 60 * self.status['refillRate']:\n self.status['tokensLeft'] = 60 * self.status['refillRate']\n\n # Update timestamps\n self.status['timestamp'] = t\n self.status['refillIn'] = int((1 - nrefil % 1) * 60000)", "def __periodic_maintenance__(self):\n pass", "def get_restart_times(logname, begintime, endtime, \n checkwarm, checkforce, checkdown, checkcold):\n global nowyear #need this since year isn't in system log\n #alternatively just ignore the year\n\n startfound = False\n stopfound = False\n with open(logname, 'r') as f:\n #Look for start\n for line in f:\n if debug: print line\n entrytime = time_from_str(line)\n if entrytime > begintime:\n startfound = True\n if debug: print \"Found start time\"\n break\n else:\n continue\n \n if not startfound:\n print \"Did not find the start time\"\n return\n\n\n warmcount = 0\n downcount = 0\n coldcount = 0\n forcecount = 0\n warmrestart_total_et = datetime.timedelta(0)\n downrestart_total_et = datetime.timedelta(0)\n coldrestart_total_et = datetime.timedelta(0)\n forcerestart_total_et = datetime.timedelta(0)\n\n while not stopfound:\n isdownrestart = False\n iscoldrestart = False\n isforcerestart = False\n\n #Look for a test\n for line in f: \n if debug: print \"Look for restart: \", line\n\n entrytime = time_from_str(line)\n if entrytime > endtime:\n stopfound = True\n if debug: print \"Found end time\"\n break #done looking at log\n\n mr = rr.search(line) # warm, force, or cold restart\n mx = rx.search(line) # down 
restart\n if mr or mx:\n begintime = entrytime\n if verbose or debug: \n print \"Found restart:\"\n print line\n if mx:\n isdownrestart = True\n break\n else:\n continue #keep looking for restart\n else: # no more lines\n stopfound = True\n\n if stopfound:\n break\n \n #get the restart reason in the next line unless it's a down restart\n if not isdownrestart:\n for line in f:\n mrr = rrr.search(line)\n if mrr:\n reason = mrr.group('reason') \n if reason == \"System restarted by VprocManager\":\n iscoldrestart = True\n break # only wanted to read one line\n else:\n raise RuntimeError(\"End of file before finding end of test\")\n \n #go until end of test (logons enabled)\n for line in f: \n if debug: print \"Look for end of test:\", line\n\n mf = rf.search(line) #recond -L\n mup = rup.search(line) #Logons are enabled\n if mf: # force restart\n isforcerestart = True\n if debug: \n print line\n elif mup:\n if verbose or debug: \n print \"Found end of test:\"\n print line\n enabledtime = time_from_str(line)\n break\n else:\n continue # keep looking for end of test\n else:\n raise RuntimeError(\"End of file before finding end of test\")\n\n\n if debug: print \"Continuing from end of test\"\n\n #Found end of test\n elapsedtime = enabledtime - begintime\n\n if verbose: print\n if isdownrestart:\n if checkdown:\n downcount += 1\n print \"Down restart {0}\".format(downcount)\n downrestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\\n\"\\\n .format(begintime, enabledtime, elapsedtime)\n elif isforcerestart:\n if checkforce:\n forcecount += 1\n print \"Force restart {0}\".format(forcecount)\n forcerestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\"\\\n .format(begintime, enabledtime, elapsedtime)\n print \"Reason:\", reason\n elif iscoldrestart:\n if checkcold:\n coldcount += 1\n print \"Cold restart {0}\".format(coldcount)\n coldrestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\\n\"\\\n .format(begintime, enabledtime, elapsedtime)\n #print \"Reason:\", reason, \"\\n\"\n else: #must be warmrestart\n if checkwarm:\n warmcount += 1\n print \"Warm restart {0}\".format(warmcount)\n warmrestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\"\\\n .format(begintime, enabledtime, elapsedtime)\n print \"Reason:\", reason\n\n ### end while not stopfound\n if debug: print \"Done going through log\"\n if verbose: print\n\n if warmcount > 0:\n print \"Warm restart average of {0} tests: {1}\"\\\n .format(warmcount, str(warmrestart_total_et/warmcount).split(\".\")[0])\n #print the time delta without microseconds\n\n if forcecount > 0:\n print \"Force restart average of {0} tests: {1}\"\\\n .format(forcecount, str(forcerestart_total_et/forcecount).split(\".\")[0])\n\n if coldcount > 0:\n print \"Cold restart average of {0} tests: {1}\"\\\n .format(coldcount, str(coldrestart_total_et/coldcount).split(\".\")[0])\n\n if downcount > 0:\n print \"Down restart average of {0} tests: {1}\"\\\n .format(downcount, str(downrestart_total_et/downcount).split(\".\")[0])\n\n\n return", "def auditlog32errsyslogallocnsbfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditlog32errsyslogallocnsbfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def fix_replication_from_slow_query_log_after_upgrade():\n run_mysql_command(\"STOP SLAVE;\")\n run_mysql_command(\"SET GLOBAL slow_query_log = 'OFF';\")\n run_mysql_command(\"START SLAVE;\")\n run_mysql_command(\"SET GLOBAL slow_query_log = 'ON';\")\n run_mysql_command(\"show slave status\\G;\")", "def 
test_lbheartbeat(self):\n pass", "def restart(self, timeStamp):\n print 'monitor_comp.restart() called'\n\n services = self.services\n global monitorVars, ps_VarsList, monitorDefinition\n \n workdir = services.get_working_dir()\n run_id = services.get_config_param('PORTAL_RUNID')\n monitor_file = 'monitor_file.nc'\n # print 'monitor file = ', monitor_file\n\n self.cdfFile = run_id+'_monitor_file.nc'\n services.log('w3 monitor file = ' + self.cdfFile)\n htmlFile = run_id +'.html'\n \n # Get restart files listed in config file. \n try:\n restart_root = services.get_config_param('RESTART_ROOT')\n restart_time = services.get_config_param('RESTART_TIME')\n services.get_restart_files(restart_root, restart_time, self.RESTART_FILES)\n except Exception, e:\n print 'Error in call to get_restart_files()' , e\n raise\n\n # copy monitor file to w3 directory\n try:\n shutil.copyfile(monitor_file,\n os.path.join(self.W3_DIR, self.cdfFile))\n except IOError, (errno, strerror):\n print 'Error copying file %s to %s: %s' % \\\n (monitor_file, self.cdfFile, strerror)\n\n htmlText = self.htmlText.replace('@CDF_FILE@',\n os.path.join(self.W3_BASEURL, self.cdfFile))\n try:\n f = open(os.path.join(self.W3_DIR, htmlFile), 'w')\n f.write(htmlText)\n f.close()\n except IOError, (errno, strerror):\n print 'Error writing to file %s : %s' % \\\n (htmlFile, strerror)\n monitorURL = os.path.join(self.W3_BASEURL , htmlFile)\n self.services.setMonitorURL(monitorURL)\n \n # Load monitorVars and ps_VarsList from pickle file \"monitor_restart\".\n\n pickleDict = {'monitorVars' : monitorVars, 'ps_VarsList': ps_VarsList,\\\n 'monitorDefinition':monitorDefinition}\n# pickleDict = {'monitorVars' : monitorVars, 'ps_VarsList': ps_VarsList}\n pickFile = open('monitor_restart', 'r')\n pickleDict = pickle.load(pickFile)\n pickFile.close()\n monitorVars = pickleDict['monitorVars']\n ps_VarsList = pickleDict['ps_VarsList']\n monitorDefinition = pickleDict['monitorDefinition']\n print 'monitorDefinition = ', monitorDefinition\n \n print 'monitor restart finished'\n return 0", "def _replica_links_lag(self):\n for f in self.fb.get_filesystem_replica_links():\n self.replica_links_lag.add_metric([f.local_file_system.name,\n f.direction,\n f.remote.name,\n f.remote_file_system.name,\n f.status], -1 if f.lag is None else f.lag)", "def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does 
not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary", "def run_cumulative_wtrfclty_damage(self):\n\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t\n\t\tfor rt_val in rt:\n\t\t\tprint('\\twterfclty_dmg: cumulative rt_{}' .format(rt_val))\n\t\t\t# --- reading in damage results from above analysis\n\t\t\teq_damage_results_csv = os.path.join(self.wterfclty_output_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'wterfclty_eq_{}yr_dmg.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val))\n\t\t\ttsu_damage_results_csv = os.path.join(self.wterfclty_output_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'wterfclty_tsu_{}yr_dmg.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val))\n\t\t\teq_df = pd.read_csv(eq_damage_results_csv)\n\t\t\ttsu_df = pd.read_csv(tsu_damage_results_csv)\n\n\t\t\tcum_df = pd.DataFrame()\n\t\t\tcum_df['guid'] = eq_df['guid']\n\t\t\t\n\t\t\tcum_df['ds-complet'] = eq_df['ds-complet'] + tsu_df['ds-complet'] \\\n\t\t\t\t- eq_df['ds-complet']*tsu_df['ds-complet']\n\t\t\t\n\t\t\t# --- prob of exceeding each damage state\n\t\t\tcum_df['ls-complet'] = cum_df['ds-complet']\n\n\t\t\tcum_df['ls-extensi'] = eq_df['ls-extensi'] + tsu_df['ls-extensi'] \\\n\t\t\t\t- eq_df['ls-extensi']*tsu_df['ls-extensi']\n\n\t\t\tcum_df['ls-moderat'] = eq_df['ls-moderat'] + tsu_df['ls-moderat'] \\\n\t\t\t\t- eq_df['ls-moderat']*tsu_df['ls-moderat']\n\n\t\t\tcum_df['ls-slight'] = eq_df['ls-slight'] + tsu_df['ls-slight'] \\\n\t\t\t\t- eq_df['ls-slight']*tsu_df['ls-slight']\n\n\t\t\t# --- prob of being in each damage state\n\t\t\tcum_df['ds-extensi'] = cum_df['ls-extensi'] - cum_df['ds-complet']\n\t\t\tcum_df['ds-moderat'] = cum_df['ls-moderat'] - cum_df['ls-extensi']\n\t\t\tcum_df['ds-slight'] = cum_df['ls-slight'] - cum_df['ls-moderat']\n\t\t\tcum_df['ds-none'] = 1 - cum_df['ls-slight']\n\t\t\tcum_df['hazard'] = 'Earthquake+Tsunami'\n\n\t\t\tresult_name = os.path.join(self.wterfclty_output_path, \n\t\t\t\t\t\t\t\t\t 
'wterfclty_cumulative_{}yr_dmg.csv' \n\t\t\t\t\t\t\t\t\t .format(rt_val))\n\t\t\tcum_df = cum_df[['guid', \n\t\t\t\t\t\t\t 'ls-slight',\n\t\t\t\t\t\t\t 'ls-moderat',\n\t\t\t\t\t\t\t 'ls-extensi',\n\t\t\t\t\t\t\t 'ls-complet',\n\t\t\t\t\t\t\t 'ds-none', \n\t\t\t\t\t\t\t 'ds-slight', \n\t\t\t\t\t\t\t 'ds-moderat', \n\t\t\t\t\t\t\t 'ds-extensi', \n\t\t\t\t\t\t\t 'ds-complet', \n\t\t\t\t\t\t\t 'hazard']]\n\t\t\tcum_df.to_csv(result_name, index=False)", "def tail_cts_per_shot(datapath, lower, TPQI_starts, bin_size = 0.256, normalize = False, correct_for_bg = True, save = 1, pulses_in_sequence = 300):\n\n print 'analyzing tail counts per shot...' \n current_dir = os.getcwd()\n plt.close('all')\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n\n ch1_counts = data['hist_ch1']\n ch0_counts = data['hist_ch0']\n\n time = bin_size*arange(len(ch1_counts))\n \n if correct_for_bg:\n bg_level_ch1 = ch1_counts[int(0.75*len(ch1_counts)):int(0.90*len(ch1_counts))].mean()\n ch1_counts = ch1_counts - bg_level_ch1*ones(len(ch1_counts))\n bg_level_ch0 = ch0_counts[int(0.75*len(ch0_counts)):int(0.90*len(ch0_counts))].mean()\n ch0_counts = ch0_counts - bg_level_ch0*ones(len(ch0_counts))\n\n #print 'measured background level for [ch0,ch1] = ['+num2str(bg_level_ch0,1)+','+num2str(bg_level_ch1,1)+']'\n\n if normalize:\n ch1_counts_normalized = ch1_counts/ch1_counts.max()\n ch0_counts_normalized = ch0_counts/ch0_counts.max()\n \n upper = lower + 40.0\n\n tail_area_time = time[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch1 = ch1_counts[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch0 = ch0_counts[int(lower/bin_size):int(upper/bin_size)]\n\n tail_counts_per_shot = (tail_area_ch1.sum()+tail_area_ch0.sum())/float(TPQI_starts*pulses_in_sequence)\n\n figure1 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(211)\n if not normalize:\n plt.semilogy(time, ch1_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch1_counts_normalized, '-r')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch1')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n\n plt.subplot(212)\n if not normalize:\n plt.semilogy(time, ch0_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch0_counts_normalized, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch0')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n if save:\n figure1.savefig('tail_cts_per_shot.pdf')\n\n try:\n data.close()\n except:\n pass\n\n print 'tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4'\n\n return tail_counts_per_shot", "def run(self):\n try:\n source_conn = self.setup_source_db()\n target_conn = self.setup_target_db()\n 
target_collection = 'oplog' + self._replica_set\n\n while True:\n try:\n # Induce an operation on the replication test database\n db_name = 'ReplTest_' + self._replica_set.upper()\n source_conn[db_name]['operation'].replace_one({'replica': self._replica_set}, {\n 'replica': self._replica_set, 'ts': int(time.time())}, upsert=True)\n\n # Wait a bit for it to replicate\n time.sleep(10)\n\n # check latest oplog of source\n entry = source_conn['local'][\n 'oplog.rs'].find().sort('$natural', -1).limit(1)\n source_oplog = entry[0]['ts'].time\n\n # get latest oplog from connector target oplog collection\n entry = target_conn['__mongo_connector'][\n target_collection].find().sort('_ts', -1).limit(1)\n target_oplog = entry[0]['_ts'] >> 32\n\n lag = source_oplog - target_oplog\n self._stat_client.gauge(self._lag_key, lag)\n\n time.sleep(self._poll_interval)\n except Exception as ex:\n logger.exception('Connection Failed, retrying..')\n time.sleep(5)\n\n except Exception as ex:\n logger.exception('Critical Error, bailing out..')", "def test_check_replication_warn_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 5)\n # with patch('urllib2.urlopen') as mock_urlopen:\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_WARN, \"5 replication failures\")])", "def enqueue_lag_monitor_task():\n measure_queue_lag.delay(datetime.now())", "def run_cumulative_pipeline_damage(self):\n\t\t\"\"\" PWP1 = brittle\n\t\t\tPWP2 = ductile \"\"\"\n\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t# rt = [100]\n\n\t\tfor rt_val in rt:\n\t\t\tprint('\\tmc_pipe_dmg: cumulative rt_{}' .format(rt_val))\n\t\t\t# --- reading in damage results from above analysis\n\t\t\teq_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_eq_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\ttsu_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_tsu_{}yr_{}.csv'\n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\teq_df = pd.read_csv(eq_damage_results_csv)\n\t\t\ttsu_df = pd.read_csv(tsu_damage_results_csv)\n\n\t\t\teq_df.set_index('guid', inplace=True)\n\t\t\ttsu_df.set_index('guid', inplace=True)\n\n\t\t\tcolumn_keys = list(eq_df.columns)\n\n\t\t\tcum_df = np.logical_or(eq_df.values, tsu_df.values).astype(int)\n\t\t\tcum_df = pd.DataFrame(cum_df, index=eq_df.index, columns=column_keys)\n\t\t\t\n\n\t\t\tresult_name = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t 'pipe_DS_cumulative_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t.format(rt_val, retrofit_key))\n\n\t\t\tcum_df.to_csv(result_name, index=True)", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if 
(db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def critical(log):\n write(syslog.LOG_CRIT, 'critical', '{log}'.format(log=log))", "def interval(ctx, poll_interval):\n fc_info = {}\n fc_info['POLL_INTERVAL'] = poll_interval\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_TRAP\", fc_info)", "def test_first_ten_minutes_are_ignored():\n m = monitor.Monitor(warmup_interval_s=60)\n t = datetime(2010, 1, 1, 0, 0)\n m.set_outside_temperature(10, t)\n assert m.temperature_update(20, t) == None\n m.boiler_on(t)\n assert m.temperature_update(21, t + timedelta(seconds=120)) == None\n assert m.temperature_update(23, t + timedelta(seconds=1320)) == (11, 6.0)", "def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF", "def test_connectionLostMinMaxRestartDelay(self):\r\n self.pm.minRestartDelay = 2\r\n self.pm.maxRestartDelay = 3\r\n\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n self.reactor.advance(self.pm.threshold - 1)\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.maxRestartDelay)" ]
[ "0.6290144", "0.6109067", "0.6005229", "0.5530093", "0.5335259", "0.52734643", "0.5261092", "0.5073339", "0.50290555", "0.4997413", "0.4988721", "0.49720186", "0.48985997", "0.4878976", "0.48768365", "0.48759085", "0.48734292", "0.48707193", "0.4865622", "0.48405007", "0.4831401", "0.48294356", "0.48034218", "0.4766555", "0.47520992", "0.4728369", "0.47263378", "0.47075832", "0.46949476", "0.4686842" ]
0.6512004
0
Replication failures over CRIT threshold, STATUS_CRIT
def test_check_replication_crit_failures(self, mock_timestamp): base_url = 'http://localhost:6000/recon/' jdata = b'{"replication_last": 1493299546.629282, ' \ b'"replication_stats": {"no_change": 0, "rsync": 0, ' \ b'"success": 0, "failure": 0, "attempted": 0, "ts_repl": 0, ' \ b'"remove": 0, "remote_merge": 0, "diff_capped": 0, ' \ b'"start": 1493299546.621624, "hashmatch": 0, "diff": 0, ' \ b'"empty": 0}, "replication_time": 0.0076580047607421875}' pmock_jdata = PropertyMock(return_value=jdata) mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 12) with patch('urllib.request.urlopen') as mock_urlopen: mock_urlopen.return_value = MagicMock(read=pmock_jdata) result = check_replication(base_url, [4, 10, 4, 10]) self.assertEqual(result, 3*[(STATUS_CRIT, "12 replication failures")])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=12), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"12 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_null_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), -1)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT,\n \"replication failures counter is NULL \"\n \"(check syslog)\")])", "def test_check_replication_warn_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 5)\n # with patch('urllib2.urlopen') as mock_urlopen:\n with patch('urllib.request.urlopen') as mock_urlopen:\n 
mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_WARN, \"5 replication failures\")])", "def test_check_replication_crit_day_plus_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=2, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"172805 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_warn_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_WARN,\n \"'{}' replication lag is \"\n \"5 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def test_timeout_with_crud_failures(self):\n\n # Local methods to validate vb_seqno\n\n def compare_vb_stat(stat_1, stat_2, vb, comparison=\"!=\"):\n keys_to_check = [\"high_seqno\", \"high_completed_seqno\"]\n result = True\n for key in keys_to_check:\n if vb in stat_1.keys():\n if stat_1[vb][\"uuid\"] != stat_2[vb][\"uuid\"]:\n self.log_failure(\"Mismatch in vb-%s UUID. %s != %s\"\n % (vb, stat_1[vb][\"uuid\"],\n stat_2[vb][\"uuid\"]))\n if comparison == \"!=\":\n if stat_1[vb][key] != stat_2[vb][key]:\n result = False\n self.log.warning(\n \"Mismatch in vb-%s stat %s. %s != %s\"\n % (vb, key, stat_1[vb][key], stat_2[vb][key]))\n elif stat_1[vb][key] == stat_2[vb][key]:\n result = False\n self.log.warning(\"Stat not updated for vb-%s stat %s. 
\"\n \"%s == %s\"\n % (vb, key,\n stat_1[vb][key], stat_2[vb][key]))\n return result\n\n def validate_vb_seqno_stats():\n \"\"\"\n :return retry_validation: Boolean denoting to retry validation\n \"\"\"\n retry_validation = False\n vb_info[\"post_timeout\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n for tem_vb_num in range(self.cluster.vbuckets):\n tem_vb_num = str(tem_vb_num)\n if tem_vb_num not in affected_vbs:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log_failure(\"Unaffected vb-%s stat\" % tem_vb_num)\n elif int(tem_vb_num) in target_nodes_vbuckets[\"active\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"active\", tem_vb_num))\n elif int(tem_vb_num) in target_nodes_vbuckets[\"replica\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num, comparison=\"==\") is False:\n retry_validation = True\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"replica\", tem_vb_num))\n return retry_validation\n\n shell_conn = dict()\n cbstat_obj = dict()\n error_sim = dict()\n target_nodes_vbuckets = dict()\n vb_info = dict()\n tasks = dict()\n doc_gen = dict()\n affected_vbs = list()\n\n target_nodes_vbuckets[\"active\"] = []\n target_nodes_vbuckets[\"replica\"] = []\n vb_info[\"init\"] = dict()\n vb_info[\"post_timeout\"] = dict()\n vb_info[\"afterCrud\"] = dict()\n\n # Override crud_batch_size to minimum value for testing\n self.crud_batch_size = 5\n self.key = \"test_collections\"\n self.sdk_timeout = 3\n\n # Select target vbucket type to load_docs\n target_vb_type = \"replica\"\n if self.simulate_error == CouchbaseError.STOP_PERSISTENCE \\\n and self.durability_level \\\n == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:\n target_vb_type = \"active\"\n\n # Create required scope/collection for successful CRUD operation\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n self.collection_name = self.bucket_util.get_random_name()\n self.log.info(\"Creating scope::collection %s::%s\"\n % (self.scope_name, self.collection_name))\n self.create_scope_collection()\n\n # Load docs into created collection\n self.log.info(\"Loading data into created collection\")\n load_gen = doc_generator(self.key, 0, self.num_items)\n task = self.task.async_load_gen_docs(\n self.cluster, self.bucket, load_gen, \"create\", 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=200, process_concurrency=8,\n timeout_secs=60)\n self.task_manager.get_task_result(task)\n if self.subdoc_test:\n load_gen = sub_doc_generator(self.key, 0, self.num_items/2)\n task = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket,\n load_gen, Bucket_Op.SubDocOps.INSERT,\n timeout_secs=self.sdk_timeout,\n compression=self.sdk_compression,\n path_create=True,\n batch_size=100,\n process_concurrency=8,\n durability=self.durability_level,\n scope=self.scope_name, collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool)\n self.task_manager.get_task_result(task)\n\n self.bucket.scopes[self.scope_name].collections[\n self.collection_name].num_items = self.num_items\n\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n for node in target_nodes:\n 
shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n target_nodes_vbuckets[\"active\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"active\")\n target_nodes_vbuckets[\"replica\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"replica\")\n vb_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])\n\n curr_time = int(time.time())\n expected_timeout = curr_time + self.sdk_timeout\n\n if target_vb_type == \"active\":\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"replica\"])))\n else:\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"active\"])))\n\n # Create required doc_generators\n doc_gen[\"create\"] = doc_generator(self.key, self.num_items,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"delete\"] = doc_generator(self.key, 0,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"read\"] = doc_generator(\n self.key, int(self.num_items/3),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"update\"] = doc_generator(\n self.key, int(self.num_items/2),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Create required subdoc generators\n doc_gen[\"insert\"] = sub_doc_generator(\n self.key, int(self.num_items/2), self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"upsert\"] = sub_doc_generator_for_edit(\n self.key, 0, self.crud_batch_size,\n template_index=1,\n target_vbucket=target_vbs)\n doc_gen[\"remove\"] = sub_doc_generator(\n self.key, 0, self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Perform specified action\n for node in target_nodes:\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n self.sleep(5, \"Wait for error_simulation to take effect\")\n\n ops_to_perform = [Bucket_Op.DocOps.CREATE, Bucket_Op.DocOps.UPDATE,\n Bucket_Op.DocOps.READ, Bucket_Op.DocOps.DELETE]\n if self.subdoc_test:\n ops_to_perform = [Bucket_Op.SubDocOps.INSERT,\n Bucket_Op.SubDocOps.UPSERT,\n Bucket_Op.SubDocOps.REMOVE]\n\n for op_type in ops_to_perform:\n self.log.info(\"Starting doc op %s\" % op_type)\n if op_type in Bucket_Op.DOC_OPS:\n tasks[op_type] = self.task.async_load_gen_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n suppress_error_table=True,\n print_ops_rate=False,\n skip_read_on_error=True)\n else:\n tasks[op_type] = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n path_create=True,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n print_ops_rate=False)\n\n self.task.jython_task_manager.get_task_result(tasks[op_type])\n\n # Validate task failures\n if op_type == Bucket_Op.DocOps.READ:\n # Validation for read task\n if len(tasks[op_type].fail.keys()) != 0:\n self.log_failure(\"Read failed for few docs: %s\"\n % tasks[op_type].fail.keys())\n else:\n # Validation of CRUDs - Update / Create / Delete\n for doc_id, crud_result in tasks[op_type].fail.items():\n vb_num 
= self.bucket_util.get_vbucket_num_for_key(\n doc_id, self.cluster.vbuckets)\n if SDKException.DurabilityAmbiguousException \\\n not in str(crud_result[\"error\"]):\n self.log_failure(\n \"Invalid exception for doc %s, vb %s: %s\"\n % (doc_id, vb_num, crud_result))\n\n # Revert the specified error scenario\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Check whether the timeout triggered properly\n if int(time.time()) < expected_timeout:\n self.log_failure(\"Timed-out before expected time\")\n\n for op_type in ops_to_perform:\n if op_type == Bucket_Op.DocOps.READ:\n continue\n while doc_gen[op_type].has_next():\n doc_id, _ = doc_gen[op_type].next()\n affected_vbs.append(\n str(self.bucket_util.get_vbucket_num_for_key(\n doc_id,\n self.cluster.vbuckets)))\n\n affected_vbs = list(set(affected_vbs))\n # Fetch latest stats and validate the seq_nos are not updated\n for node in target_nodes:\n retry_count = 0\n max_retry = 3\n while retry_count < max_retry:\n self.log.info(\"Trying to validate vbseq_no stats: %d\"\n % (retry_count+1))\n retry_count += 1\n retry_required = validate_vb_seqno_stats()\n if not retry_required:\n break\n self.sleep(5, \"Sleep for vbseq_no stats to update\")\n else:\n # This will be exited only if `break` condition is not met\n self.log_failure(\"validate_vb_seqno_stats verification failed\")\n\n self.validate_test_failure()\n\n # Get SDK Client from client_pool\n sdk_client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n # Doc error validation\n for op_type in ops_to_perform:\n task = tasks[op_type]\n\n if self.nodes_init == 1 \\\n and op_type != Bucket_Op.DocOps.READ \\\n and len(task.fail.keys()) != (doc_gen[op_type].end\n - doc_gen[op_type].start):\n self.log_failure(\"Failed keys %d are less than expected %d\"\n % (len(task.fail.keys()),\n (doc_gen[op_type].end\n - doc_gen[op_type].start)))\n\n # Create table objects for display\n table_view = TableView(self.log.error)\n ambiguous_table_view = TableView(self.log.info)\n table_view.set_headers([\"Key\", \"vBucket\", \"Exception\"])\n ambiguous_table_view.set_headers([\"Key\", \"vBucket\"])\n\n # Iterate failed keys for validation\n for doc_key, doc_info in task.fail.items():\n vb_for_key = self.bucket_util.get_vbucket_num_for_key(doc_key)\n\n if SDKException.DurabilityAmbiguousException \\\n not in str(doc_info[\"error\"]):\n table_view.add_row([doc_key, vb_for_key,\n doc_info[\"error\"]])\n\n ambiguous_table_view.add_row([doc_key, str(vb_for_key)])\n if op_type not in Bucket_Op.SUB_DOC_OPS:\n retry_success = \\\n self.durability_helper.retry_for_ambiguous_exception(\n sdk_client, op_type, doc_key, doc_info)\n if not retry_success:\n self.log_failure(\"%s failed in retry for %s\"\n % (op_type, doc_key))\n\n # Display the tables (if any errors)\n table_view.display(\"Unexpected exception during %s\" % op_type)\n ambiguous_table_view.display(\"D_Ambiguous exception during %s\"\n % op_type)\n\n # Release the acquired client\n self.sdk_client_pool.release_client(sdk_client)\n\n # Verify doc count after expected CRUD failure\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest stats and validate the values are updated\n for node in target_nodes:\n vb_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n if 
vb_info[\"init\"][node.ip] == vb_info[\"afterCrud\"][node.ip]:\n self.log_failure(\"vBucket seq_no stats not updated\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()", "def test_http_error(self):\n self.__jenkins.contents = 'raise'\n self.assertEqual(-1, self.__jenkins.nr_warnings(('job',), 'normal'))", "def test_retry_run(self):\n pass", "def IntrumentFailHook(self):\n #Restart iserver\n #If failed to restart\n #\treturn fail\n pass", "def testTrialErrored2(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(\n mock_runner, t, result(stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_error(mock_runner, trials[-1])\n self.assertEqual(len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))", "def auditlog32errsyslogallocnsbfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditlog32errsyslogallocnsbfailrate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_retry_failed(self):\n self.response.raise_for_status.side_effect = requests.HTTPError()\n\n with pytest.raises(requests.HTTPError):\n wsgi._retryable('get', 'http://some.thing')\n\n assert self.session.get.call_count == wsgi.MAX_RETRIES\n assert wsgi.MAX_RETRIES > 1", "def log(failure):\n return self._env.logger.warning('[ping] {}'.format(failure.getErrorMessage()))", "def failure_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"failure_threshold\")", "def failure_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"failure_threshold\")", "def check_opcounters(con, host, warning, critical,perf_data):\n warning=warning or 10000\n critical=critical or 15000\n\n data=get_server_status(con)\n err1,delta_opcounters=get_opcounters(data,'opcounters',host) \n err2,delta_opcounters_repl=get_opcounters(data,'opcountersRepl',host)\n if err1==0 and err2==0:\n delta=[(x+y) for x,y in zip(delta_opcounters ,delta_opcounters_repl) ]\n delta[0]=delta_opcounters[0]#only the time delta shouldn't be summarized\n per_minute_delta=[int(x/delta[0]*60) for x in delta[1:]]\n message=\"Test succeeded , old values missing\"\n message= \"Opcounters: total=%d,insert=%d,query=%d,update=%d,delete=%d,getmore=%d,command=%d\" % tuple(per_minute_delta)\n message+=performance_data(perf_data,([(per_minute_delta[0],\"total\",warning,critical),(per_minute_delta[1],\"insert\"),\n (per_minute_delta[2],\"query\"), (per_minute_delta[3],\"update\"),(per_minute_delta[5],\"delete\"),\n (per_minute_delta[5],\"getmore\"),(per_minute_delta[6],\"command\")]))\n return check_levels(per_minute_delta[0],warning,critical,message)\n else :\n return exit_with_general_critical(\"problem reading data from temp file\")", "def test_connectionLostBackoffDelayDoubles(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.reactor.advance(self.pm.threshold - 1) #9s\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n # process dies within the threshold and should not restart immediately\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay * 2)", "def test_concurrent_failover_timer_reset(self):\n\n 
services_to_fo = self.failover_order[0].split(\":\")\n self.nodes_to_fail = self.get_nodes_to_fail(services_to_fo,\n dynamic_fo_method=True)\n expected_fo_nodes = self.num_nodes_to_be_failover\n self.__update_server_obj()\n rand_node = choice(self.nodes_to_fail.keys())\n self.__update_unaffected_node()\n self.__display_failure_node_status(\"Nodes to be failed\")\n try:\n self.log.info(\"Starting auto-failover procedure\")\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=expected_fo_nodes,\n task_type=\"induce_failure\")\n self.task_manager.add_new_task(failover_task)\n self.sleep(int(self.timeout * 0.7),\n \"Wait before bringing back the failed nodes\")\n\n self.log.info(\"Bringing back '%s' for some time\" % rand_node.ip)\n new_timer = None\n shell = RemoteMachineShellConnection(rand_node)\n cb_err = CouchbaseError(self.log, shell)\n if self.nodes_to_fail[rand_node] == CouchbaseError.STOP_MEMCACHED:\n cb_err.revert(CouchbaseError.STOP_MEMCACHED)\n self.sleep(10, \"Wait before creating failure again\")\n cb_err.create(CouchbaseError.STOP_MEMCACHED)\n new_timer = time()\n elif self.nodes_to_fail[rand_node] == \"stop_couchbase\":\n cb_err.revert(CouchbaseError.STOP_SERVER)\n self.sleep(10, \"Wait before creating failure again\")\n cb_err.create(CouchbaseError.STOP_SERVER)\n new_timer = time()\n shell.disconnect()\n\n # Validate the previous auto-failover task failed\n # due to the random_node coming back online\n self.task_manager.get_task_result(failover_task)\n self.assertFalse(failover_task.result,\n \"Nodes failed over though nodes became active\")\n\n # Validate auto_failover_settings\n self.validate_failover_settings(True, self.timeout,\n 0, self.max_count)\n\n # Make sure the new auto-failover timing is honoured\n new_timer = new_timer + self.timeout\n while int(time()) < new_timer:\n settings = self.rest.get_autofailover_settings()\n if settings.count != 0:\n self.fail(\"Nodes failed over before new failover time\")\n\n self.sleep(10, \"Wait for failover rebalance to trigger\")\n self.rest.monitorRebalance()\n\n # Validate auto_failover_settings after actual auto failover\n self.validate_failover_settings(True, self.timeout,\n expected_fo_nodes, self.max_count)\n finally:\n # Recover all nodes from induced failures\n failover_task = ConcurrentFailoverTask(\n task_manager=self.task_manager, master=self.orchestrator,\n servers_to_fail=self.nodes_to_fail,\n expected_fo_nodes=expected_fo_nodes,\n task_type=\"revert_failure\")\n self.task_manager.add_new_task(failover_task)\n self.task_manager.get_task_result(failover_task)\n self.log.info(\"Rebalance out the failed nodes\")\n result = self.cluster_util.rebalance(self.cluster)\n self.assertTrue(result, \"Final rebalance failed\")\n\n # Perform collection crud + doc_ops after rebalance operation\n self.__perform_doc_ops()", "def inject_failure(self):\n # Inject a failure only if there's a process running\n self.BqLog(\"Starting failure injection\")\n while len(self.circQ) > 0 or (self.currentProc and self.currentProc.workLeft > 0):\n t = time_to_failure()\n self.BqLog(\"Inject the next failure after %d seconds\" % (t))\n if t == 0:\n continue\n yield self.env.timeout(t)\n if len(self.circQ) >= 0 and \\\n self.currentProc.workLeft > 0:\n # Only break the machine if it is currently computing,\n # and if current proc is not restarting\n self.BqLog(\"Injecting a failure in %s\" % (self.currentProc.name))\n self.numFailures += 1\n 
self.process.interrupt(cause=\"failure\")", "def _log_retry_attempt(retry_state):\n logger.warning(\n 'beat: Retrying Redis connection in %s seconds...', retry_state.next_action.sleep\n )", "def manage_critical_issues(exit_code, mip_convert_config_dir, timestamp,\n fields_to_log=None):\n if fields_to_log is None:\n fields_to_log = []\n logger = logging.getLogger(__name__)\n critical_issues_file = os.path.join(mip_convert_config_dir, 'log',\n 'critical_issues.log')\n mip_convert_log = 'mip_convert.{}.log'.format(timestamp)\n logger.debug('Searching \"{}\" for CRITICAL messages'\n ''.format(mip_convert_log))\n critical_issues_list = []\n with open(mip_convert_log) as log_file_handle:\n for line in log_file_handle.readlines():\n if 'CRITICAL' in line:\n critical_issues_list.append(line.strip())\n # Just in case an error code is raised for a separate reason;\n if not critical_issues_list:\n logger.debug('No CRITICAL messages found')\n return exit_code\n\n with open(critical_issues_file, 'a') as critical_issues_log:\n for issue in critical_issues_list:\n line = '|'.join(fields_to_log + [mip_convert_log, issue])\n critical_issues_log.write(line + '\\n')\n logger.info('Wrote \"{}\" critical issues to log file \"{}\"'.format(\n len(critical_issues_list), critical_issues_file))\n return 0", "def test_monitoring_critical_processes(duthosts, rand_one_dut_hostname, tbinfo):\n duthost = duthosts[rand_one_dut_hostname]\n loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=\"monitoring_critical_processes\")\n loganalyzer.expect_regex = []\n bgp_neighbors = duthost.get_bgp_neighbors()\n up_bgp_neighbors = [ k.lower() for k, v in bgp_neighbors.items() if v[\"state\"] == \"established\" ]\n\n skip_containers = []\n skip_containers.append(\"database\")\n skip_containers.append(\"gbsyncd\")\n # Skip 'radv' container on devices whose role is not T0.\n if tbinfo[\"topo\"][\"type\"] != \"t0\":\n skip_containers.append(\"radv\")\n\n containers_in_namespaces = get_containers_namespace_ids(duthost, skip_containers)\n\n expected_alerting_messages = get_expected_alerting_messages(duthost, containers_in_namespaces)\n loganalyzer.expect_regex.extend(expected_alerting_messages)\n marker = loganalyzer.init()\n\n stop_critical_processes(duthost, containers_in_namespaces)\n\n # Wait for 70 seconds such that Supervisord has a chance to write alerting message into syslog.\n logger.info(\"Sleep 70 seconds to wait for the alerting message...\")\n time.sleep(70)\n\n logger.info(\"Checking the alerting messages from syslog...\")\n loganalyzer.analyze(marker)\n logger.info(\"Found all the expected alerting messages from syslog!\")\n\n logger.info(\"Executing the config reload...\")\n config_reload(duthost)\n logger.info(\"Executing the config reload was done!\")\n\n ensure_all_critical_processes_running(duthost, containers_in_namespaces)\n\n if not postcheck_critical_processes_status(duthost, up_bgp_neighbors):\n pytest.fail(\"Post-check failed after testing the container checker!\")\n logger.info(\"Post-checking status of critical processes and BGP sessions was done!\")", "def test_retry(self):\n self.response.raise_for_status.side_effect = \\\n [requests.HTTPError(), None]\n\n wsgi._retryable('get', 'http://some.thing')\n\n assert self.session.get.call_count == 2", "def check_asserts(con, host, warning, critical,perf_data):\n warning = warning or 1\n critical = critical or 10 \n data=get_server_status(con)\n\n asserts=data['asserts']\n \n #{ \"regular\" : 0, \"warning\" : 6, \"msg\" : 0, \"user\" : 12, \"rollovers\" : 
0 } \n regular=asserts['regular']\n warning_asserts=asserts['warning']\n msg=asserts['msg']\n user=asserts['user']\n rollovers=asserts['rollovers']\n\n err,delta=maintain_delta([regular,warning_asserts,msg,user,rollovers],host,\"asserts\")\n \n if err==0:\n if delta[5]!=0:\n #the number of rollovers were increased\n warning=-1 # no matter the metrics this situation should raise a warning\n # if this is normal rollover - the warning will not appear again, but if there will be a lot of asserts \n # the warning will stay for a long period of time\n # although this is not a usual situation\n \n regular_ps=delta[1]/delta[0]\n warning_ps=delta[2]/delta[0]\n msg_ps=delta[3]/delta[0]\n user_ps=delta[4]/delta[0]\n rollovers_ps=delta[5]/delta[0]\n total_ps=regular_ps+warning_ps+msg_ps+user_ps\n message = \"Total asserts : %.2f ps\" % total_ps \n message+=performance_data(perf_data,[(total_ps,\"asserts_ps\",warning,critical),(regular_ps,\"regular\"),\n (warning_ps,\"warning\"),(msg_ps,\"msg\"),(user_ps,\"user\")])\n return check_levels(total_ps,warning,critical,message)\n else:\n return exit_with_general_warning(\"problem reading data from temp file\")", "def test_connectionLostMinMaxRestartDelay(self):\r\n self.pm.minRestartDelay = 2\r\n self.pm.maxRestartDelay = 3\r\n\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n self.reactor.advance(self.pm.threshold - 1)\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.maxRestartDelay)", "def cluster_count_failure_report(self, node_id: str) -> ResponseT:\n return self.execute_command(\"CLUSTER COUNT-FAILURE-REPORTS\", node_id)", "def inject_failure(self):\n while len(self.circQ):\n yield self.env.timeout(time_to_failure())\n if len(self.circQ) > 0 and \\\n not self.currentProc.broken and \\\n self.currentProc.workLeft > 0:\n # Only break the machine if it is currently computing,\n # and if current proc is not restarting\n # TODO: Allow errors to be thrown while restarting\n self.BqLog(\"Injecting a failure in %s\" % (self.currentProc.name))\n self.numFailures += 1\n self.process.interrupt(cause=\"failure\")", "def testTrialErrored(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + 3\n sched, mock_runner = self.schedulerSetup(trial_count)\n t1, t2, t3 = sched._state[\"bracket\"].current_trials()\n for t in [t1, t2, t3]:\n mock_runner._launch_trial(t)\n\n sched.on_trial_error(mock_runner, t3)\n self.assertEqual(\n TrialScheduler.PAUSE,\n sched.on_trial_result(\n mock_runner, t1, result(stats[str(1)][\"r\"], 10)))\n self.assertEqual(\n TrialScheduler.CONTINUE,\n sched.on_trial_result(\n mock_runner, t2, result(stats[str(1)][\"r\"], 10)))" ]
[ "0.6570607", "0.6430304", "0.63666743", "0.6191292", "0.6105379", "0.60212624", "0.5926016", "0.58817255", "0.5813054", "0.55550146", "0.55077624", "0.549311", "0.54468197", "0.5378133", "0.53728884", "0.5360944", "0.5360944", "0.5347972", "0.5341417", "0.5293764", "0.5282871", "0.5279534", "0.52559024", "0.5214601", "0.51982373", "0.51921594", "0.5178193", "0.51648", "0.5144415", "0.51293105" ]
0.6924033
0
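The record above pairs the query "Replication failures over CRIT threshold, STATUS_CRIT" with a unit test that feeds check_replication a mocked failure count of 12 against the threshold list [4, 10, 4, 10] and expects a STATUS_CRIT "12 replication failures" entry per replication type; its negatives show the matching WARN (5 failures), lag, and NULL-counter variants. As a reading aid only, the sketch below reproduces that thresholding logic. It is a minimal, hypothetical illustration, not the swift-recon code under test: the function name, the STATUS_* constants, and the assumption that the threshold list means [lag_warn, lag_crit, failure_warn, failure_crit] are all invented here for clarity.

# Hypothetical sketch of the thresholding exercised by the tests in this record.
# Assumed: thresholds = [lag_warn, lag_crit, fail_warn, fail_crit]; the STATUS_*
# constants and function name are placeholders, not the real swift-recon check.
STATUS_OK, STATUS_WARN, STATUS_CRIT = 0, 1, 2

def classify_replication(lag_seconds, failures, thresholds):
    """Classify one replication type's lag and failure count against thresholds."""
    lag_warn, lag_crit, fail_warn, fail_crit = thresholds
    results = []
    # Lag checks: a missing lag value is treated as a hard failure.
    if lag_seconds is None:
        results.append((STATUS_CRIT,
                        "replication lag not working (perms issue? check syslog)"))
    elif lag_seconds >= lag_crit:
        results.append((STATUS_CRIT,
                        "replication lag is %d seconds" % lag_seconds))
    elif lag_seconds >= lag_warn:
        results.append((STATUS_WARN,
                        "replication lag is %d seconds" % lag_seconds))
    # Failure-counter checks: a negative counter means the stat was NULL.
    if failures < 0:
        results.append((STATUS_CRIT,
                        "replication failures counter is NULL (check syslog)"))
    elif failures >= fail_crit:
        results.append((STATUS_CRIT, "%d replication failures" % failures))
    elif failures >= fail_warn:
        results.append((STATUS_WARN, "%d replication failures" % failures))
    return results or [(STATUS_OK, "replication OK")]

For example, classify_replication(0, 12, [4, 10, 4, 10]) returns [(STATUS_CRIT, "12 replication failures")], matching the expectation asserted in the document above (which repeats that entry once per replication type: account, object, container), while classify_replication(5, 0, [4, 10, 4, 10]) yields the WARN lag case exercised by the next record's query.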
Replication lag over WARN threshold (below CRIT), STATUS_WARN
def test_check_replication_warn_lag(self, mock_timestamp): base_url = 'http://localhost:6000/recon/' jdata = b'{"replication_last": 1493299546.629282, ' \ b'"replication_stats": {"no_change": 0, "rsync": 0, ' \ b'"success": 0, "failure": 0, "attempted": 0, "ts_repl": 0, ' \ b'"remove": 0, "remote_merge": 0, "diff_capped": 0, ' \ b'"start": 1493299546.621624, "hashmatch": 0, "diff": 0, ' \ b'"empty": 0}, "replication_time": 0.0076580047607421875}' pmock_jdata = PropertyMock(return_value=jdata) mock_timestamp.return_value = (MagicMock(days=0, seconds=5), 0) with patch('urllib.request.urlopen') as mock_urlopen: mock_urlopen.return_value = MagicMock(read=pmock_jdata) result = check_replication(base_url, [4, 10, 4, 10]) self.assertEqual(result, [(STATUS_WARN, "'{}' replication lag is " "5 seconds".format(repl)) for repl in ('account', 'object', 'container')])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_replication_crit_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=12), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"12 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? 
check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_warn_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 5)\n # with patch('urllib2.urlopen') as mock_urlopen:\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_WARN, \"5 replication failures\")])", "def test_check_replication_crit_day_plus_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=2, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"172805 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def _replica_links_lag(self):\n for f in self.fb.get_filesystem_replica_links():\n self.replica_links_lag.add_metric([f.local_file_system.name,\n f.direction,\n f.remote.name,\n f.remote_file_system.name,\n f.status], -1 if f.lag is None else f.lag)", "def test_lbheartbeat(self):\n pass", "def fix_replication_from_slow_query_log_after_upgrade():\n run_mysql_command(\"STOP SLAVE;\")\n run_mysql_command(\"SET GLOBAL slow_query_log = 'OFF';\")\n run_mysql_command(\"START SLAVE;\")\n run_mysql_command(\"SET GLOBAL slow_query_log = 'ON';\")\n run_mysql_command(\"show slave status\\G;\")", "def warning(self, msg, *args):\n if self.lvl<=logging.WARNING: return self._log(msg, *args)", "def warningglobal(self, *args, **kwargs):\n return self.logger.log(logging.WARNING+1, *args, **kwargs)", "def warnings_active(self) -> List[Error]:", "def test_connectionLostBackoffDelayDoubles(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.reactor.advance(self.pm.threshold - 1) #9s\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n # process dies within the threshold and should not restart immediately\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay * 2)", "def _log_err_msg(self, message):\n current_time = time.time()\n if current_time - self._last_warning_time > 600:\n logging.warning(message)\n self._last_warning_time = current_time", "def log_warn(self, msg):\n self.log(msg, level=LOG_WARN)", 
"def log_reconnect(self):\n pass", "def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. 
Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary", "def warning(self, tag, message, exc_info=False):\n \n self.log(logging.warning,tag, message, exc_info)", "def _report_consumer_offsets_and_lag(self, contexts_limit):\n reported_contexts = 0\n self.log.debug(\"Reporting consumer offsets and lag metrics\")\n for (consumer_group, topic, partition), consumer_offset in self._consumer_offsets.items():\n if reported_contexts >= contexts_limit:\n self.log.debug(\n \"Reported contexts number %s greater than or equal to contexts limit of %s, returning\",\n str(reported_contexts),\n str(contexts_limit),\n )\n return\n consumer_group_tags = ['topic:%s' % topic, 'partition:%s' % partition, 'consumer_group:%s' % consumer_group]\n consumer_group_tags.extend(self._custom_tags)\n\n partitions = self.kafka_client._client.cluster.partitions_for_topic(topic)\n self.log.debug(\"Received partitions %s for topic %s\", partitions, topic)\n if partitions is not None and partition in partitions:\n # report consumer offset if the partition is valid because even if leaderless the consumer offset will\n # be valid once the leader failover completes\n self.gauge('consumer_offset', consumer_offset, tags=consumer_group_tags)\n reported_contexts += 1\n\n if (topic, partition) not in self._highwater_offsets:\n self.log.warning(\n \"Consumer group: %s has offsets for topic: %s partition: %s, but no stored highwater offset \"\n \"(likely the partition is in the middle of leader failover) so cannot calculate consumer lag.\",\n consumer_group,\n topic,\n partition,\n )\n continue\n producer_offset = self._highwater_offsets[(topic, partition)]\n consumer_lag = producer_offset - consumer_offset\n if reported_contexts < contexts_limit:\n self.gauge('consumer_lag', consumer_lag, tags=consumer_group_tags)\n reported_contexts += 1\n\n if consumer_lag < 0:\n # this will effectively result in data loss, so emit an event for max visibility\n title = \"Negative consumer lag for group: {}.\".format(consumer_group)\n message = (\n \"Consumer group: {}, topic: {}, partition: {} has negative consumer lag. 
This should never \"\n \"happen and will result in the consumer skipping new messages until the lag turns \"\n \"positive.\".format(consumer_group, topic, partition)\n )\n key = \"{}:{}:{}\".format(consumer_group, topic, partition)\n self.send_event(title, message, consumer_group_tags, 'consumer_lag', key, severity=\"error\")\n self.log.debug(message)\n\n if reported_contexts >= contexts_limit:\n continue\n if not self._data_streams_enabled:\n continue\n timestamps = self._broker_timestamps[\"{}_{}\".format(topic, partition)]\n # The producer timestamp can be not set if there was an error fetching broker offsets.\n producer_timestamp = timestamps.get(producer_offset, None)\n consumer_timestamp = self._get_interpolated_timestamp(timestamps, consumer_offset)\n if consumer_timestamp is None or producer_timestamp is None:\n continue\n lag = producer_timestamp - consumer_timestamp\n self.gauge('consumer_lag_seconds', lag, tags=consumer_group_tags)\n reported_contexts += 1\n else:\n if partitions is None:\n msg = (\n \"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic has no partitions \"\n \"in the cluster, so skipping reporting these offsets.\"\n )\n else:\n msg = (\n \"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic partition isn't \"\n \"included in the cluster partitions, so skipping reporting these offsets.\"\n )\n self.log.warning(msg, consumer_group, topic, partition)\n self.kafka_client._client.cluster.request_update() # force metadata update on next poll()", "def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")", "def test_lake():\n dwd = DwdWeatherWarningsAPI(WARNCELL_ID_LAKE)\n assert dwd.data_valid\n assert dwd.warncell_id == WARNCELL_ID_LAKE\n assert dwd.warncell_name == WARNCELL_NAME_LAKE\n start_time = datetime.datetime.now(\n datetime.timezone.utc\n ) - datetime.timedelta(0, TIME_TOLERANCE)\n stop_time = start_time + datetime.timedelta(0, (2 * TIME_TOLERANCE))\n assert start_time < dwd.last_update < stop_time\n assert MIN_WARNING_LEVEL <= dwd.current_warning_level <= MAX_WARNING_LEVEL\n assert MIN_WARNING_LEVEL <= dwd.expected_warning_level <= MAX_WARNING_LEVEL\n assert isinstance(dwd.current_warnings, list)\n assert isinstance(dwd.expected_warnings, list)", "def create_lb_unhealthy_alarm ( cloudwatch_conn,\n base_name,\n lb_name,\n min_healthy_hosts,\n topic_arn,\n threshold = 5) :\n alarm = boto.ec2.cloudwatch.MetricAlarm( name = base_name + '-' + lb_name + '-UNHEALTHY-Alarm',\n description = 'Alarm for when ' + lb_name + ' does not have enough healthy hosts',\n metric = 'HealthyHostCount',\n namespace = 'AWS/ELB',\n statistic = 'Average',\n comparison = '<',\n threshold = min_healthy_hosts,\n period = 60,\n evaluation_periods = threshold,\n dimensions = { 'LoadBalancerName': lb_name },\n alarm_actions = topic_arn )\n cloudwatch_conn.create_alarm( alarm )\n return alarm", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def check_opcounters(con, host, warning, critical,perf_data):\n warning=warning or 10000\n critical=critical or 15000\n\n data=get_server_status(con)\n err1,delta_opcounters=get_opcounters(data,'opcounters',host) \n 
err2,delta_opcounters_repl=get_opcounters(data,'opcountersRepl',host)\n if err1==0 and err2==0:\n delta=[(x+y) for x,y in zip(delta_opcounters ,delta_opcounters_repl) ]\n delta[0]=delta_opcounters[0]#only the time delta shouldn't be summarized\n per_minute_delta=[int(x/delta[0]*60) for x in delta[1:]]\n message=\"Test succeeded , old values missing\"\n message= \"Opcounters: total=%d,insert=%d,query=%d,update=%d,delete=%d,getmore=%d,command=%d\" % tuple(per_minute_delta)\n message+=performance_data(perf_data,([(per_minute_delta[0],\"total\",warning,critical),(per_minute_delta[1],\"insert\"),\n (per_minute_delta[2],\"query\"), (per_minute_delta[3],\"update\"),(per_minute_delta[5],\"delete\"),\n (per_minute_delta[5],\"getmore\"),(per_minute_delta[6],\"command\")]))\n return check_levels(per_minute_delta[0],warning,critical,message)\n else :\n return exit_with_general_critical(\"problem reading data from temp file\")", "def auditlog32errsyslogallocnsbfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditlog32errsyslogallocnsbfailrate\n\t\texcept Exception as e:\n\t\t\traise e" ]
[ "0.596336", "0.58672875", "0.5598584", "0.5585561", "0.55175346", "0.5379869", "0.53797185", "0.5345961", "0.53422654", "0.53330433", "0.5331419", "0.5326337", "0.5317457", "0.5310167", "0.5303663", "0.5232191", "0.52119166", "0.5203335", "0.52027446", "0.5175046", "0.516937", "0.516937", "0.516937", "0.516937", "0.516937", "0.516937", "0.516937", "0.516937", "0.5124899", "0.51189303" ]
0.6612414
0
Replication lag CRITS with day wrap, STATUS_CRIT
def test_check_replication_crit_day_plus_lag(self, mock_timestamp): base_url = 'http://localhost:6000/recon/' jdata = b'{"replication_last": 1493299546.629282, ' \ b'"replication_stats": {"no_change": 0, "rsync": 0, ' \ b'"success": 0, "failure": 0, "attempted": 0, "ts_repl": 0, ' \ b'"remove": 0, "remote_merge": 0, "diff_capped": 0, ' \ b'"start": 1493299546.621624, "hashmatch": 0, "diff": 0, ' \ b'"empty": 0}, "replication_time": 0.0076580047607421875}' pmock_jdata = PropertyMock(return_value=jdata) mock_timestamp.return_value = (MagicMock(days=2, seconds=5), 0) with patch('urllib.request.urlopen') as mock_urlopen: mock_urlopen.return_value = MagicMock(read=pmock_jdata) result = check_replication(base_url, [4, 10, 4, 10]) self.assertEqual(result, [(STATUS_CRIT, "'{}' replication lag is " "172805 seconds".format(repl)) for repl in ('account', 'object', 'container')])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_replication_crit_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=12), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"12 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def maintenance_cut(df):\n full_maintenance_duration = (\n (df.maintenance_duration + df.maintenance_cycle)/3600\n )\n remainder = df.timestamp % full_maintenance_duration\n \n df['maintenance_cut'] = (\n remainder > (df.maintenance_duration/3600)\n )\n \n return None", "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? 
check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def lag(self):\n self._assert_counted_at_lag()\n return self._lag", "def test_check_replication_warn_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_WARN,\n \"'{}' replication lag is \"\n \"5 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def doit(gts, hr):\n sts = gts - datetime.timedelta(hours=hr)\n times = [gts]\n if hr > 24:\n times.append(gts - datetime.timedelta(hours=24))\n if hr == 72:\n times.append(gts - datetime.timedelta(hours=48))\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': gts.strftime(\"%Y-%m-%dT%H:%M:%SZ\")}\n # Create the image data\n # imgdata = np.zeros( (szy, szx), 'u1')\n # timestep = np.zeros( (szy, szx), 'f')\n total = None\n for now in times:\n gribfn = now.strftime((\"/mnt/a4/data/%Y/%m/%d/mrms/ncep/\"\n \"RadarOnly_QPE_24H/\"\n \"RadarOnly_QPE_24H_00.00_%Y%m%d-%H%M00\"\n \".grib2.gz\"))\n if not os.path.isfile(gribfn):\n print(\"mrms_raster_pXXh.py MISSING %s\" % (gribfn,))\n return\n fp = gzip.GzipFile(gribfn, 'rb')\n (tmpfp, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n\n # careful here, how we deal with the two missing values!\n if total is None:\n total = grb['values']\n else:\n maxgrid = np.maximum(grb['values'], total)\n total = np.where(np.logical_and(grb['values'] >= 0, total >= 0),\n grb['values'] + total, maxgrid)\n\n \"\"\"\n 255 levels... 
wanna do 0 to 20 inches\n index 255 is missing, index 0 is 0\n 0-1 -> 100 - 0.01 res || 0 - 25 -> 100 - 0.25 mm 0\n 1-5 -> 80 - 0.05 res || 25 - 125 -> 80 - 1.25 mm 100\n 5-20 -> 75 - 0.20 res || 125 - 500 -> 75 - 5 mm 180\n \"\"\"\n # total = np.flipud(total)\n # Off scale gets index 254\n imgdata = convert_to_image(total)\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n # Create Image\n png = Image.fromarray(imgdata.astype('u1'))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n # os.system(\"xv %s.png\" % (tmpfn,))\n # Now we need to generate the world file\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot ac %s \"\n \"gis/images/4326/mrms/p%sh.wld GIS/mrms/p%sh_%s.wld wld' %s.wld\"\n \"\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot ac %s \"\n \"gis/images/4326/mrms/p%sh.png GIS/mrms/p%sh_%s.png png' %s.png\"\n \"\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/p%sh.tif GIS/mrms/p%sh_%s.tif tif' %s.tif\"\n \"\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/p%sh.json GIS/mrms/p%sh_%s.json json'\"\n \" %s.json\") % (gts.strftime(\"%Y%m%d%H%M\"), hr, hr,\n gts.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n os.unlink('%s.%s' % (tmpfn, suffix))\n os.close(tmpfp)\n os.unlink(tmpfn)", "def test_calculate_crow_bounds_cum_failure_rate_type1(self):\n\n _bounds = calculate_crow_bounds(22, 620.0, 0.4239, 0.6142, 0.9, 3, 1)\n self.assertAlmostEqual(_bounds[0], 0.02402216)\n self.assertAlmostEqual(_bounds[1], 0.05255707)", "def lag(s, cur, t):\n if LAG == 0:\n return cur\n else:\n if MODE == 1:\n num = int(WIDTH*LAGSTR/DS)\n elif MODE == 2:\n num = int(WIDTH*LAGSTR/np.mean(np.diff(s)))\n if np.mod(t, LPRINT) == 0:\n print('+> Adding phase lag to local curvature...', end='')\n cur = compute_lag(cur, num)\n if np.mod(t, LPRINT) == 0:\n print(' [done]')\n return cur", "def test_calculate_crow_bounds_cum_failure_rate_type2(self):\n\n _bounds = calculate_crow_bounds(22, 620.0, 0.4239, 0.6142, 0.9, 3, 2)\n self.assertAlmostEqual(_bounds[0], 0.02402216)\n self.assertAlmostEqual(_bounds[1], 0.04877491)", "def cyclic_merit_lag(x,*args):\n CS = args[0]\n print \"rindex\",CS.rindex\n ht = get_ht(x,CS.rindex)\n hf = time2freq(ht)\n CS.hf = hf\n CS.ht = ht\n cs_model,csplus,csminus,phases = make_model_cs(hf,CS.s0,CS.bw,CS.ref_freq)\n merit = 2*(np.abs(cs_model[:,1:] - CS.cs[:,1:])**2).sum() #ignore zeroth harmonic (dc term)\n \n # the objval list keeps track of how the convergence is going\n CS.objval.append(merit)\n \n #gradient_lag\n diff = cs_model - CS.cs #model - data\n cc1 = cs2cc(diff * csminus)\n \n# original c code for reference:\n# for (ilag=0; ilag<cc1.nlag; ilag++) {\n# 
gradient->data[ilag] = 0.0 + I * 0.0;\n# int lag = (ilag<=cc1.nlag/2) ? ilag : ilag-cc1.nlag;\n# tau = (double)lag * (double)cs->nchan /\n# ( (double)cc1.nlag * cc1.bw*1.e6 );\n# for (ih=1; ih<cc1.nharm; ih++) {\n# phs = M_PI * tau * (double)ih * cc1.ref_freq;\n# phasor = cos(phs)+I*sin(phs);\n# fftwf_complex *ccval = get_cc(&cc1,ih,ip,ilag);\n# gradient->data[ilag] += 4.0 * (*ccval) * phasor\n# * conj(s0->data[ih]) / (float)cs->nchan;\n# }\n# }\n\n #we reuse phases and csminus, csplus from the make_model_cs call\n\n phasors = np.exp(1j*phases)\n cs0 = np.repeat(CS.s0[np.newaxis,:],CS.nlag,axis=0) #filter2cs\n grad = 4.0 * cc1 * phasors * np.conj(cs0) / CS.nchan\n grad = grad[:,1:].sum(1) # sum over all harmonics to get function of lag\n \n #conjugate(res)\n #calc positive shear\n #multiply\n #cs2cc\n cc2 = cs2cc(np.conj(diff) * csplus)\n grad2 = 4.0 * cc2 * np.conj(phasors) * cs0 / CS.nchan\n \n grad = grad + grad2[:,1:].sum(1)\n CS.grad = grad[:]\n CS.model = cs_model[:]\n\n if CS.iprint:\n print \"merit= %.7e grad= %.7e\" % (merit,(np.abs(grad)**2).sum())\n \n if CS.make_plots:\n if CS.niter % CS.plot_every == 0:\n CS.plotCurrentSolution()\n \n \n \n grad = get_params(grad, CS.rindex)\n CS.niter += 1\n \n return merit,grad", "def calc_loss_flux(self, shotANDplunge=\"167192.1\"):\n\n wb = xl.load_workbook(\"recLPdata.xlsx\", data_only=True)\n dataSheet = wb.get_sheet_by_name(\"Sheet1\")\n\n # Get the correct cells.\n if shotANDplunge == \"167192.1\":\n timeLow = \"A3\"\n timeHigh = \"A64\"\n densLow = \"C3\"\n densHigh = \"C64\"\n tempLow = \"D3\"\n tempHigh = \"D64\"\n rMinRsepLow = \"G3\"\n rMinRsepHigh = \"G64\"\n elif shotANDplunge == \"167192.2\":\n timeLow = \"I3\"\n timeHigh = \"I55\"\n densLow = \"K3\"\n densHigh = \"K55\"\n tempLow = \"L3\"\n tempHigh = \"L55\"\n rMinRsepLow = \"O3\"\n rMinRsepHigh = \"O55\"\n elif shotANDplunge == \"167193.1\":\n timeLow = \"Q3\"\n timeHigh = \"Q61\"\n densLow = \"S3\"\n densHigh = \"S61\"\n tempLow = \"T3\"\n tempHigh = \"T61\"\n rMinRsepLow = \"W3\"\n rMinRsepHigh = \"W61\"\n elif shotANDplunge == \"167193.2\":\n timeLow = \"Y3\"\n timeHigh = \"Y48\"\n densLow = \"AA3\"\n densHigh = \"AA48\"\n tempLow = \"AB3\"\n tempHigh = \"AB48\"\n rMinRsepLow = \"AE3\"\n rMinRsepHigh = \"AE48\"\n elif shotANDplunge == \"167194.1\":\n timeLow = \"AG3\"\n timeHigh = \"AG71\"\n densLow = \"AI3\"\n densHigh = \"AI71\"\n tempLow = \"AJ3\"\n tempHigh = \"AJ71\"\n rMinRsepLow = \"AM3\"\n rMinRsepHigh = \"AM71\"\n elif shotANDplunge == \"167194.2\":\n timeLow = \"AO3\"\n timeHigh = \"AO67\"\n densLow = \"AQ3\"\n densHigh = \"AQ67\"\n tempLow = \"AR3\"\n tempHigh = \"AR67\"\n rMinRsepLow = \"AU3\"\n rMinRsepHigh = \"AU67\"\n elif shotANDplunge == \"167195.1\":\n timeLow = \"AW3\"\n timeHigh = \"AW60\"\n densLow = \"AY3\"\n densHigh = \"AY60\"\n tempLow = \"AZ3\"\n tempHigh = \"AZ60\"\n rMinRsepLow = \"BC3\"\n rMinRsepHigh = \"BC60\"\n elif shotANDplunge == \"167195.2\":\n timeLow = \"BE3\"\n timeHigh = \"BE59\"\n densLow = \"BG3\"\n densHigh = \"BG59\"\n tempLow = \"BH3\"\n tempHigh = \"BH59\"\n rMinRsepLow = \"BK3\"\n rMinRsepHigh = \"BK59\"\n else:\n return print(\"Incorrect shot/plunge.\")\n\n times = self.returnArray(dataSheet, timeLow, timeHigh)\n dens = self.returnArray(dataSheet, densLow, densHigh)\n temps = self.returnArray(dataSheet, tempLow, tempHigh)\n rmins = self.returnArray(dataSheet, rMinRsepLow, rMinRsepHigh)\n\n # Go from 10^18 m^-3 to just m^-3.\n for index in range(0, len(dens)):\n if dens[index] is None:\n continue\n else:\n dens[index] = 
dens[index] * 10**18\n\n # Plasma sound speed assuming Te = Ti.\n sound_speeds = [(temp*2 / massD)**0.5 for temp in temps]\n\n self.shot_and_plunge = shotANDplunge\n self.times = times\n self.dens = dens\n self.temps = temps\n self.rmins = rmins\n self.sound_speeds = sound_speeds\n\n # The flux of W off the probe due to sputtering. sputt_flux = yield * flux of dueterium.\n def sputt_flux(ne, Ti, Te):\n # Sputtering energy threshold of tungsten oxide in eV. Note pure W is 160 eV.\n eThresh = 65\n soundSpeed = ((float(Te) + float(Ti)) / massD)**0.5\n\n # Use lambda function for use in integrate,\n func = lambda E: 0.528 * alpha * Z_D * (massD / (u0*(massD + massW))) * 0.059 * (E+3*Ti) ** (1.0/3.0) * soundSpeed * ne * 2 * (E/3.1415)**0.5 * (1/float(Ti))**(1.5) * math.exp(-E/Ti)\n ans, err = integrate.quad(func, eThresh, np.inf)\n\n #print(\"Sputtered Flux: \" + str(ans))\n #print(\"Sputtered Flux Error: \" + str(err/ans * 100) + \"%\")\n\n return ans\n\n\n for probe in [\"A\", \"B\", \"C\"]:\n # Use corresponding size for desired probe.\n if probe==\"A\":\n size = aSize\n elif probe==\"B\":\n size = bSize\n elif probe==\"C\":\n size = cSize\n else:\n print(\"Incorrect probe entry. Should be either A, B, or C.\")\n\n print(\"Calculating loss flux for \" + probe + \" probes...\")\n\n flux_loss = []\n for index in range(0, len(self.temps)):\n Te = self.temps[index]\n ne = self.dens[index]\n cs = self.sound_speeds[index]\n\n # Approx. speed of W entering flux tube.\n v0 = 0.5 * cs\n\n # Get the ionization rate coefficient for a specific temperature.\n ad = atomic.element('tungsten')\n temperatureRange = np.logspace(0,4,100)\n S = ad.coeffs['ionisation']\n f = interpolate.interp1d(temperatureRange, S(0, temperatureRange, ne))\n coeff = f(Te)\n\n # Calculate lamda_ionization.\n lambda_iz = v0 * (ne * coeff)**(-1)\n\n # Fraction ionized in the flux tube (i.e. it will return to the probe)\n frac = 1 - math.exp(-size / lambda_iz)\n #print(\"Fraction Ionized: \" + str(frac))\n\n # Thus the fraction lost is 1-frac of the sputtered flux.\n Ti = Te\n fracFluxLost = (1 - frac) * sputt_flux(ne=ne, Ti=Ti, Te=Te)\n #print(\"Flux Lost: \" + str(fracFluxLost))\n\n flux_loss.append(fracFluxLost)\n\n self.loss_dict[probe] = {\"rminrsep\":self.rmins, \"flux\":flux_loss}", "def interval(ctx, poll_interval):\n fc_info = {}\n fc_info['POLL_INTERVAL'] = poll_interval\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_TRAP\", fc_info)", "def cygx3MWLC(self):\n # --------------------------------------------------------------------------------------------- #\n # Fermi data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. 
# for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) )))\n\n # --------------------------------------------------------------------------------------------- #\n # X-ray data\n batFile = os.path.join(self.workpath, 'CygX3_BAT.fits')\n #maxiFile = os.path.join(self.workpath, 'CygX3_MAXI.csv')\n maxiFile = os.path.join(self.workpath, 'CygX3_MAXI.dat')\n asmFile = os.path.join(self.workpath, 'CygX3_ASM.fits')\n if not os.path.isfile(batFile) or self.clobber:\n os.system('wget http://swift.gsfc.nasa.gov/results/transients/CygX-3.lc.fits -O {}'.format(batFile))\n batTab = Table.read(batFile)\n if not os.path.isfile(maxiFile) or self.clobber:\n #os.system('wget http://www.maxi.jaxa.jp/obs/agn_etc/data/J2032+409/J2032+409.txt -O {}'.format(maxiFile))\n os.system('wget http://134.160.243.77/star_data/J2032+409/J2032+409_g_lc_1day_all.dat -O {}'.format(maxiFile))\n maxiTab = Table.read(maxiFile, format='ascii') #t, f2-20, e2-20, f2-4, e2-4, f4-10, e4-10, f10-20, e10-20\n if not os.path.isfile(asmFile) or self.clobber:\n os.system('wget https://www.dropbox.com/s/65qrhi1oyifvjfn/CygX3_ASM.fits?dl=0 -O {}'.format(asmFile))\n asmTab = Table.read(asmFile) #mjd, 1.3-12.2 keV, 1.3-3.0 keV, 3.0-5.0 keV, and 5.0-12.2 keV\n asmTab = asmTab[asmTab['col1'] > 54500]\n\n # --------------------------------------------------------------------------------------------- #\n # Radio data\n amiFile = os.path.join(self.workpath, 'CygX3_AMI.fits')\n ovroFile = os.path.join(self.workpath, 'CygX3_OVRO.fits')\n if not os.path.isfile(amiFile) or self.clobber:\n os.system('wget https://www.dropbox.com/s/bz9xbdbq6hbrant/AMI_2008_14.fits?dl=0 -O {}'.format(amiFile))\n amiTab = Table.read(amiFile)\n if not os.path.isfile(ovroFile) or self.clobber:\n os.system('wget https://www.dropbox.com/s/rs7xlztd66j6fej/CygX3_OVRO.fits?dl=0 -O {}'.format(ovroFile))\n ovroTab = Table.read(ovroFile)\n ovroOff = - 0.124\n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=17)\n lcplt.figname = os.path.join(self.workpath, 'CygX3_MWLC.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux density (Jy)', r'Count rate', r'Rate (cm$^{-2}$\\,s$^{-1}$)', r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.label = [r'AMI', r'OVRO', r'ISS/MAXI ($\\times 30$ ct\\,cm$^{-2}$\\,s$^{-1}$)', r'RXTE/ASM (ct\\,s$^{-1}$)', r'\\textit{Swift}/BAT', None, r'\\textit{Fermi}/LAT', None]\n lcplt.hline = [None, None, None, None, self.tsmin]\n\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [5.e-2,\n None,\n -0.01,\n (min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale,\n min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [3.e1,\n None,\n 0.08,\n (max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, \n max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n\n lcplt.mksize = [1, 1, 1, 1, 1, 2, 2, 2]\n lcplt.ymode = ['log', 'log', 'linear', 'linear', 'linear', 
'linear', 'linear', 'linear']\n lcplt.color = ['black', self.lblue, 'black', self.lblue, 'black', 'gray', 'black', 'black']\n lcplt.prop = [3, 3, 3, 3, 1]\n lcplt.limit = [[False, False], [False, False], False, [True, False], False]\n lcplt.multiplot(x = [ [amiTab['MJD']-tref, ovroTab['mjd']-tref],\n [maxiTab['col1']-tref, asmTab['col1']-tref],\n batTab['TIME']+np.ones(len(batTab['TIME']))*0.5-tref,\n [timeMJD[undet], timeMJD[detect]],\n timeMJD ],\n y = [ [amiTab['Jy'], ovroTab['flux']+ovroOff],\n [maxiTab['col4']*30, asmTab['col6']], \n batTab['RATE'],\n [upperl/scale, flux/scale],\n ts ],\n xerr = [ [None, None],\n [None, None],\n np.ones(len(batTab['TIME']))*0.5,\n [lcTab['mjderr'][undet], lcTab['mjderr'][detect]],\n lcTab['mjderr']],\n yerr = [ [None, None],\n [maxiTab['col5']*30, asmTab['col7']],\n batTab['ERROR'],\n [upperl/scale*0.1, fluxerr/scale],\n None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return", "def lCurve(self): \n\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. # for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n lcplt.figname = os.path.join(self.workpath, 'LightCurve.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.hline = [None, self.tsmin]\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [(min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale, min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [(max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n if len(flux) == 0:\n lcplt.mksize = [2, 2]\n lcplt.ymode = ['linear', 'linear']\n lcplt.color = ['gray', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [True, False]\n lcplt.multiplot(x = [ timeMJD[undet], timeMJD ],\n y = [ upperl/scale, ts ],\n xerr = [ lcTab['mjderr'][undet], lcTab['mjderr']],\n yerr = [ upperl/scale*0.1, None])\n else:\n lcplt.mksize = [2, 2, 2]\n lcplt.ymode = ['linear', 'linear', 'linear']\n lcplt.color = ['gray', 'black', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [[True, False], 
False]\n lcplt.multiplot(x = [ [timeMJD[undet], timeMJD[detect]], timeMJD ],\n y = [ [upperl/scale, flux/scale], ts ],\n xerr = [ [lcTab['mjderr'][undet], lcTab['mjderr'][detect]], lcTab['mjderr']],\n yerr = [ [upperl/scale*0.1, fluxerr/scale], None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return", "def get_restart_times(logname, begintime, endtime, \n checkwarm, checkforce, checkdown, checkcold):\n global nowyear #need this since year isn't in system log\n #alternatively just ignore the year\n\n startfound = False\n stopfound = False\n with open(logname, 'r') as f:\n #Look for start\n for line in f:\n if debug: print line\n entrytime = time_from_str(line)\n if entrytime > begintime:\n startfound = True\n if debug: print \"Found start time\"\n break\n else:\n continue\n \n if not startfound:\n print \"Did not find the start time\"\n return\n\n\n warmcount = 0\n downcount = 0\n coldcount = 0\n forcecount = 0\n warmrestart_total_et = datetime.timedelta(0)\n downrestart_total_et = datetime.timedelta(0)\n coldrestart_total_et = datetime.timedelta(0)\n forcerestart_total_et = datetime.timedelta(0)\n\n while not stopfound:\n isdownrestart = False\n iscoldrestart = False\n isforcerestart = False\n\n #Look for a test\n for line in f: \n if debug: print \"Look for restart: \", line\n\n entrytime = time_from_str(line)\n if entrytime > endtime:\n stopfound = True\n if debug: print \"Found end time\"\n break #done looking at log\n\n mr = rr.search(line) # warm, force, or cold restart\n mx = rx.search(line) # down restart\n if mr or mx:\n begintime = entrytime\n if verbose or debug: \n print \"Found restart:\"\n print line\n if mx:\n isdownrestart = True\n break\n else:\n continue #keep looking for restart\n else: # no more lines\n stopfound = True\n\n if stopfound:\n break\n \n #get the restart reason in the next line unless it's a down restart\n if not isdownrestart:\n for line in f:\n mrr = rrr.search(line)\n if mrr:\n reason = mrr.group('reason') \n if reason == \"System restarted by VprocManager\":\n iscoldrestart = True\n break # only wanted to read one line\n else:\n raise RuntimeError(\"End of file before finding end of test\")\n \n #go until end of test (logons enabled)\n for line in f: \n if debug: print \"Look for end of test:\", line\n\n mf = rf.search(line) #recond -L\n mup = rup.search(line) #Logons are enabled\n if mf: # force restart\n isforcerestart = True\n if debug: \n print line\n elif mup:\n if verbose or debug: \n print \"Found end of test:\"\n print line\n enabledtime = time_from_str(line)\n break\n else:\n continue # keep looking for end of test\n else:\n raise RuntimeError(\"End of file before finding end of test\")\n\n\n if debug: print \"Continuing from end of test\"\n\n #Found end of test\n elapsedtime = enabledtime - begintime\n\n if verbose: print\n if isdownrestart:\n if checkdown:\n downcount += 1\n print \"Down restart {0}\".format(downcount)\n downrestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\\n\"\\\n .format(begintime, enabledtime, elapsedtime)\n elif isforcerestart:\n if checkforce:\n forcecount += 1\n print \"Force restart {0}\".format(forcecount)\n forcerestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\"\\\n .format(begintime, enabledtime, elapsedtime)\n print \"Reason:\", reason\n elif iscoldrestart:\n if checkcold:\n coldcount += 1\n print \"Cold restart {0}\".format(coldcount)\n coldrestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\\n\"\\\n 
.format(begintime, enabledtime, elapsedtime)\n #print \"Reason:\", reason, \"\\n\"\n else: #must be warmrestart\n if checkwarm:\n warmcount += 1\n print \"Warm restart {0}\".format(warmcount)\n warmrestart_total_et += elapsedtime\n print \"Start: {0}, End: {1}, ET: {2}\"\\\n .format(begintime, enabledtime, elapsedtime)\n print \"Reason:\", reason\n\n ### end while not stopfound\n if debug: print \"Done going through log\"\n if verbose: print\n\n if warmcount > 0:\n print \"Warm restart average of {0} tests: {1}\"\\\n .format(warmcount, str(warmrestart_total_et/warmcount).split(\".\")[0])\n #print the time delta without microseconds\n\n if forcecount > 0:\n print \"Force restart average of {0} tests: {1}\"\\\n .format(forcecount, str(forcerestart_total_et/forcecount).split(\".\")[0])\n\n if coldcount > 0:\n print \"Cold restart average of {0} tests: {1}\"\\\n .format(coldcount, str(coldrestart_total_et/coldcount).split(\".\")[0])\n\n if downcount > 0:\n print \"Down restart average of {0} tests: {1}\"\\\n .format(downcount, str(downrestart_total_et/downcount).split(\".\")[0])\n\n\n return", "def lumiVsFill():\n init()\n \n nbins = fills[-1] + 1 - fills[0]\n plfill = ROOT.TProfile(\"plfill\", \"Lumi vs fill\", nbins, int(fills[0]), int(fills[-1]) + 1)\n \n tsStart = fillReport.getFillCreationTime(fills[0])\n tsEnd = fillReport.getFillEndTime(fills[-1]) + 1000\n pltime = ROOT.TProfile(\"pltime\", \"Lumi vs time\", nTimeBins, tsStart, tsEnd)\n \n # Filling\n \n for i in range(0, t.GetEntries()) :\n nb = t.GetEntry(i)\n if nb < 0:\n continue\n #if t.beamStatus[:-1] != \"STABLE BEAMS\":\n #continue\n #if t.tstamp > t.fillEnd:\n #continue\n #if t.tstamp - t.fillStable < deltaWarming:\n #continue\n plfill.Fill(t.fill, t.lumi)\n pltime.Fill(t.tstamp, t.lumi)\n pltime.GetXaxis().SetTimeDisplay(1)\n pltime.GetXaxis().SetTimeFormat(\"%m-%d\") \n \n return plfill, pltime", "def checkGlitches(self):\n cP = self.getKeyword('ISS PRI MET C') # in m/s\n dnuP = self.getKeyword('ISS PRI MET F_SHIFT')*1e6 # in Hz\n nuP = self.getKeyword('ISS PRI MET LASER_F')\n #self.jump = (cP*dnuP/2/(nuP**2))*(2**24-1) # PRIMET jump in m, COMM14\n self.metJumpSize = (cP*dnuP/2/(nuP**2))*(2**31-1) # PRIMET jump in m\n\n relevant_keywords = filter(lambda x: 'NGLIT' in x and\n 'START' in x,\n self.raw[0].header.keys())\n relevant_keywords = [k.split()[4] for k in relevant_keywords]\n glitches = {}\n glitchesStartEnd = {}\n for k in relevant_keywords:\n glitches[k] = self.getKeyword('ISS PRI MET '+k+' END')-\\\n self.getKeyword('ISS PRI MET '+k+' START')\n glitchesStartEnd[k] = (self.getKeyword('ISS PRI MET '+k+' START'),\n self.getKeyword('ISS PRI MET '+k+' END'))\n self.glitches = glitches\n self.glitchesStartEnd = glitchesStartEnd\n if 'NGLITAB' in glitches.keys():\n if glitches['NGLITAB'] !=0:\n print '*SERIOUS WARNING*', glitches['NGLITAB'],\\\n 'glitches in PRIMET A-B in this file'\n else:\n print '*WARNING*: could not assess glitches in A-B'\n\n if 'NGLITB' in glitches.keys():\n if glitches['NGLITB'] !=0:\n print '*SERIOUS WARNING*', glitches['NGLITB'],\\\n 'glitches in PRIMET -B in this file'\n else:\n print 'WARNING: could not assess glitches in -B'\n\n if glitches['NGLITABFCO'] !=0:\n print 'WARNING: AB overflow!', glitches['NGLITABFCO']\n if glitches['NGLITBFCO'] !=0:\n print 'WARNING: -B overflow!', glitches['NGLITBFCO']\n self.glitches = glitches\n return", "def glitch_time_cut(stream, df):\n glitch_interval_list = quality_parameters[stream]['glitch_time_cut']\n \n if glitch_interval_list is None:\n 
df['glitch_time_cut'] = True\n \n else:\n \n timestamp = df['timestamp']\n \n truth_array = pd.Series(data=True, index=df.index)\n for inf, sup in glitch_interval_list:\n \n interval_truth = (timestamp < inf) | (timestamp > sup)\n truth_array = truth_array & interval_truth\n \n df['glitch_time_cut'] = truth_array\n \n return None", "def test_radar_request_site_historic_px250_bufr_timerange(default_settings):\n\n timestamp = dt.datetime.utcnow() - dt.timedelta(days=1)\n\n request = DwdRadarValues(\n parameter=DwdRadarParameter.PX250_REFLECTIVITY,\n start_date=timestamp,\n end_date=dt.timedelta(hours=1),\n site=DwdRadarSite.BOO,\n settings=default_settings,\n )\n\n # Verify number of elements.\n results = list(request.query())\n\n if len(results) == 0:\n raise pytest.skip(\"Data currently not available\")\n\n assert len(results) == 12", "def lagcrp_helper(egg, match='exact', distance='euclidean',\n ts=None, features=None):\n\n def lagcrp(rec, lstlen):\n \"\"\"Computes lag-crp for a given recall list\"\"\"\n\n def check_pair(a, b):\n if (a>0 and b>0) and (a!=b):\n return True\n else:\n return False\n\n def compute_actual(rec, lstlen):\n arr=pd.Series(data=np.zeros((lstlen)*2),\n index=list(range(-lstlen,0))+list(range(1,lstlen+1)))\n recalled=[]\n for trial in range(0,len(rec)-1):\n a=rec[trial]\n b=rec[trial+1]\n if check_pair(a, b) and (a not in recalled) and (b not in recalled):\n arr[b-a]+=1\n recalled.append(a)\n return arr\n\n def compute_possible(rec, lstlen):\n arr=pd.Series(data=np.zeros((lstlen)*2),\n index=list(range(-lstlen,0))+list(range(1,lstlen+1)))\n recalled=[]\n for trial in rec:\n if np.isnan(trial):\n pass\n else:\n lbound=int(1-trial)\n ubound=int(lstlen-trial)\n chances=list(range(lbound,0))+list(range(1,ubound+1))\n for each in recalled:\n if each-trial in chances:\n chances.remove(each-trial)\n arr[chances]+=1\n recalled.append(trial)\n return arr\n\n actual = compute_actual(rec, lstlen)\n possible = compute_possible(rec, lstlen)\n crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]\n crp.insert(int(len(crp) / 2), np.nan)\n return crp\n\n def nlagcrp(distmat, ts=None):\n\n def lagcrp_model(s):\n idx = list(range(0, -s, -1))\n return np.array([list(range(i, i+s)) for i in idx])\n\n # remove nan columns\n distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T\n\n model = lagcrp_model(distmat.shape[1])\n lagcrp = np.zeros(ts * 2)\n for rdx in range(len(distmat)-1):\n item = distmat[rdx, :]\n next_item = distmat[rdx+1, :]\n if not np.isnan(item).any() and not np.isnan(next_item).any():\n outer = np.outer(item, next_item)\n lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts))))\n lagcrp /= ts\n lagcrp = list(lagcrp)\n lagcrp.insert(int(len(lagcrp) / 2), np.nan)\n return np.array(lagcrp)\n\n def _format(p, r):\n p = np.matrix([np.array(i) for i in p])\n if p.shape[0]==1:\n p=p.T\n r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r)\n r = np.matrix([np.array(i) for i in r])\n if r.shape[0]==1:\n r=r.T\n return p, r\n\n opts = dict(match=match, distance=distance, features=features)\n if match is 'exact':\n opts.update({'features' : 'item'})\n recmat = recall_matrix(egg, **opts)\n if not ts:\n ts = egg.pres.shape[1]\n if match in ['exact', 'best']:\n lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat]\n elif match is 'smooth':\n lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0))\n else:\n raise ValueError('Match must be set to exact, best or smooth.')\n return np.nanmean(lagcrp, axis=0)", 
"def tail_cts_per_shot(datapath, lower, TPQI_starts, bin_size = 0.256, normalize = False, correct_for_bg = True, save = 1, pulses_in_sequence = 300):\n\n print 'analyzing tail counts per shot...' \n current_dir = os.getcwd()\n plt.close('all')\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n\n ch1_counts = data['hist_ch1']\n ch0_counts = data['hist_ch0']\n\n time = bin_size*arange(len(ch1_counts))\n \n if correct_for_bg:\n bg_level_ch1 = ch1_counts[int(0.75*len(ch1_counts)):int(0.90*len(ch1_counts))].mean()\n ch1_counts = ch1_counts - bg_level_ch1*ones(len(ch1_counts))\n bg_level_ch0 = ch0_counts[int(0.75*len(ch0_counts)):int(0.90*len(ch0_counts))].mean()\n ch0_counts = ch0_counts - bg_level_ch0*ones(len(ch0_counts))\n\n #print 'measured background level for [ch0,ch1] = ['+num2str(bg_level_ch0,1)+','+num2str(bg_level_ch1,1)+']'\n\n if normalize:\n ch1_counts_normalized = ch1_counts/ch1_counts.max()\n ch0_counts_normalized = ch0_counts/ch0_counts.max()\n \n upper = lower + 40.0\n\n tail_area_time = time[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch1 = ch1_counts[int(lower/bin_size):int(upper/bin_size)]\n tail_area_ch0 = ch0_counts[int(lower/bin_size):int(upper/bin_size)]\n\n tail_counts_per_shot = (tail_area_ch1.sum()+tail_area_ch0.sum())/float(TPQI_starts*pulses_in_sequence)\n\n figure1 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(211)\n if not normalize:\n plt.semilogy(time, ch1_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch1_counts_normalized, '-r')\n plt.plot(array([lower,lower]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch1_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch1')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n\n plt.subplot(212)\n if not normalize:\n plt.semilogy(time, ch0_counts, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts.max()]), 'r', lw = 2.0)\n else:\n plt.semilogy(time, ch0_counts_normalized, '-k')\n plt.plot(array([lower,lower]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n plt.plot(array([upper,upper]), array([1E-1,ch0_counts_normalized.max()]), 'r', lw = 2.0)\n \n plt.xlabel('Time after sync (ns)')\n plt.ylabel('Counts ch0')\n plt.title('tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4')\n plt.xlim([0,200])\n if save:\n figure1.savefig('tail_cts_per_shot.pdf')\n\n try:\n data.close()\n except:\n pass\n\n print 'tail counts per shot = '+num2str(tail_counts_per_shot*1e4,1)+'E-4'\n\n return tail_counts_per_shot", "def get_replag(self):\n query = \"\"\"SELECT UNIX_TIMESTAMP() - UNIX_TIMESTAMP(rc_timestamp) FROM\n recentchanges ORDER BY rc_timestamp DESC LIMIT 1\"\"\"\n result = list(self.sql_query(query))\n return int(result[0][0])", "def window_change(df: list, key: str, lag: int) -> list:\n\n def _chg(window):\n start = window[0][key]\n end = window[-1][key]\n return round(((end - start) / start) * 100, 2)\n\n return [_chg(df[i : i + lag + 1]) for i in range(len(df) - lag)]", "def restart(self, timeStamp):\n print 'monitor_comp.restart() called'\n\n services = 
self.services\n global monitorVars, ps_VarsList, monitorDefinition\n \n workdir = services.get_working_dir()\n run_id = services.get_config_param('PORTAL_RUNID')\n monitor_file = 'monitor_file.nc'\n # print 'monitor file = ', monitor_file\n\n self.cdfFile = run_id+'_monitor_file.nc'\n services.log('w3 monitor file = ' + self.cdfFile)\n htmlFile = run_id +'.html'\n \n # Get restart files listed in config file. \n try:\n restart_root = services.get_config_param('RESTART_ROOT')\n restart_time = services.get_config_param('RESTART_TIME')\n services.get_restart_files(restart_root, restart_time, self.RESTART_FILES)\n except Exception, e:\n print 'Error in call to get_restart_files()' , e\n raise\n\n # copy monitor file to w3 directory\n try:\n shutil.copyfile(monitor_file,\n os.path.join(self.W3_DIR, self.cdfFile))\n except IOError, (errno, strerror):\n print 'Error copying file %s to %s: %s' % \\\n (monitor_file, self.cdfFile, strerror)\n\n htmlText = self.htmlText.replace('@CDF_FILE@',\n os.path.join(self.W3_BASEURL, self.cdfFile))\n try:\n f = open(os.path.join(self.W3_DIR, htmlFile), 'w')\n f.write(htmlText)\n f.close()\n except IOError, (errno, strerror):\n print 'Error writing to file %s : %s' % \\\n (htmlFile, strerror)\n monitorURL = os.path.join(self.W3_BASEURL , htmlFile)\n self.services.setMonitorURL(monitorURL)\n \n # Load monitorVars and ps_VarsList from pickle file \"monitor_restart\".\n\n pickleDict = {'monitorVars' : monitorVars, 'ps_VarsList': ps_VarsList,\\\n 'monitorDefinition':monitorDefinition}\n# pickleDict = {'monitorVars' : monitorVars, 'ps_VarsList': ps_VarsList}\n pickFile = open('monitor_restart', 'r')\n pickleDict = pickle.load(pickFile)\n pickFile.close()\n monitorVars = pickleDict['monitorVars']\n ps_VarsList = pickleDict['ps_VarsList']\n monitorDefinition = pickleDict['monitorDefinition']\n print 'monitorDefinition = ', monitorDefinition\n \n print 'monitor restart finished'\n return 0", "def test_radar_request_composite_historic_hg_timerange(default_settings):\n\n timestamp = dt.datetime.utcnow() - dt.timedelta(days=1)\n\n request = DwdRadarValues(\n parameter=DwdRadarParameter.HG_REFLECTIVITY,\n start_date=timestamp,\n end_date=dt.timedelta(minutes=10),\n settings=default_settings,\n )\n\n results = list(request.query())\n\n if len(results) == 0:\n raise pytest.skip(\"Data currently not available\")\n\n # Verify number of results.\n assert len(results) == 2\n\n # Verify all timestamps are properly propagated from the tarfile.\n assert all(\n request.start_date == result.timestamp or request.start_date + dt.timedelta(minutes=5) for result in results\n )", "def run_cumulative_wtrfclty_damage(self):\n\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t\n\t\tfor rt_val in rt:\n\t\t\tprint('\\twterfclty_dmg: cumulative rt_{}' .format(rt_val))\n\t\t\t# --- reading in damage results from above analysis\n\t\t\teq_damage_results_csv = os.path.join(self.wterfclty_output_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'wterfclty_eq_{}yr_dmg.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val))\n\t\t\ttsu_damage_results_csv = os.path.join(self.wterfclty_output_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'wterfclty_tsu_{}yr_dmg.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val))\n\t\t\teq_df = pd.read_csv(eq_damage_results_csv)\n\t\t\ttsu_df = pd.read_csv(tsu_damage_results_csv)\n\n\t\t\tcum_df = pd.DataFrame()\n\t\t\tcum_df['guid'] = eq_df['guid']\n\t\t\t\n\t\t\tcum_df['ds-complet'] = eq_df['ds-complet'] + tsu_df['ds-complet'] \\\n\t\t\t\t- eq_df['ds-complet']*tsu_df['ds-complet']\n\t\t\t\n\t\t\t# --- 
prob of exceeding each damage state\n\t\t\tcum_df['ls-complet'] = cum_df['ds-complet']\n\n\t\t\tcum_df['ls-extensi'] = eq_df['ls-extensi'] + tsu_df['ls-extensi'] \\\n\t\t\t\t- eq_df['ls-extensi']*tsu_df['ls-extensi']\n\n\t\t\tcum_df['ls-moderat'] = eq_df['ls-moderat'] + tsu_df['ls-moderat'] \\\n\t\t\t\t- eq_df['ls-moderat']*tsu_df['ls-moderat']\n\n\t\t\tcum_df['ls-slight'] = eq_df['ls-slight'] + tsu_df['ls-slight'] \\\n\t\t\t\t- eq_df['ls-slight']*tsu_df['ls-slight']\n\n\t\t\t# --- prob of being in each damage state\n\t\t\tcum_df['ds-extensi'] = cum_df['ls-extensi'] - cum_df['ds-complet']\n\t\t\tcum_df['ds-moderat'] = cum_df['ls-moderat'] - cum_df['ls-extensi']\n\t\t\tcum_df['ds-slight'] = cum_df['ls-slight'] - cum_df['ls-moderat']\n\t\t\tcum_df['ds-none'] = 1 - cum_df['ls-slight']\n\t\t\tcum_df['hazard'] = 'Earthquake+Tsunami'\n\n\t\t\tresult_name = os.path.join(self.wterfclty_output_path, \n\t\t\t\t\t\t\t\t\t 'wterfclty_cumulative_{}yr_dmg.csv' \n\t\t\t\t\t\t\t\t\t .format(rt_val))\n\t\t\tcum_df = cum_df[['guid', \n\t\t\t\t\t\t\t 'ls-slight',\n\t\t\t\t\t\t\t 'ls-moderat',\n\t\t\t\t\t\t\t 'ls-extensi',\n\t\t\t\t\t\t\t 'ls-complet',\n\t\t\t\t\t\t\t 'ds-none', \n\t\t\t\t\t\t\t 'ds-slight', \n\t\t\t\t\t\t\t 'ds-moderat', \n\t\t\t\t\t\t\t 'ds-extensi', \n\t\t\t\t\t\t\t 'ds-complet', \n\t\t\t\t\t\t\t 'hazard']]\n\t\t\tcum_df.to_csv(result_name, index=False)", "def get_measurement_lag(gt_id):\n # Every measurement is associated with its start date, and measurements\n # are aggregated over one or more days, so, on a given date, only the measurements\n # from at least aggregation_days ago are fully observed.\n # Most of our measurements require 14 days of aggregation\n aggregation_days = 14\n # Some of our measurements are also released a certain number of days late\n days_late = 0\n if gt_id.endswith(\"mjo\"):\n # MJO uses only a single day of aggregation and is released one day late\n aggregation_days = 1\n days_late = 1\n elif \"sst\" in gt_id:\n # SST measurements are released one day late\n days_late = 1\n elif gt_id.endswith(\"mei\"):\n # MEI measurements are released at most 30 days late\n # (since they are released monthly) but are not aggregated\n aggregation_days = 0\n days_late = 30\n elif \"wind\" in gt_id:\n # Wind measurements are released one day late\n days_late = 1\n elif \"icec\" in gt_id:\n days_late = 1\n elif (\"slp\" in gt_id or \"pr_wtr.eatm\" in gt_id or \"rhum.sig995\" in gt_id or\n \"pres.sfc.gauss\" in gt_id or \"pevpr.sfc.gauss\" in gt_id):\n # NCEP/NCAR measurements are released one day late\n days_late = 1\n return aggregation_days + days_late", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n 
l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def lagcrp(rec, lstlen):\n\n def check_pair(a, b):\n if (a>0 and b>0) and (a!=b):\n return True\n else:\n return False\n\n def compute_actual(rec, lstlen):\n arr=pd.Series(data=np.zeros((lstlen)*2),\n index=list(range(-lstlen,0))+list(range(1,lstlen+1)))\n recalled=[]\n for trial in range(0,len(rec)-1):\n a=rec[trial]\n b=rec[trial+1]\n if check_pair(a, b) and (a not in recalled) and (b not in recalled):\n arr[b-a]+=1\n recalled.append(a)\n return arr\n\n def compute_possible(rec, lstlen):\n arr=pd.Series(data=np.zeros((lstlen)*2),\n index=list(range(-lstlen,0))+list(range(1,lstlen+1)))\n recalled=[]\n for trial in rec:\n if np.isnan(trial):\n pass\n else:\n lbound=int(1-trial)\n ubound=int(lstlen-trial)\n chances=list(range(lbound,0))+list(range(1,ubound+1))\n for each in recalled:\n if each-trial in chances:\n chances.remove(each-trial)\n arr[chances]+=1\n recalled.append(trial)\n return arr\n\n actual = compute_actual(rec, lstlen)\n possible = compute_possible(rec, lstlen)\n crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]\n crp.insert(int(len(crp) / 2), np.nan)\n return crp", "def __init__(self, candidate, reference, breaktime, test_resample=('M', 0.3),\n bias_corr_method='linreg', alpha=0.01, mean_test='wilkoxon',\n var_test='scipy_fligner_killeen', test_check_min_data=10,\n test_check_spearR_sig=(0, 0.01)):\n\n TsRelBreakBase.__init__(self, candidate, reference, breaktime,\n bias_corr_method, dropna=True)\n\n self.alpha = alpha\n self.mean_test = mean_test\n self.var_test = var_test\n\n if test_resample:\n self.resample_to = test_resample[0]\n self.resample_threshold = test_resample[1]\n self.df_test_resampled = \\\n df_conditional_temp_resample(self.df_original,\n self.resample_to,\n self.resample_threshold)\n else:\n self.df_test_resampled = self.df_original.copy(True)\n\n if self._check_notnull() and bias_corr_method:\n self.df_test_resampled[self.reference_col_name] = \\\n self._reference_bias_correction(frame=self.df_test_resampled,\n method=self.bias_corr_method,\n group=None)\n\n self.test_min_data = test_check_min_data\n self.min_corr = test_check_spearR_sig[0]\n self.max_p = test_check_spearR_sig[1]\n\n self.error_code_test, self.error_text_test = self._check_input_data()\n\n self.df_test_resampled['Q'] = self.calc_diff(self.df_test_resampled)" ]
[ "0.57984966", "0.5483226", "0.5448702", "0.536563", "0.53027284", "0.5287678", "0.52505547", "0.52078265", "0.5189895", "0.5172441", "0.50707984", "0.50514287", "0.5050407", "0.5031012", "0.5027122", "0.50037414", "0.49881342", "0.49560872", "0.49285343", "0.49259377", "0.48662257", "0.48503774", "0.48356307", "0.48208332", "0.48206115", "0.4739548", "0.4730854", "0.47158003", "0.4710362", "0.47093073" ]
0.59120035
0
Replication failures over WARN threshold (below CRIT), STATUS_WARN
def test_check_replication_warn_failures(self, mock_timestamp): base_url = 'http://localhost:6000/recon/' jdata = b'{"replication_last": 1493299546.629282, ' \ b'"replication_stats": {"no_change": 0, "rsync": 0, ' \ b'"success": 0, "failure": 0, "attempted": 0, "ts_repl": 0, ' \ b'"remove": 0, "remote_merge": 0, "diff_capped": 0, ' \ b'"start": 1493299546.621624, "hashmatch": 0, "diff": 0, ' \ b'"empty": 0}, "replication_time": 0.0076580047607421875}' pmock_jdata = PropertyMock(return_value=jdata) mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 5) # with patch('urllib2.urlopen') as mock_urlopen: with patch('urllib.request.urlopen') as mock_urlopen: mock_urlopen.return_value = MagicMock(read=pmock_jdata) result = check_replication(base_url, [4, 10, 4, 10]) self.assertEqual(result, 3*[(STATUS_WARN, "5 replication failures")])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_replication_warn_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_WARN,\n \"'{}' replication lag is \"\n \"5 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 12)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT, \"12 replication failures\")])", "def test_check_replication_crit_lag_notworking(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (None, 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag not working \"\n \"(perms issue? 
check syslog)\".format(repl))\n for repl in ('account', 'object', 'container')])", "def test_check_replication_crit_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=12), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"12 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def log(failure):\n return self._env.logger.warning('[ping] {}'.format(failure.getErrorMessage()))", "def test_check_replication_crit_null_failures(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), -1)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_CRIT,\n \"replication failures counter is NULL \"\n \"(check syslog)\")])", "def failure_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"failure_threshold\")", "def failure_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"failure_threshold\")", "def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")", "def warnings_active(self) -> List[Error]:", "def test_check_replication_crit_day_plus_lag(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=2, seconds=5), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n [(STATUS_CRIT,\n \"'{}' replication lag is \"\n \"172805 seconds\".format(repl))\n for repl in ('account', 'object', 'container')])", "def check_threshold(count, warn, crit, logger):\n warn = int(warn)\n crit = int(crit)\n if count < warn:\n msg = (\"Normal: Resource Count={} is less than the warning={} 
level\".format(count, warn))\n logger.info(msg)\n print(msg)\n sys.exit(0)\n elif count >= warn and count < crit:\n msg = (\"Warning: Resource count={} has reached the warning={} level\".format(\n count, warn))\n logger.warning(msg)\n print(msg)\n sys.exit(1)\n elif count >= crit:\n msg = (\"Critical: Resource count={} has reached the critical={} level\".format(\n count, crit))\n logger.error(msg)\n print(msg)\n sys.exit(2)\n else:\n print(\"Unknown: Resource count is unknown\")\n sys.exit(3)", "def test_http_error(self):\n self.__jenkins.contents = 'raise'\n self.assertEqual(-1, self.__jenkins.nr_warnings(('job',), 'normal'))", "def _log_err_msg(self, message):\n current_time = time.time()\n if current_time - self._last_warning_time > 600:\n logging.warning(message)\n self._last_warning_time = current_time", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def warning(self, msg, *args):\n if self.lvl<=logging.WARNING: return self._log(msg, *args)", "def test_timeout_with_crud_failures(self):\n\n # Local methods to validate vb_seqno\n\n def compare_vb_stat(stat_1, stat_2, vb, comparison=\"!=\"):\n keys_to_check = [\"high_seqno\", \"high_completed_seqno\"]\n result = True\n for key in keys_to_check:\n if vb in stat_1.keys():\n if stat_1[vb][\"uuid\"] != stat_2[vb][\"uuid\"]:\n self.log_failure(\"Mismatch in vb-%s UUID. %s != %s\"\n % (vb, stat_1[vb][\"uuid\"],\n stat_2[vb][\"uuid\"]))\n if comparison == \"!=\":\n if stat_1[vb][key] != stat_2[vb][key]:\n result = False\n self.log.warning(\n \"Mismatch in vb-%s stat %s. %s != %s\"\n % (vb, key, stat_1[vb][key], stat_2[vb][key]))\n elif stat_1[vb][key] == stat_2[vb][key]:\n result = False\n self.log.warning(\"Stat not updated for vb-%s stat %s. 
\"\n \"%s == %s\"\n % (vb, key,\n stat_1[vb][key], stat_2[vb][key]))\n return result\n\n def validate_vb_seqno_stats():\n \"\"\"\n :return retry_validation: Boolean denoting to retry validation\n \"\"\"\n retry_validation = False\n vb_info[\"post_timeout\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n for tem_vb_num in range(self.cluster.vbuckets):\n tem_vb_num = str(tem_vb_num)\n if tem_vb_num not in affected_vbs:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log_failure(\"Unaffected vb-%s stat\" % tem_vb_num)\n elif int(tem_vb_num) in target_nodes_vbuckets[\"active\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num) is False:\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"active\", tem_vb_num))\n elif int(tem_vb_num) in target_nodes_vbuckets[\"replica\"]:\n if compare_vb_stat(vb_info[\"init\"][node.ip],\n vb_info[\"post_timeout\"][node.ip],\n tem_vb_num, comparison=\"==\") is False:\n retry_validation = True\n self.log.warning(\"%s - mismatch in %s vb-%s seq_no\"\n % (node.ip, \"replica\", tem_vb_num))\n return retry_validation\n\n shell_conn = dict()\n cbstat_obj = dict()\n error_sim = dict()\n target_nodes_vbuckets = dict()\n vb_info = dict()\n tasks = dict()\n doc_gen = dict()\n affected_vbs = list()\n\n target_nodes_vbuckets[\"active\"] = []\n target_nodes_vbuckets[\"replica\"] = []\n vb_info[\"init\"] = dict()\n vb_info[\"post_timeout\"] = dict()\n vb_info[\"afterCrud\"] = dict()\n\n # Override crud_batch_size to minimum value for testing\n self.crud_batch_size = 5\n self.key = \"test_collections\"\n self.sdk_timeout = 3\n\n # Select target vbucket type to load_docs\n target_vb_type = \"replica\"\n if self.simulate_error == CouchbaseError.STOP_PERSISTENCE \\\n and self.durability_level \\\n == Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE:\n target_vb_type = \"active\"\n\n # Create required scope/collection for successful CRUD operation\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n self.collection_name = self.bucket_util.get_random_name()\n self.log.info(\"Creating scope::collection %s::%s\"\n % (self.scope_name, self.collection_name))\n self.create_scope_collection()\n\n # Load docs into created collection\n self.log.info(\"Loading data into created collection\")\n load_gen = doc_generator(self.key, 0, self.num_items)\n task = self.task.async_load_gen_docs(\n self.cluster, self.bucket, load_gen, \"create\", 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=200, process_concurrency=8,\n timeout_secs=60)\n self.task_manager.get_task_result(task)\n if self.subdoc_test:\n load_gen = sub_doc_generator(self.key, 0, self.num_items/2)\n task = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket,\n load_gen, Bucket_Op.SubDocOps.INSERT,\n timeout_secs=self.sdk_timeout,\n compression=self.sdk_compression,\n path_create=True,\n batch_size=100,\n process_concurrency=8,\n durability=self.durability_level,\n scope=self.scope_name, collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool)\n self.task_manager.get_task_result(task)\n\n self.bucket.scopes[self.scope_name].collections[\n self.collection_name].num_items = self.num_items\n\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n self.nodes_init,\n self.num_nodes_affected)\n for node in target_nodes:\n 
shell_conn[node.ip] = RemoteMachineShellConnection(node)\n cbstat_obj[node.ip] = Cbstats(node)\n target_nodes_vbuckets[\"active\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"active\")\n target_nodes_vbuckets[\"replica\"] += \\\n cbstat_obj[node.ip].vbucket_list(self.bucket.name,\n vbucket_type=\"replica\")\n vb_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n error_sim[node.ip] = CouchbaseError(self.log, shell_conn[node.ip])\n\n curr_time = int(time.time())\n expected_timeout = curr_time + self.sdk_timeout\n\n if target_vb_type == \"active\":\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"replica\"])))\n else:\n target_vbs = list(\n set(target_nodes_vbuckets[target_vb_type])\n .difference(set(target_nodes_vbuckets[\"active\"])))\n\n # Create required doc_generators\n doc_gen[\"create\"] = doc_generator(self.key, self.num_items,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"delete\"] = doc_generator(self.key, 0,\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"read\"] = doc_generator(\n self.key, int(self.num_items/3),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"update\"] = doc_generator(\n self.key, int(self.num_items/2),\n self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Create required subdoc generators\n doc_gen[\"insert\"] = sub_doc_generator(\n self.key, int(self.num_items/2), self.crud_batch_size,\n target_vbucket=target_vbs)\n doc_gen[\"upsert\"] = sub_doc_generator_for_edit(\n self.key, 0, self.crud_batch_size,\n template_index=1,\n target_vbucket=target_vbs)\n doc_gen[\"remove\"] = sub_doc_generator(\n self.key, 0, self.crud_batch_size,\n target_vbucket=target_vbs)\n\n # Perform specified action\n for node in target_nodes:\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n self.sleep(5, \"Wait for error_simulation to take effect\")\n\n ops_to_perform = [Bucket_Op.DocOps.CREATE, Bucket_Op.DocOps.UPDATE,\n Bucket_Op.DocOps.READ, Bucket_Op.DocOps.DELETE]\n if self.subdoc_test:\n ops_to_perform = [Bucket_Op.SubDocOps.INSERT,\n Bucket_Op.SubDocOps.UPSERT,\n Bucket_Op.SubDocOps.REMOVE]\n\n for op_type in ops_to_perform:\n self.log.info(\"Starting doc op %s\" % op_type)\n if op_type in Bucket_Op.DOC_OPS:\n tasks[op_type] = self.task.async_load_gen_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n suppress_error_table=True,\n print_ops_rate=False,\n skip_read_on_error=True)\n else:\n tasks[op_type] = self.task.async_load_gen_sub_docs(\n self.cluster, self.bucket, doc_gen[op_type], op_type, 0,\n scope=self.scope_name,\n collection=self.collection_name,\n sdk_client_pool=self.sdk_client_pool,\n path_create=True,\n batch_size=1, process_concurrency=8,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout,\n print_ops_rate=False)\n\n self.task.jython_task_manager.get_task_result(tasks[op_type])\n\n # Validate task failures\n if op_type == Bucket_Op.DocOps.READ:\n # Validation for read task\n if len(tasks[op_type].fail.keys()) != 0:\n self.log_failure(\"Read failed for few docs: %s\"\n % tasks[op_type].fail.keys())\n else:\n # Validation of CRUDs - Update / Create / Delete\n for doc_id, crud_result in tasks[op_type].fail.items():\n vb_num 
= self.bucket_util.get_vbucket_num_for_key(\n doc_id, self.cluster.vbuckets)\n if SDKException.DurabilityAmbiguousException \\\n not in str(crud_result[\"error\"]):\n self.log_failure(\n \"Invalid exception for doc %s, vb %s: %s\"\n % (doc_id, vb_num, crud_result))\n\n # Revert the specified error scenario\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Check whether the timeout triggered properly\n if int(time.time()) < expected_timeout:\n self.log_failure(\"Timed-out before expected time\")\n\n for op_type in ops_to_perform:\n if op_type == Bucket_Op.DocOps.READ:\n continue\n while doc_gen[op_type].has_next():\n doc_id, _ = doc_gen[op_type].next()\n affected_vbs.append(\n str(self.bucket_util.get_vbucket_num_for_key(\n doc_id,\n self.cluster.vbuckets)))\n\n affected_vbs = list(set(affected_vbs))\n # Fetch latest stats and validate the seq_nos are not updated\n for node in target_nodes:\n retry_count = 0\n max_retry = 3\n while retry_count < max_retry:\n self.log.info(\"Trying to validate vbseq_no stats: %d\"\n % (retry_count+1))\n retry_count += 1\n retry_required = validate_vb_seqno_stats()\n if not retry_required:\n break\n self.sleep(5, \"Sleep for vbseq_no stats to update\")\n else:\n # This will be exited only if `break` condition is not met\n self.log_failure(\"validate_vb_seqno_stats verification failed\")\n\n self.validate_test_failure()\n\n # Get SDK Client from client_pool\n sdk_client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n # Doc error validation\n for op_type in ops_to_perform:\n task = tasks[op_type]\n\n if self.nodes_init == 1 \\\n and op_type != Bucket_Op.DocOps.READ \\\n and len(task.fail.keys()) != (doc_gen[op_type].end\n - doc_gen[op_type].start):\n self.log_failure(\"Failed keys %d are less than expected %d\"\n % (len(task.fail.keys()),\n (doc_gen[op_type].end\n - doc_gen[op_type].start)))\n\n # Create table objects for display\n table_view = TableView(self.log.error)\n ambiguous_table_view = TableView(self.log.info)\n table_view.set_headers([\"Key\", \"vBucket\", \"Exception\"])\n ambiguous_table_view.set_headers([\"Key\", \"vBucket\"])\n\n # Iterate failed keys for validation\n for doc_key, doc_info in task.fail.items():\n vb_for_key = self.bucket_util.get_vbucket_num_for_key(doc_key)\n\n if SDKException.DurabilityAmbiguousException \\\n not in str(doc_info[\"error\"]):\n table_view.add_row([doc_key, vb_for_key,\n doc_info[\"error\"]])\n\n ambiguous_table_view.add_row([doc_key, str(vb_for_key)])\n if op_type not in Bucket_Op.SUB_DOC_OPS:\n retry_success = \\\n self.durability_helper.retry_for_ambiguous_exception(\n sdk_client, op_type, doc_key, doc_info)\n if not retry_success:\n self.log_failure(\"%s failed in retry for %s\"\n % (op_type, doc_key))\n\n # Display the tables (if any errors)\n table_view.display(\"Unexpected exception during %s\" % op_type)\n ambiguous_table_view.display(\"D_Ambiguous exception during %s\"\n % op_type)\n\n # Release the acquired client\n self.sdk_client_pool.release_client(sdk_client)\n\n # Verify doc count after expected CRUD failure\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest stats and validate the values are updated\n for node in target_nodes:\n vb_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n if 
vb_info[\"init\"][node.ip] == vb_info[\"afterCrud\"][node.ip]:\n self.log_failure(\"vBucket seq_no stats not updated\")\n\n # Disconnect the shell connection\n for node in target_nodes:\n shell_conn[node.ip].disconnect()\n\n self.validate_test_failure()", "def _log_retry_attempt(retry_state):\n logger.warning(\n 'beat: Retrying Redis connection in %s seconds...', retry_state.next_action.sleep\n )", "def test_connectionLostBackoffDelayDoubles(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.reactor.advance(self.pm.threshold - 1) #9s\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay)\r\n # process dies within the threshold and should not restart immediately\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertEqual(self.pm.delay[\"foo\"], self.pm.minRestartDelay * 2)", "def warningglobal(self, *args, **kwargs):\n return self.logger.log(logging.WARNING+1, *args, **kwargs)", "def check_journaled(con, warning, critical,perf_data):\n\n warning = warning or 20\n critical = critical or 40\n try:\n data=get_server_status(con)\n journaled = data['dur']['journaledMB'] \n message=\"Journaled : %.2f MB\" % journaled\n message+=performance_data(perf_data,[(\"%.2f\"%journaled,\"journaled\",warning, critical)])\n return check_levels(journaled,warning, critical, message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def warnings(self) -> List[Error]:" ]
[ "0.64640194", "0.6180904", "0.5985898", "0.5829283", "0.58201987", "0.57316804", "0.5684867", "0.5684867", "0.5620412", "0.56173164", "0.5589144", "0.5572592", "0.5542815", "0.5539604", "0.5533179", "0.5533179", "0.5533179", "0.5533179", "0.5533179", "0.5533179", "0.5533179", "0.5533179", "0.55253357", "0.55046594", "0.5498291", "0.54758215", "0.54637474", "0.5448467", "0.54158294", "0.54143566" ]
0.6709899
0
If there is no any published phrases in db, it would return an empty string, else it would return a phrase in popup
def render(self, context): if not self.phrase: return '' t = context.template.engine.get_template('philosophy_phrases/phrase_popup.html') return t.render(template.Context({'phrase': self.phrase}))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showTranslatedWithoutJoin(cls):\n print (\"ALL WORDS WITH TRANSLATIONS STORED IN DATABASE:\")\n for word1 in EnglishHelper.query(\"SELECT english_word FROM EnglishWords\", fetchAll=True):\n try:\n print word1[0],\" - \", (EnglishHelper.query(\"select polish_word from PolishWords where \"\n \" id_pl=(select id_pl from translations where \"\n \"id_eng = (select id_eng from EnglishWords \"\n \"where english_word = '%s'))\"%word1))[0].encode('utf-8')\n except:\n print \"There is no translation, sorry :(\"", "def philosophy_phrase():\n phrase = get_random_phrase()\n if not phrase:\n phrase = {'author': '', 'phrase': ''}\n return phrase", "def get_random_phrase(self):\n return random.choice(self.phrases)", "def text_for_posting(self) -> str:\n return self.text or self.publication.text", "async def phrases(self, ctx):\n settings = await self.fetch_settings(ctx)\n cats, cats_remain = self.format_list(settings['cattriggers'])\n dogs, dogs_remain = self.format_list(settings['dogtriggers'])\n\n if not cats and not dogs:\n return await ctx.send(f'😿 I have no phrases to respond to on **{ctx.guild.name}**!')\n\n if ctx.channel.type == discord.ChannelType.text:\n message = f'🐱 Here are the phrases used to summon me on **{ctx.guild.name}**:\\n'\n else:\n message = f'🐱 Here are the phrases used to summon me in DMs:\\n'\n\n if cats:\n message += f'\\nCats: **{cats}**'\n if cats_remain:\n message += f' and **{cats_remain}** more...'\n if not cats and cats_remain:\n message += f'\\nCats: **{cats_remain}** phrase{\"s\" if cats_remain != 1 else \"\"} that {\"are\" if cats_remain != 1 else \"is\"} too long to fit here!'\n \n if dogs:\n message += f'\\nDogs: **{dogs}**'\n if dogs_remain:\n message += f' and **{dogs_remain}** more...'\n if not dogs and dogs_remain:\n message += f'\\nDogs: **{dogs_remain}** phrase{\"s\" if dogs_remain != 1 else \"\"} that {\"are\" if dogs_remain != 1 else \"is\"} too long to fit here!'\n \n if settings['require_mention']:\n message += f'\\n\\nYou need to @mention me for me to respond on **{ctx.guild.name}**!'\n await ctx.send(message)", "def get_PoemText(self):\n return self.text if self.text else \"No Text Yet\\n\"", "def get_text(self) -> str:\n return (\n self.raw_data.get(\"text\") or\n self.raw_data.get(\"caption\") or\n \"\"\n )", "def _compute_fulltext(self):\n return ''", "def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]", "def phrase_list_filler():\n return (Parse.word('we').possibly() + first_word('put write have know see') + \n Parse.word('that').possibly()).nil()", "def phrases_df_notfound_message(nounphrase):\n\n return html.H5('Noun phrases not found: {}.'.format(notfound),\n style={'color': colours['text']}\n )", "def get_custom_phrases():\n return [x[0] for x in all_topics if x[2] == \"1\"]", "def question(phrase):\n\n if phrase[-1] == '?':\n return 'Sure.'", "def _has_phrase(self, box):\n lines = box.get_lines()\n pattern = self.field.settings.pattern_builder.list_pattern(self._phrases)\n for line in lines:\n if re.search(pattern, line.text) is not None:\n return True\n return False", "def nostatement(phrase):\n\n is_printable = lambda x: x in printable\n is_whitespace = lambda x: x in whitespace\n if not any([is_printable(x) and not is_whitespace(x) for x in phrase]):\n return 'Fine. 
Be that way!'", "def get_abs_text(record):\n\n result = []\n\n try:\n for item in record['MedlineCitation']['Article']['Abstract']['AbstractText']:\n result.append(item)\n except KeyError:\n pass\n\n if len(result) == 0:\n return '*Abstract Unavailable*'\n else:\n return ' '.join(result)", "def get_text(self):\n try:\n return self.get_translation().text\n except MissingTranslation:\n return _(\"(No text)\")", "def phrase(self):\n return self._phrase", "def phrase(self):\n return self._phrase", "async def proo (pros):\n if not pros.text[0].isalpha() and pros.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n index = random.randint(0, len(memes.PRO_STRINGS) - 1)\n reply_text = memes.PRO_STRINGS[index]\n await pros.edit(reply_text)", "def get_random_phrase():\n return random.choices(PHRASES, WEIGHTS, k=1)[0]", "def getText(self):\r\n return \"\"", "def return_excerpt():", "def get_full_text(self, item):\n text_content = self.db.plugin_text_text(item_id=item.unique_id)\n output = self.response.render(\n 'plugin_text/full_text.txt',\n dict(text_content=text_content, item=item))\n return unicode(output.decode('utf-8'))", "def obtain_text():\n pass", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def get_text(value):\r\n if value is None:\r\n return None\r\n else:\r\n return self.runtime.service(self, \"i18n\").ugettext(value)", "def OnGetItemText(self, item, column):\r\n \r\n return \"\"", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, no_punc=True) # with symbols\n # return title + content # without symbols" ]
[ "0.6144274", "0.5999164", "0.5982543", "0.59605277", "0.59567666", "0.5844588", "0.5810861", "0.57757527", "0.5667609", "0.55661607", "0.552775", "0.5445835", "0.5340363", "0.53307855", "0.5327084", "0.53270626", "0.5305563", "0.52846193", "0.52846193", "0.527648", "0.5274116", "0.5271754", "0.5270978", "0.5256638", "0.5255663", "0.52528286", "0.5246343", "0.5234723", "0.5220189", "0.5206711" ]
0.60088485
1
If there is no any published phrases in db, it would return the empty strings in variables, else it would return a text of the phrase and author of the phrase.
def philosophy_phrase(): phrase = get_random_phrase() if not phrase: phrase = {'author': '', 'phrase': ''} return phrase
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text_for_posting(self) -> str:\n return self.text or self.publication.text", "def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]", "def phrase_list_filler():\n return (Parse.word('we').possibly() + first_word('put write have know see') + \n Parse.word('that').possibly()).nil()", "def gutenberg(self, sentences):\n titleFound = False\n authorFound = False\n i = 0\n\n while not titleFound or not authorFound:\n if len(sentences[i].parseWords()) != 0 and not titleFound and sentences[i].parseWords()[0] == 'Title:':\n titleFound = True\n title = ' '.join(sentences[i].parseWords()[1:])\n\n if len(sentences[i].parseWords()) != 0 and not authorFound and sentences[i].parseWords()[0] == 'Author:':\n authorFound = True\n author = ' '.join(sentences[i].parseWords()[1:])\n\n i += 1\n\n return title, author", "def get_custom_phrases():\n return [x[0] for x in all_topics if x[2] == \"1\"]", "async def phrases(self, ctx):\n settings = await self.fetch_settings(ctx)\n cats, cats_remain = self.format_list(settings['cattriggers'])\n dogs, dogs_remain = self.format_list(settings['dogtriggers'])\n\n if not cats and not dogs:\n return await ctx.send(f'😿 I have no phrases to respond to on **{ctx.guild.name}**!')\n\n if ctx.channel.type == discord.ChannelType.text:\n message = f'🐱 Here are the phrases used to summon me on **{ctx.guild.name}**:\\n'\n else:\n message = f'🐱 Here are the phrases used to summon me in DMs:\\n'\n\n if cats:\n message += f'\\nCats: **{cats}**'\n if cats_remain:\n message += f' and **{cats_remain}** more...'\n if not cats and cats_remain:\n message += f'\\nCats: **{cats_remain}** phrase{\"s\" if cats_remain != 1 else \"\"} that {\"are\" if cats_remain != 1 else \"is\"} too long to fit here!'\n \n if dogs:\n message += f'\\nDogs: **{dogs}**'\n if dogs_remain:\n message += f' and **{dogs_remain}** more...'\n if not dogs and dogs_remain:\n message += f'\\nDogs: **{dogs_remain}** phrase{\"s\" if dogs_remain != 1 else \"\"} that {\"are\" if dogs_remain != 1 else \"is\"} too long to fit here!'\n \n if settings['require_mention']:\n message += f'\\n\\nYou need to @mention me for me to respond on **{ctx.guild.name}**!'\n await ctx.send(message)", "def get_noun_phrases(blob):\n return blob.noun_phrases", "def get_random_phrase(self):\n return random.choice(self.phrases)", "def _compute_fulltext(self):\n return ''", "def showTranslatedWithoutJoin(cls):\n print (\"ALL WORDS WITH TRANSLATIONS STORED IN DATABASE:\")\n for word1 in EnglishHelper.query(\"SELECT english_word FROM EnglishWords\", fetchAll=True):\n try:\n print word1[0],\" - \", (EnglishHelper.query(\"select polish_word from PolishWords where \"\n \" id_pl=(select id_pl from translations where \"\n \"id_eng = (select id_eng from EnglishWords \"\n \"where english_word = '%s'))\"%word1))[0].encode('utf-8')\n except:\n print \"There is no translation, sorry :(\"", "def extract_phrases(data,model):\n phrases = []\n alignment = model.alignment_idx\n for i in range(len(data)):\n sent_phrases = phrase_extraction(data[i][\"fr\"],data[i][\"en\"],alignment[i])\n phrases.append(sent_phrases)\n return phrases", "def formatTexts(owned, shared):\n owned_texts = []\n shared_texts = []\n # Catches error if there is no score from the databse search\n try:\n for text in range(len(owned)):\n owned_texts.append(\n {'title': owned[text][0], 'body': owned[text][1], 'score': owned[text][2]})\n for text in range(len(shared)):\n shared_texts.append(\n {'title': shared[text][0], 'body': shared[text][1], 'score': shared[text][2]})\n 
except:\n for text in range(len(owned)):\n owned_texts.append(\n {'title': owned[text][0], 'body': owned[text][1]})\n for text in range(len(shared)):\n shared_texts.append(\n {'title': shared[text][0], 'body': shared[text][1]})\n # Adds False if the either of the text arrays are empty\n if len(owned_texts) == 0:\n owned_texts.append(False)\n if len(shared_texts) == 0:\n shared_texts.append(False)\n return owned_texts, shared_texts", "def extract_relevant(self):\n item_extraction = self.data\n my_dict = {'tweeted_time': item_extraction['created_at'],\n 'tweet_id': item_extraction['id'],\n # If the time comes when the below becomes more significant, it will be no trouble at all to make an\n # additional column for it, but delimiting it with a ` creates less clutter in the Database\n 'in_reply_to':\n \"NAME/\" + str(item_extraction['in_reply_to_screen_name']) + \"`\" +\n \"STATUSID/\" + str(item_extraction['in_reply_to_status_id_str']) + \"`\" +\n \"USERID/\" + str(item_extraction['in_reply_to_user_id_str']),\n 'lang': item_extraction['lang'],\n 'place': item_extraction['place'], 'source': item_extraction['source']}\n if item_extraction['place'] is not None:\n my_dict['place'] = item_extraction['place']['full_name']\n if 'retweeted_status' in item_extraction.keys():\n my_dict['original_author_id'] = item_extraction['retweeted_status']['user']['id']\n my_dict['original_author_handle'] = item_extraction['retweeted_status']['user']['screen_name']\n tester = item_extraction['retweeted_status']['text']\n cleaned = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", tester).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n # This final text will make it a lot easier to run NLP\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n else:\n my_dict['original_author_id'] = item_extraction['user']['id']\n my_dict['original_author_handle'] = item_extraction['user']['screen_name']\n cleaned = ' '.join(re.sub(\"(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", item_extraction['text']).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n return my_dict", "def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates 
?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words", "def phrase_retrieve(self, query):\n # ------------------------------------------------------------------\n # TODO: Implement Phrase Query retrieval (ie. return the documents \n # that don't just contain the words, but contain them in the \n # correct order) You will want to use the inverted index \n # that you created in index(), and may also consider using\n # boolean_retrieve. \n # NOTE that you no longer have access to the original documents\n # in self.docs because it is now a map from doc IDs to set\n # of unique words in the original document.\n # Right now this just returns all possible documents!\n docs = []\n first_hash = self.boolean_retrieve(query) # narrows down possible documents\n\n for doc in first_hash:\n title = self.titles[doc]\n word_list = []\n \n for word in query:\n word_list.append(self.inv_index[word][title]) # list for each query word from inverted index\n\n if len(word_list) == 1:\n docs.append(doc) # only one word in query\n break\n\n is_match = bool # undefined boolean value for match or not\n\n for i in word_list[0]: # first word occurrence positions\n for j in range(1, len(query)): # next words in query\n if (i + j) in word_list[j]: # check if words in positional order for document\n is_match = True # stays true throughout range(1, len(query)) if match\n else:\n is_match = False # update match status \n break\n if is_match:\n docs.append(doc)\n break\n \n # ------------------------------------------------------------------\n return sorted(docs) # sorted doesn't actually matter", "def get_sample_text_passages(self, expression, no_passages):\n count = 0\n output = []\n phrase = nltk_tokenize.word_tokenize(expression)\n random.seed(expression)\n random_documents = self.documents.copy()\n random.shuffle(random_documents)\n\n for document in random_documents:\n if count >= no_passages:\n break\n current_document = document.get_tokenized_text()\n for index in range(len(current_document)):\n if current_document[index] == phrase[0]:\n if current_document[index:index+len(phrase)] == phrase:\n passage = \" \".join(current_document[index-20:index+len(phrase)+20])\n output.append((document.filename, passage))\n count 
+= 1\n\n if len(output) <= no_passages:\n return output\n return output[:no_passages]", "def full_text(self):\n\n if self._full_text == []:\n for sentence in self.title():\n self._full_text.append(sentence)\n for sentence in self.abstract():\n self._full_text.append(sentence)\n for sentence in self.body():\n self._full_text.append(sentence)\n\n return self._full_text", "def get_PoemText(self):\n return self.text if self.text else \"No Text Yet\\n\"", "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def get_text(self) -> str:\n return (\n self.raw_data.get(\"text\") or\n self.raw_data.get(\"caption\") or\n \"\"\n )", "def get_text(data):\n return \" \".join([item[\"words\"] for item in data])", "def get_article_str(article_sents):\n article_str = \"\"\n for nlp_sent in article_sents:\n article_str += (' ' + nlp_sent.text + ' ')\n return article_str", "def get_tweet_texts(tweet_list):\n relevant_tweet_text = ''\n for text in tweet_list:\n relevant_tweet_text += ' ' + text['text']\n\n return relevant_tweet_text", "def get_user_and_text(db):\n con = lite.connect(db)\n with con:\n cur = con.cursor()\n cur.execute(\"SELECT author, GROUP_CONCAT(text, ' ') FROM Comments GROUP BY author\")\n user_text_list = cur.fetchall() \n return user_text_list", "def query_texts():\n alloweds = {'author', 'is_prose', 'language', 'title'}\n filters = {}\n for allowed in alloweds:\n grabbed = flask.request.args.get(allowed, None)\n if grabbed:\n filters[allowed] = grabbed\n before_val = flask.request.args.get('before', None)\n after_val = flask.request.args.get('after', None)\n try:\n if before_val is not None:\n before_val = int(before_val)\n if after_val is not None:\n after_val = int(after_val)\n except ValueError:\n return tv5api.errors.error(\n 400,\n message='If used, \"before\" and \"after\" must have integer values.')\n\n if before_val is not None and after_val is not None:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n year_not=(before_val, after_val),\n **filters)\n elif before_val is not None and after_val is None:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n # Assuming that lower limit pre-dates all texts in database\n year=(-999999999999, before_val),\n **filters)\n elif not before_val is None and after_val is not None:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n # Assuming that upper limit post-dates all texts in database\n year=(after_val, 999999999999),\n **filters)\n else:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n **filters)\n return flask.jsonify(texts=[fix_id(r.json_encode()) for r in results])", "def initial_sentence(external, gene, pubmed_ids, numbers):\n \n pubmed_text = format_pubmed(pubmed_ids)\n \n dnms = 0\n if len(external) > 0:\n dnms = external[\"dnms\"]\n \n # figure out the correct type for the word \"time\", e.g. 
1 \"time\", 2 \"times\"\n times = \"times\"\n if len(external) > 0 and external[\"dnms\"] == 1:\n times = \"time\"\n \n initial_text = \"In other large scale exome sequencing projects de novo \" \\\n \"mutations in {} have been identified {} {}{}.\".format(gene,\\\n numbers.number_to_words(dnms), times, pubmed_text)\n \n return initial_text", "def searchphrases(query):\n query_nostopwords = removestopwords(query)\n query_lemmatized = lemmatize(query_nostopwords) #look like\n phraseids = []\n ngramids=[]\n words=query_lemmatized.split()\n query_ngram = \"select id from ngrams where lower(lemmangrams) like lower('%{}%')\".format(query_lemmatized)+\" or lower(lemmangrams) like lower('%{}%')\".format(words[0])\n for word in words[1:]:\n query_ngram=query_ngram+\" or lower(lemmangrams) like lower('%{}%')\".format(word)\n con = it.engine.execute(query_ngram)\n rows_phrase = con.fetchall()\n if rows_phrase:\n ngramids = list(set([str(i[0]) for i in rows_phrase]))\n phraseids.extend(ngramids)\n phraseids = list(set(phraseids))\n results=categorize(phraseids)\n return results", "def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result", "def thesaurus(self, message):\n read_pointer = open('Thesaurus.txt')\n\n for line in read_pointer:\n split_line = line.split(':', 1)\n if split_line[0] == message:\n return split_line[1]", "def get_abs_text(record):\n\n result = []\n\n try:\n for item in record['MedlineCitation']['Article']['Abstract']['AbstractText']:\n result.append(item)\n except KeyError:\n pass\n\n if len(result) == 0:\n return '*Abstract Unavailable*'\n else:\n return ' '.join(result)" ]
[ "0.5979129", "0.5975303", "0.5940745", "0.5916747", "0.57627124", "0.5760304", "0.5680341", "0.56001073", "0.55721706", "0.55670494", "0.5489164", "0.54319537", "0.54258007", "0.54139996", "0.54103196", "0.53569394", "0.53411996", "0.53195655", "0.5317708", "0.53010523", "0.5297108", "0.5290722", "0.5267299", "0.5249168", "0.5242868", "0.52417827", "0.52401084", "0.5214539", "0.5202019", "0.51894665" ]
0.6491961
0
Optimise the material parameters for the given stoma to the open shape is attained at the given pressure.
def optimise_material_parameters(stoma_cfg): print('*' * 120) print("--> Finding optimum '{}' material parameters for the '{}' stoma...".format( stoma_cfg.material_model.label, stoma_cfg.label)) print('*' * 120) optimisation_helper = MaterialParametersOptimisationHelper(stoma_cfg=stoma_cfg) # Use full resolution mesh and a different optimiser soln = optimisation_helper.do_optimisation() return soln
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setMaterialParameters(val, ptype):\n pdict = {'ambient':'AMBIENT','diffuse':'DIFFUSE','specular':'SPECULAR',\n 'exponent':'EXPONENT'}\n dislin.matopt(val, pdict[ptype])", "def updateParameters(self):\n\n if self.params[1].value:\n if arcpy.Exists(self.params[1].value):\n try:\n min_value = arcpy.GetRasterProperties_management(self.params[1].value, \"MINIMUM\")[0]\n\n if str(self.params[8].value) != str(self.params[1].value):\n self.params[7].value = True\n self.params[8].value = str(self.params[1].value)\n else:\n self.params[7].value = False\n\n if str(min_value) == \"0\":\n if self.params[7].value == True:\n self.params[2].value = True\n self.params[3].enabled = True\n self.params[7].value = False\n else:\n self.params[2].value = False\n self.params[3].enabled = False\n\n except arcpy.ExecuteError:\n pass\n\n if self.params[2].value == True:\n self.params[3].enabled = True\n else:\n self.params[3].enabled = False", "def do_optimisation(self):\n\n print('--> Parameters for optimisation:')\n print('--> Using measurements : {}'.format(self.stoma_cfg.comparison_helper.optimisation_keys))\n print('')\n\n x0 = self.initial_guess()\n\n tol, eps = 1e-4, 0.001\n\n print('--> Using SLSQP with tol={} and eps={}'.format(tol, eps))\n\n soln = opt.minimize(fun=self.optimise_fn,\n x0=x0,\n method='SLSQP',\n tol=tol,\n options={'eps': eps})\n\n print('*' * 120)\n print('--> Optimisation procedure has finished...')\n print(soln)\n print('*' * 120)\n\n if soln.success:\n print('--> Optimisation succeeded. Result is...')\n self._set_material_parameters(soln.x)\n print('--> {}'.format(self.material_model))\n else:\n print('--> The optimisation failed!')\n\n print('*' * 120)\n\n return soln", "def _propagate_material_settings(self, bm, layer):\n state = layer.state\n\n # Shade Flags\n if not bm.use_mist:\n state.shadeFlags |= hsGMatState.kShadeNoFog # Dead in CWE\n state.shadeFlags |= hsGMatState.kShadeReallyNoFog\n\n if bm.use_shadeless:\n state.shadeFlags |= hsGMatState.kShadeWhite\n\n # Colors\n layer.ambient = utils.color(bpy.context.scene.world.ambient_color)\n layer.preshade = utils.color(bm.diffuse_color)\n layer.runtime = utils.color(bm.diffuse_color)\n layer.specular = utils.color(bm.specular_color)\n\n layer.specularPower = min(100.0, float(bm.specular_hardness))\n layer.LODBias = -1.0 # Seems to be the Plasma default\n\n if bm.emit > 0.0:\n # Use the diffuse colour as the emit, scaled by the emit amount\n # (maximum 2.0, so we'll also scale that by 0.5)\n emit_scale = bm.emit * 0.5\n layer.ambient = hsColorRGBA(bm.diffuse_color.r * emit_scale,\n bm.diffuse_color.g * emit_scale,\n bm.diffuse_color.b * emit_scale,\n 1.0)", "def main(\n f0=0.996,\n psi0=0,\n th=0,\n material=None,\n logFileName=None):\n # np.seterr(all='raise')\n np.seterr(all='ignore')\n import os\n from mk.library.mk_lib import findStressOnYS\n from mk.library.lib import gen_tempfile, calcAlphaRho\n from mk_paths import constructBC,findCorrectPath\n import mk.materials.constitutive as constitutive\n import dill\n snapshot = constitutive.Snapshot()\n # from yf2 import wrapHill48\n\n print 'material:',material, type(material).__name__\n\n if type(material).__name__=='NoneType':\n print 'given material', material\n from materials import IsoMat\n matA = IsoMat()\n matB = IsoMat()\n elif type(material).__name__=='str':\n with open(material,'rb') as fo:\n matA = dill.load(fo)\n with open(material,'rb') as fo:\n matB = dill.load(fo)\n matA.set_hrd()\n matA.set_yld()\n matB.set_hrd()\n matB.set_yld()\n else:\n raise IOError, 
'Unexpected case'\n # ## Should work on here to allow\n # ## both A and B materials are described using the\n # ## same constitutive model\n # matA = material\n # matB = material\n\n rad2deg = 180./np.pi\n deg2rad = 1./rad2deg\n\n stressA_off, dum1, dum2 = constructBC(\n epsAng = th,\n f_yld = matA.f_yld,\n verbose = False)\n\n ## put the stress on the locus\n matA.update_yld(stressA_off)\n np.set_printoptions(precision=3)\n print('stressA:'+('%7.3f'*6)%(\n matA.stress[0],matA.stress[1],matA.stress[2],\n matA.stress[3],matA.stress[4],matA.stress[5]))\n print('strainA:'+('%7.3f'*6)%(\n matA.dphi[0],matA.dphi[1],matA.dphi[2],\n matA.dphi[3],matA.dphi[4],matA.dphi[5]))\n alpha,rho = calcAlphaRho(matA.stress,matA.dphi)\n print('alpha: %7.4f'%alpha)\n print('rho : %7.4f'%rho)\n\n if type(logFileName).__name__=='NoneType':\n logFileName = gen_tempfile(\n prefix='mk-f0%3.3i-th%4.4i-psi%2.2i'%(\n int(f0*1e3),int(th),int(psi0)),\n affix='log')\n logFile = open(logFileName,'w')\n\n ## integrate for each path.\n absciss = 1e3\n absciss0 = 1e3\n nind = max([len(matA.logfn),len(matB.logfn)])+3\n print('Iteration over the given psi angle')\n head = (\n '%8s'*9+ ## variables\n ('%'+'%is'%nind)*2+ ## aLogFN and bLogFN\n '%'+'%is'%(len(snapshot.logfn)+3))%(\n 'epsRD','epsTD','psi0','psif','sigRD',\n 'sigTD','sigA','T','cmpt','aLogFN','bLogFN','ssFN')\n head = '%s\\n'%head\n logFile.write(head)\n t0 = time.time()\n\n ynew, absciss, xbb= onepath(\n matA=matA,matB=matB,\n psi0=psi0*deg2rad,f0=f0,\n T=absciss,snapshot=snapshot)\n\n matA.recordCurrentStat()\n matB.recordCurrentStat()\n\n dTime = time.time() - t0\n psif1 = xbb[0]\n\n cnt = (\n '%8.3f'*8+\n '%8i'+\n ('%'+'%is'%nind)*2+\n '%'+'%is'%(len(snapshot.logfn)+3))%(\n ynew[1],ynew[2],psi0,\n psif1*rad2deg,\n matA.stress[0],matA.stress[1],\n matA.sig, ## hardening (effective stress)\n absciss,dTime,matA.logfn,matB.logfn,snapshot.logfn)\n print(cnt)\n logFile.write(cnt+'\\n')\n uet(dTime,'total time spent');print('')\n logFile.close()\n print('%s has been saved'%logFileName)\n return logFileName,dTime, matA, matB", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n 
self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def ParticleFilterParams(fix_params=False):\n\n ## Particle filter parameters\n\n # Q_c will be the time continuous 
covariance matrix. \n #This should be the errors in the model.\n # in the form [x_cov, y_cov, z_cov, \n # vel_x_cov, vel_y_co, vel_z_cov, \n # mass_cov, \n # sigma_cov, shape_cov, brightness_cov, tau_cov]\n \n\n Q_c = [10., 2., 2., \n 150., 50., 50., \n 5., 0, 0,\n 1e-3, 1e-10, 0., 0.0001]\n\n\n print('Qc values used:', Q_c)\n\n Q_c = np.asarray([i**2 for i in Q_c])\n\n \n # Q_c_frag is used at reinitialisation if the fragmentation option is used\n \n Q_c_frag = [0., 0., 0., \n 0.02, 0.02, 0.02, \n 0.5, 0, 0,\n 2e-3, 5e-9, 0., 0.]\n\n Q_c_frag = [i**2 for i in Q_c_frag]\n\n ## P: starting uncertainty to initialise gaussian spread of particals. \n ## P2: starting uncertainty at reinitialisation if the fragmentation option is used\n ## in the form [x_cov, y_cov, z_cov, % of vel_x_cov, % of vel_y_co, % of vel_z_cov]\n P = [50., 50., 50., 250., 250., 250.]\n P2 = [50., 50., 50., 250., 250., 250.]\n\n ## Initialise state ranges\n\n\n ## shape parameter close to a rounded brick (1.8) (A for a sphere =1.21)\n A_min = 1.21\n A_max = 3.0 \n\n ## luminosity coefficient\n tau_min = 0.0001\n tau_max = 0.1\n\n ## lists of typical meteorite densities for different types. [chond, achond, stony-iron, iron, cometary]\n pm_mean = [3000, 3100, 4500, 7500, 850]\n pm_std = [420, 133, 133, 167, 117 ]\n\n ## to choose density values according to a distribution of meteorite percentages:\n particle_choices = []\n\n # this is created using lines 257-266; uncomment if needs changing.\n random_meteor_type = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4]\n\n #random_meteor_type = []\n #for i in range(80): # 80 % Chondrites\n # random_meteor_type.append(0)\n #for i in range(11): # 11 % Achondrites\n # random_meteor_type.append(1)\n #for i in range(2):\n # random_meteor_type.append(2) # 2 % Stony-Iron\n #for i in range(5):\n # random_meteor_type.append(3) # 5 % iron\n #for i in range(2):\n # random_meteor_type.append(4) # 2 % cometary\n\n ## ablation coefficeint \n #sigma_min = 0.001*1e-6\n #sigma_max = 0.5*1e-6\n\n\n #range_params = [m0_max, A_mean, A_std, pm_mean, pm_std, random_meteor_type, cd_mean, cd_std, sigma_min, sigma_max, K_min, K_max, tau_min, tau_max]\n range_params = [A_min, A_max, pm_mean, pm_std, random_meteor_type, tau_min, tau_max]\n\n if fix_params:\n \tQ_c[-4:] = [0., 0., 0., 0.]\n \tQ_c_frag[-4:] = [0., 0., 0., 0.]\n return Q_c, Q_c_frag, P, range_params", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n 
self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def Optimizer(r_grasp,PAM_r, PAM_s, object_s, object_f, object_params, phi, r_max, walls, obstacles, obstacles_PAM, current_leg, n, n_p, v_max, force_max, legs, dt):\n global action_push_pull, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned\n # assigning cost of changing from one leg to another based on the distance to the desired pose\n cost_ChangeLeg = 1\n dz_final = np.sqrt((object_s.x - object_f.x) ** 2 + (object_s.y - object_f.y) ** 2)\n if dz_final < 1:\n cost_ChangeLeg = 10\n elif dz_final < 2:\n cost_ChangeLeg = 20\n else:\n cost_ChangeLeg = 10\n\n # assigning weight for cost of predicted repositioning and cost of robot motion\n w_cost_reposition = 40\n w_cost_motion = 10\n\n # finding object's leg cordinates\n object_leg = find_corners(object_s.x, object_s.y, object_s.phi, object_params[7], object_params[8])\n\n # initialization (initializeing cost to infinity)\n cost = [float('inf'), float('inf'), float('inf'), float('inf')]\n cost_legchange = [0, 0, 0, 0]\n cost_PAM = [[0, 0],[0, 0],[0, 0],[0, 0]]\n cost_manipulation = [0, 0, 0, 0]\n cost_motion = [0, 0, 0, 0]\n force = [0, 0, 0, 0]\n path = [[[], []], [[], []], [[], []], [[], []]]\n planned_path_w = [[],[],[],[]]\n PAM_g = [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]\n command = [[], [], [], []]\n des = [[], [], [], [], []]\n PAM_goal = state()\n\n # find the nominal trajectory for manipulation\n theta = nominal_traj([object_s.x,object_s.y,object_s.phi], [object_f.x,object_f.y,object_f.phi], v_max, walls, obstacles, n, dt)\n\n # itterate through each leg to find the leg with minimum cost\n for leg in range(4):\n phi_linear = theta\n psi_linear = [theta[k] + phi[leg] for k in range(len(theta))]\n \t# find the cost and required force for manipulation for the leg\n force[leg], cost_manipulation[leg], planned_path_w[leg], command[leg], des= OptTraj([object_s.x, object_s.y, object_s.phi, object_s.xdot, object_s.ydot, object_s.phidot], [object_f.x, object_f.y, object_f.phi, object_f.xdot, object_f.ydot, object_f.phidot], v_max, walls, obstacles, object_params[0:4], object_params[4:7], phi_linear, psi_linear, force_max, r_max[leg], n, dt, 
object_leg[leg])\n \t# adding cost of changing leg\n if leg != current_leg:\n cost_legchange[leg] = cost_ChangeLeg\n # adding cost of PAM motion to PAM goal pose\n phi0 = np.arctan2(object_leg[leg][1]-object_s.y,object_leg[leg][0]-object_s.x)\n # finding the better option between pulling and pushing for each leg, with the same manipulation plan\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost_PAM[leg][push_pull], path[leg][push_pull], command_pam, goal_orientation = OptPath([PAM_s.x, PAM_s.y, PAM_s.phi], PAM_g[leg][push_pull], walls, obstacles_PAM, n_p, dt)\n if cost_PAM[leg][push_pull]!= float(\"inf\"):\n PAM_s_sim = copy.deepcopy(PAM_s)\n PAM_s_sim.x, PAM_s_sim.y, PAM_s_sim.phi = [PAM_r * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], PAM_r * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n # adding cost of predicted re-positionings\n n_transition = traj_simulation(copy.deepcopy(PAM_s_sim), copy.deepcopy(object_s), force[leg], legs, leg, command[leg])\n # print(n_transition)\n cost_PAM[leg][push_pull] += w_cost_reposition*n_transition\n cost_motion[leg] += min(cost_PAM[leg])*w_cost_motion\n action_push_pull[leg] = np.argmin(cost_PAM[leg])\n else:\n phi0 = np.arctan2(force[leg][0][1], force[leg][0][0])\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost = [cost_legchange[leg] + cost_motion[leg] + cost_manipulation[leg] for leg in range(4)]\n\n if min(cost) < float(\"inf\"):\n \t[min_index, min_value] = [np.argmin(cost), min(cost)]\n \t# Finding the grasping goal pose based on the selected plan\n \tphi0 = np.arctan2(object_leg[min_index][1]-object_s.y,object_leg[min_index][0]-object_s.x)\n \tgrasping_goal = [PAM_r * np.cos(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][0], PAM_r * np.sin(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][1], np.pi * action_push_pull[min_index] + phi0]\n \tPAM_goal = state()\n \tPAM_goal.x, PAM_goal.y, PAM_goal.phi = PAM_g[min_index][action_push_pull[min_index]]\n \tobject_path_planned = Path()\n \tobject_path_planned.header.frame_id = 'frame_0'\n \tfor i in range(len(planned_path_w[min_index])):\n \t\tpose = PoseStamped()\n \t\tpose.pose.position.x = planned_path_w[min_index][i][0]\n \t\tpose.pose.position.y = planned_path_w[min_index][i][1]\n \t\tpose.pose.position.z = 0\n \t\tobject_path_planned.poses.append(pose)\n\n \tPAM_path_planned = Path()\n \tPAM_path_planned.header.frame_id = 'frame_0'\n \tif min_index != current_leg:\n \t\tfor i in range(len(path[min_index][action_push_pull[min_index]])):\n \t\t\tpose = PoseStamped()\n \t\t\tpose.pose.position.x, pose.pose.position.y, pose.pose.orientation.z =path[min_index][action_push_pull[min_index]][i]\n \t\t\tPAM_path_planned.poses.append(pose)\n else:\n \tmin_index = 5\n \tmin_value = float(\"inf\")\n if 0 < min_index and min_index <= 4:\n force_d = force[min_index][0]\n else:\n force_d = [0,0,0]\n\n return cost ,min_index, force_d, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned", "def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n 
self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n 
self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. 
+ 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, 
gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n 
self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def set_stress(self, stress=None):\n self.status()\n if not stress:\n if self.__cod == 'vasp': \n #getData = VASP()\n getData = vasp.Stress()\n outfile = 'vasprun.xml'\n elif self.__cod == 'espresso':\n getData = espresso.Stress()\n outfile = 'espresso.out'\n for atoms in self.__structures.items():\n \n if not atoms[1].status:\n atoms[1].stress = np.zeros((3,3))\n continue\n #getData.set_outfile('%s/%s/'%atoms[0] + outfile)\n #getData.set_gsEnergy()\n getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile)\n getData.set_stress()\n #atoms[1].gsenergy = getData.get_gsEnergy()\n atoms[1].stress = getData.get_stress()\n \n self.__stress = stress", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n 
self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def set_stress(dat):\n # stress parameters\n rho = 2700.\n xgrad = 0.61\n ygrad = (0.61 + 1) / 2\n dat.strs.on()\n dat.strs.bodyforce = False\n dat.add(fdata.fmacro('stressboun', zone='XMIN',\n param=(('direction', 1), ('value', 0))))\n dat.add(fdata.fmacro('stressboun', zone='YMIN',\n param=(('direction', 2), ('value', 0))))\n dat.add(fdata.fmacro('stressboun', zone='ZMIN',\n param=(('direction', 3), ('value', 0))))\n dat.add(fdata.fmacro('stressboun', zone='XMAX',\n param=(('direction', 1), ('value', 0))))\n dat.add(fdata.fmacro('stressboun', zone='YMAX',\n param=(('direction', 2), ('value', 0))))\n dat.zone[0].poissons_ratio = 0.2\n dat.zone[0].thermal_expansion = 3.5e-5\n dat.zone[0].pressure_coupling = 1.\n # Model starts at surface, so no overburden (specified as zgrad when\n # 'calculate_vertical' set to True)\n dat.incon.stressgrad(xgrad=xgrad, ygrad=ygrad, zgrad=0,\n calculate_vertical=True, vertical_fraction=True)\n return dat", "def minmass_v03_change(raw_image, old_minmass, preprocess=True,\n invert=False, noise_size=1, smoothing_size=None,\n threshold=None):\n if preprocess and smoothing_size is None:\n raise ValueError('Please specify the smoothing size. 
By default, this '\n 'equals diameter.')\n\n if np.issubdtype(raw_image.dtype, np.integer):\n dtype = raw_image.dtype\n if invert:\n raw_image = raw_image ^ np.iinfo(dtype).max\n else:\n dtype = np.uint8\n if invert:\n raw_image = 1 - raw_image\n\n if preprocess:\n image = bandpass(raw_image, noise_size, smoothing_size, threshold)\n else:\n image = raw_image\n\n scale_factor = scalefactor_to_gamut(image, dtype)\n\n return old_minmass / scale_factor", "def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0", "def setHardness(self, hardness):\n self.__hardness = hardness\n self.scene.setShaderInput(\"props\", self.__ambient, self.__hardness, 0, 1)", "def maximise_profit(p, photo='Farquhar', res='low', parallel=False,\n solstep='var', symbolic=False, scaleup=True):\n\n success = True # initial assumption: the optimisation will succeed\n\n # original LAI, PPFD\n LAI = p.LAI\n PPFD = p.PPFD\n\n # retrieve relevant sunlit / shaded fractions\n fRcan, fPPFD, fLAI, fscale2can, __ = absorbed_radiation_2_leaves(p)\n\n # sunlit / shaded outputs\n fvc = np.zeros(len(fPPFD))\n E = np.zeros(len(fPPFD))\n gs_can = np.zeros(len(fPPFD))\n gs_can[:] = np.nan # make sure we have nans for averaging\n Pleaf = np.zeros(len(fPPFD))\n An = np.zeros(len(fPPFD))\n Aj = np.zeros(len(fPPFD))\n Ac = np.zeros(len(fPPFD))\n Ci_can = np.zeros(len(fPPFD))\n Ci_can[:] = np.nan # make sure we have nans for averaging\n Tleaf = np.zeros(len(fPPFD))\n Tleaf[:] = np.nan # make sure we have nans for averaging\n Rleaf = np.zeros(len(fPPFD))\n\n # hydraulics\n P, trans = hydraulics(p, res=res)\n COST, VC = hydraulic_cost(p, P)\n\n # apply the scaling to match the As 
(conductance of can, not leaf)\n trans_can = [trans * e for e in fscale2can]\n\n # sunlit / shaded loop, two assimilation streams\n for i in range(len(fRcan)):\n\n if i == 0: # sunlit\n fvc_opt = p.fvc_sun\n\n else: # shaded\n fvc_opt = p.fvc_sha\n\n p.Rnet = fRcan[i]\n p.PPFD = fPPFD[i]\n p.LAI = fLAI[i]\n p.scale2can = fscale2can[i]\n trans = trans_can[i]\n cost = np.ma.copy(COST)\n vc = np.ma.copy(VC)\n\n if p.PPFD > 50.: # min threshold for photosynthesis\n try:\n gain, Ci = photo_gain(p, trans, photo, res, parallel, solstep,\n symbolic)\n\n # look for the most net profit\n profit = gain - cost\n\n # deal with edge cases by rebounding the solution\n gc, gs, gb = leaf_energy_balance(p, trans)\n profit_check = profit[1:][np.logical_and(gc[1:] > cst.zero,\n np.logical_and(Ci[1:] / p.CO2 < 0.95,\n gs[1:] <\n (conv.GbvGbc /\n conv.GwvGc) * gb))]\n idx = np.isclose(profit, max(profit_check))\n idx = [list(idx).index(e) for e in idx if e]\n\n if idx: # opt values\n fvc[i] = vc[idx[0]]\n E[i] = trans[idx[0]]\n gs_can[i] = gs[idx[0]]\n Pleaf[i] = P[idx[0]]\n Tleaf[i], __ = leaf_temperature(p, trans[idx[0]])\n Ci_can[i] = Ci[idx[0]]\n\n # rubisco- or electron transport-limitation?\n An[i], Aj[i], Ac[i], Rleaf[i] = \\\n calc_photosynthesis(p, E[i], Ci_can[i], photo)\n\n else:\n success = False\n\n except (ValueError, TypeError): # no opt\n success = False\n\n if not success: # this is rare, use last opt vc\n idx = bn.nanargmin(abs(vc - fvc_opt))\n fvc[i] = vc[idx]\n E[i] = trans[idx]\n Pleaf[i] = P[idx]\n Tleaf[i], __ = leaf_temperature(p, trans[idx])\n __, Ci_can[i] = photo_gain(p, np.asarray([E[i]]), photo, res,\n False, solstep, symbolic)\n Ci_can[i] = Ci_can[0] # a single value is returned\n\n if Ci_can[i] >= 0.95: # no solve\n Ci_can[i] = np.nan\n\n if (str(Ci_can[i]) == '--'): # no valid Ci\n Tleaf[i] = p.Tair\n fvc[i], gs_can[i], E[i], An[i], Ci_can[i], Rleaf[i] = \\\n (fvc_opt, ) + (0., ) * 5\n\n __, gs_can[i], __ = leaf_energy_balance(p, E[i])\n\n # rubisco- or electron transport-limitation?\n An[i], Aj[i], Ac[i], Rleaf[i] = calc_photosynthesis(p, E[i],\n Ci_can[i],\n photo)\n\n # if the critical point has been reached, stall\n if np.isclose(fvc[i], p.ratiocrit) or np.isclose(E[i], 0.):\n Tleaf[i] = p.Tair\n gs_can[i], E[i], An[i], Ci_can[i], Rleaf[i] = (0., ) * 5\n\n else:\n Tleaf[i] = p.Tair\n fvc[i], gs_can[i], E[i], An[i], Ci_can[i], Rleaf[i] = \\\n (fvc_opt, ) + (0., ) * 5\n\n # deal with no solves for Pleaf\n Pleaf[Pleaf > p.Ps] = p.Ps\n\n if np.isclose(np.nanmean(Pleaf), p.Ps):\n E_can, gs_can, Pleaf_can, An_can, Ci_can, rublim_can, Tleaf_can, \\\n Rleaf_can = (0., ) * 8\n\n else: # sum contributions from sunlit and shaded leaves\n with np.errstate(invalid='ignore'): # nans, no warning\n if any(np.isclose(Pleaf, p.Ps)):\n Pleaf[np.isclose(Pleaf, p.Ps)] = np.nan\n\n # deal with no solves for gs\n gs_can[np.isclose(gs_can, 0.)] = np.nan\n\n E_can = np.nansum(E) * conv.MILI # mmol m-2 s-1\n gs_can = np.nanmean(gs_can) # mol m-2 s-1\n Pleaf_can = np.nanmean(Pleaf) # MPa\n An_can = np.nansum(An) # umol m-2 s-1\n Ci_can = np.nanmean(Ci_can) # Pa\n rublim_can = rubisco_limit(np.nansum(Aj), np.nansum(Ac))\n Tleaf_can = np.nanmean(Tleaf) # degC\n Rleaf_can = np.nansum(Rleaf) # umol m-2 s-1\n\n if not scaleup: # downscale fluxes to the tree\n E_can /= np.sum(fscale2can)\n An_can /= np.sum(fscale2can)\n\n # reset original all canopy / forcing LAI, PPFD\n p.LAI = LAI\n p.PPFD = PPFD\n\n if (any(np.isnan([E_can, gs_can, An_can, Ci_can])) or\n any(np.isclose([E_can, gs_can, An_can, Ci_can], 
[0., ] * 4)) or\n any([E_can < 0., gs_can < 0., Ci_can < 0.]) or\n rublim_can == 0.):\n E_can, gs_can, An_can, Ci_can, Tleaf_can, Rleaf_can = (0., ) * 6\n\n return (fvc, p.kmax * VC[0], E_can, gs_can, Pleaf_can, An_can, Ci_can,\n rublim_can, Tleaf_can, Rleaf_can)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p" ]
[ "0.56353325", "0.5370124", "0.53695107", "0.532933", "0.5328432", "0.52875483", "0.5247943", "0.52012515", "0.51888525", "0.51510626", "0.51493967", "0.5130091", "0.5126285", "0.5090735", "0.5089308", "0.50722903", "0.5053663", "0.5041106", "0.5041106", "0.5041106", "0.50183666", "0.5013354", "0.49972543", "0.49884853", "0.4977678", "0.497339", "0.49722317", "0.497215", "0.49721453", "0.49636337" ]
0.72586125
0
Get and format the initial guess for the optimisation

Returns
-------
np.ndarray
    The initial guess
def initial_guess(self):
        x0 = [self.material_model.isotropic_matrix.c1,
              self.material_model.isotropic_matrix.c2]

        if not self.material_model.is_isotropic:
            # c5 is scaled in the optimisation function
            x0.append(self.material_model.fibres.c5 / c5_factor)

        if self.include_lm:
            x0.append(self.material_model.fibres.lm)

        return np.asarray(x0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_initial_guess():\n x0 = np.zeros(6)\n x0[0] = 1\n x0[3] = 1\n return x0", "def default_initial_params(self) -> numpy.ndarray:\n\n total_time = self.adiabatic_evolution_time\n step_time = total_time / self.iterations\n hamiltonian = self.hamiltonian\n\n params = []\n for param in self.params():\n if param.letter == 'U':\n p, i = param.subscripts\n params.append(_canonicalize_exponent(\n -0.5 * self.orbital_energies[p] * step_time / numpy.pi, 2))\n else:\n p, q, i = param.subscripts\n # Use the midpoint of the time segment\n interpolation_progress = 0.5 * (2 * i + 1) / self.iterations\n params.append(_canonicalize_exponent(\n -2 * hamiltonian.two_body[p, q] * interpolation_progress *\n step_time / numpy.pi, 2))\n\n return numpy.array(params)", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def get_initial_params(self, x, y, yerr):\n estimated_max = max(y)\n estimated_min = min(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(min(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_max, estimated_min])\n return p0", "def lin_init_guess(problem):\n xin = []\n for i in range(problem['Nv']):\n xin.append(np.linspace(problem['xi'][i, :], problem['xf'][i, :], problem['N'] + 1)[1:-1, :].flatten())\n xin.append(np.random.rand((problem['N']+1)*problem['num_inputs']))\n # now find initial guess for time if it is also a variable to optimize:\n if problem['T'] == 0:\n xin.append(np.array([np.sqrt(np.sum((problem['xi'][0, :2]-problem['xf'][-1, :2])**2))]))\n\n return np.concatenate(xin)", "def _make_initial_guess(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:\n # pylint: disable=C0103\n if self.opt_initial_guess:\n # Note: based on: https://stackoverflow.com/a/42322656\n freqs = np.fft.fftfreq(y.size, X[1] - X[0])\n Fyy = np.abs(np.fft.rfft(y))[1:]\n w_guess = 2 * np.pi * np.abs(freqs[1 + np.argmax(Fyy)])\n A_guess = np.std(y) * np.sqrt(2)\n c_guess = np.mean(y)\n\n return np.asarray([A_guess, w_guess, 0.0, c_guess])\n\n return np.std(y) * np.random.randn(4)", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def initial_guess_x0(self) -> npt.NDArray:\n if self._background is not None:\n return np.array(\n [np.random.uniform(lb, ub) for lb, ub in self.bounds]\n )\n return np.array(\n [np.random.uniform(lb, ub) for lb, ub in self.bounds * 2]\n )", "def solve(self) -> jnp.ndarray:\n pass", "def run():\n return estimate(0,1,0)", "def get_initial_params(self, x, y, yerr):\n estimated_height = max(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(max(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_height])\n return p0", "def get_initial_params(self, x, y, yerr):\n estimated_height = max(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(max(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_height])\n return p0", "def initial(self) -> np.ndarray:\n return self._dist['initial']", "def _solve(self) -> CasADiArrayType:\n solver_input = {\"x0\": self.x0, \"p\": self.p}\n if self.opt_type in CONSTRAINED_OPT:\n solver_input[\"lbg\"] = self._lbg\n solver_input[\"ubg\"] = self._ubg\n self._solution = self._solver(**solver_input)\n self._stats = self._solver.stats()\n self._stats[\"solution\"] = self._solution\n return self._solution[\"x\"]", "def idealize(self) -> None:\n self.k = np.zeros(6, dtype=float)\n self.p = np.zeros(2, dtype=float)\n self.c = np.zeros(2, dtype=float)", "def get_model_init(self) -> ndarray:\n beta = np.zeros(self.fevar.size)\n gamma = np.zeros(self.revar.size)\n var = np.hstack([beta, gamma])\n grad_beta = self.gradient(var)[:self.fevar.size]\n hess_beta = self.hessian(var)[:self.fevar.size,\n :self.fevar.size]\n beta = beta - np.linalg.solve(\n hess_beta + np.identity(self.fevar.size),\n grad_beta\n )\n return np.hstack([beta, gamma])", "def fitstart(self):\n return np.zeros(self.exog.shape[1])", "def get_initial_params(self, x, y, yerr):\n p = [0,0]\n p[1] = min(y)\n p[0] = (max(y)-min(y))/(max(x)-min(x))\n return array(p)", "def 
initial_x():\n\n # RANDOMLY GENERATES the INITIAL VALUES of the independent variables:\n temp = [uniform(1, cfg.n) for i in range(cfg.n)]\n\n return np.array(temp, dtype=np.float_)", "def _solve(self) -> CasADiArrayType:\n self._solution = minimize(**self.minimize_input)\n return self._solution.x", "def calculate_GNOp_initial(self):\n\n # Initialise\n Transform2NormIndOp = Transform2NormInd()\n\n # Directories\n dataset_paths = self.dataset_paths\n sensor_data_path = self.sensor_data_path\n output_dir = self.output_dir\n\n ################################################################################################################\n # 1.\tRead Harmonisation Matchup Data\n ################################################################################################################\n\n print \"Match-up Dataset:\"\n for path in dataset_paths:\n print \">\", path\n\n print \"\\nSensor Data File:\"\n print \"> \"+sensor_data_path\n\n print \"\\nOpening data...\"\n HData = self.data_reader(dataset_paths, sensor_data_path, open_uncertainty=True)\n\n ################################################################################################################\n # 2.\tPerform harmonisation\n ################################################################################################################\n\n print \"Complete\"\n\n print \"\\nData Info\"\n print \"==========\"\n print \"Reference Sensors - \", [str(sensor) for sensor in HData.idx['sensors']\n if sensor not in HData.idx[\"parameter_sensor\"]]\n print \"Harmonising Sensors - \", [str(sensor) for sensor in HData.idx['sensors']\n if sensor in HData.idx[\"parameter_sensor\"]]\n print \"Total Match-Ups - \", HData.idx['cNm'][-1]\n print \"Total Sensor State Data Values - \", HData.idx['idx'][-1]\n print \"Total Harmonisation Paramaters - \", len(HData.idx['parameter_sensor'])\n\n print \"\\nComputing Initial GN Value...\"\n # a. reparameterise input data such that output data are independent quantities\n HData = Transform2NormIndOp.run(HData)\n\n # b. 
run GN algorithm on modified data\n GNOp = GNAlgo(HData)\n\n ################################################################################################################\n # 3.\tWrite data to file\n ################################################################################################################\n\n print '\\nWriting data to file...'\n\n # Write data\n save_directory_GNOp = pjoin(output_dir, \"temp\", \"GNOp_initial\")\n try:\n os.makedirs(save_directory_GNOp)\n except OSError:\n pass\n\n GNOp.save(save_directory_GNOp)\n\n print \"Output Data Written To:\"\n print \">\", save_directory_GNOp", "def optimize(self, data, guess=None):\n if self.operator is None:\n print(\"Please set an Linear operator \"\n \"using the SetOperator method.\")\n return\n\n if guess is None:\n guess = np.zeros(\n (\n self.maxit+1,\n self.image_dim,\n self.image_dim\n ),\n dtype=self.DTYPE\n )\n start = time.perf_counter()\n result = self._cg_solve(\n x=guess,\n data=data,\n iters=self.maxit,\n lambd=self.lambd,\n tol=self.tol\n )\n result[~np.isfinite(result)] = 0\n end = time.perf_counter()-start\n print(\"\\n\"+\"-\"*80)\n print(\"Elapsed time: %f seconds\" % (end))\n print(\"-\"*80)\n if self.do_incor:\n result = self.mask*result/self.incor\n return (self.kspace_filter(result), self.res)", "def get_random_start(self):\n arr = np.zeros(self.dimension)\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n arr[:n_fit_p] = self.get_random_fit_parameters\n arr[n_fit_p:n_fit_p+n_nui_p] = self.get_random_nuisance_parameters\n arr[n_fit_p+n_nui_p:] = self.get_random_wilson_coeffs_start\n return arr", "def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()", "def guess_params(no_cultures):\n # C(t=0), N(t=0), S(t=0)\n amounts_guess = [0.2, 0.2, 0.0]\n # kn, ks\n kn_ks_guess = [0.05, 0.15]\n # b, a\n ba_guess = [0.05, 0.05]\n # r\n r_guess = [1.0]\n # Initial guess: C(t=0), N(t=0), S(t=0), kn, ks, r0, b0, a0, r1, b1, a1, ...\n init_guess = np.array(amounts_guess + kn_ks_guess +\n ba_guess + r_guess*no_cultures)\n return init_guess", "def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha", "def step(self):\n if self.Y.shape[0]<self.initial_design_numdata:\n self.suggested_sample = initial_design('random', self.space, 1)\n else:\n self.suggested_sample = self._compute_next_evaluations()\n\n self.X = np.vstack((self.X,self.suggested_sample))\n\n # --- Update current evaluation time and function evaluations\n self.num_acquisitions += 1\n\n if self.verbosity:\n print(\"num acquisition: {}\".format(self.num_acquisitions))\n\n return np.array(self.suggested_sample[0,:])", "def pick_initial_sample(self):\n x = np.atleast_1d(self.init_sample_func())\n return 0, x", "def preferred_init_points(self):\n if self._initial_state is None:\n return None\n else:\n # If an initial state was set by the user, then we want to make sure that the VQE does\n # not start from a 
random point. Thus, we return an all-zero initial point for the\n # optimizer which is used (unless it gets overwritten by a higher-priority setting at\n # runtime of the VQE).\n # However, in order to determine the correct length, we must build the QuantumCircuit\n # first, because otherwise the operators may not be set yet.\n self._build()\n return np.zeros(self.reps * len(self.operators), dtype=float)", "def initial_guess(self, pressure, loading):\n saturation_loading, langmuir_k = super().initial_guess(pressure, loading)\n guess = {\"n_m\": saturation_loading, \"K\": langmuir_k}\n guess = self.initial_guess_bounds(guess)\n return guess" ]
[ "0.7000875", "0.6070277", "0.6045303", "0.6023337", "0.59964377", "0.5964515", "0.5947499", "0.59411687", "0.59016037", "0.58711827", "0.58324164", "0.58324164", "0.575756", "0.57448596", "0.57446176", "0.5737668", "0.57161623", "0.56971306", "0.5694137", "0.5621573", "0.560803", "0.5555678", "0.55278385", "0.55237955", "0.546963", "0.5466885", "0.544746", "0.5441836", "0.54362637", "0.54163426" ]
0.73199725
0
Perform the optimisation using SLSQP. Settings tested vs. validation model.
def do_optimisation(self): print('--> Parameters for optimisation:') print('--> Using measurements : {}'.format(self.stoma_cfg.comparison_helper.optimisation_keys)) print('') x0 = self.initial_guess() tol, eps = 1e-4, 0.001 print('--> Using SLSQP with tol={} and eps={}'.format(tol, eps)) soln = opt.minimize(fun=self.optimise_fn, x0=x0, method='SLSQP', tol=tol, options={'eps': eps}) print('*' * 120) print('--> Optimisation procedure has finished...') print(soln) print('*' * 120) if soln.success: print('--> Optimisation succeeded. Result is...') self._set_material_parameters(soln.x) print('--> {}'.format(self.material_model)) else: print('--> The optimisation failed!') print('*' * 120) return soln
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n 
self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2", "def run_spores(model_data, timings, interface, backend, build_only):\n log_time(\n logger,\n timings,\n \"run_start\",\n comment=\"Backend: starting model run in SPORES mode\",\n )\n\n run_config = UpdateObserverDict(\n initial_yaml_string=model_data.attrs[\"run_config\"],\n name=\"run_config\",\n observer=model_data,\n )\n\n backend_model = backend.generate_model(model_data)\n\n log_time(\n logger,\n timings,\n \"run_backend_model_generated\",\n time_since_run_start=True,\n comment=\"Backend: model generated\",\n )\n\n n_spores = run_config[\"spores_options\"][\"spores_number\"]\n slack = run_config[\"spores_options\"][\"slack\"]\n spores_score = 
run_config[\"spores_options\"][\"score_cost_class\"]\n slack_cost_class = run_config[\"spores_options\"][\"slack_cost_class\"]\n objective_cost_class = run_config[\"spores_options\"][\"objective_cost_class\"]\n\n # Define default scoring function, based on integer scoring method\n # TODO: make the function to run optional\n def _cap_loc_score_default(results, subset=None):\n if subset is None:\n subset = {}\n cap_loc_score = results[\"energy_cap\"].loc[subset]\n cap_loc_score = cap_loc_score.where(cap_loc_score > 1e-3, other=0)\n cap_loc_score = cap_loc_score.where(cap_loc_score == 0, other=100)\n\n return cap_loc_score.to_pandas()\n\n # Define function to update \"spores_score\" after each iteration of the method\n def _update_spores_score(backend_model, cap_loc_score):\n loc_tech_score_dict = {\n (spores_score, i, j): k for (i, j), k in cap_loc_score.stack().items()\n }\n\n interface.update_pyomo_param(\n backend_model, \"cost_energy_cap\", loc_tech_score_dict\n )\n\n def _warn_on_infeasibility():\n return exceptions.warn(\n \"Infeasible SPORE detected. Please check your model configuration. \"\n \"No more SPORES will be generated.\"\n )\n\n def _limit_total_system_costs_constraint_rule(backend_model, cost):\n cost_max = backend_model.cost_max\n\n return (\n sum(\n backend_model.cost[cost, node, tech]\n for [node, tech] in backend_model.nodes * backend_model.techs\n if [cost, node, tech] in backend_model.cost._index\n )\n ) <= cost_max\n\n # Run once for the 'cost-optimal' solution\n results, backend_model = run_plan(\n model_data, run_config, timings, backend, build_only\n )\n if build_only:\n return results, backend_model # We have what we need, so break out of the loop\n\n if results.attrs[\"termination_condition\"] in [\"optimal\", \"feasible\"]:\n results.attrs[\"objective_function_value\"] = backend_model.obj()\n initial_system_cost = backend_model.obj()\n # Storing results and scores in the specific dictionaries\n spores_list = [results]\n cum_scores = _cap_loc_score_default(results)\n # Set group constraint \"cost_max\" equal to slacked cost\n slack_cost = initial_system_cost * (1 + slack)\n backend_model.cost_max = po.Param(\n initialize=slack_cost, mutable=True, within=po.Reals\n )\n backend_model.limit_total_system_costs_constraint = po.Constraint(\n [slack_cost_class],\n rule=_limit_total_system_costs_constraint_rule,\n )\n # Modify objective function weights: spores_score -> 1, all others -> 0\n interface.update_pyomo_param(\n backend_model,\n \"objective_cost_class\",\n objective_cost_class,\n )\n # Update \"spores_score\" based on previous iteration\n _update_spores_score(backend_model, cum_scores)\n else:\n _warn_on_infeasibility()\n return results, backend_model\n\n log_time(\n logger,\n timings,\n \"run_solution_returned\",\n time_since_run_start=True,\n comment=\"Backend: generated solution array for the cost-optimal case\",\n )\n\n # Iterate over the number of SPORES requested by the user\n for _spore in range(0, n_spores):\n results, backend_model = run_plan(\n model_data,\n run_config,\n timings,\n backend,\n build_only,\n backend_rerun=backend_model,\n )\n\n if results.attrs[\"termination_condition\"] in [\"optimal\", \"feasible\"]:\n results.attrs[\"objective_function_value\"] = backend_model.obj()\n # Storing results and scores in the specific dictionaries\n spores_list.append(results)\n cum_scores += _cap_loc_score_default(results)\n # Update \"spores_score\" based on previous iteration\n _update_spores_score(backend_model, cum_scores)\n else:\n 
_warn_on_infeasibility()\n break\n log_time(\n logger,\n timings,\n \"run_solution_returned\",\n time_since_run_start=True,\n comment=\"Backend: generated solution array for the cost-optimal case\",\n )\n # TODO: make this function work with the spores dimension,\n # so that postprocessing can take place in core/model.py, as with run_plan and run_operate\n\n results = xr.concat(spores_list, dim=\"spores\")\n\n return results, backend_model", "def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1", "def optimize(self, return_teacher_params_bool = False):\n\n gen_batches = self.DATASET.data_stream(self.BATCH_SIZE)\n \n num_complete_batches, leftover = divmod(self.DATASET.num_example['train'], self.BATCH_SIZE)\n\n # number of minibatches per epoch\n num_mini_batches_per_epochs = num_complete_batches + bool(leftover)\n\n # number of total iterations\n num_total_iters = self.NUM_EPOCHS * num_mini_batches_per_epochs\n\n # number of time that the sparisty levels get updated\n num_sparsity_updates = num_total_iters // self.MASK_UPDATE_FREQ \n \n mask_update_limit = num_total_iters - self.MASK_UPDATE_FREQ\n \n if self.SAVE_BOOL == True:\n # save the transferred results in the desinated directory.\n\n trans_model_dir = self.unique_model_dir\n\n# while os.path.exists(trans_model_dir):\n# trans_model_dir = trans_model_dir + '_0'\n \n if not os.path.exists(trans_model_dir):\n os.makedirs(trans_model_dir)\n\n np.save(trans_model_dir + '/param_dict.npy', self.param_dict) \n \n \n\n nt_trans_params_all_sparsities_all_runs = []\n nt_trans_masks_all_sparsities_all_runs = []\n nt_trans_vali_all_sparsities_all_runs = []\n teacher_params_all_sparsities_all_runs = []\n \n \n num_sparisty_levels = len(self.NN_DENSITY_LEVEL_LIST) \n num_runs = len(range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ))\n all_density_all_run_num_total_iters = num_sparisty_levels * num_runs * num_total_iters\n \n \n for nn_density_level in self.NN_DENSITY_LEVEL_LIST: \n \n \n nt_trans_params_all_runs = []\n nt_trans_masks_all_runs = []\n nt_trans_vali_all_runs = []\n teacher_params_all_runs = []\n\n\n for run_index in range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ):\n\n # do logging\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # a string that summarizes the current ntt experiment\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n if self.SAVE_BOOL == True:\n model_dir_density_run = trans_model_dir + '/' + 'density_' + str(round(nn_density_level, 2) ) + '/' + 'run_' + str(run_index) + '/'\n\n os.makedirs(model_dir_density_run)\n \n logging.basicConfig(filename = model_dir_density_run + \"/\" + model_summary_str + \"_log.log\", format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n\n else: \n logging.basicConfig(filename = model_summary_str + \"_log.log\" , format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n \n \n # for different run indices, randomly draw teacher net's parameters\n _, teacher_net_params = self.init_fun(random.PRNGKey(run_index), tuple(self.batch_input_shape))\n \n # the prediction of the teacher net evaluated on validation samples\n vali_teacher_prediction = self.apply_fn(teacher_net_params, self.vali_samples)\n\n vali_teacher_ntk_mat = self.emp_ntk_fn(self.vali_inputs_1, self.vali_inputs_2, teacher_net_params) \n\n # the initial binary mask\n \n if self.PRUNE_METHOD == 
'magnitude': \n masks = get_masks_from_jax_params(teacher_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n elif self.PRUNE_METHOD == 'logit_snip':\n logger.info(\"Use logit snip method to get the initial mask\")\n num_examples_snip = 128\n\n# gen_batches_logit_snip = self.DATASET.data_stream(num_examples_snip)\n \n snip_input = self.DATASET.dataset['train']['input'][:num_examples_snip, :]\n \n if self.GLOBAL_PRUNE_BOOL == False:\n logger.warning(\"layerwise sparse net initialized with logit_snip\") \n masks = get_logit_snip_masks(teacher_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n else:\n raise NotImplementedError(\"not implemented\")\n \n\n # the initial student parameters\n masked_student_net_params = get_sparse_params_filtered_by_masks(teacher_net_params, masks)\n\n # instantiate the optimizer triple \n opt_init, opt_update, get_params = self.OPTIMIZER_WITH_PARAMS\n\n opt_state = opt_init(teacher_net_params) \n\n # one step of NTK transfer\n @jit\n def nt_transfer_step(i, opt_state, x, masks):\n\n # parameters in the current optimizer state\n student_net_params = get_params(opt_state)\n\n # gradients that flow through the binary masks\n masked_g = grad(self.nt_transfer_loss)(student_net_params, masks, teacher_net_params, x, nn_density_level)\n\n return opt_update(i, masked_g, opt_state)\n\n # a list of validation loss\n vali_loss_list = []\n\n # calculate the initial validation loss. \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level)\n\n vali_loss_list.append(vali_loss)\n\n logger.info(\"Before transfer: trans dist %.3f | ntk dist %.3f | targ dist %.3f | l2 pentalty %.3f | nn density %.2f\", vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level)\n itercount = itertools.count()\n\n t = time.time()\n\n # loop through iterations\n for num_iter in range(1, num_total_iters + 1): \n \n # a batch of input data\n batch_xs, _ = next(gen_batches) \n\n # reshape the input to a proper format (2d array for MLP and 3d for CNN)\n batch_xs = batch_xs.reshape(self.batch_input_shape) \n\n # update the optimizer state\n opt_state = nt_transfer_step(next(itercount), opt_state, batch_xs, masks )\n\n\n if num_iter % 100 == 0:\n elapsed_time = time.time() - t\n \n if (num_iter <= 500) and (run_index == self.INIT_RUN_INDEX) and (nn_density_level == self.NN_DENSITY_LEVEL_LIST[0]): \n # estimate the program end time.\n remaining_iter_num = all_density_all_run_num_total_iters - num_iter\n remaining_seconds = elapsed_time * ( remaining_iter_num / 100 )\n expected_end_time = str(datetime.now() + timedelta(seconds = remaining_seconds))\n\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n\n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n # validation loss\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n\n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. 
dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s] | expected finish time %s', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time, expected_end_time)\n t = time.time()\n\n\n if (num_iter % self.MASK_UPDATE_FREQ == 0) and (num_iter < mask_update_limit):\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n \n # update masks\n masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n \n# if self.PRUNE_METHOD == 'logit_snip':\n# logit_snip_batch_xs, _ = next(gen_batches_logit_snip)\n# masks = get_logit_snip_masks(student_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n# else:\n# masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n\n\n \n elapsed_time = time.time() - t\n \n student_net_params = get_params(opt_state) \n \n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n \n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s]', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time )\n \n vali_loss_array = np.array(vali_loss_list)\n\n nt_trans_params_all_runs.append(masked_student_net_params)\n nt_trans_masks_all_runs.append(masks)\n nt_trans_vali_all_runs.append(vali_loss_array)\n teacher_params_all_runs.append(teacher_net_params )\n\n if self.SAVE_BOOL == True:\n\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n teacher_param_fileName = model_dir_density_run + 'teacher_params_' + model_summary_str\n np.save(teacher_param_fileName, teacher_net_params)\n\n student_param_fileName = model_dir_density_run + 'transferred_params_' + model_summary_str\n np.save(student_param_fileName, masked_student_net_params)\n\n mask_fileName = model_dir_density_run + 'transferred_masks_' + model_summary_str\n np.save(mask_fileName, masks)\n\n loss_array_fileName = model_dir_density_run + 'loss_array_' + model_summary_str\n np.save(loss_array_fileName, vali_loss_array)\n \n\n nt_trans_params_all_sparsities_all_runs.append( nt_trans_params_all_runs )\n nt_trans_masks_all_sparsities_all_runs.append( nt_trans_masks_all_runs )\n nt_trans_vali_all_sparsities_all_runs.append( nt_trans_vali_all_runs )\n teacher_params_all_sparsities_all_runs.append( teacher_params_all_runs )\n \n if return_teacher_params_bool:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs, teacher_params_all_sparsities_all_runs\n\n else:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs", "def _optimise(self):\n pass", "def _make_spsa_optimizer(self):\n def optimize(maxiter: int = 1000,\n tol = None,\n save_steps: int = 1,\n c0: float = 0.62,\n c1: float = 0.1,\n c2: float = 0.602,\n c3: float = 0.101,\n c4: float = 0):\n 
\"\"\"\n This method is heavily based on qiskits optimizers.spsa method, \n adapted here to worth with on quibs tn's without exact gradients \n\n Parameters\n ----------\n maxiter: Maximum number of iterations to perform.\n tol : None or float stops optim if tol is reached (default none - completes all steps)\n save_steps: Save intermediate info every save_steps step. It has a min. value of 1.\n last_avg: Averaged parameters over the last_avg iterations.\n If last_avg = 1, only the last iteration is considered. It has a min. value of 1.\n c0: The initial a. Step size to update parameters.\n c1: The initial c. The step size used to approximate gradient.\n c2: The alpha in the paper, and it is used to adjust a (c0) at each iteration.\n c3: The gamma in the paper, and it is used to adjust c (c1) at each iteration.\n c4: The parameter used to control a as well.\n \n Returns\n -------\n TYPE : updated object? (same return as TNOptimize)\n \"\"\"\n _spsa_vars = [c0, c1, c2, c3, c4]\n theta = self.vectorizer.vector\n nb_params = len(theta)\n use_exact_grads = 'grads' in self._method\n \n if save_steps:\n theta_vec = [theta]\n cost_vec = [self.vectorized_value_and_grad(theta)[0]]\n \n \n pbar = tqdm(total=maxiter, disable=not self.progbar)\n def callback(_):\n pbar.clear()\n pbar.update()\n val = round(self.loss, 5)\n pbar.set_description(str(val))\n\n if self.loss_target is not None:\n if self.loss < self.loss_target:\n # returning True doesn't terminate optimization\n pbar.close()\n raise KeyboardInterrupt\n \n for ii in range(maxiter):\n \n a_spsa = float(_spsa_vars[0]) / ((ii + 1 + _spsa_vars[4])**_spsa_vars[2])\n c_spsa = float(_spsa_vars[1]) / ((ii + 1)**_spsa_vars[3])\n delta = 2 * randint(0, 2, size=nb_params) - 1\n # plus and minus directions\n \n if use_exact_grads:\n raise NotImplementedError('Will use grad calc to project on to SP-direction')\n else:\n theta_plus = theta + c_spsa * delta\n theta_minus = theta - c_spsa * delta\n\n cost_plus = self.vectorized_value_and_grad(theta_plus)[0]\n cost_minus = self.vectorized_value_and_grad(theta_minus)[0]\n # derivative estimate\n g_spsa = (cost_plus - cost_minus) * delta / (2.0 * c_spsa)\n # updated theta\n theta = theta - a_spsa * g_spsa\n \n callback(ii)\n \n if tol is not None:\n if (cost_plus + cost_minus)/2 < tol:\n pbar.close()\n break\n \n if save_steps:\n theta_vec.append(theta)\n cost_vec.append(cost_plus/2+cost_minus/2)\n \n \n result_dict = {'hyper_parameters':_spsa_vars,\n 'maxiter':maxiter,\n 'theta_opt':theta,\n 'cost_opt':self.vectorized_value_and_grad(theta)[0],\n 'grad_opt':self.vectorized_value_and_grad(theta)[1]}\n if save_steps:\n result_dict['theta_history'] = theta_vec\n result_dict['cost_history'] = cost_vec\n self.result_dict = result_dict\n pbar.close()\n\n return self.inject_res_vector_and_return_tn()\n return optimize", "def test_sparsity(config):\n total_zeros = 0\n total_nonzeros = 0\n\n print ('<===sparsity type is {}'.format(config.sparsity_type))\n print ('<===layers to be pruned are {}'.format(config._prune_ratios))\n if config.masked_progressive and (config.sparsity_type == 'filter' or config.sparsity_type =='column'or config.sparsity_type == \"bn_filter\" ):\n ### test both column and row sparsity\n print (\"***********checking column sparsity*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = 
np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print (\"***********checking filter sparsity*************\") \n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print (\"************checking overall sparsity in conv layers*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy() \n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n return\n \n if config.sparsity_type == \"irregular\":\n for name,W in config.model.named_parameters():\n if 'bias' in name:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n total_zeros+=zeros\n nonzeros = np.sum(W!=0)\n total_nonzeros+=nonzeros\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))\n total_weight_number = total_zeros+total_nonzeros\n print ('overal compression rate is {}'.format(total_weight_number/total_nonzeros))\n elif config.sparsity_type == \"column\":\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros)) \n elif config.sparsity_type == \"filter\":\n print ('inside if')\n print (config.prune_ratios)\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n elif config.sparsity_type == \"bn_filter\":\n print ('inside bn_filter')\n print (config.prune_ratios)\n for i,(name,W) in enumerate(config.model.named_parameters()):\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n nonzeros = np.sum(W!=0)\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))", "def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = 
self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n return out.params.valuesdict()", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': 
hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def optim_solve(\n self, x0: devices.PrimaryWeights = None, global_search: bool = False, **kwargs\n ) -> scipy.optimize.OptimizeResult:\n print(f'{\" optim_solve \":~^60s}')\n self._assert_problem_is_valid()\n if self._background is None:\n bounds = self.bounds * 2\n print(\"> No background specified, will optimise background.\")\n else:\n bounds = self.bounds\n\n if np.inf in self._target_contrast:\n print(\"> Aiming to maximise contrast.\")\n\n elif -np.inf in self._target_contrast:\n print(\"> Aiming to minimize contrast.\")\n\n constraints = [\n {\"type\": \"eq\", \"fun\": self.silencing_constraint, \"tol\": 1e-04}\n ]\n\n if x0 is None:\n x0 = self.initial_guess_x0()\n \n if not global_search: # Local minimization\n\n default_options = {\"iprint\": 2, \"disp\": True, \"ftol\": 1e-08}\n options = kwargs.pop(\"options\", default_options)\n\n print(\"> Performing local optimization with SLSQP.\")\n result = scipy.optimize.minimize(\n fun=self.objective_function,\n x0=x0,\n method=\"SLSQP\",\n bounds=bounds,\n constraints=constraints,\n options=options,\n **kwargs,\n )\n\n elif global_search: # Global minimization\n print(\n \"> Performing global optimization with basinhopping and SLSQP\"\n )\n\n # Configure global defaults\n disp = kwargs.pop(\"disp\", True)\n # Configure local defaults\n default_minimizer_kwargs = {\n \"method\": \"SLSQP\",\n \"constraints\": constraints,\n \"bounds\": bounds,\n \"options\": {\"iprint\": 2, \"disp\": False},\n }\n minimizer_kwargs = kwargs.pop(\n \"minimizer_kwargs\", default_minimizer_kwargs\n )\n\n # Do optimization\n result = scipy.optimize.basinhopping(\n func=self.objective_function,\n x0=x0,\n minimizer_kwargs=minimizer_kwargs,\n disp=disp,\n **kwargs,\n )\n\n return result", "def fit(self):\n if self.cost_func.hessian:\n self._popt = ral_nlls.solve(self.initial_params,\n self.cost_func.eval_r,\n self.cost_func.jac_res,\n self.hes_eval,\n options=self._options,\n lower_bounds=self.param_ranges[0],\n upper_bounds=self.param_ranges[1])[0]\n else:\n self._popt = ral_nlls.solve(self.initial_params,\n self.cost_func.eval_r,\n self.cost_func.jac_res,\n options=self._options,\n lower_bounds=self.param_ranges[0],\n upper_bounds=self.param_ranges[1])[0]\n self._status = 0 if self._popt is not None else 1", "def propose_optimize():\n pass", "def sparse_options(default_solver='spsolve',\n default_least_squares_solver='least_squares_lsmr' if HAVE_SCIPY_LSMR else 'least_squares_generic_lsmr',\n bicgstab_tol=1e-15,\n bicgstab_maxiter=None,\n spilu_drop_tol=1e-4,\n spilu_fill_factor=10,\n spilu_drop_rule='basic,area',\n spilu_permc_spec='COLAMD',\n spsolve_permc_spec='COLAMD',\n spsolve_keep_factorization=True,\n 
lgmres_tol=1e-5,\n lgmres_maxiter=1000,\n lgmres_inner_m=39,\n lgmres_outer_k=3,\n least_squares_lsmr_damp=0.0,\n least_squares_lsmr_atol=1e-6,\n least_squares_lsmr_btol=1e-6,\n least_squares_lsmr_conlim=1e8,\n least_squares_lsmr_maxiter=None,\n least_squares_lsmr_show=False,\n least_squares_lsqr_damp=0.0,\n least_squares_lsqr_atol=1e-6,\n least_squares_lsqr_btol=1e-6,\n least_squares_lsqr_conlim=1e8,\n least_squares_lsqr_iter_lim=None,\n least_squares_lsqr_show=False,\n pyamg_tol=1e-5,\n pyamg_maxiter=400,\n pyamg_verb=False,\n pyamg_rs_strength=('classical', {'theta': 0.25}),\n pyamg_rs_CF='RS',\n pyamg_rs_presmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_postsmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_max_levels=10,\n pyamg_rs_max_coarse=500,\n pyamg_rs_coarse_solver='pinv2',\n pyamg_rs_cycle='V',\n pyamg_rs_accel=None,\n pyamg_rs_tol=1e-5,\n pyamg_rs_maxiter=100,\n pyamg_sa_symmetry='hermitian',\n pyamg_sa_strength='symmetric',\n pyamg_sa_aggregate='standard',\n pyamg_sa_smooth=('jacobi', {'omega': 4.0/3.0}),\n pyamg_sa_presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_improve_candidates=[('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 4}), None],\n pyamg_sa_max_levels=10,\n pyamg_sa_max_coarse=500,\n pyamg_sa_diagonal_dominance=False,\n pyamg_sa_coarse_solver='pinv2',\n pyamg_sa_cycle='V',\n pyamg_sa_accel=None,\n pyamg_sa_tol=1e-5,\n pyamg_sa_maxiter=100):\n\n assert default_least_squares_solver.startswith('least_squares')\n\n opts = (('bicgstab_spilu', {'type': 'bicgstab_spilu',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter,\n 'spilu_drop_tol': spilu_drop_tol,\n 'spilu_fill_factor': spilu_fill_factor,\n 'spilu_drop_rule': spilu_drop_rule,\n 'spilu_permc_spec': spilu_permc_spec}),\n ('bicgstab', {'type': 'bicgstab',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter}),\n ('spsolve', {'type': 'spsolve',\n 'permc_spec': spsolve_permc_spec,\n 'keep_factorization': spsolve_keep_factorization}),\n ('lgmres', {'type': 'lgmres',\n 'tol': lgmres_tol,\n 'maxiter': lgmres_maxiter,\n 'inner_m': lgmres_inner_m,\n 'outer_k': lgmres_outer_k}),\n ('least_squares_lsqr', {'type': 'least_squares_lsqr',\n 'damp': least_squares_lsqr_damp,\n 'atol': least_squares_lsqr_atol,\n 'btol': least_squares_lsqr_btol,\n 'conlim': least_squares_lsqr_conlim,\n 'iter_lim': least_squares_lsqr_iter_lim,\n 'show': least_squares_lsqr_show}))\n\n if HAVE_SCIPY_LSMR:\n opts += (('least_squares_lsmr', {'type': 'least_squares_lsmr',\n 'damp': least_squares_lsmr_damp,\n 'atol': least_squares_lsmr_atol,\n 'btol': least_squares_lsmr_btol,\n 'conlim': least_squares_lsmr_conlim,\n 'maxiter': least_squares_lsmr_maxiter,\n 'show': least_squares_lsmr_show}),)\n\n if HAVE_PYAMG:\n opts += (('pyamg', {'type': 'pyamg',\n 'tol': pyamg_tol,\n 'maxiter': pyamg_maxiter}),\n ('pyamg-rs', {'type': 'pyamg-rs',\n 'strength': pyamg_rs_strength,\n 'CF': pyamg_rs_CF,\n 'presmoother': pyamg_rs_presmoother,\n 'postsmoother': pyamg_rs_postsmoother,\n 'max_levels': pyamg_rs_max_levels,\n 'max_coarse': pyamg_rs_max_coarse,\n 'coarse_solver': pyamg_rs_coarse_solver,\n 'cycle': pyamg_rs_cycle,\n 'accel': pyamg_rs_accel,\n 'tol': pyamg_rs_tol,\n 'maxiter': pyamg_rs_maxiter}),\n ('pyamg-sa', {'type': 'pyamg-sa',\n 'symmetry': pyamg_sa_symmetry,\n 'strength': pyamg_sa_strength,\n 'aggregate': pyamg_sa_aggregate,\n 'smooth': pyamg_sa_smooth,\n 'presmoother': pyamg_sa_presmoother,\n 'postsmoother': 
pyamg_sa_postsmoother,\n 'improve_candidates': pyamg_sa_improve_candidates,\n 'max_levels': pyamg_sa_max_levels,\n 'max_coarse': pyamg_sa_max_coarse,\n 'diagonal_dominance': pyamg_sa_diagonal_dominance,\n 'coarse_solver': pyamg_sa_coarse_solver,\n 'cycle': pyamg_sa_cycle,\n 'accel': pyamg_sa_accel,\n 'tol': pyamg_sa_tol,\n 'maxiter': pyamg_sa_maxiter}))\n opts = OrderedDict(opts)\n opts.update(genericsolvers.options())\n def_opt = opts.pop(default_solver)\n if default_least_squares_solver != default_solver:\n def_ls_opt = opts.pop(default_least_squares_solver)\n ordered_opts = OrderedDict(((default_solver, def_opt),\n (default_least_squares_solver, def_ls_opt)))\n else:\n ordered_opts = OrderedDict(((default_solver, def_opt),))\n ordered_opts.update(opts)\n return ordered_opts", "def main(**kwargs):\n flowsheet = Flowsheet(name='MB_Model') \n \n # Fix variables\n setInputs(flowsheet) \n\n ts = time.time() \n\n mb = flowsheet.MB_fuel\n \n # Initialize fuel reactor\n flowsheet.MB_fuel._initialize(outlvl=1,\n optarg={\"tol\" : 1e-8,\n \"max_cpu_time\" : 600,\n \"print_level\" : 5,\n \"halt_on_ampl_error\": 'yes'}) \n \n # Create a solver\n opt = SolverFactory('ipopt')\n opt.options = {'tol': 1e-8,\n 'linear_solver' : 'ma27',\n 'bound_push': 1e-8,\n 'max_cpu_time': 600,\n 'print_level': 5}\n \n results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n keepfiles=False)\n\n #flowsheet.MB_fuel.Solid_In_M.fix(691.4)\n #flowsheet.MB_fuel.Gas_In_y['CO2'].fix(0.03999)\n #flowsheet.MB_fuel.Gas_In_y['H2O'].fix(0.00001)\n #flowsheet.MB_fuel.Gas_In_y['CH4'].fix(0.96)\n\n\n\n #results = opt.solve(flowsheet,tee=True,symbolic_solver_labels=False,\n # keepfiles=False)\n \n \n print(\"\\n\")\n print(\"----------------------------------------------------------\")\n print('Total simulation time: ', value(time.time() - ts), \" s\")\n print(\"----------------------------------------------------------\")\n\n \n # Print some variables \n #print_summary_fuel_reactor(flowsheet) \n\n # Plot some variables \n #results_plot_fuel_reactor(flowsheet) \n\n m = flowsheet.MB_fuel\n if 'Solid_M' in kwargs:\n m.Solid_In_M.fix(kwargs['Solid_M'])\n if 'Solid_T' in kwargs:\n m.Solid_In_Ts[t].fix(kwargs['Solid_T'])\n if 'Solid_x' in kwargs:\n m.Solid_In_x['Fe2O3'].fix(kwargs['Solid_x']['Fe2O3'])\n m.Solid_In_x['Fe3O4'].fix(kwargs['Solid_x']['Fe3O4'])\n m.Solid_In_x['Al2O3'].fix(kwargs['Solid_x']['Al2O3'])\n if 'Gas_F' in kwargs:\n m.Gas_In_F.fix(kwargs['Gas_F'])\n if 'Gas_P' in kwargs:\n m.Gas_In_P.fix(kwargs['Gas_P'])\n if 'Gas_T' in kwargs:\n m.Gas_In_T.fix(kwargs['Gas_T'])\n if 'Gas_y' in kwargs:\n m.Gas_In_y['CO2'].fix(kwargs['Gas_y']['CO2'])\n m.Gas_In_y['H2O'].fix(kwargs['Gas_y']['H2O'])\n m.Gas_In_y['CH4'].fix(kwargs['Gas_y']['CH4'])\n\n results = opt.solve(flowsheet, tee=True)\n\n with open('ss_fs.txt','w') as f:\n flowsheet.display(ostream=f)\n\n dt_Gflux_CO2 = []\n dt_Gflux_H2O = []\n dt_Gflux_CH4 = []\n dt_Sflux_Fe2O3 = []\n dt_Sflux_Fe3O4 = []\n dt_Sflux_Al2O3 = []\n dt_Ctrans_CO2 = []\n dt_Ctrans_H2O = []\n dt_Ctrans_CH4 = []\n dt_qtrans_Fe2O3 = []\n dt_qtrans_Fe3O4 = []\n dt_qtrans_Al2O3 = []\n dt_Ghflux = []\n dt_Ts = []\n dt_TgGS = []\n dt_TsGS = []\n dt_vg = []\n dt_vs = []\n\n# for z in mb.z.get_finite_elements():\n# if z != mb.z.first() and z != mb.z.last():\n#\n# dt_Gflux_CO2.append( (mb.Cg[z,'CO2'].value-mb.Cg[prev,'CO2'].value)/\\\n# (mb.G_flux[z,'CO2'].value-mb.G_flux[prev,'CO2'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Gflux_H2O.append( 
(mb.Cg[z,'H2O'].value-mb.Cg[prev,'H2O'].value)/\\\n# (mb.G_flux[z,'H2O'].value-mb.G_flux[prev,'H2O'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Gflux_CH4.append( (mb.Cg[z,'CH4'].value-mb.Cg[prev,'CH4'].value)/\\\n# (mb.G_flux[z,'CH4'].value-mb.G_flux[prev,'CH4'].value) \\\n# *(z-prev)*mb.eps.value*mb.L.value /(z-prev))\n#\n# dt_Ctrans_CO2.append( (mb.Cg[z,'CO2'].value-mb.Cg[prev,'CO2'].value)/\\\n# (mb.Ctrans[z,'CO2'].value)* \\\n# #-mv.Ctrans[prev,'CO2'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Ctrans_H2O.append( (mb.Cg[z,'H2O'].value-mb.Cg[prev,'H2O'].value)/\\\n# (mb.Ctrans[z,'H2O'].value)* \\\n# #-mv.Ctrans[prev,'H2O'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Ctrans_CH4.append( (mb.Cg[z,'CH4'].value-mb.Cg[prev,'CH4'].value)/\\\n# (mb.Ctrans[z,'CH4'].value)* \\\n# #-mv.Ctrans[prev,'CH4'].value)*\\\n# mb.eps.value/(1-mb.eps.value) /(z-prev))\n#\n# dt_Sflux_Fe2O3.append( (mb.q[z,'Fe2O3'].value-mb.q[prev,'Fe2O3'].value)/\\\n# (mb.S_flux[z,'Fe2O3'].value-mb.S_flux[prev,'Fe2O3'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_Sflux_Fe3O4.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.S_flux[z,'Fe3O4'].value-mb.S_flux[prev,'Fe3O4'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_Sflux_Al2O3.append( (mb.q[z,'Al2O3'].value-mb.q[prev,'Al2O3'].value)/\\\n# (mb.S_flux[z,'Al2O3'].value-mb.S_flux[prev,'Al2O3'].value)*\\\n# (z-prev)/(1-mb.eps.value)*mb.L.value /(z-prev))\n#\n# dt_qtrans_Fe2O3.append( (mb.q[z,'Fe2O3'].value-mb.q[prev,'Fe2O3'].value)/\\\n# (mb.qtrans[z,'Fe2O3'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe2O3'].value) )\n#\n# dt_qtrans_Fe3O4.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.qtrans[z,'Fe3O4'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe3O4'].value) )\n#\n# dt_qtrans_Al2O3.append( (mb.q[z,'Fe3O4'].value-mb.q[prev,'Fe3O4'].value)/\\\n# (mb.qtrans[z,'Fe3O4'].value )/(z-prev)) \n# #-mb.qtrans[prev,'Fe3O4'].value) )\n#\n# dt_Ghflux.append( (mb.Tg[z].value-mb.Tg[prev].value)/\\\n# (mb.Gh_flux[z].value-mb.Gh_flux[prev].value)* (z-prev)* mb.eps.value*\\\n# mb.L.value* mb.rho_vap[z].value* mb.cp_gas[z].value /(z-prev)) \n#\n# dt_Ts.append( (z-prev)*(1-mb.eps.value)*mb.L.value/mb.vs.value /(z-prev))\n#\n# dt_TgGS.append( (mb.Tg[z].value - mb.Tg[prev].value)/\\\n# mb.Tg_GS[z].value* mb.eps.value* mb.rho_vap[z].value* mb.cp_gas[z].value \n# /(z-prev))\n# \n# dt_TsGS.append( (mb.Ts[z].value - mb.Ts[prev].value)/\\\n# mb.Tg_GS[z].value* (1-mb.eps.value)* mb.rho_sol.value* mb.cp_sol[z].value*1e-3 \n# /(z-prev))\n# \n# dt_vg.append( mb.L.value*(z-prev)/mb.vg[z].value /(z-prev))\n# \n# dt_vs.append( mb.L.value*(z-prev)/mb.vs.value /(z-prev))\n#\n# prev = z\n#\n# with open('dt.txt','w') as f:\n# f.write('dt_Gflux_CO2\\t')\n# for t in dt_Gflux_CO2:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Gflux_H2O\\t')\n# for t in dt_Gflux_H2O:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Gflux_CH4\\t') \n# for t in dt_Gflux_CH4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Fe2O3\\t') \n# for t in dt_Sflux_Fe2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Fe3O4\\t') \n# for t in dt_Sflux_Fe3O4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Sflux_Al2O3\\t') \n# for t in dt_Sflux_Al2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ctrans_CO2\\t') \n# for t in dt_Ctrans_CO2:\n# f.write('%1.3f'%t +'\\t')\n# \n# f.write('\\ndt_Ctrans_H2O\\t') \n# for t in dt_Ctrans_H2O:\n# f.write('%1.3f'%t +'\\t')\n#\n# 
f.write('\\ndt_Ctrans_CH4\\t') \n# for t in dt_Ctrans_CH4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Fe2O3\\t') \n# for t in dt_qtrans_Fe2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Fe3O4\\t') \n# for t in dt_qtrans_Fe3O4:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_qtrans_Al2O3\\t') \n# for t in dt_qtrans_Al2O3:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ghflux\\t') \n# for t in dt_Ghflux:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_Ts\\t\\t') \n# for t in dt_Ts:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_TgGS\\t\\t') \n# for t in dt_TgGS:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_TsGS\\t\\t') \n# for t in dt_TsGS:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_vg\\t\\t') \n# for t in dt_vg:\n# f.write('%1.3f'%t +'\\t')\n#\n# f.write('\\ndt_vs\\t\\t') \n# for t in dt_vs:\n# f.write('%1.3f'%t +'\\t')\n\n\n # Store the flowsheet \n return flowsheet", "def setup(self, use_warm_start, settings={}):\n assert self.opt_type in QP_COST, \"OSQP cannot solve this type of problem\"\n self.use_warm_start = use_warm_start\n self._setup_input = settings\n if self.opt_type in CONSTRAINED_OPT:\n self._setup_input[\"u\"] = np.inf * np.ones(self.opt.nk + self.opt.na)\n self._reset_parameters()\n return self", "def __init__(self, settings,study):\n \n # Store the study #\n ###################\n \n self._study = study\n self._parameters_size = self._study.geometry.parameters_size\n \n # Read settings #\n ################# \n if hasattr(settings, 'global_sample_function'):\n # Use given function and ignore bounds\n self._global_sample_function = settings.global_sample_function\n self._global_parameters_bounds = None\n else:\n # If no function, use uniform rand with given boundaries if provided. If not, assume [0,1]\n if hasattr(settings, 'global_parameters_bounds'):\n self._global_parameters_bounds = np.array(settings.global_parameters_bounds)\n else:\n self._global_parameters_bounds = [(0, 1)]*self._parameters_size\n \n self._global_sample_function = lambda: self._global_parameters_bounds[:,0] + (self._global_parameters_bounds[:,1]-self._global_parameters_bounds[:,0])*np.random.rand(1,self._parameters_size).flatten()\n \n\n if hasattr(settings, 'global_result_constraint'):\n self._global_result_constraint = settings.global_result_constraint\n else:\n self._global_result_constraint = None \n \n if hasattr(settings, 'local_result_constraint'):\n self._local_result_constraint = settings.local_result_constraint\n else:\n self._local_result_constraint = None\n \n if hasattr(settings, 'local_max_iterations'):\n self._local_max_iterations = settings.local_max_iterations\n else:\n self._local_max_iterations = 50\n \n if hasattr(settings, 'local_method'):\n self._local_method = settings.local_method\n else:\n self._local_method = 'L-BFGS-B'\n \n if hasattr(settings, 'local_scaling_factor'):\n self._local_scaling_factor = settings.local_scaling_factor\n else:\n self._local_scaling_factor = 1\n \n if hasattr(settings, 'local_ftol'):\n self._local_ftol = settings.local_ftol\n else:\n self._local_ftol = 1e-5\n \n if hasattr(settings, 'local_pgtol'):\n self._local_pgtol = settings.local_pgtol\n else:\n self._local_pgtol = 1e-5\n \n # Wavelength settings for lumopt \n if hasattr(settings, 'local_wavelength_start'):\n self._local_wavelength_start = settings.local_wavelength_start\n else:\n self._local_wavelength_start = 1550e-9\n \n if hasattr(settings, 'local_wavelength_stop'):\n self._local_wavelength_stop = settings.local_wavelength_stop\n else:\n 
self._local_wavelength_stop = 1550e-9\n \n if hasattr(settings, 'local_wavelength_points'):\n self._local_wavelength_points = settings.local_wavelength_points\n else:\n self._local_wavelength_points = 1\n \n # Keep track of the latest random restart. Run a first simulation with\n # the initial parameters already stored in the geometry\n self._new_param = None", "def solve_model(self): \n \n t0 = time.time() #start the clock\n \n # a. benchmark case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(1)\n \n #ii. set policy. in RR08 the benchmark economy has no taxes nor subsidies\n self.tau_benchmark = np.array([0, 0, 0]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(self.tau_benchmark) #set tax system\n \n #iii. benchmark equilibrium\n self.Yss_b, self.Kss_b, self.TFPss_b, self.average_firm_size_b, self.E_star_b, _, \\\n _, self.N_ss_b, self.w_ss_b, self.cdf_stationary_b, self.cdf_emp_b = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Benchmark Stationary Equilibrium\")\n print(\"-----------------------------------------\")\n print(f\"ss output = {self.Yss_b:.2f}\")\n print(f\"ss capital = {self.Kss_b:.2f}\")\n print(f\"ss tfp = {self.TFPss_b:.2f}\")\n print(f\"ss wage = {self.w_ss_b:.2f}\")\n print(f\"entry mass = {self.E_star_b:.3f}\")\n print(f\"avg. firm size = {self.average_firm_size_b:.2f}\")\n \n #b. plot (note that the distributions plotted here are unaffected by the distortionary policies)\n \n if self.plott:\n #i. initialize\n employed = [4.99, 49.99]\n firm_size_by_employee = np.zeros(len(employed)+1)\n share_employment = np.zeros(len(employed)+1)\n \n \n #i. percentage of firms that employ employed\n \n for i_e in range(len(employed)):\n summ = np.sum(firm_size_by_employee)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_stationary_b, employed[i_e])[0] #labor_demand_rel is labor demand with the lowest value normalized to 1\n firm_size_by_employee[i_e] = interpolate - summ\n firm_size_by_employee[-1] = 1 - np.sum(firm_size_by_employee)\n \n plt.pie(firm_size_by_employee, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Size of Firms by Firm Size (Number of Employees)')\n plt.savefig('firm_size_rr08.pdf')\n plt.show()\n \n \n #ii. employment percentage by firm size\n for i_e in range(len(employed)):\n summ = np.sum(share_employment)\n interpolate = self.interpol(self.labor_demand_rel, self.cdf_emp_b , employed[i_e])[0]\n share_employment[i_e] = interpolate - summ\n share_employment[-1] = 1 - np.sum(share_employment)\n \n plt.pie(share_employment, labels=['<5','5<50','50 =<'], autopct=\"%.1f%%\")\n plt.title('Employment Share by Firm Size (Number of Employees)')\n plt.savefig('employment_by_firm_size_rr08.pdf')\n plt.show()\n \n #iii. productivity cdf and employment cdf\n plt.plot(self.grid_s, self.cdf_stationary_b)\n plt.plot(self.grid_s, self.cdf_emp_b)\n plt.title('Stationary CDF' )\n plt.xlabel('Productivity level')\n plt.ylabel('Cumulative Sum')\n plt.legend(['Firms by Productivity Level','Share of Employment'])\n plt.savefig('cdf_rr08.pdf')\n plt.show()\n \n \n \n #c. distortion case\n \n #i. joint pdf of productivity state and tau \n self.make_joint_pdf(0)\n \n #ii. compute stationary economy for each tau\n \n for idx, tau in enumerate(self.tau_vector):\n \n #iii. 
find the subsidy rate that generates the same capital stock as in benchmark economy\n self.tau_s[idx] = self.find_subsidy_rate(tau)\n \n # set tax system with newly found tau_s and given tau\n tauv = np.array([-self.tau_s[idx], self.excempt_frac, tau]) #subsidy rate, excempt rate, tax rate \n self.set_tax_system(tauv) #set tax system\n \n #v. distorted stationary equilibrium\n self.Yss_d[idx], self.Kss_d[idx], self.TFPss_d[idx], self.average_firm_size_d[idx], self.E_star_d[idx], \\\n self.Y_set_d[idx,:], self.subsidy_size_d[idx], self.N_ss_d[idx], self.w_ss_d[idx],\\\n _, _ = self.solve_stationary_equilibrium()\n \n print(\"\\n-----------------------------------------\")\n print(\"Distorted Stationary Equilibrium\")\n print(\"-----------------------------------------\\n\")\n if self.distortion_case == 1:\n print(\"Tax/Subidy Uncorrelated with Firm Level Producitivity\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Negatively Correlated with Firm Level Producitivity\")\n print(\"(low productivity firms recieve subsidy, high productivity taxed)\\n\")\n elif self.distortion_case == 2:\n print(\"Tax/Subidy Positively Correlated with Firm Level Producitivity\")\n print(\"(high productivity firms recieve subsidy, low productivity taxed)\\n\")\n if self.policy_type == 1 :\n print(\"Tax Type: Tax on output\\n\")\n elif self.policy_type == 2 :\n print(\"Tax Type: Tax on capital\\n\")\n elif self.policy_type == 3 :\n print(\"Tax Type: Tax on labor\\n\")\n print(f\"fraction of firms recieving subsidy = {self.subsidy_frac:.2f}\")\n print(f\"fraction of firms taxed = {1-self.subsidy_frac-self.excempt_frac:.2f}\")\n print(f\"fraction of firms excempt = {self.excempt_frac:.2f}\")\n print(\"-----------------------------------------\\n\")\n \n print(tabulate([['relative Yss', round(self.Yss_d[0]/self.Yss_b, 2), round(self.Yss_d[1]/self.Yss_b, 2), round(self.Yss_d[2]/self.Yss_b, 2), round(self.Yss_d[3]/self.Yss_b, 2)],\n ['relative TFPss', round(self.TFPss_d[0]/self.TFPss_b, 2), round(self.TFPss_d[1]/self.TFPss_b, 2), round(self.TFPss_d[2]/self.TFPss_b, 2), round(self.TFPss_d[3]/self.TFPss_b, 2)], \n ['relative entry mass', round(self.E_star_d[0]/self.E_star_b, 2), round(self.E_star_d[1]/self.E_star_b, 2), round(self.E_star_d[2]/self.E_star_b, 2), round(self.E_star_d[3]/self.E_star_b, 2)],\n ['share of subsidized output', round(self.Y_set_d[0,0], 2), round(self.Y_set_d[1,0], 2), round(self.Y_set_d[2,0], 2), round(self.Y_set_d[3,0], 2)],\n ['total subsidy paid of output', round(self.subsidy_size_d[0], 2), round(self.subsidy_size_d[1], 2), round(self.subsidy_size_d[2], 2), round(self.subsidy_size_d[3], 2)],\n ['subsidy rate (tau_s)', round(self.tau_s[0], 2), round(self.tau_s[1], 2), round(self.tau_s[2], 2), round(self.tau_s[3], 2)],\n [], \n ['relative Kss', round(self.Kss_d[0]/self.Kss_b, 2), round(self.Kss_d[1]/self.Kss_b, 2), round(self.Kss_d[2]/self.Kss_b, 2), round(self.Kss_d[3]/self.Kss_b, 2)], \n ['relative wss', round(self.w_ss_d[0]/self.w_ss_b, 2), round(self.w_ss_d[1]/self.w_ss_b, 2), round(self.w_ss_d[2]/self.w_ss_b, 2), round(self.w_ss_d[3]/self.w_ss_b, 2)], \n ['relative Nss', round(self.N_ss_d[0]/self.N_ss_b, 2), round(self.N_ss_d[1]/self.N_ss_b, 2), round(self.N_ss_d[2]/self.N_ss_b, 2), round(self.N_ss_d[3]/self.N_ss_b, 2)], \n ['relative avg. 
firm size', round(self.average_firm_size_d[0]/self.average_firm_size_b, 2), round(self.average_firm_size_d[1]/self.average_firm_size_b, 2), round(self.average_firm_size_d[2]/self.average_firm_size_b, 2), round(self.average_firm_size_d[3]/self.average_firm_size_b, 2)]],\n headers=['Variable', 'Tax = '+str(self.tau_vector[0]), \"Tax = \"+str(self.tau_vector[1]), 'Tax = '+str(self.tau_vector[2]), 'Tax = '+str(self.tau_vector[3])]))\n \n\n t1 = time.time()\n print(f'\\nTotal Run Time: {t1-t0:.2f} seconds')", "def solve(self):\n start = timer()\n # encode into milp\n me = MILPEncoder(MILPSolver.prob,\n MILPSolver.params.logger.LOGFILE, \n MILPSolver.params.INTRA_DEP_CONSTRS,\n MILPSolver.params.INTER_DEP_CONSTRS)\n if MILPSolver.lp == True:\n gmodel = me.lp_encode()\n else:\n gmodel = me.encode()\n # Set gurobi parameters\n pgo = 1 if MILPSolver.params.PRINT_GUROBI_OUTPUT == True else 0\n gmodel.setParam('OUTPUT_FLAG', pgo)\n tl = MILPSolver.params.TIMEOUT\n if tl != -1 : gmodel.setParam('TIME_LIMIT', tl)\n if not MILPSolver.params.DEFAULT_CUTS: \n MILPSolver.disable_default_cuts(gmodel)\n gmodel._vars = gmodel.getVars()\n # set callback cuts \n MILPSolver.id_form = IdealFormulation(MILPSolver.prob,\n gmodel, \n MILPSolver.params.IDEAL_FREQ,\n MILPSolver.params.logger.LOGFILE)\n MILPSolver.dep_cuts = DepCuts(MILPSolver.prob,\n gmodel,\n MILPSolver.params.DEP_FREQ,\n MILPSolver.params.INTRA_DEP_CUTS,\n MILPSolver.params.INTER_DEP_CUTS,\n MILPSolver.sip_params,\n MILPSolver.params.logger.LOGFILE)\n # Optimise\n if MILPSolver.params.callback_enabled() and MILPSolver.lp == False:\n gmodel.optimize(MILPSolver._callback)\n else:\n gmodel.optimize()\n\n runtime = timer() - start\n cex = None \n if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:\n result = SolveResult.BRANCH_THRESHOLD\n elif gmodel.status == GRB.OPTIMAL:\n cex_shape = MILPSolver.prob.spec.input_layer.input_shape\n cex = np.zeros(cex_shape)\n for i in itertools.product(*[range(j) for j in cex_shape]):\n cex[i] = MILPSolver.prob.spec.input_layer.out_vars[i].x\n result = SolveResult.UNSATISFIED\n elif gmodel.status == GRB.TIME_LIMIT:\n result = SolveResult.TIMEOUT\n elif gmodel.status == GRB.INTERRUPTED:\n result = SolveResult.INTERRUPTED\n elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:\n result = SolveResult.SATISFIED\n else:\n result = SolveResult.UNKNOWN\n \n # MILPSolver.logger.info('Verification problem {} solved, '\n # 'LP: {}, '\n # 'time: {:.2f}, '\n # 'result: {}.'\n # .format(MILPSolver.prob.id,\n # MILPSolver.lp,\n # runtime,\n # result.value))\n \n return SolveReport(result, runtime, cex)", "def setup(\n self,\n method: str = \"SLSQP\",\n tol: Union[None, float] = None,\n options: Union[None, Dict] = None,\n ):\n\n # Input check\n if self.opt_type in CONSTRAINED_OPT and (\n method not in ScipyMinimizeSolver.methods_handle_constraints\n ):\n raise TypeError(\n f\"optimization problem has constraints, the method '{method}' is not suitable\"\n )\n\n # Setup class attributes\n\n ## Container for the statistics.\n self._stats = None\n\n ## Method name.\n self.method = method\n\n # Setup minimize input parameters\n\n ## Input to the minimize method\n self.minimize_input = {\n \"fun\": self.f,\n \"method\": method,\n \"x0\": self.x0.toarray().flatten(),\n }\n\n if tol is not None:\n self.minimize_input[\"tol\"] = tol\n\n if options is not None:\n self.minimize_input[\"options\"] = options\n\n if method in ScipyMinimizeSolver.methods_req_jac:\n self.minimize_input[\"jac\"] = self.jac\n\n if method in 
ScipyMinimizeSolver.methods_req_hess:\n self.minimize_input[\"hess\"] = self.hess\n\n ## Constraints definition passed to the minimize method.\n self._constraints = {}\n if method in ScipyMinimizeSolver.methods_handle_constraints:\n if method != \"trust-constr\":\n if self.opt_type in CONSTRAINED_OPT:\n self._constraints[\"constr\"] = {\n \"type\": \"ineq\",\n \"fun\": self.v,\n \"jac\": self.dv,\n }\n else:\n if self.opt.nk:\n self._constraints[\"k\"] = LinearConstraint(\n A=csc_matrix(self.opt.M(self.p).toarray()),\n lb=-self.opt.c(self.p).toarray().flatten(),\n ub=self.opt.inf * np.ones(self.opt.nk),\n )\n\n if self.opt.na:\n eq = -self.opt.b(self.p).toarray().flatten()\n self._constraints[\"a\"] = LinearConstraint(\n A=csc_matrix(self.opt.A(self.p).toarray()),\n lb=eq,\n ub=eq,\n )\n\n if self.opt.ng:\n self._constraints[\"g\"] = NonlinearConstraint(\n fun=self.g,\n lb=np.zeros(self.opt.ng),\n ub=self.opt.inf * np.ones(self.opt.ng),\n jac=self.dg,\n hess=self.ddg,\n )\n\n if self.opt.nh:\n self._constraints[\"h\"] = NonlinearConstraint(\n fun=self.h,\n lb=np.zeros(self.opt.nh),\n ub=np.zeros(self.opt.nh),\n jac=self.dh,\n hess=self.ddh,\n )\n\n return self", "def ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n start_time = time.time()\n # unpickle model and build Kratos Model object\n serialized_model = pickle.loads(pickled_coarse_model)\n current_model = KratosMultiphysics.Model()\n serialized_model.Load(\"ModelSerialization\",current_model)\n del(serialized_model)\n # unpickle parameters and build Kratos Parameters object\n serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n # refine if current current_global_index > 0, adaptive refinement based on the solution of previous index\n if (current_index > 0):\n # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n # refine the model Kratos object\n adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n current_model = refined_model\n 
del(refined_model,refined_project_parameters)\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index\n if (current_index == current_global_index):\n simulation.is_current_index_maximum_index = True\n else:\n simulation.is_current_index_maximum_index = False\n # mapping if in current finest level and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True and current_index == current_global_index):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,pickled_finer_model,computational_time", "def optim_optuna(modelname=\"model_dl.1_lstm.py\", \n pars= {}, \n df = None,\n optim_method=\"normal/prune\",\n save_folder=\"/mymodel/\", log_folder=\"\",ntrials=2) :\n \n module = module_load(modelname) \n\n def objective(trial):\n param_dict = module.get_params(choice=\"test\", ncol_input=df.shape[1], ncol_output=df.shape[1])\n for t,p in pars.items():\n pres = None\n #p = pars[t]\n x = p['type']\n \n if x=='log_uniform':\n pres = trial.suggest_loguniform(t,p['range'][0], p['range'][1])\n \n elif x=='int':\n pres = trial.suggest_int(t,p['range'][0], p['range'][1])\n \n elif x=='categorical':\n pres = trial.suggest_categorical(t,p['value'])\n \n elif x=='discrete_uniform':\n pres = trial.suggest_discrete_uniform(t, p['init'],p['range'][0],p['range'][1])\n \n elif x=='uniform':\n pres = trial.suggest_uniform(t,p['range'][0], p['range'][1])\n \n else:\n raise Exception('Not supported type {}'.format(p['type']))\n\n param_dict[t] = pres\n \n model = module.Model(**param_dict)\n sess = module.fit(model,df)\n stats = model.stats[\"loss\"]\n del sess\n del model\n tf.reset_default_graph()\n return stats\n \n if optim_method=='prune':\n study = optuna.create_study(pruner=optuna.pruners.MedianPruner())\n else:\n study = optuna.create_study() # Create a new study.\n \n \"\"\"\n optuna create-study --study-name \"distributed-example\" --storage \"sqlite:///example.db\"\n \n https://optuna.readthedocs.io/en/latest/tutorial/distributed.html\n if __name__ 
== '__main__':\n study = optuna.load_study(study_name='distributed-example', storage='sqlite:///example.db')\n study.optimize(objective, n_trials=100)\n \n \n \n \"\"\"\n study.optimize(objective, n_trials=ntrials) # Invoke optimization of the objective function.\n param_dict = study.best_params\n param_dict.update(module.get_params(choice=\"test\", ncol_input=df.shape[1], \n ncol_output=df.shape[1]))\n \n ### Run best model\n model = module.Model(**param_dict)\n sess = module.fit(model,df)\n \n #### Saving \n modelname = modelname.replace(\".\", \"_\") # this is the module name which contains .\n save_folder = save_folder + \"/\" + modelname\n if not(os.path.isdir(save_folder)):\n os.makedirs(save_folder)\n file_path = os.path.join(save_folder,modelname+'.ckpt')\n\n save(sess,file_path)\n\n\n ### Update with Best values\n study_trials = study.trials_dataframe()\n study_trials.to_csv(os.path.join(save_folder,modelname+'_study.csv'))\n \n param_dict[\"best_value\"] = study.best_value\n param_dict[\"file_path\"] = file_path \n json.dump( param_dict, os.path.join(save_folder, modelname+'_params.csv') )\n \n return param_dict", "def __solve(self) -> None:\n pyo.TransformationFactory(\"contrib.detect_fixed_vars\").apply_to(self.model) # type: ignore\n pyo.TransformationFactory(\"contrib.deactivate_trivial_constraints\").apply_to(self.model) # type: ignore\n\n # initialise the solver object\n self._logger.debug(\"[ModelSolver] Solver object initiated...\")\n solver = Config.OPTIMISATION_MODEL_CONFIG['SOLVER_TYPE']\n opt = pyo.SolverFactory(solver)\n if Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver) is not None:\n for k, v in Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver).items():\n opt.options[k] = v\n\n try:\n start_time = datetime.now()\n self._logger.debug(\"[ModelSolver] Solver starting...\")\n results = opt.solve(self.model, tee=True)\n self.results = results\n end_time = datetime.now()\n self._logger.info(f\"[ModelSolver] Solver completed in {end_time - start_time}.\")\n except Exception as e:\n raise Exception(f\"Model optimisation failed with {solver} with error message {e}.\")\n\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n self._logger.info(\"Solution is feasible and optimal\")\n results.write()\n elif results.solver.termination_condition == TerminationCondition.infeasible:\n raise ValueError(\"Model optimisation resulted into an infeasible solution\")\n\n self.model.optimised = True", "def setup_solver(self):\n option = Options()\n if logger.getEffectiveLevel() == logging.DEBUG:\n # option.printLevel = PrintLevel.HIGH\n option.printLevel = PrintLevel.NONE\n else:\n option.printLevel = PrintLevel.NONE\n self.solver_minimizing = SQProblem(self.nV, self.nC)\n self.solver_minimizing.setOptions(option)\n self.solver_maximizing = SQProblem(self.nV, self.nC)\n self.solver_maximizing.setOptions(option)\n\n self.solver_minimizing_recent_index = -2\n self.solver_maximizing_recent_index = -2", "def __init__(self, optimizer='BFGS', optimizer_kwargs=None,\n lossprime=True, max_iterations = 1000000):\n\n user_kwargs = optimizer_kwargs\n optimizer_kwargs = {}\n print(f\"in {optimizer}: max_iterations = {max_iterations}\")\n if optimizer == 'BFGS':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method' : 'BFGS',\n 'options': {'gtol': 1e-15,\n 'maxiter': max_iterations}\n }\n #optimizer_kwargs = {'method':'BFGS', 'gtol': 1e-15, }\n elif optimizer == 'L-BFGS-B':\n 
from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'L-BFGS-B',\n 'options': {'ftol': 1e-05,\n 'gtol': 1e-08,\n 'maxfun': max_iterations,\n 'maxiter': max_iterations}\n }\n import scipy\n from distutils.version import StrictVersion\n if StrictVersion(scipy.__version__) >= StrictVersion('0.17.0'):\n optimizer_kwargs['options']['maxls'] = 2000\n elif optimizer == 'TNC':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'TNC',\n 'options': {'ftol': 0.,\n 'xtol': 0.,\n 'gtol': 1e-08,\n 'maxiter': max_iterations, }\n }\n elif optimizer == 'Newton-CG':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Newton-CG',\n 'options': {'xtol': 1e-15,\n 'maxiter': max_iterations,}\n }\n\n elif optimizer == 'Nelder-Mead':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Nelder-Mead',\n 'options': {'maxfun': max_iterations,\n 'maxiter': max_iterations, }\n }\n lossprime = False\n\n if user_kwargs:\n optimizer_kwargs.update(user_kwargs)\n self.optimizer = optimizer\n self.optimizer_kwargs = optimizer_kwargs\n self.lossprime = lossprime", "def form(func, dist_list, init_search_point, alg):\n \n def SLSQP(func, dist_list, init_search_point):\n \n dim = len(dist_list)\n current_beta = 0\n new_beta = 1\n sig = np.empty((1, dim))\n mu = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n def f_l(x_l):\n return(func([x_l[i,:]*sig[0,i] + mu[0,i] for i in range(0, dim)]))\n \n while abs(current_beta-new_beta) > 0.001:\n current_search_point = new_search_point\n current_beta = new_beta\n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n mu[0,i], sig[0, i] = Rosenblatt_Transform(dist_list[i][0], current_search_point[0,i])\n else:\n mu[0,i], sig[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n dist_fun = lambda u: np.linalg.norm(u) \n \n alg = 'SLSQP'\n \n H = lambda u: f_l(u)\n cons = ({'type': 'eq', 'fun': lambda u: -(H(u.reshape(-1,1)))})\n \n result = scipy.optimize.minimize(dist_fun, x0 = current_search_point, constraints = cons, method=alg)\n \n new_beta = result.fun\n u = np.array(result.x).reshape((1,dim))\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = mu[0,i] + u[0,i]*sig[0,i]\n \n beta_value = new_beta \n p_f = sst.norm.cdf(-beta_value)\n iterations = result.nit\n u = result.x\n x = u[:]*sig[0,:] + mu[0,:]\n print(x)\n grad_val = scipy.optimize.approx_fprime(x, func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(sig[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*sig[0,i]/np.sqrt(sum1) \n \n print('------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: slsqp solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('------------------------')\n \n return(beta_value, p_f, x, u, mu, sig, cosines) \n \n def HL_R(func, dist_list, init_search_point):\n \n iterations = 0\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n while abs(cur_beta - new_beta) > 0.001:\n cur_beta = new_beta\n cur_cosines = np.zeros((1, dim))\n new_cosines = np.ones((1, dim))\n \n while max((abs(cur_cosines - new_cosines))[0]) > 0.005:\n \n 
cur_cosines = new_cosines\n \n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0, i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0, i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n \n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(global_std_arr[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*global_std_arr[0,i]/np.sqrt(sum1)\n \n new_cosines = cosines\n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = global_mean_arr[0,i] - new_cosines[0,i]*global_std_arr[0,i]*cur_beta\n \n iterations = iterations + 1\n \n \n B = Symbol('B')\n coordinates = []\n for i in range(0, dim):\n coordinates.append(global_mean_arr[0, i] - new_cosines[0,i]*global_std_arr[0, i]*B)\n new_beta = float(solve(func(coordinates), B)[0])\n \n cosines = new_cosines \n beta_value = new_beta\n p_f = sst.norm.cdf(-new_beta)\n x = new_search_point\n u = (x[0,:] - global_mean_arr[0,:])/global_std_arr\n \n print('-------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: HL-R solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_value, p_f))\n print('-------------------------')\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines)\n \n def HL_RF(func, dist_list, init_search_point):\n\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n\n new_search_point = np.array(init_search_point).reshape((1, dim))\n iterations = 0\n while abs(cur_beta - new_beta) > 0.001 and abs(func(new_search_point[0])) > 0.001:\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n cur_beta = new_beta\n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0,i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0,i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n f_val = func(cur_search_point[0])\n \n x_ast = np.empty((1, dim))\n for i in range(0, dim):\n x_ast[0,i] =(cur_search_point[0,i] - global_mean_arr[0,i])/global_std_arr[0,i]\n\n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.000001)\n grad_val = grad_val.reshape((1, dim)) \n \n grad_val_ast = np.empty(grad_val.shape)\n for i in range(0, dim):\n grad_val_ast[0,i] = grad_val[0,i]*global_std_arr[0,i]\n \n t1 = 1/np.sum(grad_val_ast[0,:]**2)\n\n t2 = sum(grad_val_ast[0,:]*x_ast[0,:]) - f_val\n \n t3 = t1*t2\n \n new_x_ast = np.empty(x_ast.shape)\n for i in range(0, dim):\n new_x_ast[0,i] = t3*grad_val_ast[0,i]\n u = new_x_ast\n new_beta = np.linalg.norm(new_x_ast)\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = new_x_ast[0,i]*global_std_arr[0,i] + global_mean_arr[0,i]\n iterations = iterations + 1\n \n grad_val_ast_sum = sum(grad_val_ast[0,:]**2)\n cosines = grad_val_ast/(grad_val_ast_sum**0.5)\n beta_value = new_beta\n x = new_search_point\n p_f = sst.norm.cdf(-beta_value)\n \n print('-------------------------')\n print('First-Order Reliability Analysis')\n print('Algorithm: HL-RF solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, 
beta_value, p_f))\n print('-------------------------')\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines)\n \n if alg == 'slsqp':\n return(SLSQP(func, dist_list, init_search_point))\n elif alg == 'HL-R':\n return(HL_R(func, dist_list, init_search_point))\n elif alg == 'HL-RF':\n return(HL_RF(func, dist_list, init_search_point))", "def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit", "def run(self, C, p0 = None):\n global algorithm \n algorithm = AdaptiveMM(self.g, C, p0 = p0, lambda0 = 2000)\n solve()", "def test_LSQ_SLSQP_with_constraints(self, fitter):\n fitter = fitter()\n\n g1 = models.Gaussian1D(100, 5, stddev=1)\n g1.mean.fixed = True\n fslsqp = SLSQPLSQFitter()\n slsqp_model = fslsqp(g1, self.xdata, self.ydata)\n model = fitter(g1, self.xdata, self.ydata)\n assert_allclose(model.parameters, slsqp_model.parameters, rtol=10 ** (-4))" ]
[ "0.65719897", "0.6496645", "0.6289987", "0.62891555", "0.6283293", "0.61205184", "0.6095392", "0.60702574", "0.6065315", "0.6050676", "0.60274816", "0.60217804", "0.59814197", "0.5976579", "0.59747475", "0.59728956", "0.5968939", "0.59640926", "0.59405863", "0.5938876", "0.58918935", "0.58829045", "0.58768135", "0.58272266", "0.5826807", "0.5808724", "0.5804657", "0.5804654", "0.5802825", "0.57858837" ]
0.73497164
0
Move the temporary file to its permanent location. Return True if the key didn't exist before.
def _commit(self, key, tmppath, overwrite = True): exists = self.has(key) if exists: if not overwrite: os.unlink(tmppath) return False else: self.prepare(key) os.rename(tmppath, self._filename(key)) return not exists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _move_temporary(self, url: str) -> bool:\n if self._file_path.exists():\n info('File already exists')\n return True\n # If download complete, make file permanent\n move(self._temp_path, self._file_path)\n info(\"DOWNLOADED: %s TO %s\" % (url, self._file_path))\n return True", "def test_file_exists(self):\n with TemporaryDirectory() as tmp:\n # define path to file\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # write atomically to file\n with atomic_write(fp, \"w\") as f:\n f.write(\"asdf\")\n\n # ensure file exists\n assert os.path.exists(fp)\n\n # ensure atomic_write to same file raises an error as it already exists\n try:\n with atomic_write(fp, \"w\") as f:\n f.write(\"asdf\")\n except FileExistsError as e:\n self.assertIsInstance(e, FileExistsError)", "def EnsureBackup(temp_file, save_file):\n temp_file = temp_file.lower()\n save_file = save_file.lower()\n bakFile = save_file + '.bak'\n sameFileData = filecmp.cmp(temp_file, save_file, shallow=0)\n if not sameFileData:\n shutil.copyfile(save_file, bakFile)\n shutil.copyfile(temp_file, save_file)\n os.remove(temp_file)", "def move_from_temp_directory(self):", "def test_save_file():\n test_key = 'qmk_compiler_test_unique_key_name'\n\n # Make sure our test key doesn't exist\n try:\n qmk_storage.get(test_key)\n raise RuntimeError('%s exists on S3 when it should not!' % test_key)\n except Exception as e:\n if e.__class__.__name__ != 'NoSuchKey':\n raise\n\n # Write it to S3\n with NamedTemporaryFile(mode='w', encoding='utf-8') as tempfile:\n tempfile.write('hello')\n tempfile.flush()\n qmk_storage.save_file(tempfile.name, test_key)\n\n # Make sure we can retrieve it\n saved_file = qmk_storage.get(test_key)\n qmk_storage.delete(test_key)\n assert saved_file == 'hello'", "def _delete_key_file():\n\n key_path = _get_path_to_key_file()\n\n if _search_for_key_file(key_path):\n try:\n os.remove(key_path)\n except OSError as e:\n raise NonRecoverableError(\n 'Unable to delete key pair: {0}.'\n .format(str(e)))", "def makeFilePointer(self, key, tmppath):\n fp = open(tmppath, 'w')\n self.getFile(key, fp)\n fp.close()", "def test_retrieve_original_to_temp(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n src = os.path.join(self.upload_path, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n\n # retrieve file\n result = backend.retrieve_original(id, self.tmp_path)\n expected_dst = os.path.join(self.tmp_path, id, 'demo-test.tar.gz')\n self.assertEquals(expected_dst, result)\n self.assertTrue(os.path.exists(expected_dst))", "def ifExist(file_name, key):\n\tif exists(file_name) and exists(key):\n\t\treturn True\n\telse:\n\t\treturn False", "def tmp_key(filename):\n return TMP_PREFIX + filename", "def test_rotateAlreadyExists(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n # Build a new file with the same name as the file which would be created\n # if the log file is to be rotated.\n newFilePath = \"{}.{}\".format(log.path, log.suffix(log.lastDate))\n with open(newFilePath, \"w\") as fp:\n fp.write(\"123\")\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)", "def touch(filename):\n try:\n if os.path.exists(filename):\n os.utime(filename, None)\n else:\n open(filename, \"w\").close()\n except IOError as e:\n if e.errno != 13:\n raise\n else:\n return False\n except OSError as e:\n if e.errno != 13:\n raise\n else:\n return False\n return True", "def test_atomic_write(self):\n with TemporaryDirectory() as 
tmp:\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # perform an atomic write\n with atomic_write(fp, \"w\") as f:\n assert not os.path.exists(fp)\n tmpfile = f.name\n f.write(\"asdf\")\n\n # ensure tmp file has been deleted\n assert not os.path.exists(tmpfile)\n # ensure file to write to exists\n assert os.path.exists(fp)\n\n # ensure content of destination file is what we expect\n with open(fp) as f:\n self.assertEqual(f.read(), \"asdf\")", "def test_removed(self):\n path = None\n with TemporaryDirectory() as tmp:\n path = tmp\n self.assertTrue(os.path.isdir(tmp))\n tmpfile = os.path.join(tmp, \"a_temp_file\")\n open(tmpfile, \"w\").write(\"data\")\n self.assertTrue(os.path.isfile(tmpfile))\n self.assertFalse(os.path.isdir(path))\n self.assertFalse(os.path.exists(path))", "def safeSave(self):\n self.fileInfo.makeBackup()\n filePath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n tempPath = filePath+'.tmp'\n self.save(tempPath)\n renameFile(tempPath,filePath)\n self.fileInfo.setMTime()\n self.fileInfo.extras.clear()", "def upload_file(self, key, filepath, access, keep_original=True,\n verbose=False):\n\n # file entry\n try:\n file_entry = self.bucket.new_key(key)\n file_entry.set_metadata('filepath', filepath)\n file_entry.set_contents_from_filename(filepath)\n file_entry.set_acl(access) # access control\n except Exception as error:\n print str(error)\n return False\n else:\n if verbose:\n print \"{} uploaded to amazon s3.\".format(key)\n\n # original file removal\n if not keep_original and os.access(filepath, os.W_OK):\n try:\n os.remove(filepath)\n except (IOError, OSError):\n print \"I/O error, could not remove file.\"\n else:\n if verbose:\n print \"{} (original) removed\".format(filepath)\n\n return True", "def copy_tmp_file(self, dst):\n if dst and self.file_exists(self.tmp_file):\n shutil.copyfile(self.tmp_file, dst)", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def get_temporary_file(prefix=\"apsharvest_test_\", suffix=\"\", directory=\"\"):\n try:\n file_fd, filepath = mkstemp(prefix=prefix,\n suffix=suffix,\n dir=directory)\n os.close(file_fd)\n except IOError, e:\n try:\n os.remove(filepath)\n except Exception:\n pass\n raise e\n return filepath", "def check_file(filename, force, expected_file_size=1):\n if os.path.exists(filename):\n if force or os.path.getsize(filename) < expected_file_size:\n logger.debug(\" .. 
Removing old file '%s'.\", filename)\n os.remove(filename)\n return False\n else:\n return True\n return False", "def replace(self):\n if self.success is False:\n raise TaskError('not ready')\n try:\n temp_src = '/tmp/' + str(random.randint(10000, 99999)) + '.mp3'\n os.move(self.source, temp_src)\n os.move(self.target, self.source)\n os.unlink(temp_src)\n except OSError as e:\n print(e)", "def _delete_path(target_path: str) -> bool:\n target_path = os.path.normpath(target_path)\n with TempFileLock(f\"{target_path}.lock\"):\n return _delete_path_unsafe(target_path)", "def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname", "def _delete_external_keypair():\n\n if not utils.use_external_resource(ctx.node.properties):\n return False\n\n ctx.logger.info('External resource. Not deleting keypair.')\n\n utils.unassign_runtime_properties_from_resource(RUNTIME_PROPERTIES,\n ctx.instance)\n return True", "def is_writable_dir(path):\n try:\n with TemporaryFile(mode=\"w\", dir=path) as t:\n t.write(\"1\")\n return True\n except Exception:\n return False", "def check_force_generate(self, force):\n if self.exists() and not force:\n self.log.warn(\n '{} already exists, skipping key generation...'.format(self.key_file)\n )\n return False\n else:\n return True", "def testCreateSshKeyPairKeyAlreadyExists(self): #pylint: disable=invalid-name\n public_key = \"/fake/public_key\"\n private_key = \"/fake/private_key\"\n self.Patch(os.path, \"exists\", side_effect=[True, True])\n self.Patch(subprocess, \"check_call\")\n self.Patch(os, \"makedirs\", return_value=True)\n utils.CreateSshKeyPairIfNotExist(private_key, public_key)\n self.assertEqual(subprocess.check_call.call_count, 0) #pylint: disable=no-member", "def test_ref_cache_with_tempfile(self):\n # update cache file from db\n self.host_updater.refresh_cache()\n # create temp_cache_file to test it doesnt broke system\n with open(self.host_updater.temp_cache_file, 'a'):\n pass\n self.host_updater.refresh_cache()\n self.assertFalse(os.path.exists(self.host_updater.temp_cache_file))", "def upload_file(self, bucket, key, local_path):\n if os.path.isfile(local_path):\n self._s3.Bucket(bucket).upload_file(local_path, key)\n\n else:\n raise LocalFileNotFoundException(\"File Not Found - \" + local_path)\n\n return self.key_exists(bucket, key)", "def add_tempfile(self, filename, exists=True):\n tmp = os.path.abspath(filename)\n if exists and not os.path.exists(tmp):\n raise IOError(\"Temporary file does not exist: \" + tmp)\n self._tempfiles[-1].append(tmp)" ]
[ "0.6546251", "0.5571317", "0.5493913", "0.54838884", "0.5460341", "0.5343209", "0.5323855", "0.5305094", "0.52894396", "0.5285813", "0.5267737", "0.5262435", "0.52582973", "0.5255418", "0.5252858", "0.52222496", "0.52116925", "0.5201012", "0.518724", "0.51626956", "0.51376057", "0.5130193", "0.5127713", "0.5095388", "0.5069606", "0.5066147", "0.50312304", "0.50303304", "0.50259894", "0.502086" ]
0.6806565
0
Make sure the directory exists for holding the given key
def prepare(self, key): _mkdirs(self._dirname(key))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ensure_dir_exists(self):\n pass", "def verify_dir_helper(dir):\n if not os.path.exists(dir):\n print(\"Creating cache directory at {}\".format(dir))\n os.makedirs(dir)", "def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def get_directory(self, key):\n raise NotImplementedError", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass", "def ifExist(file_name, key):\n\tif exists(file_name) and exists(key):\n\t\treturn True\n\telse:\n\t\treturn False", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)", "def create_file(self, key=None):\n self.make_directory()\n open(self.file_path(key), 'w').close()", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def ensure_dir(path):\n\n \n try:\n os.makedirs(path)\n except (EnvironmentError) as e:\n if not(e.errno == errno.EEXIST and \n e.filename == path):\n raise\n return", "def _assert_dir_already_exists(dirname):\n\n if not dirname:\n return\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def check_file(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_path_exists(filename):\n targetdir = dirname(expanduser(filename))\n if exists(targetdir):\n return\n os.makedirs(abspath(targetdir))", "def ensure_dir(dir_):\n try:\n os.mkdir(dir_)\n except OSError:\n assert os.path.isdir(dir_)", "def check_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_dir( dirName ):\r\n if not os.path.exists( dirName ):\r\n os.makedirs( dirName )", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass", "def ensure_dir_exists(path: Union[str,Path]) -> None:\n# path = str(path)\n assert not os.path.isfile(path)\n os.makedirs(path, exist_ok=True)\n assert os.path.isdir(path)", "def ensure_directory(path):\n\tdir_path = os.path.dirname(path)\n\tif os.path.exists(dir_path):\n\t\treturn\n\tensure_directory(dir_path)\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept OSError as e:\n\t\t# Ignore if EEXISTS. 
This is needed to avoid a race if two getters run at once.\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise", "def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response", "def testCreateSshKeyPairKeyAlreadyExists(self): #pylint: disable=invalid-name\n public_key = \"/fake/public_key\"\n private_key = \"/fake/private_key\"\n self.Patch(os.path, \"exists\", side_effect=[True, True])\n self.Patch(subprocess, \"check_call\")\n self.Patch(os, \"makedirs\", return_value=True)\n utils.CreateSshKeyPairIfNotExist(private_key, public_key)\n self.assertEqual(subprocess.check_call.call_count, 0) #pylint: disable=no-member", "def _validate_path(dir_path: str) -> None:\n if os.path.exists(dir_path):\n return\n\n logger.info('Creating directory: %s', dir_path)\n os.mkdir(dir_path)", "def _ensure_dir_exists(self, directory):\n directory = directory.strip()\n if not Path(directory).exists():\n os.mkdir(directory)", "def test_get_path_returns_none_for_bad_key(\n self, audio_store_and_expected_files, key):\n audio_store = audio_store_and_expected_files[0]\n assert audio_store.get_path(key) is None", "def check_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)" ]
[ "0.69710225", "0.66094077", "0.6429809", "0.641802", "0.62993103", "0.6295084", "0.6277301", "0.61933404", "0.6177444", "0.6164815", "0.6160266", "0.6155942", "0.61461115", "0.6130273", "0.61162114", "0.6114681", "0.610374", "0.61000377", "0.60758036", "0.6052262", "0.6044748", "0.60310626", "0.60255665", "0.602485", "0.60233897", "0.6004058", "0.5996899", "0.59917456", "0.5990808", "0.59901005" ]
0.7036552
0
Open the file given by its key for reading.
def open(self, key): try: return open(self._filename(key), "rb") except FileNotFoundError: raise KeyError(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_file(file_name):\n pass", "def load_key(key_name):\n if not p.exists(key_name):\n write_key(key_name)\n\n return open(key_name, \"rb\").read()", "def open(self, path, filename=None):\n scheme, key = self.getkey(path, filename=filename)\n return BotoReadFileHandle(scheme, key)", "def read_key(self, keyfile_name):\n\n with open(keyfile_name, 'rb') as f:\n self.key = f.read()\n self.cryptor = Fernet(self.key)", "def open(self, key):\n\n ## Set our key\n self.key = key\n\n base_uri = r'/feeds/documents/private/full/spredsheet%%3A%s' % self.key\n\n ## Set also the \"db\"\n self.db = self.client.docsclient.GetDocumentListEntry(base_uri)\n\n ## Make sure internal book-keeping is ok\n self.refresh_tables()", "def __getitem__(self, key):\n with open(self._get_path(key), 'rb') as f:\n unpickler = pickle.Unpickler(f)\n while f.peek(1):\n yield unpickler.load()", "def readKey(self, keyPath):\n\t\ttry:", "def load_key():\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n try:\r\n return open(key_dir, \"rb\").read()\r\n except:\r\n return None", "def open(self, filename):\n raise NotImplementedError", "def _open(self, file_path=None):\n\t\tif file_path is None:\n\t\t\tfile_path = self.file_path\n\n\t\tif not os.path.exists(file_path):\n\t\t\traise ValueError('Could not find file: {}'.format(file_path))\n\n\t\ttry:\n\t\t\tf = open(file_path, encoding='utf-8', newline='')\n\t\texcept OSError as err:\n\t\t\tself.log.error(str(err))\n\t\t\traise ValueError('Could not open file: {}'.format(file_path))\n\n\t\treturn f", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def get_file(self, key):\n result = (None, None)\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n content_type, _ = mimetypes.guess_type(path)\n with open(path, \"rb\") as file:\n result = content_type, file.read()\n return result", "def open_file():\n fp = open('dictionary.txt', 'r')\n return fp", "def _get_akey_afile(self, key):\n url = self._get_key_url(key)\n return self._parse_url(url)[:2] # skip size", "def open_file(self, fname):\n\n # Save that the file is opened.\n self.open_files[fname] = {}\n self.open_files[fname][\"name\"] = fname\n self.open_files[fname][\"contents\"] = []", "def open(cls, file, mode='r'):\n return open(file, mode)", "def open (self, path, mode):\r\n pass", "def read(self, key):\n raise NotImplementedError", "def get_file_contents(self, key):\n f = open( os.path.join(self.file_path, '%s.xoj' % key))\n data = f.read()\n f.close()\n return data", "def load_device_key(self, filename):\n pass", "async def read(self, key: str) -> ResponseOrKey:", "def open(self, mode='r'):\r\n return open(self.strpath, mode)", "def open(self, filename, mode):\n return open(filename, mode)", "def open_for_reading(path):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n return samba.get_file_handle_for_reading(os.path.basename(path), os.path.dirname(path))\r\n else:\r\n return open(path, \"rb\")", "def open_file_by_type(file_name, mode=\"r\"):\n file_name = op.expanduser(file_name) # expands '~' to the absolute home dir\n if file_name.endswith(\"bz2\"):\n return bz2.open(file_name, mode)\n elif file_name.endswith(\"gz\"):\n return gzip.open(file_name, mode, encoding=\"utf-8\")\n else:\n return open(file_name, mode, encoding=\"utf-8\")", "def open_file(file_name):\n\n try:\n return open(file_name, 'rt')\n except Exception as e:\n raise UserException(\"unable to open file {0}\".format(file_name),\n str(e))", "def 
get(self, key):\n output = self.execute_command(\n 'sudo -i bash -c \\'if [ -f \"{0}\" ]; then cat \"{0}\"; else echo -n \"FILE_NOT_FOUND\"; fi\\''.format(\n self._store_path + key))\n value = return_fixed_output(output)\n return None if value == 'FILE_NOT_FOUND' else value", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def open_file(file_path, mode='rb', encoding='iso-8859-1'):\n try:\n return open(file_path, mode=mode, encoding=encoding)\n except IOError:\n raise", "def load(self, key):\n overloads = self._load_index()\n data_name = overloads.get(key)\n if data_name is None:\n return\n try:\n return self._load_data(data_name)\n except OSError:\n # File could have been removed while the index still refers it.\n return" ]
[ "0.6818028", "0.6678012", "0.66192126", "0.6459983", "0.6388763", "0.63320065", "0.62570834", "0.623832", "0.62076575", "0.61941403", "0.61808246", "0.6174946", "0.61232173", "0.6110161", "0.6076455", "0.6033957", "0.6033038", "0.6030252", "0.6023124", "0.59916943", "0.59794134", "0.5952204", "0.5948913", "0.5940777", "0.5910054", "0.59039253", "0.5889379", "0.587672", "0.587501", "0.58664566" ]
0.8616494
0
Registers a template filter function in the FILTERS dict.
def template_filter(name: Optional[str] = None) -> Callable: def decorator(func): name_ = name if name else func.__name__ FILTERS[name_] = func return func return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self, filter_name, filter_func):\n self._filters[filter_name] = filter_func", "def request_filter(self, fn):\n self.request_filters.append(fn)\n return fn", "def add_filter(self, f):\n raise NotImplementedError", "def test_filter_function_settings(self):\n def foo():\n \"\"\"Dummy function.\"\"\"\n return True\n\n self.es.register_filter(foo)\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'], [])\n\n self.es.register_filter(foo, ftype='none')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'][0], foo)\n\n self.es.register_filter(foo, ftype='any')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'][0], foo)\n self.assertEqual(self.es.filter['none'][0], foo)", "def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter", "def add_filter(self, name: str, value: any):\n self.filters[name] = value", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def add_filter(self, filter):\n self._filters.append(filter.as_dict())", "def add_filters_from_module(self, filter_functions):\n\n super(BigqueryInsertFilterMixin, self).add_filters_from_module(filter_functions)\n\n self.bigquery_insert_filter = getattr(\n filter_functions,\n self.bigquery_insert_filter\n )", "def decorator(module_fn):\n _FILTERS_AND_SAMPLERS.append((filter_, module_fn))\n return module_fn", "def filter(self, name=None):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n if _name in self._filters:\n raise Error(\"Filter already defined: {0}\".format(_name))\n\n self._filters[_name] = fn\n return fn\n return wrapper", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def add_filter(self, name, value, comparator='equals',\n case_sensitive=False):\n self.filters.append({'name': name, 'value': value,\n 'comparator': comparator,\n 'case_sensitive': case_sensitive,\n 'type': 'filter'})", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def get_filter():\n return render_template(\"filter_js.html\")", "def filter(self, filter_dict):\n pass", "def add_received_packet_filter(self, filter_func):\n self._received_packet_filters.append(filter_func)", "def RegisterCommonFilters(filtermap):\n\n # General casing for output naming\n filtermap['camelcase'] = stringcase.camelcase\n filtermap['capitalcase'] = stringcase.capitalcase\n filtermap['constcase'] = stringcase.constcase\n filtermap['pascalcase'] = 
stringcase.pascalcase\n filtermap['snakecase'] = stringcase.snakecase\n filtermap['spinalcase'] = stringcase.spinalcase", "def _filterfunc(self,*args,**kwargs):\n self._filterfunc = self.f\n return self.f(*args,**kwargs)", "def add_filter(self, value=''):\n # Create the filter\n filter_ = FilterWithPlaceholder(self, value=value)\n filter_.focus_force()\n filter_.bind('<Return>',\n lambda evt: self.event_generate('<<FiltersReady>>'))\n\n def _on_typing_out_event(evt):\n if filter_.get() == '':\n self._filters.remove(filter_)\n filter_.grid_forget()\n filter_.destroy()\n filter_.bind('<<TypingOut>>', _on_typing_out_event)\n\n # Push the filter in the list\n self._filters = self._filters[:-1] + [filter_] + [self._filters[-1]]\n\n # Refresh the grid\n for (i, curr) in enumerate(self._filters):\n curr.grid(row=0, column=i, sticky='EW')\n curr.lift()\n\n return filter_", "def _add_filter(self, filter_list, filter_path):\n if \"name\" not in filter_list:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'name' key in filter object '{!r}'.\".format(\n filter_path, filter_list))\n if \"regex_match\" not in filter_list:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'regex_match' key in filter object '{!r}'.\".format(\n filter_path, filter_list))\n\n full_filter_name = _get_full_filter_name(filter_list[\"name\"], filter_path)\n if full_filter_name in self._filters_dict:\n raise errors.ParserError(\"Loading filter-file {} failed. \"\n \"Filter named {} already exists.\".format(\n filter_path, full_filter_name))\n\n try:\n self._filters_dict[full_filter_name] = re.compile(\n filter_list[\"regex_match\"])\n logger.debug(\"Added filter {} from filter file {}\", full_filter_name,\n filter_path)\n except re.error as err:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Unable to\"\n \" compile regular expression value '{}'. 
Error {!r}\".format(\n filter_path, filter_list[\"regex_match\"], err))", "def register_filters(app):\n #\n # Conversion of Markdown to HTML\n Misaka(app, autolink=True, fenced_code=True,\n strikethrough=True, tables=True)\n\n # Registration of handlers for micawber\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(\n value, app.oembed_providers, maxwidth=600, maxheight=400\n )\n\n # Timezone helper\n app.tz = timezone(app.config['TIME_ZONE'])\n\n # Lambda filters for safe image_url's\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')\n\n # Custom filters\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default=\"now!\", until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None: return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None: return ''\n return value.strftime(format)", "def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})", "def module(filter_):\n def decorator(module_fn):\n \"\"\"Decorates a module function.\"\"\"\n _FILTERS_AND_SAMPLERS.append((filter_, module_fn))\n return module_fn\n return decorator", "def register(self, callback, filters = []):\n\t\tself.callbacks.append((callback, filters))\n\t\tself.events[str(callback)] = []", "def registerInputHandler(callback, filterString=None):\n if filterString in __terminalState.inputHandlers:\n __terminalState.inputHandlers[filterString].append(callback)\n else:\n __terminalState.inputHandlers[filterString] = [callback]", "def filter(self, fn):\n self.__filter_chain.append(fn)", "def test_filter_function_settings_fail(self):\n with self.assertRaises(TypeError):\n self.es.register_filter('test')", "def add(self, new_filter: Filter) -> None:\r\n self.filters.append(new_filter)", "def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def ext_filter(app):\n return UrlRewriteFilter(app, conf)\n return ext_filter" ]
[ "0.7237711", "0.6231328", "0.6202456", "0.60305154", "0.5976299", "0.5930582", "0.590993", "0.58770925", "0.5865919", "0.57110393", "0.55850905", "0.5519374", "0.55121994", "0.5446713", "0.5422097", "0.5401396", "0.5399587", "0.5393217", "0.539265", "0.5392208", "0.5359269", "0.5328645", "0.52884203", "0.52809995", "0.5271166", "0.5208228", "0.520627", "0.51925105", "0.5189365", "0.51792294" ]
0.65563595
1
Wraps the input argument in brackets if it looks like an IPv6 address. Otherwise, returns the input unchanged. This is useful in templates that need to build valid URLs from host name variables that can be either FQDNs or any IPv4 or IPv6 address.
def ipwrap(address: Any) -> str:
    try:
        if not isinstance(address, int):
            ipaddress.IPv6Address(address)
            return f"[{address}]"
    except ValueError:
        pass
    return str(address)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_url_address(address):\n try:\n addr = netaddr.IPAddress(address)\n if addr.version == constants.IPV6_FAMILY:\n return \"[%s]\" % address\n else:\n return str(address)\n except netaddr.AddrFormatError:\n return address", "def expand_ipv6_address(address):\n\n if not is_valid_ipv6_address(address):\n raise ValueError(\"'%s' isn't a valid IPv6 address\" % address)\n\n # expand ipv4-mapped portions of addresses\n if address.count('.') == 3:\n ipv4_start = address.rfind(':', 0, address.find('.')) + 1\n ipv4_end = address.find(':', ipv4_start + 1)\n\n if ipv4_end == -1:\n ipv4_end = None # don't crop the last character\n\n # Converts ipv4 address to its hex ipv6 representation. For instance...\n #\n # '5.9.158.75' => '0509:9e4b'\n\n ipv4_bin = _get_address_binary(address[ipv4_start:ipv4_end])\n groupings = [ipv4_bin[16 * i:16 * (i + 1)] for i in range(2)]\n ipv6_snippet = ':'.join(['%04x' % int(group, 2) for group in groupings])\n\n addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, ipv6_snippet, address[ipv4_end + 1:] if ipv4_end else None]\n address = ':'.join(filter(None, addr_comp))\n\n # expands collapsed groupings, there can only be a single '::' in a valid\n # address\n if '::' in address:\n missing_groups = 7 - address.count(':')\n address = address.replace('::', '::' + ':' * missing_groups)\n\n # inserts missing zeros\n for index in range(8):\n start = index * 5\n end = address.index(':', start) if index != 7 else len(address)\n missing_zeros = 4 - (end - start)\n\n if missing_zeros > 0:\n address = address[:start] + '0' * missing_zeros + address[start:]\n\n return address", "def safe_ip_format(ip):\r\n try:\r\n if netaddr.IPAddress(ip).version == 6:\r\n return '[%s]' % ip\r\n except (TypeError, netaddr.AddrFormatError): # hostname\r\n pass\r\n # it's IPv4 or hostname\r\n return ip", "def format_ipv6(value, mask):\n value_ipv6 = \":\".join(re.findall('..', \"{:032x}\".format(value)))\n if mask is None:\n return value_ipv6\n value_mask = \":\".join(re.findall('..', \"{:032x}\".format(mask)))\n return \"{}/{}\".format(value_ipv6, value_mask)", "def ip_f(x: Text) -> Tuple[Optional[Text], Optional[Text]]:\n try:\n addrv6 = ipaddress.IPv6Address(x)\n return \"ipv6\", str(addrv6.exploded)\n except ipaddress.AddressValueError:\n try:\n ipaddress.IPv4Address(x)\n return \"ipv4\", x\n except ipaddress.AddressValueError:\n pass\n\n return None, None", "def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_address\")", "def process_bind_param(self, value, dialect):\n if self.is_valid_ipv6(value):\n return self.get_shortened_ipv6(value)\n return value", "def SupportsIPv6(self) -> bool:", "def clean_ipv6_address(\n ip_str, unpack_ipv4=False, error_message=_(\"This is not a valid IPv6 address.\")\n):\n try:\n addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))\n except ValueError:\n raise ValidationError(error_message, code=\"invalid\")\n\n if unpack_ipv4 and addr.ipv4_mapped:\n return str(addr.ipv4_mapped)\n elif addr.ipv4_mapped:\n return \"::ffff:%s\" % str(addr.ipv4_mapped)\n\n return str(addr)", "def OSSupportsIPv6(self) -> bool:", "def is_host_ip6(value):\n try:\n return bool(ipaddress.IPv6Address(value))\n\n except:\n pass", "def 
is_valid_ipv6_address(address, allow_brackets = False):\n\n if allow_brackets:\n if address.startswith('[') and address.endswith(']'):\n address = address[1:-1]\n\n if address.count('.') == 3:\n # Likely an ipv4-mapped portion. Check that its vaild, then replace with a\n # filler.\n\n ipv4_start = address.rfind(':', 0, address.find('.')) + 1\n ipv4_end = address.find(':', ipv4_start + 1)\n\n if ipv4_end == -1:\n ipv4_end = None # don't crop the last character\n\n if not is_valid_ipv4_address(address[ipv4_start:ipv4_end]):\n return False\n\n addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, 'ff:ff', address[ipv4_end + 1:] if ipv4_end else None]\n address = ':'.join(filter(None, addr_comp))\n\n # addresses are made up of eight colon separated groups of four hex digits\n # with leading zeros being optional\n # https://en.wikipedia.org/wiki/IPv6#Address_format\n\n colon_count = address.count(':')\n\n if colon_count > 7:\n return False # too many groups\n elif colon_count != 7 and '::' not in address:\n return False # not enough groups and none are collapsed\n elif address.count('::') > 1 or ':::' in address:\n return False # multiple groupings of zeros can't be collapsed\n\n for entry in address.split(':'):\n if not re.match('^[0-9a-fA-f]{0,4}$', entry):\n return False\n\n return True", "def ipv6_address(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def format_hostname(hostname: str) -> str:\n if has_ipv6 and re.match(r\"\\d+.\\d+.\\d+.\\d+\", hostname) is not None:\n hostname = f\"::ffff:{hostname}\"\n return hostname", "def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. 
(Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")", "def get_ipv6_host(self, host):\n\n try:\n host = u'{0}'.format(host)\n return IPv6Network(host, strict=False)\n except ValueError as e:\n error_msg = \"Given host {0} is an invalid IPv6 format -- \" \\\n \"error {1}\".format(host, str(e))\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)", "def _check_ip(val: Any, input_format: str, clean: bool) -> Any:\n try:\n if val in NULL_VALUES:\n return (None, \"null\") if clean else False\n\n address = ip_address(val)\n vers = address.version\n\n if vers == 4 and input_format != \"ipv6\" or vers == 6 and input_format != \"ipv4\":\n return (address, \"success\") if clean else True\n return (None, \"unknown\") if clean else False\n\n except (TypeError, ValueError):\n return (None, \"unknown\") if clean else False", "def test_ipv6_validation_failure():\n with pytest.raises(socket.error):\n is_ipv6('2001::0234:C1ab::A0:aabc:003F')", "def alias(self):\n return 'IPv6-Opts'", "def check_ipv4_ipv6_fqdn(val):\n\n try:\n val = u'{0}'.format(val)\n ip = ip_network(val, strict=False)\n return ip.version\n except ValueError:\n return 0", "def _format_ip(val: Any, input_format: str, output_format: str, errors: str) -> Any:\n # pylint: disable=too-many-branches\n address, status = _check_ip(val, input_format, True)\n\n if status == \"null\":\n return np.nan, 0\n if status == \"unknown\":\n if errors == \"raise\":\n raise ValueError(f\"Unable to parse value {val}\")\n return val if errors == \"ignore\" else np.nan, 1\n\n # compressed version without the leading zeros (for ipv6 double colon for zeros)\n if output_format == \"compressed\":\n result = address.compressed\n\n # Converts the integer repesentation of the ip address to its hexadecimal\n # form. 
Does not contain any dots or colons.\n elif output_format == \"hexa\":\n result = hex(int(address))\n\n # converts the ip address to its binary representation\n elif output_format == \"binary\":\n if address.version == 4:\n result = \"{0:032b}\".format(int(address))\n else:\n result = \"{0:0128b}\".format(int(address))\n\n # converts to integer format\n elif output_format == \"integer\":\n result = int(address)\n\n # converts to packed binary format (big-endian)\n elif output_format == \"packed\":\n result = address.packed\n\n # convert to full representation\n else:\n dlm = \".\" if address.version == 4 else \":\" # delimiter\n result = dlm.join(f\"{'0' * (4 - len(x))}{x}\" for x in address.exploded.split(dlm))\n\n return result, 2 if result != val else 3", "def test_ipv6_validation_success():\n assert is_ipv6('2001:db8::ff00:42:8329')", "def is_net_ip6(value):\n for test in [lambda x: ipaddress.IPv6Network(x)._prefixlen != 128,\n lambda x: ipaddress.IPv6Interface(x)._prefixlen != 128]:\n try:\n return bool(test(value))\n\n except:\n pass\n\n return False", "def test_ipv6_in_net(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344/24\")\n assert test_ip.in_network(\"2001:0d00::/24\")\n assert test_ip.in_network(\"2001:0d00::/29\")", "def ipv6(self, ipv6):\n\n self._ipv6 = ipv6", "def process_bind_param(self, value, dialect):\n if dialect.name == 'postgresql':\n return value\n # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened\n # form, not validate it.\n elif utils.is_valid_ipv6(value):\n return utils.get_shortened_ipv6(value)\n return value", "def test_ipv6_in_range(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n \n assert test_ip.in_range(\"2000:0db8:85a3:08d3:1319:8a2e:0370:7344\",\"2002:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n assert test_ip.in_range(\"2001:0db8:85a3:07d3:1319:8a2e:0370:7344\",\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344\")\n assert test_ip.in_range(\"::ffff:1.1.1.1\",\"2501:0db8:85a3:08d3:1319:8a2e:0370:7344\")", "def _FixIPv6Address(self, netblocks):\n new_list = []\n length = len(netblocks)\n if length > 0:\n number_ipv6 = 0\n for netblock in netblocks:\n if netblock.version == 4:\n new_list.append(netblock)\n elif netblock.version == 6:\n number_ipv6 += 1\n if number_ipv6 == length:\n return True, new_list\n return False, new_list", "def process_bind_param(self, value, dialect):\n # NOTE(sdague): normalize all the inserts\n if utils.is_valid_ipv6_cidr(value):\n return utils.get_shortened_ipv6_cidr(value)\n return value" ]
[ "0.6575976", "0.6440431", "0.6396398", "0.6351718", "0.6283994", "0.6257915", "0.61470795", "0.6099252", "0.60245305", "0.5985608", "0.59547496", "0.594928", "0.587961", "0.58069444", "0.57675177", "0.57507926", "0.57504153", "0.5659189", "0.5579736", "0.5571313", "0.551502", "0.5505865", "0.54870874", "0.54616845", "0.5452937", "0.5439692", "0.54159164", "0.5393442", "0.5374533", "0.53615236" ]
0.6512216
1
Increment an IP address by a given value. Default increment value is 1.
def increment_ip(ip_string, increment=1):
    if "/" in ip_string:
        # IP with prefix
        interface = ipaddress.ip_interface(ip_string)
        address = interface.ip + increment
        # ugly workaround for IPv4: incrementing an interface's address changes the prefix in some
        # cases.
        # Check to ensure that the incremented address is in the original network.
        if address not in interface.network:
            raise ValueError(f"IP address {address} is not in network {interface.network.with_prefixlen}")
        else:
            return f"{address}/{interface.network.prefixlen}"
    else:
        # plain IP
        ip = ipaddress.ip_address(ip_string)
        return format(ip + increment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increment(cls, value):\r\n value.value += 1", "def increment(val):\n return coerce_to_int(val) + 1", "def inc(self):\n self._value += 1", "def increment(self, amount):\n pass", "def increment2(cls, var):\r\n var += 1", "def nv_increment(self, path: Union[bytes, str]) -> None:\n path = _to_bytes_or_null(path)\n ret = lib.Fapi_NvIncrement(self._ctx, path)\n _chkrc(ret)", "def increment(x): # pylint: disable=invalid-name\n return x + 1", "def increase_count(self, number=1):\n self.count += number", "def inc(i):\n i += 1\n return i", "def increment(self, n=1):\n with self.current_counter.get_lock():\n self.current_counter.value += n", "def increment_counter(self) -> None:", "def increment(name, count=1):\n # check the counter is tracked\n if name not in _counter_cache:\n track_counter(name)\n _counter_cache.add(name)\n print 'increment: %s' % name\n memcache.incr(name, delta=count, initial_value=0, namespace=NAMESPACE)", "def increment(self):\n self.data[self.pointer] += 1\n self.data[self.pointer] %= 256", "def inc(self):\n \n self.count += 1", "def increment(self) -> global___Expression:", "def increment(self, index, value):\n self._inrange(index)\n if value==0:\n return\n found,ii = self._find_index(index)\n if found:\n self.value[ii] += value\n if self.value[ii] == 0:\n del self.index[ii]\n del self.value[ii]\n else:\n self.index.insert(ii, index)\n self.value.insert(ii, value)", "def inc( self ):\n self.count += 1", "def increment(self):\r\n return self.add(1)", "def increment_number_served(self, increment):\n self.number_served += increment", "def increment_number_served(self, increment):\n self.number_served += increment", "def change_ip(self, address: int) -> None:\n self.regs[\"ip\"].write(address)", "def inc_num(num):\n return num + 1", "def __iadd__(self, increment):\n self.update(self.val + increment)\n return self", "def increment(self, inc):\n self.done += inc", "def increment(self):\n with self._lock:\n if self._value >= self._limit:\n log.warning(\"%r reached limit\", self)\n raise LimitReached(\n \"{} limit ({}) reached\".format(self.name, self._limit)\n )\n self._value += 1\n log.debug(\"%r incremented\", self)", "def update_variable(value):\n return value + 1", "def increase_counter(self):\n self.values = self.values + 1", "def increment_pc(self):\n self.program_counter[-1] += 1", "def move_ip(self, idx: Optional[int] = None) -> None:\n if idx:\n self.ip = idx\n else:\n self.ip += 1", "async def counter_inc(self,\n row: bytes,\n column: bytes,\n value: int = 1) -> None:\n self._counters[(row, column)] += value\n await self._check_send()" ]
[ "0.6776502", "0.66956335", "0.6532109", "0.63723654", "0.620784", "0.6181279", "0.6171777", "0.6122767", "0.6090748", "0.6089337", "0.6077814", "0.60685587", "0.60502255", "0.6032057", "0.602571", "0.60226136", "0.60221374", "0.59705263", "0.59495497", "0.59495497", "0.5941885", "0.5933058", "0.593221", "0.59128845", "0.590314", "0.5868654", "0.58676124", "0.5867456", "0.5824237", "0.5808705" ]
0.7037077
0
Transforms an IPv4 address to an IPv6 interface address. This will combine an arbitrary IPv6 network address with the 32 address bytes of an IPv4 address into a valid IPv6 address + prefix length notation the equivalent of dotted quad compatible notation.
def ipv4_to_ipv6(v6_network: Union[str, ipaddress.IPv6Network], v4_address: Union[str, ipaddress.IPv4Interface]):
    if isinstance(v6_network, str):
        v6_network = ipaddress.IPv6Network(v6_network)
    if isinstance(v4_address, str):
        v4_address = ipaddress.IPv4Address(v4_address)
    v6_address = v6_network[int(v4_address)]
    return ipaddress.IPv6Interface(f"{v6_address}/{v6_network.prefixlen}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expand_ipv6_address(address):\n\n if not is_valid_ipv6_address(address):\n raise ValueError(\"'%s' isn't a valid IPv6 address\" % address)\n\n # expand ipv4-mapped portions of addresses\n if address.count('.') == 3:\n ipv4_start = address.rfind(':', 0, address.find('.')) + 1\n ipv4_end = address.find(':', ipv4_start + 1)\n\n if ipv4_end == -1:\n ipv4_end = None # don't crop the last character\n\n # Converts ipv4 address to its hex ipv6 representation. For instance...\n #\n # '5.9.158.75' => '0509:9e4b'\n\n ipv4_bin = _get_address_binary(address[ipv4_start:ipv4_end])\n groupings = [ipv4_bin[16 * i:16 * (i + 1)] for i in range(2)]\n ipv6_snippet = ':'.join(['%04x' % int(group, 2) for group in groupings])\n\n addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, ipv6_snippet, address[ipv4_end + 1:] if ipv4_end else None]\n address = ':'.join(filter(None, addr_comp))\n\n # expands collapsed groupings, there can only be a single '::' in a valid\n # address\n if '::' in address:\n missing_groups = 7 - address.count(':')\n address = address.replace('::', '::' + ':' * missing_groups)\n\n # inserts missing zeros\n for index in range(8):\n start = index * 5\n end = address.index(':', start) if index != 7 else len(address)\n missing_zeros = 4 - (end - start)\n\n if missing_zeros > 0:\n address = address[:start] + '0' * missing_zeros + address[start:]\n\n return address", "def ipv6_to_ipv4(ipv6_str):\n return '.'.join([str(b) for b in ipv6_str[12:]])", "def ipv6_from_ipv4(ipv4_str):\n pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])\n return pchIPv4 + bytearray((int(x) for x in ipv4_str.split('.')))", "def clean_ipv6_address(\n ip_str, unpack_ipv4=False, error_message=_(\"This is not a valid IPv6 address.\")\n):\n try:\n addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))\n except ValueError:\n raise ValidationError(error_message, code=\"invalid\")\n\n if unpack_ipv4 and addr.ipv4_mapped:\n return str(addr.ipv4_mapped)\n elif addr.ipv4_mapped:\n return \"::ffff:%s\" % str(addr.ipv4_mapped)\n\n return str(addr)", "def format_ipv6(value, mask):\n value_ipv6 = \":\".join(re.findall('..', \"{:032x}\".format(value)))\n if mask is None:\n return value_ipv6\n value_mask = \":\".join(re.findall('..', \"{:032x}\".format(mask)))\n return \"{}/{}\".format(value_ipv6, value_mask)", "def toV6(self):\n return V6Address.fromV4(self)", "def is_valid_ipv6_address(address, allow_brackets = False):\n\n if allow_brackets:\n if address.startswith('[') and address.endswith(']'):\n address = address[1:-1]\n\n if address.count('.') == 3:\n # Likely an ipv4-mapped portion. 
Check that its vaild, then replace with a\n # filler.\n\n ipv4_start = address.rfind(':', 0, address.find('.')) + 1\n ipv4_end = address.find(':', ipv4_start + 1)\n\n if ipv4_end == -1:\n ipv4_end = None # don't crop the last character\n\n if not is_valid_ipv4_address(address[ipv4_start:ipv4_end]):\n return False\n\n addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, 'ff:ff', address[ipv4_end + 1:] if ipv4_end else None]\n address = ':'.join(filter(None, addr_comp))\n\n # addresses are made up of eight colon separated groups of four hex digits\n # with leading zeros being optional\n # https://en.wikipedia.org/wiki/IPv6#Address_format\n\n colon_count = address.count(':')\n\n if colon_count > 7:\n return False # too many groups\n elif colon_count != 7 and '::' not in address:\n return False # not enough groups and none are collapsed\n elif address.count('::') > 1 or ':::' in address:\n return False # multiple groupings of zeros can't be collapsed\n\n for entry in address.split(':'):\n if not re.match('^[0-9a-fA-f]{0,4}$', entry):\n return False\n\n return True", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def FilterIPv4InIPv6FormatAddrs(addrs):\n filtered = []\n for addr in addrs:\n ipaddr = ipaddress.ip_interface(addr).ip\n if isinstance(ipaddr, ipaddress.IPv6Address):\n ipv6 = ipaddress.IPv6Address(ipaddr)\n # Check if it's an IPv4-mapped or 6to4 address.\n if ipv6.ipv4_mapped is not None or ipv6.sixtofour is not None:\n continue\n # Check if it's an IPv4-compatible address.\n if ipv6.packed.hex(\n )[:24] == '000000000000000000000000' and not ipv6.is_unspecified:\n continue\n filtered += [addr]\n return filtered", "def to_network_v6(zone: Zone) -> ipaddress.IPv6Network:\n\n labels = zone.name.split(\".\")[:-3]\n\n zone_reverse_str = \"\".join(reversed(labels))\n if len(zone_reverse_str) % 4 != 0:\n for _ in range(4 - (len(zone_reverse_str) % 4)):\n zone_reverse_str += \"0\"\n prefix_str = \":\".join(\n [zone_reverse_str[i : i + 4] for i in range(0, len(zone_reverse_str), 4)]\n )\n prefix_str += f\"::/{len(labels) * 4}\"\n\n return ipaddress.IPv6Network(prefix_str, strict=True)", "def ip_f(x: Text) -> Tuple[Optional[Text], Optional[Text]]:\n try:\n addrv6 = ipaddress.IPv6Address(x)\n return \"ipv6\", str(addrv6.exploded)\n except ipaddress.AddressValueError:\n try:\n ipaddress.IPv4Address(x)\n return \"ipv4\", x\n except ipaddress.AddressValueError:\n pass\n\n return None, None", "def encode_ipv4(self, input):\n return inet_aton(input)", "def update_gateway_with_prefixlen(self, ipv4='', ipv4_prefixlen=0, \n ipv6='', ipv6_prefixlen=0, port_no=''):\n port = self.ports[port_no]\n\n if port.gateway is None:\n port.gateway = Gateway(name=port.name, port_no=port.port_no,\n ipv4=ipv4, ipv4_prefixlen=ipv4_prefixlen,\n ipv6=ipv6, 
ipv6_prefixlen=ipv6_prefixlen)\n else:\n port.gateway.name = port.name\n port.gateway.ipv4 = netaddr.IPAddress(ipv4)\n port.gateway.ipv4_subnet = netaddr.IPNetwork(ipv4 + '/' + str(ipv4_prefixlen))\n port.gateway.ipv6 = netaddr.IPAddress(ipv6)\n port.gateway.ipv6_subnet = netaddr.IPNetwork(ipv6 + '/' + str(ipv6_prefixlen))\n port.gateway.port_no = port.port_no\n\n self.tbl.update_entry(subnet=port.gateway.ipv4_subnet, receive_port=port, metric=0, source=\"CONNECTED\")", "def format_url_address(address):\n try:\n addr = netaddr.IPAddress(address)\n if addr.version == constants.IPV6_FAMILY:\n return \"[%s]\" % address\n else:\n return str(address)\n except netaddr.AddrFormatError:\n return address", "def bytes_to_ipv6_str(buff):\n if len(buff) != DataDescription.B_SEQ_IPv6_LEN:\n raise TypeError(\"Invalid input\")\n return \"%02X%02X:%02X%02X:%02X%02X:%02X%02X:\"\\\n \"%02X%02X:%02X%02X:%02X%02X:%02X%02X\" % buff", "def _FixIPv6Address(self, netblocks):\n new_list = []\n length = len(netblocks)\n if length > 0:\n number_ipv6 = 0\n for netblock in netblocks:\n if netblock.version == 4:\n new_list.append(netblock)\n elif netblock.version == 6:\n number_ipv6 += 1\n if number_ipv6 == length:\n return True, new_list\n return False, new_list", "def get_mask_ipv6(bits):\n\n if bits > 128 or bits < 0:\n raise ValueError('A mask can only be 0-128 bits, got %i' % bits)\n elif bits == 128:\n return FULL_IPv6_MASK\n\n # get the binary representation of the mask\n mask_bin = _get_binary(2 ** bits - 1, 128)[::-1]\n\n # breaks it into sixteen character groupings\n groupings = [mask_bin[16 * i:16 * (i + 1)] for i in range(8)]\n\n # converts each group into its hex value\n return ':'.join(['%04x' % int(group, 2) for group in groupings]).upper()", "def ipv6_networks(view):\n return \"ipv6network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"", "def ipwrap(address: Any) -> str:\n try:\n if not isinstance(address, int):\n ipaddress.IPv6Address(address)\n return f\"[{address}]\"\n except ValueError:\n pass\n\n return str(address)", "def PrefixIpv6Address(self):\n if self.force_auto_sync:\n self.get('PrefixIpv6Address')\n return self._PrefixIpv6Address", "def subnetwork_to_ip_range(subnetwork):\n \n try:\n fragments = subnetwork.split('/')\n network_prefix = fragments[0]\n netmask_len = int(fragments[1])\n \n # try parsing the subnetwork first as IPv4, then as IPv6\n for version in (socket.AF_INET, socket.AF_INET6):\n \n ip_len = 32 if version == socket.AF_INET else 128\n \n try:\n suffix_mask = (1 << (ip_len - netmask_len)) - 1\n netmask = ((1 << ip_len) - 1) - suffix_mask\n ip_hex = socket.inet_pton(version, network_prefix)\n ip_lower = int(binascii.hexlify(ip_hex), 16) & netmask\n ip_upper = ip_lower + suffix_mask\n \n return (ip_lower,\n ip_upper,\n 4 if version == socket.AF_INET else 6)\n except:\n pass\n except:\n pass\n \n raise ValueError(\"invalid subnetwork\")", "def check_ipv4_ipv6_fqdn(val):\n\n try:\n val = u'{0}'.format(val)\n ip = ip_network(val, strict=False)\n return ip.version\n except ValueError:\n return 0", "def test_ipv6_in_net(self):\n test_ip = ip_address.IPAddress(\"2001:0db8:85a3:08d3:1319:8a2e:0370:7344/24\")\n assert test_ip.in_network(\"2001:0d00::/24\")\n assert test_ip.in_network(\"2001:0d00::/29\")", "def get_if_raw_addr6(iff):\n ip6 = get_if_addr6(iff)\n if ip6 is not None:\n return inet_pton(socket.AF_INET6, ip6)\n\n return None", "def 
_get_reverse_for_ipv6_address(cls, address):\n # @todo: Impelement properly\n parts = [str(x) for x in reversed(IPv6(address).iter_bits())][1:]\n while parts:\n for suffix in (\".ip6.int\", \".ip6.arpa\"):\n name = \"%s.%s\" % (\".\".join(parts), suffix)\n zone = DNSZone.get_by_name(name)\n if zone:\n return zone\n parts.pop(0) # Remove first par\n return None", "def format_ipv4(value, mask=None):\n value_ipv4 = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(value))])\n if mask is None:\n return value_ipv4\n value_mask = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(mask))])\n return \"{}/{}\".format(value_ipv4, value_mask)", "def SupportsIPv6(self) -> bool:", "def test_ipv6_validation_success():\n assert is_ipv6('2001:db8::ff00:42:8329')", "def convert_relu6(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n out = _op.clip(x, 0.0, 6.0)\n g.add_node(op.output(\"Out\")[0], out)", "def fromV4(klass, ip):\n if not isinstance(ip, V4Address):\n ip = V4Address(str(ip))\n return klass(\"::ffff:{0!s}\".format(ip))" ]
[ "0.7771464", "0.72357774", "0.711864", "0.68538207", "0.666113", "0.6537633", "0.618609", "0.6113375", "0.5976843", "0.59432304", "0.58778465", "0.58773744", "0.5876789", "0.58582246", "0.57257426", "0.5724778", "0.5716151", "0.5713014", "0.5699181", "0.5686832", "0.56770265", "0.56500673", "0.5640726", "0.5635623", "0.56349796", "0.55191565", "0.550838", "0.548835", "0.5479643", "0.5477196" ]
0.7661712
1
Returns base16 encoded string.
def b16encode(s: str) -> str:
    return base64.b16encode(s.encode()).decode()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_encode_random(self):\n return os.urandom(16).encode('hex')", "def get_byte_string(self):\n return \"\".join(['%02X' % i for i in self._data]).decode('hex')", "def b16decode(s: str) -> str:\n return base64.b16decode(s.encode()).decode()", "def to_encoding(self):\n return \" \".join(self.b)", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 16)", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def toString(self):\r\n str = \"\"\r\n for i in range(len(self.Data)):\r\n str += (self.__hexLookup[int(self.Data[i] / 16)]).decode()\r\n str += (self.__hexLookup[int(self.Data[i] % 16)]).decode()\r\n \r\n return str", "def encoded_string(self) -> bytes:\n return self.string.encode(self.encoding)", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def encode(self) -> str: \n\n # Encode the object\n b64 = base64.b64encode(zlib.compress(bytearray(json.dumps(self.data,sort_keys=True),'utf-8')))\n\n # Append the version code string and return it\n return f'{self.versionCode}{b64.decode(\"utf-8\")}'", "def encode(self):\n packed = struct.pack(\"<2Bl\",\n self.device_number,\n self.command_number,\n self.data)\n if self.message_id is not None:\n packed = packed[:5] + struct.pack(\"B\", self.message_id)\n return packed", "def uint16_to_bytes(value):\n return struct.pack(\"b\", value)", "def to_hexstr(self,*,skip_long_data=False):\n b = self.to_ba()\n if len(b)>16:\n dat = Utils.hexstr(b[:-2], separator=\"\",skip_long_data=skip_long_data) + Utils.hexstr(b[-2:], separator=\"\")\n else:\n dat = Utils.hexstr(b, separator=\"\")\n return dat", "def encode(self) -> bytes:\n code: str = str(self.id) + \" \" \\\n + str(self.time) + \" \" \\\n + str(self.action) + \" \" \\\n + str(self.was_success) + \" \" \\\n + self.acting_username + \" \" \\\n + str(self.source_account_id) + \" \" \\\n + str(self.destination_account_id) + \" \" \\\n + str(self.funds_amount)\n return code.encode()", "def uint16_t(n):\n return int(n).to_bytes(2, byteorder='little', signed=False)", "def int2hex(n: int) -> str:", "def string_raw(self):\n return \"x%x\" % self.encoded", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def _reg_encode_utf16_list(self, xlist):\n t = '' \n for x in xlist: \n t += self._reg_encode_utf16(x + u'\\u0000') # null term \n t += self._reg_encode_utf16(u'\\u0000') # end of list (double null) \n return t", "def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring", "def encode(self) -> bytes:\n\n # unsigned char dmac[6];\n # unsigned char smac[6];\n # uint16_t ethertype;\n # unsigned char payload[];\n\n t = struct.pack(\"H\", socket.htons(self.typ))\n return self.dmac + self.smac + t + self.payload", "def toHashBase(self) -> str:\r\n\r\n if self.hashBase != '':\r\n self_repr = '{}'.format(self.hashBase)\r\n else:\r\n self_repr = ''\r\n self_repr += '{}{}{}'.format(str(self.data), self.version,\r\n self.compatibilityLimit)\r\n if len(self.script) > 0:\r\n self_repr += ' '.join(self.script)\r\n if self.seriesSignature != '':\r\n self_repr += self.seriesSignature\r\n if self.pha != '':\r\n self_repr += self.pha\r\n for key, value in self.identityInfo.items():\r\n self_repr += '{}{}'.format(key, value)\r\n if self.message != '':\r\n self_repr += self.message\r\n\r\n 
return self_repr", "def to_binary_string(x):\n return \"{0:b}\".format(x)", "def binary_encode(self):\n if self._legacy:\n return bytearray([TransactionVersion.LEGACY]) + self._binary_encode_data()\n encoder = j.data.rivine.encoder_sia_get()\n encoder.add_array(bytearray([TransactionVersion.STANDARD]))\n encoder.add_slice(self._binary_encode_data())\n return encoder.data", "def dumps(self) -> str:\n bits = dill.dumps(self)\n return base64.b64encode(bits).decode(\"ascii\")", "def encode(value: int, length: int) -> str:\n if length < 1 or length > 6:\n raise ValueError(\"Parameter length must be in range [1-6]\")\n\n v: int = value\n result: str = \"\"\n\n for i in range(length, 0, -1):\n result = str(Base64._CHARSET[v & 63]) + result\n v >>= 6\n\n return result", "def v4():\n return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2] # Drop '==' padding", "def _string_to_encoded_string(deck_string):\n return deck_string.encode(\"base64\")", "def to_base58(self) -> str:\n return base58.b58encode(self.raw).decode('utf-8')", "def to_base58(self) -> str:\n return base58.b58encode(self.raw).decode('utf-8')" ]
[ "0.6492392", "0.63015807", "0.5999862", "0.5878765", "0.5876465", "0.5854374", "0.58291274", "0.57957095", "0.5775614", "0.57602257", "0.5722614", "0.57142806", "0.56969523", "0.56813633", "0.5668014", "0.56313115", "0.5608445", "0.5593049", "0.55623966", "0.5544264", "0.5541833", "0.5528432", "0.5480942", "0.54728115", "0.5470893", "0.5469925", "0.5460488", "0.54493", "0.54450524", "0.54450524" ]
0.7805905
0
Returns base16 decoded string.
def b16decode(s: str) -> str:
    return base64.b16decode(s.encode()).decode()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_hex(x):\n return base64.b16decode(x, True)", "def b16encode(s: str) -> str:\n return base64.b16encode(s.encode()).decode()", "def decode_hex(self, s):\n return self.transcode(int(s, 16))", "def decode_utf16le(s):\n if b\"\\x00\\x00\" in s:\n index = s.index(b\"\\x00\\x00\")\n if index > 2:\n if s[index - 2] != b\"\\x00\"[0]: #py2+3\n # 61 00 62 00 63 64 00 00\n # ^ ^-- end of string\n # +-- index\n s = s[:index + 2]\n else:\n # 61 00 62 00 63 00 00 00\n # ^ ^-- end of string\n # +-- index\n s = s[:index + 3]\n if (len(s) % 2) != 0:\n s = s + b\"\\x00\"\n s = s.decode(\"utf16\")\n s = s.partition('\\x00')[0]\n return s", "def __decodeString(self,ascii):\n second = ascii%256\n first = (ascii-second)/256\n return str(chr(first))+str(chr(second))", "def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs", "def read_string(self):\n\n # length may be -1, 0, or a positive integer\n length = self.read_and_unpack('l')[0]\n if length > 0:\n return self.read(length).decode(self.utf_16_decoder)\n else:\n return ''", "def _read16(input):\n return struct.unpack(\"<H\", _read_exactly(input, 2))[0]", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def base32_decode(encoded_bytes: bytes) -> str:\n\n # decode the bytes from base32\n # then, decode the bytes-like object to return as a string\n return base64.b32decode(encoded_bytes).decode(\"utf-8\")", "def decode_string(self, value):\r\n return value", "def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')", "def DEIMdecode(byte):\n\n result = ''\n if byte & 0x80:\n if byte & 0x40: result += 'B'\n else: result += 'D'\n if byte & 0x20: result += '-'\n result += '%d' % ((byte >> 3) & 0x03)\n if byte & 0x04: result += '-'\n result += '%d' % (byte & 0x03)\n else:\n if byte == 0111: result += 'N'\n elif byte == 0151: result += 'R'\n elif byte == 0171: result += 'F'\n elif byte == 0200: result += 'P'\n else: result += 'A%3.3o' % byte\n return result", "def decode_string(encoded: bytes) -> str:\n if encoded.startswith(codecs.BOM_UTF8):\n return encoded.decode(\"utf-8-sig\")\n elif encoded.startswith(codecs.BOM_UTF16):\n encoded = encoded[len(codecs.BOM_UTF16) :]\n return encoded.decode(\"utf-16\")\n else:\n # No BOM to determine encoding, try utf-8\n return encoded.decode(\"utf-8\")", "def decode(self, s):", "def decode(self, s):", "def reverse(self, s):\n return '\\x16%s\\x16' % s", "def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)", "def GetBits16(self):\n raw_data = self.GetBits(16)[0]\n arg = \"%c%c%c%c\" % (0,0, raw_data[0], raw_data[1])\n return struct.unpack(\">L\", arg)[0]", "def get16bits(data):\n # return int(binascii.hexlify(data), 16)\n return int.from_bytes(data.encode(), 'big')", "def decode_base64(in_str):\n return base64.decodestring(in_str)", "def decode(self, encoded):", "def decode_base64(in_str):\n import base64\n return base64.decodestring(in_str)", "def decode_16_bit_2ch(data):\n\n d = array.array('h', data)\n left = d[0::2]\n right = d[1::2]\n return left, right", "def b64decode(s: str) -> str:\n return base64.b64decode(s.encode()).decode()", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def de_base64(msg):\n 
try:\n msg_ascii = msg.encode('ascii')\n msg_bytes = base64.b64decode(msg_ascii)\n msg_decoded = msg_bytes.decode('ascii')\n return msg_decoded\n except:\n print('Invalid base64-encoded string')", "def decode(decode_format):\n return output_from_decode", "def unpack_uuid(data):\n return data[:16], 16" ]
[ "0.71067536", "0.7025799", "0.6532113", "0.6512282", "0.6127934", "0.6112855", "0.6080767", "0.5956205", "0.59228486", "0.58263737", "0.576815", "0.57412255", "0.5718248", "0.57160187", "0.5713707", "0.5713707", "0.56611294", "0.5641491", "0.56263244", "0.5615537", "0.56049997", "0.5586328", "0.55708826", "0.55699366", "0.55610335", "0.5560292", "0.5560292", "0.5555709", "0.55449075", "0.55422497" ]
0.83401346
0
Return SHA256 hexdigest of string s.
def sha256(s: str) -> str:
    return hashlib.sha256(s.encode()).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def get_256_hash_from_string(string):\n\n sha256 = hashlib.sha256()\n sha256.update(string.encode('utf-8'))\n\n return sha256.hexdigest()", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode())\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s)\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)", "def hash_sbox(f):\n hf = sha256()\n for x in f:\n hf.update(hex(x).encode('utf-8'))\n return hf.hexdigest()", "def sha256_2_string(string_to_hash):\n\n # Solution for (1a)\n import hashlib\n first_sha = hashlib.sha256(string_to_hash.encode(\"utf8\"))\n second_sha = hashlib.sha256(first_sha.digest())\n return second_sha.hexdigest()\n\n # Placeholder for (1a)\n return \"deadbeef\"", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def get_string_sha256(str_to_convert):\n hasher = hashlib.sha256()\n hasher.update(bytearray(str_to_convert.encode('ascii')))\n return base64.b64encode(hasher.digest())", "def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)", "def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()", "def getChecksum(self, s):\n \n chksum = 0\n for ch in s:\n chksum = chksum + ord(ch)\n \n return hex(chksum%256)[2:]", "def sha3_256(x):\n return hashlib.sha3_256(x).digest()", "def elf_hash(s):\n h = 0\n for c in s:\n h = (h << 4) + ord(c)\n t = (h & 0xF0000000)\n if t != 0:\n h = h ^ (t >> 24)\n h = h & ~t\n return h", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def _s_hash(fn, data: str):\n\n return fn(_b(data)).hexdigest()", "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def digest(string):\n return md5(string.encode(\"utf-8\")).hexdigest()", "def sha256(f: IO[str]) -> str:\n pos = f.tell()\n f.seek(0)\n digest = hashlib.sha256(f.read().encode()).hexdigest()\n f.seek(pos)\n\n return digest", "def hash_with_salt(self, s):\n\n data = f'{s} {self.salt}'.encode('ascii') # encode string to raw bytes object\n hash_obj = hashlib.md5(data) # hash it \n if self.trunc > 0:\n hash_txt = hash_obj.hexdigest()[0:self.trunc] # get truncated hash symbols\n else:\n hash_txt = hash_obj.hexdigest()\n return f'{s} 
{hash_txt}'" ]
[ "0.78749025", "0.78616035", "0.7789191", "0.7787693", "0.77747834", "0.7772844", "0.7752103", "0.7292027", "0.7292027", "0.71310705", "0.70151746", "0.6988451", "0.6974215", "0.6970847", "0.69703436", "0.696001", "0.69504154", "0.6933909", "0.688362", "0.6874997", "0.6868837", "0.68475115", "0.68470204", "0.6740903", "0.6725309", "0.67123234", "0.6701599", "0.6693095", "0.66650563", "0.66566527" ]
0.8390986
0
Evaluate all metrics in the collection and return the results.
def evaluate(self) -> Dict[str, Any]:
    kwargs = {"ids": self._ids}
    return {
        metric.value: self._metric_funcs[metric](
            self._targets, self._preds, **kwargs
        )
        for metric in self._metrics
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics", "def evaluate(self, data_collection):\n outputs = self.meter.evaluate(data_collection)\n outputs.add_tags(self.tagspace)\n return outputs", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def compute_metrics(self, results: list) -> dict:", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures", "def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()", "def evaluate(self):\n iterator = self._iterators['main']\n\n if self.eval_hook:\n self.eval_hook(self)\n\n summary = reporting.DictSummary()\n\n progress = IterationStatus(len(iterator))\n if self._progress_bar:\n pbar = _IteratorProgressBar(iterator=progress)\n\n last_iter = len(iterator) - 1\n with _in_eval_mode(self._targets.values()):\n for idx, batch in enumerate(iterator):\n last_batch = idx == last_iter\n progress.current_position = idx\n observation = {}\n with reporting.report_scope(observation):\n if isinstance(batch, (tuple, list)):\n outs = self.eval_func(*batch)\n elif isinstance(batch, dict):\n outs = self.eval_func(**batch)\n else:\n outs = self.eval_func(batch)\n for metric in self._metrics:\n metric(batch, outs, last_batch)\n summary.add(observation)\n\n if self._progress_bar:\n pbar.update()\n\n if self._progress_bar:\n pbar.close()\n\n return summary.compute_mean()", "def evaluate(self, csls, evals, mode=\"csls\"):\n metrics = {}\n for eval_func in evals:\n assert hasattr(self, eval_func), \\\n \"Eval Function {0} not found\".format(eval_func)\n metrics = getattr(self, eval_func)(csls, metrics, mode=mode)\n return metrics", "def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result", "def evaluate(self, X: np.ndarray, y: list, X_train=None, y_train=None) -> dict:\n metrics = self.compute_metrics(X, y)\n\n print(\"evaluation: \", metrics)\n return metrics", "def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics", "def evaluate(self, test):\n self.logger.info(\"Testing model over test set\")\n metrics = self.run_evaluate(test)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n return metrics", "def compute_metrics(self):\n pass", "def evaluate(self, data_collection):\n # map 
inputs\n mapped_input_dict = self._dict_from_data_collection(self.input_mapping,\n data_collection)\n\n # combine auxiliary inputs with mapped inputs\n all_inputs = mapped_input_dict.copy()\n all_inputs.update(self.auxiliary_inputs)\n\n # evaluate meter\n outputs_dict = self.evaluate_raw(**all_inputs)\n\n # combine auxiliary outputs with raw outputs\n all_outputs = outputs_dict.copy()\n all_outputs.update(self.auxiliary_outputs)\n\n\n # map meter evaluations back to data_collection form\n mapped_output_data_collection = self._data_collection_from_dict(\n self.output_mapping, all_outputs)\n\n # combine with original data, add tags as necessary\n mapped_output_data_collection.add_tags(self.tagspace)\n\n return mapped_output_data_collection", "def evaluate(self, batches):\n total_batches = batches.batches_per_epoch()\n logs = dict()\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"loss\": self.loss_op,\n \"accuracy\": self.accuracy_op}\n result = self.session.run(fetch_dict, feed_dict)\n for metric in result:\n if not metric in logs:\n logs[metric] = []\n logs[metric].append(result[metric])\n for metric in logs:\n logs[metric] = sum(logs[metric]) / len(logs[metric])\n return logs", "def evaluateWithSeveralMetrics(self, dataset, metricSets=None):\n if metricSets is None: # all metrics\n metricSets = [{\"metricName\": \"areaUnderROC\"},\n {\"metricName\": \"areaUnderPR\"},\n {\"metricName\": \"precisionAtGivenRecall\", \"metricParams\": {\"recallValue\": 0.05}}] \n resultMetricSets = [None for _ in range(len(metricSets))]\n pagrs = []\n for i in range(len(metricSets)):\n params = metricSets[i]\n if params[\"metricName\"] != \"precisionAtGivenRecall\":\n value = self.evaluate(dataset, params)\n if len(params.keys()) == 1:\n key = params[\"metricName\"]\n else:\n key = params[\"metricName\"] + \" at recallValue \" + str(params[\"metricParams\"][\"recallValue\"])\n resultMetricSets[i] = {key:value}\n else: \n pagrs.append([i,params[\"metricParams\"][\"recallValue\"]])\n continue\n if None in resultMetricSets:\n pr_params = {\"metricName\": \"precisionAtGivenMultipleRecalls\", \"metricParams\": {\"recallValues\": [x[1] for x in pagrs]}}\n precisions = self.evaluate(dataset, pr_params)\n i = 0\n for item in pagrs:\n key = \"precisionAtGivenRecall\" + \" at recallValue \" + str(pagrs[i][1])\n resultMetricSets[item[0]] = {key:precisions[i]}\n i += 1 \n \n return resultMetricSets", "def evaluate(self, dataset):\n logging.info('Start evaluation')\n\n loss, predictions, labels = self.run_one_epoch(dataset, RunnerPhase.VALIDATE)\n\n metrics_dict = self.metric_class.get_metrics_dict(predictions, labels)\n\n eval_info = self.metric_class.metrics_dict_to_str(metrics_dict)\n\n logging.info(eval_info)\n\n logging.info('Evaluation finished')\n\n return metrics_dict", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, 
weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def evaluate(self, size: int) -> dict:\n if len(self.results) == 0:\n print_log(\n f'{self.__class__.__name__} got empty `self.results`. Please '\n 'ensure that the processed results are properly added into '\n '`self.results` in `process` method.',\n logger='current',\n level=logging.WARNING)\n\n if self.collect_device == 'cpu':\n results = collect_results(\n self.results,\n size,\n self.collect_device,\n tmpdir=self.collect_dir)\n else:\n results = collect_results(self.results, size, self.collect_device)\n\n if is_main_process():\n # cast all tensors in results list to cpu\n results = _to_cpu(results)\n _metrics = self.compute_metrics(results) # type: ignore\n # Add prefix to metric names\n if self.prefix:\n _metrics = {\n '/'.join((self.prefix, k)): v\n for k, v in _metrics.items()\n }\n metrics = [_metrics]\n else:\n metrics = [None] # type: ignore\n\n broadcast_object_list(metrics)\n\n # reset the results list\n self.results.clear()\n return metrics[0]", "def _evaluate(self):\n if not self._evaluated:\n query = { 'query' : self._query.serialize() }\n if self._min_score is not None:\n query['min_score'] = self._min_score\n if self._highlight is not None:\n query['highlight'] = self._highlight\n if self._order is not None:\n query['sort'] = self._order\n\n params = {}\n if self._offset is not None:\n params['from'] = int(self._offset)\n if self._limit is not None:\n params['size'] = int(self._limit)\n if self._only_fields:\n params['fields'] = \",\".join(self._only_fields)\n\n self._results = self._document._meta.search_engine.search(\n query,\n **params\n )\n self._evaluated = True\n\n return self._results", "def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()", "def _evaluate(self, train_x, train_y, test_x, test_y, n_targets, name):\n r_temp = {}\n for metric_name in self.metrics:\n r_temp.update({f\"{metric_name}_Model\": name, f\"{metric_name}_Sum\": 0,\n f\"{metric_name}_Min\": 1000000, f\"{metric_name}_Max\": 0})\n\n for i in range(self.repetitions):\n is_nan = True\n while (is_nan):\n model = self.get_model(train_x.shape[1], n_targets)\n model.fit(train_x, train_y, **self.fit_kwargs)\n result = model.predict(test_x)\n is_nan = np.any(np.isnan(result))\n del model\n\n for 
metric_name in self.metrics:\n metric = self.get_metrics(metric_name)\n value = metric(result, test_y)\n r_temp[f\"{metric_name}_Sum\"] += value\n if r_temp[f\"{metric_name}_Min\"] > value:\n r_temp[f\"{metric_name}_Min\"] = value\n if r_temp[f\"{metric_name}_Max\"] < value:\n r_temp[f\"{metric_name}_Max\"] = value\n keras.backend.clear_session()\n for metric_name in self.metrics:\n r_temp[f\"{metric_name}_Mean\"] = r_temp[f\"{metric_name}_Sum\"] / self.repetitions\n return r_temp", "def _evaluate_fn(model, dataset):\n # Reset the local variables so that the returned metrics are computed using\n # the given data. Similar to the `reset_states` method of `tf.metrics.Metric`.\n for var in model.local_variables:\n if var.initial_value is not None:\n var.assign(var.initial_value)\n else:\n var.assign(tf.zeros_like(var))\n\n def eval_fn(dummy_state, batch):\n \"\"\"Evaluates the model on a batch.\"\"\"\n model.forward_pass(batch, training=False)\n return dummy_state\n\n # Evaluate on the dataset.\n dataset.reduce(initial_state=0, reduce_func=eval_fn)\n\n # Obtain the metrics.\n results = collections.OrderedDict()\n local_outputs = model.report_local_outputs()\n for name, metric in local_outputs.items():\n if isinstance(metric, list) and (len(metric) == 2):\n # Some metrics returned by `report_local_outputs()` can have two scalars:\n # one represents `sum`, and the other represents `count`. Ideally, we want\n # to return a single scalar for each metric.\n results[name] = metric[0] / metric[1]\n else:\n results[name] = metric[0] if isinstance(metric, list) else metric\n return results", "def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n output_list, ref_list = [], []\n for i in range(n_data):\n output_list.append(data[i]['system_output'])\n ref_list.append(data[i]['reference'])\n\n input_list = add_question(dimension=dim, output=output_list, ref=ref_list, task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores", "def evaluate(self):\n scores = []\n scores.append(self.word_analogy())\n print(\"Word Analogy (acc): \", scores[0])\n scores.append(self.word_similarity())\n print(\"Word Similarity (MSE): \", scores[1])\n scores.append(self.concept_categorization())\n print(\"Concept Categorization (purity): \", scores[2])\n scores.append(self.sentiment_analysis())\n print(\"Sentiment Analysis (acc): \", scores[3])\n return scores", "def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) 
# number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))", "def compute(self) -> Dict[str, np.ndarray]:\n return {name: self.metrics[name].compute() for name in self.metrics}", "def evaluate(self, metrics, verbose=True, as_table=False, table_sep='\\t'):\n\n if metrics is None:\n metrics = list(['MAE', 'RMSE'])\n\n results = RatingPredictionEvaluation(verbose=verbose, as_table=as_table, table_sep=table_sep, metrics=metrics\n ).evaluate_recommender(predictions=self.predictions,\n test_set=self.test_set)\n\n for metric in metrics:\n self.evaluation_results[metric.upper()] = results[metric.upper()]" ]
[ "0.70427406", "0.69830465", "0.6881999", "0.6870439", "0.67992187", "0.6793207", "0.67534006", "0.6707386", "0.6638833", "0.6621402", "0.65452796", "0.65181375", "0.64700747", "0.6465631", "0.63947964", "0.6357802", "0.63296866", "0.6318049", "0.626176", "0.626176", "0.6245624", "0.6239238", "0.61940527", "0.61901724", "0.61759263", "0.6152412", "0.6124462", "0.60963714", "0.60910374", "0.60841805" ]
0.7014923
1
Get the start date of the analysis period based on an end date
def get_start_date(end_date=datetime.now(), num_years=ANALYSIS_PERIOD):
    start_date = end_date - timedelta(num_years*365)
    start_date = pd.to_datetime(date(start_date.year, start_date.month, start_date.day))
    return(start_date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_start_and_end_dates(new_start_date=None):\n curr_date = datetime.utcnow()\n curr_date = pd.to_datetime(date(curr_date.year, curr_date.month, curr_date.day))\n if not(new_start_date):\n end_date = curr_date\n start_date = get_start_date(end_date, ANALYSIS_PERIOD)\n else:\n start_date = new_start_date\n end_date = curr_date\n \n start_date = start_date.replace(tzinfo=timezone.utc)\n end_date = end_date.replace(tzinfo=timezone.utc)\n return start_date, end_date", "def get_first_period(start_record, end_record, start_period, end_period):\n start_record, end_record, start_period, end_period = to_datetime(start_record, end_record, start_period, end_period)\n pspan = end_period - start_period\n delta_year = relativedelta(years=1)\n # what is the first day of year of the start of the period that fits the record?\n start_rec_year = start_record.year\n d = datetime(start_rec_year, start_period.month, start_period.day)\n if d < start_record:\n d = d + delta_year\n delta_years = start_period.year - d.year\n e = end_period + relativedelta(years=-delta_years)\n return (d, e)", "def date_start_end(mytrip_start_date,mytrip_end_date):\n mytrip_start_date = dt.date(2015, 8, 10)\n mytrip_end_date= dt.date(2015, 8,14)\n prev_year = dt.timedelta(days=365)\n start_dt_strftime=dt.datetime.strptime('2014-08-10',\"%Y-%m-%d\")\n end_dt_strftime=dt.datetime.strptime('2014-08-14',\"%Y-%m-%d\") \n date_start_end_results=session.query(func.min(measurements.tobs), func.avg(measurements.tobs),func.max(measurements.tobs)).\\\n filter(measurements.date >= mytrip_start_date).filter(measurements.date <= end_dt_strftime).all()\n return(date_start_end_results)", "def report_start_and_end_date(self):\n start_date, end_date = self.start_date, self.end_date\n if start_date:\n db_import_time = time.strptime(str(start_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n start_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n start_date = str(start_date) + 'Z'\n else:\n today = datetime.now()\n earlier = today - timedelta(days=30)\n earlier_str = earlier.strftime(\"%Y-%m-%dT%H:%M:%S\")\n start_date = earlier_str + 'Z'\n\n if end_date:\n db_import_time = time.strptime(str(end_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n end_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n end_date = str(end_date) + 'Z'\n else:\n today = datetime.now()\n earlier_str = today.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end_date = earlier_str + 'Z'\n\n return start_date, end_date", "def period_dates(period):\n end = date.today() - timedelta(days=1) # yesterday\n\n if period == LAST_7_DAYS:\n start = end - timedelta(days=7)\n elif period == LAST_30_DAYS:\n start = end - timedelta(days=30)\n elif period == LAST_90_DAYS:\n start = end - timedelta(days=90)\n elif ALL_TIME:\n start = settings.GA_START_DATE\n\n return start, end", "def _get_first_end_date(self):\n ins = acm.FInstrument['SACPI']\n market = \"internal\"\n start_year = acm.Time.DateAddDelta(acm.Time.FirstDayOfYear(self.start_date), 0, 0, -1)\n this_year_prices = acm.FPrice.Select(\"instrument='%s' and market='%s' and day>'%s' and day<'%s'\" \n % (ins.Name(), market, start_year, self.start_date))\n\n prices = sorted(this_year_prices, key=lambda price: price.Day(), reverse=True)\n last_sacpi_day = prices[0].Day()\n sacpi_plus_five_m 
= acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(last_sacpi_day, 0, 5, 0))\n return sacpi_plus_five_m", "def _get_start_date(self):\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(\n today.year,\n ((quarter - 1) * 3) + 1,\n 1\n )\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, \"%Y-%m-%d\").date()\n except Exception as e:\n raise ParseError(\"start argument not valid\")\n\n self.start_date = start_date", "def get_start_date():\n\n\tprint()\n\tprint('Getting the first start date to query for.')\n\tprint()\n\tprint('The date must be greater than Jan 1st, 2018')\n\tyear = int(input('Enter a value for the year: '))\n\tmonth = int(input('Enter a value for the month: '))\n\tday = int(input('Enter a value for the day: '))\n\tprint()\n\n\treturn datetime.datetime(year, month, day)", "def starting_date(self):\n return datetime.date(2016, 1, 4)", "def _get_end_date_without_look_ahead(self, end_date=None):\n\n current_time = self.timer.now() + RelativeDelta(second=0, microsecond=0)\n end_date = end_date + RelativeDelta(second=0, microsecond=0) if end_date is not None else current_time\n\n return min(current_time, end_date)", "def get_start_date(self):\n return \"%d%02d\" % (self.year, self.term)", "def start_end_conversion(date_time: datetime, frequency: str, to_end: bool = True) -> datetime:\n return period(date_time, TimeHelper.freq_map[frequency])[int(to_end)]", "def get_start_date(self, req, milestone):\n\n if milestone.start:\n return milestone.start.date()\n elif 'approx_start_date' in req.args:\n return datetime.strptime(req.args['approx_start_date'], '%Y-%m-%d').date() + timedelta(days=1)", "def get_ticker_start_and_end_dates(df_data):\n if df_data.empty:\n start_date, end_date = get_start_and_end_dates()\n else:\n new_start_date = df_data.index.max() + timedelta(days=1)\n logger.debug(f'new start date = {new_start_date}')\n start_date, end_date = get_start_and_end_dates(new_start_date)\n logger.debug(f'returning {start_date} and {end_date} from get_ticker_start_and_end_dates')\n return start_date, end_date", "def get_diff_start_end(start_date, end_date, min_yr=None, monthly=False):\n day_s, mon_s, yr_s = start_date[0], start_date[1], start_date[2]\n day_e, mon_e, yr_e = end_date[0], end_date[1], end_date[2]\n\n if not min_yr:\n min_yr = yr_s\n\n start, end = date(yr_s, mon_s, day_s), date(yr_e, mon_e, day_e)\n\n # For daily date\n if not monthly:\n # Calculate the days till the start and end\n till_start_days = (start - date(min_yr, Month.January, 1)).days\n till_end_days = (end - date(min_yr, Month.January, 1)).days\n return till_start_days, till_end_days + 1\n\n # For monthly data\n start, end = date(yr_s, mon_s, day_s), date(yr_e, mon_e, day_e)\n till_start_mon = len(list(rrule.rrule(rrule.MONTHLY, dtstart=date(min_yr, Month.January, 1), until=start)))\n till_end_mon = len(list(rrule.rrule(rrule.MONTHLY, dtstart=date(min_yr, Month.January, 1), until=end)))\n if mon_s == Month.January and yr_s == min_yr:\n till_start_mon = 0\n return till_start_mon, till_end_mon", "def start_end(start_date,end_date):\n\n session = Session(engine)\n\n # Query from database full temp results for dates range\n temp_results = 
session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).\\\n filter(measurement.date <= end_date).all()\n \n session.close() \n \n return jsonify(temp_results)", "def start_end_diff(self):\n s = self.l_start_date\n e = self.l_end_date\n start = datetime.date(s.year, s.month, s.day)\n end = datetime.date(e.year, e.month, e.day)\n diff = start - end\n return abs(diff.days)", "def compute_default_start_date(display_end_date, num_weeks):\n # compute the end of the week that we will be displaying and then\n # move the number of weeks prior\n days_earlier = 7 * num_weeks - 1\n start_date = display_end_date - timedelta(days_earlier)\n return start_date", "def get_reference_period(\n dataset: Dict,\n ) -> Tuple[Optional[datetime], Optional[datetime]]:\n reference_period = dataset[\"dataset_date\"]\n if not reference_period:\n return None, None\n date_info = DateHelper.get_date_info(reference_period)\n return date_info[\"startdate\"], date_info[\"enddate\"]", "def cal_start(self):\n return self.datetime_start", "def get_period_range(self, period, start, end, inclusive_start=True, inclusive_end=True):\n if not isinstance(start, datetime.datetime):\n start = self.get_date_from_string(start, '%Y-%m-%d')\n if not isinstance(end, datetime.datetime):\n end = self.get_date_from_string(end, '%Y-%m-%d')\n\n if period == 'month':\n get_period = self.get_current_month_range\n get_next_period = self.get_next_month\n get_previous_period = self.get_previous_month\n if period == 'week':\n get_period = self.get_current_week_range\n get_next_period = self.get_next_week\n get_previous_period = self.get_previous_week\n\n #####################\n # inclusive_start means that the result set will include the whole period\n # containing the start date. 
Likewise for inclusive_end.\n #\n # If you are, say, reporting on a 'last completed month' or something,\n # but your report date (and end date) is mid-month or something, then setting 'inclusive_end'\n # to False will insure that the report ends with the month prior to the\n # end date.\n #\n # If you're doing projections starting with the month following the one\n # you're in, setting inclusive_start to False will insure that the first\n # period in the range is the one *after* the period you're in now.\n #######################\n if not inclusive_start:\n start = get_next_period(start)[0]\n if not inclusive_end:\n end = get_previous_period(end)[1]\n\n returnvals = []\n\n\n firstper = get_period(start)\n returnvals.append(firstper)\n per = firstper\n while per[1] < end:\n # goes as long as the *end* of the period is < our end date.\n # the intent is that if end is 2010-10-04, the last period will be\n # (2010-10-01, 2010-10-31)\n per = get_next_period(per[1])\n returnvals.append(per)\n\n return returnvals", "def date_range(start, end):\n delta = (end - start).days + 1\n return (start + timedelta(n) for n in range(delta))", "def start_month(d):\n return date(d.year, d.month, 1)", "def project(self, end: datetime) -> pd.Series:\n if self.recur is not None:\n if self.date is None:\n self.date = datetime.combine(datetime.today(), datetime.min.time())\n\n if isinstance(end, int):\n end = self.date + timedelta(days=end)\n\n dates = pd.date_range(\n start=self.date,\n freq=self.recur,\n end=end,\n )\n if len(dates) < 2:\n dates = pd.date_range(\n start=self.date,\n freq=self.recur,\n periods=2\n )\n if dates[0] > self.date:\n try:\n dates = dates.union(pd.date_range(\n start=self.date,\n freq=f'-{dates.freqstr}',\n periods=2\n ))\n except ValueError as e:\n dates = dates.union(pd.date_range(\n start=self.date,\n freq=f'-1{dates.freqstr}',\n periods=2\n ))\n\n if self.compile is not None:\n total = self.amount * (dates.shape[0] - 1)\n amt = round(total / dates.to_series().diff().sum().days, 2)\n else:\n amt = self.amount\n\n res = pd.Series(data=np.full(dates.shape[0], amt), index=dates)\n LOGGER.debug('-' * 50)\n LOGGER.debug(f'Amount: {amt}')\n LOGGER.debug(res.index)\n\n if self.compile is not None:\n res = res.resample('D').pad()\n LOGGER.debug(f'{res.index[0]} to {res.index[-1]}')\n res = res[self.date:end]\n res = res.resample(self.compile).sum()\n LOGGER.debug(res.index)\n\n if self.offset > 0:\n freq = self.compile or self.recur\n if freq == 'MS':\n offset = self.offset - 1\n else:\n offset = self.offset\n res.index += timedelta(days=offset)\n LOGGER.debug(res.index)\n\n # prevents dates that are out of range\n res = res[self.date:end]\n\n # prevents recurring charges compiled based on a number of days from all showing up on the first day\n if 'D' in self.recur or (self.compile is not None and 'D' in self.compile):\n res = res[res.index != self.date]\n return res\n else:\n return pd.Series(data=[self.amount], index=[self.date])", "def find_start_index():\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)", "def 
start(self) -> datetime:\n return self.df.index[0].to_pydatetime()", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def date_start(mytrip_start_date):\n mytrip_start_date = dt.date(2015, 8, 10)\n prev_year = dt.timedelta(days=365)\n start_dt_strftime=dt.datetime.strptime('2014-08-10',\"%Y-%m-%d\") \n date_start_results=session.query(func.min(measurements.tobs), func.avg(measurements.tobs),func.max(measurements.tobs)).\\\n filter(measurements.date >= mytrip_start_date).all()\n return(date_start_results)", "def get_date_range(startdate, enddate):\n if enddate < startdate:\n raise Exception(\"Passed in enddate that was before start date, did you flip your variables around?\")\n \n if isinstance(startdate, datetime.datetime): startdate = startdate.date()\n if isinstance(enddate, datetime.datetime): enddate = enddate.date()\n \n totalspan = enddate-startdate\n return [startdate + timedelta(days=day) for day in range(0, totalspan.days+1)]", "def determine_end_date(self, start: str, end: datetime):\n date = datetime.fromisoformat(start)\n diff_delta = end - date\n if diff_delta.days < 7:\n delta = diff_delta\n else:\n delta = timedelta(days=7)\n return (date + delta).isoformat()" ]
[ "0.6723257", "0.6191366", "0.6188484", "0.6066166", "0.6057369", "0.595907", "0.5914861", "0.5914648", "0.5906011", "0.58999103", "0.5897399", "0.5868481", "0.58304363", "0.5827823", "0.5826368", "0.57813686", "0.57741296", "0.5772709", "0.5770168", "0.57555985", "0.57435155", "0.5726759", "0.5719082", "0.57184124", "0.571172", "0.5709232", "0.5691578", "0.5691206", "0.56700784", "0.5601839" ]
0.742821
0
Get the start and end dates based on the start date input; if the start date is None, get the whole data
def get_start_and_end_dates(new_start_date=None):
    curr_date = datetime.utcnow()
    curr_date = pd.to_datetime(date(curr_date.year, curr_date.month, curr_date.day))
    if not(new_start_date):
        end_date = curr_date
        start_date = get_start_date(end_date, ANALYSIS_PERIOD)
    else:
        start_date = new_start_date
        end_date = curr_date

    start_date = start_date.replace(tzinfo=timezone.utc)
    end_date = end_date.replace(tzinfo=timezone.utc)
    return start_date, end_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def create_date_list(start_date = start_date, end_date = end_date):", "def get_ticker_start_and_end_dates(df_data):\n if df_data.empty:\n start_date, end_date = get_start_and_end_dates()\n else:\n new_start_date = df_data.index.max() + timedelta(days=1)\n logger.debug(f'new start date = {new_start_date}')\n start_date, end_date = get_start_and_end_dates(new_start_date)\n logger.debug(f'returning {start_date} and {end_date} from get_ticker_start_and_end_dates')\n return start_date, end_date", "def get_start_end_dates(row):\n\n if row['wo_id'] == 'UTLY' or row['wo_id'] == 'TSW':\n return row['job_start_dt'], row['job_end_dt']\n\n else:\n\n if row['job_completed_cbox'] == 1:\n return row['job_end_dt'], row['job_end_dt']\n\n else:\n return row['start'], row['end']", "def getDateRange(start, end=None, kind=None, zone=None, hour=0, eod=None, end_eod=None):\n if start is None:\n start = datetime.now()\n if zone is None or zone == \"\":\n zone = \"UTC\"\n if start == end:\n end = None\n if eod != None:\n hour = eod\n\n start = parseDate(start)\n orig_start = start\n if end:\n end = parseDate(end)\n if start == end:\n end = None\n\n if kind:\n start = start.replace(minute=0, second=0, microsecond=0)\n if kind == \"hour\":\n end = start + timedelta(hours=1)\n elif kind == \"day\":\n start = start.replace(hour=0)\n end = start + timedelta(hours=24)\n elif kind == \"week\":\n start = start.replace(hour=0)\n start, end = getWeek(start)\n elif kind == \"month\":\n start = start.replace(hour=0, day=1)\n end = getEndOfMonth(start)\n elif kind == \"year\":\n start = start.replace(hour=0, day=1, month=1)\n end = getEndOfMonth(start.replace(month=12))\n elif type(kind) is int or (isinstance(kind, str) and kind.isdigit()):\n end = start + timedelta(hours=24)\n start = end - timedelta(days=int(kind))\n if end is None:\n end = start + timedelta(hours=24)\n\n if zone and zone.lower() == \"utc\":\n zone = None\n if not kind and eod:\n hour = None\n # now lets convert our times to the zone\n if zone or hour:\n if zone is None:\n zone = \"UTC\"\n offset = getTimeZoneOffset(zone, start, hour=hour)\n if offset:\n start = start + timedelta(hours=offset)\n if end_eod:\n hour = end_eod\n offset = getTimeZoneOffset(zone, end, hour=hour)\n if offset:\n end = end + timedelta(hours=offset)\n\n # print(\"daterange: {} to {}\".format(start, end))\n return start, end", "def date_start_end(mytrip_start_date,mytrip_end_date):\n mytrip_start_date = dt.date(2015, 8, 10)\n mytrip_end_date= dt.date(2015, 8,14)\n prev_year = dt.timedelta(days=365)\n start_dt_strftime=dt.datetime.strptime('2014-08-10',\"%Y-%m-%d\")\n end_dt_strftime=dt.datetime.strptime('2014-08-14',\"%Y-%m-%d\") \n date_start_end_results=session.query(func.min(measurements.tobs), func.avg(measurements.tobs),func.max(measurements.tobs)).\\\n filter(measurements.date >= mytrip_start_date).filter(measurements.date <= end_dt_strftime).all()\n return(date_start_end_results)", "def get_start_and_end_dates(data):\n dates = re.findall(r'\\d{8}', data)\n\n start_date = datetime.strptime(dates[0], '%Y%m%d')\n end_date = datetime.strptime(dates[-1], '%Y%m%d')\n\n return start_date, end_date", "def from_start_date_to_end_date(start, end):\n\n first_canonicalized = start.replace(\" \", \"\").lower()\n second_canonicalized = end.replace(\" \", \"\").lower()\n first_search_date = 
start.replace(\" \", \"\").lower()\n second_search_date = end.replace(\" \", \"\").lower() \n all_dates_between_start_date_and_end_date = [multiple_dates for multiple_dates in temperature_parameters_list if multiple_dates[\"date\"\n ] >= first_search_date and multiple_dates[\"date\"] <= second_search_date]\n \n if first_search_date == first_canonicalized and second_search_date == second_canonicalized:\n return jsonify(all_dates_between_start_date_and_end_date)\n\n return jsonify({\"error\": f\"{start} and {end} not found.\"}), 404", "def get_date_values(self, all_dates):\n\n if self.day_value == 'all':\n working_dates = all_dates[:]\n non_working_dates = []\n elif self.day_value == 'weekdays':\n working_dates, non_working_dates = self.working_days(all_dates)\n elif self.day_value == 'custom':\n working_dates, non_working_dates = self.working_days(all_dates,\n blacklisted_dates)\n\n # we always want the day before the milestone starts to be a working day\n # regardless if it is a weekday or weekend\n # if it was a non working day the ideal effort curve would not decrease\n # by the end of the actual start date\n day_before = all_dates[0]\n if day_before not in working_dates:\n non_working_dates.remove(day_before)\n working_dates.insert(0, day_before)\n # else it must be in working dates already\n\n return working_dates, non_working_dates", "def dates_between_two_dates(start_date, end_date, frequency='m', complete_period=True):\n year1 = None\n month1 = None\n day1 = None\n year2 = None\n month2 = None\n day2 = None\n if '/' in start_date:\n year1 = str(start_date).split('/')[2]\n month1 = str(start_date).split('/')[1]\n day1 = str(start_date).split('/')[0]\n\n year2 = str(end_date).split('/')[2]\n month2 = str(end_date).split('/')[1]\n day2 = str(end_date).split('/')[0]\n\n\n elif '-' in start_date:\n year1 = str(start_date).split('-')[2]\n month1 = str(start_date).split('-')[1]\n day1 = str(start_date).split('-')[0]\n\n year2 = str(end_date).split('-')[2]\n month2 = str(end_date).split('-')[1]\n day2 = str(end_date).split('-')[0]\n\n list_official_dates = [date(int(year1), int(month1), int(day1))]\n\n sdate = date(int(year1), int(month1), int(day1)) # start date\n edate = date(int(year2), int(month2), int(day2)) # end date\n dates = pandas.date_range(sdate, edate, freq=frequency, normalize=True)\n\n\n for i in range(len(dates)):\n list_official_dates.append(dates[i])\n\n list_official_dates.append(date(int(year2), int(month2), int(day2)))\n\n\n for i in range(len(list_official_dates)):\n list_official_dates[i] = str(list_official_dates[i]).replace(' 00:00:00', '')\n\n\n return list_official_dates", "def get_date_range(startdate, enddate):\n if enddate < startdate:\n raise Exception(\"Passed in enddate that was before start date, did you flip your variables around?\")\n \n if isinstance(startdate, datetime.datetime): startdate = startdate.date()\n if isinstance(enddate, datetime.datetime): enddate = enddate.date()\n \n totalspan = enddate-startdate\n return [startdate + timedelta(days=day) for day in range(0, totalspan.days+1)]", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. 
%s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def get_day_range(a, b, date_format='%Y-%m-%d'):\n today = datetime.datetime.now().date()\n res = [today + datetime.timedelta(days=a), today + datetime.timedelta(days=b)]\n\n if date_format is None:\n return res\n return [datetime.datetime.strftime(x, date_format) for x in res]", "def getDates(self, startDate, endDate, endpoint=False):\n\n if self._type == 'M2': # If MERRA2 data type\n return self._merra2Dates(startDate, endDate, endpoint)\n else:\n raise Exception('Data type not supported : {}'.format(self._type))", "def start_and_end_of(date):\n y, m, d = date.year, date.month, date.day\n\n if getattr(date, 'tzinfo', None) is None:\n\n return (\n datetime.datetime(y, m, d, 00, 00, 00),\n datetime.datetime(y, m, d, 23, 59, 59)\n )\n else:\n return (\n datetime.datetime(y, m, d, 00, 00, 00, tzinfo=date.tzinfo),\n datetime.datetime(y, m, d, 23, 59, 59, tzinfo=date.tzinfo)\n )", "def filter_date_range(start: datetime.date, end: datetime.date, weekday: Optional[int] = None) -> Iterator[datetime.date]:\n date = start\n while date <= end:\n if weekday is None and SHORT_WEEKDAYS[date.weekday()] in WORK_DAYS or date.weekday() == weekday:\n yield date\n date += datetime.timedelta(days=1)", "def iter_dates(start, end):\n one_day = timedelta(days=1)\n date = start\n while date <= end:\n yield date\n date += one_day", "def _drange(start: Date, end: Date) -> Iterator[Date]:\n while start <= end:\n yield start\n start = start + TimeDelta(days=1)", "def select_date_interval_menu():\n while True:\n start_date = input('\\nInput desired start date with format dd-mm-yyyy:\\n')\n try:\n start_date = datetime.strptime(start_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid start date selected')\n while True:\n end_date = input('\\nInput desired start date with format dd-mm-yyyy,\\nor hit enter to select todays date\\n')\n if end_date == '':\n end_date = date.today()\n break\n else:\n try:\n end_date = datetime.strptime(end_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid end date selected')\n list_of_dates = pd.date_range(start_date, end_date, freq='d')\n list_of_dates = [i.strftime('%d%m%Y') for i in list_of_dates]\n return list_of_dates", "def DATEDIF(\n start_date: func_xltypes.XlDateTime,\n end_date: func_xltypes.XlDateTime,\n unit: func_xltypes.XlText\n) -> func_xltypes.XlNumber:\n\n if start_date > end_date:\n raise xlerrors.NumExcelError(\n f'Start date must be before the end date. 
Got Start: \\\n {start_date}, End: {end_date}')\n\n datetime_start_date = utils.number_to_datetime(int(start_date))\n datetime_end_date = utils.number_to_datetime(int(end_date))\n\n if str(unit).upper() == 'Y':\n date_list = list(rrule.rrule(rrule.YEARLY,\n dtstart=datetime_start_date,\n until=datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'M':\n date_list = list(rrule.rrule(rrule.MONTHLY,\n dtstart=datetime_start_date,\n until=datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'D':\n date_list = list(rrule.rrule(rrule.DAILY,\n dtstart=datetime_start_date,\n until=datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'MD':\n modified_datetime_start_date = datetime_start_date.replace(year=1900,\n month=1)\n modified_datetime_end_date = datetime_end_date.replace(year=1900,\n month=1)\n date_list = list(rrule.rrule(rrule.DAILY,\n dtstart=modified_datetime_start_date,\n until=modified_datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'YM':\n modified_datetime_start_date = datetime_start_date.replace(year=1900,\n day=1)\n modified_datetime_end_date = datetime_end_date.replace(year=1900,\n day=1)\n date_list = list(rrule.rrule(rrule.MONTHLY,\n dtstart=modified_datetime_start_date,\n until=modified_datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"\n\n elif str(unit).upper() == 'YD':\n modified_datetime_start_date = datetime_start_date.replace(year=1900)\n modified_datetime_end_date = datetime_end_date.replace(year=1900)\n date_list = list(rrule.rrule(rrule.DAILY,\n dtstart=modified_datetime_start_date,\n until=modified_datetime_end_date))\n return len(date_list) - 1 # end of day to end of day / \"full days\"", "def generate_dates(self, event):\n dates = []\n dtstart = self.tz_localize(event['dtstart'].dt)\n if 'dtend' in event:\n dtend = self.tz_localize(event['dtend'].dt)\n # DTEND is exclusive, so the real ending date is one day before\n if is_date(dtend):\n dtend -= datetime.timedelta(days=1)\n else:\n dtend = None\n # Normal case: no repetition\n if not 'rrule' in event:\n dates.append(self.format_dateinterval(dtstart, dtend))\n # Handle recurrent events\n else:\n ruleset = rrule.rruleset()\n rule = rrule.rrulestr(event['rrule'].to_ical().decode('utf-8'),\n dtstart=dtstart)\n ruleset.rrule(rule)\n # Parse all types of recurrence constraints\n for prop in ['rdate', 'exdate']:\n if not prop in event:\n continue\n # This can return either a single value or a list, so it's\n # a mess...\n prop_dates = event[prop]\n if not isinstance(prop_dates, list):\n prop_dates = [prop_dates]\n for prop_date in prop_dates:\n # This is a vDDDLists\n for vddd in prop_date.dts:\n dt = vddd.dt\n # EXDATE and RDATE are allowed to be dates,\n # convert them to datetime. 
TODO: should the time\n # be midnight, or the time from DTSTART?\n if is_date(dt):\n dt = datetime.datetime.combine(dt, datetime.time())\n dt = self.tz_localize(dt)\n ruleset.__getattribute__(prop)(dt)\n # We now have a ruleset that expands to a list of starting\n # date or datetime, one for each repetition.\n for dtstart_repeat in itertools.islice(ruleset, MAX_RECURRING_EVENTS):\n # Handle case where dtstart is a date, since rrule always\n # returns datetime objects.\n if is_date(dtstart):\n dtstart_repeat = dtstart_repeat.date()\n # Compute matching dtend if applicable\n if dtend == None:\n dtend_repeat = None\n else:\n dtend_repeat = dtend + (dtstart_repeat - dtstart)\n dates.append(self.format_dateinterval(dtstart_repeat, dtend_repeat))\n return dates", "def diveDates(self,start,finish):\n start = datetime.strptime(start,\"%Y-%m-%d\")\n finish = datetime.strptime(finish,\"%Y-%m-%d\")\n return start+(finish-start)/2", "def get_diff_start_end(start_date, end_date, min_yr=None, monthly=False):\n day_s, mon_s, yr_s = start_date[0], start_date[1], start_date[2]\n day_e, mon_e, yr_e = end_date[0], end_date[1], end_date[2]\n\n if not min_yr:\n min_yr = yr_s\n\n start, end = date(yr_s, mon_s, day_s), date(yr_e, mon_e, day_e)\n\n # For daily date\n if not monthly:\n # Calculate the days till the start and end\n till_start_days = (start - date(min_yr, Month.January, 1)).days\n till_end_days = (end - date(min_yr, Month.January, 1)).days\n return till_start_days, till_end_days + 1\n\n # For monthly data\n start, end = date(yr_s, mon_s, day_s), date(yr_e, mon_e, day_e)\n till_start_mon = len(list(rrule.rrule(rrule.MONTHLY, dtstart=date(min_yr, Month.January, 1), until=start)))\n till_end_mon = len(list(rrule.rrule(rrule.MONTHLY, dtstart=date(min_yr, Month.January, 1), until=end)))\n if mon_s == Month.January and yr_s == min_yr:\n till_start_mon = 0\n return till_start_mon, till_end_mon", "def dates_inbetween(self, start, end):\n\n return [start + timedelta(days=i) for i in xrange((end - start).days + 1)]", "def _date_range(start: str, end: str) -> List[str]:\n start_dt = _parse_ISO8601_date(start)\n end_dt = _parse_ISO8601_date(end)\n if start_dt > end_dt:\n raise ValidationError(\n \"Start date needs to be greater than or equal end date.\"\n )\n if (\n start_dt < _parse_ISO8601_date('1900') or\n end_dt > datetime.datetime.now().astimezone()\n ):\n raise ValidationError(\n \"Start date needs to be less than 1900-01-01T00:00:00Z and end\"\n \" date can't be from the feature.\"\n )\n return map(lambda date: date.isoformat(), rrule(\n freq=DAILY,\n dtstart=start_dt,\n until=end_dt,\n cache=True\n ))", "def report_start_and_end_date(self):\n start_date, end_date = self.start_date, self.end_date\n if start_date:\n db_import_time = time.strptime(str(start_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n start_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n start_date = str(start_date) + 'Z'\n else:\n today = datetime.now()\n earlier = today - timedelta(days=30)\n earlier_str = earlier.strftime(\"%Y-%m-%dT%H:%M:%S\")\n start_date = earlier_str + 'Z'\n\n if end_date:\n db_import_time = time.strptime(str(end_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n end_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n end_date = str(end_date) + 'Z'\n 
else:\n today = datetime.now()\n earlier_str = today.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end_date = earlier_str + 'Z'\n\n return start_date, end_date", "def get_indices_for_start_and_end_dates(lines, startdate, enddate):\n if startdate > enddate:\n raise RuntimeError(\"start date must be earlier than end date\")\n index_start_date = -999\n index_end_date = -999\n for index, line in enumerate(lines):\n temp_date = get_date_from_line(line)\n if index_start_date == -999:\n if temp_date >= startdate:\n index_start_date = index\n if index == len(lines) - 1:\n index_end_date = None\n else:\n index_end_date = index\n else:\n if temp_date > enddate:\n index_end_date = index - 1\n break\n elif index == len(lines) - 1:\n index_end_date = None\n if index_start_date == -999:\n print('Desired date is later than last date in file. Aborting...')\n exit()\n if index_end_date == -999:\n raise ValueError(\"index_end_date has not been set\")\n print('Start and end indices:', index_start_date, index_end_date)\n return index_start_date, index_end_date", "def _merra2Dates(self, startDate, endDate, endpoint):\n\n if self._F in (1, 3, 6, 'D'): # For all frequencies less than or equal to one (1) day\n dateInc = lambda x: x + timedelta( days = 1 ) # Define lambda function to increment date by one (1) day\n else: # For all other frequencies\n dateInc = next_month # Use the monthInc function\n\n while startDate < endDate: # With startDate is less than endDate\n yield startDate # Yield the start date\n startDate = dateInc( startDate ) # Increment the startDate using dateInc function\n if endpoint and startDate == endDate: # If the endpoint bool is True AND startDate == endDate\n yield startDate # Yield startDate", "def get_rest_days(self, cr, uid, employee_id, dt, context=None):\n\n day = dt.strftime(OE_DTFORMAT)\n ids = self.search(\n cr, uid, [\n ('employee_id', '=', employee_id),\n ('date_start', '<=', day),\n ('date_end', '>=', day),\n ], context=context)\n if len(ids) == 0:\n return None\n elif len(ids) > 1:\n raise orm.except_orm(_('Programming Error'), _(\n 'Employee has a scheduled date in more than one schedule.'))\n\n # If the day is in the middle of the week get the start of the week\n if dt.weekday() == 0:\n week_start = dt.strftime(OE_DFORMAT)\n else:\n week_start = (\n dt + relativedelta(days=-dt.weekday())).strftime(OE_DFORMAT)\n\n return self.get_rest_days_by_id(\n cr, uid, ids[0], week_start, context=context\n )", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not 
isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, \"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? {suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var" ]
[ "0.68113667", "0.6709884", "0.64297885", "0.64231473", "0.64026463", "0.6387936", "0.6387378", "0.6332218", "0.6329555", "0.6272463", "0.62494594", "0.6216098", "0.6211771", "0.6210365", "0.62078756", "0.6185185", "0.61821514", "0.61221343", "0.611361", "0.61102355", "0.61099374", "0.6105464", "0.610077", "0.60970795", "0.6085721", "0.60825056", "0.60801566", "0.6076244", "0.60749775", "0.6040915" ]
0.6989214
0
retrieve data from yahoo
def ping_yahoo_for_ticker(ticker, start_date, end_date):
    logger.debug(f'retrieving for {ticker} from yahoo between {start_date} and {end_date}')
    try:
        df = web.DataReader(ticker, 'yahoo', start_date, end_date)
        logger.debug('Successfully retrieved data for {}'.format(ticker))
        return df
    except Exception as e:
        logging.error('Error while accessing Yahoo - {}'.format(str(e)))
        return pd.DataFrame()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_from_yahoo():\n try:\n ticker = input('Enter the ticker symbol: ').upper()\n start = dt.datetime(2004, 8, 19)\n end = dt.datetime.today()\n\n df = web.DataReader(ticker, 'yahoo', start, end)\n df.to_csv('stock_data.csv')\n except Exception as e:\n print(e)\n exit()", "def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df", "def get_daily():\n cbr_response = requests.get(CBR_DAILY_URL)\n if not cbr_response.ok:\n abort(503)\n\n result = parse_cbr_currency_base_daily(cbr_response.text)\n\n return result, 200", "def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())", "def stock_url(stock_symbol, day=None, month=None, year=None):\r\n\r\n page = \"http://ichart.finance.yahoo.com/table.csv?\"\r\n page = ''.join([page, 's=', stock_symbol])\r\n now = datetime.datetime.now()\r\n if day == None:\r\n day = now.day\r\n if month == None:\r\n month = now.month\r\n if year == None:\r\n year = now.year\r\n page = ''.join([page, '&amp;d=', str(month)])\r\n page = ''.join([page, '&amp;e=', str(day)])\r\n page = ''.join([page, '&amp;f=', str(year)])\r\n page = ''.join([page, '&amp;g=d'])\r\n # Set the start date to Jan 1 1960 and the file will pick up data for as far\r\n # back as possible\r\n page = ''.join([page, '&amp;a=1'])\r\n page = ''.join([page, '&amp;b=1'])\r\n page = ''.join([page, '&amp;c=1960'])\r\n page = ''.join([page, '&amp;ignore=.csv'])\r\n # print(page)\r\n return(page)", "def gethistory(ticker):\n link = 'http://ichart.finance.yahoo.com/table.csv?s=' + ticker\n response = urllib.urlopen(link)\n html = response.read()\n return readcsv(html)", "def get(yahoo_code,inicio,fin):\n #######################################\n #inicio=(aaaa,mm,dd), fin=(aaaa,mm,dd)#\n #######################################\n # connection parameters\n # ----------------------------------- #\n\n # http timeout\n timeout_secs = 5\n\n # retries\n num_retries = 4\n\n # url encoding\n yahoo_url = r'https://finance.yahoo.com/quote/{0}/history?p={0}'.format(yahoo_code)\n\n # init headers\n headers = dict()\n headers['Connection'] = 'keep-alive'\n headers['Upgrade-Insecure-Requests'] = '1'\n headers['User-Agent'] = r\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\"\n\n\n # manejo de conexion\n # ----------------------------------- #\n\n csv_data = None\n while num_retries>0:\n\n try:\n\n session = requests.Session()\n\n r = session.get(yahoo_url,headers=headers,timeout=timeout_secs)\n r.encoding = 'utf-8'\n html_text = r.text\n\n # get crumb\n pattern = r\"(\\\"CrumbStore\\\":{\\\"crumb\\\":\\\")([^\\\"]+)(\\\"})\"\n m = re.search(pattern, html_text)\n crumb = m.group(2).replace(\"\\\\u002F\",\"/\")\n\n # Obtener datos desde inicio=(aaaa,mm,dd) (UTC)\n start_time = calendar.timegm(datetime(inicio[0],inicio[1],inicio[2]).utctimetuple())\n# #hasta hoy: end_time = calendar.timegm(datetime.now().utctimetuple()) \n end_time = calendar.timegm(datetime(fin[0],fin[1],fin[2]).utctimetuple())\n\n # url para descargar datos\n data_url = r\"https://query1.finance.yahoo.com/v7/finance/download/{0}?period1={1}&period2={2}&interval=1d&events=history&crumb={3}\".format(yahoo_code, start_time, end_time, crumb)\n\n # bajar datos en formato csv\n r = session.get(data_url,headers=headers,timeout=timeout_secs)\n csv_data = 
csv.reader(r.content.decode().splitlines(),delimiter=',')\n\n except requests.exceptions.Timeout:\n\n wtext = 'Connection timeout, {0} reintentos restantes'.format(str(num_retries))\n\n # print or log\n print(wtext)\n\n except AttributeError:\n\n wtext = 'Error de migajas (crumb error), {0} reintentos restantes'.format(str(num_retries))\n\n # print or log\n print(wtext)\n\n except Exception:\n\n wtext = 'Error genérico, {1} intentos restantes'.format(str(num_retries))\n\n # print or log\n print(wtext)\n\n finally:\n\n if csv_data:\n wtext = 'Los datos para {0} se bajaron sin pedos'.format(yahoo_code)\n\n # print or log\n print(wtext)\n break\n\n else:\n num_retries -= 1\n\n # asset-data\n if csv_data:\n eod_data = []\n for ii,row in enumerate(csv_data):\n\n if ii>0 and not 'null' in row:\n\n eod_data.append({\n 'date': row[0],\n 'open': float(row[1]),\n 'high': float(row[2]),\n 'low': float(row[3]),\n 'close': float(row[4]),\n 'adj_close': float(row[5]),\n 'volume': int(row[6])\n })\n\n else:\n\n wtext = 'No se pudo descargar {0} :c'.format(yahoo_code)\n\n # print or log\n print(wtext)\n\n return eod_data", "def get_today_url():\n\n lines, numlst = get_items()\n if not lines:\n return ''\n month = time.localtime().tm_mon\n day = time.localtime().tm_mday\n date = str(month) +'月' + str(day) + '日'\n url = get_url(lines, numlst, date)\n return url", "def get(sym, start, end):\n fmt = '%Y-%m-%d'\n s = datetime.strptime(start, fmt)\n f = datetime.strptime(end, fmt)\n\n url = ['http://ichart.finance.yahoo.com/table.csv?g=d&ignore=.csv',\n '&s=%s' % sym,\n '&a=%i' % (s.month-1),\n '&b=%i' % s.day,\n '&c=%i' % s.year,\n '&d=%i' % (f.month-1),\n '&e=%i' % f.day,\n '&f=%i' % f.year]\n url = ''.join(url)\n return build_data_list(urllib.urlopen(url).readlines())", "def date(self, response):\n\t\tx = response.xpath(\"//div[@class='wrapper appendbottom-10']/div/p/text()\")[-1].re('(\\w+)')\n\t\treturn x", "def get_date(self,yearlimits=[1500,2020]):\n\t\thead = self.raw_text()[:300] \t \t \n\t\tparser = Regexdate(head) \t \t\t\n\t\tyear = parser.find_year(yearlimits)\t\t\n\t\tmonth = parser.find_month()\n\t\tday = parser.find_day()\n\t\tif day and year != \"\":\n\t\t\treturn year + \"-\" + month + \"-\" + day\t\n\t\tif year:\n\t\t\treturn year\n\t\treturn \"\"", "def native_yahoo(self, html, parse_datetime=False):\n # Create a main parse object\n soup = BeautifulSoup(html, \"lxml\")\n # DIV - main content\n content = soup.find('section', id='mediacontentstory')\n # Title\n title = content.find('h1', class_='headline').text.strip()\n # Article date.\n if parse_datetime:\n credit = content.find('div', class_='credit')\n date_string = credit.find('abbr').text.strip()\n try:\n # Old article - get article date from page.\n published_datetime = datetime.datetime.strptime(date_string, '%B %d, %Y %I:%M %p')\n except ValueError:\n # Recent article - should not be here, because we skip new articles.\n published_datetime = False\n else:\n published_datetime = False\n # Get article Text\n a_content = content.find('div', class_='mw_release')\n if not a_content:\n a_content = content.find('div', itemtype='http://schema.org/Article')\n # Unfortunately, sometimes this does not work, even if the tag is present. Probably JS rendering...\n # See http://finance.yahoo.com/news/inplay-briefing-com-055139997.html\n if not a_content:\n raise ParsingNotImplementedException('Content DIV of the article was not found. 
Title: '+title)\n # Parse Text\n text = self.__yahoo_parse_text(a_content)\n # Result\n return {'title': title, 'datetime': published_datetime, 'text': text}", "def get_daily_data_from_stooq(ticker_symbol, start_date, end_date):\n # check whether the start_date and end_date are strings\n if isinstance(start_date, str) and isinstance(end_date, str):\n pass\n else:\n raise ValueError(\"Dates passed to the function are not strings!!!\")\n # validate formats of dates passed to the function\n validate_date_format_yyy_mm_dd(start_date)\n print(\"Validation of start_date format result: positive...\")\n validate_date_format_yyy_mm_dd(end_date)\n print(\"Validation of end_date format result: positive...\")\n d_1 = start_date.replace(\"-\", \"\")\n d_2 = end_date.replace(\"-\", \"\")\n temp_url = \"https://stooq.com/q/d/l/?s=\" + ticker_symbol + \"&d1=\" \\\n + d_1 + \"&d2=\" + d_2 + \"&i=d\"\n print(\"Getting data from URL: \", temp_url)\n # try-except block to catch the cases when the ticker symbol is nonexistent\n try:\n data_in = pd.read_csv(temp_url, usecols=['Date', 'Close'],\n parse_dates=[0])\n except ValueError:\n print(\"ValueError occurred! Probably a nonexistent ticker has been\"\n \" passed to the function\")\n except Exception:\n print(\"General error has occurred! Please check function arguments...\")\n else:\n # if data is obtained, rename \"Close\" ===> ticker name\n data_in.rename(columns={\"Close\": ticker_symbol}, inplace=True)\n return data_in", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def __get(self, ticker_symbol):\n\n # n = name\n # l1 = last trade\n # c1 = change\n # p2 = change percent\n url = \"http://finance.yahoo.com/d/quotes.csv?s=%s&f=nl1c1p2\" % ticker_symbol\n req = Request(url)\n resp = urlopen(req) \n csv_str = resp.read().decode().strip()\n\n elems = csv_str.split(',')\n\n return dict(name=elems[0].strip('\"'), ask_price=elems[1], change=elems[2], changep=elems[3].strip('\"'))", "def get_company_info(company_name):\n\n # Fix formatting of name\n co = company_name.replace(\".\", \"\").replace(\" \", \"%20\")\n\n query = f\"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={co}\\\n &region=1&lang=en&callback=YAHOO.Finance.SymbolSuggest.ssCallback\"\n\n response = requests.get(query)\n\n fdata = response.text.split(\"(\", 1)[1]\n fdata = fdata.rsplit(\")\", 1)[0]\n data = json.loads(fdata)\n yahoo_json = data[\"ResultSet\"][\"Result\"]\n\n return yahoo_json", "def ticker(self):\n response = self.query('ticker')\n return response", "def get_full_history(symbol):\n to_date = int(datetime.datetime.timestamp(datetime.datetime.now()))\n from_date = 
int(datetime.datetime.timestamp(datetime.datetime(1990, 1, 1, 1, 0, 0)))\n url_base = \"https://query1.finance.yahoo.com/v7/finance/download/\"\n url_params = f\"{symbol}.NS?period1={from_date}&period2={to_date}&interval=1d&events=history\"\n resp = requests.get(url_base + url_params)\n a = csv_to_list(resp)[1:]\n return create_price(symbol, a)", "def get_data(stockSymbol, full_data=False, start_date=None, end_date=None, check_stockSymbol=True):\n\n if(check_stockSymbol is True):\n check_name(stocks_values, stocks_values, stockSymbol)\n\n stockSymbol = stockSymbol.replace('&', '%26')\n symbolCount = scrape_symbolCount(stockSymbol)\n\n if(full_data is True):\n\n print(\"Downloading Full data for\", stockSymbol)\n x = datetime.datetime.strptime('1-1-1992', \"%d-%m-%Y\")\n y = datetime.datetime.today()\n\n else:\n\n if(start_date is None or end_date is None):\n raise ValueError(\"Provide start and end date.\")\n\n x = parse_date(start_date)\n y = parse_date(end_date)\n\n if(x > y):\n raise ValueError(\"Starting date is greater than end date.\")\n\n result = scrape_data(\n x, y, 'stock', stockSymbol=stockSymbol, symbolCount=symbolCount)\n return result", "def __download(self, since = workingday(1900,1,1)):\n\t\tuntil = workingday.today()\n\n\t\tinput_tuple = (self.symbol,\n\t\t\tstr(since.month - 1), str(since.day), str(since.year),\n\t\t\tstr(until.month - 1), str(until.day), str(until.year))\n\n\t\tself.price = dict()\n\t\tself.dividend = dict()\n\t\tself.split = dict()\n\n\t\ttry:\n\t\t\turl = 'http://ichart.yahoo.com/table.csv?s=%s&g=d&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s&ignore=.csv' % input_tuple\n\t\t\traw_data = urlopen(url)\n\t\t\traw_data.readline()\n\n\t\t\tfor line in raw_data:\n\t\t\t\tl = line.split(',')\n\t\t\t\td = workingday.strptime(l[0],'%Y-%m-%d')\n\t\t\t\trow = [\n\t\t\t\t\tfloat(l[1]), # Open\n\t\t\t\t\tfloat(l[2]), # High\n\t\t\t\t\tfloat(l[3]), # Low\n\t\t\t\t\tfloat(l[4]), # Close\n\t\t\t\t\tfloat(l[-1][:-1]), # Adj\n\t\t\t\t\tint(l[5])] # Volume\n\t\t\t\tself.price[d] = row\n\n\t\t\t# get dividend and split data\n\t\t\turl\t= 'http://ichart.finance.yahoo.com/x?s=%s&g=v&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s&ignore=.csv' % input_tuple\n\t\t\traw_data = urlopen(url)\n\t\t\traw_data.readline()\n\n\t\t\tfor line in raw_data:\n\t\t\t\tl = line.split(',')\n\t\t\t\tif l[0] == 'DIVIDEND':\n\t\t\t\t\td = workingday(int(l[1][1:5]), int(l[1][5:7]), int(l[1][7:9]))\n\t\t\t\t\tself.dividend[d] = float(l[2][:-1])\n\t\t\t\telif l[0] == 'SPLIT':\n\t\t\t\t\td = workingday(int(l[1][1:5]), int(l[1][5:7]), int(l[1][7:9]))\n\t\t\t\t\tself.split[d] = tuple(map(int, l[2][:-1].split(':')))\n\n\t\texcept:\n\t\t\tprint 'Error downloading ' + self.symbol", "def init(self):\n url = 'https://finance.yahoo.com/quote/%s/history' % (self.ticker)\n try:\n r = requests.get(url)\n except requests.exceptions.RequestException:\n raise ValueError\n txt = r.content\n try:\n cookie = r.cookies['B']\n pattern = re.compile('.*\"CrumbStore\":\\{\"crumb\":\"(?P<crumb>[^\"]+)\"\\}')\n for line in txt.splitlines():\n m = pattern.match(line.decode(\"utf-8\"))\n if m is not None:\n crumb = m.groupdict()['crumb']\n crumb = crumb.replace(u'\\\\u002F', '/')\n return cookie, crumb # return a tuple of crumb and cookie\n except KeyError:\n raise KeyError(\"{0} is not in Yahoo\".format(self.ticker))", "def get_url(ticker: str, period: int) -> str:\n periods = {\n 1: 'period1=1577836800&period2=1585699200', # First Quarter 2020\n 2: 'period1=1585699200&period2=1593561600', # Second Quarter 2020\n 3: 
'period1=1593561600&period2=1601510400', # Third Quarter 2020\n 4: 'period1=1601510400&period2=1609459200', # Fourth Quarter 2020\n 5: 'period1=1609459200&period2=1617235200' # First Quarter 2021\n }\n return (f\"https://finance.yahoo.com/quote/%5E{ticker}/history?{periods[period]}&interval=1d\"\n '&filter=history&frequency=1d&includeAdjustedClose=true')", "def get_price_history_dates(access_token,ticker,start_date,end_date,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'startDate':start_date,\r\n 'endDate': end_date,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def gettickerdata(tickername):\n\n r = requests.get(constants.bloomurl + getticker(tickername) + ':US')\n soup = BeautifulSoup(r.text, 'html.parser')\n results = soup.find_all('div', class_=\"price\")\n return (\"$\" + results[0].text)", "def get_cur_date(self):\n tmp = self.soup.find('small', text=re.compile('market', re.IGNORECASE)).text.split('Market')[0].strip()\n\n # assign year\n self.year = Settings.year.search(tmp).group(0)\n\n # assign day\n self.day = Settings.day.search(tmp).group(0)\n\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n\n # iterate over months and flag if match found\n for ii, mo in enumerate(months, 1):\n more = re.compile(mo, re.IGNORECASE)\n if more.search(tmp):\n self.month = ii\n break", "def get_call_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"td\" )\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def get_day(x):\n return x[\"SALE DATE\"].day", "def get_date():\n temp = pd.read_sql_query(_query['date'], connect())\n return temp.values", "def fetchOHLC(ticker,interval = \"minute\",duration=4):\r\n data = pd.DataFrame(kite.historical_data(ticker,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.date =data.date.map(lambda t: t.strftime('%Y-%m-%d %H:%M'))\r\n return data", "def open_hotel_date(self, soup):\n logging.info('Getting hotel booking registration date.')\n if soup.select_one('span.hp-desc-highlighted') is None:\n logging.error('Cant get hotel date.')\n return ''\n else:\n open_date_text = soup.select_one('span.hp-desc-highlighted').text.strip()\n if \" с \" in open_date_text:\n index = soup.select_one('span.hp-desc-highlighted').text.strip().find(\" с \")\n date = open_date_text[index+3:].replace('.', '')\n try:\n day, month, year = date.split(' ')\n month = RU_MONTH_VALUES[month[0:3]]\n date = '/'.join([day, month, year])\n except Exception:\n 
logging.error('Cant get hotel date.')\n return ''\n return date\n else:\n logging.error('Cant get hotel date.')\n return ''" ]
[ "0.6303952", "0.62257695", "0.5854441", "0.5763959", "0.5719899", "0.5719668", "0.57072276", "0.5702455", "0.56732655", "0.56607074", "0.56428176", "0.56095546", "0.5577825", "0.54943275", "0.5489212", "0.5487476", "0.54844296", "0.54761714", "0.5463254", "0.54550713", "0.54020876", "0.54003745", "0.5393767", "0.5373485", "0.5363747", "0.5331979", "0.5327305", "0.53024316", "0.5298108", "0.5293867" ]
0.62580943
1
Test getting a sorted document from a stream reader.
def test_stream_sorted(): archive = Archive() archive.commit(doc=DataFrameDocument(df=DF1)) names = list(archive.open(version=0).sorted(keys=[0]).to_df()['Name']) assert names == ['Alice', 'Alice', 'Bob', 'Claire']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sorted_page_stream(self):\n self._test_insertion(Macros, 0)", "def test_document_retrieval(self):", "def test_sorted_cursor_stream(self):\n self._test_insertion(TicketAudits, 0)", "def test_loading_document(self):", "def test_read(self):\n self.reader._timing = [3, 2, 2, 1, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 3)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(3, score)\n self.assertEqual(6, time)\n self.assertEqual([3, 3, 3, 2, 2, 2], self.reader._timing)\n score, time = self.reader.read(self.books[3], 4, 5)\n self.assertTrue(self.books[3].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(7, time)\n self.assertEqual([3, 3, 3, 2, 3, 3], self.reader._timing)", "def test_reader(qn_filepath, answers_dirpath):\n qns = get_questions(qn_filepath)\n for qn in qns:\n if qn.qid == 100:\n q = qn\n break\n assert q\n docs = get_documents(answers_dirpath, q.qid)\n print docs\n print docs[0].content", "def test_get_stream(self):\n pass", "def test_sortby_documents_helpful(self):\n r1 = RevisionFactory(is_approved=True)\n r2 = RevisionFactory(is_approved=True)\n HelpfulVoteFactory(revision=r2, helpful=True)\n\n # Note: We have to wipe and rebuild the index because new\n # helpful_votes don't update the index data.\n self.setup_indexes()\n self.reindex_and_refresh()\n\n # r2.document should come first with 1 vote.\n response = self.client.get(reverse('search.advanced'), {\n 'w': '1', 'a': '1', 'sortby_documents': 'helpful',\n 'format': 'json'\n })\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(r2.document.title, content['results'][0]['title'])\n\n # Vote twice on r1, now it should come first.\n HelpfulVoteFactory(revision=r1, helpful=True)\n HelpfulVoteFactory(revision=r1, helpful=True)\n\n self.setup_indexes()\n self.reindex_and_refresh()\n\n response = self.client.get(reverse('search.advanced'), {\n 'w': '1', 'a': '1', 'sortby_documents': 'helpful',\n 'format': 'json'})\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(r1.document.title, content['results'][0]['title'])", "def test_another_read(self):\n self.reader._timing = [3, 2, 3, 3, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 6)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(9, time)", "def test_client_document_retrieve(self):\n pass", "def test_get_feeds_order_title(reader, chunk_size):\n # for https://github.com/lemon24/reader/issues/203\n reader._storage.chunk_size = chunk_size\n\n parser = Parser()\n reader._parser = parser\n\n feed2 = parser.feed(2, datetime(2010, 1, 2), title='two')\n feed1 = parser.feed(1, datetime(2010, 1, 1), title='one')\n feed3 = parser.feed(3, datetime(2010, 1, 3), title='three')\n feed4 = parser.feed(4, datetime(2010, 1, 1))\n feed5 = parser.feed(5, datetime(2010, 1, 1))\n\n reader.add_feed(feed2.url)\n reader.add_feed(feed1.url)\n reader.add_feed(feed3.url)\n reader.add_feed(feed4.url)\n reader.add_feed(feed5.url)\n\n assert list(f.url for f in reader.get_feeds()) == '1 2 3 4 5'.split()\n\n reader.update_feeds()\n reader.set_feed_user_title(feed5, 'five')\n\n assert list(f.url for f in reader.get_feeds()) == '4 5 1 3 2'.split()", "def test_read_index(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n TroveIndexBuilder(\"test/short.dat\", out=indexfile)\n\n index = TroveIndex()\n index.reload(indexfile)\n\n docs = sorted([doc for doc in 
index.documents])\n self.assertEquals(10, len(docs))\n\n self.assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], docs)\n\n doc = index.get_document(1)\n ref = {u\"id\":\"1\",u\"titleName\":u\"Hello\"}\n self.assertNotEquals(None, doc, \"Document not found for id 1\")\n self.assertDictEqual(ref, doc)\n\n doc = index.get_document(10)\n ref = {\"id\":\"10\",\"titleName\":\"Hello\"}\n self.assertNotEquals(None, doc)\n self.assertDictEqual(ref, doc)", "def test_read_different_location(self):\n try:\n self.reader.read(self.books[1], 0, 1)\n self.fail(\"Readed book was not in the library\")\n except AssertionError:\n pass", "def test_valid_search_order(self) -> None:\n\n # fetch the most recent first, largest timestamp\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=b\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n report = 1\n while report < len(channel.json_body[\"event_reports\"]):\n self.assertGreaterEqual(\n channel.json_body[\"event_reports\"][report - 1][\"received_ts\"],\n channel.json_body[\"event_reports\"][report][\"received_ts\"],\n )\n report += 1\n\n # fetch the oldest first, smallest timestamp\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=f\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n report = 1\n while report < len(channel.json_body[\"event_reports\"]):\n self.assertLessEqual(\n channel.json_body[\"event_reports\"][report - 1][\"received_ts\"],\n channel.json_body[\"event_reports\"][report][\"received_ts\"],\n )\n report += 1", "def test_not_sorted_stream(self):\n self._test_insertion(TicketMetrics)", "def test_iterate_over_stream():\n archive = Archive()\n archive.commit(doc=DataFrameDocument(df=DF1))\n rows = list()\n with archive.open(version=0).open() as reader:\n for row in reader:\n rows.append(row)\n assert rows == [\n (0, 0, ['Alice', 32]),\n (1, 1, ['Bob', 45]),\n (2, 2, ['Claire', 27]),\n (3, 3, ['Alice', 23])\n ]", "def test_04_read_publications(self):\n publications = self.client.get(PUBLICATIONS_PATH, params={\n 'created': self.publication['created']\n })\n self.assertEqual(len(publications), 1, publications)\n for key, val in self.publication.items():\n with self.subTest(key=key):\n self.assertEqual(publications[0][key], val)", "def _read_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def _read_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def test_get_entries_recent_feed_order(reader, chunk_size, pre_stuff, call_method):\n reader._storage.chunk_size = chunk_size\n\n parser = Parser()\n reader._parser = parser\n reader._now = lambda: naive_datetime(2010, 1, 1)\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n three = parser.entry(1, 3, datetime(2010, 1, 1))\n two = parser.entry(1, 2, datetime(2010, 1, 1))\n four = parser.entry(1, 4, datetime(2010, 1, 1))\n one = parser.entry(1, 1, datetime(2010, 1, 1))\n\n reader.add_feed(feed.url)\n reader.update_feeds()\n pre_stuff(reader)\n\n assert [eval(e.id)[1] for e in call_method(reader)] == [3, 2, 4, 1]\n\n parser.feed(1, datetime(2010, 1, 2))\n del parser.entries[1][1]\n one = parser.entry(1, 1, datetime(2010, 1, 2))\n del parser.entries[1][4]\n four = parser.entry(1, 4, datetime(2010, 1, 2))\n 
del parser.entries[1][2]\n two = parser.entry(1, 2, datetime(2010, 1, 2))\n\n reader.update_feeds()\n pre_stuff(reader)\n\n assert [eval(e.id)[1] for e in call_method(reader)] == [1, 4, 2, 3]", "def test_02_read_publications(self):\n publications = self.client.get(PUBLICATIONS_PATH, params={\n 'repository_version': self.repo['_href']\n })\n self.assertEqual(len(publications), 1, publications)\n for key, val in self.publication.items():\n with self.subTest(key=key):\n self.assertEqual(publications[0][key], val)", "def test_05_read_publications(self):\n body = gen_distribution()\n body['publication'] = self.publication['_href']\n distribution = self.client.post(DISTRIBUTION_PATH, body)\n self.addCleanup(self.client.delete, distribution['_href'])\n self.publication.update(self.client.get(self.publication['_href']))\n publications = self.client.get(PUBLICATIONS_PATH, params={\n 'distributions': distribution['_href']\n })\n self.assertEqual(len(publications), 1, publications)\n for key, val in self.publication.items():\n with self.subTest(key=key):\n self.assertEqual(publications[0][key], val)", "def test_all_documents(self):", "def test_author_sorted_articles(self):\n\n self.make_test('articles', ArticleListSerializer, 'author:articles')", "async def test_txn_list_sorted(self):\n paging = Mocks.make_paging_response(0, 3)\n transactions = Mocks.make_txns('0', '1', '2')\n self.stream.preset_response(head_id='2', paging=paging, transactions=transactions)\n\n response = await self.get_assert_200('/transactions?sort=header_signature')\n page_controls = Mocks.make_paging_controls()\n sorting = Mocks.make_sort_controls('header_signature')\n self.stream.assert_valid_request_sent(\n paging=page_controls,\n sorting=sorting)\n\n self.assert_has_valid_head(response, '2')\n self.assert_has_valid_link(response,\n '/transactions?head=2&sort=header_signature')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], '0', '1', '2')", "def test_03_read_publications(self):\n publications = self.client.get(PUBLICATIONS_PATH, params={\n 'publisher': self.publisher['_href']\n })\n self.assertEqual(len(publications), 1, publications)\n for key, val in self.publication.items():\n with self.subTest(key=key):\n self.assertEqual(publications[0][key], val)", "def test_get_document(index_with_documents):\n response = index_with_documents().get_document(\"500682\")\n assert isinstance(response, Document)\n assert hasattr(response, \"title\")\n assert response.title == \"The Highwaymen\"", "def test_docdict_order():\n from mne.utils.docs import docdict\n\n # read the file as text, and get entries via regex\n docs_path = Path(__file__).parent.parent / \"utils\" / \"docs.py\"\n assert docs_path.is_file(), docs_path\n with open(docs_path, \"r\", encoding=\"UTF-8\") as fid:\n docs = fid.read()\n entries = re.findall(r'docdict\\[(?:\\n )?[\"\\'](.+)[\"\\']\\n?\\] = ', docs)\n # test length & uniqueness\n assert len(docdict) == len(entries)\n # test order\n assert sorted(entries) == entries", "def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")", "def test_get_result_top_file(self):\n pass" ]
[ "0.62664425", "0.59971935", "0.589076", "0.5752063", "0.5700779", "0.5668757", "0.5662684", "0.56397104", "0.55843824", "0.557145", "0.54686254", "0.5442828", "0.54101425", "0.5386695", "0.5313699", "0.5304388", "0.5282291", "0.52730596", "0.52730596", "0.52368385", "0.52248627", "0.51951027", "0.5190211", "0.5185053", "0.51707935", "0.5166379", "0.5155871", "0.5151527", "0.514927", "0.5072288" ]
0.630785
0
Save timezone-aware values for created and updated fields.
def save(self, *args, **kwargs): if self.pk is None: self.created = timezone.now() self.updated = timezone.now() super(Base, self).save(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, *args, **kwargs):\r\n if self.pk is None:\r\n self.created = timezone.now()\r\n self.updated = timezone.now()\r\n super(Base, self).save(*args, **kwargs)", "def save(self, *args, **kwargs):\n if not self.id:\n self.create_date = timezone.now()\n self.update_date = timezone.now()\n super().save(*args, **kwargs)", "def save(self):\n self.updated_at = datetime.now()", "def test_save_2_datetime(self):\n date = BaseModel()\n updat_at1 = date.updated_at\n updat_at2 = datetime.now()", "def get_db_prep_save(self, value, connection=None):\n\n if value is not None:\n if value.tzinfo is not None:\n ## convert to settings.TIME_ZONE\n value = value.astimezone(default_tz)\n \n value = value.replace(tzinfo=None)\n return super(LocalizedDateTimeField, self).get_db_prep_save(value, connection=connection)", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n if not self.id:\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n if not self.id:\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n super().save(*args, **kwargs)", "def save(self):\n self.updated_at = datetime.today()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.today()\n models.storage.save()", "def save(self, *args, **kwargs):\n if self.created_at == None:\n self.created_at = datetime.datetime.now()\n self.updated_at = datetime.datetime.now()\n\n super(Application, self).save(*args, **kwargs)", "def save_now(self):\r\n self.save()", "def save_now(self):\r\n self.save()", "def save(self):\r\n self.updated_at = datetime.now()\r\n models.storage.save()", "def test_updated_atviasavemethod(self):\n b1 = BaseModel()\n b1.save()\n self.assertEqual(type(b1.updated_at), type(datetime.now()))\n self.assertEqual(type(b1.updated_at), datetime)\n self.assertTrue(hasattr(b1, \"updated_at\"))", "def save(self):\n from models import storage\n self.updated_at = datetime.datetime.now()\n storage.save()", "def save(self, *args, **kwargs):\n self.modify_ts = datetime.now()\n super(ModelBase, self).save(*args, **kwargs)", "def save(self):\n from models import storage\n self.updated_at = datetime.now()\n storage.save()", "def test_User_to_save(self):\n tmpobj = User()\n tmpobj.save()\n self.assertNotEqual(tmpobj.created_at, tmpobj.updated_at)", "def update(self, request, *args, **kwargs):\n if not settings.DEBUG:\n log_msg = \"User %s setting timezone to %s\"\n logger.info(log_msg % (request.user.id, request.data.get('timezone', None)))\n request.data['user'] = request.user.id\n return super(UserProfileViewSet, self).update(request, *args, **kwargs)", "def pre_save(self, model_instance, add):\n if add:\n setattr(model_instance, self.attname, timezone.now())\n return super().pre_save(model_instance, add)", "def save(self):\n if not self.id:\n self.created_date = now()\n self.modified_date = now()\n return super().save()", "def save(self):\n \"\"\"\n if current_posted_date != prev_posted_date\n if self.id == None or self.edit_posted == True:\n site_id = settings.SITE_ID\n site = Site.objects.select_related().get(pk=site_id)\n blog = site.blog_set.all()[0]\n tz = timezone(blog.timezone)\n\n #replace the timezone first, then convert to utc\n\n #self.posted = 
self.posted.replace(tzinfo=tz).astimezone(pytz.utc)\n \"\"\"\n super(Entry,self).save()\n self.tags = self.tag_list\n\n try:\n ping_google()\n except Exception:\n # Bare 'except' because we could get a variety\n # of HTTP-related exceptions.\n pass", "def save(self, upsert=True):\n if self.created_at is None:\n self.created_at = datetime.utcnow()\n self.updated_at = self.created_at\n else:\n self.updated_at = datetime.utcnow()\n\n return self.service.update_one({\"_id\": self._id}, {\"$set\": self.__dict__}, upsert=upsert)", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self, *args, **kwargs):\n if not self.user_id or not self.created:\n self.created = datetime.datetime.today()\n self.modified = datetime.datetime.today()\n return super(UserProfile, self).save(*args, **kwargs)", "def save_settings(client_id, time_format, country, time_zone):\n\tsettings = Settings(user_id=client_id,\n\t\t\ttime_format=time_format,\n\t\t\tcountry=country,\n\t\t\ttime_zone=time_zone)\n\tsession.merge(settings)\n\tsession.commit()" ]
[ "0.6919896", "0.6347257", "0.632574", "0.63116753", "0.62761855", "0.61552626", "0.61552626", "0.61448216", "0.61448216", "0.6132212", "0.6132212", "0.6124235", "0.61193013", "0.61193013", "0.60128635", "0.6011387", "0.59883195", "0.5987516", "0.59655243", "0.59053195", "0.5905276", "0.59011054", "0.5892553", "0.5845903", "0.5836595", "0.58280987", "0.58280987", "0.58280987", "0.5792454", "0.57777023" ]
0.68968886
1
Create Page objects if saved for the first time.
def save(self, *args, **kwargs): created = False if self.pk is None: created = True super(Base, self).save(*args, **kwargs) if created is True: for i in range(self.page_count): page = Page(work=self, number=i+1) page.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def pre_save_page(instance, raw, **kwargs):\n instance.old_page = None\n try:\n instance.old_page = Page.objects.get(pk=instance.pk)\n except ObjectDoesNotExist:\n pass", "def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)", "def setUp(self):\n\n pages = {}\n\n pages['A'] = Page.objects.create(title='A')\n pages['B'] = Page.objects.create(title='B', parent=pages['A'])\n pages['C'] = Page.objects.create(title='C', parent=pages['B'])\n pages['D'] = Page.objects.create(title='D', parent=pages['B'])\n pages['E'] = Page.objects.create(title='E')\n\n self.pages = pages", "def CreatePage(url1: str) -> Page:\n page = Page(\n name=url1,\n queried=1,\n )\n db.session.add(page)\n db.session.commit()\n return page", "def new(self):\n if g.my['rank'] < 15:\n self.__request_first_block()\n \n self.__request_second_block()\n self.__request_label_and_alias()\n self.__request_values()\n \n if self.message is None:\n try:\n g.db.pages.insert(self.page)\n return True\n except PyMongoError:\n self.message = g.pages_msg('error_mongo_new')\n \n return False", "def create(self, validated_data):\n category_data = validated_data.pop('categories')\n p = Page.objects.create(**validated_data)\n for c in category_data:\n cat = Category.objects.filter(title=c['title'])\n if cat:\n p.categories.add(cat[0])\n else:\n cat = Category.objects.create(**c)\n p.categories.add(cat)\n p.save()\n return p", "async def _init_page(self, page: Page) -> None:\r\n self.pages[page] = {\r\n 'id': str(uuid4()),\r\n 'is_idle': False\r\n }\r\n # add custom settings to page.\r\n await self._add_page_settings(page)\r\n # add page to idle queue.\r\n await self.set_idle(page)\r\n # start task to periodically check page idle status.\r\n asyncio.create_task(\r\n self._check_idle_status(page))", "def create_page(self):", "def setUp(self):\n\n pages = {}\n\n pages['A'] = Page.objects.create(title='A')\n pages['B'] = Page.objects.create(title='B', parent=pages['A'])\n pages['C'] = Page.objects.create(title='C', parent=pages['B'])\n pages['D'] = Page.objects.create(title='D', parent=pages['C'])\n pages['E'] = Page.objects.create(title='E', parent=pages['C'])\n pages['F'] = Page.objects.create(title='F', parent=pages['B'])\n pages['G'] = Page.objects.create(title='G', parent=pages['B'])\n pages['H'] = Page.objects.create(title='H', parent=pages['G'])\n pages['I'] = Page.objects.create(title='I', parent=pages['A'])\n pages['J'] = Page.objects.create(title='J')\n\n self.pages = pages", "def _add_page_to_storage(page: Page):\n if page:\n if page.valid_mime:\n CrawlerStorageManager.create_file_from_page(page)\n page.save_to_json_file()", "def create_page_in_admin(comicsite,title,content=\"testcontent\",permission_lvl=\"\"):\n \n if permission_lvl == \"\":\n permission_lvl = Page.ALL\n \n page_admin = PageAdmin(Page,admin.site)\n page = Page.objects.create(title=title,\n comicsite=comicsite,\n html=content,\n permission_lvl=permission_lvl)\n page_admin.first_save(page)\n return page", "def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()", "def create_page(self, document_data: dict) -> 
None:\n wiki_obj = WikiService()\n token = wiki_obj.get_token()\n wiki_obj.check_token(token)\n\n project_wikitext_data = self.generate_page_sections_dict(\n document_data\n )\n\n updated_text = wiki_obj.generate_page_text_from_dict(\n self.project_page_template,\n f\"=={self.page_initial_section}==\",\n project_wikitext_data,\n self.users_list_section\n )\n\n project_page_name = f\"{document_data['project']['name']}\"\n\n if wiki_obj.is_existing_page(project_page_name):\n wiki_obj.edit_page(\n token,\n project_page_name,\n updated_text\n )\n else:\n wiki_obj.create_page(\n token,\n project_page_name,\n updated_text\n )", "def add_page(self, page): \n self.pages.append(Page(page))", "def create_page_from_import(request, parent_page, page_class, parsed_doc):\n\n page = page_class.create_from_import(parsed_doc, request.user)\n\n edit_handler = page_class.get_edit_handler()\n edit_handler = edit_handler.bind_to(request=request, instance=page)\n form_class = edit_handler.get_form_class()\n\n next_url = get_valid_next_url_from_request(request)\n\n form = form_class(instance=page, parent_page=parent_page)\n has_unsaved_changes = False\n\n edit_handler = edit_handler.bind_to(form=form)\n\n return render(\n request,\n \"wagtailadmin/pages/create.html\",\n {\n \"content_type\": page.content_type,\n \"page_class\": page_class,\n \"parent_page\": parent_page,\n \"edit_handler\": edit_handler,\n \"action_menu\": PageActionMenu(\n request, view=\"create\", parent_page=parent_page\n ),\n \"preview_modes\": page.preview_modes,\n \"form\": form,\n \"next\": next_url,\n \"has_unsaved_changes\": has_unsaved_changes,\n },\n )", "def page_create(request, slug, template_name='groups/pages/page_form.html'):\n group = get_object_or_404(Group, slug=slug)\n form = GroupPageForm(initial={'group': group})\n\n if request.method == 'POST':\n form = GroupPageForm(request.POST)\n if form.is_valid():\n page = form.save(commit=False)\n page.group = group\n page.save()\n return redirect(request, page)\n\n return render(request, template_name, {\n 'group': group,\n 'form': form\n })", "def new(): \n pages_object = Pages()\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Creation new page\n if request.method == 'POST':\n if pages_object.new():\n return redirect(url_for('pages.overview'))\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/new.html'.format(MODULE_DIR), **locals())", "def create_page(self, space, title, body, parent_id=None, update_message=None):\n page_structure = {\n 'title': title,\n 'type': 'page',\n 'space': {\n 'key': space\n },\n 'body': {\n 'storage': {\n 'value': body,\n 'representation': 'storage'\n }\n }\n }\n\n if parent_id is not None:\n if type(parent_id) is str:\n parent_id = int(parent_id)\n page_structure['ancestors'] = [{'id': parent_id}]\n\n if update_message is not None:\n page_structure['version'] = {'message': update_message}\n\n print(json.dumps(page_structure))\n return self.api.content.post(json=page_structure)", "def post_save_page(instance, raw, created, **kwargs):\n old_page = instance.old_page\n del(instance.old_page)\n \n if settings.CMS_MODERATOR:\n # tell moderator something was happen with this page\n from cms.utils.moderator import page_changed\n page_changed(instance, old_page)", "def create_new_page(self, survey_id: str) -> json:\n payload = {\n \"title\": \"Page title\"\n }\n url_extras = [survey_id, 
\"pages\"]\n return self.make_request(RequestTypes.POST, payload, url_extras)", "def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):\n\t\tpage = page.lstrip('/')\n\t\tsession = db_manager.Session()\n\t\tquery = session.query(db_models.LandingPage)\n\t\tquery = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)\n\t\tif query.count() == 0:\n\t\t\tlanding_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)\n\t\t\tsession.add(landing_page)\n\t\t\tsession.commit()\n\t\tsession.close()\n\t\treturn", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def add_page(self,**app_names_and_pages):\n \n for app,pages in app_names_and_pages.items():\n if os.path.exists(os.path.join(self._main,app)):\n for page in pages:\n os.makedirs(os.path.join(self._main,app,page))\n self._create_init_routes(self._main,app,page)\n else:\n print(\"that app does not exist\")\n\n self._update_add_app_or_page()", "def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()", "def get_new_page_data(self, draft=False):\n page_data = {\n 'title': 'test page %d' % self.counter,\n 'slug': 'test-page-%d' % self.counter, 'language': 'en',\n 'sites': [1], 'status': Page.DRAFT if draft else Page.PUBLISHED,\n # used to disable an error with connected models\n 'document_set-TOTAL_FORMS': 0, 'document_set-INITIAL_FORMS': 0,\n }\n self.counter = self.counter + 1\n return page_data" ]
[ "0.69514966", "0.6622692", "0.6590603", "0.6394531", "0.62881696", "0.62271976", "0.6174904", "0.6163099", "0.61445093", "0.6078789", "0.605919", "0.6027516", "0.5903286", "0.57282794", "0.5698673", "0.56549186", "0.5653584", "0.55958694", "0.5554187", "0.55439764", "0.5527265", "0.55179846", "0.5508478", "0.5508478", "0.5508478", "0.5508478", "0.5508478", "0.55027324", "0.5479148", "0.5471998" ]
0.7246435
0
Generic, classic Binary Search implementation >>> from py_algorithms.search import new_binary_search >>>
def new_binary_search() -> Search: return _BinarySearch()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive beleft, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(keys, query):\n return bin_search(keys, query, 0, len(keys) - 1)", "def binary_search(array, item):\n # change this to call your implementation to verify it passes all tests\n # return binary_search_iterative(array, item)\n return binary_search_recursive(array, item)", "def binary_search(alist, target):\n index = binary_search_iterative(alist, target)\n return index", "def binarySearch(A, k):\n \n #TODO: Implement without using python's in-built functiondef binary(A, k):\n def bSearch(A, k, low, high):\n if high == low:\n if A[low] == k:\n return low\n else:\n return -1\n mid = (low + high)//2\n if A[mid] == k:\n return mid\n elif A[mid] > k:\n if low == mid:\n return -1\n else:\n return bSearch(A, k, low, mid-1)\n else:\n return bSearch(A, k, mid+1, high)\n if isinstance(A, list) == False or isinstance(k, int) == False:\n return -1\n else:\n if len(A) == 0:\n return -1\n else:\n x = bSearch(A, k, 0, len(A)-1)\n return x", "def test_binary_search(self):\n A = sorted([1,4,6,7,8,2,3,4,5,7,10,15,25,30,35])\n self.assertTrue(mst.binary_search(A, 3))\n self.assertFalse(mst.binary_search(A, 17))", "def binary_search(data, target, low, high):\n if low > high:\n return -1\n else:\n mid = (low + high) // 2\n if target == data[mid]:\n return mid\n elif target < data[mid]:\n return binary_search(data, target, low, mid - 1)\n else:\n return binary_search(data, target, mid + 1, high)", "def test_binary_search(self):\n self.init_player(\n '2', 'The Lazy Magician', 'How does he do it?')\n self.submit_and_compare('Dont know', 'we should watch him first',\n 'town square')\n self.submit_and_compare(0, '', 'Is it')\n self.submit_and_compare(2, '', 'Do you want to play again?')\n # TODO(sll): Redo all of the following as a JS test, since the\n # backend can no longer parse expressions:\n #\n # self.submit_and_compare(1, '', 'how do you think he does it?')\n # self.submit_and_compare('middle',\n # 'he\\'s always picking a number in the middle',\n # 'what number the magician picked')\n # self.submit_and_compare(0, 'Exactly!', 'try it out')\n # self.submit_and_compare(10, '', 'best worst case')\n # self.submit_and_compare(\n # 0, '', 'to be sure our strategy works in all cases')\n # self.submit_and_compare(0, 'try to guess', '')", "def mybinsearch(lst: List[T], elem: S, compare: Callable[[T, S], int]) -> int:\n\n lower = 0\n upper = len(lst)-1\n mid = 0\n while lower <= upper:\n mid = 
int((lower + upper) / 2)\n if compare(lst[mid], elem) > 0: #meaning that elem is in the lower half\n upper = mid - 1 #makes new upper bound right below mid\n elif compare(lst[mid], elem) < 0: #meaning that elem is in the upper half\n lower = mid + 1 #makes lower bound right above mid\n else:\n while(not(mid == 0) and compare(lst[mid-1], elem) == 0):\n mid-= 1\n return mid\n return -1 #if the while loop runs and doesn't catch then we return -1", "def BinarySearch(A, target):\n return _BinarySearchAux(A, target, 0, len(A) - 1)", "def _bin_search_recursive(self, v, start, end):\n if end < start:\n return start\n\n mid = (start + end) / 2\n if self.values[mid] == v:\n return mid\n elif self.values[mid] < v:\n return self._bin_search_recursive(v, mid + 1, end)\n else:\n return self._bin_search_recursive(v, start, mid - 1)", "def bin_search2(A,x, low=0, hi=None):\n hi = hi if hi is not None else len(A)\n pos = bisect.bisect_left(A,x,low,hi)\n return pos", "def binary_search(elements, to_find, lo=0, hi=None):\n if hi is None:\n hi = len(elements)\n while lo < hi:\n mid = (lo+hi)//2\n midval = elements[mid]\n if midval < to_find:\n lo = mid+1\n elif midval > to_find: \n hi = mid\n else:\n return hi\n return hi", "def _binary_search(mylist, key, left, right):\n\t### TODO\n\tif left <= right:\n\n\t\tmidPoint = (left + right) // 2\n\t\tif mylist[midPoint] == key:\n\t\t\treturn midPoint\n\n\t\telif mylist[midPoint] > key:\n\t\t\treturn _binary_search(mylist, key, left, midPoint - 1)\n\n\t\telif mylist[midPoint] < key:\n\t\t\treturn _binary_search(mylist, key, midPoint + 1, right)\n\n\treturn -1", "def binary_search(lst, key):\n lst.sort()\n low = 0\n high = len(lst) - 1\n while low <= high:\n mid = low + (high - low) //2\n if lst[mid] == key:\n return mid\n if lst[mid] < key:\n low = mid + 1\n else:\n hihg = mid - 1\n return -low - 1", "def binarySearch(searchValue, array):\r\n first = 0\r\n last = len(array) - 1\r\n beenFound = False\r\n\t\r\n while first <= last and not beenFound:\r\n midpoint = (first + last)//2\r\n\r\n if array[midpoint] == searchValue:\r\n result = str(searchValue) + \" has been found\"\r\n beenFound = True\r\n \r\n\t \r\n else:\r\n if searchValue < array[midpoint]:\r\n last = midpoint-1\r\n else:\r\n first = midpoint+1\t\r\n return()", "def search_binary(xs, target):\n lb = 0\n ub = len(xs)\n while True:\n if lb == ub: # If region of interest (ROI) becomes empty\n return -1\n\n # Next probe should be in the middle of the ROI\n mid_index = (lb + ub) // 2\n\n # Fetch the item at that position\n item_at_mid = xs[mid_index]\n\n print(\"ROI[{0}:{1}](size={2}), probed='{3}', target='{4}'\".format(lb, ub, ub-lb, item_at_mid, target))\n\n # How does the probed item compare to the target?\n if item_at_mid == target:\n return mid_index # Found it!\n if item_at_mid < target:\n lb = mid_index + 1 # Use upper half of ROI next time\n else:\n ub = mid_index # Use lower half of ROI next time", "def bin_search(array, key):\n return bin_search_util(array, key, 0, len(array) - 1)", "def bin_search(arr, x):\n \n low = 0\n hi = len(arr) - 1\n \n while(low <= hi): \n \n mid = int((low + hi) / 2) # find middle idx\n\n if( x >= arr[mid]): # if x on the right, change low idx and search right side\n low = mid + 1; \n else: # else search left side\n hi = mid - 1\n\n return hi", "def binarySearch(lst, x, lo = 0, hi = None):\r\r\n comparisons = 0\r\r\n if hi is None:\r\r\n hi = len(lst)\r\r\n while lo < hi:\r\r\n comparisons += 1\r\r\n mid = (lo + hi)//2\r\r\n midval = lst[mid]\r\r\n if midval < x:\r\r\n lo 
= mid+1\r\r\n elif midval > x: \r\r\n hi = mid\r\r\n else:\r\r\n return (mid, comparisons)\r\r\n return (-1, comparisons)", "def binary_search_iterative(data, target):\n low = 0\n high = len(data) - 1\n while low <= high:\n mid = (low + high) // 2\n if target == data[mid]:\n return True\n elif target < data[mid]:\n high = mid - 1\n else:\n low = mid + 1\n return False", "def binsearch(alist, key, start, end):\n mid = len(alist) // 2\n if start > end:\n return None\n elif start < end:\n return binsearch(alist, key, start, mid-1)\n else:\n return mid", "def binary_search(a,x):\n start = 0 \n end = len(a)-1\n\n while start<= end:\n mid = (start+end)//2 \n if x == a[mid]:\n return mid\n elif x < a[mid]:\n end = mid-1\n else:\n start = mid+1\n return -1", "def binarySearch(nums, key): # find the most closer one but smaller then the key (better one)\n begin = 0\n end = len(nums)-1\n while end - begin > 1:\n mid = begin + (end - begin >> 1)\n if key <= nums[mid]:\n end = mid\n else:\n begin = mid\n if key > nums[end]:\n return end\n if nums[begin]< key <= nums [end]:\n return begin\n return -1", "def binary_search_tree():\n\n class Node(object):\n def __init__(self, key):\n self.left = None\n self.right = None\n self.key = key\n\n def insert(node, key):\n \"\"\" Insertion method for a binary search tree \"\"\"\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node\n\n \"\"\" Let us create the following BST \n 50 \n / \\ \n 30 70 \n / \\ / \\ \n 20 40 60 80\n \"\"\"\n\n root = None\n root = insert(root, 50)\n root = insert(root, 30)\n root = insert(root, 20)\n root = insert(root, 40)\n root = insert(root, 70)\n root = insert(root, 60)\n root = insert(root, 80)", "def binarySearch(values: list, target: int) -> int:\n lower: int = 0\n upper: int = len(values) - 1\n\n while lower <= upper: \n mid: int = (lower + upper) // 2\n\n if target == values[mid]:\n return mid\n elif values[mid] > target:\n upper = mid - 1\n else:\n lower = mid + 1\n\n return -1", "def binary_search(input_array, value):\n \n array_length = len(input_array)\n \n #(\"array length:\", array_length)\n \n left = 0\n right = array_length-1\n \n while left <= right:\n \n mid = ( left + right ) // 2\n #print(\"mid, mid value: \", mid, input_array[mid])\n \n if input_array[ mid ] == value:\n return mid\n \n elif input_array[ mid ] < value:\n # midpoint value is smaller than target, then search right half\n left = mid + 1\n \n else:\n # midpoint value is larger than target, then search left half\n right = mid - 1\n \n \n \n return -1" ]
[ "0.7193458", "0.7162694", "0.7162694", "0.7162694", "0.7151702", "0.71070755", "0.7014812", "0.69561255", "0.6896984", "0.6866832", "0.6845102", "0.6837522", "0.683216", "0.68076146", "0.6786597", "0.67074275", "0.6704192", "0.66481316", "0.6644689", "0.66392684", "0.65938276", "0.65685713", "0.65539515", "0.6546561", "0.6539377", "0.6538168", "0.65344036", "0.6526833", "0.6523882", "0.65210724" ]
0.8576695
0
Provides a default method to compute the penalty incurred when two edges are of the same type or of different types.
def compute_penalty(edge_1, edge_2): if edge_1 == edge_2: return 0 elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.FORWARD}: return 1 elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.BACKWARD}: return 1 elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.UNDIRECTED}: return 1 elif {edge_1, edge_2} == {EdgeType.FORWARD, EdgeType.BACKWARD}: return 1 elif {edge_1, edge_2} == {EdgeType.FORWARD, EdgeType.UNDIRECTED}: return 1 elif {edge_1, edge_2} == {EdgeType.BACKWARD, EdgeType.UNDIRECTED}: return 1 else: raise ImpossibleEdgeConfiguration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _by_weight_then_from_protocol_specificity(edge_1, edge_2):\n\n # edge_1 and edge_2 are edges, of the form (mro_distance, offer)\n\n mro_distance_1, offer_1 = edge_1\n mro_distance_2, offer_2 = edge_2\n\n # First, compare the MRO distance.\n if mro_distance_1 < mro_distance_2:\n return -1\n elif mro_distance_1 > mro_distance_2:\n return 1\n\n # The distance is equal, prefer more specific 'from_protocol's\n if offer_1.from_protocol is offer_2.from_protocol:\n return 0\n\n if issubclass(offer_1.from_protocol, offer_2.from_protocol):\n return -1\n elif issubclass(offer_2.from_protocol, offer_1.from_protocol):\n return 1\n\n return 0", "def edge_model(label1, label2):\n if label1 == label2:\n return ALPHA\n else:\n return 1-ALPHA", "def _compute_penalty(self):\n raise ValueError('Implement in a child class')", "def similarity(self, e1, e2):\n\t\tpass", "def unit_costs(node1, node2):\r\n # insertion cost\r\n if node1 is None:\r\n return 1\r\n \r\n # deletion cost\r\n if node2 is None:\r\n return 0\r\n \r\n # substitution cost\r\n if node1.label != node2.label:\r\n return 1\r\n else:\r\n return 0", "def astar_heuristic(n1, n2):\n average_speed = 70\n return edge_weight(n1, n2, 70)", "def penalty(self):\n return 0", "def cost(self,e1,e2):\n pass", "def get_edge_weight(self, vertex1, vertex2):\n if not self.is_weighted():\n print(\"WARNING: Graph is NOT weighted!\")\n return None\n if self.adjacent(vertex1, vertex2):\n return self._graph[vertex1][vertex2]", "def get_weight(self, node1, node2):\n if not self.is_connected(node1, node2):\n raise ValueError('There is no edge between the given nodes')\n else:\n return self._graph[node1][node2]", "def _svm_loss_penalty_dual(name):\n return hp.choice(name, [\n ('hinge', 'l2', True),\n ('squared_hinge', 'l2', True),\n ('squared_hinge', 'l1', False),\n ('squared_hinge', 'l2', False)\n ])", "def add_edge(self, val1, val2, weight=0):\n self.setdefault(val1, {})\n self.setdefault(val2, {})\n if val2 not in self[val1]:\n self[val1][val2] = weight", "def intersection_score(method1,method2):\n\tpass", "def if_any(self, other):\n return self.weighted_by_sum(other)", "def _constraints_other(self):\n pass", "def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight", "def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight\n self.edges[n2.identifier][n1.identifier] = weight", "def weight(self):", "def geo_dist_penalty(p_a, p_b): # created on Nov.3 2019\n\n # Offset is 0 for the 1st destination.\n distance_matrix = Network.dist_mat\n if p_a[0] != p_b[0] or p_a[-1] != p_b[-1]:\n raise ValueError('Paths have different o or d.')\n\n # define the penalty in utility form for every two destinations. 
u_ik stands for the generalized cost of travel\n o, d = p_a[0], p_a[-1]\n\n path_a, path_b = p_a[1:-1], p_b[1:-1] # excluding origin and destination\n\n path_node_check = []\n for _path in [path_a, path_b]:\n _new_path = []\n for node in _path:\n if node <= min(distance_matrix.shape) - 1:\n _new_path.append(node)\n path_node_check.append(_new_path)\n path_a, path_b = path_node_check[0], path_node_check[1]\n\n # utility (negative) penalty evaluation\n cost, a, b = 0, o, o # let a, b be origin\n\n # if exist empty path\n if not path_a: # if observed path is empty\n return cost\n\n while path_a and path_b:\n a, b = path_a.pop(0), path_b.pop(0) # a, b correspond to the i_th node in path_a, path_b\n cost += distance_matrix[a][b]\n\n if path_a: # length of path_a > path b\n while path_a:\n a = path_a.pop(0)\n cost += distance_matrix[a][b]\n else: # case when length of path_b > path a\n while path_b:\n b = path_b.pop(0)\n cost += distance_matrix[a][b]\n return cost", "def wedge(self, other):\n from sage.tensor.modules.free_module_alt_form import FreeModuleAltForm\n from sage.tensor.modules.format_utilities import is_atomic\n if self._domain.is_subdomain(other._domain):\n if not self._ambient_domain.is_subdomain(other._ambient_domain):\n raise TypeError(\"Incompatible ambient domains for exterior \" + \n \"product.\")\n elif other._domain.is_subdomain(self._domain):\n if not other._ambient_domain.is_subdomain(self._ambient_domain):\n raise TypeError(\"Incompatible ambient domains for exterior \" + \n \"product.\")\n dom_resu = self._domain.intersection(other._domain)\n ambient_dom_resu = self._ambient_domain.intersection(\n other._ambient_domain)\n self_r = self.restrict(dom_resu)\n other_r = other.restrict(dom_resu)\n if ambient_dom_resu.is_manifestly_parallelizable():\n # call of the FreeModuleAltForm version:\n return FreeModuleAltForm.wedge(self_r, other_r)\n # otherwise, the result is created here:\n if self._name is not None and other._name is not None:\n sname = self._name\n oname = other._name\n if not is_atomic(sname):\n sname = '(' + sname + ')'\n if not is_atomic(oname):\n oname = '(' + oname + ')'\n resu_name = sname + '/\\\\' + oname\n if self._latex_name is not None and other._latex_name is not None:\n slname = self._latex_name\n olname = other._latex_name\n if not is_atomic(slname):\n slname = '(' + slname + ')'\n if not is_atomic(olname):\n olname = '(' + olname + ')'\n resu_latex_name = slname + r'\\wedge ' + olname\n dest_map = self._vmodule._dest_map\n dest_map_resu = dest_map.restrict(dom_resu, \n subcodomain=ambient_dom_resu)\n vmodule = dom_resu.vector_field_module(dest_map=dest_map_resu)\n resu_degree = self._tensor_rank + other._tensor_rank\n resu = vmodule.alternating_form(resu_degree, name=resu_name, \n latex_name=resu_latex_name)\n for dom in self_r._restrictions:\n if dom in other_r._restrictions:\n resu._restrictions[dom] = self_r._restrictions[dom].wedge(\n other_r._restrictions[dom])\n return resu", "def weight(edge, modifiers):\r\n weight = edge.weight\r\n for modifier in modifiers:\r\n weight *= modifier.get_multiplier(edge)\r\n return weight", "def costSubstitution(el1,el2):\r\n if el1<el2:\r\n return 0\r\n else:\r\n return 5", "def add_edge(self, name1: Any, name2: Any, weight: float = 1.0) -> None:\n if name1 in self._vertices and name2 in self._vertices:\n v1 = self._vertices[name1]\n v2 = self._vertices[name2]\n\n # Add the new edge\n v1.neighbours[v2] = weight\n v2.neighbours[v1] = weight\n else:\n # We didn't find an existing vertex for both items.\n raise 
ValueError", "def compatibility(genome1, genome2, c1, c2, c3):\n excess = Genome.count_excess_genes(genome1, genome2)\n disjoint = Genome.count_disjoint_genes(genome1, genome2)\n weight_diff = Genome.average_weight_diff(genome1, genome2)\n return excess * c1 + disjoint * c2 + weight_diff * c3", "def calc_w_inference(g1, inf_g1, g2, inf_g2, consider_label):\n edges_g1 = np.count_nonzero(g1)\n edges_g2 = np.count_nonzero(g2)\n\n overlap_r1 = 0\n overlap_r2 = 0\n n_nodes = len(g1)\n for i in range(n_nodes):\n for j in range(n_nodes):\n if consider_label:\n if (g1[i][j] != NO_REL_SYMBOL and inf_g2[i][j]!= NO_REL_SYMBOL) and (g1[i][j] == inf_g2[i][j]):\n overlap_r1 += 1 # how much g1 recalls \"populated\"-g2\n if (inf_g1[i][j] != NO_REL_SYMBOL and g2[i][j]!= NO_REL_SYMBOL) and (inf_g1[i][j] == g2[i][j]):\n overlap_r2 += 1 # how much g2 recalls \"populated\"-g2\n else:\n if (g1[i][j] != NO_REL_SYMBOL and inf_g2[i][j]!= NO_REL_SYMBOL):\n overlap_r1 += 1\n if (inf_g1[i][j] != NO_REL_SYMBOL and g2[i][j]!= NO_REL_SYMBOL):\n overlap_r2 += 1\n\n r1 = float(overlap_r1) / float(edges_g1)\n r2 = float(overlap_r2) / float(edges_g2)\n return (r1 + r2) / float(2)", "def __gt__(self, other):\n return self.weight() > other.weight()", "def test_PoissonRegression_penalty_elastic_net_ratio(self):\n ratio_1 = 0.6\n ratio_2 = 0.3\n for penalty in PoissonRegression._penalties.keys():\n if penalty == 'elasticnet':\n learner = PoissonRegression(penalty=penalty, C=self.float_1,\n elastic_net_ratio=ratio_1)\n self.assertEqual(learner.C, self.float_1)\n self.assertEqual(learner.elastic_net_ratio, ratio_1)\n self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)\n self.assertEqual(learner._prox_obj.ratio, ratio_1)\n\n learner.elastic_net_ratio = ratio_2\n self.assertEqual(learner.C, self.float_1)\n self.assertEqual(learner.elastic_net_ratio, ratio_2)\n self.assertEqual(learner._prox_obj.ratio, ratio_2)\n\n else:\n msg = '^Penalty \"%s\" has no elastic_net_ratio attribute$$' % \\\n penalty\n with self.assertWarnsRegex(RuntimeWarning, msg):\n if penalty == 'binarsity':\n PoissonRegression(penalty=penalty,\n elastic_net_ratio=0.8,\n blocks_start=[0], blocks_length=[1])\n else:\n PoissonRegression(penalty=penalty,\n elastic_net_ratio=0.8)\n\n if penalty == 'binarsity':\n learner = PoissonRegression(\n penalty=penalty, blocks_start=[0], blocks_length=[1])\n else:\n learner = PoissonRegression(penalty=penalty)\n\n with self.assertWarnsRegex(RuntimeWarning, msg):\n learner.elastic_net_ratio = ratio_1", "def structural_hamming_distance(self,\n other,\n penalty_edge_mismatch_func=None):\n\n edges_1 = self.edges\n edges_2 = other.edges\n if penalty_edge_mismatch_func is None:\n penalty_edge_mismatch_func = GraphViaEdges.compute_penalty\n\n if set(edges_1.keys()) != set(edges_2.keys()):\n msg = 'The Structural Hamming Distances cannot be computed : the '\n msg += 'graphs cannot be compared.'\n raise GraphsCannotBeCompared(msg)\n\n shd = 0\n\n for key in edges_1.keys():\n\n shd += penalty_edge_mismatch_func(\n edge_1=edges_1[key],\n edge_2=edges_2[key]\n )\n\n return shd", "def add_edge(self, v1, v2):\n pass # TODO", "def compare(self, left, right):\n if left.lower() == right.lower():\n return 1.0 * self.specificity(left)\n return 0.0" ]
[ "0.6627848", "0.58065695", "0.5716301", "0.54881287", "0.54837036", "0.54098004", "0.5400932", "0.53619206", "0.535488", "0.532785", "0.5312015", "0.5289913", "0.5235407", "0.5224458", "0.5208916", "0.5180457", "0.51760596", "0.5172492", "0.51664996", "0.5129541", "0.51100844", "0.50969094", "0.5079021", "0.50474787", "0.50430816", "0.5020781", "0.5011972", "0.5009425", "0.50085074", "0.5002767" ]
0.732774
0
Computes the Structural Hamming Distance between two graphs. By default it is equal to the number of edges in the graphs that are not of the same type. A different weighted scheme for penalty computation may be provided (we may want to penalise the presence of an edge in the opposite direction more than the absence of an edge, for example).
def structural_hamming_distance(self, other, penalty_edge_mismatch_func=None): edges_1 = self.edges edges_2 = other.edges if penalty_edge_mismatch_func is None: penalty_edge_mismatch_func = GraphViaEdges.compute_penalty if set(edges_1.keys()) != set(edges_2.keys()): msg = 'The Structural Hamming Distances cannot be computed : the ' msg += 'graphs cannot be compared.' raise GraphsCannotBeCompared(msg) shd = 0 for key in edges_1.keys(): shd += penalty_edge_mismatch_func( edge_1=edges_1[key], edge_2=edges_2[key] ) return shd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hamming(s1, s2):\n weight = abs(len(s1)-len(s2))\n if len(s1) < len(s2):\n s1, s2 = s2, s1\n for i in range(len(s2)):\n weight += not s1[i] == s2[i]\n return weight", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def hamming_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity score.\n return measure.get_sim_score(s1, s2)", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_dist(seq1, seq2):\n diffs = 0\n length = 0\n for x, y in zip(str(seq1), str(seq2)):\n if x == '-' or y == '-':\n continue\n elif x != y:\n diffs += 1\n length += 1\n try:\n return float(diffs) / length\n except:\n return 0.5", "def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()", "def hamming_distance(x1: np.ndarray, x2: np.ndarray) -> int:\n assert isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray)\n return (x1 != x2).sum()", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def generalised_hamming_distance(a, b):\n if len(a) == len(b):\n return hamming_distance(a, b)\n if len(a) > len(b):\n dna = a\n kmer = b\n else:\n dna = b\n kmer = a\n k = len(kmer)\n\n dist = min([hamming_distance(kmer, kmer2) for kmer2 in kmers_from_dna(dna, k)])\n return dist", "def hamming_distance(s1, s2):\n if len(s1) > len(s2):\n s2 = s2.ljust(len(s1))\n else:\n s1 = s1.ljust(len(s2))\n\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def CompareGraphsSpectrum(graph1, graph2):\n laplacian1 = nx.spectrum.laplacian_spectrum(graph1)\n laplacian2 = nx.spectrum.laplacian_spectrum(graph2)\n k1 = select_k(laplacian1)\n k2 = select_k(laplacian2)\n # take the fewer dimensions to describe the result\n k = min(k1, k2)\n # the similarity is the sum of the eukleidian distance of the most\n # important nodes\n similarity = sum((laplacian1[:k] - laplacian2[:k])**2)\n return similarity", "def hamming_dist(bytes1, bytes2):\n if type(bytes1) == str:\n bytes1 = [ord(c) for c in str1]\n if type(bytes2) == str:\n bytes2 = [ord(c) for 
c in str2]\n bins = [bin(o1 ^ o2) for o1, o2 in zip(bytes1, bytes2)]\n return len([i for i in ''.join(bins) if i == '1'])", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def hamming_distance(input1, input2):\n if len(input1) != len(input2):\n raise ValueError('Length of input1 and input2 are not equal.')\n input1 = hex_decode(hex_encode(input1))\n input2 = hex_decode(hex_encode(input2))\n # the general strategy here is to xor the two strings together\n # and then just count the number of 1s in the output (i.e., where the\n # two strings differed).\n output = fixed_xor(input1, input2)\n distance = 0\n for byte in output:\n for i in range(8):\n bit_mask = 1 << i\n if (bit_mask & byte) == bit_mask:\n distance += 1\n return distance", "def __hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(bytes_0: bytes, bytes_1: bytes) -> int:\n assert len(bytes_0) == len(bytes_1)\n return sum(sum(bits(byte_0 ^ byte_1)) for (byte_0, byte_1) in zip(bytes_0, bytes_1))", "def hellSimilarity(topicDict1,topicDict2):\n K = len(topicDict1)\n hellDis = 0\n for key in topicDict1.keys():\n if key not in topicDict2:\n print '%d is not in another dict...' % key\n return\n else:\n if topicDict1[key] < 0:\n topicDict1[key] = 1.0 / 10000000\n if topicDict2[key] < 0:\n topicDict2[key] = 1.0 / 10000000\n hellDis += (math.sqrt(topicDict1[key]) - math.sqrt(topicDict2[key]))**2\n hellDis = math.sqrt(hellDis)\n #distance\n hellDis = hellDis * (1.0/math.sqrt(2))\n if hellDis == 0:\n hellDis = 1.0 / 10000000\n #similarity\n hellSimilarity = 1.0 / hellDis\n return hellSimilarity", "def hamming_distance(str1, str2):\n\n # TODO: Write your solution here\n # Edge case check\n if len(str1) != len(str2):\n return None\n\n count = 0\n for index in range(len(str1)):\n if str1[index] != str2[index]:\n count += 1\n\n if count is 0:\n return None\n\n return count", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n if c1 != c2:\n ham_dist += 1\n return ham_dist", "def graph_dist(i1: int, g1: nx.Graph, i2: int, g2: nx.Graph) -> t.Tuple[int, int, float]:\n space1, space2 = map(dict, map(mut_space, [g1, g2]))\n d = 0\n for k in set(list(space1) + list(space2)):\n if k in space1 and k in space2:\n d += len(set(space1[k]).symmetric_difference(set(space2[k])))\n continue\n if k in space1:\n d += len(set(space1[k]))\n if k in space2:\n d += len(set(space2[k]))\n return i1, i2, d", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def HammingDistance(pattern1, pattern2):\n distance = 0\n if len(pattern1) == len(pattern2):\n for i in range(len(pattern1)):\n if pattern1[i]!=pattern2[i]:\n distance += 1\n return distance\n else:\n assert 0, \"Two patterns have different lengths.\"" ]
[ "0.7030229", "0.67276067", "0.6668017", "0.6613405", "0.65800333", "0.65618753", "0.6504339", "0.6503381", "0.6501891", "0.6434182", "0.6396513", "0.63164246", "0.6268241", "0.62652636", "0.62545633", "0.6210348", "0.62090605", "0.6204939", "0.6188579", "0.61692894", "0.6163425", "0.61481655", "0.60932803", "0.608113", "0.6029697", "0.598682", "0.5937193", "0.59241265", "0.591962", "0.590258" ]
0.82934976
0
Checks whether the object is equal to another GraphViaEdges object. Two GraphViaEdges objects are equal if they have the same edges and the same names.
def __eq__(self, other): if self.edges != other.edges: return False if self.name != other.name: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other: Vertex) -> bool:\n if isinstance(other, self.__class__):\n return self.id == other.id and self.edges == other.edges\n return False", "def __eq__(self, other):\n if not type(other) == type(self):\n return False\n sedges, oedges = self.edges, other.edges\n return ((len(sedges) == len(oedges)) and\n all(numpy.all(se == oe) for (se, oe) in zip(sedges, oedges)))", "def __eq__(self, other):\n if isinstance(other, type(self)):\n same_edges = self._edges == other._edges\n same_weights = self._weights == other._weights\n return same_edges and same_weights\n else:\n return False", "def __eq__(self, other) -> bool:\n if not isinstance(other, self.__class__):\n return False\n\n if self.number_of_nodes() != other.number_of_nodes():\n return False\n if self.number_of_edges() != other.number_of_edges():\n return False\n\n if list(self.nodes) != list(other.nodes):\n return False\n\n # Compare node data.\n for i in self.nodes:\n # We may want to exclude the 'name' attribute from comparisons, assuming\n # it has no logical meaning.\n if self.nodes[i] != other.nodes[i]:\n return False\n\n if list(self.edges) != list(other.edges):\n return False\n\n for i, j in self.edges:\n # Compare edge data.\n if self.edges[i, j] != other.edges[i, j]:\n return False\n\n return True", "def __eq__(self, other_vertex):\n return self.name == other_vertex.name and self.state == other_vertex.state", "def __eq__(self, other):\n if not isinstance(other, Model):\n return False\n return self.graph == other.graph", "def __eq__(self, other):\n return (self.vertices == other.vertices and self.weight == other.weight)", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n # check equality of names and attributes as well as that of the incident Node objects\n return \\\n self.weight == other.get_weight() and \\\n self.attributes.__eq__(other.get_attributes()) and \\\n self.get_incident_nodes().__eq__(other.get_incident_nodes())", "def __eq__(self, other):\n if isinstance(other, UnDirectedWeightedGraphEdge):\n if self.head_vertex != other.head_vertex:\n return False\n elif self.tail_vertex != other.tail_vertex:\n return False\n elif self.weight != other.weight:\n return False\n return True\n return NotImplemented", "def __eq__(self, other):\n if isinstance(other, DirectedWeightedGraphEdge):\n if self.head_vertex != other.head_vertex:\n return False\n elif self.tail_vertex != other.tail_vertex:\n return False\n elif self.weight != other.weight:\n return False\n return True\n return NotImplemented", "def __eq__(self, other):\n if isinstance(other, Edge):\n return self.label == other.label\n return NotImplemented", "def __eq__(self, other):\n if isinstance(other, GraphEdge):\n return self.head_vertex == other.head_vertex and self.tail_vertex == other.tail_vertex\n return NotImplemented", "def __eq__(self, other) -> bool:\n if other is None or not isinstance(other, Graph):\n name = other.name if other else None\n print(f'{name} is not a Graph object.')\n return False\n\n def match(op1: Operator, op2: Operator) -> bool:\n if not op1.equals(op2):\n print(f'{op1.name} is different.')\n return False\n\n # check input nodes and further\n for i1, i2 in zip(op1.input_ops.values(), op2.input_ops.values()):\n if not match(i1, i2):\n return False\n return True\n\n for 
o1, o2 in zip(self.get_outputs(), other.get_outputs()):\n if not match(o1, o2):\n return False\n return True", "def __eq__(self, other):\n if isinstance(other, DirectedGraphEdge):\n return self.head_vertex == other.head_vertex and self.tail_vertex == other.tail_vertex\n return NotImplemented", "def __eq__(self, other):\n\n return (self.nodes[0].id == other.nodes[0].id) & \\\n (self.nodes[1].id == other.nodes[1].id) & \\\n (self.name == other.name)", "def __eq__(self, other):\r\n return self.__name == other.__name", "def __eq__(self, other):\r\n return self.__name == other.__name", "def __eq__(self, other):\n return self.name == other.name", "def __eq__(self, other):\n if isinstance(other, six.string_types):\n return other == self.name\n elif isinstance(other, type(self)):\n return self.arrow_dtype == other.arrow_dtype\n else:\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__" ]
[ "0.7573153", "0.73986137", "0.72373873", "0.7203145", "0.7197092", "0.7126384", "0.70328027", "0.69937086", "0.69937086", "0.6891229", "0.6870849", "0.6869632", "0.68648255", "0.6817948", "0.68070734", "0.6747166", "0.66999537", "0.65927106", "0.65927106", "0.65757036", "0.65371335", "0.6508962", "0.650843", "0.650843", "0.650843", "0.650843", "0.650843", "0.650843", "0.650843", "0.650843" ]
0.79617494
0
A number is randomized (it is used to define which element of the 'posibilidades_de_movimiento' list to call)
def mover_aleatoriamente(self):
    self.randomizador = random.randint(0,4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mover_rectilineamente(self):\n if self.randomizador < 4:\n self.posibilidades_de_movimiento[self.randomizador]()", "def randVacantPoint(L):\n pliste = vacantPoint(L)\n\n return pliste[random.randint(0, len(pliste)-1)]", "def randomize(self, pos):\n random_value = random.randint(1, 9)\n if self.board.valid(pos, random_value):\n self.board.update_board(pos, random_value)\n else:\n self.randomize(pos)", "def random_number():\n return random.randint(0, 9999)", "def random_move(self):\n available_idx = self.get_empty_cells(self.game_board)\n return random.choice(available_idx)", "def random_number():\n random_num = random.choice(empty)\n return random_num", "def loto() -> List[int]:\n numeros = []\n nbre_valeurs = 6\n val_min = 1\n val_max = 49\n\n nbre_elements = 0\n while nbre_elements <= nbre_valeurs:\n numero = random.randint(val_min, val_max)\n if numero not in numeros:\n numeros.append(numero)\n nbre_elements += 1\n\n return numeros", "def add_number(self):\n # take one of the free positions in the grid at random\n x, y = random.choice(self.free_positions)\n # with the probability of Game.proba_four, put a 4 in the box. Else\n # put a 2\n if random.random() < Game.proba_four:\n self.grid[x][y] = 4\n else:\n self.grid[x][y] = 2", "def getRandom(self) -> int:\n return random.choice(self.elements)", "def random_legal_move():\n return random.choice(legal_moves())", "def randomMove(self, game):\n #time.sleep(0.25)\n return random.choice(game.get_all_legal_moves())", "def get_random_move(self, valid_moves):\n return random.choice(valid_moves)", "def move_random(self, board):\n self.get_moves(board.board)\n return random.choice(self.available_moves)", "def addNbr (self) :\n #we pick out the random number : 2 or 4\n if random.randint(1,10) == 1:\n randomNbr = 4\n else :\n randomNbr = 2\n\n #we pick a random position for the number\n emptyCounter = 0\n for k in range (4) :\n for i in range (4) :\n if self.grid[k,i] == 0 :\n emptyCounter += 1\n\n randomPosition = random.randint(0,emptyCounter-1)\n counter = 0\n for k in range (4) :\n for i in range (4) :\n if self.grid[k,i] == 0 :\n if (counter == randomPosition) :\n self.grid[k,i] = randomNbr\n return #we leave the function\n counter += 1", "def remove_randomico(lista, qtd_remocao):\n for i in range(qtd_remocao):\n lista.pop(random.randrange(len(lista))) \n return lista", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def randomize_value(self) -> None:", "def random(self, n=1):\n # self.num_generated += n", "def random_position():\n pos = np.random.randn(3)\n pos[2] = 0\n return pos", "def make_move(self, board: Board) -> int:\n return random.choice(board.get_valid_moves())", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def make_random_move(self):\n #completely random move\n all_moves = set(itertools.product(range(self.height), range(self.width)))\n moves_left = list(all_moves - 
self.mines - self.moves_made)\n if not moves_left:\n return None\n return random.choice(moves_left)", "def shuffle_pos(self, ):\n x, y = 0, 0\n while self.maze.structure[int(y / 40)][int(x / 40)] != \"0\" \\\n or (x, y) in self.forbiden_tulpes:\n x = random.randint(0, 14) * sprite_size\n y = random.randint(0, 14) * sprite_size\n self.forbiden_tulpes.append((x, y))\n return x, y", "def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()", "def girar_aleatorio(self):\n for i in range(len(self.ativo_horas)):\n self.ativo_horas[i] = randint(0, 1)", "def random(self):\r\n return random.randint(1, 4)", "def getRandom(self) -> int:" ]
[ "0.6768249", "0.6449843", "0.63939553", "0.6114297", "0.6083678", "0.6059127", "0.5929723", "0.5912634", "0.59067637", "0.5902121", "0.58631676", "0.5811645", "0.58034986", "0.57963586", "0.5784861", "0.57704055", "0.5750152", "0.574356", "0.5739486", "0.5729298", "0.57202774", "0.57202774", "0.57202774", "0.57202774", "0.5701258", "0.570027", "0.5691367", "0.5686491", "0.56812227", "0.56611985" ]
0.6709772
1
Based on the results it requested from the map, the WhiteWalker moves (or not) to the right
def mover_bm_derecha(self):
    self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1, self.casilla[1]], [self.vertice_2[0] + self.velocidad, self.vertice_2[1]], [self.vertice_1[0] + 5, self.vertice_1[1]])
    self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] + 1, self.casilla[1] + 1], [self.vertice_4[0] + self.velocidad, self.vertice_4[1]], self.vertice_1)
    if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:
        self.x += self.velocidad * (self.x <= 655)
        self.posicion = [self.x, self.posicion[1]]
        self.casilla = [self.casilla[0] + self.nueva_posicion_posible_parte_superior[1], self.casilla[1]]
        self.redefinir_vertices()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encontrarMejor(padre, hijos, dist):\n\n #el valor minimo de la distancia\n minimo = dist[hijos[0].getId()][padre]\n #aqui almacenaremos la posicion del mejor hijo en el arreglo hijos[]\n mejor = 0\n #se van a recorrer todos los hijos que posee el padre\n for i in range(1, len(hijos)):\n #si la distancia de el hijo actual es menor a la menor distancia que hemos encontrado, ese hijo sera nuestro nuevo mejor hijo\n if dist[hijos[i].getId()][padre] < minimo: \n minimo = dist[hijos[i].getId()][padre]\n mejor = i\n #ahora tenemos que retornar el nodo con el mejor hijo\n return hijos[mejor]", "def ia_reflexion(data_ia, data_map):\n ia = data_ia['ia_id']\n enemy = data_ia['enemy_id']\n commands = {}\n\n new_positions = []\n moved_units = []\n\n for ia_unit in data_ia[ia]:\n unit_has_attacked = False\n unit_targets = []\n\n for enemy_unit in data_ia[enemy]:\n # Find each possible target for the Dwarves.\n if data_ia[ia][ia_unit][0] == 'D':\n if (ia_unit[0] - 1) <= enemy_unit[0] <= (ia_unit[0] + 1) and (ia_unit[1] - 1) <= enemy_unit[1] <= (ia_unit[1] + 1):\n # Add the unit to the target list.\n unit_targets.append(enemy_unit)\n\n # Find each possible target for the Elves - ATTACK\n else:\n for i in range(2):\n if (ia_unit[0] - (1 + i)) <= enemy_unit[0] <= (ia_unit[0] + (1 + i)) and (ia_unit[1] - (1 + i)) <= enemy_unit[1] <= (ia_unit[1] + (1 + i)):\n # Add the unit to the target list.\n unit_targets.append(enemy_unit)\n\n # Find the weakest units.\n if unit_targets:\n target = unit_targets[0]\n for enemy_unit in unit_targets:\n if data_ia[enemy][enemy_unit][0] == 'D' or data_ia[enemy][enemy_unit][1] < data_ia[enemy][target][1]:\n target = enemy_unit\n\n # Write the attack.\n commands[data_ia[ia][ia_unit][2]] = [ia_unit, ' -a-> ', target]\n unit_has_attacked = True\n\n # Find the weakest of all enemy's units - MOVE\n if not unit_has_attacked:\n target_list = data_ia[enemy].keys()\n target = target_list[0]\n\n for enemy_unit in data_ia[enemy]:\n if data_ia[enemy][enemy_unit][0] == 'D' or data_ia[enemy][enemy_unit][1] < data_ia[enemy][target][1]:\n target = enemy_unit\n\n target_cell = [ia_unit[0], ia_unit[1]]\n # Move on Y axis\n if target and abs(ia_unit[1] - target[1]) > abs(ia_unit[0] - target[0]) and 1 <= ia_unit[0] <= data_map['map_size'] and 1 <= ia_unit[1] <= data_map['map_size']:\n if ia_unit[1] > target[1]:\n target_cell[1] -= 1\n else:\n target_cell[1] += 1\n # Move on X axis\n elif target and 1 <= ia_unit[0] <= data_map['map_size'] and 1 <= ia_unit[1] <= data_map['map_size']:\n if ia_unit[0] > target[0]:\n target_cell[0] -= 1\n else:\n target_cell[0] += 1\n\n new_target = False\n # Check if he can move on the targeted position.\n enemy_positions = data_ia[enemy].keys()\n ia_positions = data_ia[ia].keys()\n for units in moved_units:\n del ia_positions[ia_positions.index(units)]\n\n # If the units can't move, find another free cell.\n if target_cell in (new_positions or enemy_positions or ia_positions):\n new_target_cells = []\n for line in range(target_cell[0] - 1, target_cell[0] + 2):\n for column in range(target_cell[1] - 1, target_cell[1] + 2):\n\n # Append the possible free cell to the list.\n if (line, column) not in (new_positions or enemy_positions or ia_positions):\n new_target_cells.append((line, column))\n\n # Choose the nearest free cell.\n if new_target_cells:\n new_target = new_target_cells[0]\n for cell in new_target_cells:\n if abs(ia_unit[0] - cell[0]) + abs(ia_unit[1] - cell[1]) < abs(ia_unit[0] - new_target[0]) + abs(ia_unit[1] - new_target[1]):\n new_target = 
new_target_cells[new_target_cells.index(cell)]\n\n # Save the new target in the correct variable.\n if new_target:\n target_cell = new_target\n\n # Write the move\n if target_cell != ia_unit:\n commands[data_ia[ia][ia_unit][2]] = [ia_unit, ' -m-> ', target_cell]\n new_positions.append(target_cell)\n moved_units.append(ia_unit)\n\n return commands", "def intercambiar(mapa, mapa2):\n for e in mapa.bloqueadas:\n mapa2.bloqueadas.append(e)", "def pingjiazhibiao(result):\n import math\n list_ed_normal = []\n list_es_normal = []\n list_ed_true = []\n list_es_true = []\n # these definations are for statistic\n ed_pred_all, es_pred_all,ed_true_all,es_true_all,ed_match,es_match,ed_normal,es_normal,ed_nomiss,es_nomiss= 0,0,0,0,0,0,0,0,0,0\n total_error_ed,total_error_es = 0,0\n sample_missimg_num = 0\n a4cdDict = {}\n a4csDict = {}\n for i in range(-5,7):\n a4cdDict[i] = 0\n a4csDict[i] = 0\n for i in result:\n pred = i[0]\n ed_pred = pred[0]\n es_pred = pred[1]\n if ed_pred == [] or es_pred == []:\n sample_missimg_num += 1\n true = i[1]\n ed_true = true[0]\n es_true = true[1]\n\n # avoid many to one\n ed_pred.sort()\n es_pred.sort()\n deleteAmong10frames(ed_pred)\n deleteAmong10frames(es_pred)\n \n for j in ed_pred:\n ed_pred_all += 1\n for t in ed_true:\n if math.fabs(j - t) < 6:\n ed_normal += 1\n total_error_ed += math.fabs(t - j)\n a4cdDict[j-t]+=1\n break\n # all - normal = FP\n # normal is TP\n a4cdDict[6] = ed_pred_all-ed_normal\n\n for j in es_pred:\n es_pred_all += 1\n for t in es_true:\n if math.fabs(j - t) < 6:\n es_normal += 1\n total_error_es += math.fabs(t - j)\n a4csDict[j-t]+=1\n break\n a4csDict[6] = es_pred_all-es_normal\n for j in ed_true:\n ed_true_all += 1\n for t in ed_pred:\n if math.fabs(t - j) < 6:\n ed_nomiss += 1\n break\n\n for j in es_true:\n es_true_all += 1\n for t in es_pred:\n if math.fabs(t - j) < 6:\n es_nomiss += 1\n break\n # aFD precision recall \n ed_result = total_error_ed / ed_normal,(ed_normal / ed_pred_all),(ed_nomiss / ed_true_all)\n es_result = total_error_es / es_normal,(es_normal / es_pred_all),(es_nomiss / es_true_all)\n return ed_result,a4cdDict, es_result,a4csDict, sample_missimg_num / len(result)", "def ludnosc(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"EN.POP.DNST\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def decision(grid):\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()", "def preencherJogadores():\n global jogadores\n for x in participantes:\n if x['porta'] != lider['porta']:\n jogadores.append(x)", "def verif_site_catal(serine,d,x=15):\n\n w=dict()\n #dans un premier temp, on recherche tous les carbones alpha dans un rayon de x angstrom\n #a partir de la serine 
utilise comme reference\n cmd.select(\"selection_pour_site\",\"name ca within \"+str(x)+\" of (resi \"+str(serine[0])+\" and chain \"+serine[1]+\" and name ca)\")\n stored.list=list()\n cmd.iterate(\"selection_pour_site\",\"stored.list.append((resi,chain,resn))\")\n #print \"liste genere par pymol\"#debug\n #print stored.list #debug\n \n \n #on recherche dans un deuxieme temps s'il existe une histidine dans cette selection\n his,w[\"his\"]=site_utils.verif_histidine(stored.list,d)\n \n #dans un troisieme temps on recherche un aspartate ou un glutamate idealement place \n acide,w[\"acide\"]=site_utils.verif_acide(stored.list,d)\n \n w[\"dist\"]=x\n \n cmd.delete(\"selection_pour_site\")\n return [his,acide,w]", "def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")", "def update(self):\n\n # Ordenamos los personajes por fila, para luego dibujarlos correctamente. Para que no se solapen.\n self._personajes.sort(self._comparar_coordenadas_personajes)\n\n for personaje in self._personajes:\n if (personaje.andando): # Si está andando.\n # Si el personaje se encuentra en el centro de la celda a donde debia llegar ...\n if (personaje.obtener_coordenadas() == self._mapa.obtener_coordenadas_por_posicion((personaje.camino[0][0], personaje.camino[0][1]),self._mapa.CENTER)): \n del personaje.camino[:1] # Eliminamos esa celda del camino ha seguir porque ya ha llegado a ella. 
\n if ((personaje.camino == []) or (personaje.camino == None)): # Si ya no queda camino a seguir...\n personaje.parar() # Paramos al Personaje.\n if not(personaje.accion == None): # Si tiene asignada alguna acción después de haber llegado a su destino ...\n personaje.accion() # Ejecutamos la acción\n personaje.accion = None # Y limpiamos la acción\n if (personaje.nombre != \"Jugador\"): # Si el Personaje no es el Jugador establacemos su dirección final.\n personaje.actualizar_direccion(personaje.direccion_final)\n else: # Calculamos la nueva direccion hacia donde tiene que mover\n # Obtenemos la fila y columna donde se encuenta el personaje.\n origen = self._mapa.obtener_posicion_por_coordenadas(personaje.obtener_coordenadas())\n # Establecemos hacia donde tiene que mirar el Personaje para ir en esa dirección.\n personaje.actualizar_direccion(self._mapa.direcciones.index([personaje.camino[0][0] - origen[0], personaje.camino[0][1] - origen[1]]))\n else: # Si el personaje no esa todavia en el centro de la celda \n if (not self._hay_colision(personaje, (personaje.camino[0][0], personaje.camino[0][1]))): # Si no hay colisión en la celda de destino \n personaje.mover(personaje.obtener_direccion()) # Movemos al personaje en esa dirección.\n else: # Si hay colision\n celda_personaje = self._mapa.obtener_posicion_por_coordenadas(personaje.obtener_coordenadas())\n personaje.actualizar_posicion(self._mapa.obtener_coordenadas_por_posicion((celda_personaje[0], celda_personaje[1]),self._mapa.CENTER))\n # Volvermos a calcular una ruta para llegar al destino.\n self.ir_a(personaje, personaje.destino, personaje.direccion_final)\n \n personaje.update() # Actualizamos el personaje.\n \n for objeto in self._objetos: # Actualizamos los objetos.\n objeto.update()\n \n self._dibujar(\n )", "def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, 
concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n 
foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log", "def resultat_match(self, binomes):\n for binome in binomes:\n while True:\n score_un = self.vue.entree_resultats(binome[0])\n score_deux = self.vue.entree_resultats(binome[1])\n if score_un + score_deux != 1:\n self.vue.erreur_score()\n continue\n else:\n binome[0].ajout_score(score_un)\n binome[1].ajout_score(score_deux)\n table_players.update({\"score\": binome[0].points},\n doc_ids=[binome[0].id])\n table_players.update({\"score\": binome[1].points},\n doc_ids=[binome[1].id])\n break\n self.vue.afficher_resultats(binomes)", "def seleccion(datos,multifasta,querys):\n\n #Hacemos una lista con los nombres de las querys que están en el archivo\n nombres_query=[]\n with open (querys,mode=\"r\") as f:\n for linea in f:\n if linea[0]==\">\":\n nombres_query.append(linea[1:len(linea)-1])\n f.close()\n\n #Obtenemos los nombres de las query y de los subject con los que ha hecho hit\n nombres2=datos[\"Nombre_subject\"]\n nombres1=datos[\"Nombre_query\"]\n nombres1=list(nombres1[1:])\n nombres2=list(nombres2[1:])\n \n seleccion={}#diccionario querys:hits blast\n #Parseamos las listas para obtener el nombre de la query como clave\n #y como valor una lista con los subjects con los que ha hecho hit\n for i in range(len(nombres1)): \n for x in range(len(nombres_query)):\n if nombres_query[x]==nombres1[i]:\n clave=nombres_query[x]\n valor=nombres2[i]\n if clave in seleccion:\n seleccion[clave].append(valor)\n else:\n seleccion[clave]=[valor]\n #Elimino valores duplicados en los valores\n for k, v in seleccion.items():\n nuevo=[]\n for item in v:\n if item not in nuevo:\n nuevo.append(item)\n seleccion[k] = nuevo\n\n #Contador para determinar si se encuentra en una linea con el nombre (>) o con la secuencia\n n=0\n #Contador para recorrer la lista con los nombres de las querys\n cuenta=0\n #Lista con los nombres de los archivos generados\n lista_nombres=[]\n for opciones in seleccion.items():\n abre_query=open(querys,\"r\")#Abrimos el archivo de las querys\n keys=seleccion.keys()#Generamos una lista con las keys del diccionario, que son las querys\n modifica=[]\n modifica1=[]\n modifica2=[]\n modifica3=[]\n\n nombre_archivo=opciones[0]\n with open (multifasta,mode=\"r\") as f:\n with open(nombre_archivo,\"w+\") as archivo: #El nombre de cada archivo será el nombre de su query\n #Forma una lista con todos los hits de blast\n modifica2=opciones[1]\n \n # Forma una lista con el nombre de cada una de las querys\n for x in abre_query: \n if x[0]==\">\":\n modifica1.append(x[1:len(x)-1])\n \n 
#En caso de que los hits que encuentra en blast no sean las query, las elimina\n eliminar=[item for item in modifica1 if item not in modifica2]\n for r in eliminar:\n modifica1.remove(r)\n \n #Nos quedamos solamente con los hits que encontró en blast, quitando las querys\n modifica3 = [item for item in modifica2 if item not in modifica1]\n modifica3.sort()\n \n #genera la lista con todos los hits, incluidas las query\n if len(modifica1)<=len(keys):\n modifica=modifica1+modifica3\n\n #Forma un archivo por cada query introducida, con los nombres y secuencias\n #que se obtuvieron en el blast\n for linea in f:\n if cuenta==(len(modifica)):\n break\n if linea[1:(len(linea)-1)]==modifica[cuenta]:\n archivo.write(linea)\n n+=1\n elif n==1 and linea[0]!=\">\":\n archivo.write(linea)\n cuenta+=1\n n=0\n else:\n n=0\n lista_nombres=lista_nombres+[nombre_archivo] \n archivo.close()\n n=0\n cuenta=0\n f.close()\n \n \n \n\n \n return lista_nombres", "def game (self,mapa):\n\n self.titulo()\n\n for fila in mapa:\n print(\"\".join(fila))", "def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()", "def encontrar_mejor_sitio(ubicaciones):\n\ttorretas_adicionales = ubicaciones.count(\"t\") # obtengo cuantas t (torretas adicionales) hay en ubicaciones\n\tif torretas_adicionales == 0:\n\t\tprint \"Posicion: 5\"\n\tif torretas_adicionales == 4:\n\t\tprint \"Posicion: \",\n\t\tfor i in range(len(ubicaciones)):\n\t\t\tif ubicaciones[i] == \"o\":\n\t\t\t\tprint i,", "def imprime_mapa(lat,lon):\r\n\r\n lista=[\"colegio\", \"starbucks\",\"estadio de baloncesto\", \"bar\",\"restaurante vegano\",\"peluqueria perros\",\"aeropuerto\"]\r\n \r\n tipo=list()\r\n latitud=list()\r\n longitud=list()\r\n\r\n for q in lista:\r\n resultado=foursquare_visual({'latitud':lat, 'longitud':lon},q)\r\n \r\n for r in resultado:\r\n tipo.append(q.replace(\" \",\"_\"))\r\n latitud.append(r['latitud'])\r\n longitud.append(r['longitud'])\r\n #if q == \"colegio\" or q == \"peluqueria perros\":\r\n # print(pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud}))\r\n # raise\r\n \r\n \r\n df=pd.DataFrame({'tipo':tipo,'latitud':latitud,'logitud':longitud})\r\n\r\n \r\n\r\n mapa = Map(location=[lat,lon],zoom_start=15)\r\n\r\n empresa = {\r\n \"location\":[lat, lon ],\r\n \"tooltip\" : \"Empresa\"\r\n }\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-dot-circle-o\",\r\n icon_color = \"white\"\r\n )\r\n Marker(**empresa,icon = icon ).add_to(mapa)\r\n\r\n\r\n for i, row in df.iterrows():\r\n establecimiento = {\r\n \"location\":[row[\"latitud\"], row[\"logitud\"]],\r\n \"tooltip\" : row[\"tipo\"].replace(\"_\",\" \").capitalize()\r\n 
}\r\n\r\n if row[\"tipo\"] == \"starbucks\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"fa-coffee\",\r\n icon_color = \"white\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"restaurante_vegano\":\r\n icon = Icon(color = \"green\",\r\n prefix = \"fa\",\r\n icon = \"leaf\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"colegio\":\r\n icon = Icon(color = \"blue\",\r\n prefix = \"fa\",\r\n icon = \"fa-graduation-cap \",\r\n icon_color = \"black\"\r\n )\r\n \r\n elif row[\"tipo\"] == \"peluqueria_perros\":\r\n icon = Icon(color = \"red\",\r\n prefix = \"fa\",\r\n icon = \"fa-paw\",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"estadio_de_baloncesto\":\r\n icon = Icon(color = \"orange\",\r\n prefix = \"fa\",\r\n icon = \"fa-futbol-o \",\r\n icon_color = \"black\"\r\n )\r\n\r\n elif row[\"tipo\"] == \"aeropuerto\":\r\n icon = Icon(color = \"white\",\r\n prefix = \"fa\",\r\n icon = \"fa-plane\",\r\n icon_color = \"black\"\r\n )\r\n elif row[\"tipo\"] == \"bar\":\r\n icon = Icon(color = \"pink\",\r\n prefix = \"fa\",\r\n icon = \"fa-glass\",\r\n icon_color = \"white\"\r\n )\r\n \r\n else:\r\n prefix = \"fa\",\r\n icon = \"briefcase\",\r\n icon_color = \"black\" \r\n Marker(**establecimiento,icon = icon ).add_to(mapa)\r\n return mapa", "def sprawdz(lista):\n # do_usuniecia - lista zawierajaca indeksy pol ktore zostana usuniete z glownej listy\n do_usuniecia = []\n # petla przechodzaca po wartosciach\n for i in range(len(lista) / 2):\n # j - indeks wartosci dla poszczgolnego panstwa\n j = 2 * i + 1\n # k - indeks pod ktorym nie ma wartosci\n k = 0\n # sprawdzanie ktore elementy sa bez wartosci oraz dodawanie ich do listy do usuniecia\n for el in lista[j]:\n if el is None:\n # zastosowanie unikalnosci indeksow\n if not k in do_usuniecia:\n do_usuniecia.append(k)\n\n k += 1\n # sortowanie listy z indeksami do usuniecia w sposob rosnacy\n do_usuniecia.sort()\n # nowalista - lista zawierajaca statystyki dostepne dla wszystkich panstw odpowiednio [Lata],[Wartosc]\n nowalista = []\n for i in range(len(lista)):\n # wartosc - lista zawierajaca poszczegolne dane z glownej listy\n wartosc = []\n # dodawanie wartosci, ktore sa dostepne dla wszystkich panstw do tabeli wartosc\n for j in range(len(lista[i])):\n # zastosowanie unikalnosci indeksow dla ktorych nie ma wartosci\n if not j in do_usuniecia:\n wartosc.append(lista[i][j])\n # dodawanie listy zawierajacej wynik dla poszczegolnych danych\n nowalista.append(wartosc)\n\n return nowalista", "def demander_map(nom_maps):\n\n # On affiche la liste des labyrinthes disponibles\n map_msg = \"Labyrinthes existants : \"\n print(map_msg)\n\n # On affiche les choix possibles\n for i, nom in enumerate(nom_maps):\n print(\" {} - {}\".format(i + 1, nom))\n\n # Tant que l'input est incorrecte...\n indice = None\n while indice not in range(1, len(nom_maps) + 1):\n # ...On demande l'indice du labyrinthe\n try:\n indice = int(input(\"Entrez un numéro de labyrinthe pour commencer à jouer : \"))\n except:\n pass\n\n return indice - 1", "def process_results(master_state_map, rigged_party):\n party = \"\"\n reps_electorate_points = 0\n dems_electorate_points = 0\n total_num_votes = 0\n rigged_electoral_points = 0\n\n if rigged_party is None:\n\n\n\n\n for state_num, state_data in master_state_map.items():\n print(f\"state_num: {state_num}, this represents the item number in the dictionary.\")\n print(f\"state_data: {state_data}, this contains all the state data\")\n # print(state_data)\n for key, value in 
state_data.items():\n if key == 'state':\n print(f\"state: {value}\")\n elif key == 'num_voters':\n print(f\"num_voters: {value}\")\n elif key == 'party':\n print(f\"party: {value}\")\n party = value\n elif key == 'votes':\n print(f\"votes: {value}\")\n total_num_votes += value\n elif key == 'electorate':\n print(f\"electorate: {value}\")\n if party == \"republican\":\n reps_electorate_points += value\n elif party == \"democrat\":\n dems_electorate_points += value\n\n print(f\"reps: {reps_electorate_points}\")\n print(f\"dems: {dems_electorate_points}\")\n print(f\"the total amount of votes is : {total_num_votes}\")\n # if republican electorate points are greater than the democrat electorate points, then republics win\n # else, democrats win\n if reps_electorate_points > dems_electorate_points:\n print(f\"The Republicans win with {reps_electorate_points} electoral points!\")\n elif dems_electorate_points > reps_electorate_points:\n print(f\"The Democrats win with {dems_electorate_points} electoral points!\")\n else:\n print('wow')\n\n if rigged_party:\n for state_num, state_data in master_state_map.items():\n print(f\"state_num: {state_num}, this represents the item number in the dictionary.\")\n print(f\"state_data: {state_data}, this contains all the state data\")\n # print(state_data)\n for key, value in state_data.items():\n if key == 'state':\n print(f\"state: {value}\")\n elif key == 'num_voters':\n print(f\"num_voters: {value}\")\n elif key == 'party':\n print(f\"party: {rigged_party}\")\n party = value\n elif key == 'votes':\n print(f\"votes: {value}\")\n total_num_votes += value\n elif key == 'electorate':\n print(f\"electorate: {value}\")\n if party == rigged_party:\n rigged_electoral_points += value\n elif party == \"democrat\":\n rigged_electoral_points += value\n elif party == \"republican\":\n rigged_electoral_points += value\n elif party == \"libertarian\":\n rigged_electoral_points += value\n elif party == \"independent\":\n rigged_electoral_points += value\n\n\n print(f\"The {rigged_party} party won with {rigged_electoral_points}!\")", "def se_dicta(materia):\n return len(materia['cursos']) > 0", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n Pilha_Caminho = Stack()\n Pilha_Estados = Stack()\n Caminho = []\n Visitados = []\n\n Pilha_Caminho.push(Caminho) # empilha caminho (vazio, no começo)\n Pilha_Estados.push(problem.getStartState()) # empilha estado inicial\n\n while (Pilha_Caminho.isEmpty() == False and Pilha_Estados.isEmpty() == False):\n Caminho_Andado = Pilha_Caminho.pop() # atualiza caminho\n Estado_Atual = Pilha_Estados.pop() # atualiza estado\n if problem.isGoalState(Estado_Atual): # caso estado atual seja o desejado,\n return Caminho_Andado # retorna o caminho total\n if Estado_Atual not in Visitados: # caso estado atual não tenha sido visitado\n Visitados.append(Estado_Atual) # marca estado como visitado\n for Sucessor in problem.getSuccessors(Estado_Atual): # busca sucessores\n if Sucessor[0] not in Visitados: # caso sucessor não tenha sido visitado\n Pilha_Caminho.push(Caminho_Andado + [Sucessor[1]]) # atualiza caminho total na pilha\n Pilha_Estados.push(Sucessor[0]) # atualiza estado\n return", "def Wygrana():\r\n for x in range (0, ROZMIAR_PLANSZY):\r\n for y in range (0, ROZMIAR_PLANSZY):\r\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\r\n iksy, kolka = SprawdzLinie ((x, y), kierunek)\r\n if iksy == 5:\r\n return X\r\n if kolka == 5:\r\n return O\r\n return False", "def eksport(lista):\n # 
wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"NE.EXP.GNFS.CD\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def turysci(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"ST.INT.ARVL\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def distance_score(vertex1, board, player_id): #implement preference for closer settlements\n num_buildings = 0\n total_dist = 0\n player_buildings = board.get_player_settlements(player_id) + board.get_player_cities(player_id)\n\n if len(player_buildings) == 0: #if it is our first turn\n return 0\n\n player_roads = board.get_player_roads(player_id)\n accessible_vertices = list(set(player_buildings+ [vertex for pair in player_roads for vertex in pair]))\n get_distance = lambda v: manhattan_distance(v, vertex1, board)\n min_distance = min(map(get_distance, accessible_vertices))\n\n enemy_buildings = [v for v in board.settlements if board.settlements[v] != player_id]\n enemy_roads = [r for r in board.roads if board.roads[r] != player_id]\n\n\n \"\"\"\n for s in board.settlements:\n if board.settlements[s] != player_id:\n vertex2 = s\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n for c in board.cities:\n if board.cities[c] != player_id:\n vertex2 = c\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n \"\"\"\n return min_distance", "def reachable_province(self, ctx):\n return self.reachable_tiles(ctx)", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):" ]
[ "0.54672736", "0.51602787", "0.5158227", "0.51512283", "0.51180106", "0.5109656", "0.50967324", "0.5000111", "0.49935594", "0.4985653", "0.4984618", "0.4949592", "0.49373594", "0.4920492", "0.49203885", "0.49047872", "0.4896582", "0.4890663", "0.48779795", "0.4862627", "0.48474187", "0.4838787", "0.481917", "0.4809571", "0.48019892", "0.47843838", "0.4781735", "0.4769568", "0.4769568", "0.4769568" ]
0.5229537
1
The vertices of the WhiteWalkers are redefined
def redefinir_vertices(self):
    self.nueva_posicion_posible_parte_inferior = [0, 0]
    self.nueva_posicion_posible_parte_superior = [0, 0]
    self.vertice_1 = self.posicion
    self.vertice_2 = [self.posicion[0] + self.medidas, self.posicion[1]]
    self.vertice_3 = [self.posicion[0], self.posicion[1] + self.medidas]
    self.vertice_4 = [self.posicion[0] + self.medidas, self.posicion[1] + self.medidas]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dfs(self):\n # Run time => O(V)\n for aVertex in self:\n aVertex.setColor('white')\n aVertex.setPredecessor(-1)\n # Run time => O(V)\n for aVertex in self:\n if aVertex.getColor() == 'white':\n self.dfs_visit(aVertex)", "def __init__(self, vertices):\n self.vertices = vertices", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def __init__(self):\n self.vertices = {}", "def recalc_verts(self):\n tongue_w = self.TEX.height\n\n along = self.fly_pos - self.mouth_pos\n across = along.normalized().rotated(90) * tongue_w * 0.5\n\n along *= self.length\n\n self.dl.vertices = [c for v in [\n self.mouth_pos - across,\n self.mouth_pos - across + along,\n self.mouth_pos + across + along,\n self.mouth_pos + across,\n ] for c in v]", "def __init__(self,vertices):\n self._vertices = vertices\n self._edges = []\n for i in range(len(self._vertices)-1)\n self._edges.append( [i,i+1] )", "def vertices(self, *args, **kwargs) -> Any:\n pass", "def set_vertices(self):\n if self.rotation == 0:\n self.v0, self.v1, self.v2, self.v3 = self.non_rotated_vertices()\n else:\n self.v0, self.v1, self.v2, self.v3 = self.rotated_vertices()", "def __init__(self):\n self.vert_list = {}\n self.num_vertices = 0", "def __init__(self) -> None:\n self._vertices = {}", "def __init__(self):\n self.vert_dict = {}\n # self.vert_dict = []\n self.num_vertices = 0", "def registerVertices(self,vl):\n self.set('patchmesh.vertices',FuzzList(vl))", "def vertices(self, v):\n self._vertices = v", "def fill_walk(self):\n\n #Seguir tomando caminos hasta que se alcance la cantidad establecida.\n while len(self.x_values) < self.num_points:\n\n #Decidir cual direccion tomar y cuan lejos ir hacia esa direccion.\n x_direction = choice([1, -1])\n x_distance = choice([0, 1, 2, 3, 4])\n x_step = x_direction * x_distance\n\n y_direction = choice([1,-1])\n y_distance = choice([0, 1, 2, 3, 4])\n y_step = y_direction * y_distance\n\n #Ignorar movimientos nulos.\n if x_step == 0 and y_step == 0:\n continue\n\n #Calcular la nueva posicion.\n x = self.x_values[-1] + x_step\n y = self.y_values[-1] + y_step\n\n self.x_values.append(x)\n self.y_values.append(y)", "def getStartVertex(self):", "def __init__(self):\n self._vertices = {}\n self._degreesDesc = []", "def reset_path(self):\n for i in self.grid:\n for y in i:\n y.g = 0\n y.h = 0\n y.f = 0\n y.parent = None\n y.visited = False", "def _update_vertices(self):\n raise NotImplementedError(\"_update_vertices must be defined\"\n \"for every ShapeBase subclass\")", "def __init__(self, nombre_='', dirigido=False):\n self.nombre = nombre_\n self.vertices = {}\n self.dirigido = dirigido", "def init_vertices(self):\n self.vertices = []\n for key in self.graph:\n self.vertices.append(self.Vertex(key, self.graph[key]))", "def AGM_prim(mtr_adj, limited_nodes=[], raiz=1):\n\tnum_vertices = len(mtr_adj)\n\n\tfila = []\n\tvertices = [] # ordenados de acordo com a chave\n\t\n\t# adicionando os nodos na lista de vertices e ordenando pela key\n\tfor i in range( 1, num_vertices+1 ):\n\t\tvertices.append( vertice(i) )\n\n\tvertices.sort(key = lambda x: x.key)\n\tlog.debug('vertices: %s' % vertices)\n\t\n\t# fila a ser ordenada pela distancia\n\tfor nodo in vertices:\n\t\tfila.append(nodo)\n\n\t# se a raiz tiver grau máximo =1, seleciona o nodo mais próximo da raiz\n\t# para tornar ele a 'raiz'\n\tverificar_raiz(mtr_adj, limited_nodes, vertices, raiz)\n\t\n\tvertices[raiz-1].dist = 0\n\n\t# ordena a fila por ordem de distancia para o 
predecessor.\n\treordenar(fila)\n\tlog.debug('fila: %s' % fila)\n\n\t# criando arvore com os nodos que aceitam mais de 1 grau\n\twhile len(fila):\n\t\t# nodo a ser testado\n\t\tu = fila.pop(0)\n\t\t\n\t\t# evitar os nodos com grau máximo = 1 por enquanto\n\t\tif u.key in limited_nodes:\n\t\t\tcontinue\n\t\t\n\n\t\t# passando por todos os outros vértices, e adicionando\n\t\t# para selecionar os nodos que tem o nodo u como predecessor.\n\t\tfor v in range(1, num_vertices+1):\n\t\t\tif u.key != v and \\\n\t\t\tv not in limited_nodes and \\\n\t\t\tmtr_adj[ u.key-1 ][v-1] < vertices[v-1].dist and \\\n\t\t\tna_fila(v, fila):\n\t\t\t\tvertices[v-1].pred = u.key\n\t\t\t\tvertices[v-1].dist = mtr_adj[ u.key-1 ][v-1]\n\t\t\t\treordenar(fila)\n\t\tlog.debug('fila: %s' % fila)\t\n\t\n\t# conectando os nodos que aceitam grau maximo = 1 na arvore\n\tfor u in vertices:\n\t\tif u == raiz:\n\t\t\tcontinue\n\n\t\t# para cada nodo u de grau máximo = 1\n\t\tif u.key in limited_nodes:\n\t\t\t# verificar qual o nodo mais próximo, não limitado, diferente de u\n\t\t\tfor v in range(1, num_vertices+1):\n\t\t\t\tv_dist = mtr_adj[u.key-1][v-1] # distância de u até v\n\t\t\t\tif v != u.key and v_dist < u.dist and v not in limited_nodes:\n\t\t\t\t\tu.dist = v_dist\n\t\t\t\t\tu.pred = v\n\n\treturn vertices", "def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()", "def dfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def removeVertices(self, vertices: set):\n\n for v in copy.copy(vertices):\n self.removeVertex(v)", "def clean_edges(self):", "def registerVertices(self,vl):\n self.set('mesh.vertices',FuzzList(vl))\n return self", "def is_vertex(self): \n return False", "def move_vertices(self, vertices, code):\n for vertex in vertices:\n self.move_vertice(vertex, code)", "def _remove_unreferenced_vertices(self):\n # convert vertices to an array\n vertex_array = np.array(self.mesh_.vertices())\n num_v = vertex_array.shape[0]\n\n # fill in a 1 for each referenced vertex\n reffed_array = np.zeros([num_v, 1])\n for f in self.mesh_.triangles():\n if f[0] < num_v and f[1] < num_v and f[2] < num_v:\n reffed_array[f[0]] = 1\n reffed_array[f[1]] = 1\n reffed_array[f[2]] = 1\n\n # trim out vertices that are not referenced\n reffed_v_old_ind = np.where(reffed_array == 1)\n reffed_v_old_ind = reffed_v_old_ind[0]\n reffed_v_new_ind = np.cumsum(reffed_array).astype(np.int) - 1 # counts number of reffed v before each ind\n\n try:\n self.mesh_.set_vertices(vertex_array[reffed_v_old_ind, :].tolist())\n if self.mesh_.normals() is not None:\n normals_array = np.array(self.mesh_.normals())\n self.mesh_.set_normals(normals_array[reffed_v_old_ind, 
:].tolist())\n except IndexError:\n return False\n\n # create new face indices\n new_triangles = []\n for f in self.mesh_.triangles():\n new_triangles.append([reffed_v_new_ind[f[0]], reffed_v_new_ind[f[1]], reffed_v_new_ind[f[2]]] )\n self.mesh_.set_triangles(new_triangles)\n return True", "def agregar_vertice(self, v):\n if not v in self.vertices.keys():\n self.vertices[v] = {}" ]
[ "0.585346", "0.5853237", "0.58507043", "0.5826494", "0.57883185", "0.5762866", "0.5679694", "0.56735766", "0.5643428", "0.5612465", "0.55687296", "0.5547157", "0.5533484", "0.54954654", "0.5488889", "0.5472278", "0.544285", "0.5436261", "0.54361176", "0.5413029", "0.5389942", "0.5373584", "0.53723186", "0.5343726", "0.5337464", "0.5337358", "0.53205043", "0.53190833", "0.5306904", "0.5298841" ]
0.6724158
0
Reconfigures the plugin; it should be called when the configuration of the plugin is changed during the operation of the South device service. The new configuration category should be passed.
def plugin_reconfigure(handle, new_config):
    _LOGGER.info("Old config for MAX31865 plugin {} \n new config {}".format(handle, new_config))
    # Find diff between old config and new config
    diff = utils.get_diff(handle, new_config)
    # TODO
    new_handle = copy.deepcopy(new_config)
    new_handle['restart'] = 'no'
    return new_handle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plugin_reconfigure(handle, new_config):\n _LOGGER.info(\"Old config for Enviro pHAT plugin {} \\n new config {}\".format(handle, new_config))\n new_handle = copy.deepcopy(new_config)\n return new_handle", "def refresh_configuration(self):\n pass", "def refresh_plugin(self):\n pass", "def conf_update(self):\n pass", "def reload_config(self):\n pass", "def _on_config_changed(self, _):\n self._configure_pod()", "def reload_config(self):\n if self.faucet is not None:\n self.faucet.reload_config(None)", "def reload_configurations(self) -> None:\n ...", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def update_configs(self, config):\n for what in self.plugins: # backend, repo etc.\n for key in self.plugins[what]: # s3, filesystem etc.\n # print(\"Updating configuration of\", what, key)\n self.plugins[what][key].config(what='set', params=config)\n return", "def reconfigure(self):\n log.debug('Reconfiguring and restarting the DHCP daemon...')\n\n # Don't set the daemon running status here, but let the status\n # check take care of that.\n\n p = Properties(self.storage, CONFIG_SECTION)\n p.addCallback(self.changed).\\\n addCallback(lambda trigger: p.load()).\\\n addCallback(self.emit_config, p).\\\n addCallback(self.restart_daemon).\\\n addErrback(self.restart_error)", "def update_config(self):\n if self.integration is None:\n return\n self.enabled = self.integration.has_option(self.get_config_name())\n self.pedantic = self.integration.configuration.get_bool(\n 'filter.mrproper')", "def update(self):\n self.save_config_file()", "def reset_config():\r\n # TODO implement configuration reset\r\n pass", "def edit_cass_conf(self, out, plugin_instance):\n if config.DEBUG:\n filepath = '{conf_dir}/{conf_name}'.format(\n conf_dir=config.PLUGIN_CONF_DIR,\n conf_name=self.conf_name)\n else:\n filepath = '{app_dir}/{conf_dir}/{conf_name}'.format(\n app_dir=config.APP_DIR,\n conf_dir=config.PLUGIN_CONF_DIR,\n conf_name=self.conf_name)\n\n try:\n with open(filepath) as cfile:\n for line in cfile:\n url_re = re.search(\n r'ServiceURL \"CHANGE_THIS\"', line)\n if url_re is None:\n out.write(line)\n else:\n out.write(plugin_instance)\n except (IOError, OSError) as e:\n utils.eprint('Cannot open {}'.format(filepath))\n raise Exception(\n 'Error: {}\\n'\n 'Cannot open {}.'.format(e, filepath))", "def on_step_options_swapped(self, plugin, old_step_number, step_number):\n pass", "def _move_all_to_config_section(self):\n for section in self.OLD_SECTIONS:\n if not self.has_section(section):\n continue\n\n all_configs = self.keys(section)\n for key in all_configs:\n self.set('config',\n key,\n super().getraw(section, key))\n\n self._conf.remove_section(section)", "def fusion_api_reapply_interconnect_configuration(self, uri, api=None, headers=None):\n param = '/configuration'\n return self.ic.put(body=None, uri=uri, api=api, headers=headers, param=param)", "def handle_adminreloadconfig(bot, event):\n try:\n bot.cfg.reload()\n 
getmainconfig().reload()\n except Exception, ex: handle_exception()\n event.done()", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "def configure(self, section):", "def handle_config_inited(app, config):\n\n def handle_legacy(new, orig):\n if getattr(config, new) is None and getattr(config, orig) is not None:\n config[new] = config[orig]\n\n # copy over deprecated configuration names to new names (if any)\n handle_legacy('confluence_publish_allowlist', 'confluence_publish_subset')\n handle_legacy('confluence_purge_from_root', 'confluence_purge_from_master')\n handle_legacy('confluence_root_homepage', 'confluence_master_homepage')\n handle_legacy('confluence_space_key', 'confluence_space_name')", "async def hook(action, config_key):\n await opp.services.async_call(DOMAIN, SERVICE_RELOAD_CORE_CONFIG)", "def configure(new_config: Mapping):\n config.update(new_config)", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def apply_plugin_settings(self, options):\n pass", "def deconfigure(self):\n\n pass", "def _update_cloudwatch_config(self, config_type):\n param_name = self._get_ssm_param_name(config_type)\n cw_config_ssm = self._set_cloudwatch_ssm_config_param(\n param_name, config_type)\n cur_cw_config_crc = self._sha1_hash_file(config_type)\n ssm_cw_config_crc = self._sha1_hash_json(cw_config_ssm)\n # check if user updated cloudwatch related config files.\n # if so, perform corresponding actions.\n if cur_cw_config_crc != ssm_cw_config_crc:\n logger.info(\n \"Cloudwatch {} config file has changed.\".format(config_type))\n self.CLOUDWATCH_CONFIG_TYPE_TO_UPDATE_CONFIG_FUNC.get(\n config_type)()" ]
[ "0.6766236", "0.6411359", "0.6185151", "0.6173756", "0.6100152", "0.5961948", "0.5958596", "0.58808684", "0.5801247", "0.5779878", "0.5764558", "0.5757872", "0.56068724", "0.55619735", "0.5555461", "0.55109584", "0.5477052", "0.54474026", "0.5430415", "0.54262197", "0.539742", "0.53844446", "0.5362044", "0.53588873", "0.5356671", "0.53520566", "0.53255504", "0.53041935", "0.52964175", "0.52761155" ]
0.691585
0
Regress each agent with a Gaussian process
def agent_regress(traj): #TODO: regress x- y- coordinate separately according to the time points\n    time = traj[:, 0].reshape(len(traj[:, 0]), 1)\n    x_dir = traj[:, 1]\n    x_dir = x_dir.reshape(len(x_dir), 1)\n    k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)\n    mod_x = GPy.models.GPRegression(time, x_dir, k)\n    mod_x.optimize(messages=False)\n    mod_x.optimize_restarts(num_restarts = 30)\n    time = traj[:, 0].reshape(len(traj[:, 0]), 1)\n    k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)\n    y = traj[:, 2]\n    y = y.reshape(len(y), 1)\n    m_y = GPy.models.GPRegression(time, y, k)\n    m_y.optimize(messages=False)\n    m_y.optimize_restarts(num_restarts = 30)\n    m_xy = [mod_x, m_y]\n    return m_xy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_gaus():\n cmds = []\n cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[2.5,0,10]\")\n cmds.append('Gaussian::res(x,r_m,r_s)')\n return cmds", "def test_Gaussian_NB_estimators():", "def gaussianise_series(self, train_x):\n\n n_batches = train_x.shape[0]\n\n for batch in range(n_batches):\n train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)\n\n return train_x", "def train(args):\n # prepare environment\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. Each observes a state with length: {}'.format(\n states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n # Crate instance of MADDPG Class, mainly possible to control the model dimensions, learnrates and batch sizes\n agent = MADDPG(state_size,\n action_size,\n lr_actor=args.lr_actor,\n lr_critic=args.lr_critic,\n lr_decay=args.lr_decay,\n replay_buff_size=args.replay_buff_size,\n gamma=args.gamma,\n batch_size=args.batch_size,\n random_seed=args.random_seed,\n soft_update_tau=args.soft_update_tau,\n actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3\n\n )\n\n total_rewards = []\n avg_scores = []\n max_avg_score = -1\n max_score = -1\n threshold_init = 20\n noise_t = args.epsilon\n noise_decay = args.epsilon_decay\n latest_avg_score = -1\n # for early-stopping training if consistently worsen for # episodes\n worsen_tolerance = threshold_init\n for i_episode in range(1, 1+args.num_episodes):\n\n env_inst = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_inst.vector_observations # get the current state\n # initialize score array\n scores = np.zeros(num_agents)\n dones = [False]*num_agents\n while not np.any(dones):\n # select an action\n actions = agent.act(states, noise_t)\n # send the action to the environment\n env_inst = env.step(actions)[brain_name]\n next_states = env_inst.vector_observations # get the next state\n rewards = env_inst.rewards # get the reward\n dones = env_inst.local_done # see if episode has finished\n agent.update(states, actions, rewards, next_states, dones)\n\n noise_t *= noise_decay\n scores += rewards # update scores\n states = next_states\n\n episode_score = np.max(scores)\n total_rewards.append(episode_score)\n print(\"\\rEpisodic {} Score: {:.4f}\\t Avg Score: {:.4f}\".format(\n i_episode, episode_score, latest_avg_score), end=' ')\n\n if max_score <= episode_score:\n max_score = episode_score\n # save best model so far\n agent.save(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n # record avg score for the latest 100 steps\n if len(total_rewards) >= args.test_n_run:\n latest_avg_score = sum(\n total_rewards[(len(total_rewards)-args.test_n_run):]) / args.test_n_run\n avg_scores.append(latest_avg_score)\n\n if max_avg_score <= latest_avg_score: # record better results\n worsen_tolerance = threshold_init # re-count tolerance\n max_avg_score = latest_avg_score\n 
else:\n if max_avg_score > 0.5:\n worsen_tolerance -= 1 # count worsening counts\n print(\"Loaded from last best model.\")\n # continue from last best-model\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n if worsen_tolerance <= 0: # earliy stop training\n print(\"Early Stop Training.\")\n break\n del agent\n return total_rewards", "def doubleGaussian(x, m1, s1, a1, m2, s2, a2):\n # primary peak\n g1 = np.exp(-0.5*((x-m1)/s1)**2)\n # secondary peak\n g2 = np.exp(-0.5*((x-m2)/s2)**2)\n # total model\n mod1 = 1 - a1 * g1\n mod2 = 1 - a2 * g2\n modt = mod1 + mod2 - 1\n return modt", "def step(self):\n\n with torch.no_grad():\n for group in self.param_groups:\n lr = group[\"lr\"]\n for p in group[\"params\"]:\n\n if p.grad is None:\n continue\n\n lambda_square = self.mf.conf_factor(p, keepdim=True) ** 2\n p.data.copy_(self.mf.exp(p, -lr * p.grad.data / lambda_square))", "def resample_gmms(model_set):\n samples = np.zeros(iter_num)\n\n for i in range(iter_num):\n rand_num = random()\n # For each distribution in the model\n for gmm_distro in model_set:\n # If the random number is less than the distribution's weight, where the weight is the sum of all\n # distribution's weights so far\n if rand_num < gmm_distro[3]:\n # Then sample from the distribution and save it as the path cost, then skip to the next iteration\n samples[i] = gauss(gmm_distro[0], gmm_distro[1])\n break\n\n # plt.hist(samples, bins=50, density=True)\n # plt.show()\n\n return samples", "def train(self):\n for cov_type in self.cov_types:\n for n_components in self.n_components_range:\n # Fit a mixture of Gaussians with EM\n gmm = mixture.GaussianMixture(n_components=n_components,\n covariance_type=cov_type,\n random_state=self.random_state)\n gmm.fit(self.X)\n self.bic.append(gmm.bic(self.X))\n self.aic.append(gmm.aic(self.X))\n\n if self.bic[-1] < self.lowest_bic:\n self.lowest_bic = self.bic[-1]\n self.best_gmm_bic = gmm\n\n if self.aic[-1] < self.lowest_aic:\n self.lowest_aic = self.aic[-1]\n self.best_gmm_aic = gmm\n\n self.set_best_model()\n self.y_pred = self.predict(self.X)\n return(self)", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n 
median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def GaussianPosteriorSample(bs, ls) :\n def gps(args) :\n mu, log_var = args\n eps = K.random_normal(shape=(bs, ls), mean=0.0, stddev=1.0) # 10 x 2\n return mu + K.exp(log_var / 2.) * eps\n return gps", "def agentbasedsim(L, a, pi, aenv, pienv, xi,\n adev=1.0, pidev=0.5,\n nind=10, ngeneration=100, nburnin=10,\n prng=None,\n callback=None):\n\n p, q = from_api(a, pi)\n alpha, beta = from_api(aenv, pienv)\n if not adev == 1.0:\n delta, epsilon = from_api(adev, pidev)\n\n # all parameters need to be in array form if cython acceleration is used\n if usecstepmarkov:\n alpha = _arrayify(alpha, L)\n beta = _arrayify(beta, L)\n p = _arrayify(p, (nind, L))\n q = _arrayify(q, (nind, L))\n if not adev == 1.0:\n delta = _arrayify(delta, (nind, L))\n epsilon = _arrayify(epsilon, (nind, L))\n \n env = np.zeros(L, dtype = bool)\n gen = np.zeros((nind, L), dtype = bool)\n \n totoffsprings = np.zeros(ngeneration)\n prng = prng if prng else np.random\n \n for generation in range(ngeneration):\n # time step environment\n rand = prng.rand(L)\n env = stepmarkov(env, alpha, beta, rand)\n if callback and generation >= nburnin:\n callback(gen, env)\n if not adev == 1.0:\n rand = prng.rand(nind, L)\n phen = stepmarkov2d(gen, delta, epsilon, rand)\n else:\n phen = gen\n # calculate growth rate\n noffspring = xi(phen, env)\n totoffspring = noffspring.sum()\n totoffsprings[generation] = totoffspring\n # time step population\n rand = prng.rand(nind, L)\n parent = gen[np.arange(nind).repeat(prng.multinomial(nind, noffspring/totoffspring))]\n gen = stepmarkov2d(parent, p, q, rand)\n \n # calculate Lambda = mean growth rate\n return np.mean(np.log(totoffsprings[nburnin:]/nind))", "def Transformer(robot1,robot2): \n #with a 50% change surgery will be triggered\n return Random_Actuation.surgery(\"Transformer\",robot1,robot2,50)", "def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n 
return miu, Sigma", "def collect_experiences(self):\n for i in range(self.num_frames_per_proc):\n # Do one agent-environment interaction\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n \n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n \n model_results0 = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n \n dist0 = model_results0['dist'] ### NOTE\n value0 = model_results0['value']\n memory0 = model_results0['memory']\n msg0 = model_results0['message']\n dists_speaker0 = model_results0['dists_speaker']\n extra_predictions0 = model_results0['extra_predictions']\n #self.rng_states0[i] = model_results0['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states0[i] = model_results0['cuda_rng_states']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n model_results1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(msg0.transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1)) ### NOTE\n \n dist1 = model_results1['dist']\n value1 = model_results1['value']\n memory1 = model_results1['memory']\n msg1 = model_results1['message']\n dists_speaker1 = model_results1['dists_speaker']\n extra_predictions1 = model_results1['extra_predictions']\n #self.rng_states1[i] = model_results1['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states1[i] = model_results1['cuda_rng_states']\n \n #state = torch.get_rng_state()\n action0 = dist0.sample()\n \n #torch.set_rng_state(state)\n action1 = dist1.sample()\n\n obs0, reward0, done0, env_info0 = self.env0.step(action0.cpu().numpy())\n \n obs1, reward1, done1, env_info1 = self.env1.step(action1.cpu().numpy())\n \n # mask any rewards based on (previous) been_done\n rewardos0 = [0] * self.num_procs\n rewardos1 = [0] * self.num_procs\n for j in range(self.num_procs):\n rewardos0[j] = reward0[j] * (1 - self.been_done0[j].item())\n rewardos1[j] = reward1[j] * (1 - self.been_done1[j].item())\n \n reward0 = tuple(rewardos0)\n reward1 = tuple(rewardos1)\n \n #reward0 = tuple(0.5*r0 + 0.5*r1 for r0, r1 in zip(reward0, reward1)) ### NOTE\n #reward1 = reward0\n \n # reward sender agent (0) equally for success of receiver agent (1) ### NOTE\n reward0 = reward1\n \n self.been_done0 = (1 - (1 - self.been_done0) * (1 - torch.tensor(done0, device=self.device, dtype=torch.float)))\n self.been_done1 = (1 - (1 - self.been_done1) * (1 - torch.tensor(done1, device=self.device, dtype=torch.float)))\n both_done = self.been_done0 * self.been_done1\n \n # reset if receiver agent (1) is done ### NOTE\n both_done = self.been_done1\n \n obs0 = self.env0.sync_reset(both_done, obs0)\n obs1 = self.env1.sync_reset(both_done, obs1)\n \n if self.aux_info:\n env_info0 = self.aux_info_collector0.process(env_info0)\n # env_info0 = self.process_aux_info0(env_info0)\n \n env_info1 = self.aux_info_collector1.process(env_info1)\n # env_info1 = self.process_aux_info1(env_info1)\n\n # Update experiences values\n\n self.obss0[i] = self.obs0\n self.obs0 = obs0\n \n self.obss1[i] = self.obs1\n self.obs1 = obs1\n\n self.memories0[i] = self.memory0\n self.memory0 = memory0\n \n self.memories1[i] = self.memory1\n self.memory1 = memory1\n \n self.msgs0[i] = self.msg0\n self.msg0 = msg0\n \n self.msgs1[i] = self.msg1\n self.msg1 = msg1\n \n self.msgs_out0[i] = msg0\n \n self.msgs_out1[i] = msg1\n\n self.masks0[i] = self.mask0\n #self.mask0 = 1 - torch.tensor(done0, device=self.device, dtype=torch.float)\n 
self.mask0 = 1 - both_done\n self.actions0[i] = action0\n self.values0[i] = value0\n if self.reshape_reward is not None:\n self.rewards0[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs0, action0, reward0, done0)\n ], device=self.device)\n else:\n self.rewards0[i] = torch.tensor(reward0, device=self.device)\n self.log_probs0[i] = dist0.log_prob(action0)\n self.speaker_log_probs0[i] = self.acmodel0.speaker_log_prob(dists_speaker0, msg0)\n \n self.masks1[i] = self.mask1\n #self.mask1 = 1 - torch.tensor(done1, device=self.device, dtype=torch.float)\n self.mask1 = 1 - both_done\n self.actions1[i] = action1\n self.values1[i] = value1\n if self.reshape_reward is not None:\n self.rewards1[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs1, action1, reward1, done1)\n ], device=self.device)\n else:\n self.rewards1[i] = torch.tensor(reward1, device=self.device)\n self.log_probs1[i] = dist1.log_prob(action1)\n self.speaker_log_probs1[i] = self.acmodel1.speaker_log_prob(dists_speaker1, msg1)\n\n if self.aux_info:\n self.aux_info_collector0.fill_dictionaries(i, env_info0, extra_predictions0)\n \n self.aux_info_collector1.fill_dictionaries(i, env_info1, extra_predictions1)\n\n # Update log values\n\n self.log_episode_return0 += torch.tensor(reward0, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return0 += self.rewards0[i]\n \n self.log_episode_return1 += torch.tensor(reward1, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return1 += self.rewards1[i]\n \n self.log_episode_num_frames0 += torch.ones(self.num_procs, device=self.device)\n self.log_episode_num_frames1 += torch.ones(self.num_procs, device=self.device)\n \n #for i, done_ in enumerate(done0):\n for i in range(self.num_procs):\n #if done_:\n if both_done[i]:\n self.log_done_counter0 += 1\n self.log_return0.append(self.log_episode_return0[i].item())\n self.log_reshaped_return0.append(self.log_episode_reshaped_return0[i].item())\n self.log_num_frames0.append(self.log_episode_num_frames0[i].item())\n \n #for i, done_ in enumerate(done1):\n #if done_:\n self.log_done_counter1 += 1\n self.log_return1.append(self.log_episode_return1[i].item())\n self.log_reshaped_return1.append(self.log_episode_reshaped_return1[i].item())\n self.log_num_frames1.append(self.log_episode_num_frames1[i].item())\n\n # if both are done, reset both to not done\n self.been_done0 *= (1 - both_done)\n self.been_done1 *= (1 - both_done)\n\n self.log_episode_return0 *= self.mask0\n self.log_episode_reshaped_return0 *= self.mask0\n self.log_episode_num_frames0 *= self.mask0\n\n self.log_episode_return1 *= self.mask1\n self.log_episode_reshaped_return1 *= self.mask1\n self.log_episode_num_frames1 *= self.mask1\n\n # Add advantage and return to experiences\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n tmp = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n next_value0 = tmp['value']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n next_value1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(tmp['message'].transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1))['value'] ### NOTE\n\n for i in reversed(range(self.num_frames_per_proc)):\n next_mask0 = self.masks0[i+1] if i < 
self.num_frames_per_proc - 1 else self.mask0\n next_value0 = self.values0[i+1] if i < self.num_frames_per_proc - 1 else next_value0\n next_advantage0 = self.advantages0[i+1] if i < self.num_frames_per_proc - 1 else 0\n \n next_mask1 = self.masks1[i+1] if i < self.num_frames_per_proc - 1 else self.mask1\n next_value1 = self.values1[i+1] if i < self.num_frames_per_proc - 1 else next_value1\n next_advantage1 = self.advantages1[i+1] if i < self.num_frames_per_proc - 1 else 0\n\n delta0 = self.rewards0[i] + self.discount * next_value0 * next_mask0 - self.values0[i]\n self.advantages0[i] = delta0 + self.discount * self.gae_lambda * next_advantage0 * next_mask0\n \n delta1 = self.rewards1[i] + self.discount * next_value1 * next_mask1 - self.values1[i]\n self.advantages1[i] = delta1 + self.discount * self.gae_lambda * next_advantage1 * next_mask1\n\n # Flatten the data correctly, making sure that\n # each episode's data is a continuous chunk\n\n exps0 = DictList()\n exps0.obs = [self.obss0[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n exps1 = DictList()\n exps1.obs = [self.obss1[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n # In commments below T is self.num_frames_per_proc, P is self.num_procs,\n # D is the dimensionality\n\n # T x P x D -> P x T x D -> (P * T) x D\n exps0.memory = self.memories0.transpose(0, 1).reshape(-1, *self.memories0.shape[2:])\n \n exps1.memory = self.memories1.transpose(0, 1).reshape(-1, *self.memories1.shape[2:])\n \n exps0.message = self.msgs0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message = self.msgs1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n exps0.message_out = self.msgs_out0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message_out = self.msgs_out1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n #exps0.rng_states = self.rng_states0.transpose(0, 1).reshape(-1, *self.rng_states0.shape[2:])\n #if torch.cuda.is_available():\n # exps0.cuda_rng_states = self.cuda_rng_states0.transpose(0, 1).reshape(-1, *self.cuda_rng_states0.shape[2:])\n \n #exps1.rng_states = self.rng_states1.transpose(0, 1).reshape(-1, *self.rng_states1.shape[2:])\n #if torch.cuda.is_available():\n # exps1.cuda_rng_states = self.cuda_rng_states1.transpose(0, 1).reshape(-1, *self.cuda_rng_states1.shape[2:])\n \n # T x P -> P x T -> (P * T) x 1\n exps0.mask = self.masks0.transpose(0, 1).reshape(-1).unsqueeze(1)\n \n exps1.mask = self.masks1.transpose(0, 1).reshape(-1).unsqueeze(1)\n\n # for all tensors below, T x P -> P x T -> P * T\n exps0.action = self.actions0.transpose(0, 1).reshape(-1)\n exps0.value = self.values0.transpose(0, 1).reshape(-1)\n exps0.reward = self.rewards0.transpose(0, 1).reshape(-1)\n exps0.advantage = self.advantages0.transpose(0, 1).reshape(-1)\n exps0.returnn = exps0.value + exps0.advantage\n exps0.log_prob = self.log_probs0.transpose(0, 1).reshape(-1)\n exps0.speaker_log_prob = self.speaker_log_probs0.transpose(0, 1).reshape(-1)\n \n exps1.action = self.actions1.transpose(0, 1).reshape(-1)\n exps1.value = self.values1.transpose(0, 1).reshape(-1)\n exps1.reward = self.rewards1.transpose(0, 1).reshape(-1)\n exps1.advantage = self.advantages1.transpose(0, 1).reshape(-1)\n exps1.returnn = exps1.value + exps1.advantage\n exps1.log_prob = 
self.log_probs1.transpose(0, 1).reshape(-1)\n exps1.speaker_log_prob = self.speaker_log_probs1.transpose(0, 1).reshape(-1)\n\n if self.aux_info:\n exps0 = self.aux_info_collector0.end_collection(exps0)\n \n exps1 = self.aux_info_collector1.end_collection(exps1)\n\n # Preprocess experiences\n\n exps0.obs = self.preprocess_obss(exps0.obs, device=self.device)\n\n exps1.obs = self.preprocess_obss(exps1.obs, device=self.device)\n\n # Log some values\n\n keep0 = max(self.log_done_counter0, self.num_procs)\n\n keep1 = max(self.log_done_counter1, self.num_procs)\n\n log0 = {\n \"return_per_episode\": self.log_return0[-keep0:],\n \"reshaped_return_per_episode\": self.log_reshaped_return0[-keep0:],\n \"num_frames_per_episode\": self.log_num_frames0[-keep0:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter0,\n }\n\n log1 = {\n \"return_per_episode\": self.log_return1[-keep1:],\n \"reshaped_return_per_episode\": self.log_reshaped_return1[-keep1:],\n \"num_frames_per_episode\": self.log_num_frames1[-keep1:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter1,\n }\n\n self.log_done_counter0 = 0\n self.log_return0 = self.log_return0[-self.num_procs:]\n self.log_reshaped_return0 = self.log_reshaped_return0[-self.num_procs:]\n self.log_num_frames0 = self.log_num_frames0[-self.num_procs:]\n\n self.log_done_counter1 = 0\n self.log_return1 = self.log_return1[-self.num_procs:]\n self.log_reshaped_return1 = self.log_reshaped_return1[-self.num_procs:]\n self.log_num_frames1 = self.log_num_frames1[-self.num_procs:]\n\n return exps0, log0, exps1, log1", "def simulate_gaussian_process(n_sim, n_periods, kernel, seed=None, kernel_kwargs=None):\n seed = 0 if seed is None else seed\n np.random.seed(seed)\n grid = np.linspace(0, 1, n_periods)\n cov = get_kernel(kernel, kernel_kwargs)(grid)\n process = np.random.multivariate_normal(np.zeros(n_periods), cov, size=n_sim).T\n return process", "def distribute_Gaussian(self):\n\n sigma_x = np.sqrt(self.emitx*self._betax)\n sigma_xp = np.sqrt(self.emitx*self._gammax)\n\n sigma_y = np.sqrt(self.emity*self._betay)\n sigma_yp = np.sqrt(self.emity*self._gammay)\n\n self.particles[:,0] = np.random.randn(self.npart)*sigma_x #set x-coordinates\n self.particles[:,1] = np.random.randn(self.npart)*sigma_xp #set xp-coordinates\n self.particles[:,2] = np.random.randn(self.npart)*sigma_y #set y-coordinates\n self.particles[:,3] = np.random.randn(self.npart)*sigma_yp #set yp-coordinates", "def GD_method(self, X, Y):\n X = self.normalize(X)\n for epoch in range(self.epochs):\n y_pred = self.logistic_function(X, self.b0, self.b1)\n D_b0, D_b1 = self.cost_derivative(y_pred, Y, X)\n # Update Weights\n self.b0 = self.b0 - self.learning_rate * D_b0\n self.b1 = self.b1 - self.learning_rate * D_b1\n y_pred = self.predict(X)\n self.evaluate(y_pred, Y)", "def estimator(envs, model, seed=99999, model_type=\"rf\", _idx=None, name=None):\n\n #wandb.watch(model, log='all')\n wandb.init(project='rgm_single', reinit=True, tags=[flags.wandb_tag])\n wandb.config.update(flags)\n wandb.config.actual_measure = name\n\n if flags.optim == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=flags.lr)\n elif flags.optim == 'sgdm':\n optimizer = optim.SGD(model.parameters(), lr=flags.lr, momentum=0.9)\n else:\n optimizer = optim.SGD(model.parameters(), lr=flags.lr)\n\n risks = None\n for step in range(flags.steps):\n for env in envs.values():\n logits = model(env['X'])\n env['mse'] = mean_mse(logits, env['Y'])\n env['irm_penalty'] = penalty(logits, 
env['Y'])\n\n risks = torch.stack([e['mse'] for e in envs.values()])\n\n risk_weightings = (~torch.lt(risks, risks.max())).float().detach()\n robustness_penalty = (risks * risk_weightings).mean()\n\n train_mse = risks.mean()\n rex_penalty = risks.var()\n irmv1_penalty = torch.stack([e['irm_penalty'] for e in envs.values()]).mean()\n\n weight_norm = torch.tensor(0.)\n for w in model.parameters():\n weight_norm += w.norm().pow(2)\n\n # minmax\n loss = robustness_penalty\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if model.w.data.numpy().item() < 0.0:\n if flags.nonnegative_weights_only:\n model.w.data = torch.Tensor([[0.0]])\n runs_zeros[r][_idx] = np.ones((1,))\n\n wandb.log({'loss': loss.cpu().item(),\n 'train_mse': train_mse.cpu().item(),\n 'irmv1_penalty': irmv1_penalty.cpu().item(),\n 'rex_penalty': rex_penalty.cpu().item(),\n 'robustness_penalty': robustness_penalty.cpu().item(),\n 'risk_argmax': risks.argmax().cpu().item(),\n 'risk_max': risks.max().cpu().item(),\n 'risk_min': risks.min().cpu().item(),\n 'risk_range': (risks.max() - risks.min()).cpu().item(),\n 'weight': model.w.squeeze().cpu().item(),\n 'weight_grad': model.w.grad.squeeze().cpu().item(),\n 'bias': model.b.squeeze().cpu().item() if flags.bias else 0.0,\n 'bias_grad': model.b.grad.squeeze().cpu().item() if flags.bias else 0.0,\n })\n wandb.join()\n np.save(f'results/regression/risks/{flags.selected_single_measure}__{flags.env_split}__{flags.exp_type}__{flags.only_bias__ignore_input}.npy', risks.detach().numpy())\n return model", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def train(self, X, y, do_optimize=True):\n\n self.X = X\n\n if self.basis_func is not None:\n self.X_transformed = self.basis_func(X)\n else:\n self.X_transformed = self.X\n\n self.y = y\n\n if do_optimize:\n if self.do_mcmc:\n sampler = emcee.EnsembleSampler(self.n_hypers, 2,\n self.marginal_log_likelihood)\n\n # Do a burn-in in the first iteration\n if not self.burned:\n # Initialize the walkers by sampling from the prior\n self.p0 = self.prior.sample_from_prior(self.n_hypers)\n\n # Run MCMC sampling\n result = sampler.run_mcmc(self.p0,\n self.burnin_steps,\n rstate0=self.rng)\n self.p0 = result.coords\n\n self.burned = True\n\n # Start sampling\n pos = sampler.run_mcmc(self.p0,\n self.chain_length,\n rstate0=self.rng)\n\n # Save the current position, it will be the start point in\n # the next iteration\n self.p0 = pos.coords\n\n # Take the last samples from each walker\n self.hypers = np.exp(sampler.chain[:, -1])\n else:\n # Optimize hyperparameters of the Bayesian linear regression \n res = optimize.fmin(self.negative_mll, self.rng.rand(2))\n self.hypers = [[np.exp(res[0]), np.exp(res[1])]]\n\n else:\n self.hypers = [[self.alpha, self.beta]]\n\n self.models = []\n for sample in self.hypers:\n alpha = sample[0]\n beta = sample[1]\n\n logger.debug(\"Alpha=%f ; Beta=%f\" % (alpha, beta))\n\n S_inv = beta * np.dot(self.X_transformed.T, self.X_transformed)\n S_inv += np.eye(self.X_transformed.shape[1]) * alpha\n try:\n S = np.linalg.inv(S_inv)\n except np.linalg.linalg.LinAlgError:\n S = np.linalg.inv(S_inv + np.random.rand(S_inv.shape[0], S_inv.shape[1]) * 1e-8)\n\n m = beta * np.dot(np.dot(S, self.X_transformed.T), self.y)\n\n self.models.append((m, S))", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def do_fit(self):\n\n if (self._flag == 1):\n self._gf = [0.2]\n self._gf = 
self.par*(self._num_fu*len(self._sites)*2)\n x, F = self.read_from_file(\n self._sn, self._qn, self._path) # read data from the file\n # ,ftol=1.0e-7,xtol=1.0e-8)\n popt, pcov = curve_fit(\n self.modelfun, x, F, p0=self._gf, maxfev=5000)\n self._gf = popt\n\n elif (self._flag == 2):\n\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=0.0*math.copysign(1,(pow(-1,j)))\n# self._gf[j*5]=0.1\n# par[j*5+1]=6.45\n# par[j*5+2]=0.0\n# par[j*5+3]=0.05\n# par[j*5+4]=1.0\n\n X, F = self.read_from_file(self._sn, self._qn, self._path) # read data from the file\n\n# height, xx, width=self.moments(F)\n# Tracer()()\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=x[0,xx]\n# par[j*5]=X[0,xx]*math.copysign(1,(pow(-1,j)))\n# par[j*5+1]=X[1,xx]\n# par[j*5+2]=X[2,xx]\n# par[j*5+3]=0.007\n# par[j*5+4]=height*math.copysign(1,(pow(-1,j)))\n\n xi, yi, zi = np.mgrid[-6.5:6.5:160j, 4.0:8.9:160j, -7.5:7.5:160j]\n x, y, z = xi.flatten(), yi.flatten(), zi.flatten()\n XX = np.vstack((x, y, z))\n\n invdisttree = Invdisttree(X.T, F, leafsize=10, stat=1)\n AA = invdisttree(XX.T, nnear=130, eps=0, p=1)\n\n# aaa1,bbb1=self.detect_local_minima(-AA.reshape(xi.shape))\n# aaa2,bbb2=self.detect_local_maxima(-AA.reshape(xi.shape))\n if self.peaks==[]:\n print('\\n---------------------------------------------------------------------')\n print('Detecting maxima and minima of target function...',)\n\n peaks_min, min_coord, peaks_max, max_coord = self.detect_min_max(AA.reshape(xi.shape))\n print('done')\n print('Number of the min peaks: {}'.format(len(peaks_min)))\n print('Number of the max peaks: {}'.format(len(peaks_max)))\n print('---------------------------------------------------------------------\\n')\n # fig=plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.plot_surface(xi[:,:,60],yi[:,:,60],bbb2[:,:,60], cmap=cm.jet, linewidth=0.2)\n # plt.hold(True)\n # plt.show()\n\n if peaks_max==[]:\n peaks=np.insert(peaks_min, np.arange(len(peaks_max)), peaks_max)\n coords=np.insert(min_coord, np.arange(max_coord.shape[1]), max_coord, axis=1)\n else:\n peaks = np.insert(peaks_max, np.arange(len(peaks_min)), peaks_min)\n coords = np.insert(max_coord, np.arange(min_coord.shape[1]), min_coord, axis=1)\n\n self.peaks=peaks\n self.coords=coords\n\n par = [0.0]*(self._num_fu*5)\n j1 = 0\n aaaa = 1\n for j in range(self._num_fu):\n if (j > aaaa*self.coords.shape[1]-1):\n j1 = 0\n aaaa += 1\n par[j*5] = xi[self.coords[0, j1], self.coords[0, j1], self.coords[0, j1]]\n par[j*5+1] = yi[self.coords[1, j1], self.coords[1, j1], self.coords[1, j1]]\n par[j*5+2] = zi[self.coords[2, j1], self.coords[2, j1], self.coords[2, j1]]\n # par[j*5+3] = 0.1003+0.1000*math.copysign(1, (pow(-1, j)))\n par[j*5+3] = 0.0001\n# if j < 15:\n# par[j*5+3] = 0.00001\n# else:\n# par[j*5+3] = 0.0005\n par[j*5+4] = self.peaks[j1]\n# print(coords[0, j1], coords[1, j1], coords[2, j1])\n j1 += 1\n # popt, pcov = curve_fit(self.modelfun1, x[:,1:20000], F[1:20000],p0=par,maxfev=150000,xtol=1e-8,ftol=1e-8)\n popt, pcov = curve_fit(\n self.modelfun1, X, F, p0=par, maxfev=150000, xtol=1e-6,\n ftol=1e-8)\n # popt, pcov = curve_fit(self.modelfun1, XX, AA, p0=par)\n self._gf = popt\n# self.error=np.diagonal(pcov, offset=0)\n# print(pcov)\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")", "def update(self, samples, agent_number):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, 
next_obs_full, done = samples\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n \n with torch.no_grad():\n q_next = agent.target_critic(next_obs_full, target_actions.view(-1, 4))\n\n y = reward[:,agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[:, agent_number].view(-1, 1))\n q = agent.critic(obs_full, action.view(-1, 4))\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n \n agent_obs = obs[:, agent_number]\n agent_actions = agent.actor(agent_obs)\n q_input = action.clone()\n q_input[:, agent_number] = agent_actions\n\n # get the policy gradient\n actor_loss = -agent.critic(obs_full, q_input.view(-1, 4)).mean()\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n al = actor_loss.cpu().detach().item()\n cl = critic_loss.cpu().detach().item()\n \n return al, cl", "def amasslinregress(*args):\r\n TINY = 1.0e-20\r\n if len(args) == 1: # more than 1D array?\r\n args = args[0]\r\n if len(args) == 2:\r\n x = N.ravel(args[0])\r\n y = args[1]\r\n else:\r\n x = N.ravel(args[:,0])\r\n y = args[:,1]\r\n else:\r\n x = args[0]\r\n y = args[1]\r\n x = x.astype(N.float_)\r\n y = y.astype(N.float_)\r\n n = len(x)\r\n xmean = amean(x)\r\n ymean = amean(y,0)\r\n shp = N.ones(len(y.shape))\r\n shp[0] = len(x)\r\n x.shape = shp\r\n print x.shape, y.shape\r\n r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)\r\n r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))\r\n zerodivproblem = N.equal(r_den,0)\r\n r_den = N.where(zerodivproblem,1,r_den) # avoid zero-division in 1st place\r\n r = r_num / r_den # need to do this nicely for matrix division\r\n r = N.where(zerodivproblem,0.0,r)\r\n z = 0.5*N.log((1.0+r+TINY)/(1.0-r+TINY))\r\n df = n-2\r\n t = r*N.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n\r\n ss = float(n)*ass(x)-asquare_of_sums(x)\r\n s_den = N.where(ss==0,1,ss) # avoid zero-division in 1st place\r\n slope = r_num / s_den\r\n intercept = ymean - slope*xmean\r\n sterrest = N.sqrt(1-r*r)*asamplestdev(y,0)\r\n return slope, intercept, r, prob, sterrest, n", "def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def modelfun1(self, x, *par):\n\n g = np.zeros(len(x[0]))\n\n for j in range(len(par)/5):\n x1 = par[j*5]\n x2 = 
par[j*5+1]\n x3 = par[j*5+2]\n w = par[j*5+3]\n a = par[j*5+4]\n r1 = pow((x[0]-x1), 2)+pow((x[1]-x2), 2)+pow((x[2]-x3), 2)\n # if ((a > 1.1) or (a < -1.1)): a=0\n g = g+a*np.exp(-r1/abs(w))\n\n return g", "def __init__(self, \n\t\t\t\tsites_count, \n\t\t\t\tsites_init_resource_mean, \n\t\t\t\tsites_init_resource_sigma, \n\t\t\t\tsites_recovery_rate_mean, \n\t\t\t\tsites_recovery_rate_sigma, \n\t\t\t\tsites_predictability_mean, \n\t\t\t\tsites_predictability_sigma,\n\t\t\t\tagents_count,\n\t\t\t\tagents_skill_mean,\n\t\t\t\tagents_skill_sigma,\n\t\t\t\tagents_stock_max,\n\t\t\t\tagents_consumption_demanded_mean,\n\t\t\t\tagents_consumption_demanded_sigma,\n\t\t\t\tagents_reproduction_prob,\n\t\t\t\tagents_inheritance,\n\t\t\t\tagents_interest_rate):\n\t\tself.sites = []\n\t\tfor i in range(sites_count):\n\t\t\tsites_init_resource = abs(numpy.random.normal(sites_init_resource_mean, sites_init_resource_sigma))\n\t\t\tsites_resource = sites_init_resource\n\t\t\tsites_recovery_rate = abs(numpy.random.normal(sites_recovery_rate_mean, sites_recovery_rate_sigma))\n\t\t\tsites_predictability = abs(numpy.random.normal(sites_predictability_mean, sites_predictability_sigma))\n\t\t\tself.sites.append(Site(sites_init_resource, \n\t\t\t\t\t\t\t\t\tsites_resource, \n\t\t\t\t\t\t\t\t\tsites_recovery_rate,\n\t\t\t\t\t\t\t\t\tsites_predictability))\n\t\tself.agents_list = []\n\t\tfor i in range(agents_count):\n\t\t\tagents_list = self.agents_list\n\t\t\tagents_skill = abs(numpy.random.normal(agents_skill_mean, agents_skill_sigma))\n\t\t\tagents_stock = 0.0\n\t\t\tagents_stock_max = agents_stock_max\n\t\t\tagents_consumption_demanded = abs(numpy.random.normal(agents_consumption_demanded_mean, agents_consumption_demanded_sigma))\n\t\t\tagents_reproduction_prob = agents_reproduction_prob\n\t\t\tagents_inheritance = agents_inheritance\n\t\t\tagents_strategy = numpy.random.randint(0, 2)\n\t\t\tagents_site = numpy.random.choice(self.sites)\n\t\t\tagents_threshold_debt = 5.0 * agents_consumption_demanded\n\t\t\tagents_threshold_death = 10.0 * agents_consumption_demanded\n\t\t\tagents_interest_rate = agents_interest_rate\n\t\t\tself.agents_list.append(Agent(agents_list, \n\t\t\t\t\t\t\t\t\tagents_skill, \n\t\t\t\t\t\t\t\t\tagents_stock, \n\t\t\t\t\t\t\t\t\tagents_stock_max, \n\t\t\t\t\t\t\t\t\tagents_consumption_demanded, \n\t\t\t\t\t\t\t\t\tagents_reproduction_prob,\n\t\t\t\t\t\t\t\t\tagents_inheritance, \n\t\t\t\t\t\t\t\t\tagents_strategy,\n\t\t\t\t\t\t\t\t\tagents_site, \n\t\t\t\t\t\t\t\t\tagents_threshold_debt, \n\t\t\t\t\t\t\t\t\tagents_threshold_death,\n\t\t\t\t\t\t\t\t\tagents_interest_rate))", "def optimize_agent(trial):\n\tmodel_params = optimize_ppo2(trial)\n\t\n\t\"\"\"\n\tenv = SubprocVecEnv([make_env(i, agents) for i in range(num_cpu)])\n\tmodel = PPO2(POLICY_TYPE, env, nminibatches=1, **model_params) \n\t# n_steps (int) – The number of steps to run for each environment per update (i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)\n\t# by default n_steps=128. After 128 steps for each env, the policy will be updated. 
If 3 days per game and 2 seq per day, then every update reqires 128/2/3 = 21 games\n\tenv.env_method(\"set_model_reference\", model.get_parameters())\n\t\"\"\"\n\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\t\trandom_seq = True, self_play = SELF_PLAY, policy_type = POLICY_TYPE, self_copy_freq = SELF_COPY_FREQ,\n\t\t\tobs_transaction_history_size=TRANSACTION_HISTORY_SIZE)\n\tmodel = PPO2(POLICY_TYPE, env, nminibatches=1, **model_params) \n\tenv.set_model_reference(model.get_parameters())\n\t\n\t# save a copy of model every 5e4*num_cpu games\n\tcopy_call_back = CustomCallback(model, env)\n\tcall_back_list = [EveryNTimesteps(n_steps=model_params['n_steps']*10, callback=copy_call_back)]\n\n\tmodel.learn(total_timesteps=TRAINING_TIME_STEPS, callback=call_back_list)\n\t\n\t# Evaluate the result against baseline agent\n\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\trandom_seq = True, self_play = False, obs_transaction_history_size=TRANSACTION_HISTORY_SIZE,\n\t\teval=True)\n\n\tmean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=EVAL_EPISODES)\n\t\n\twith open(\"optuna_params/\"+str(trial.number)+\".txt\", \"w\") as file:\n\t\t# Writing data to a file\n\t\tfile.write(\"mean reward: \" + str(mean_reward) + \"\tstd reward: \" + str(std_reward) +\"\\n\")\n\t\tfile.write(str(model_params))\n\t\n\treturn -1 * mean_reward", "def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, G[0], abs(A - 
G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))" ]
[ "0.57449484", "0.57261455", "0.5646443", "0.55190086", "0.5501289", "0.5473667", "0.54654324", "0.54576737", "0.5431625", "0.542115", "0.54180634", "0.54104614", "0.537796", "0.5360979", "0.5360929", "0.53550553", "0.5354414", "0.5327859", "0.5325124", "0.5316475", "0.53092295", "0.5303783", "0.52948624", "0.52899694", "0.5281603", "0.52535766", "0.5248382", "0.5247928", "0.52463263", "0.5243385" ]
0.64945245
0
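
The agent_regress row above fits one GPy GP regression per coordinate against time and returns the pair of fitted models. Below is a minimal, self-contained sketch of how such per-axis models can be fitted and then queried for a predicted path at new time points; it assumes only that GPy and NumPy are installed and uses a synthetic trajectory, since the dataset row itself includes no calling code.

# Sketch only: GPy and NumPy assumed installed; the trajectory here is synthetic.
import numpy as np
import GPy

# Synthetic trajectory with the same column convention as agent_regress: [time, x, y].
t = np.linspace(0.0, 10.0, 25)
traj = np.column_stack([t, np.sin(t), np.cos(t)])

time = traj[:, 0].reshape(-1, 1)
models = []
for col in (1, 2):  # one independent GP per coordinate, mirroring agent_regress
    coord = traj[:, col].reshape(-1, 1)
    kernel = GPy.kern.RBF(input_dim=1, variance=1.0, lengthscale=1.0)
    model = GPy.models.GPRegression(time, coord, kernel)
    model.optimize(messages=False)
    models.append(model)

# The posterior mean at unseen time points gives a predicted (x, y) path;
# the returned variance quantifies the per-coordinate uncertainty.
t_new = np.linspace(0.0, 10.0, 100).reshape(-1, 1)
x_mean, x_var = models[0].predict(t_new)
y_mean, y_var = models[1].predict(t_new)
predicted_path = np.hstack([x_mean, y_mean])  # shape (100, 2)

Modelling the two coordinates with independent GPs keeps each regression one-dimensional in its input (time), which is why agent_regress builds two separate GPRegression objects rather than a single multi-output model.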
find the predicted path of the overall crowds with the max interaction potential
def path_prediction(mods, t_points, samp_num):\n    h = 1\n    alpha = 0.99\n    time_points = t_points.reshape((-1,1))\n    sample_collection = np.empty((samp_num, len(mods), len(time_points), 2))\n    interact_pot = np.empty((samp_num, 1))\n    for i in range(0, samp_num):\n        sample_path = path_sample(mods, time_points)\n        sample_collection[i, :, :, :] = sample_path\n        interact_pot[i, :] = interact_potential(sample_path, h, alpha)\n    m_index = np.argmax(interact_pot)\n    max_path = sample_collection[m_index, :, :, :]\n    #TODO: sample the trajectory for the crowds\n    # calculate the interaction potential\n    # find the maximum one\n    return max_path
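
The path_prediction row above relies on two helpers, path_sample and interact_potential, that are not included in the row. The sketch below is only an assumed illustration of what they could look like: it draws marginal samples from per-agent GPy models of the kind returned by agent_regress, and scores each joint sample with a pairwise repulsion-style potential controlled by h and alpha. Neither implementation is confirmed by the dataset row; the bodies here are hypothetical.

# Hypothetical helpers (assumption): not part of the dataset row above.
import numpy as np

def path_sample(mods, time_points):
    # One sampled (x, y) path per agent, shaped (len(mods), len(time_points), 2).
    # For simplicity each time step is drawn from the marginal GP posterior,
    # ignoring temporal correlations along the path.
    paths = np.empty((len(mods), len(time_points), 2))
    for a, (mod_x, mod_y) in enumerate(mods):
        for d, model in enumerate((mod_x, mod_y)):
            mean, var = model.predict(time_points)
            noise = np.sqrt(np.maximum(var, 0.0)) * np.random.randn(*mean.shape)
            paths[a, :, d] = (mean + noise).ravel()
    return paths

def interact_potential(sample_path, h, alpha):
    # A common choice of interaction potential: the product over agent pairs and
    # time steps of (1 - alpha * exp(-||p_i - p_j||**2 / (2 * h**2))), which is
    # small whenever two sampled paths pass close to each other.
    n_agents = sample_path.shape[0]
    potential = 1.0
    for i in range(n_agents):
        for j in range(i + 1, n_agents):
            d2 = np.sum((sample_path[i] - sample_path[j]) ** 2, axis=1)
            potential *= np.prod(1.0 - alpha * np.exp(-d2 / (2.0 * h ** 2)))
    return potential

Under this assumption, larger interact_potential values correspond to joint sample paths whose agents keep more distance from one another, which is what np.argmax in path_prediction selects.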
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def leaf_prediction(self, node_id): #returns y_idx\n values = self.tree.value[node_id]\n return np.argmax(values)", "def max_path_cost(self, path, attr): \n return max([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])", "def argmax(X):\n\tN,K,_ = X.shape\n\tg0 = X[0,0]\n\tg = X[1:]\n\n\tB = ones((N,K), dtype=int32) * -1\n\t# compute max-marginals and backtrace matrix\n\tV = g0\n\tfor t in xrange(1,N):\n\t\tU = empty(K)\n\t\tfor y in xrange(K):\n\t\t\tw = V + g[t-1,:,y]\n\t\t\tB[t,y] = b = w.argmax()\n\t\t\tU[y] = w[b]\n\t\tV = U\n\t# extract the best path by brack-tracking\n\ty = V.argmax()\n\ttrace = []\n\tfor t in reversed(xrange(N)):\n\t\ttrace.append(y)\n\t\ty = B[t, y]\n\ttrace.reverse()\n\treturn trace", "def calculate_max_paths(self):\n self.max_paths = {}\n first_nodes = [key for (key, val) in self.predecessors.items() if not val]\n for nm in first_nodes:\n self.max_paths[nm] = {}\n self._checking_path(node_name=nm, first_name=nm)", "def longest_path_callback(self, data):\n min_idx = self.find_minimum_distance(data.poses) # closest point index\n if min_idx is None:\n return\n else:\n # extract 20 points along the closest point\n # use z position of ego car since z displacement doesn't matter\n # truncate if encounter head or tail\n path_points = np.array([\n (pose.pose.position.x, pose.pose.position.y, self.car_pos[2])\n for pose in data.poses[max(min_idx-10, 0):min_idx+10]]\n )\n # use svd to find the approximate tangent direction of longest path\n approx_dir = np.linalg.svd(path_points-np.mean(path_points,axis=0))[2][0]\n self.last_yaw_longestpath = np.arctan2(approx_dir[1], approx_dir[0])\n # 
perpendicular distance is then the norm of vector\n # (car_pos - pos_point) x approx_dir, x is cross product\n self.last_dist_longestpath = np.linalg.norm(\n np.cross(path_points[0,:] - self.car_pos, approx_dir)\n )\n # publish\n self.pub_closest_dist_longestpath.publish(self.last_dist_longestpath)", "def find_steps(walk):\n cols = ['id', 'acc_x', 'acc_y', 'acc_z', 'gy_x', 'gy_y', 'gy_z', 'mag_x', 'mag_y', 'mag_z'] \n df = DataFrame(walk)\n df.columns = cols\n acc_mag, mag_mag, mag_gy = get_mags(df)\n reduced_gy = reduce_signal(mag_gy)\n gy = [make_auto(mag_gy), make_auto(reduced_gy)]\n maxtab, mintab = peakdet(gy[1], .02)\n ind = maxtab[:, 0]\n ind = ind.astype(int)\n len_btw_steps = [j-i for i, j in zip(ind[:-1], ind[1:])]\n loc_and_len = zip(ind, len_btw_steps)\n steps = []\n for location, length in loc_and_len:\n start = location\n stop = location + length\n step = longest[start:stop]\n steps.append(step)\n return steps", "def get_solution(self):\n return self.P_plot[-1]", "def solve_model(self,max_wait_time = 0,max_per_veh = 99999, save_output=True):\n self.manager = pywrapcp.RoutingIndexManager(len(self.data['time_matrix']),\n self.data['num_vehicles'], self.data['depot'])\n routing = pywrapcp.RoutingModel(self.manager)\n\n transit_callback_index = routing.RegisterTransitCallback(self.time_callback)\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n time = 'Time'\n routing.AddDimension(\n transit_callback_index,\n max_wait_time, # allow waiting time\n max_per_veh, # maximum time per vehicle\n False, # Don't force start cumul to zero.\n time)\n time_dimension = routing.GetDimensionOrDie(time)\n # Add time window constraints for each location except depot.\n for location_idx, time_window in enumerate(self.data['time_windows']):\n if location_idx == 0:\n continue\n index = self.manager.NodeToIndex(location_idx)\n time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1]-self.VisitTime)\n # Add time window constraints for each vehicle start node.\n for vehicle_id in range(self.data['num_vehicles']):\n index = routing.Start(vehicle_id)\n time_dimension.CumulVar(index).SetRange(self.data['time_windows'][self.data['depot']][0],\n self.data['time_windows'][self.data['depot']][1])\n for i in range(self.data['num_vehicles']):\n routing.AddVariableMinimizedByFinalizer(\n time_dimension.CumulVar(routing.Start(i)))\n routing.AddVariableMinimizedByFinalizer(\n time_dimension.CumulVar(routing.End(i)))\n '''Routing Settings:https://developers.google.com/optimization/routing/routing_options\n '''\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_MOST_CONSTRAINED_ARC)\n search_parameters.time_limit.seconds = 3600\n #search_parameters.log_search = True\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.AUTOMATIC)\n sol_status={0:'ROUTING_NOT_SOLVED: Problem not solved yet.',\n 1:'ROUTING_SUCCESS: Problem solved successfully.',\n 2:'ROUTING_FAIL: No solution found to the problem.',\n 3:'ROUTING_FAIL_TIMEOUT: Time limit reached before finding a solution.',\n 4:'ROUTING_INVALID: Model, model parameters, or flags are not valid.'}\n print('Start Solving the problem....')\n _start_ = systime.time()\n assignment = routing.SolveWithParameters(search_parameters)\n print(\"Solver status: \", sol_status[routing.status()])\n soltime = systime.time()-_start_\n print('Solving takes: '+ str(round(soltime,2))+' Secs')\n if assignment:\n 
self.print_save_solution(routing, assignment,save_res=save_output)\n else:\n print('Solving Failed')", "def calc_prior_path_prob(self, output_filenm=\"\"):\n logger.info(\"Calculating prior map\")\n programs_map = {}\n unique_cluster_ids = set() # have to do this since the assigned cluster ids doesnt seems to be contiguous or start from 0 or end at K-1\n for c in self.args.cluster_assignments:\n unique_cluster_ids.add(c)\n for c in unique_cluster_ids:\n for _, ((e1, r), e2_list) in enumerate(tqdm((self.train_map.items()))):\n if self.args.cluster_assignments[self.entity_vocab[e1]] != c:\n # if this entity does not belong to this cluster, don't consider.\n continue\n if c not in programs_map:\n programs_map[c] = {}\n if r not in programs_map[c]:\n programs_map[c][r] = {}\n all_paths_around_e1 = self.all_paths[e1]\n nn_answers = e2_list\n for nn_ans in nn_answers:\n programs = self.get_programs(e1, nn_ans, all_paths_around_e1)\n for p in programs:\n p = tuple(p)\n if len(p) == 1:\n if p[0] == r: # don't store query relation\n continue\n if p not in programs_map[c][r]:\n programs_map[c][r][p] = 0\n programs_map[c][r][p] += 1\n for c, r in programs_map.items():\n for r, path_counts in programs_map[c].items():\n sum_path_counts = 0\n for p, p_c in path_counts.items():\n sum_path_counts += p_c\n for p, p_c in path_counts.items():\n programs_map[c][r][p] = p_c / sum_path_counts\n\n if not output_filenm:\n dir_name = os.path.join(args.data_dir, \"data\", self.args.dataset_name, \"linkage={}\".format(self.args.linkage))\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n output_filenm = os.path.join(dir_name, \"path_prior_map.pkl\")\n\n logger.info(\"Dumping path prior pickle at {}\".format(output_filenm))\n with open(output_filenm, \"wb\") as fout:\n pickle.dump(programs_map, fout)", "def max_power_candidate_wind_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_W[g, y, s, t] * sum(m.x_c[g, j] for j in m.Y if j <= y)) <= 0", "def _dense_reward(self) -> float:\n y = 1\n target_goal_dists = []\n for target_shape in self.__debris_shapes:\n target_pos = target_shape.shape_body.position\n goal_pos = (target_pos[0], y) # Top of screen.\n dist = np.linalg.norm(target_pos - goal_pos)\n if target_pos[1] > 0.88:\n dist = 0\n target_goal_dists.append(dist)\n target_goal_dists = np.mean(target_goal_dists)\n return -1.0 * target_goal_dists", "def find_potential(n=10,nwalks=200):\n \n boundary = np.zeros((n+1, n+1)) # Store boundary potentials\n v = np.zeros((n+1, n+1)) # Store potential for all positions\n \n # Set the boundary conditions\n for i in range(1,n):\n boundary[0,i] = 10\n boundary[n,i] = 10\n boundary[i,0] = 5\n boundary[i,n] = 5\n # Set the the boundary position that maximizes the potential at [3, 5] to 20\n boundary[3,0] = boundary[4,0] = boundary[5,0] = boundary[6,0] = boundary[7,0] = 20\n #boundary[0,3] = boundary[0,4] = boundary[0,5] = boundary[0,6] = boundary[0,7] = 20\n \n v = np.copy(boundary) # Store potential for all positions\n\n # Compute Greens function for each position\n for x in range(1,n):\n for y in range(1,n):\n position = [x, y] # Position to compute Greens function for\n Greens_func = Greens_function_approxRW(n=n, nwalks=nwalks, start_position=position) # The Greens function\n \n # Find potential at current position\n v_pos = potential_from_Greens(boundary, n=n, G=Greens_func, nwalks=nwalks)\n v[position[1], position[0]] = v_pos\n \n # v is now computed for all locations and can be plotted\n fig = plt.figure()\n plt.title('Maximized potential for [3,5]', 
fontsize = 18)\n im = plt.imshow(v, cmap=None, interpolation='nearest')\n cb = fig.colorbar(im)\n cb.ax.tick_params(labelsize=14)\n plt.show()", "def path(self):\n return self.alignment.matching_function_bestpath(self.idx)", "def _decode_by_maxprob(self):\n self.best_path = np.zeros(self.N, dtype=np.int)\n P = self.prob.copy()\n \n for i in range(self.N):\n idx = np.unravel_index(np.argmax(P, axis=None), P.shape)\n self.best_path[idx[0]] = idx[1]\n P[idx[0],:] = 0\n P[:,idx[1]] = 0", "def find_matching(confusion_matrix):\r\n _, n = confusion_matrix.shape\r\n path = []\r\n for i in range(n):\r\n max_val = -1e10\r\n max_ind = -1\r\n for j in range(n):\r\n if j in path:\r\n pass\r\n else:\r\n temp = confusion_matrix[i, j]\r\n if temp > max_val:\r\n max_val = temp\r\n max_ind = j\r\n path.append(max_ind)\r\n return path", "def find_max_score_location(grid, shape):", "def termination_heuristics(self, state):\n\n point_state = state[0][0]\n target_mask = get_target_mask(point_state)\n point_state = point_state[:3, target_mask].T\n depth_heuristics = self.graspnet_closure(point_state) \n if (depth_heuristics):\n print('object inside gripper? start retracting...')\n return depth_heuristics", "def CalculateChebyPaths(self):\n Kmin, Kmax = self.Kmin, self.Kmax\n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.cpath[y] = self.chebeval(array([self.apath[y]]),self.ac[y],Kmin,Kmax)\n # if self.cpath[y] < 0:\n # self.cpath[y] = 0\n if y >= self.W:\n income = self.b\n else:\n self.npath[y] = self.chebeval(array([self.apath[y]]),self.an[y],Kmin,Kmax)\n income = (1-self.tau)*self.w*self.npath[y]\n self.apath[y+1] = (1+self.r)*self.apath[y] + income - self.cpath[y]\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1] = (1+self.r)*self.apath[self.T-1] + self.b\n # self.cpath[self.T-1] = self.chebeval(array([self.apath[self.T-1]]),self.ac[self.T-1],Kmin,Kmax)\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])\n # print self.cpath, self.apath, self.npath", "def softmax_opt(x, incentive, idx_i, theta=1):\n\n # print x\n # Multiplication by theta to make the values of np.exp(.) 
more reasonable\n # y = np.copy(x) * theta\n y = (np.copy(x)/60.0 * -0.086 + 0 * 0.7) * theta\n\n y[idx_i] = (np.copy(x[idx_i])/60.0 * -0.086 + incentive * 0.7) * theta\n # print y\n p = np.minimum(np.maximum(np.exp(y), 1e-20), 1e20) / np.sum(np.minimum(np.maximum(np.exp(y), 1e-20), 1e20), axis=0)\n # print y\n\n # If any element of p is Nan, return equal probablity for all the paths\n if np.isnan(p).any():\n p = np.ones(len(x)) / len(x)\n return p", "def calc_control_and_trajectory(x, dw, goal, ob):\n x_init = np.array(x)\n\n best_u = [0.0, 0.0]\n best_trajectory = None\n\n u_samples = []\n traj_samples = []\n cost_samples = []\n # evaluate all trajectory with sampled input in dynamic window\n for v in np.arange(dw[0], dw[1], Config.v_resolution):\n for y in np.arange(dw[2], dw[3], Config.yaw_rate_resolution):\n\n trajectory = predict_trajectory(x_init, v, y)\n # calc cost\n to_goal_cost = calc_to_goal_cost(trajectory, goal)\n speed_cost = (Config.max_speed - trajectory[-1, 3])\n ob_cost = calc_obstacle_cost(trajectory, ob)\n\n cost_samples.append([to_goal_cost,speed_cost,ob_cost])\n u_samples.append([v, y])\n traj_samples.append(trajectory)\n cost_samples = np.array(cost_samples)\n # normalization\n def normalize_0_1(x):\n if np.max(x)-np.min(x)>1e-4:\n return (x-np.min(x))/(np.max(x)-np.min(x))\n else:\n return np.zeros_like(x)\n\n def normalize_div_sum(x):\n if np.sum(x) > 1e-4:\n return x/np.sum(x)\n else:\n return np.zeros_like(x)\n\n # for i in range(3):\n # non_inf = cost_samples[:,i] != np.inf\n # if np.sum(non_inf) >0:\n # cost_samples[non_inf,i] = normalize_0_1(cost_samples[non_inf,i])\n \n cost_samples = Config.to_goal_cost_gain*cost_samples[:,0]+Config.speed_cost_gain*cost_samples[:,1]+Config.obstacle_cost_gain*cost_samples[:,2]\n ind = np.argmin(cost_samples)\n best_u = u_samples[ind]\n best_trajectory = traj_samples[ind]\n\n if abs(best_u[0]) < Config.robot_stuck_flag_cons \\\n and abs(x[3]) < Config.robot_stuck_flag_cons:\n # to ensure the robot do not get stuck in\n # [v,w] = [0,0], force the robot rotate\n best_u[1] = -Config.max_delta_yaw_rate\n return best_u, best_trajectory", "def Find_Optimal_Cutoff(target, predicted, fname):\n class_names = ['Food', 'Nonfood']\n confusion_matrix = cm.ConfusionMeter(len(class_names))\n o1 = open(\"Opt_TruePositve.csv\", \"w\")\n o2 = open(\"Opt_FalsePositive.csv\", \"w\")\n o3 = open(\"Opt_FalseNegative.csv\", \"w\")\n o4 = open(\"Opt_TrueNegative.csv\", \"w\")\n\n #print(fname)\n fpr, tpr, threshold = roc_curve(target, predicted)\n i = np.arange(len(tpr))\n roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i),\n 'tpr' : pd.Series(tpr), 'fpr' : pd.Series(fpr) })\n roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]]\n print(roc_t)\n\n\n y_score_opt = np.where(predicted > list(roc_t['threshold'])[0], 1, 0)\n\n from sklearn.metrics import confusion_matrix\n categories = ['nonspecific', 'hyperplastic', 'ssa', 'adenoma', 'tsa', 'carcinoma']\n opt_conf = confusion_matrix(target, y_score_opt, labels=np.unique(target))\n\n plt.subplots(figsize=(fig_horizontal,fig_vertical))\n\n con_m = opt_conf\n\n df_con_m = pd.DataFrame(con_m, index= [i for i in class_names], columns = [i for i in class_names])\n ### each labels percentage\n cm1 = con_m\n cm_sum = np.sum(cm1, axis=1, keepdims=True)\n cm_perc = cm1 / cm_sum.astype(float) * 100\n annot = np.empty_like(opt_conf).astype(str)\n nrows, ncols = cm1.shape\n\n for i in range(nrows):\n for j in range(ncols):\n c = cm1[i, j]\n p = cm_perc[i, j]\n if i == 
j:\n s = cm_sum[i]\n annot[i, j] = '%d (%d%%)' % (c, p)\n elif c == 0:\n annot[i, j] = '%d (%d%%)' % (c, p)\n else:\n annot[i, j] = '%d (%d%%)' % (c, p)\n\n cm1 = pd.DataFrame(cm1, index= [i for i in class_names], columns = [i for i in class_names])\n ### each labels percentage\n\n group_counts = [\"{0:0.0f}\".format(value) for value in opt_conf.flatten()]\n group_percentages = [\"{0:.2%}\".format(value) for value in opt_conf.flatten()/np.sum(opt_conf)]\n\n labels = [f\"{v1} ({v2})\" for v1, v2 in zip(group_counts,group_percentages)]\n\n labels = np.asarray(labels).reshape(2,2)\n\n categories = ['nonspecific', 'hyperplastic', 'ssa', 'adenoma', 'tsa', 'carcinoma']\n #make_confusion_matrix(opt_conf, categories=categories, cmap='Blues')\n\n # sn.set(font_scale= 2.5)\n sn.heatmap((cm1.T / cm1.sum(axis=1)).T, annot=annot, fmt='', annot_kws={\"size\" : 60}, square=True, cmap='Blues',cbar = True, vmin=0, vmax=1)\n #sn.heatmap(df_con_m, annot=True,fmt='g', annot_kws={\"size\" : 40}, cbar = False, cmap=\"Blues\")\n #sn.heatmap(df_con_m/np.sum(df_con_m), annot=True, fmt='.2%', cbar = False, cmap='Blues')\n #sn.heatmap(inte_df_con_m, annot=True, fmt='.2%', cbar = False, cmap='Blues')\n plt.yticks(rotation = 0)\n #plt.xticks(rotation = 45)\n plt.ylabel('True class')\n plt.xlabel('Predicted class')\n global dirnamefordraw\n dirnamefordraw_opt_conf = 'binary_opt_confusion_' + dirnamefordraw + '.png'\n plt.savefig(os.path.join('models', dirnamefordraw_opt_conf),dpi=100, format='png')\n #fig.savefig(os.path.join('models', 'mucosal_resnet_exp2'))\n\n ##################### 결과 파일명 뽑기\n sourceNDArray = np.array(y_score_opt)\n optList = sourceNDArray.tolist()\n\n targetNDArray = np.array(target)\n tarList = targetNDArray.tolist()\n\n\n\n\n for i in range(len(optList)):\n filename = os.path.basename(fname[i])\n pathname = os.path.dirname(fname[i])\n patientname = filename.split(' ')[0]\n\n if tarList[i] == 0 and tarList[i] == optList[i]:\n o1.write(\"{}, {}, {}, label: {}, predicted: {}, 1\\n\".format(pathname, filename, patientname, tarList[i], optList[i]))\n elif tarList[i] == 0 and tarList[i] != optList[i]:\n o2.write(\"{}, {}, {}, label: {}, predicted: {}, 2\\n\".format(pathname, filename, patientname, tarList[i], optList[i]))\n elif tarList[i] == 1 and tarList[i] == optList[i]:\n o4.write(\"{}, {}, {}, label: {}, predicted: {}, 4\\n\".format(pathname, filename, patientname, tarList[i], optList[i]))\n elif tarList[i] == 1 and tarList[i] != optList[i]:\n o3.write(\"{}, {}, {}, label: {}, predicted: {}, 3\\n\".format(pathname, filename, patientname, tarList[i], optList[i]))\n\n# if tarList[i] == 1 and tarList[i] != optList[i]:\n# o1.write(\"{}, {}, label: {}, predicted: {}, 1\\n\".format(filename, patientname, tarList[i], optList[i]))\n# # elif tarList[i] == 0 and tarList[i] != optList[i]:\n# # o2.write(\"{}, {}, label: {}, predicted: {}, 2\\n\".format(filename, patientname, tarList[i], optList[i]))\n# # elif tarList[i] == 1 and tarList[i] == optList[i]:\n# # o4.write(\"{}, {}, label: {}, predicted: {}, 4\\n\".format(filename, patientname, tarList[i], optList[i]))\n# # elif tarList[i] == 1 and labels != optList[i]:\n# # o3.write(\"{}, {}, label: {}, predicted: {}, 3\\n\".format(filename, patientname, tarList[i], optList[i]))\n\n o1.close()\n o2.close()\n o3.close()\n o4.close()\n\n\n # print('optimal accuracy : \\n', accuracy_score(target, y_score_opt))\n # print('optimal conf mat : \\n', confusion_matrix(target, y_score_opt))\n # tn, fp, fn, tp = confusion_matrix(target, y_score_opt).ravel() # sensitivty / 
specificity / ppv / npv\n # # print(tn, fp, fn, tp)\n # print('sensitivity : ', tp / (tp + fn))\n # print('specificity : ', tn / (fp + tn))\n # print('PPV : ', tp / (tp + fp))\n # print('NPV : ', tn / (tn + fn))\n # print('------------------------------------')\n\n opt_acc = str(accuracy_score(target, y_score_opt))\n f.write('------------------------------------ \\n')\n f.write('optimal accuracy : '+ opt_acc+'\\n')\n print('optimal accuracy : \\n', accuracy_score(target, y_score_opt))\n\n opt_mat = str(confusion_matrix(target, y_score_opt))\n f.write('optimal conf mat : '+ opt_mat + '\\n')\n print('optimal conf mat : \\n', confusion_matrix(target, y_score_opt))\n\n tn, fp, fn, tp = confusion_matrix(target, y_score_opt).ravel() # sensitivty / specificity / ppv / npv\n # print(tn, fp, fn, tp)\n str_sensitivity = str(tp / (tp + fn))\n f.write('sensitivity : '+ str_sensitivity + '\\n')\n print('sensitivity : ', tp / (tp + fn))\n\n str_specificity = str(tn / (fp + tn))\n f.write('specificity : ' + str_specificity + '\\n')\n print('specificity : ', tn / (fp + tn))\n\n str_ppv = str(tp / (tp + fp))\n f.write('PPV : '+ str_ppv + '\\n')\n print('PPV : ', tp / (tp + fp))\n\n str_npv = str(tn / (tn + fn))\n f.write('NPV : ' + str_npv + '\\n')\n print('NPV : ', tn / (tn + fn))\n\n str_opt_thr = str(roc_t['threshold'])\n f.write('optimal threshold: '+ str_opt_thr + '\\n')\n print('optimal threshold: ', roc_t['threshold'])\n\n f.write('------------------------------------ \\n')\n print('------------------------------------')\n\n\n return list(roc_t['threshold'])", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def get_max_cl(Re, r):\n xf = XFoil()\n if r <= 0.175: \n xf.airfoil = naca6409\n else:\n xf.airfoil = naca2412\n xf.Re = Re\n xf.Re = Re\n xf.max_iter = 200\n xf.n_crit = 9.00\n xf.xtr = [1.00, 1.00]\n xf.M = 0\n a_seq, cl_seq, cd_seq, cm_seq, cp_seq = xf.aseq(10,15,0.1)\n # ignore nan by making it 0\n cl_seq = np.nan_to_num(cl_seq)\n # find the maximum cl \n cl_maxi = np.max(cl_seq)\n # index of the maximum cl\n idx = np.argmax(cl_seq)\n return round(cl_maxi,2),round(a_seq[idx],2), round(cd_seq[idx],2)", "def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1", "def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path", "def evaluate_optimum(dataset: Dataset) -> pd.DataFrame:\n # Get the index of data point with highest observed objective\n optimum_idx = dataset.pretransform_df[dataset.pretransform_output_name].argmax()\n # Get the inputs of the data point with highest observed objective\n optimum_loc = dataset.pretransform_df[dataset.pretransform_input_names].iloc[[optimum_idx]]\n return optimum_loc", "def deviate_postprocess(g, cars, depth):\n print \"\\n\"*3\n for car_id in range(8):\n print \"-------------\"\n print \" optimizing car %i\" % cars[car_id].id\n start = time.time()\n g.reset()\n 
new_cars = [\n Car(id=i, position=g[ORIGIN])\n for i in range(8)\n ]\n for i in range(8):\n if i != car_id:\n new_cars[i].follow_path(cars[i].edges)\n original_path = cars[car_id].edges\n (former_one_way, former_distance, former_relax7, former_margin) = compute_distance_and_margin(original_path, TIME, {})\n best_deviation = max_or_none(deviate_path(g, original_path, depth))\n if best_deviation:\n (oneway, d, r7, m, prefix) = best_deviation\n \n middle_point = prefix[-1].stop\n suffix = {\n e.start: original_path[i:]\n for (i,e) in enumerate(original_path)\n }.get(middle_point, [])\n new_path = prefix + suffix\n (new_one_way, new_distance, new_relax7, new_margin) = compute_distance_and_margin(new_path, TIME, {})\n print \"onway\",oneway, new_one_way\n new_cars[car_id].follow_path(new_path, TIME)\n assert d == new_distance\n if (new_one_way, new_distance, new_relax7, new_margin) > (former_one_way, former_distance, former_relax7, former_margin):\n print \" one_way :\", new_one_way - former_one_way\n print \" distance gain :\", new_distance - former_distance\n print \" margin gain :\", new_margin - former_margin\n cars = new_cars\n else:\n print \"no deviations\"\n else:\n print \"no deviations\"\n end = time.time()\n print \"took \", (end-start)\n return cars", "def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]" ]
[ "0.57972264", "0.550477", "0.54675025", "0.5363666", "0.53550845", "0.5350111", "0.5339136", "0.53186786", "0.53116816", "0.5307387", "0.52610135", "0.51967216", "0.51658577", "0.5161842", "0.5156469", "0.5155317", "0.51481575", "0.51478475", "0.51448447", "0.5130879", "0.5130634", "0.51251394", "0.5113059", "0.5107389", "0.5105936", "0.5098794", "0.5065589", "0.50632864", "0.50562155", "0.5041366" ]
0.60463923
0
Set internal import stop state to True
def stop_import_process(self): self.stop_import = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self._run = False", "def stop(self):\n self._stop_flag = True", "def stop(self):\n self.startState = None", "def stop(self):\n self.startState = None", "def stop(self):\n self._should_run = False", "def stop(self):\r\n self.stopped = True", "def _stop(self):\n return True", "def stop(self):\n self.stopped = True", "def stop(self):\n self.stopped = True", "def reset_and_stop(self):\n self.enabled = False\n self.start_time = None", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def stopif(self, stop):\n if stop:\n self._stopsim = True", "def set_auto_start_import(self, flag):\n\t\tself.checkAutoStartImport.set_active(flag)", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def need_stop(self, path):", "def stop(self):\n self.stopped = True", "def stop(self):\n self.__running = False", "def _prepare_to_stop(self):\n pass", "def stop(self):\n self._running = False", "def stop(self):\n self._running = False", "def _():\n global _python_shutting_down\n _python_shutting_down = True", "def stop(self):\n self._is_stabilizing = False", "def stop(_):\n if \"fscad.fscad\" in sys.modules:\n del sys.modules[\"fscad.fscad\"]", "def stop(self):\n self.finished = True", "def stop(self) -> None:\n self._running = False" ]
[ "0.6483913", "0.64209807", "0.6380567", "0.6380567", "0.6356307", "0.62640345", "0.62431157", "0.622715", "0.622715", "0.6218141", "0.6171902", "0.6171902", "0.6168702", "0.6164733", "0.61421144", "0.61421144", "0.61421144", "0.61421144", "0.61421144", "0.6140017", "0.6134257", "0.6116756", "0.6043821", "0.60123473", "0.60123473", "0.59797776", "0.5967245", "0.59592927", "0.59517217", "0.59466547" ]
0.8206366
0
Convert mask to centroid image
def mask2centroid(mask: npt.NDArray[Union[bool, np.ubyte, np.ushort, np.uintc, np.uint]], intensity_image: npt.NDArray[Any] = None) -> npt.NDArray[np.ushort]:
    # Label objects if not done yet
    mask = measure.label(mask, background=0)
    # Check if particles are darker or brighter than background
    mean_particle_intensity = np.mean(intensity_image, where=mask > 0)
    mean_background_intensity = np.mean(intensity_image, where=mask == 0)
    if mean_background_intensity > mean_particle_intensity:  # particles are dark
        intensity_image = intensity_image.max() - intensity_image  # invert for weighted centroid calculation
    # Get centroids
    centroid_list = []
    props = measure.regionprops(mask, intensity_image=intensity_image)
    for j in range(len(props)):
        # Use weighted centroids
        centroid_list.append(props[j].centroid_weighted)
    # Convert mask to centroids
    mask = np.zeros_like(mask)
    for j, centroid in enumerate(centroid_list):
        mask[round(centroid[0]), round(centroid[1])] = j + 1
    mask = mask.astype(np.uint16)
    return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_centroid(labelmask, id):\n # Get coordinates \n coords = np.where(labelmask == id)\n # Find mean of each coordinate, remove negatives, make int.\n return tuple([int(np.mean(x)) for x in coords])", "def get_object_centroid(labelmask, id):\n # Get coordinates \n coords = np.where(labelmask == id)\n # Find mean of each coordinate, remove negatives, make int.\n return tuple([int(np.mean(x)) for x in coords])", "def get_region_centroid(mask, region):\n coords = np.column_stack(np.where(mask == region))\n coords = np.apply_along_axis(np.mean, 0, coords).round()\n coords = np.uint8(coords)\n return(coords)", "def test_centroids_mask():\n data = np.ones((2, 2)).astype(np.float)\n mask = [[False, False], [True, True]]\n centroid = centroid_com(data, mask=None)\n centroid_mask = centroid_com(data, mask=mask)\n assert_allclose([0.5, 0.5], centroid, rtol=0, atol=1.e-6)\n assert_allclose([0.5, 0.0], centroid_mask, rtol=0, atol=1.e-6)", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! 
Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)", "def mask_center_label ( gray ) :\n\n assert gray is not None\n\n # s = ndimage.generate_binary_structure(2,2) # iterate structure\n label_im, nb_labels = label(gray)\n\n # get center label\n\n h = label_im.shape[0]\n w = label_im.shape[1]\n\n l = label_im [h//2,w//2]\n\n gray [ label_im == l ] = 255\n gray [ label_im != l ] = 0\n\n return gray", "def update_mean(img, clustermask):\n flat = img.flatten()\n flat.reshape((int(flat.shape[0] / 3), 3))\n w, h, _ = clustermask.shape\n cluster_assignees={}\n for cid,_ in enumerate(current_cluster_centers):\n cluster_assignees[cid] = []\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y][0]\n cluster_assignees[cid].append(img[x,y])\n for cid, pixels in cluster_assignees.items():\n current_cluster_centers[cid] = np.mean(np.array(pixels),axis=0)\n return clustermask", "def image_segmentain(img_flat):\n\tkmeans = KMeans(n_clusters = n_CLUSTERS, random_state = 0).\\\n\t\t\t\t\t\t\t\t\t\t\tfit(img_flat.reshape(-1,1)) \n\t\"\"\"Kmeans lables had issue with masking so center of each cluster\n\tis assigned for corresponding labels\"\"\"\n\n\tkmeans_centers = kmeans.cluster_centers_[kmeans.labels_]\n\n\treturn kmeans_centers.flatten()", "def centroids(img):\n _, _, _, centr = cv2.connectedComponentsWithStats(img)\n return centr[1:]", "def normalization_mask(img, mask):\n zone1 = img[mask != 0]\n zone2 = img[mask == 0]\n zone1 = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n zone2 = (zone2 - zone2.min()) / (zone2.max() - zone2.min())\n imge = img.copy()\n imge[mask != 0] = zone1\n imge[mask == 0] = zone2\n return imge", "def center_of_mass(mask):\n M = cv2.moments(mask)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return [int(cX), int(cY)]", "def centroid_of_rect(roi):\n return int(roi.shape[0] / 2), int(roi.shape[1] / 2)", "def test_centroid_com_mask_shape():\n with pytest.raises(ValueError):\n mask = np.zeros((2, 2), dtype=bool)\n centroid_com(np.zeros((4, 4)), mask=mask)", "def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid", "def centroid(image, threshold=0, binarize=False):\n\n signal = np.where(image > threshold)\n sy, sx = image.shape[0], image.shape[1]\n\n temp = np.zeros((sy, sx))\n\n if binarize is True:\n temp[signal] = 1.0\n else:\n temp[signal] = image[signal]\n\n profx = 1.0 * temp.sum(axis=0)\n profy = 1.0 * temp.sum(axis=1)\n profx -= np.min(profx)\n profy -= np.min(profy)\n\n x0 = (profx * np.arange(sx)).sum() / profx.sum()\n y0 = (profy * np.arange(sy)).sum() / profy.sum()\n\n return (x0, y0)", "def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)", "def centroid(self):\n return self.contours_to_matrix().mean(axis=0)", "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n 
for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def update_mean(img: np.ndarray, clustermask: np.ndarray):\n\n for k in range(numclusters):\n current_cluster_centers[k, 0, :] = np.mean(img[clustermask==k], axis=0)", "def _preprocessing(image) -> np.ndarray:\n # TODO: Turn mapping into generic function.\n processed_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n processed_image[~mask] = 255\n return processed_image", "def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask != 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge", "def apply_mask_to_image(img, mask):\n img_size = img.shape[0]\n mask = cv2.resize(mask, dsize=(img_size, img_size))\n\n # Find contour of the mask\n imgray = mask\n ret,thresh = cv2.threshold(imgray, 127, 255, 0)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw contours on image\n segmented_img = cv2.drawContours(img, contours, -1, (0,255,0), 3)\n\n return segmented_img", "def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask", "def create_norm(mask):\r\n\tgray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\r\n\tnorm = np.zeros_like(mask)\r\n\tfor i in range(mask.shape[0]):\r\n\t\tfor j in range(mask.shape[1]):\r\n\t\t\tif gray[i][j] == 255:\r\n\t\t\t\tnorm[i][j] = np.ones(3)\r\n\t\t\telse:\r\n\t\t\t\tnorm[i][j] = np.zeros(3)\r\n\treturn norm", "def apply_mask(face: np.array, mask: np.array) -> np.array:\n mask_h, mask_w, _ = mask.shape\n face_h, face_w, _ = face.shape\n\n # Resize the mask to fit on face\n factor = min(face_h / mask_h, face_w / mask_w)\n new_mask_w = int(factor * mask_w)\n new_mask_h = int(factor * mask_h)\n new_mask_shape = (new_mask_w, new_mask_h)\n resized_mask = cv2.resize(mask, new_mask_shape)\n\n # Add mask to face - ensure mask is centered\n face_with_mask = face.copy()\n non_white_pixels = (resized_mask < 250).all(axis=2)\n off_h = int((face_h - new_mask_h) / 2)\n off_w = int((face_w - new_mask_w) / 2)\n face_with_mask[off_h: off_h+new_mask_h, off_w: off_w+new_mask_w][non_white_pixels] = \\\n resized_mask[non_white_pixels]\n\n return face_with_mask", "def center_crop7(x):\n\n return x[:, :, 2:-2, 2:-2].contiguous()", "def create_centers(img_input, r, color_treshold):\n\n # make a copy of the input image\n img_np = np.copy(img_input[:,:,2])\n\n # cast radius to int\n r = np.int32(r)\n\n # define the dimensions of extended image\n ext1 = img_np.shape[0]+2*r\n ext2 = img_np.shape[1]+2*r\n\n # create the extended image \n img_ext = np.zeros((ext1, ext2))\n \n # indexing for copying all img_np pixels into img_ext\n left_index = (r,r)\n right_index = (img_ext.shape[0]-r, img_ext.shape[1]-r)\n \n # select axis0 and axis1 values of img_ext which are to be \n # replaced with img_np values.\n img_ext[left_index[0]:right_index[0], left_index[1]:right_index[1]] = img_np\n 
#io.imshow(img_ext)\n #io.show()\n #print(img_ext)\n\n # define the circular mask of radius r. \n mask = mk.circular_mask(r)\n\n \n # WHILE INSTANTIATION\n # This loop finds out the positions of intensity values maxcol \n # in the image. maxcol is initially set to 255, but \n # gets updated during the loop and will correspond to the maximum\n # intensity value found in the image. Then, all pixels will be \n # selected with the same intensity value. \n \n maxcol = 255\n\n # create an empty list to save the maximum intensity value corresponding \n # to the center of a nucleus. \n \n save_c_max = []\n\n while maxcol > color_treshold:\n # find maximum intensity value in img_ext.\n maxcol = np.amax(img_ext)\n\n # find position of maxcol value\n img_whitex, img_whitey = np.where(img_ext == maxcol)\n\n # select the first position with maximum intensity value\n first = (img_whitex[0], img_whitey[0])\n \n # specify indices where to apply the mask\n left_index = (first[0]-r, first[1]-r)\n right_index = (first[0]+r, first[1]+r)\n \n # create a squared subselection of the img_ext whose size is equal to mask\n submattochange = img_ext[left_index[0]:right_index[0], left_index[1]:right_index[1]]\n \n # apply the mask\n img_ext[left_index[0]:right_index[0], left_index[1]:right_index[1]] = np.multiply(submattochange,mask)\n \n # show the cells replaced by the mask\n # io.imshow(img_ext)\n # io.show()\n \n # save the values of position and intensity\n list_save = [first[0]-r, first[1]-r, maxcol]\n \n # put list_save list into save_c_max\n save_c_max.append(list_save)\n\n # cast save_c_max to int\n save_c_max = np.int32(np.array(save_c_max))\n\n i = 0\n while i < save_c_max.shape[0]:\n \n # This while iterates over all found center pixels of\n # the nuclei and replaces their color with red \n # (channel 0, intensity 255). \n \n img_input[save_c_max[i,0], save_c_max[i,1], 0] = 255\n i = i+1\n \n #r\"\"\"\n #Display image of the nuclei whose found center pixel \n #is colored red. 
\n #\"\"\"\n #plt.figure()\n #io.imshow(img_input)\n #io.show()\n \n return save_c_max # np.array that contains int of position and intensity of the centers", "def get_inst_centroid(inst_map):\n inst_centroid_list = []\n inst_id_list = list(np.unique(inst_map))\n for inst_id in inst_id_list[1:]: # avoid 0 i.e background\n mask = np.array(inst_map == inst_id, np.uint8)\n inst_moment = cv2.moments(mask)\n inst_centroid = [\n (inst_moment[\"m10\"] / inst_moment[\"m00\"]),\n (inst_moment[\"m01\"] / inst_moment[\"m00\"]),\n ]\n inst_centroid_list.append(inst_centroid)\n return np.array(inst_centroid_list)", "def load_mask(path, image, mask_name='module_unet', center=True):\n with open(path, 'r') as file:\n data = json.load(file)\n # if len(data[\"objects\"]) == 0:\n # return None\n # code = data[\"objects\"][0][\"bitmap\"][\"data\"]\n # origin = data[\"objects\"][0][\"bitmap\"][\"origin\"]\n # else:\n # flag = True\n # for obj in data[\"objects\"]:\n # if obj['classTitle'] == mask_name:\n inx = has_mask(mask_name, data=data)\n if inx is not False:\n obj = data[\"objects\"][inx]\n code = obj[\"bitmap\"][\"data\"]\n origin = obj[\"bitmap\"][\"origin\"]\n else:\n mask = np.zeros((image.shape[0], image.shape[1]))\n mask = mask.astype('uint8')\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n if center:\n return mask, mask_center\n else:\n return mask\n mask = base64_2_mask(code)\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n mask_center += origin\n\n up = np.zeros((origin[1], mask.shape[1]))\n mask2 = np.vstack((up, mask))\n left = np.zeros((mask2.shape[0], origin[0]))\n mask3 = np.hstack((left, mask2))\n down = np.zeros((image.shape[0] - mask3.shape[0], mask3.shape[1]))\n mask4 = np.vstack((mask3, down))\n right = np.zeros((mask4.shape[0], image.shape[1] - mask4.shape[1]))\n mask5 = np.hstack((mask4, right))\n\n if center:\n return mask5.astype('uint8'), mask_center.astype(int)\n else:\n return mask5.astype('uint8')" ]
[ "0.6837863", "0.6837863", "0.67218816", "0.66596466", "0.66575944", "0.66575944", "0.6468121", "0.6447253", "0.64076734", "0.64021444", "0.628941", "0.6269008", "0.6233817", "0.6229852", "0.62211686", "0.6215505", "0.6189341", "0.61493725", "0.6097249", "0.6065854", "0.6061627", "0.6060152", "0.60315263", "0.6014628", "0.60051364", "0.6002547", "0.6000099", "0.598367", "0.59627324", "0.5958294" ]
0.73660994
0
Indicates whether the interface is designed specifically to handle the supplied object's type. By default simply checks if the object is one of the types declared on the class, however if the type is expensive to import at load time the method may be overridden.
def applies(cls, obj): return type(obj) in cls.types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_object_type(self):\n raise exceptions.NotImplementedError()", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def isinstancemethod(cls, obj):\n return _isinstancemethod(cls, obj)", "def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType", "def is_supported_type(type_object):\n return not _is_typing_object(type_object) or (\n _is_typing_object(type_object) and _is_supported_generic(type_object)\n )", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def object_type_present(self, object_type):\n # Check input.\n if not isinstance(object_type, str):\n raise TypeError('object_type must be a string.')\n\n # Lookup object type and return.\n return object_type in self.model_map['object']", "def is_type(obj: Any) -> bool:\n return type(obj).__name__ == \"type\"", "def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv", "def _is_typing_object(type_object):\n return type_object.__module__ == \"typing\"", "def _is_user_class(obj):\n type_dict = type(obj).__dict__\n is_user_class = '_pandas_type' in type_dict\n return is_user_class", "def _isinstance(cls, x):\n return isinstance(x, cls.PYTHON_TYPE_CHECK)", "def is_type(self, typ):\n return typ == self.__class__.__name__", "def has_type(self, item_type):\n raise NotImplementedError()", "def is_model_type(obj: Any) -> bool: # pragma: no cover\n pass", "def _is_supported_generic(type_object):\n return (\n _is_typing_object(type_object)\n and type_object.__origin__ in _supported_generic_types\n )", "def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)", "def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'", "def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))", "def match(self, cls):\n return isinstance(self, cls)", "def _is_primitive_type(io: type) -> bool:\n return any(io.__module__.startswith(mldesigner_pkg) and item.__name__ == param_name for item in getmro(io))", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def CheckType(self, *args, **kwargs):\n pass", "def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()", "def class_is(cls: Class) -> bool:\n pass", "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)" ]
[ "0.72881466", "0.72243005", "0.70506996", "0.70410687", "0.6942121", "0.69025177", "0.68854284", "0.68413585", "0.67842245", "0.6779543", "0.671683", "0.66760117", "0.6669651", "0.6640231", "0.66180044", "0.6610032", "0.65361595", "0.65310186", "0.65304136", "0.6527261", "0.64797664", "0.6451194", "0.6437904", "0.6434628", "0.64197385", "0.6416001", "0.6406305", "0.6406305", "0.63975805", "0.63971835" ]
0.7322255
0
Given a list of Dataset objects, cast them to the specified datatype (by default the format matching the current interface) with the given cast_type (if specified).
def cast(cls, datasets, datatype=None, cast_type=None):
    datatype = datatype or cls.datatype
    cast = []
    for ds in datasets:
        if cast_type is not None or ds.interface.datatype != datatype:
            ds = ds.clone(ds, datatype=[datatype], new_type=cast_type)
        cast.append(ds)
    return cast
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def castData(data, type='int64'):\n data = data.astype(type)\n return data", "def list_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=list)", "def _cast(self, sample, set_dtype=True, other=None):\n cuda = self.cuda if other is None else other.is_cuda\n dtype = self.dtype if other is None else other.dtype\n\n if isinstance(sample, pt.Tensor):\n sample = sample.type(dtype) if set_dtype else sample\n sample = sample.cuda() if cuda else sample\n return sample\n elif isinstance(sample, str): # catch strings so they are not handled as Sequence instances\n return sample\n elif hasattr(sample, '_fields'): # catch namedtuples\n return sample.__class__(*[self._cast(s, set_dtype) for s in sample])\n elif isinstance(sample, Sequence):\n return sample.__class__([self._cast(s, set_dtype) for s in sample])\n else:\n return sample # default case: return unaltered", "def _cast_dtype(tik_instance, dst, src, cast_repeat_time,\n cast_remainder, cast_case):\n if cast_case == \"int8_2_float16\":\n tik_instance.vconv(MAX_MASK, 'none', dst, src, cast_repeat_time,\n 1, 1, 8, 4, None)\n with tik_instance.if_scope(cast_remainder != 0):\n tik_instance.vconv(cast_remainder, 'none',\n dst[cast_repeat_time * MAX_MASK],\n src[cast_repeat_time * MAX_MASK],\n 1, 1, 1, 8, 4, None)\n elif cast_case == \"float16_2_int8\":\n tik_instance.vconv(MAX_MASK, 'none', dst, src, cast_repeat_time,\n 1, 1, 4, 8, None)\n with tik_instance.if_scope(cast_remainder != 0):\n tik_instance.vconv(cast_remainder, 'none',\n dst[cast_repeat_time * MAX_MASK],\n src[cast_repeat_time * MAX_MASK],\n 1, 1, 1, 4, 8, None)", "def convert_cast(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n dtype = attrs[\"dtype\"]\n\n # dtype can be mapped only with types from TensorProto\n # float32 is mapped to float and float64 to double in onnx\n # following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py\n if dtype == 'float32':\n dtype = 'float'\n elif dtype == 'float64':\n dtype = 'double'\n\n node = onnx.helper.make_node(\n \"Cast\",\n input_nodes,\n [name],\n to=getattr(onnx.TensorProto, dtype.upper()),\n name=name,\n )\n return [node]", "def test_cast_list_multidim(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\"yolo\", categories, shape=2)\n sample = [\"asdfa\", \"1\"] # np.array(['asdfa', '1'], dtype=object)\n assert dim.cast(sample) == [\"asdfa\", 1]", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def iter_cast(inputs, dst_type, return_type=None):\n if not isinstance(inputs, abc.Iterable):\n raise TypeError(\"inputs must be an iterable object\")\n if not isinstance(dst_type, type):\n raise TypeError('\"dst_type\" must be a valid type')\n\n out_iterable = 
map(dst_type, inputs)\n\n if return_type is None:\n return out_iterable\n else:\n return return_type(out_iterable)", "def _cast_types(self, input_dict):\n return cast_types(input_dict, self.params['dtype'])", "def convert_cast(g, op, block):\n\n dtype = op.attr(\"out_dtype\")\n dtype = _convert_dtype_value(dtype)\n x = g.get_node(op.input(\"X\")[0])\n out = _op.cast(x, dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def test_cast_list(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5)\n assert dim.cast([\"1\", \"2\"]) == [1, 2]", "def __convToTyped(index, value, dtypes):\n\t#print(index, value)\n\tdtype = dtypes[index]\n\ttvalue = value\n\tif dtype == \"int\":\n\t\ttvalue = int(value)\n\telif dtype == \"float\":\n\t\ttvalue = float(value)\n\treturn tvalue", "def load_list_dataset(h_node,base_type,py_obj_type):\n\n if h_node.shape is None:\n\n # empty list tuple or set just return new instance of py_obj_type\n return py_obj_type() if isinstance(py_obj_type,tuple) else py_obj_type(())\n\n str_type = h_node.attrs.get('str_type', None)\n content = h_node[()]\n if str_type in (b'str','str'):\n\n # decode bytes representing python string before final conversion\n if h_node.dtype.itemsize > 1 and 'bytes' in h_node.dtype.name:\n\n # string dataset 4.0.x style convert it back to python string\n content = np.array(content, copy=False, dtype=str).tolist()\n else:\n\n # decode bytes representing python string before final conversion\n content = bytes(content).decode(\"utf8\")\n return py_obj_type(content) if content.__class__ is not py_obj_type else content", "def test_cast_list(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3.0: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories)\n assert dim.cast([\"asdfa\"]) == [\"asdfa\"]\n assert dim.cast([\"2\"]) == [2]\n assert dim.cast([\"3.0\"]) == [3.0]", "def test_cast_list(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.cast([\"1\", \"2\"]) == [1.0, 2.0]", "def cast(*args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID3_cast(*args)", "def cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def cast(self, dtype):\n self.dtype = np.dtype(dtype)\n self.preprocess = False\n self.set_data(self.data)", "def cast(*args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL3_cast(*args)", "def cast_tensor_type(inputs, src_type, dst_type):\n if isinstance(inputs, torch.Tensor):\n return inputs.to(dst_type)\n elif isinstance(inputs, str):\n return inputs\n elif isinstance(inputs, np.ndarray):\n return inputs\n elif isinstance(inputs, abc.Mapping):\n return type(inputs)({\n k: cast_tensor_type(v, src_type, dst_type)\n for k, v in inputs.items()\n })\n elif isinstance(inputs, abc.Iterable):\n return type(inputs)(\n cast_tensor_type(item, src_type, dst_type) for item in inputs)\n\n return inputs", "def cast_tensor_type(inputs, src_type, dst_type):\n if isinstance(inputs, torch.Tensor):\n return inputs.to(dst_type)\n elif isinstance(inputs, str):\n return inputs\n elif isinstance(inputs, np.ndarray):\n return inputs\n elif isinstance(inputs, abc.Mapping):\n return type(inputs)({\n k: cast_tensor_type(v, src_type, dst_type)\n for k, v in inputs.items()\n })\n elif isinstance(inputs, abc.Iterable):\n return type(inputs)(\n cast_tensor_type(item, src_type, dst_type) for item in inputs)\n\n return inputs", "def tuple_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=tuple)", "def test_all_datatypes_read(self):\n 
self.all_datatypes_prepare()\n\n tempfile = self.get_temp_file()\n\n with open(tempfile.name, 'w') as csvfile:\n writer = csv.writer(csvfile)\n # serializing blob bytearray in friendly format\n data_set = list(self.data)\n\n data_set[2] = self.format_blob(self.data[2])\n # Here we convert containers of blobs to strings that match exactly the output of the SELECT *\n # because otherwise the comparison fails due to extra quotes added by the csv writer around the blobs\n # that were converted to strings. White spaces do matter\n data_set[24] = '{3: ' + self.format_blob(self.data[24][3]) + '}'\n data_set[25] = '[' + ', '.join(self.format_blob(b) for b in self.data[25]) + ']'\n data_set[26] = '{' + ', '.join(self.format_blob(b) for b in self.data[26]) + '}'\n writer.writerow(data_set)\n\n def _test(prepared_statements):\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n out, err, _ = self.run_cqlsh(cmds=\"COPY ks.testdatatype FROM '{}' WITH PREPAREDSTATEMENTS = {}\"\n .format(tempfile.name, prepared_statements))\n\n out, err, _ = self.run_cqlsh(cmds=\"SELECT * FROM ks.testdatatype\")\n results = self.parse_cqlsh_query(out=out, num_cols=len(self.data), timestamps_to_be_rounded=[10, 17])\n\n self.assertCsvResultEqual(tempfile.name, results, 'testdatatype')\n\n _test(True)\n _test(False)", "def convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n dtype_backend: DtypeBackend = \"numpy_nullable\",\n ):\n return DataFrameDefault.register(pandas.DataFrame.convert_dtypes)(\n self,\n infer_objects=infer_objects,\n convert_string=convert_string,\n convert_integer=convert_integer,\n convert_boolean=convert_boolean,\n convert_floating=convert_floating,\n dtype_backend=dtype_backend,\n )", "def cast_tensor_type(inputs, src_type, dst_type):\n if isinstance(inputs, torch.Tensor):\n return inputs\n elif isinstance(inputs, str):\n return inputs\n elif isinstance(inputs, np.ndarray):\n return inputs\n elif isinstance(inputs, abc.Mapping):\n return type(inputs)({k: cast_tensor_type(v, src_type, dst_type) for k, v in inputs.items()})\n elif isinstance(inputs, abc.Iterable):\n return type(inputs)(cast_tensor_type(item, src_type, dst_type) for item in inputs)\n return inputs", "def convert_dataset(dataset: h5py.Dataset, refs: h5py.Group):\n # all MATLAB variables have the attribute MATLAB_class\n # if this is not present, it is not convertible\n if has_refs(dataset):\n mtype = \"cell_with_refs\"\n else:\n try:\n mtype = dataset.attrs[\"MATLAB_class\"].decode()\n except KeyError:\n raise ValueError(f\"{dataset} is not a MATLAB type.\")\n\n if mtype == \"cell_with_refs\":\n cell = []\n for ref in dataset:\n row = []\n for r in ref:\n entry = convert_dataset(refs.get(r), refs=refs)\n row.append(entry)\n cell.append(row)\n return cell\n\n elif mtype == \"char\":\n if len(dataset) == 0:\n return \"\"\n elif isinstance(dataset[0], np.ndarray):\n return \"\".join([chr(x[0]) for x in dataset]).replace(\"\\x00\", \"\")\n else:\n return \"\".join([chr(x) for x in dataset]).replace(\"\\x00\", \"\")\n\n elif mtype == \"bool\":\n return bool(dataset)\n\n elif mtype == \"logical\":\n arr = np.array(dataset, dtype=bool).T.squeeze()\n if arr.size == 1:\n arr = bool(arr)\n return arr\n\n elif mtype == \"canonical empty\":\n return None\n\n # complex numbers need to be filtered out separately\n elif \"imag\" in str(dataset.dtype):\n if dataset.attrs[\"MATLAB_class\"] == 
b\"single\":\n dtype = np.complex64\n else:\n dtype = np.complex128\n arr = np.array(dataset)\n arr = (arr[\"real\"] + arr[\"imag\"] * 1j).astype(dtype)\n return arr.T.squeeze()\n\n # if it is none of the above, we can convert to numpy array\n elif mtype in (\n \"double\",\n \"single\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"cell\",\n ):\n if mtype == \"cell\":\n print(dataset)\n arr = np.array(dataset, dtype=dataset.dtype)\n return arr.T.squeeze()\n\n else:\n raise ValueError(f\"Data type not supported: {mtype}, {dataset.dtype}.\")", "def cast(*args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIF3_cast(*args)", "def test_casting_with_iterable(test_fixture, test_input, expected):\n test_fixture.cast_prop = test_input\n assert test_fixture.cast_prop == expected\n assert type(test_fixture.cast_prop) is tuple\n for val in test_fixture.cast_prop:\n assert type(val) is float", "def convert_dtypes(rows):\n dtype_map = {pd.Timestamp: lambda x: x.to_pydatetime(),\n np.int8: lambda x: int(x),\n np.int16: lambda x: int(x),\n np.int32: lambda x: int(x),\n np.int64: lambda x: int(x),\n np.float16: lambda x: float(x),\n np.float32: lambda x: float(x),\n np.float64: lambda x: float(x),\n np.float128: lambda x: float(x)}\n for row in rows:\n yield [dtype_map.get(type(elem), lambda x: x)(elem) for elem in row]", "def convert_types(df):\n \n # Iterate through each column\n for c in df:\n \n # Convert ids and booleans to integers\n if ('SK_ID' in c):\n df[c] = df[c].fillna(0).astype(np.int32)\n \n # Convert objects to category\n elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n df[c] = df[c].astype('category')\n \n # Booleans mapped to integers\n elif list(df[c].unique()) == [1, 0]:\n df[c] = df[c].astype(bool)\n \n # Float64 to float32\n elif df[c].dtype == float:\n df[c] = df[c].astype(np.float32)\n \n # Int64 to int32\n elif df[c].dtype == int:\n df[c] = df[c].astype(np.int32)\n \n return df" ]
[ "0.6137207", "0.6128256", "0.57472014", "0.5688774", "0.5682158", "0.5568439", "0.5551305", "0.5530607", "0.54946697", "0.5493658", "0.54673314", "0.54476064", "0.5440923", "0.54331124", "0.5413348", "0.5373463", "0.53722644", "0.5361807", "0.53259426", "0.5319939", "0.5319939", "0.5311255", "0.53087986", "0.5294972", "0.5285947", "0.5270704", "0.52574027", "0.52346116", "0.52329236", "0.52250564" ]
0.8129356
0
Should return a persisted version of the Dataset.
def persist(cls, dataset): return dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n if self.loaded:\n full_file_name = self.resource_manager.get_dataset(self.corpus, self.embeddings.vsm_name)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def save(self):\n if self._data is None and self._meta is None:\n w = \"No data/meta components found in the DataSet.\"\n warnings.warn(w)\n return None\n ds_clone = self.clone()\n self._cache['savepoint'] = ds_clone.split()\n return None", "def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')", "def get_dataset(self):\n return", "def get_dataset():\n\n return db.store.all()", "def dataset(self):\n with self._lock:\n if self._dataset is None:\n if isinstance(self._orig_dataset, DaskLazyIndexer):\n self._orig_dataset = self._orig_dataset.dataset\n dataset = dask_getitem(self._orig_dataset, self.keep)\n for transform in self.transforms:\n dataset = transform(dataset)\n self._dataset = dataset\n self._orig_dataset = None\n return self._dataset", "def get_dataset(self):\n return datasets.get_dataset(self.dataset_id)", "def get_dataset(self) -> datasets.OpenMLDataset:\n return datasets.get_dataset(self.dataset_id)", "def to_dataset(self):\n if not self.type:\n raise aspecd.exceptions.MissingDatasetError\n dataset = aspecd.utils.object_from_class_name(self.type)\n dataset.id = self.id\n for history_record in self.history:\n history_record.replay(dataset)\n return dataset", "def dataset(self) -> np.ndarray:\n if self._cache_dataset_list:\n # Concatenates the `self._dataset` and the datasets in\n # `self._cache_dataset_list`.\n if self._dataset.size > 0:\n dataset_list = [self._dataset] + self._cache_dataset_list\n else:\n dataset_list = self._cache_dataset_list\n\n self._dataset = np.vstack(dataset_list)\n self._cache_dataset_list = []\n return self._dataset", "def create_dataset_version(self):\n assert self.dataset_id, 'dataset_id required!'\n return self._datasets_request('POST', dataset_id=self.dataset_id, versions_request=True)", "def export_dataset(self):\n raise NotImplementedError", "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def DataSet(self):\n if hasattr(self, '_dataset') and self._dataset and self._dataset.Get():\n return WrapDataObject(self._dataset.Get())\n\n return None", "def build_dataset(self):\n self.dataset = KITTIBEVDataset(self.dataset_config, self.transform)\n return self.dataset", "def _dataset():\n dataset = tf_record_dataset(DatasetName.SIGNUM, DatasetType.TRAIN)\n dataset = dataset.batch(1)\n dataset = dataset.map(transform_for_prediction)\n dataset = dataset.unbatch()\n dataset = dataset.filter(lambda frames, label, signer: tf.math.equal(label, 420) and tf.math.equal(signer, 1))\n dataset = dataset.batch(1)\n return dataset.take(1)", "def persisted(self):\n return True if self._data else False", "def backup_dataset(outfile=None):\n return backup_es(Dataset, outfile=outfile)", "def get_dataset(self, 
dataset_path=None, normalize=True, return_original=False):\n if dataset_path is None:\n dataset_path = self.dir\n \n if \"mocap\" in dataset_path.lower():\n print(\"Loading Mocap dataset.\")\n df = get_mocap()\n df_orig = df\n elif \"profi\" in dataset_path.lower():\n print(\"Loading Profiset dataset.\")\n df = get_profiset()\n df_orig = df\n else:\n print(\"Loading CoPhIR dataset.\")\n df_orig, attr_lengths = get_objects_with_indexes(self.labels, f'{dataset_path}/level-{str(self.n_levels)}.txt', f'{dataset_path}/objects.txt')\n if normalize:\n df = scale_per_descriptor(df_orig, self.labels, attr_lengths)\n else:\n df = df_orig\n \n assert df.shape[1] == self.descriptor_values + self.n_levels + len([\"object_id\"])\n logging.info(f\"Loaded dataset of shape: {df.shape}\")\n if return_original:\n return df, df_orig\n else:\n return df", "def create_dataset(self, dataset: DatasetDB) -> DatasetDB:\n\n self._es.add_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset.id,\n document=self._dataset_to_es_doc(dataset),\n )\n\n self._es.create_index(\n index=dataset_records_index(dataset.id),\n force_recreate=True,\n )\n return dataset", "def persistData(self):\n\n assert self.is_valid is not None, ('You must override the '\n 'persistData method if you want '\n 'to persist the data without '\n 'validating it first.')\n\n registry = self.registry\n registry.save()\n\n self.result = json.dumps(registry.data)\n return registry.instance", "def dataset(self):\n return self.predictor_data_manager.dataset(\n self.data_name, self.trait_name, data=self._data, trait=self.trait,\n categorical_trait=self.categorical_trait)", "def get_pydicom(self) -> pydicom.FileDataset:\n return util.get_pydicom(self.client, self.id_)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def store_data(self):\n return self._store_data", "def saveData(self):\n pass", "def make_dataset(self) -> torch.utils.data.Dataset:\n transform = cnn_utils.ToTensor()\n return cnn_utils.ArtifactDataset(self.stamps, transform)", "def _getDatasetPath(self):\n return self.__dataset_path", "def copy(self):\n return Dataset(self._data.copy(), self.gene_meta.copy(), self.n_genes)" ]
[ "0.6963877", "0.68919927", "0.6889675", "0.6845472", "0.6794341", "0.6776925", "0.67478496", "0.65695554", "0.64923984", "0.64115924", "0.63665664", "0.6357858", "0.62832326", "0.6263471", "0.625156", "0.6228931", "0.6202975", "0.61697525", "0.61264986", "0.60952103", "0.60604995", "0.60427624", "0.6041442", "0.60336745", "0.60336745", "0.6017287", "0.6003498", "0.6002828", "0.5994456", "0.59626997" ]
0.8214712
0
Should return a computed version of the Dataset.
def compute(cls, dataset): return dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dataset(self):\n return", "def dataset(self):\n with self._lock:\n if self._dataset is None:\n if isinstance(self._orig_dataset, DaskLazyIndexer):\n self._orig_dataset = self._orig_dataset.dataset\n dataset = dask_getitem(self._orig_dataset, self.keep)\n for transform in self.transforms:\n dataset = transform(dataset)\n self._dataset = dataset\n self._orig_dataset = None\n return self._dataset", "def build_dataset(self):\n self.dataset = KITTIBEVDataset(self.dataset_config, self.transform)\n return self.dataset", "def dataset(self) -> np.ndarray:\n if self._cache_dataset_list:\n # Concatenates the `self._dataset` and the datasets in\n # `self._cache_dataset_list`.\n if self._dataset.size > 0:\n dataset_list = [self._dataset] + self._cache_dataset_list\n else:\n dataset_list = self._cache_dataset_list\n\n self._dataset = np.vstack(dataset_list)\n self._cache_dataset_list = []\n return self._dataset", "def get_dataset(self, dataset_path=None, normalize=True, return_original=False):\n if dataset_path is None:\n dataset_path = self.dir\n \n if \"mocap\" in dataset_path.lower():\n print(\"Loading Mocap dataset.\")\n df = get_mocap()\n df_orig = df\n elif \"profi\" in dataset_path.lower():\n print(\"Loading Profiset dataset.\")\n df = get_profiset()\n df_orig = df\n else:\n print(\"Loading CoPhIR dataset.\")\n df_orig, attr_lengths = get_objects_with_indexes(self.labels, f'{dataset_path}/level-{str(self.n_levels)}.txt', f'{dataset_path}/objects.txt')\n if normalize:\n df = scale_per_descriptor(df_orig, self.labels, attr_lengths)\n else:\n df = df_orig\n \n assert df.shape[1] == self.descriptor_values + self.n_levels + len([\"object_id\"])\n logging.info(f\"Loaded dataset of shape: {df.shape}\")\n if return_original:\n return df, df_orig\n else:\n return df", "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def get_dataset(self):\n return datasets.get_dataset(self.dataset_id)", "def make_dataset(self) -> torch.utils.data.Dataset:\n transform = cnn_utils.ToTensor()\n return cnn_utils.ArtifactDataset(self.stamps, transform)", "def getDataset(self, train=True):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n \n if self.dataset == \"ELLIPSE\":\n a = np.array([[0,1.0],[1.0,2.0]]) \n b = a*0.5 \n myE = el.ellipse(device, 500, 100, a, b) \n if train == True:\n return myE.create_dataset(myE.examples)\n return myE.create_dataset(myE.valid) \n \n if self.dataset == \"SWISS\": \n myS = sw.SwissRoll(device, 500, 0.2) \n if train == True:\n return myS.create_dataset(myS.examples)\n return myS.create_dataset(myS.valid)\n \n \n #open file\n myFile = h5py.File(self.dataString, 'r', self.driver)\n \n if train == True: \n inputString = \"train_inputs\"\n labelsString = \"train_labels\"\n \n else:\n inputString = \"test_inputs\"\n labelsString = \"test_labels\"\n \n #get hdf5 datsets\n features = myFile.get(inputString)\n labels = myFile.get(labelsString)\n \n #convert to tensors\n features = torch.from_numpy(np.array(features))\n labels = torch.from_numpy(np.array(labels))\n \n #close file to ensure dataset is in memory\n myFile.close()\n \n #conver to correct datatypes\n features = features.float()\n \n if self.conv_sg == False:\n labels = labels.long() \n \n dataset = torch.utils.data.TensorDataset(features, labels)\n \n return dataset", "def get_dataset(args):\n\n if args['experiment']['dataset'] == 
Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def get_dataset(self) -> datasets.OpenMLDataset:\n return datasets.get_dataset(self.dataset_id)", "def iris():\n return IrisDataset()", "def _dataset():\n dataset = tf_record_dataset(DatasetName.SIGNUM, DatasetType.TRAIN)\n dataset = dataset.batch(1)\n dataset = dataset.map(transform_for_prediction)\n dataset = dataset.unbatch()\n dataset = dataset.filter(lambda frames, label, signer: tf.math.equal(label, 420) and tf.math.equal(signer, 1))\n dataset = dataset.batch(1)\n return dataset.take(1)", "def copy(self):\n return Dataset(self._data.copy(), self.gene_meta.copy(), self.n_genes)", "def _get_dataset(self):\n if self.mode == 'train':\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .shuffle(buffer_size=self.num_samples, reshuffle_each_iteration=True)\n .map(map_func=self.import_waveforms_fn_train, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n 
.prefetch(buffer_size=self.prefetch_buffer)\n )\n else:\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .map(map_func=self.import_waveforms_fn_val, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def download_dataset(self):\n raise NotImplementedError", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def copy(self) -> \"Dataset\":\n\n return deepcopy(self)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def get_dataset(self):\n\n trainset = datasets.CIFAR100('datasets/CIFAR100/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.CIFAR100('datasets/CIFAR100/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def persist(cls, dataset):\n return dataset", "def get_val_dataset(self):\n return SubsetDataset(self.ds, self.valid_ids)", "def get_dataset(self):\n\n trainset = datasets.CIFAR10('datasets/CIFAR10/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.CIFAR10('datasets/CIFAR10/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def get_new_data(self):\n if self.is_dethist:\n return self._generate_dethist_data()\n else:\n return self._generate_data()", "def DataSet(self):\n if hasattr(self, '_dataset') and self._dataset and self._dataset.Get():\n return WrapDataObject(self._dataset.Get())\n\n return None", "def datasets(self):\n return [Dataset.ENSEMBL]", "def __init__(self, dataset: Dataset):\n self.dataset = dataset", "def _getDatasetPath(self):\n return self.__dataset_path" ]
[ "0.7378825", "0.72528124", "0.71179456", "0.70104563", "0.67688876", "0.6758021", "0.6701081", "0.6660037", "0.6647292", "0.66434914", "0.6596838", "0.65712315", "0.6510925", "0.64786947", "0.64778185", "0.6453151", "0.6453069", "0.6432501", "0.63689977", "0.6350902", "0.6350902", "0.6347801", "0.6324245", "0.630048", "0.6297288", "0.6294321", "0.6281869", "0.62638956", "0.6256724", "0.62082815" ]
0.7553438
0
Replace `nodata` value in data with NaN
def replace_value(cls, data, nodata): data = data.astype('float64') mask = data != nodata if hasattr(data, 'where'): return data.where(mask, np.NaN) return np.where(mask, data, np.NaN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_nan(x):\n x[x == -999] = np.nan\n return x", "def isnan(data):\n return _make.isnan(data)", "def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data", "def fill_nans(data):\n for col in data.columns:\n data[col].fillna(-999, inplace=True)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def replace_na(data, replace=\"average\", remove=False, columns):\n \n return", "def fix_nan(image, replace=0.):\n h = pyfits.open(image, mode='update')\n imgdata = h[0].data\n imgdata = np.where(np.isnan(imgdata), replace, imgdata)\n h[0].data = imgdata\n h.flush()\n h.close()", "def fix_data(self, df):\n return df.dropna(axis='columns', how='all').fillna(0.0)", "def _clean(self, dataset):\n # Replace missing values with numpy's NaN. The missing value is\n # usually 1e+20, but values can be like 1.0000002e+20, which is\n # different. Ergo the inequality.\n for var in dataset.data_vars.itervalues():\n if 'missing_value' in var.attrs:\n missing_data_value = var.missing_value\n try:\n var.values[var.values >= missing_data_value] = np.NaN\n except ValueError:\n print \"Encountered ValueError in {0}. Ignoring\".format(var.name)", "def nan_value(data):\n return data.isnull().any()", "def pd_isnan(val):\n return val is None or val != val", "def test_nodata_value(self):\n\n # Read files with -9999 as nominated nodata value\n for filename in [os.path.join(TESTDATA, 'Population_2010_clip.tif'),\n os.path.join(HAZDATA,\n 'Lembang_Earthquake_Scenario.asc')]:\n\n R = read_layer(filename)\n A = R.get_data(nan=False)\n\n # Verify nodata value\n Amin = min(A.flat[:])\n msg = ('Raster must have -9999 as its minimum for this test. '\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -9999, msg\n\n # Verify that GDAL knows about this\n nodata = R.get_nodata_value()\n msg = ('File %s should have registered nodata '\n 'value %i but it was %s' % (filename, Amin, nodata))\n assert nodata == Amin, msg\n\n # Then try using numpy.nan\n A = R.get_data(nan=True)\n\n # Verify nodata value\n Amin = numpy.nanmin(A.flat[:])\n msg = ('True raster minimum must exceed -9999. '\n 'We got %f for file %s' % (Amin, filename))\n assert Amin > -9999, msg\n\n # Then try with a number\n A = R.get_data(nan=-100000)\n\n # Verify nodata value\n Amin = numpy.nanmin(A.flat[:])\n msg = ('Raster must have -100000 as its minimum for this test. 
'\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -100000, msg\n\n # Try with illegal nan values\n for illegal in [{}, (), [], None, 'a', 'oeuu']:\n try:\n R.get_data(nan=illegal)\n except InaSAFEError:\n pass\n else:\n msg = ('Illegal nan value %s should have raised '\n 'exception' % illegal)\n raise RuntimeError(msg)", "def NaN_cleaning(df):\n df = df.replace(np.nan, 'unknown')\n return df.reset_index(drop=True)", "def code_unknown_to_nan(data, attribute_values):\n attribute_values_unknown = attribute_values[attribute_values['Meaning'] == \"unknown\"]\n for i in range(len(attribute_values_unknown)):\n colname = attribute_values_unknown.iloc[i]['Attribute']\n unknown_values = eval('[' + str(attribute_values_unknown.iloc[i]['Value']) + ']')\n try:\n data[colname] = data[colname].replace(unknown_values, float('nan'))\n except:\n pass\n return data", "def na_value():\n return pd.NA", "def replace_nan(cls, prop_obj):\n for key, item in enumerate(prop_obj):\n for column, value in item.items():\n if str(value) == 'nan':\n prop_obj[key][column] = 0.0", "def set_nan_as_string(data, replace_str='0'):\n for i, x in enumerate(data):\n for key, value in x.items():\n if value == '':\n x[key] = replace_str\n data[i] = x", "def test_replace_question_mark_with_nan(raw_data: pd.DataFrame):\n\n raw_data_nan = replace_question_mark_with_nan(raw_data)\n\n # Check the existence of NaN values\n assert check_null_values(raw_data_nan)\n\n # Check that there are no more question marks\n assert not check_question_mark_as_null(raw_data_nan)", "def handle_na(self, data: pd.DataFrame) -> pd.DataFrame:\n return remove_missing(\n data,\n self.params[\"na_rm\"],\n list(self.REQUIRED_AES | self.NON_MISSING_AES),\n self.__class__.__name__,\n )", "def test_nodata(self):\n\n \n filename = 'data/test_grid.asc'\n R = read_coverage(filename)\n \n nan = R.get_nodata_value()\n assert nan == -9999\n \n A = R.get_data(nan=False)\n assert numpy.min(A[:]) == -9999\n assert numpy.allclose(numpy.max(A[:]), 50.9879837036) \n \n A = R.get_data(nan=True)\n assert numpy.allclose(numpy.nanmin(A[:]), -50.60135540866)\n assert numpy.allclose(numpy.nanmax(A[:]), 50.9879837036)", "def _nan_data(data, to_nan=0.2):\n # Number of values to be NaNed as int\n to_nan = int(len(data) * to_nan)\n # Existing NaN's as indicies\n existing_nans = data[data.isnull() == True].index\n return to_nan, existing_nans", "def na_value() -> pandas.NA:\n return pandas.NA", "def na_value() -> pandas.NA:\n return pandas.NA", "def nonull(val):\n return val if not pd.isnull(val) else None", "def replace_nan(arr, value):\n arr[np.isnan(arr)] = value\n return arr", "def is_nan(self, row_data):\n return math.isnan(row_data)", "def change_nan(dict):\n\n for k,v in dict.items():\n if np.isnan(v):\n dict[k] = 0.0\n else:\n dict[k] = v", "def NA():\n return float('nan')", "def fix_null_vals(dataset):\n\tprint(\"\\tFixing null values\")\n\n\tif not dataset.isnull().any().any():\n\t\treturn dataset\n\telse:\n\t\treturn dataset.fillna(method=\"ffill\")", "def fill_missing_data_points(data):\n return data.interpolate()" ]
[ "0.72733575", "0.7264341", "0.71801853", "0.70042026", "0.6998662", "0.6991272", "0.6980195", "0.69300354", "0.6904613", "0.68298775", "0.6807619", "0.67213553", "0.6716433", "0.6622631", "0.65843123", "0.65732735", "0.657039", "0.6568355", "0.6560903", "0.65405995", "0.6515394", "0.65076894", "0.65076894", "0.64965636", "0.6489222", "0.64845514", "0.64651066", "0.64259136", "0.6423595", "0.64227563" ]
0.7983567
0
Given a Dataset object and a dictionary with dimension keys and selection keys (i.e. tuple ranges, slices, sets, lists, or literals) return a boolean mask over the rows in the Dataset object that have been selected.
def select_mask(cls, dataset, selection): mask = np.ones(len(dataset), dtype=np.bool_) for dim, sel in selection.items(): if isinstance(sel, tuple): sel = slice(*sel) arr = cls.values(dataset, dim) if util.isdatetime(arr): try: sel = util.parse_datetime_selection(sel) except Exception: pass if isinstance(sel, slice): with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered') if sel.start is not None: mask &= sel.start <= arr if sel.stop is not None: mask &= arr < sel.stop elif isinstance(sel, (set, list)): iter_slcs = [] for ik in sel: with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered') iter_slcs.append(arr == ik) mask &= np.logical_or.reduce(iter_slcs) elif callable(sel): mask &= sel(arr) else: index_mask = arr == sel if dataset.ndims == 1 and np.sum(index_mask) == 0: data_index = np.argmin(np.abs(arr - sel)) mask = np.zeros(len(dataset), dtype=np.bool_) mask[data_index] = True else: mask &= index_mask return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_mask(cls, dataset, selection):\n mask = None\n for dim, sel in selection.items():\n if isinstance(sel, tuple):\n sel = slice(*sel)\n arr = cls.values(dataset, dim, keep_index=True)\n if util.isdatetime(arr):\n try:\n sel = util.parse_datetime_selection(sel)\n except Exception:\n pass\n\n new_masks = []\n if isinstance(sel, slice):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', r'invalid value encountered')\n if sel.start is not None:\n new_masks.append(sel.start <= arr)\n if sel.stop is not None:\n new_masks.append(arr < sel.stop)\n if not new_masks:\n continue\n new_mask = new_masks[0]\n for imask in new_masks[1:]:\n new_mask &= imask\n elif isinstance(sel, (set, list)):\n for v in sel:\n new_masks.append(arr==v)\n if not new_masks:\n continue\n new_mask = new_masks[0]\n for imask in new_masks[1:]:\n new_mask |= imask\n elif callable(sel):\n new_mask = sel(arr)\n else:\n new_mask = arr == sel\n\n if mask is None:\n mask = new_mask\n else:\n mask &= new_mask\n return mask", "def indexed(cls, dataset, selection):\n selected = list(selection.keys())\n all_scalar = all((not isinstance(sel, (tuple, slice, set, list))\n and not callable(sel)) for sel in selection.values())\n all_kdims = all(d in selected for d in dataset.kdims)\n return all_scalar and all_kdims", "def select_dic(old_dic,**kwargs):\n \n \n bool_all=np.ones(old_dic['MODEL_KEY'].shape,dtype=bool)\n for key in old_dic:\n lim_val = kwargs.get(key,None)\n if lim_val is None:\n continue\n array_val=old_dic[key]\n bool_sel=(array_val<=lim_val[1]) & (array_val>=lim_val[0])\n \n bool_all=bool_all & bool_sel\n \n new_dic={key:old_dic[key][bool_all] for key in old_dic}\n \n logging.info('Initial number of elements is %i'%(len(old_dic['MODEL_KEY'])))\n logging.info('Final number of elements is %i'%(len(new_dic['MODEL_KEY'])))\n\n \n return new_dic,bool_all", "def __getitem__(self, keep):\n ndim = len(self.dataset.shape)\n # Ensure that keep is a tuple (then turn it into a list to simplify further processing)\n keep = list(keep) if isinstance(keep, tuple) else [keep]\n # The original keep tuple will be passed to data transform chain\n original_keep = tuple(keep)\n # Ensure that keep is same length as data dimension (truncate or pad with blanket slices as necessary)\n keep = keep[:ndim] + [slice(None)] * (ndim - len(keep))\n # Map current selection to original data indices based on any existing initial selection, per data dimension\n keep = [(dkeep if dlookup is None else dlookup[dkeep]) for dkeep, dlookup in zip(keep, self._lookup)]\n # Iterate over dimensions of dataset, storing information on selection on each dimension:\n # `selection` is a list with one element per dimension; each element is a list of contiguous segments along\n # the dimension, and each segment is represented by a tuple of 3 elements:\n # (dataset selection, post-selection, output array selection)\n # Similarly, `segment_sizes` is a list of lists of segment lengths (empty lists for scalar-selected dimensions)\n selection, segment_sizes = [], []\n for dim_keep, dim_len in zip(keep, self.dataset.shape):\n if np.isscalar(dim_keep):\n # If selection is a scalar, pass directly to dataset selector and remove dimension from output\n selection.append([(dim_keep, None, None)])\n segment_sizes.append([])\n elif isinstance(dim_keep, slice):\n # If selection is a slice, pass directly to dataset selector without post-selection\n start, stop, stride = dim_keep.indices(dim_len)\n segm_size = len(range(start, stop, stride))\n 
selection.append([(slice(start, stop, stride), slice(None), slice(0, segm_size, 1))])\n segment_sizes.append([segm_size])\n elif len(dim_keep) == 0:\n # If selection is empty, pass to post-selector, as HDF5 datasets do not support zero-length selection\n selection.append([(slice(0, 1, 1), slice(0, 0, 1), slice(0, 0, 1))])\n segment_sizes.append([0])\n else:\n # Anything else is advanced indexing via bool or integer sequences\n dim_keep = np.atleast_1d(dim_keep)\n # Turn boolean mask into integer indices (True means keep that index)\n if dim_keep.dtype == bool and len(dim_keep) == dim_len:\n dim_keep = np.nonzero(dim_keep)[0]\n elif not np.all(dim_keep == np.unique(dim_keep)):\n raise TypeError('LazyIndexer cannot handle duplicate or unsorted advanced integer indices')\n # Split indices into multiple contiguous segments (specified by first and one-past-last data indices)\n jumps = np.nonzero(np.diff(dim_keep) > 1)[0]\n first = [dim_keep[0]] + dim_keep[jumps + 1].tolist()\n last = dim_keep[jumps].tolist() + [dim_keep[-1]]\n segments = np.c_[first, np.array(last) + 1]\n if len(dim_keep) > 0.2 * dim_len and len(segments) > 1:\n # If more than 20% of data are selected in 2 or more separate segments (the Ratcliffian benchmark),\n # select data at dataset level with a single slice spanning segments and then postselect the ndarray\n selection.append([(slice(segments[0, 0], segments[-1, 1], 1),\n dim_keep - dim_keep[0], slice(0, len(dim_keep), 1))])\n segment_sizes.append([len(dim_keep)])\n else:\n # Turn each segment into a separate slice at dataset level without post-selection,\n # and construct contiguous output slices of the same segment sizes\n segm_sizes = [end - start for start, end in segments]\n segm_starts = np.cumsum([0] + segm_sizes)\n selection.append([(slice(start, end, 1), slice(None), slice(segm_starts[n], segm_starts[n + 1], 1))\n for n, (start, end) in enumerate(segments)])\n segment_sizes.append(segm_sizes)\n # Short-circuit the selection if all dimensions are selected with scalars (resulting in a scalar output)\n if segment_sizes == [[]] * ndim:\n out_data = self.dataset[tuple([select[0][0] for select in selection])]\n else:\n # Use dense N-dimensional meshgrid to slice data set into chunks, based on segments along each dimension\n chunk_indices = np.mgrid[[slice(0, len(select), 1) for select in selection]]\n # Pre-allocate output ndarray to have the correct shape and dtype (will be at least 1-dimensional)\n out_data = np.empty([np.sum(segments) for segments in segment_sizes if segments], dtype=self.dataset.dtype)\n # Iterate over chunks, extracting them from dataset and inserting them into the right spot in output array\n for chunk_index in chunk_indices.reshape(ndim, -1).T:\n # Extract chunk from dataset (don't use any advanced indexing here, only scalars and slices)\n dataset_select = tuple([select[segment][0] for select, segment in zip(selection, chunk_index)])\n chunk = self.dataset[dataset_select]\n # Perform post-selection on chunk (can be fancier / advanced indexing because chunk is now an ndarray)\n post_select = [select[segment][1] for select, segment in zip(selection, chunk_index)]\n # If any dimensions were dropped due to scalar indexing, drop them from post_select/out_select tuples\n post_select = tuple([select for select in post_select if select is not None])\n # Do post-selection one dimension at a time, as ndarray does not allow simultaneous advanced indexing\n # on more than one dimension. 
This caters for the scenario where more than one dimension satisfies\n # the Ratcliffian benchmark (the only way to get advanced post-selection).\n for dim in range(len(chunk.shape)):\n # Only do post-selection on this dimension if non-trivial (otherwise an unnecessary copy happens)\n if not (isinstance(post_select[dim], slice) and post_select[dim] == slice(None)):\n # Prepend the appropriate number of colons to the selection to place it at correct dimension\n chunk = chunk[[slice(None)] * dim + [post_select[dim]]]\n # Determine appropriate output selection and insert chunk into output array\n out_select = [select[segment][2] for select, segment in zip(selection, chunk_index)]\n out_select = tuple([select for select in out_select if select is not None])\n out_data[out_select] = chunk\n # Apply transform chain to output data, if any\n return reduce(lambda data, transform: transform(data, original_keep), self.transforms, out_data)", "def select_bounds(ds, bounds):\n \n xs = slice(bounds[0][0], bounds[1][0])\n ys = slice(bounds[1][1], bounds[0][1])\n # select over x and y axis\n return ds.sel(x=xs, y=ys)", "def filter_dsets_with_restrictions(datasets, restrictions):\n # collect the masks of the existing restrictions\n list_of_masks = [restriction.get_mask() for restriction in restrictions if restriction is not None]\n\n # create one mask from all the masks\n filtering_mask = logical_and_on_list_of_masks(list_of_masks)\n\n # apply the final mask to all the datasets in the dictonary\n if filtering_mask is not None:\n return utils.filter_dictionary_by_mask(datasets, filtering_mask)\n\n return datasets", "def spatial_subset(dataset: xr.Dataset,\n bbox: Tuple[float, float, float, float]) -> xr.Dataset:\n x1, y1, x2, y2 = bbox\n gm = GridMapping.from_dataset(dataset)\n x_name, y_name = gm.xy_dim_names\n return dataset.sel({\n x_name: slice(x1, x2),\n y_name: slice(y1, y2) if gm.is_j_axis_up else slice(y2, y1)\n })", "def test_select_roi():\n _c = io.create_sample_Dataset(n_frames=5, rows=10, cols=10)\n _c = _c.sel(x=slice(35, 70), y=slice(30, 90))\n assert _c.u.shape == (7, 2, 5) # note the last dimension is preserved", "def check_subgraph_for_data_mask_optimization(args: dict) -> bool:\n whitelist = {\n \"drop_dimension\",\n \"filter_bands\",\n \"filter_bbox\",\n \"filter_spatial\",\n \"filter_temporal\",\n \"load_collection\",\n }\n\n children_node_types = flatten_children_node_types(args[\"data\"])\n # If children_node_types exists only out of whitelisted nodes, an intersection should have no effect.\n if len(children_node_types.intersection(whitelist)) != len(children_node_types):\n return False\n\n data_children_node_names = flatten_children_node_names(args[\"data\"])\n mask_children_node_names = flatten_children_node_names(args[\"mask\"])\n if not data_children_node_names.isdisjoint(mask_children_node_names):\n # To avoid an issue in integration tests:\n _log.info(\"Overlap between data and mask node. 
Will not pre-apply mask on load_collections.\")\n return False\n\n return True", "def filter_by_match(df: pd.DataFrame, d: dict) -> np.ndarray:\n incl = np.ones([len(df)], dtype=bool)\n for k, v in d.items():\n incl = incl & (df[k] == v)\n return incl", "def any(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n consider = 0\n for name in names:\n idx = self._names.index(name)\n consider |= 1 << idx\n return (self._data & self._dtype.type(consider)) != 0", "def subsetmask(df, mask_df = get_contest_mask()):\n return pd.merge(df, mask_df, on=['lat', 'lon'], how='inner')", "def mask(self, mask):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask(mask)\n return ds_out", "def _prep_mask(dataset, trial_split):\n split_to_mask = lambda x: (dataset.trial_info.split == x) if isinstance(x, str) else x\n if isinstance(trial_split, list):\n trial_mask = np.any([split_to_mask(split) for split in trial_split], axis=0)\n else:\n trial_mask = split_to_mask(trial_split)\n return trial_mask", "def sel(\n self,\n **kwargs,\n ) -> \"Dataset\":\n res = [da.sel(**kwargs) for da in self]\n return Dataset(data=res, validate=False)", "def selectData(self, features, target, d):\n\n scan_mask, self.scan_starts, self.scan_ends = self.GetScanPositions(d)\n\n selectFeature = self.featureBits(features.astype(float), self.ifeature)\n\n self.select_mask = (scan_mask & selectFeature)\n \n return self.select_mask", "def selectData(self, xlim=None, ylim=None, alter='', offsets=False, data=None):\n if isinstance(alter, str):\n alter = ['', alter]\n # retrieves entry data\n if data is None:\n if offsets:\n x, y = self.x_offsets(alter=alter[0]), self.y_offsets(alter=alter[1])\n else:\n x, y = self.x(alter=alter[0]), self.y(alter=alter[1])\n else:\n x, y = np.array(data[0]), np.array(data[1])\n # identify boundaries\n if xlim is None:\n xlim = [min(x), max(x)]\n if ylim is None:\n ylim = [min(y), max(y)]\n # construct a data mask\n mask = np.ones(len(x), dtype=bool)\n for i in range(len(mask)):\n if x[i] < xlim[0] or x[i] > xlim[1]:\n mask[i] = False\n if y[i] < ylim[0] or y[i] > ylim[1]:\n mask[i] = False\n return x[mask], y[mask]", "def _contains(self, df: pandas.DataFrame, mapped_triples: MappedTriples, invert: bool = False) -> numpy.ndarray:\n raise NotImplementedError", "def select_cell(adata, grp_keys, grps, presel=None, mode=\"union\", output_format=\"index\"):\n if type(grp_keys) is str:\n grp_keys = [grp_keys]\n if not isarray(grps):\n grps = [grps]\n\n if len(grp_keys) == 1 and len(grps) > 1:\n grp_keys = np.repeat(grp_keys, len(grps))\n\n if mode == \"intersection\":\n pred = AlwaysTrue()\n elif mode == \"union\":\n pred = AlwaysFalse()\n else:\n raise NotImplementedError(f\"The mode {mode} is not implemented.\")\n\n for i, k in enumerate(grp_keys):\n # check if all keys in grp_keys are in adata.obs\n if k not in adata.obs.keys():\n raise Exception(f\"The group key `{k}` is not in .obs.\")\n else:\n in_grp = AnnDataPredicate(k, grps[i])\n if mode == \"intersection\":\n pred = pred & in_grp\n else:\n pred = pred | in_grp\n\n cell_idx = pred.check(adata.obs)\n\n if presel is not None:\n if np.issubsctype(presel, int):\n temp = np.zeros(adata.n_obs, dtype=bool)\n temp[presel] = True\n presel = temp\n if mode == \"intersection\":\n cell_idx = np.logical_and(presel, cell_idx)\n else:\n cell_idx = np.logical_or(presel, cell_idx)\n\n if 
output_format == \"index\":\n cell_idx = np.where(cell_idx)[0]\n elif output_format == \"mask\":\n pass\n else:\n raise NotImplementedError(f\"The output format `{output_format}` is not supported.\")\n\n return cell_idx", "def select_fits_subset(self, pre_processing=None, binning=None,\n exposure=(None, 'max'), reduction=None,\n observatory=None, jd=(None, 'exact')):\n self.data_subset = self.all_data.copy()\n\n # Select on pre-processing if provided.\n if pre_processing:\n self.data_subset = self.data_subset.loc[\n self.data_subset['PreProcessing'] == pre_processing]\n\n # Select on binning if provided.\n if binning:\n self.data_subset = self.data_subset.loc[\n self.data_subset['Binning'] == binning]\n\n # Select on exposure if provided.\n if exposure[0]:\n if exposure[1] == 'exact':\n self.data_subset = self.data_subset.loc[\n self.data_subset['Exposure'] == exposure[0]]\n elif exposure[1] == 'min':\n self.data_subset = self.data_subset.loc[\n self.data_subset['Exposure'] > exposure[0]]\n elif exposure[1] == 'max':\n self.data_subset = self.data_subset.loc[\n self.data_subset['Exposure'] < exposure[0]]\n elif exposure[1] == 'between':\n self.data_subset = self.data_subset.loc[\n (self.data_subset['Exposure'] > exposure[0][0]) &\n (self.data_subset['Exposure'] < exposure[0][1])]\n\n # Select on reduction if provided.\n if reduction:\n self.data_subset = self.data_subset.loc[\n self.data_subset['Reduction'] == reduction]\n\n # Select on observatory if provided.\n if observatory:\n self.data_subset = self.data_subset.loc[\n self.data_subset['Observatory'] == observatory]\n\n # Select on jd if provided.\n if jd[0]:\n if jd[1] == 'exact':\n if isinstance(jd[0], list):\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'].isin(jd[0])]\n else:\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'] == jd[0]]\n elif jd[1] == 'except':\n if isinstance(jd[0], list):\n self.data_subset = self.data_subset.drop(\n self.data_subset[self.data_subset[\n 'JD'].isin(jd[0])].index)\n else:\n self.data_subset = self.data_subset.drop(\n self.data_subset[self.data_subset['JD']\n == jd[0]].index)\n elif jd[1] == 'after':\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'] > jd[0]]\n elif jd[1] == 'before':\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'] < jd[0]]\n elif jd[1] == 'between':\n self.data_subset = self.data_subset.loc[\n (self.data_subset['JD'] > jd[0][0]) &\n (self.data_subset['JD'] < jd[0][1])]\n\n # Sort by JD and reset index.\n self.data_subset = self.data_subset.sort_values(\n by=['JD'], ascending=True).reset_index(drop=True)\n\n # Check spectra found:\n if len(self.data_subset) == 0:\n raise FileExistsError(\n 'No spectra found for that location or subset.')\n\n print('Selecting {} spectra from fits dataset.'.format(\n len(self.data_subset)))", "def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set", "def get_subset(df, constraints):\n for constraint in constraints:\n subset = df.loc[df[constraint[0]].isin(constraint[1])]\n df = subset\n return subset", "def subsetmask(df, mask_df):\r\n return pd.merge(df, mask_df, on=['lat', 'lon'], how='inner')", "def dictsub(subset,superset):\n\treturn all(item in superset.items() for item in subset.items())", "def gate(self, 
*dim_ranges):\n relevant_data = self.get_points(*[r.dim for r in dim_ranges])\n mins = np.array([r.min for r in dim_ranges])\n maxes = np.array([r.max for r in dim_ranges])\n test1 = np.alltrue(relevant_data >= mins, axis=1)\n test2 = np.alltrue(relevant_data <= maxes, axis=1)\n final = np.logical_and(test1, test2) \n return DataTable(self.data[final], self.dims, self.legends, self.tags.copy())", "def require(self, **names):\n for cut, v in names.items():\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n\n consider = 0\n require = 0\n for name, val in names.items():\n val = bool(val)\n idx = self._names.index(name)\n consider |= 1 << idx\n require |= int(val) << idx\n return (self._data & self._dtype.type(consider)) == require", "def selection_fn(self, trace, points, selector):\n self.segment = self.fig.layout[\"sliders\"][0].active\n seg = self.segment\n\n xrange = selector.xrange\n wave = self.wave[seg]\n mask = self.mask[seg]\n\n # Choose pixels and value depending on selected type\n if self.mask_type == \"good\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 0)\n elif self.mask_type == \"bad\":\n value = 0\n idx = (wave > xrange[0]) & (wave < xrange[1])\n elif self.mask_type == \"line\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask != 0)\n print(np.count_nonzero(idx))\n elif self.mask_type == \"cont\":\n value = 2\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 1)\n else:\n return\n\n # Apply changes if any\n if np.count_nonzero(idx) != 0:\n self.mask[seg][idx] = value\n\n with self.fig.batch_update():\n # Update Line Mask\n m = self.line_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 1\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y\n\n # Update Cont Mask\n m = self.cont_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 2\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y", "def apply_selection(selection, dataset):\n for seq in walk(dataset, SequenceType):\n # apply only relevant selections\n conditions = [condition for condition in selection\n if re.match('%s\\.[^\\.]+(<=|<|>=|>|=|!=)' % re.escape(seq.id), condition)]\n for condition in conditions:\n id1, op, id2 = parse_selection(condition, dataset)\n seq.data = seq[ op(id1, id2) ].data\n return dataset", "def match_mask(self, mask, key):\n #dt = self.dtypes[key]\n for ti, tac in enumerate(mask):\n if tac == '?': continue # wildcard matches all keys\n tacs = self._expand_theme(self._themes[ti], tac)\n if key[ti] not in tacs: return False # reject key\n return True # key matches", "def _dataset_match(geno, dataset):\n return all(dataset[k] == v for (k, v) in _dataset_fields(geno).items())" ]
[ "0.7402568", "0.7394645", "0.59214824", "0.5817601", "0.56964636", "0.56530863", "0.55419517", "0.54942536", "0.54495037", "0.5437994", "0.5406027", "0.53977627", "0.5370405", "0.5368601", "0.53328323", "0.5330212", "0.53261733", "0.5287541", "0.52621627", "0.52252233", "0.5222055", "0.5217804", "0.5208142", "0.5206405", "0.5197292", "0.5191291", "0.5188051", "0.51807", "0.5180619", "0.51784825" ]
0.7668252
0
Given a Dataset object and selection to be applied returns boolean to indicate whether a scalar value has been indexed.
def indexed(cls, dataset, selection): selected = list(selection.keys()) all_scalar = all((not isinstance(sel, (tuple, slice, set, list)) and not callable(sel)) for sel in selection.values()) all_kdims = all(d in selected for d in dataset.kdims) return all_scalar and all_kdims
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_scalar(self, indexable, axis):\n index = self._obj.index\n complete_key = False\n partial_key = False\n duplicated_key = False\n if axis == 0 and self._has_fancy_index():\n try:\n if type(indexable) is tuple:\n complete_key = (len(indexable) == len(index.levshape) and\n indexable in index)\n partial_key = not complete_key and indexable in index\n except TypeError: # Unhashable type, no biggie\n pass\n if index.has_duplicates:\n duplicated_key = indexable in index.get_duplicates()\n return (not duplicated_key and\n ((np.isscalar(indexable) and not partial_key) or complete_key))", "def __contains__(self, idx):\n return idx in self._data", "def is_scalar(self):", "def __contains__(self, item):\n return item in self.default_dataset", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())", "def _isIndexedDataframe(self, dataframe):\n return len(dataframe.index.names) > 1 or not dataframe.index.names[0] is None", "def select(condition: Union[Callable, int], meta: Counter) -> bool:\n if condition is None:\n return True\n elif isinstance(condition, int):\n return sum(meta.values()) == condition\n elif callable(condition):\n if not isinstance(condition(meta), bool):\n raise TypeError('selection condition expected to return a boolean')\n return condition(meta)\n return False", "def is_satisfied(self, data):\n if self.__col == None and self.__val == None:\n return True\n \n return data[0, self.__col] == self.__val", "def is_dataset(self):\n return self._dataset is not None", "def is_scalar(self):\n return len(self.coeffs.shape[self.sdim:]) == 0", "def isScalar(self) -> bool:\n\n indices = list(range(self.layout.gaDims))\n indices.remove(self.layout.gradeList.index(0))\n\n for i in indices:\n if abs(self.value[i]) < _eps:\n continue\n else:\n return False\n\n return True", "def any(self, *names):\n for cut in names:\n if not isinstance(cut, str) or cut not in self._names:\n raise ValueError(\n \"All arguments must be strings that refer to the names of existing selections\"\n )\n consider = 0\n for name in names:\n idx = self._names.index(name)\n consider |= 1 << idx\n return (self._data & self._dtype.type(consider)) != 0", "def test_sparse(self):\n # pretty much the same as normal dataframe, but with type checking\n\n # test series selection\n selector = ItemSelector('a')\n data = pd.SparseDataFrame({'a': [1, 2, 3], 'b': [6, 5, 4], 'c': [1, 0, 1]})\n\n selected_data = selector.transform(data)\n expected_data = pd.Series([1, 2, 3])\n self.assertIsInstance(selected_data, pd.SparseSeries) # check datatype\n pd.testing.assert_series_equal(selected_data, expected_data, check_names=False)\n\n # test dataframe selection\n selector = ItemSelector(['a'])\n data = pd.SparseDataFrame({'a': [1, 2, 3], 'b': [6, 5, 4], 'c': [1, 0, 1]})\n\n selected_data = selector.transform(data)\n expected_data = pd.SparseDataFrame({'a': [1, 2, 3]})\n self.assertIsInstance(selected_data, pd.SparseDataFrame) # check\n pd.testing.assert_frame_equal(selected_data, expected_data)\n\n # test dataframe selection multiple cols\n selector = ItemSelector(['a', 'c'])\n data = pd.SparseDataFrame({'a': [1, 2, 3], 'b': [6, 5, 4], 'c': [1, 0, 1]})\n\n selected_data = selector.transform(data)\n expected_data = pd.SparseDataFrame({'a': [1, 2, 3], 'c': [1, 0, 1]})\n self.assertIsInstance(selected_data, 
pd.SparseDataFrame) # check datatype\n pd.testing.assert_frame_equal(selected_data, expected_data)", "def _dataset_match(geno, dataset):\n return all(dataset[k] == v for (k, v) in _dataset_fields(geno).items())", "def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False", "def vectorized(self):\n return False", "def is_scalar(obj: _std_typing.Any) -> bool:\n return obj.ndim == 0", "def is_scalar_assign(self):\n return self.is_scalar and not self.is_Increment", "def test_scalar_index(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == ()", "def is_indexed(self):\r\n return self._indexed", "def is_series_like(self):\n return len(self.columns) == 1 or len(self.index) == 1", "def is_scalar(x: Any) -> bool:\r\n return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)", "def select_mask(cls, dataset, selection):\n mask = np.ones(len(dataset), dtype=np.bool_)\n for dim, sel in selection.items():\n if isinstance(sel, tuple):\n sel = slice(*sel)\n arr = cls.values(dataset, dim)\n if util.isdatetime(arr):\n try:\n sel = util.parse_datetime_selection(sel)\n except Exception:\n pass\n if isinstance(sel, slice):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', r'invalid value encountered')\n if sel.start is not None:\n mask &= sel.start <= arr\n if sel.stop is not None:\n mask &= arr < sel.stop\n elif isinstance(sel, (set, list)):\n iter_slcs = []\n for ik in sel:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', r'invalid value encountered')\n iter_slcs.append(arr == ik)\n mask &= np.logical_or.reduce(iter_slcs)\n elif callable(sel):\n mask &= sel(arr)\n else:\n index_mask = arr == sel\n if dataset.ndims == 1 and np.sum(index_mask) == 0:\n data_index = np.argmin(np.abs(arr - sel))\n mask = np.zeros(len(dataset), dtype=np.bool_)\n mask[data_index] = True\n else:\n mask &= index_mask\n return mask", "def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean", "def _contains(self, df: pandas.DataFrame, mapped_triples: MappedTriples, invert: bool = False) -> numpy.ndarray:\n raise NotImplementedError", "def has_index(self):\n return self.index is not None", "def has(self, id_):\n with self._db_connection() as connection:\n return connection.contains_dataset(id_)", "def is_scalar(val,\n include_np: bool = True,\n include_torch: bool = True) -> bool:\n if isinstance(val, numbers.Number):\n return True\n elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:\n return True\n elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:\n return True\n else:\n return False", "def has(self, *args):\n return _ida_hexrays.qvector_ccase_t_has(self, *args)" ]
[ "0.6400813", "0.61126477", "0.57768244", "0.56953377", "0.56712884", "0.5666469", "0.56647056", "0.565334", "0.56435555", "0.56070197", "0.5548717", "0.5503382", "0.5455719", "0.54446787", "0.54330516", "0.5416925", "0.53629106", "0.5330685", "0.5303776", "0.52907795", "0.5289607", "0.52609926", "0.5260558", "0.52486145", "0.523814", "0.5235922", "0.5213682", "0.5201631", "0.52013826", "0.51974565" ]
0.6942563
0
Utility function to concatenate an NdMapping of Dataset objects.
def concatenate(cls, datasets, datatype=None, new_type=None): from . import Dataset, default_datatype new_type = new_type or Dataset if isinstance(datasets, NdMapping): dimensions = datasets.kdims keys, datasets = zip(*datasets.data.items()) elif isinstance(datasets, list) and all(not isinstance(v, tuple) for v in datasets): # Allow concatenating list of datasets (by declaring no dimensions and keys) dimensions, keys = [], [()]*len(datasets) else: raise DataError('Concatenation only supported for NdMappings ' 'and lists of Datasets, found %s.' % type(datasets).__name__) template = datasets[0] datatype = datatype or template.interface.datatype # Handle non-general datatypes by casting to general type if datatype == 'array': datatype = default_datatype elif datatype == 'image': datatype = 'grid' if len(datasets) > 1 and not dimensions and cls.interfaces[datatype].gridded: raise DataError('Datasets with %s datatype cannot be concatenated ' 'without defining the dimensions to concatenate along. ' 'Ensure you pass in a NdMapping (e.g. a HoloMap) ' 'of Dataset types, not a list.' % datatype) datasets = template.interface.cast(datasets, datatype) template = datasets[0] data = list(zip(keys, datasets)) if keys else datasets concat_data = template.interface.concat(data, dimensions, vdims=template.vdims) return template.clone(concat_data, kdims=dimensions+template.kdims, new_type=new_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatDic(dic1, dic2):\n pass", "def concatenate(data, dim=0):\n if isinstance(data[0], (tuple, list)):\n return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))\n elif isinstance(data[0], Mapping):\n return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})\n elif not isinstance(data[0], torch.Tensor):\n raise TypeError(f\"Can only concatenate tensors but got {type(data[0])}\")\n return torch.cat(data, dim=dim)", "def concatenate_data():", "def ConnectDataSets(dictin, N, xlabel='t'):\n outdict = {}\n for key, value in dictin.iteritems():\n if key!=xlabel:\n outdict[key]=StringMatrix(value,N)\n nro=shape(outdict[key])[0]\n outdict[xlabel]=MakeLongTime(dictin[xlabel],nro)\n return outdict", "def Concat(datasets):\n\n dataset_num = len(datasets)\n dataset = datasets[0]\n for i in range(1, dataset_num):\n dataset.concatenate(datasets[i])\n return dataset", "def concat_dict(d1, d2):\n if d1 is None:\n return d2\n if d2 is None:\n return d1\n else:\n assert set(d1.keys()) == set(d2.keys())\n return {k: np.concatenate([d1[k], d2[k]], axis=0) for k in d1}", "def combine_data(data_files_dict):\n key_list = list(data_files_dict.keys())\n no_col = len(data_files_dict[key_list[0]])\n combined = []\n for n in range(0, no_col):\n d = np.empty(shape=[0, 1])\n for k in data_files_dict:\n d = np.append(d, data_files_dict[k][n])\n combined.append(d)\n return combined", "def concat(cls, objects, dimension=None):\n if len(objects) == 1:\n return objects[0]\n\n new_data = cls()\n new_data.attrs.update(objects[0].attrs)\n for var in objects[0]:\n if isinstance(objects[0][var], cls):\n new_data[var] = cls.concat(\n [obj[var] for obj in objects],\n dimension)\n else:\n if dimension is None:\n dimension = 0\n new_data[var] = np.concatenate(\n [obj[var] for obj in objects],\n dimension)\n\n return new_data", "def concat_dicts(self, dict1, dict2):\n result = dict()\n for key, value in dict1.items():\n if len(value.shape) == 1:\n result[key] = np.concatenate([value, dict2[key]])\n else:\n result[key] = np.vstack([value, dict2[key]])\n return result", "def coadd_maps(geom, maps, preserve_counts=True):\n\n # FIXME: This functionality should be built into the Map.coadd method\n map_out = gammapy.maps.Map.from_geom(geom)\n for m in maps:\n m_tmp = m\n if isinstance(m, gammapy.maps.HpxNDMap):\n if m.geom.order < map_out.geom.order:\n factor = map_out.geom.nside // m.geom.nside\n m_tmp = m.upsample(factor, preserve_counts=preserve_counts)\n map_out.coadd(m_tmp)\n\n return map_out", "def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds", "def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:\n return self.__class__([self, other])", "def append_predictions(ds, predictions):\n def _append_predictions(x, p):\n return dict(x, prediction=p)\n predictions_ds = tf.data.Dataset.from_tensor_slices(predictions)\n return (tf.data.Dataset\n .zip((ds, predictions_ds))\n .map(_append_predictions, num_parallel_calls=TF_AUTOTUNE))", "def concat(column_based_table_1: dict[str, list[str]], column_based_table_2: dict[str, list[str]]) -> dict[str, list[str]]:\n combined_data_table: dict[str, list[str]] = {}\n for column in column_based_table_1:\n combined_data_table[column] = column_based_table_1[column]\n keys_list = list(combined_data_table.keys())\n for column in column_based_table_2:\n if column in 
keys_list:\n column_data = combined_data_table[column]\n column_data_2 = column_based_table_2[column]\n # append to list\n for item in column_data_2:\n column_data.append(item)\n combined_data_table[column] = column_data\n else:\n combined_data_table[column] = column_based_table_2[column]\n return combined_data_table", "def _merge_mapper(mapper1, mapper2):\n if len(mapper1) > 0:\n if len(mapper2) > 0:\n clusters1 = mapper1['cluster']\n clusters2 = mapper2['cluster']\n clusters = np.unique(np.concatenate((clusters1, clusters2), 0))\n\n mapper1['cluster'] = clusters\n mapper1['links'] += mapper2['links']\n else:\n mapper1 = mapper2\n return mapper1", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def coco_union(dsets):\n merged = ub.odict([\n ('categories', []),\n ('licenses', []),\n ('info', []),\n ('images', []),\n ('annotations', []),\n ])\n\n merged_cat_name_to_id = {}\n\n def update_ifnotin(d1, d2):\n \"\"\" copies keys from d2 that doent exist in d1 into d1 \"\"\"\n for k, v in d2.items():\n if k not in d1:\n d1[k] = v\n return d1\n\n for key, old_dset in dsets.items():\n # hack: in our case the key is the subdir\n subdir = key\n\n # Create temporary indexes to map from old to new\n cat_id_map = {}\n img_id_map = {}\n\n # Add the licenses / info into the merged dataset\n # Licenses / info are unused in our datas, so this might not be correct\n merged['licenses'].extend(old_dset['licenses'])\n merged['info'].extend(old_dset['info'])\n\n # Add the categories into the merged dataset\n for old_cat in old_dset['categories']:\n new_id = merged_cat_name_to_id.get(old_cat['name'], None)\n if new_id is None:\n # The same category might exist in different datasets.\n new_id = len(merged_cat_name_to_id) + 1\n merged_cat_name_to_id[old_cat['name']] = new_id\n\n new_cat = ub.odict([\n ('id', new_id),\n ('name', old_cat['name']),\n ('supercategory', old_cat['supercategory']),\n ])\n 
update_ifnotin(new_cat, old_cat)\n cat_id_map[old_cat['id']] = new_cat['id']\n merged['categories'].append(new_cat)\n\n # Add the images into the merged dataset\n for old_img in old_dset['images']:\n new_img = ub.odict([\n ('id', len(merged['images']) + 1),\n ('file_name', join(subdir, old_img['file_name'])),\n ])\n # copy over other metadata\n update_ifnotin(new_img, old_img)\n img_id_map[old_img['id']] = new_img['id']\n merged['images'].append(new_img)\n\n # Add the annotations into the merged dataset\n for old_annot in old_dset['annotations']:\n old_cat_id = old_annot['category_id']\n old_img_id = old_annot['image_id']\n new_cat_id = cat_id_map.get(old_cat_id, None)\n new_img_id = img_id_map.get(old_img_id, None)\n if new_cat_id is None:\n continue\n print('annot {} in {} has bad category-id {}'.format(old_annot['id'], key, old_cat_id))\n if new_img_id is None:\n continue\n print('annot {} in {} has bad image-id {}'.format(old_annot['id'], key, old_img_id))\n new_annot = ub.odict([\n ('id', len(merged['annotations']) + 1),\n ('image_id', new_img_id),\n ('category_id', new_cat_id),\n ])\n update_ifnotin(new_annot, old_annot)\n merged['annotations'].append(new_annot)\n return merged", "def combine_data_main(data1,data2,lookup,foutput):\n\n # Get the maximum number of ortholog probesets we'll have to append\n max_orthologs = 0\n for probe_set_id in data1.keys():\n max_orthologs = max(max_orthologs,len(lookup(probe_set_id)))\n logging.debug(\"Max_orthologs = %d\" % max_orthologs)\n \n # Write header line\n line = [data1.header()]\n for i in range(1,max_orthologs+1):\n logging.debug(\"Adding header set #%d\" % i)\n for item in data2.header().split('\\t'): line.append(\"%s_%s\" % (item,i))\n foutput.write(\"%s\\n\" % '\\t'.join(line))\n\n # Append data\n for probe_set_id in data1.keys():\n # Build line to output to file\n line = [data1.fetch(probe_set_id)]\n # Get the corresponding ortholog probe set ID(s)\n logging.debug(\"Processing probe set ID %s\" % probe_set_id)\n for ortholog_probe_set_id in lookup(probe_set_id):\n ortholog_data = data2.fetch(ortholog_probe_set_id)\n if ortholog_data is not None:\n line.append(ortholog_data)\n # Write line to file\n foutput.write(\"%s\\n\" % '\\t'.join(line))", "def init_datasets(self, data_dict, label_dict):\n splits = data_dict.keys()\n dataset_dict = {\n key: ArrayDataset(data_dict[key], torch.LongTensor(label_dict[key]))\n for key in splits\n }\n return dataset_dict", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def init_datasets(self, data_dict, label_dict, **kwargs):\n return {\n key: TensorDataset(\n torch.FloatTensor(data_dict[key]), torch.LongTensor(label_dict[key])\n )\n for key in data_dict.keys()\n }", "def concatenate_dictionary_keys(*dictionaries):\n concat_dict = []\n for entry in dictionaries:\n concat_dict.append(np.concatenate(list(entry[i] for i in entry)))\n return concat_dict", "def copy_nc_attrs(src, dest):\n with xarray.open_dataset(src) as s:\n attrs = s.attrs\n # Write empty root dataset with attributes\n ds = xarray.Dataset(attrs=attrs)\n ds.to_netcdf(dest, mode=\"a\")", "def msetnx(self, mapping):\r\n items = 
[]\r\n [items.extend(pair) for pair in mapping.iteritems()]\r\n return self.format_multi_bulk('MSETNX', *items)", "def combine_all(self):\n if self._train_only:\n return\n\n combined = copy.deepcopy(self.train)\n\n # relabel pids in gallery (query shares the same scope)\n g_pids = set()\n for items in self.gallery:\n pid = items[1]\n if pid in self._junk_pids:\n continue\n g_pids.add(pid)\n pid2label = {pid: i for i, pid in enumerate(g_pids)}\n\n def _combine_data(data):\n for img_path, pid, camid, dsetid in data:\n if pid in self._junk_pids:\n continue\n pid = pid2label[pid] + self.num_train_pids\n combined.append((img_path, pid, camid, dsetid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def init_datasets(self, data_dict, label_dict):\n splits = data_dict.keys()\n dataset_dict = {\n key: ArrayDataset(\n data_dict[key], torch.LongTensor(label_dict[key]), convert_sparse=False\n )\n for key in splits\n }\n return dataset_dict", "def _umap_concat(data, **umap_kwargs):\n data_tiles = []\n for i in range(5):\n data_i = slice_vec_bands(data, start=i, end=i + 1)\n data_tiles.append(umap.UMAP(**umap_kwargs).fit_transform(data_i))\n\n data_concat = numpy.empty((\n data_tiles[0].shape[0],\n sum(dt.shape[1] for dt in data_tiles)\n ))\n\n start_col = 0\n for dt in data_tiles:\n end_col = start_col + dt.shape[1]\n data[:, start_col:end_col] = dt\n start_col = end_col\n\n return data_concat", "def concat_from_dict(d, keyvar):\n for e in d.keys():\n if keyvar is not None:\n aux = d[e]\n del d[e]\n Reg = pd.DataFrame(e, index=aux.index, columns=[keyvar])\n d[e] = pd.concat([aux, Reg], axis=1)\n d = pd.concat(list(d.values()))\n return d", "def concatenate(tensors, axis=0):\n raise NotImplementedError", "def concat(objs, dim='concat_dim', indexers=None, mode='different',\n concat_over=None, compat='equals'):\n # TODO: add join and ignore_index arguments copied from pandas.concat\n # TODO: support concatenating scaler coordinates even if the concatenated\n # dimension already exists\n try:\n first_obj, objs = utils.peek_at(objs)\n except StopIteration:\n raise ValueError('must supply at least one object to concatenate')\n cls = type(first_obj)\n return cls._concat(objs, dim, indexers, mode, concat_over, compat)" ]
[ "0.6320997", "0.611885", "0.6035339", "0.6027557", "0.5929903", "0.5878462", "0.5772045", "0.5706965", "0.5619259", "0.5577487", "0.54758805", "0.54367036", "0.5373829", "0.5343098", "0.52893007", "0.5261989", "0.52427924", "0.52400863", "0.5187234", "0.5172838", "0.5171507", "0.515818", "0.5145577", "0.5138804", "0.51340663", "0.512344", "0.5120216", "0.5110777", "0.51048136", "0.50826585" ]
0.697596
0
Returns the data of a Dataset as a dataframe, avoiding copying if it is already a dataframe type.
def as_dframe(cls, dataset): return dataset.dframe()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dataframe(dataset: LGBMTypes.DatasetType) -> pd.DataFrame:\n if isinstance(dataset, lgb.Dataset):\n x = LGBMUtils.to_dataframe(dataset=dataset.data)\n if dataset.label is None:\n return x\n y = LGBMUtils.to_dataframe(dataset=dataset.label)\n return LGBMUtils.concatenate_x_y(x=x, y=y)[0]\n try:\n return MLUtils.to_dataframe(dataset=dataset)\n except mlrun.errors.MLRunInvalidArgumentError:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"Could not convert the given dataset into a pandas DataFrame. Supporting conversion from: \"\n f\"{LGBMUtils.get_union_typehint_string(LGBMTypes.DatasetType)}. The given dataset was of type: \"\n f\"'{type(dataset)}'\"\n )", "def get_dataset( # noqa: C901, PLR0912, PLR0913, PLR0915\n dataset_type,\n data,\n schemas=None,\n profiler=ColumnsExistProfiler,\n caching=True,\n table_name=None,\n sqlite_db_path=None,\n):\n df = pd.DataFrame(data)\n if dataset_type == \"PandasDataset\":\n if schemas and \"pandas\" in schemas:\n schema = schemas[\"pandas\"]\n pandas_schema = {}\n for key, value in schema.items():\n # Note, these are just names used in our internal schemas to build datasets *for internal tests*\n # Further, some changes in pandas internal about how datetimes are created means to support pandas\n # pre- 0.25, we need to explicitly specify when we want timezone.\n\n # We will use timestamp for timezone-aware (UTC only) dates in our tests\n if value.lower() in [\"timestamp\", \"datetime64[ns, tz]\"]:\n df[key] = pd.to_datetime(df[key], utc=True)\n continue\n elif value.lower() in [\"datetime\", \"datetime64\", \"datetime64[ns]\"]:\n df[key] = pd.to_datetime(df[key])\n continue\n elif value.lower() in [\"date\"]:\n df[key] = pd.to_datetime(df[key]).dt.date\n value = \"object\" # noqa: PLW2901\n try:\n type_ = np.dtype(value)\n except TypeError:\n # noinspection PyUnresolvedReferences\n type_ = getattr(pd, value)()\n pandas_schema[key] = type_\n # pandas_schema = {key: np.dtype(value) for (key, value) in schemas[\"pandas\"].items()}\n df = df.astype(pandas_schema)\n return PandasDataset(df, profiler=profiler, caching=caching)\n\n elif dataset_type == \"SparkDFDataset\":\n spark_types = {\n \"StringType\": pyspark.types.StringType,\n \"IntegerType\": pyspark.types.IntegerType,\n \"LongType\": pyspark.types.LongType,\n \"DateType\": pyspark.types.DateType,\n \"TimestampType\": pyspark.types.TimestampType,\n \"FloatType\": pyspark.types.FloatType,\n \"DoubleType\": pyspark.types.DoubleType,\n \"BooleanType\": pyspark.types.BooleanType,\n \"DataType\": pyspark.types.DataType,\n \"NullType\": pyspark.types.NullType,\n }\n spark = get_or_create_spark_application(\n spark_config={\n \"spark.sql.catalogImplementation\": \"hive\",\n \"spark.executor.memory\": \"450m\",\n # \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.\n }\n )\n # We need to allow null values in some column types that do not support them natively, so we skip\n # use of df in this case.\n data_reshaped = list(\n zip(*(v for _, v in data.items()))\n ) # create a list of rows\n if schemas and \"spark\" in schemas:\n schema = schemas[\"spark\"]\n # sometimes first method causes Spark to throw a TypeError\n try:\n spark_schema = pyspark.types.StructType(\n [\n pyspark.types.StructField(\n column, spark_types[schema[column]](), True\n )\n for column in schema\n ]\n )\n # We create these every time, which is painful for testing\n # However nuance around null treatment as well as the desire\n # for real datetime support in tests makes 
this necessary\n data = copy.deepcopy(data)\n if \"ts\" in data:\n print(data)\n print(schema)\n for col in schema:\n type_ = schema[col]\n if type_ in [\"IntegerType\", \"LongType\"]:\n # Ints cannot be None...but None can be valid in Spark (as Null)\n vals = []\n for val in data[col]:\n if val is None:\n vals.append(val)\n else:\n vals.append(int(val))\n data[col] = vals\n elif type_ in [\"FloatType\", \"DoubleType\"]:\n vals = []\n for val in data[col]:\n if val is None:\n vals.append(val)\n else:\n vals.append(float(val))\n data[col] = vals\n elif type_ in [\"DateType\", \"TimestampType\"]:\n vals = []\n for val in data[col]:\n if val is None:\n vals.append(val)\n else:\n vals.append(parse(val))\n data[col] = vals\n # Do this again, now that we have done type conversion using the provided schema\n data_reshaped = list(\n zip(*(v for _, v in data.items()))\n ) # create a list of rows\n spark_df = spark.createDataFrame(data_reshaped, schema=spark_schema)\n except TypeError:\n string_schema = pyspark.types.StructType(\n [\n pyspark.types.StructField(column, pyspark.types.StringType())\n for column in schema\n ]\n )\n spark_df = spark.createDataFrame(data_reshaped, string_schema)\n for c in spark_df.columns:\n spark_df = spark_df.withColumn(\n c, spark_df[c].cast(spark_types[schema[c]]())\n )\n elif len(data_reshaped) == 0:\n # if we have an empty dataset and no schema, need to assign an arbitrary type\n columns = list(data.keys())\n spark_schema = pyspark.types.StructType(\n [\n pyspark.types.StructField(column, pyspark.types.StringType())\n for column in columns\n ]\n )\n spark_df = spark.createDataFrame(data_reshaped, spark_schema)\n else:\n # if no schema provided, uses Spark's schema inference\n columns = list(data.keys())\n spark_df = spark.createDataFrame(data_reshaped, columns)\n return SparkDFDataset(spark_df, profiler=profiler, caching=caching)\n else:\n warnings.warn(f\"Unknown dataset_type {dataset_type!s}\")", "def _get_data_as_df(self, data):\n if isinstance(data, pd.DataFrame):\n return data\n if isinstance(data, dict):\n data = [data]\n data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=data,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n self._original_df_dtypes = original_df_dtypes\n return data", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def _read_dataset_as_dataframe(\n dataset: DatasetType,\n label_columns: Union[str, List[str]] = None,\n drop_columns: Union[str, List[str], int, List[int]] = None,\n) -> Tuple[pd.DataFrame, List[str]]:\n # Turn the `drop labels` into a list if given:\n if drop_columns is not None:\n if not isinstance(drop_columns, list):\n drop_columns = [drop_columns]\n\n # Check if the dataset is in fact a Feature Vector:\n store_uri_prefix, _ = mlrun.datastore.parse_store_uri(dataset.artifact_url)\n if mlrun.utils.StorePrefix.FeatureVector == store_uri_prefix:\n # Try to get the label columns if not provided:\n if label_columns is None:\n label_columns = dataset.meta.status.label_column\n # Get the features and parse to DataFrame:\n dataset = fs.get_offline_features(\n dataset.meta.uri, drop_columns=drop_columns\n ).to_dataframe()\n else:\n # Parse to DataFrame according to the dataset's type:\n if isinstance(dataset, (list, np.ndarray)):\n # Parse the list / numpy array into a DataFrame:\n dataset = pd.DataFrame(dataset)\n # Validate the `drop_columns` is given as integers:\n if drop_columns and not all(isinstance(col, int) 
for col in drop_columns):\n raise mlrun.errors.MLRunInvalidArgumentError(\n \"`drop_columns` must be an integer / list of integers if provided as a list.\"\n )\n elif isinstance(dataset, mlrun.DataItem):\n # Turn the DataITem to DataFrame:\n dataset = dataset.as_df()\n else:\n # Parse the object (should be a pd.DataFrame / pd.Series, dictionary) into a DataFrame:\n try:\n dataset = pd.DataFrame(dataset)\n except ValueError as e:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"Could not parse the given dataset of type {type(dataset)} into a pandas DataFrame. \"\n f\"Received the following error: {e}\"\n )\n # Drop columns if needed:\n if drop_columns:\n dataset.drop(drop_columns, axis=1, inplace=True)\n\n # Turn the `label_columns` into a list by default:\n if label_columns is None:\n label_columns = []\n elif isinstance(label_columns, (str, int)):\n label_columns = [label_columns]\n\n return dataset, label_columns", "def data_frame(self, arg=None):\n if arg is None:\n return pl.DataFrame()\n if isinstance(arg, pl.DataFrame):\n return arg\n if isinstance(arg, pl.LazyFrame):\n return arg.collect()\n return pl.DataFrame(arg)", "def _to_dataframe(self, dataset_name):\n values = self[dataset_name][:]\n columns = self.get_columns(dataset_name)\n timestamps = self.get_timestamps(dataset_name)[...]\n if len(columns) < values.shape[1]:\n columns.resize(values.shape[1])\n\n # transform missing data into NaNs\n mask = missing_values(values) != 0\n try:\n values[mask] = numpy.nan\n except ValueError: # ValueError: cannot convert float NaN to integer\n # don't bother converting non-float arrays' -0.0 into NaNs\n pass\n\n dataframe = pandas.DataFrame(data=values,\n index=[datetime.datetime.fromtimestamp(t) for t in timestamps],\n columns=columns)\n return dataframe", "def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe", "def to_dataframe(self, dataset_name):\n if self.get_version(dataset_name=dataset_name) is None:\n return self._to_dataframe_h5lmt(dataset_name)\n return self._to_dataframe(dataset_name)", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def getDataFrame(self):\n return self.df", "def to_pandas_df(self):\n data = self._get_data(pd=True)\n return data", "def dataframe(self):\n return self._df", "def get_dataset() -> pd.DataFrame:\n\n data = load_iris(as_frame=True)\n\n dataset = data.frame\n dataset.rename(\n columns=lambda colname: colname.strip(' (cm)').replace(' ', '_'),\n inplace=True\n )\n\n return dataset", "def to_pandas(self):\n dataframe = self.get().to_pandas()\n assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series\n\n return dataframe", "def get_data(self)->pd.DataFrame:\n pass", "def data_frame_creator(self):\n\n return pd.DataFrame()", "def dataframe(self):\n\t\treturn self._dataframe", "def as_DataFrame (self):\n return DataFrame(self.table)", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = 
pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def dataframe(self, *args, **kwargs):\n\n from pandas import DataFrame\n\n # Just normal data, so use the iterator in this object.\n headers = next(islice(self, 0, 1))\n data = islice(self, 1, None)\n\n return DataFrame(list(data), columns=headers)", "def to_dataset(self):\n return self._data._copy_listed(self._names)", "def dataframe(self) -> pyspark.DataFrame:\n if self.batch_manager.active_batch_data is None:\n raise ValueError(\n \"Batch has not been loaded - please run load_batch() to load a batch.\"\n )\n\n return cast(SparkDFBatchData, self.batch_manager.active_batch_data).dataframe", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def data_pandas(detections):\n return DataWrapperPandas(detections, duplicates_radius=1)", "def as_frame(df_like: DataFrameLike) -> pd.DataFrame:\n try:\n return df_like.to_frame()\n except AttributeError:\n return df_like", "def data(self):\n return self.as_named_DataFrame()", "def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df", "def qset_to_df(qset, datatype='object'):\n df = pd.DataFrame(list(qset.values()), dtype=datatype)\n return df" ]
[ "0.74988043", "0.71751237", "0.7006334", "0.69861305", "0.6919226", "0.6832426", "0.68180156", "0.6808118", "0.6801346", "0.67590004", "0.6756455", "0.6708862", "0.66681975", "0.6589894", "0.6567048", "0.6565977", "0.65588295", "0.65258104", "0.64525884", "0.6371686", "0.63464093", "0.63352644", "0.63303465", "0.63288456", "0.6269317", "0.62371993", "0.623678", "0.6224202", "0.62122744", "0.61806" ]
0.7543955
0
parse the fec verity data to give the verity metadata and verity hash data offset and length
def parse_fec_verity_data(b_file,b_offset,b_len): f = open(b_file, 'rb') system_total_sz=getsize(b_file) #read the beginning FEC_HEADER_SZ bytes at last block f.seek(-FEC_BLOCK_SZ, 2) header_bin = f.read(FEC_HEADER_SZ) (magic, version, header_sz, roots, size, inp_size) = struct.unpack("<5IQ", header_bin) verity_metadata_len=FEC_VERITY_METADATA_SZ verity_metadata_offset=0 verity_hash_offset=0 verity_hash_len=0 if magic == FEC_MAGIC: fec_supported = True verity_metadata_len=FEC_VERITY_METADATA_SZ verity_metadata_offset=system_total_sz-FEC_BLOCK_SZ-size-verity_metadata_len verity_hash_offset=b_offset verity_hash_len=int(b_len)-verity_metadata_len-size-FEC_BLOCK_SZ else: fec_supported = False return (fec_supported,verity_metadata_offset,verity_metadata_len,verity_hash_offset,verity_hash_len)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parseHeader(self):\n # Big or little endian for the header.\n self._getEndianess()\n # Read the fixed header.\n self._readFixedHeader()\n # Get the present blockettes.\n self._getBlockettes()\n # Calculate the starttime.\n self._calculateStarttime()", "def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def _read_old_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.version = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.revision = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 26\n self.date = struct.unpack('<26s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.file_format = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n 
raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.original_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.reference_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_a = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_b = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_c = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_d = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 82\n self.annotate = struct.unpack('<82s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_model = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_serial_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.software_version_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.crystal_material = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_wavelength_microns = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.laser_null_doubling = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.optical_ratio = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xc = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xm = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xb = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_size = struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.acquire_mode = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.emissivity = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.apodization = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.zero_fill = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.run_time_math = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.fft_size = struct.unpack('<h',\n 
raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_coadds = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_igrams = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.amb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.inst_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.wbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.cbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 20\n self.spare_i = struct.unpack('<hhhhhhhhhh',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_f = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_l = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 65\n self.spare_na = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nb = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nc = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nd = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_ne = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size", "def parse(self):\n\n\t\t# Open and parse the file\n\t\twith open(self.name, 'r') as fdi:\n\t\t\tfor line in fdi:\n\t\t\t\twords = [word for word in line.split(' ') if word != ' ' and word != ':']\n\t\t\t\t\n\t\t\t\t# Store the data in the hash\n\t\t\t\tself.data_hash[int(words[0], 16)] = int(words[1])\n\n\t\t# Sort the dictionary by addresses\n\t\tself.data_hash = od(sorted(self.data_hash.items(), key = lambda t : t[0]))\n\n\t\tprint 'Total Bytes :', float(sum(self.data_hash.values())) / 1024 / 1024\n\n\t\treturn", "def _read_new_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.version = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.revision = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 28\n self.date = struct.unpack('<28s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_format = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.original_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.reference_file_name = 
struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_a = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_b = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_c = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 84\n self.annotate = struct.unpack('<84s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_model = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_serial_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.software_version_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.crystal_material = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_wavelength_microns = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_null_doubling = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.padding = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xc = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xm = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xb = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.num_chan = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.interferogram_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.scan_direction = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.acquire_mode = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.emissivity = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.apodization = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.zero_fill = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.run_time_math = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.fft_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.number_of_coadds = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.single_sided = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.chan_display = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += 
data_size\n\n data_size = 8\n self.amb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.inst_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.wbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.cbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.temperature_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.emissivity_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 40\n self.spare_i = struct.unpack('<llllllllll',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 80\n self.spare_f = struct.unpack('<dddddddddd',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 68\n self.spare_na = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nb = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nc = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nd = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_ne = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size", "def _decode(self):\n \n self.version = int(data_to_hex_str(self.packet[0])[2])\n self.header_len = int(data_to_hex_str(self.packet[0])[3]) * 4\n self.type_of_service = data_to_hex_str(self.packet[1:2])\n self.total_len = int(data_to_hex_str(self.packet[2:4]), 16)\n self.id = data_to_hex_str(self.packet[4:6])\n \n #parse the flags fields(reservedbit, don't fragment, more fragment)\n if ((ord(self.packet[6]) & (1 << 7)) != 0):\n self.flags_reservedbit = 1\n else:\n self.flags_reservedbit = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 6)) != 0):\n self.flags_dont_fragment = 1\n else:\n self.flags_dont_fragment = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 5)) != 0):\n self.flags_more_fragment = 1\n else:\n self.flags_more_fragment = 0\n #endof if\n \n #parse the offset field(in packet[6:7]): 00011111 & packet[6] (to filter flags) -->> get packet[6:7] in hex_str\n #tmp = str(31 & ord(self.packet[6]))\n self.fragment_offset = int(data_to_hex_str(self.packet[6:8]), 16)\n if (self.fragment_offset >= (1 << 13)):\n #take away the flags fields: 00011111 11111111 & self.fragment_offset\n self.fragment_offset = self.fragment_offset & ((1 << 13) - 1) \n \n self.TTL = ord(self.packet[8])\n self.protocol = IPPROTO[ord(self.packet[9])]\n self.header_checksum = data_to_hex_str(self.packet[10:12])\n \n self.src = str(ord(self.packet[12])) + '.' + str(ord(self.packet[13])) + '.' + \\\n str(ord(self.packet[14])) + '.' + str(ord(self.packet[15]))\n self.dst = str(ord(self.packet[16])) + '.' + str(ord(self.packet[17])) + '.' + \\\n str(ord(self.packet[18])) + '.' 
+ str(ord(self.packet[19]))\n \n if (self.header_len > 20):\n self.opt_paddings = self.packet[20 : (self.header_len)]", "def _extract_nos_version(self, data: str) -> None:\n if self.devtype == \"linux\":\n for line in data.splitlines():\n if line.startswith(\"VERSION_ID\"):\n self.version = line.split('=')[1] \\\n .strip().replace('\"', '')\n break\n else:\n self.version = \"all\"\n self.logger.error(\n f'Cannot parse version from {self.address}:{self.port}')", "def carve(self, bs, dataFile, verbose=False):\n _bs = bs\n records = []\n headers = []\n\n i = 0\n # Find all occurrences of the magic string\n found = _bs.findall(evt_header.MagicString, bytealigned=False)\n readSoFarBits = 0\n for idx in found:\n _bs.pos = idx\n r = EvtRecord()\n r.setPathname(dataFile)\n r.setPosition(_bs.pos)\n\n # Read an EVT header field:\n # The algorithm here is to find the message separator \n # and use that as a basis for locating the other fields.\n # Since we split large input files, \"offset\" fields are\n # invalid. \n\n # Message length\n fieldBits = 32\n lenIdx = idx - fieldBits # Set position to idx of length\n _bs.pos = lenIdx\n recordLength = _bs.read(fieldBits).uintle\n r.setField(\"length\", recordLength)\n readSoFarBits += fieldBits\n\n # Calculate size of variable data at end of record \n varDataSize = evt_record.FixedSize - recordLength \n # When reading the size in a header\n if varDataSize < 0: \n varDataSize = 0\n\n # Reset stream position\n _bs.pos = idx\n\n # Message separator\n fieldBits = 32 \n # Check to see if we are reading past end of stream\n data = self.carveField(_bs, \"reserved\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"reserved\", data)\n\n # Record number\n fieldBits = 32 \n data = self.carveField(_bs, \"recordNumber\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"recordNumber\", data)\n\n # Date created\n fieldBits = 32 \n data = self.carveField(_bs, \"timeGenerated\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"timeGenerated\", data)\n\n # Date written\n fieldBits = 32 \n data = self.carveField(_bs, \"timeWritten\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"timeWritten\", data)\n\n # Event ID\n fieldBits = 16 \n data = self.carveField(_bs, \"eventID\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventID\", data)\n \n # Event RVA offset\n fieldBits = 16 \n data = self.carveField(_bs, \"eventRVA\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventRVA\", data)\n\n # Event type\n fieldBits = 16 \n data = self.carveField(_bs, \"eventType\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventType\", data)\n\n # Num strings\n fieldBits = 16 \n data = self.carveField(_bs, \"numStrings\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"numStrings\", data)\n\n # Category\n fieldBits = 16 \n data = self.carveField(_bs, \"eventCategory\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"eventCategory\", data)\n\n # Reserved flags \n fieldBits = 16 \n data = self.carveField(_bs, \"reservedFlags\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"reservedFlags\", 
data)\n\n # Closing record number\n fieldBits = 32 \n data = self.carveField(_bs, \"closingRecordNumber\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"closingRecordNumber\", data)\n\n # String offset\n fieldBits = 32 \n data = self.carveField(_bs, \"stringOffset\", \"uint\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"stringOffset\", data)\n\n # User SID length\n fieldBits = 32\n data = self.carveField(_bs, \"userSidLength\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"userSidLength\", data)\n\n # User SID offset\n fieldBits = 32 \n data = self.carveField(_bs, \"userSidOffset\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"userSidOffset\", data)\n\n # Data length\n fieldBits = 32 \n data = self.carveField(_bs, \"dataLength\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"dataLength\", data)\n\n # Data offset\n fieldBits = 32\n data = self.carveField(_bs, \"dataOffset\", \"uintle\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"dataOffset\", data)\n\n # Variable data\n # FIXME: dont rely on peek() to avoid reading past end of stream\n fieldBits = int(r.getField(\"length\"))\n try:\n data = _bs.peek(\"bytes\" + \":\" + str(fieldBits))\n except bitstring.ReadError:\n if verbose:\n print \"[EVT]: Unable to read EVT data field; \"\\\n \"it would be truncated\"\n break\n data = self.carveField(_bs, \"varData\", \"bytes\",\\\n fieldBits, verbose)\n if data == self.ERROR_END_OF_STREAM:\n break\n r.setField(\"varData\", data)\n\n # SID\n # FIXME: find out why sidLength is so weird\n #sidLength = r.getField(\"userSidLength\")\n #if sidLength > 0:\n # sidOffset = r.getField(\"userSidOffset\")\n # if sidOffset <= _bs.length:\n # _bs.pos = sidOffset\n # fieldBits = sidLength\n # if readSoFarBits + fieldBits >= _bs.len:\n # fieldBits = _bs.len - _bs.pos\n # sid = _bs.read(fieldBits).uint\n # r.setField(\"sid\", sid)\n # break\n # sid = _bs.read(fieldBits).uint\n # r.setField(\"sid\", sid)\n #readSoFarBits += fieldBits\n records.append(r)\n return (headers, records)", "def read_uef_details(chunks):\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x0])\n\n\tif pos == None:\n\n\t\toriginator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\toriginator = 'Unknown'\n\telse:\n\t\toriginator = chunk[1]\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x5])\n\n\tif pos == None:\n\n\t\tmachine, keyboard = 'Unknown', 'Unknown'\n\n\telse:\n\n\t\tmachines = ('BBC Model A', 'Electron', 'BBC Model B', 'BBC Master')\n\t\tkeyboards = ('Any layout', 'Physical layout', 'Remapped')\n\n\t\tmachine = ord(chunk[1][0]) & 0x0f\n\t\tkeyboard = (ord(chunk[1][0]) & 0xf0) >> 4\n\n\t\tif machine < len(machines):\n\t\t\tmachine = machines[machine]\n\t\telse:\n\t\t\tmachine = 'Unknown'\n\n\t\tif keyboard < len(keyboards):\n\t\t\tkeyboard = keyboards[keyboard]\n\t\telse:\n\t\t\tkeyboard = 'Unknown'\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0xff00])\n\n\tif pos == None:\n\n\t\temulator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\temulator = 'Unknown'\n\telse:\n\t\temulator = chunk[1]\n\n\n\t# Remove trailing null bytes\n\twhile originator[-1] == '\\000':\n\n\t\toriginator = originator[:-1]\n\n\twhile emulator[-1] == '\\000':\n\n\t\temulator = emulator[:-1]\n\n\tfeatures = ''\n\tif find_next_chunk(chunks, 0, [0x1])[0] != None:\n\t\tfeatures = features + 
'\\n' + 'Instructions'\n\tif find_next_chunk(chunks, 0, [0x2])[0] != None:\n\t\tfeatures = features + '\\n' + 'Credits'\n\tif find_next_chunk(chunks, 0, [0x3])[0] != None:\n\t\tfeatures = features + '\\n' + 'Inlay'\n\n\treturn originator, machine, keyboard, emulator, features", "def parse(self) :\n self._curname = None\n self._curattributes = None\n \n self.setVersion((ord(self._data[0]), ord(self._data[1])))\n self.setOperationId(unpack(\">H\", self._data[2:4])[0])\n self.setRequestId(unpack(\">I\", self._data[4:8])[0])\n self.position = 8\n endofattributes = self.tagvalues[\"end-of-attributes-tag\"]\n maxdelimiter = self.tagvalues[\"event_notification-attributes-tag\"]\n nulloffset = lambda : 0\n #try :\n if 1:\n tag = ord(self._data[self.position])\n while tag != endofattributes :\n self.position += 1\n name = self.tags[tag]\n if name is not None :\n func = getattr(self, name.replace(\"-\", \"_\"), nulloffset)\n self.position += func()\n if ord(self._data[self.position]) > maxdelimiter :\n self.position -= 1\n continue\n oldtag = tag\n tag = ord(self._data[self.position])\n if tag == oldtag :\n self._curattributes.append([])\n #except IndexError :\n # raise IPPError, \"Unexpected end of IPP message.\"\n \n self.data = self._data[self.position+1:]\n self.parsed = True", "def _decode_35701(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29995:\n start_byte += n_bytes\n n_bytes = 4\n n_points = struct.unpack(\n '<I', data[start_byte:start_byte + n_bytes])[0]\n return {'n_points': n_points}", "def ver_dec_content(parts, sign_key=None, enc_key=None, sign_alg=\"SHA256\"):\n\n if parts is None:\n return None\n elif len(parts) == 3:\n # verify the cookie signature\n timestamp, load, b64_mac = parts\n mac = base64.b64decode(b64_mac)\n verifier = HMACSigner(algorithm=sign_alg)\n if verifier.verify(\n load.encode(\"utf-8\") + timestamp.encode(\"utf-8\"), mac, sign_key.key\n ):\n return load, timestamp\n else:\n raise VerificationError()\n elif len(parts) == 4:\n b_timestamp = parts[0]\n iv = base64.b64decode(parts[1])\n ciphertext = base64.b64decode(parts[2])\n tag = base64.b64decode(parts[3])\n\n decrypter = AES_GCMEncrypter(key=enc_key.key)\n try:\n msg = decrypter.decrypt(ciphertext, iv, tag=tag)\n except InvalidTag:\n return None\n\n p = lv_unpack(msg.decode(\"utf-8\"))\n load = p[0]\n timestamp = p[1]\n if len(p) == 3:\n verifier = HMACSigner(algorithm=sign_alg)\n if verifier.verify(\n load.encode(\"utf-8\") + timestamp.encode(\"utf-8\"),\n base64.b64decode(p[2]),\n sign_key.key,\n ):\n return load, timestamp\n else:\n return load, timestamp\n return None", "def parse_version(header, data):\n log = unpack('<I', data)\n game, save = unpack('<7sxf', header)\n if save == -1:\n save = unpack('<I', header)\n if save == 37:\n save = 37.0\n else:\n save /= (1<<16)\n version = get_version(game.decode('ascii'), round(save, 2), log)\n return version, game.decode('ascii'), round(save, 2), log", "def decode(self, data, tables=True):\n if data is None or not len(data):\n raise BufrDecodeWarning(\"Data buffer is empty!\")\n self._blob = data\n self._meta = {}\n logger.info(\"SECT 0..5 DECODE\")\n\n o, l, r = sect.decode_sect0(self._blob, 0)\n self._meta.update(r)\n self._edition = r['edition']\n logger.debug(\"SECT_0\\t offs:%d len:%d = %s\" , o, l, r)\n\n o, l, r = sect.decode_sect1(self._blob, o, edition=self._edition)\n self._meta.update(r)\n logger.debug(\"SECT_1\\t offs:%d len:%d = %s\" , o, l, r)\n\n tables_fail = None\n if 
tables:\n try:\n self._tables = self.load_tables()\n except StandardError or Warning as exc:\n tables_fail = exc\n\n if r['sect2']:\n o, l, r = sect.decode_sect2(self._blob, o)\n self._meta.update(r)\n logger.debug(\"SECT_2\\t offs:%d len:%d = %s\" , o, l, r)\n\n o, l, r = sect.decode_sect3(self._blob, o)\n self._meta.update(r)\n self._subsets = r['subsets']\n self._compressed = r['comp']\n self._desc = r['descr']\n logger.debug(\"SECT_3\\t offs:%d len:%d = %s\" , o, l, r)\n\n o, l, r = sect.decode_sect4(self._blob, o)\n self._meta.update(r)\n logger.debug(\"SECT_4\\t offs:%d len:%d = %s\" , o, l, r)\n self._data_s = r['data_start']\n self._data_e = r['data_end']\n\n o, l, r = sect.decode_sect5(self._blob, o)\n self._meta.update(r)\n logger.debug(\"SECT_5\\t offs:%d len:%d = %s\" , o, l, r)\n\n if o == -1:\n logger.error(\"End '7777' not found\")\n raise BufrDecodeError(\"End '7777' not found\")\n\n if self._meta['size'] != o:\n logger.error(\"size/offset error: size %d <> offset %d\" , self._meta['size'], o)\n raise BufrDecodeError(\"Size/offset error\")\n\n if tables_fail is not None:\n raise tables_fail\n\n return self._meta", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. 
/\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def parse_from_bytes(self, raw_buffer):\n\n try:\n (cpu_svn,\n self.misc_select,\n _,\n attributes,\n mr_enclave,\n _,\n mr_signer,\n _,\n self.isv_prod_id,\n self.isv_svn,\n _,\n report_data) = \\\n struct.unpack(self._format, raw_buffer)\n\n # Further parse embedded structures\n self.cpu_svn.parse_from_bytes(cpu_svn)\n self.attributes.parse_from_bytes(attributes)\n self.mr_enclave.parse_from_bytes(mr_enclave)\n self.mr_signer.parse_from_bytes(mr_signer)\n self.report_data.parse_from_bytes(report_data)\n except struct.error as se:\n raise ValueError('Unable to parse: {}'.format(se))", "def decode_faceshift_datastream(self, data):\n \n #block_id = struct.unpack_from('H', data)\n #print(\"Received block id \" + str(block_id)) ;\n\n offset = 0\n block_id, version, block_size = struct.unpack_from('HHI', data, offset)\n \n #print(\"ID, v, size = \" + str(block_id) + \",\" + str(version) + \",\" + str(block_size) )\n \n offset += 8\n\n if(block_id == BLOCK_ID_TRACKING_STATE):\n n_blocks, = struct.unpack_from('H', data, offset)\n #print(\"n_blocks = \" + str(n_blocks))\n offset += 2\n\n track_ok = 0 # Will be a byte: 1 if tracking ok, 0 otherwise.\n head_rotation_quat = None # Will be filled with the rotation using mathutils.Quaternion\n blend_shape_values = [] # Will be a list of float in the range 0-1\n #eyes_values = None # Will be a sequence of 4 angle values\n markers_position = [] # Will be a list of mathutils.Vector\n \n curr_block = 0\n while(curr_block < n_blocks):\n block_id, version, block_size = struct.unpack_from('HHI', data, offset)\n #print(\"ID, v, size = \" + str(block_id) + \",\" + str(version) + \",\" + str(block_size) )\n \n # put the offset at the beginning of the block\n offset += 8\n \n if(block_id == 101): # Frame Information blobk (timestamp and tracking status)\n ts, track_ok = struct.unpack_from('dB', data, offset)\n #print(\"timestamp, track_ok \" + str(ts) + \", \" + str(track_ok) )\n #offset += 9\n elif(block_id == 102): # Pose block (head rotation and position)\n x,y,z,w = struct.unpack_from('ffff', data, offset)\n #head_rotation_quat = mathutils.Quaternion((w,x,y,z))\n elif(block_id == 103): # Blendshapes block (blendshape values)\n n_coefficients, = struct.unpack_from('I', data, offset)\n #print(\"Blend shapes count=\"+ str(n_coefficients) )\n i = 0\n coeff_list = \"\"\n while(i < n_coefficients):\n # Offset of the block, plus the 4 bytes 
for int n_coefficients, plus 4 bytes per float\n val, = struct.unpack_from('f', data, offset + 4 + (i*4))\n blend_shape_values.append(val)\n coeff_list += repr(val) + \" \"\n i += 1\n print(\"Values: \" + coeff_list)\n elif(block_id == 104): # Eyes block (eyes gaze)\n leye_theta, leye_phi, reye_theta, reye_phi = struct.unpack_from('ffff', data, offset)\n elif(block_id == 105): # Markers block (absolute position of mark points)\n n_markers, = struct.unpack_from('H', data, offset)\n #print(\"n markers=\"+str(n_markers))\n i = 0\n while(i < n_markers):\n # Offset of the block, plus the 2 bytes for int n_markers, plus 4 bytes for each x,y,z floats\n x, y, z = struct.unpack_from('fff', data, offset + 2 + (i*4*3))\n #print(\"m\" + str(i) + \" \" + str(x) + \"\\t\" + str(y) + \"\\t\" + str(z))\n markers_position.append(mathutils.Vector((x,y,z)))\n i += 1\n \n curr_block += 1\n offset += block_size\n \n msg = fsMsgTrackingState()\n\n msg.m_timestamp = ts\n\n self.pub.publish(msg)\n\n # end -- while on blocks. Track State scan complete", "def parse_header(self):", "def _decode_5104(data):\n\n text = []\n start_byte = 0\n while start_byte + 2 < len(data):\n tag = data[start_byte:start_byte + 2]\n if tag == b'#u':\n start_byte += 2\n text_size = struct.unpack(\n '<h', data[start_byte:start_byte + 2])[0]\n start_byte += 2\n text.append(data[start_byte:start_byte + text_size].decode('utf8'))\n start_byte += text_size\n start_byte += 6\n elif tag == b'$u':\n start_byte += 2\n text.append(struct.unpack(\n '<h', data[start_byte:start_byte + 2])[0])\n start_byte += 2\n start_byte += 6\n elif tag == b',u':\n start_byte += 2\n text.append(struct.unpack(\n '<h', data[start_byte:start_byte + 2])[0])\n start_byte += 2\n else:\n start_byte += 1\n\n return {'analyst': text[0],\n 'date': text[2],\n 'image_name': text[4],\n 'instrument_model': text[5],\n 'instrument_serial_number': text[6],\n 'instrument_software_version': text[7],\n 'accumulations': text[9],\n 'detector': text[11],\n 'source': text[12],\n 'beam_splitter': text[13],\n 'apodization': text[15],\n 'spectrum_type': text[16],\n 'beam_type': text[17],\n 'phase_correction': text[20],\n 'ir_accessory': text[26],\n 'igram_type': text[28],\n 'scan_direction': text[29],\n 'background_scans': text[32]}", "def load_vcf_data(vcf_file):\n \n if(vcf_file[-3:]==\".gz\"):\n vcf_data=gzip.open(vcf_file, \"r\")\n else:\n vcf_data=open(vcf_file, \"r\")\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n missing=0\n \n for line in vcf_data:\n\n if line[0:2] == '##':\n continue\n elif line[0:1] == '#':\n data=line[1:-1]\n data=data.split(\"\\t\")\n if data[0:9]==[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\"]:\n sample_names=data[9:]\n else:\n print data[0:9]\n raise Exception(\"Bad vcf header line\")\n else:\n data=line[:-1]\n data=data.split(\"\\t\")\n\n if len(data[4].split(\",\"))>1: \n print \"Warning: ignoring multi alleleic site at \" + data[0]+\":\"+data[1] \n continue # multi-allelic sites. 
\n\n if data[2] != \".\":\n snp_names.append(data[2])\n else:\n snp_names.append(data[0]+\":\"+data[1])\n\n snp_pos.append(int(data[1]))\n\n if not all([(x[0]==\".\" and x[2]==\".\") or (x[0] in [\"0\", \"1\"] and x[2] in [\"0\", \"1\"]) for x in data[9:]]):\n raise Exception(\"Could not read line: \" + line) \n \n genotype_data.append([ 3 if x[0]==\".\" and x[2]==\".\" else int(x[0])+int(x[2]) for x in data[9:] ])\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def _parseBlockette(self, blkt_type):\n blkt_dict = SimpleOrderedDict()\n # Check the blockette number.\n if blkt_type == 1000:\n unpack_values = unpack('%s3B' % self.endian,\n self.file.read(3))\n blkt_dict['Encoding Format'] = int(unpack_values[0])\n blkt_dict['Word Order'] = int(unpack_values[1])\n blkt_dict['Data Record Length'] = int(unpack_values[2])\n elif blkt_type == 1001:\n unpack_values = unpack('%sBBxB' % self.endian,\n self.file.read(4))\n blkt_dict['Timing quality'] = int(unpack_values[0])\n blkt_dict['mu_sec'] = int(unpack_values[1])\n blkt_dict['Frame count'] = int(unpack_values[2])\n return blkt_dict", "def readVLTUS(self): \n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.LTUS\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n Detectors=[] \n for i in lines:\n for j in i:\n if j == ' ': continue\n else: break\n if j=='#': continue\n items=i.split('=')\n detector={}\n detector['name']=items[0]\n Detectors.append(detector)\n #print Detectors \n #print '-----------------------------' \n return Detectors", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != 
min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def _read_onmd_4(self, data: bytes, ndata: int) -> int:\n if not data:\n return ndata\n op2 = self.op2\n\n fdata = np.frombuffer(data, dtype=op2.fdtype8)\n idata = np.frombuffer(data, dtype=op2.idtype8)\n ndata = len(idata)\n #op2.log.warning(f'ndata={ndata}')\n eids = idata[::2] // 10\n data = fdata[1::2]\n #print(f'eids = {eids}')\n #print(f'data = {data}')\n self.obj.eids = eids\n self.obj.data = data\n\n key = op2.isubcase\n responses = op2.op2_results.responses\n if responses.normalized_mass_density is None:\n responses.normalized_mass_density = defaultdict(list)\n responses.normalized_mass_density[key].append(self.obj)\n return ndata", "def _extract_nos_version(self, data) -> None:\n match = re.search(r'Junos: (\\S+)', data)\n if match:\n self.version = match.group(1).strip()\n else:\n self.logger.warning(\n f'Cannot parse version from {self.address}:{self.port}')\n self.version = \"all\"", "def formver(self) -> Tuple[int]:\n return (self.header.format, self.header.version)", "def finalOpcodeData(scriptVersion, script):\n # Avoid unnecessary work.\n if len(script) == 0:\n return None\n\n data = None\n tokenizer = ScriptTokenizer(scriptVersion, script)\n while tokenizer.next():\n data = tokenizer.data()\n if tokenizer.err is not None:\n return None\n return data", "def _parseData(self, payload):\n out=[]\n bytesParsed = 0\n while bytesParsed < len(payload):\n\n #check for the extended Code Level, code and length\n #count the number of EXCODE_BYTE\n #extendedCodeLevel = sum([1 for x in data if x == EXCODE_BYTE] )\n #bytesParsed += extendedCodeLevel\n\n #identify the length of the expected bytes in the payload\n code = payload[bytesParsed]\n bytesParsed +=1\n if code > 0x7F:\n # multi-byte code, length > 1\n length = payload[bytesParsed]\n bytesParsed +=1\n else:\n length = 1\n\n if code == SENSOR_STATUS:\n # value of 0==no contact, 200==contact\n #print \"leadoff: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'leadoff': payload[bytesParsed] } )\n bytesParsed +=1\n\n elif code == HEART_RATE:\n #print \"HR: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'HR': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == CONFIG_BYTE:\n #print \"config: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'config': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == RAW_ECG:\n # raw value is between -32768 and 32767, in twos compliment form\n # if the raw value is higher than 32768, it should be rolled around to allow for negative values\n raw = payload[bytesParsed]*256 + payload[bytesParsed]\n if raw >= 32768: \n raw = raw - 65536\n #print \"ecg: %i\" % ecg\n\n # create the timestamp on each ECG sample, starting from the first\n if self.starttime is None:\n self.starttime = time.time()\n self.curtime = self.starttime\n else:\n self.curtime = self.curtime + 1./self.Fs\n\n out.append( {'timestamp': self.curtime, 'ecg_raw': raw } )\n bytesParsed += length\n\n elif code == DEBUG_1:\n #print \"debug1: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug1': payload[bytesParsed:] } )\n bytesParsed += length\n\n elif code == DEBUG_2:\n #print \"debug2: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug2': payload[bytesParsed:] } )\n bytesParsed += length\n\n else:\n print \"unknown code: %i\" % code\n\n return out", 
"def read_fw_version(self):\n\n # This function expects the firmware version to be in a line\n # prefixed with 'Product Extra'.\n # At the moment, it takes the form:\n # Product Extra : MCH FW V2.18.8 Final (r14042) (Mar 31 2017 - 11:29)\n # The following two parts will be extracted:\n # mch_fw_ver: V2.18.8 Final\n # mch_fw_date: Mar 31 2017 - 11:29\n # If NAT change the format, then this function will need to be updated\n\n pattern = \".*: MCH FW (.*) \\(.*\\) \\((.*)\\)\"\n\n for mch in range(1,3):\n try:\n result = self.mch_comms.call_ipmitool_command([\"fru\", \"print\", str(mch + MCH_FRU_ID_OFFSET)])\n\n for line in result.splitlines():\n if FW_TAG in line:\n match = re.match(pattern, line)\n if match:\n self.mch_fw_ver[mch] = match.group(1)\n self.mch_fw_date[mch] = match.group(2)\n else:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except CalledProcessError as e:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except TimeoutExpired as e:\n print(\"read_fw_version: caught TimeoutExpired exception: {}\".format(e))", "def test_parse(self):\n report = (\n \"KJFK 032151Z 16008KT 10SM FEW034 FEW130 BKN250 27/23 A3013 RMK AO2 SLP201\"\n )\n data, units = metar.parse(report[:4], report)\n self.assertIsInstance(data, structs.MetarData)\n self.assertIsInstance(units, structs.Units)\n self.assertEqual(data.raw, report)" ]
[ "0.61757946", "0.6068177", "0.5776372", "0.56304955", "0.557445", "0.554829", "0.55332154", "0.54537684", "0.538945", "0.5372999", "0.536848", "0.5348484", "0.53264505", "0.53117347", "0.53007567", "0.5277842", "0.5234419", "0.52270865", "0.52115095", "0.52013606", "0.51338416", "0.5119743", "0.51190054", "0.51182497", "0.5097035", "0.50915194", "0.50887036", "0.5085939", "0.5083072", "0.5081548" ]
0.70668906
0
lifecycle_query_installed returns a list of chaincodes installed on the peer; then a command is executed to get every installed chaincode from the peer in tar.gz package format.
def lifecycle_get_installed_package(self, timeout): if self.version in BasicEnv.binary_versions_v2: res, installed = self.lifecycle_query_installed("3s") res_return = 0 if res == 0: for item in installed['installed_chaincodes']: # packages_id.append(item['package_id']) res_get = os.system("./../bin/{}/bin/peer lifecycle chaincode getinstalledpackage --package-id {} " "--output-directory ./ --connTimeout {}" .format(self.version, item['package_id'], timeout)) res_get = res_get >> 8 res_return = res_return or res_get else: print("package_id get failed.") return 1, {} # res = os.system("./../bin/{}/bin/peer lifecycle chaincode getinstalledpackage --package-id {} " # "--output-directory ./ --connTimeout {}".format(self.version, packages_id[0], timeout)) # res = res >> 8 return res_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lifecycle_query_installed(self, timeout):\n if self.version in BasicEnv.binary_versions_v2:\n # res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode queryinstalled --output json --connTimeout\n # {} > queryInstalled.txt\"\n # .format(self.version, timeout), \"r\")\n # # with open('./queryInstalled.txt', 'r', encoding='utf-8') as f:\n # # content = f.read()\n # # os.system(\"rm ./queryInstalled.txt\")\n # body = res.read()\n # installed_chaincodes = json.loads(body)\n\n res = subprocess.Popen(\"./../bin/{}/bin/peer lifecycle chaincode queryinstalled --output json \"\n \"--connTimeout {}\".format(self.version, timeout), shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = res.communicate()\n return_code = res.returncode\n if return_code == 0:\n content = str(stdout, encoding=\"utf-8\")\n installed_chaincodes = json.loads(content)\n return return_code, installed_chaincodes\n else:\n stderr = str(stderr, encoding=\"utf-8\")\n return return_code, stderr", "def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())", "def lifecycle_package(self, cc_name, cc_version, cc_path, language):\n if self.version in BasicEnv.binary_versions_v2:\n label = cc_name+\"_\"+cc_version\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode package {}.tar.gz --path {} --lang {} --label {}\"\n .format(self.version, cc_name, cc_path, language, label))\n res = res >> 8\n print(\"res\", res)\n return", "def run(self):\n logging.debug('List Installed Programs')\n if self.short:\n print(' '.join([ent for ent in pakit.conf.IDB]))\n return\n\n nchars = 12\n fmt = str(nchars).join(['{prog:', '} {repo:',\n '} {hash:', '} {date}'])\n installed = ['Program Repo Hash Date']\n for prog in pakit.conf.IDB:\n entry = pakit.conf.IDB[prog]\n installed.append(fmt.format(prog=prog[0:nchars],\n repo=entry['repo'][0:nchars],\n date=entry['date'],\n hash=entry['hash'][0:nchars]))\n\n msg = 'Installed Programs:'\n msg += PREFIX + PREFIX.join(installed)\n print(msg)\n return msg", "def list_installed(self) -> Generator[Path, None, None]:\n LOGGER.verbose(\"checking %s for Terraform versions...\", self.versions_dir)\n return self.versions_dir.rglob(\"*.*.*\")", "def peer_status(self):\n cmdlist = shlex.split(\"gluster peer status\")\n output = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)\n stdout = output.stdout.read()\n print json.dumps({\n \"output\": stdout\n })", "def getversions(package_name: str) -> list:\n\t# execute command\n\tproc = subprocess.Popen(['pip', 'install', package_name+'==CRASHME'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tproc.wait()\n\n\t# processed returned data\n\tlines = proc.stderr.read().decode('utf8')\n\tsearchterm = \"(from versions:\"\n\tstart = lines.find(searchterm) + len(searchterm)\n\tend = lines.find(\")\", start)\n\tlines = lines[start:end].split(',')\n\tlines = list(map(lambda x: x.strip(), lines))\n\n\treturn lines", "async def discover_catalog_hook(\n self,\n plugin_invoker: PluginInvoker,\n exec_args: Tuple[str, ...] = (),\n ):\n # Discover only in sync mode (i.e. 
no args)\n if exec_args:\n return\n\n try:\n await self.discover_catalog(plugin_invoker)\n except PluginLacksCapabilityError:\n pass", "def getInstalledOSPatches(self, df: str = None, status: str = None, cursor: str = None, pageSize: int = None, installedBefore: str = None, installedAfter: str = None):\n params = {\n 'df': df,\n 'status': status,\n 'cursor': cursor,\n 'pageSize': pageSize,\n 'installedBefore': installedBefore,\n 'installedAfter': installedAfter\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_OS_PATCH_INSTALLS}', params=params)", "def test_installments_get(self):\n pass", "def listpacks(all: bool=False) -> [str, str]:\n\t# execute command\n\tcommand = ['pip', 'freeze']\n\tif all:\n\t\tcommand.append('--all')\n\tproc = subprocess.Popen(command, stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# process returned data\n\tlines = proc.stdout.read().decode('utf8')\n\tlines = list(\n\t\tfilter(\n\t\t\tlambda inf: inf[0] and inf[0].split(' ')[0].lower() != '-e',\n\t\t\tmap(\n\t\t\t\tlambda inf: list(map(\n\t\t\t\t\tlambda x: x.lower().strip(),\n\t\t\t\t\tinf.split('==')\n\t\t\t\t\t)),\n\t\t\t\tlines.split('\\n')\n\t\t\t)\n\t\t)\n\t)\n\n\treturn lines", "def install_deps():\n click.echo(\"install_deps\")", "def install_command(self):\n result = ['cargo', 'install', '--vers ' + self.version + ' ' +\n self.package if self.version else self.package]\n return result", "def ls(query):\n\n if (query != None):\n query = query.lower()\n\n # Search in commands return list of all matches\n matches = [x for x in commands if ( query in x['alias'].lower() or query in x['command'].lower() or query in x['description'].lower())]\n else:\n matches = commands\n\n grouped = groupCommands(matches)\n\n for group in grouped:\n if(len(group) > 0):\n echoGroup(group[0]['group'])\n for match in group:\n echoCommand(match, commands.index(match))\n click.echo(\" \")", "def start_list(command_line):\n stack_driver = CloudStackUtility(command_line)\n return stack_driver.list()", "def LoadInstallations(counter):\n process = subprocess.Popen([\"pip\", \"list\", \"--format=json\"],\n stdout=subprocess.PIPE)\n output, _ = process.communicate()\n installations = json.loads(output.decode())\n for i in installations:\n counter.labels(i[\"name\"], i[\"version\"]).inc()", "def all_installation(self):\n\t\tself.db = DB()\n\t\tinstallation_all = self.db.select_all_from(\"installations\")\n\t\ttmpl = lookup.get_template(\"installation.html\")\n\t\treturn (tmpl.render(installation=installation_all))", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "def test_installed_packages():\n features = (\n \"Package Identity : Capa1\\r\\n State : Installed\\r\\n\"\n \"Package Identity : Capa2\\r\\n State : Installed\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.installed_packages()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Packages\"]\n )\n assert out == [\"Capa1\", \"Capa2\"]", "def _install(self, host):\n pass", "def check_install():\n if platform.dist()[0] not in ['fedora', 'redhat', 'centos']:\n print \"{} not supported\".format(platform.dist()[0])\n sys.exit(1)\n print \"\\ndetected {} {} ...\".format(platform.dist()[0], platform.dist()[1])\n\n import yum\n # Remove loggin. 
Taken from: https://stackoverflow.com/a/46716482\n from yum.logginglevels import __NO_LOGGING\n yumloggers = [\n 'yum.filelogging.RPMInstallCallback', 'yum.verbose.Repos',\n 'yum.verbose.plugin', 'yum.Depsolve', 'yum.verbose', 'yum.plugin',\n 'yum.Repos', 'yum', 'yum.verbose.YumBase', 'yum.filelogging',\n 'yum.verbose.YumPlugins', 'yum.RepoStorage', 'yum.YumBase',\n 'yum.filelogging.YumBase', 'yum.verbose.Depsolve'\n ]\n for loggername in yumloggers:\n logger = logging.getLogger(loggername)\n logger.setLevel(__NO_LOGGING)\n\n yumbase = yum.YumBase()\n pkg = 'Percona-XtraDB-Cluster-server-<%= @percona_major_version %>'\n if yumbase.rpmdb.searchNevra(name=pkg):\n pkg_list = yumbase.rpmdb.searchNevra(name=pkg)\n print 'detected {} ...'.format(pkg_list[0])\n else:\n print \"{}{} not installed{}\".format(RED, pkg, WHITE)\n sys.exit(1)\n return 'percona'", "def getAllInstalledPackages(installedPkgPath):\n allPkgVers = []\n if os.path.exists(installedPkgPath):\n for pkg in os.listdir(installedPkgPath):\n pkgVersions = os.listdir(os.path.join(installedPkgPath, pkg))\n for pkgVersion in pkgVersions:\n pkgPath = os.path.join(installedPkgPath, pkg)\n if not fnmatch.fnmatch(pkgVersion, '*.inprogress'):\n allPkgVers.append(os.path.join(pkgPath, pkgVersion))\n return allPkgVers", "def command_package_ls(*args):\n\n # Setup build, install, and data directories\n package_dirs = os.listdir(packages_path())\n\n for packname in package_dirs:\n # Filter to directories with config files\n if not os.path.isdir(package_path(packname)): continue\n if not package_load_config(packname): continue\n\n # Check for installation\n installed_flag = ''\n installdir = install_dir(packname)\n bindir = os.path.join(installdir, 'bin')\n # This is just a very basic sanity check for binaries we\n # require\n binfiles = (os.path.exists(bindir) and os.listdir(bindir)) or []\n if ( ('space' in binfiles or 'space_d' in binfiles) and\n ('cppoh' in binfiles or 'cppoh_d' in binfiles) ):\n installed_flag = '*'\n\n print packname, installed_flag\n\n return 0", "def viosupgrade_query(module):\n ret = 0\n\n if module.param['target_file_name']:\n cmd = '/usr/sbin/viosupgrade -q -f {}'\\\n .format(module.param['target_file_name'])\n (ret, stdout, stderr) = module.run_command(cmd)\n\n logging.info(\"[STDOUT] {}\".format(stdout))\n if ret == 0:\n logging.info(\"[STDERR] {}\".format(stderr))\n else:\n logging.error(\"command {} failed: {}\".format(stderr))\n ret = 1\n else:\n for target in module.param['targets']:\n cmd = '/usr/sbin/viosupgrade -q -n {}'.format(target)\n (rc, stdout, stderr) = module.run_command(cmd)\n\n logging.info(\"[STDOUT] {}\".format(stdout))\n if rc == 0:\n logging.info(\"[STDERR] {}\".format(stderr))\n else:\n logging.error(\"command {} failed: {}\".format(stderr))\n ret += 1\n return ret", "def sync(to_be_installed, to_be_uninstalled, verbose=False):\n\n flags = []\n\n if not verbose:\n flags.append('-q')\n\n if to_be_uninstalled:\n pip.main([\"uninstall\", '-y'] + flags + [str(req) for req in to_be_uninstalled])\n\n if to_be_installed:\n pip.main([\"install\"] + flags + [str(req) for req in to_be_installed])", "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", len(apps))\n installed = provider.get_installed()\n\n # 
check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "def command_list(self, query):\n return query", "def _get_packages_for_state(self, state, observer):\n available = super(AdbRemote, self)._get_packages_for_state(state, observer)\n\n if not available:\n if (state == AdbRemote.adb_shell) or (state == AdbRemote.adb_shell_root):\n available = {TextualDevice.cmds: ['moler.cmd.unix'],\n TextualDevice.events: ['moler.events.shared']}\n if available:\n return available[observer]\n elif state == UnixRemote.unix_remote: # this is unix extended with adb commands\n if observer == TextualDevice.cmds:\n available.append('moler.cmd.adb')\n\n return available", "def daos_ver_after_upgraded(self, host):\n cmds = [\n \"daos version\",\n \"dmg version\",\n \"daos pool query {}\".format(self.pool.identifier)]\n for cmd in cmds:\n self.log.info(\"==cmd= %s\", cmd)\n result = pcmd(host, cmd, False)\n if 0 not in result or len(result) > 1:\n failed = []\n for item, value in list(result.items()):\n if item != 0:\n failed.extend(value)\n raise CommandFailure(\"##Error occurred running '{}' on {}\".format(\n cmd, host))\n self.log.info(\"==>%s result= %s\", cmd, result)", "def command_ls(self, list_what):\n if list_what in ('available', 'mounted', 'unmounted'):\n callback = getattr(self.environment, 'get_%s_ids' % list_what)\n lst = callback()\n else:\n lst = []\n if len(lst) != 0:\n print((\"\\n\".join(lst)))" ]
[ "0.79515684", "0.55123", "0.5289305", "0.52338034", "0.5118079", "0.50910693", "0.5029205", "0.49814945", "0.49266955", "0.4898349", "0.48580378", "0.48288074", "0.4790245", "0.47845927", "0.47845408", "0.47839504", "0.47588703", "0.47367048", "0.47221714", "0.4721731", "0.47110236", "0.47049558", "0.4701027", "0.46906388", "0.46881926", "0.46851814", "0.46832138", "0.4681957", "0.4681883", "0.46810725" ]
0.5991446
1
The administrator can use the peer lifecycle chaincode approveformyorg subcommand to approve the chaincode on behalf of the organization.
def lifecycle_approve_for_my_org(self, orderer_url, orderer_tls_rootcert, channel_name, cc_name, chaincode_version, policy, sequence=1): res, installed = self.lifecycle_query_installed("3s") cc_label = cc_name+"_"+chaincode_version package_id = "" for each in installed['installed_chaincodes']: if each['label'] == cc_label: package_id = each['package_id'] break if package_id == "": return 1, "not exist the chaincode, please check chaincode_name and chaincode_version" if os.getenv("CORE_PEER_TLS_ENABLED") == "false" or os.getenv("CORE_PEER_TLS_ENABLED") is None: if self.version in BasicEnv.binary_versions_v2: res = os.system("./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} " " --channelID {} --name {} --version {} --init-required --package-id {} --sequence {}" " --signature-policy {} > ./approve.txt" .format(self.version, orderer_url, channel_name, cc_name, chaincode_version, package_id, sequence, policy)) else: if self.version in BasicEnv.binary_versions_v2: res = subprocess.Popen("./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} --tls " "--cafile {} --channelID {} --name {} --version {} --init-required --package-id " "{} --sequence {} --signature-policy {}" .format(self.version, orderer_url, orderer_tls_rootcert, channel_name, cc_name, chaincode_version, package_id, sequence, policy), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = res.communicate() return_code = res.returncode if return_code == 0: content = str(stdout, encoding="utf-8") else: stderr = str(stderr, encoding="utf-8") return return_code, stderr return return_code, content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()", "def test_approve_agreement(self):\n pass", "def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state", "def ApproveApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def approve(self, approver: PrivateKey):\n sig = crypto.get_signature_for_deploy_approval(\n self.hash, approver.private_key, approver.key_algo\n )\n self._append_approval(DeployApproval(approver.account_key, sig))", "def Approve(self, request, global_params=None):\n config = self.GetMethodConfig('Approve')\n return self._RunMethod(\n config, request, global_params=global_params)", "def approve(self, approver: str, to: str, amount, key: bytes):\n raw_tx = self.approve_build_transaction(approver, to, amount)\n signed_tx = self._sign(raw_tx, key)\n self.send_and_wait(signed_tx)", "def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state", "def change_approval(self, status):\r\n if status == 'approve':\r\n return self.approve()\r\n elif status == 'disapprove':\r\n return self.disapprove()", "def approve_me(message):\n users = hf.get_users()\n for user in users:\n if user[\"id\"] == message._get_user_id():\n if user[\"approval_level\"] == \"unapproved\": # Unknown\n message.reply(Strings['APPROVER_REQUEST'])\n admins = hf.get_admins()\n names = []\n for admin in admins:\n names.append(admin[\"name\"])\n\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), user[\"name\"])\n\n #message._client.send_message(config.AUTH_CHANNEL, approval_message)\n message._client.send_message(public_channel, approval_message)\n else:\n message.reply(\":x: Your approval level is already: \" + str(user[\"approval_level\"]))", "def grr_request_approval(line: Text) -> None:\n args = grr_request_approval.parser.parse_args(shlex.split(line))\n magics_impl.grr_request_approval_impl(args.reason, args.approvers, args.wait)", "def auto_approve_cod(self, auto_approve_cod):\n\n self._auto_approve_cod = auto_approve_cod", "def corporate_approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if request.POST.get('_cancel'):\n return HttpResponseRedirect(url)\n if request.POST.get('_save'):\n if (obj.planning_status == obj.PLANNING_DRAFT and obj.can_corporate_approve):\n obj.planning_status = obj.PLANNING_SUBMITTED\n obj.planning_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for corporate approval.\")\n return HttpResponseRedirect(url)\n if obj.planning_status == obj.PLANNING_SUBMITTED:\n # Only ePFP Application Administrator can apply corporate approval\n if ((not request.user.has_perm(\n 'prescription.can_corporate_approve'))):\n raise PermissionDenied\n\n obj.planning_status = obj.PLANNING_APPROVED\n obj.planning_status_modified = timezone.now()\n obj.save()\n 
self.message_user(\n request, \"Corporate approval successful.\")\n return HttpResponseRedirect(url)\n elif request.POST.get('_delete'):\n if (obj.planning_status == obj.PLANNING_APPROVED and request.user.has_perm('prescription.can_admin')):\n obj.planning_status = obj.PLANNING_DRAFT\n obj.planning_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully deleted for corporate approval.\")\n return HttpResponseRedirect(url)\n\n context = {\n 'current': obj,\n }\n return TemplateResponse(request, self.corporate_approval_template,\n context, current_app=self.admin_site.name)", "def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)", "def approve(_spender: address, _amount: uint256) -> bool:\n\n self.allowed[msg.sender][_spender] = _amount\n log.Approval(msg.sender, _spender, _amount)\n return True", "def approve_me(message):\n load_users(message._client.users)\n sender_id = message._get_user_id()\n target = user_list[sender_id].details['name']\n if (user_list[sender_id].is_unknown):\n message.reply(Strings['APPROVER_REQUEST'])\n names = list_to_names(user_list.admin_list)\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), target)\n message._client.send_message(config.AUTH_CHANNEL, approval_message)\n else:\n message.reply(\n \"Your status is already: \" + user_list[sender_id].level.name)", "def start_approval_process(self, request=None):\r\n # done here to avoid circular import\r\n from cbhooks.models import HookPoint\r\n\r\n hook_point = HookPoint.objects.filter(name=\"order_approval\").first()\r\n orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)\r\n if orch_actions:\r\n #the orchestration action NEEDs to be first in order to allow a hook\r\n # to model the approval process correctly and not have something\r\n # auto-approve before the hook is run\r\n logger.debug(\"Order Approval orchestration actions exist, so bypassing built-in approver emails.\")\r\n try:\r\n cbhooks.run_hooks(\"order_approval\", order=self)\r\n except cbhooks.exceptions.HookFailureException as e:\r\n msg = _(\"Failed to run hook for order approval. Status: {status},\"\r\n \" Output: {output}, Errors: {errors}\").format(status=e.status, output=e.output, errors=e.errors)\r\n raise CloudBoltException(msg)\r\n return \"\"\r\n\r\n #now that the hooks have run, check if it should be auto-approved\r\n profile = request.get_user_profile()\r\n if self.is_multilevel_approval():\r\n self.approve_my_grms(profile)\r\n\r\n if self.should_auto_approve():\r\n logger.debug(\"Order can be automatically approved, attempting approval by {}\".format(self.owner))\r\n jobs, msg = self.approve(self.owner)\r\n if jobs:\r\n msg = render_to_string(\r\n 'orders/approved_msg.html', {\r\n 'order': self,\r\n 'autoapproved': True,\r\n 'num_jobs': len(jobs),\r\n 'extramsg': msg,\r\n })\r\n return msg\r\n else:\r\n # No auto approval and no approval hooks, so go with\r\n # the default process of emailing a set of approvers, unless the\r\n # owner is an approver.\r\n msg = _(\"Order #{order_id} has been submitted for approval. 
\").format(order_id=self.id)\r\n msg += orders.mail.email_approvers(self, request)\r\n logger.debug(msg)\r\n return msg", "def approve_request(self, request_name):\n self.logger.info(\"Approving request '%s' ...\", request_name)\n\n json_args = json.dumps({\"RequestStatus\": \"assignment-approved\"})\n urn = self.urn_prefix + \"/request/%s\" % request_name\n status, data = self.http_request(\"PUT\", urn, data=json_args,\n headers=self.headersBody)\n\n if status != 200:\n self.logger.error(\"Failed to approve request with status: %s, data: %s\", status, data)\n sys.exit(1)\n self.logger.info(\"Approve succeeded.\")", "def approve(self, message):\n boto_connection = connection.get_connection()\n boto_connection.approve_assignment(self.assignment_id, message)", "def dr_approve(self):\n print \"DR approved this form. Current state:\", self.state", "def approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = self._approve_title(obj)\n\n AdminAddApprovalForm = self._approve_approval_form(request)\n\n form = AdminAddApprovalForm(initial={'prescription': obj})\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if obj.approval_status == obj.APPROVAL_DRAFT and obj.can_approve:\n # create an approval\n obj.approval_status = obj.APPROVAL_SUBMITTED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for approval.\")\n return HttpResponseRedirect(url)\n elif obj.approval_status == obj.APPROVAL_SUBMITTED:\n if request.POST.get('_cancel'):\n obj.clear_approvals()\n msg = 'Delete: Clearing Approvals/Endorsements', 'Burn ID: {}, Deleted by: {}'. format(obj.burn_id, request.user.get_full_name())\n logger.warning(msg)\n support_email('Delete: Clearing Approvals/Endorsements', msg)\n\n self.message_user(\n request, \"Approval rejected. 
ePFP is now draft.\")\n return HttpResponseRedirect(url)\n\n form = AdminAddApprovalForm(request.POST,\n initial={'prescription': obj})\n if form.is_valid():\n approval = form.save(commit=False)\n approval.prescription = obj\n approval.creator = request.user\n approval.modifier = request.user\n approval.save()\n obj.approval_status = obj.APPROVAL_APPROVED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully approved.\")\n return HttpResponseRedirect(url)\n elif obj.is_approved:\n if obj.is_closed:\n self.message_user(\n request, \"You can't extend an approval after the \"\n \"prescribed fire plan has been closed.\")\n return HttpResponseRedirect(url)\n if request.POST.get('_cancel'):\n self.message_user(\n request, \"Didn't extend approval.\")\n return HttpResponseRedirect(url)\n else:\n approval = obj.current_approval\n if approval and approval.extension_count < 3:\n approval.extension_count = approval.extension_count + 1\n approval.valid_to = approval.next_valid_to\n approval.save()\n self.message_user(\n request, \"Successfully extended approval.\")\n else:\n self.message_user(request, \"You can't extend an \"\n \"approval more than 3 times.\")\n return HttpResponseRedirect(url)\n\n admin_form, media = self._approve_form(request, obj, form)\n\n context = {\n 'title': title,\n 'current': obj,\n 'form': admin_form,\n 'media': media,\n 'errors': None,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"approval.html\", context,\n current_app=self.admin_site.name)", "def approve(self,toolname,data):\n\n self.logger.info(\"approving the tool '%s'\" % (toolname))\n\n po = self.catalog.load_pageobject('ToolsStatusInstalledPage',toolname)\n po.goto_page()\n\n # click the approve link\n po.flip_status_to_approved()\n\n\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmVersionPage',toolname)\n\n # check for error on page\n err = po.get_error_info()\n if err:\n # update the version information\n old_version = po.version_form.version.value\n new_version = str(float(old_version) + 0.01)\n po.version_form.submit_form({'version':new_version})\n\n # check for error on page\n err = po.get_error_info()\n if err:\n raise RuntimeError('error found on page: %s' % (err))\n\n # check for the success message\n ok = po.get_success_info()\n if not ok:\n raise RuntimeError('missing success message after updating version')\n\n # click the approve link again ?!?\n po = self.catalog.load_pageobject('ToolsStatusInstalledPage',toolname)\n po.flip_status_to_approved()\n\n # confirm the version\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmVersionPage',toolname)\n po.version_form.submit_form()\n\n # confirm the license\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmLicensePage',toolname)\n po.submit_form(data)\n\n # check for error on page\n err = po.get_error_info()\n if err:\n raise RuntimeError('error found on page: %s' % (err))\n\n # confirm the tool info\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmToolInfoPage',toolname)\n po.approve_tool()\n\n # check for the success message\n po = self.catalog.load_pageobject('ToolsStatusApprovedPage',toolname)\n ok = po.get_success_info()\n if not ok:\n raise RuntimeError('missing success message after approving tool info')", "def approve(self, approver=None, parent_job=None):\r\n if self.status != 'PENDING':\r\n msg = _(\r\n \"Only orders that are in 'PENDING' state can be approved. 
\"\r\n \"Current state of order is '{status}'.\"\r\n ).format(status=self.status)\r\n raise CloudBoltException(msg)\r\n\r\n approve_this_order = False\r\n if self.is_multilevel_approval():\r\n logger.info('models.approve is multilevel!')\r\n self.approve_my_grms(approver)\r\n logger.info(f'models.approve after approve_my_grms ({approver})!')\r\n if self.is_multilevel_approval():\r\n logger.info('models.approve ml approval complete!')\r\n approve_this_order = True\r\n else:\r\n logger.info('models.approve is NOT multilevel!')\r\n #single-level approval\r\n approve_this_order = True\r\n\r\n if not approve_this_order:\r\n #should only kick off if multilevel approvals\r\n msg = _(\r\n \"Cannot fully approve this order. Multilevel approvals not complete. \"\r\n \"Current state of order is '{status}'.\"\r\n ).format(status=self.status)\r\n return [], msg\r\n\r\n try:\r\n # Raise an error to bubble up specific reason as part of the exception\r\n self.group.quota_set.can_use(raise_error=True, **self.net_usage())\r\n except QuotaSetError as quota_set_error:\r\n raise QuotaError(_(\r\n \"Cannot approve order #{order_id} because doing so would exceed the \"\r\n \"quota for group '{group}'. {error}\"\r\n ).format(order_id=self.id, group=self.group, error=quota_set_error))\r\n\r\n # Before we create job records, order the order items to make\r\n # sure decom jobs are queued before prov jobs. the job engine\r\n # may still parallelize them, that's something we can revisit\r\n # later. In the meantime, customers can set the concurrency\r\n # level to 1 to prevent this.\r\n # we're taking advantage of the fact that \"decom\" comes before\r\n # \"prov\" in the alphabet here.\r\n order_items = [oi.cast() for oi in self.top_level_items.order_by(\r\n \"real_type\", \"add_date\")]\r\n\r\n order_items, msg = self.__filter_illegal_order_items(order_items)\r\n if not order_items:\r\n msg = _(\"{message} There are no valid order items left. This order is \"\r\n \"being marked as complete.\").format(message=msg)\r\n self.complete(\"SUCCESS\")\r\n return [], msg\r\n\r\n self.status = \"ACTIVE\"\r\n self.approved_by = approver\r\n self.approve_date = get_current_time()\r\n self.save()\r\n\r\n history_msg = _(\"The '{order}' order has been approved.\").format(order=escape(self))\r\n self.add_event('APPROVED', history_msg, profile=self.owner)\r\n\r\n # run pre order execution hook\r\n try:\r\n cbhooks.run_hooks(\"pre_order_execution\", order=self)\r\n except cbhooks.exceptions.HookFailureException as e:\r\n self.status = \"FAILURE\"\r\n self.save()\r\n msg = _(\"Failed to run hook for order approval. Status: {status},\"\r\n \" Output: {output}, Errors: {errors}\").format(status=e.status, output=e.output, errors=e.errors)\r\n\r\n history_msg = _(\"The '{order}' order has failed.\").format(order=escape(self))\r\n self.add_event('FAILED', history_msg, profile=self.owner)\r\n raise CloudBoltException(msg)\r\n\r\n from jobs.models import Job\r\n # Saving job objects will cause them to be kicked off by the\r\n # job engine within a minute\r\n jobs = []\r\n\r\n for order_item in order_items:\r\n jobtype = getattr(order_item, 'job_type', None)\r\n if not jobtype:\r\n # the job type will default to the first word of the class type\r\n # ex. \"provision\", \"decom\"\r\n\r\n jobtype = str(order_item.real_type).split(\" \", 1)[0]\r\n quantity = 1\r\n # quantity is a special field on order_items. 
If an\r\n # order_item has the quantity field, kick off that many\r\n # jobs\r\n if hasattr(order_item, 'quantity') and \\\r\n order_item.quantity is not None and \\\r\n order_item.quantity != '':\r\n quantity = int(order_item.quantity)\r\n for i in range(quantity):\r\n job = Job(job_parameters=order_item,\r\n type=jobtype,\r\n owner=self.owner,\r\n parent_job=parent_job)\r\n job.save()\r\n\r\n # Associate the job with any server(s)\r\n # This may seem unnecessary because it's done when most jobs\r\n # run, but it's needed at the very least for scheduled server\r\n # modification jobs (for changing resources) so they show up on\r\n # the server as scheduled before they actually run\r\n servers = []\r\n if hasattr(order_item, \"server\"):\r\n servers = [order_item.server]\r\n elif hasattr(order_item, \"servers\"):\r\n servers = order_item.servers.all()\r\n for server in servers:\r\n server.jobs.add(job)\r\n\r\n jobs.append(job)\r\n\r\n # If it didn't make any jobs, just call it done\r\n if not jobs:\r\n self.complete(\"SUCCESS\")\r\n\r\n return jobs, msg", "def approve_person(message, target):\n users = hf.get_users()\n if target == 'me':\n return\n for user in users:\n if user[\"name\"] == target:\n approver = message._get_user_id()\n admins = hf.get_admins()\n for admin in admins:\n if admin[\"id\"] == approver:\n if user is not None:\n if user[\"approval_level\"] == \"unapproved\":\n message.reply(\"Approved user: <@{}>\".format(target))\n user[\"approval_level\"] = \"approved\"\n hf.save_users(users)\n return\n elif user[\"approval_level\"] == \"denied\":\n message.reply(Strings['MARKED_DENIED'])\n return\n else:\n message.reply(\":x: {} is already: {}.\".format(target,\n user[\"approval_level\"]))\n return\n else:\n message.reply(Strings['USER_NOT_FOUND'].format(target))\n return\n\n message.reply(Strings['CANT_APPROVE'])", "def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()", "def can_approve(self, user, **data):\n raise Return(False)", "def approve(self, approved_by=\"system\"):\n\n self.confirm_state(completed=False, cancelled=False)\n\n self.is_valid(\"task invalid before approval\")\n\n # We approve the task before running actions,\n # that way if something goes wrong we know if it was approved,\n # when it was approved, and who approved it.\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n\n # approve all actions\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while approving task\")\n\n self.is_valid(\"task invalid after approval\")\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()", "def update_approve(to_address: str, delta_amount: int) -> int:\n raise NotImplementedError()" ]
[ "0.6677485", "0.6585099", "0.65434873", "0.6402488", "0.6379898", "0.630007", "0.6281091", "0.6263889", "0.6210674", "0.6187286", "0.5938496", "0.5935341", "0.59223574", "0.59098077", "0.5873319", "0.5873149", "0.58564305", "0.582476", "0.58143497", "0.5805873", "0.57971424", "0.5737113", "0.57028764", "0.5699925", "0.56819963", "0.56440115", "0.5616441", "0.56121284", "0.5587305", "0.55820477" ]
0.73413444
0
Checks if boundary condition 'bc' is valid. Each bc must be either 'dirichlet' or 'neumann'
def checkBC(bc): if isinstance(bc, string_types): bc = [bc, bc] assert isinstance(bc, list), 'bc must be a list' assert len(bc) == 2, 'bc must have two elements' for bc_i in bc: assert isinstance(bc_i, string_types), "each bc must be a string" assert bc_i in ['dirichlet', 'neumann'], ("each bc must be either," "'dirichlet' or 'neumann'") return bc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_branches(num_branches, num_blocks, in_channels, num_channels):\n if num_branches != len(num_blocks):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_BLOCKS({len(num_blocks)})'\n raise ValueError(error_msg)\n if num_branches != len(num_channels):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_CHANNELS({len(num_channels)})'\n raise ValueError(error_msg)\n if num_branches != len(in_channels):\n error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'\n raise ValueError(error_msg)", "def check_tab_conditions(self, symbol, bcs):\n # Check symbol domain\n domain = symbol.domain[0]\n mesh = self.mesh[domain]\n\n if domain != \"current collector\":\n raise pybamm.ModelError(\n \"\"\"Boundary conditions can only be applied on the tabs in the domain\n 'current collector', but {} has domain {}\"\"\".format(\n symbol, domain\n )\n )\n # Replace keys with \"left\" and \"right\" as appropriate for 1D meshes\n if isinstance(mesh, pybamm.SubMesh1D):\n # send boundary conditions applied on the tabs to \"left\" or \"right\"\n # depending on the tab location stored in the mesh\n for tab in [\"negative tab\", \"positive tab\"]:\n if any(tab in side for side in list(bcs.keys())):\n bcs[mesh.tabs[tab]] = bcs.pop(tab)\n # if there was a tab at either end, then the boundary conditions\n # have now been set on \"left\" and \"right\" as required by the spatial\n # method, so there is no need to further modify the bcs dict\n if all(side in list(bcs.keys()) for side in [\"left\", \"right\"]):\n pass\n # if both tabs are located at z=0 then the \"right\" boundary condition\n # (at z=1) is the condition for \"no tab\"\n elif \"left\" in list(bcs.keys()):\n bcs[\"right\"] = bcs.pop(\"no tab\")\n # else if both tabs are located at z=1, the \"left\" boundary condition\n # (at z=0) is the condition for \"no tab\"\n else:\n bcs[\"left\"] = bcs.pop(\"no tab\")\n\n return bcs", "def check_parameter_bounds(self):\n for p in self.variables.keys():\n data = self.get_attr(p)\n if isinstance(data, dc_cp):\n if data.val > data.max_val + err:\n msg = (\n 'Invalid value for ' + p + ': ' + p + ' = ' +\n str(data.val) + ' above maximum value (' +\n str(data.max_val) + ') at component ' + self.label +\n '.')\n logger.warning(msg)\n\n elif data.val < data.min_val - err:\n msg = (\n 'Invalid value for ' + p + ': ' + p + ' = ' +\n str(data.val) + ' below minimum value (' +\n str(data.min_val) + ') at component ' + self.label +\n '.')\n logger.warning(msg)\n\n elif isinstance(data, dc_cc) and data.is_set:\n expr = self.get_char_expr(data.param, **data.char_params)\n data.char_func.get_domain_errors(expr, self.label)\n\n elif isinstance(data, dc_gcc) and data.is_set:\n for char in data.elements:\n char_data = self.get_attr(char)\n expr = self.get_char_expr(\n char_data.param, **char_data.char_params)\n char_data.char_func.get_domain_errors(expr, self.label)", "def boundary_conditions(self):\n pass", "def make_boundary_conditions(constraints, states, costates, parameters, coparameters, cost, derivative_fn, location):\n prefix_map = (('initial', (r'([\\w\\d\\_]+)_0', r\"_x0['\\1']\", sympify('-1'))),\n ('terminal', (r'([\\w\\d\\_]+)_f', r\"_xf['\\1']\", sympify('1'))))\n prefix_map = dict(prefix_map)\n bc_list = []\n for x in constraints[location]:\n bc = sanitize_constraint_expr(x, states, location, prefix_map)\n bc_list.append(bc)\n\n *_, sign = dict(prefix_map)[location]\n cost_expr = sign * cost\n bc_list += [costate - derivative_fn(cost_expr, state) for state, costate in zip(states, 
costates)]\n bc_list += [coparameter - derivative_fn(cost_expr, parameter)\n for parameter, coparameter in zip(parameters, coparameters)]\n\n return bc_list", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def model_check(blk):\n # Check temperature bounds\n if value(blk.temperature) < blk.temperature.lb:\n _log.error('{} Temperature set below lower bound.'.format(blk.name))\n if value(blk.temperature) > blk.temperature.ub:\n _log.error('{} Temperature set above upper bound.'.format(blk.name))\n\n # Check pressure bounds\n if value(blk.pressure) < blk.pressure.lb:\n _log.error('{} Pressure set below lower bound.'.format(blk.name))\n if value(blk.pressure) > blk.pressure.ub:\n _log.error('{} Pressure set above upper bound.'.format(blk.name))", "def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)", "def validate_branch_config(branch_cfg, branch, n):\n res = False\n if len(branch) == 4:\n if \"double\" in 
branch_cfg:\n diff1 = branch[0]-branch[1]\n diff3 = branch[2]-branch[3]\n diff2 = branch[1]%n-branch[2]%n\n diff_adj_clk = [-3, 1]\n diff_adj_clk_ctr = [-e for e in diff_adj_clk]\n diff_opp = [2, -2]\n if \"adjacent\" and \"clockwise\" in branch_cfg and diff1 == diff3 == 0 and diff2 in diff_adj_clk:\n res = True\n elif \"adjacent\" and \"counter clockwise\" in branch_cfg and diff1 == diff3 == 0 and diff2 in diff_adj_clk_ctr:\n res = True\n elif \"opposite\" in branch_cfg and diff1 == diff3 == 0 and diff2 in diff_opp:\n res = True\n elif \"single\" in branch_cfg:\n res = True\n elif len(branch) == 2:\n res = True\n return res", "def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x", "def set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")", "def is_bh(v) -> bool:\n try:\n _validate(v, prefixes=[b'B'])\n except (ValueError, TypeError):\n return False\n return True", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def _check_condition_set_currency_and_balance_32B(self):\n type_of_event = FFXMMConfirmationOutUtils_Override.get_event_type_MT330(self.acm_obj)\n\n return type_of_event in ['CHNG', 'CINT', 'CONF']", "def set_internal_boundary_conditions(self, model):\n\n def boundary_gradient(left_symbol, right_symbol):\n pybamm.logger.debug(\n \"Calculate boundary gradient ({} and {})\".format(\n left_symbol, right_symbol\n )\n )\n left_domain = left_symbol.domain[0]\n right_domain = right_symbol.domain[0]\n\n left_mesh = self.spatial_methods[left_domain].mesh[left_domain]\n right_mesh = self.spatial_methods[right_domain].mesh[right_domain]\n\n left_symbol_disc = self.process_symbol(left_symbol)\n right_symbol_disc = self.process_symbol(right_symbol)\n\n return self.spatial_methods[left_domain].internal_neumann_condition(\n left_symbol_disc, right_symbol_disc, left_mesh, right_mesh\n )\n\n bc_keys = list(self.bcs.keys())\n\n internal_bcs = {}\n for var in model.boundary_conditions.keys():\n if isinstance(var, pybamm.Concatenation):\n children = var.orphans\n\n first_child = children[0]\n next_child = children[1]\n\n lbc = self.bcs[var][\"left\"]\n rbc = (boundary_gradient(first_child, next_child), \"Neumann\")\n\n if first_child not in bc_keys:\n internal_bcs.update({first_child: {\"left\": lbc, \"right\": rbc}})\n\n for current_child, next_child in zip(children[1:-1], children[2:]):\n lbc = rbc\n rbc = (boundary_gradient(current_child, next_child), \"Neumann\")\n if current_child not in bc_keys:\n internal_bcs.update(\n {current_child: {\"left\": lbc, \"right\": rbc}}\n )\n\n lbc = rbc\n rbc = self.bcs[var][\"right\"]\n if children[-1] not in bc_keys:\n internal_bcs.update({children[-1]: {\"left\": lbc, \"right\": rbc}})\n\n self.bcs.update(internal_bcs)", "def is_valid_bathroom(input_string):\n assert input_string is not None\n try:\n br = float(input_string)\n if br < 0:\n raise ValueError\n if round(br) - br == 0 or round(br) - br == 0.5:\n return True\n else:\n return False\n except ValueError:\n return False", "def test_attempt_bc_correction_golay12_exceeds_range(self):\r\n\r\n # Has 4 changes from a valid golay code\r\n curr_bc = \"TCGTGCACTTGT\"\r\n all_bcs = [\"AGCAGCACTTGT\", \"ACAGAGTCGGCT\"]\r\n barcode_type = \"golay_12\"\r\n actual_bc, actual_errs = attempt_bc_correction(curr_bc,\r\n all_bcs, barcode_type)\r\n\r\n expected_bc = None\r\n expected_errs = 4\r\n self.assertEqual(actual_bc, expected_bc)\r\n self.assertEqual(actual_errs, expected_errs)", "def test_b2_calc_bounds_column(self):\n type_of = \"c\"\n self.assert_calc_bounds(type_of)\n\n\n\n # config.NR", "def brepalgo_IsValid(*args):\n return _BRepAlgo.brepalgo_IsValid(*args)", "def check_par_cut(self, conn_components):\r\n count_tot = 0\r\n count_neg = 0\r\n for i in range(len(conn_components)):\r\n conn1 = 
conn_components[i]\r\n for j in range(i + 1, len(conn_components)):\r\n conn2 = conn_components[j]\r\n for act1 in conn1:\r\n for act2 in conn2:\r\n count_tot = count_tot + 1\r\n if not ((act1 in self.outgoing and act2 in self.outgoing[act1]) and (\r\n act1 in self.ingoing and act2 in self.ingoing[act1])):\r\n count_neg = count_neg + 1\r\n\r\n if count_neg <= shared_constants.PAR_CUT_CONSTANT * count_tot:\r\n return True\r\n\r\n return False", "def check_bp(self):\n return self.min_basepairs <= self.seqdata.basepairs <= self.max_basepairs", "def check_bc(R,D,p,mn):\n R -= (R/D>=p)*np.floor(R/D)*D\n R -= (R/D<=mn)*np.floor(R/D)*D\n return R", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def check_invalid_bbb_args(config):\n if config.mnet_only and config.hyper_gauss_init:\n warnings.warn('Option \"hyper_gauss_init\" has no effect if no ' +\n 'hypernetwork is used.')\n if config.keep_orig_init and not \\\n (config.mnet_only or config.hyper_gauss_init):\n warnings.warn('Option \"keep_orig_init\" has no effect if main ' +\n 'network has no parameters or option ' +\n '\"hyper_gauss_init\" is not activated.')\n if not config.mnet_only and config.hyper_gauss_init and \\\n config.normal_init:\n warnings.warn('Option \"normal_init\" has no effect if ' +\n '\"hyper_gauss_init\" is activated.')\n if config.mnet_only and not config.keep_orig_init and \\\n config.normal_init:\n warnings.warn('Option \"normal_init\" has no effect for main net ' +\n 'initialization if \"keep_orig_init\" is not ' +\n 'activated.')\n if 
config.local_reparam_trick:\n if hasattr(config, 'dropout_rate') and config.dropout_rate != -1:\n raise ValueError('Dropout not implemented for network with ' +\n 'local reparametrization trick.')\n if hasattr(config, 'specnorm') and config.specnorm:\n raise ValueError('Spectral norm not implemented for network ' +\n 'with local reparametrization trick.')\n if hasattr(config, 'batchnorm') and config.batchnorm or \\\n hasattr(config, 'no_batchnorm') and not config.no_batchnorm:\n raise ValueError('Batchnorm not implemented for network ' +\n 'with local reparametrization trick.')\n if not config.local_reparam_trick and config.disable_lrt_test:\n warnings.warn('Option \"disable_lrt_test\" has no effect if the local-'\n 'reparametrization trick is not used.')\n\n if hasattr(config, 'mean_only') and config.mean_only:\n if hasattr(config, 'kl_scale') and config.kl_scale != 0 or \\\n hasattr(config, 'kl_schedule') and config.kl_schedule != 0:\n raise ValueError('Prior-matching is not applicable for ' +\n 'deterministic networks.')\n if config.regularizer != 'mse':\n raise ValueError('Only \"mse\" regularizer can be applied to ' +\n 'deterministic networks.')\n if config.local_reparam_trick:\n raise ValueError('Local-reparametrization trick cannot be ' +\n 'applied to non-Gaussian networks.')\n if config.hyper_gauss_init:\n raise ValueError('Gaussian-hypernet init cannot be applied to ' +\n 'non-Gaussian networks.')\n if hasattr(config, 'use_prev_post_as_prior') and \\\n config.use_prev_post_as_prior:\n raise ValueError('Option \"use_prev_post_as_prior\" cannot be ' +\n 'enforced for deterministic networks.')\n if config.train_sample_size > 1:\n warnings.warn('A \"train_sample_size\" greater than 1 doesn\\'t ' +\n 'make sense for a deterministic network.')\n if config.val_sample_size > 1:\n warnings.warn('A \"val_sample_size\" greater than 1 doesn\\'t ' +\n 'make sense for a deterministic network.')\n if config.disable_lrt_test:\n warnings.warn('Option \"disable_lrt_test\" not applicable to ' +\n 'deterministic networks.')\n if config.use_logvar_enc:\n warnings.warn('Option \"use_logvar_enc\" not applicable to ' +\n 'deterministic networks.')\n if config.regularizer != 'mse':\n if hasattr(config, 'hnet_reg_batch_size') and \\\n config.hnet_reg_batch_size != -1:\n raise NotImplementedError('Mini-batching of regularizer other ' +\n 'than the MSE reg is not implemented ' +\n 'yet.')\n\n if config.radial_bnn:\n if config.local_reparam_trick:\n raise ValueError('Local-reparametrization trick is not compatible '+\n 'with Radial BNNs since the weights posterior is '+\n 'not Gaussian anymore.')\n if config.regularizer != 'mse':\n raise NotImplementedError('Only the MSE regularizer has been ' +\n 'implemented for radial BNN ' +\n 'distributions.')\n if hasattr(config, 'use_prev_post_as_prior') and \\\n config.use_prev_post_as_prior:\n raise NotImplementedError('Option \"use_prev_post_as_prior\" not ' +\n 'implemented for Radial BNN.')", "def test_attempt_bc_correction_generic(self):\r\n\r\n curr_bc = \"GGCAGCACTA\"\r\n all_bcs = [\"AACTCGTCGA\", \"AGCAGCACTT\", \"ACAGAGTCGG\"]\r\n barcode_type = 10\r\n actual_bc, actual_errs = attempt_bc_correction(curr_bc,\r\n all_bcs, barcode_type)\r\n\r\n expected_bc = \"AGCAGCACTT\"\r\n expected_errs = 2\r\n self.assertEqual(actual_bc, expected_bc)\r\n self.assertEqual(actual_errs, expected_errs)", "def test_elemental_composition_constraint_validation():\n name = \"Property Band gap\"\n elements = [\"Ga\", \"N\"]\n\n # Minimum can't be less than 0\n minimum = -1\n 
maximum = 2\n\n try:\n c = ElementalCompositionConstraint(name=name,\n elements=elements,\n minimum=minimum,\n maximum=maximum)\n assert False\n except CitrinationClientError:\n pass\n\n # Maximum can't be greater than 100\n minimum = 30\n maximum = 120\n\n try:\n c = ElementalCompositionConstraint(name=name,\n elements=elements,\n minimum=minimum,\n maximum=maximum)\n assert False\n except CitrinationClientError:\n pass\n\n # Maximum can't be less than minimum\n minimum = 90\n maximum = 60\n\n try:\n c = ElementalCompositionConstraint(name=name,\n elements=elements,\n minimum=minimum,\n maximum=maximum)\n assert False\n except CitrinationClientError:\n pass\n\n # Valid values are OK\n minimum = 20\n maximum = 50\n\n c = ElementalCompositionConstraint(name=name,\n elements=elements,\n minimum=minimum,\n maximum=maximum)", "def process_boundary_conditions(self, model):\n\n processed_bcs = {}\n\n # process and set pybamm.variables first incase required\n # in discrisation of other boundary conditions\n for key, bcs in model.boundary_conditions.items():\n processed_bcs[key] = {}\n\n # check if the boundary condition at the origin for sphere domains is other\n # than no flux\n for subdomain in key.domain:\n if (\n self.mesh[subdomain].coord_sys\n in [\"spherical polar\", \"cylindrical polar\"]\n and list(self.mesh.geometry[subdomain].values())[0][\"min\"] == 0\n ):\n if bcs[\"left\"][0].value != 0 or bcs[\"left\"][1] != \"Neumann\":\n raise pybamm.ModelError(\n \"Boundary condition at r = 0 must be a homogeneous \"\n \"Neumann condition for {} coordinates\".format(\n self.mesh[subdomain].coord_sys\n )\n )\n\n # Handle any boundary conditions applied on the tabs\n if any(\"tab\" in side for side in list(bcs.keys())):\n bcs = self.check_tab_conditions(key, bcs)\n\n # Process boundary conditions\n for side, bc in bcs.items():\n eqn, typ = bc\n pybamm.logger.debug(\"Discretise {} ({} bc)\".format(key, side))\n processed_eqn = self.process_symbol(eqn)\n processed_bcs[key][side] = (processed_eqn, typ)\n\n return processed_bcs", "def check(self):\n\n Rbo = self.get_Rbo()\n alpha = self.comp_alpha()\n\n if self.W0 < (self.W2 + self.W3):\n raise S51_WCheckError(\"You must have W2+W3 < W0\")\n\n if Rbo < self.H0 + self.H2:\n raise S51_RHCheckError(\"You must have H0+H2 < Rbo\")\n\n if alpha > pi / 2:\n raise S51_AlphaCheckError(\"You must have alpha < pi/2\")", "def test_weyl_specialize_fsim_abmb(self, aaa=0.456, bbb=0.132):\n a, b, c = aaa, bbb, -bbb\n for da, db, dc in DELTAS:\n for k1l, k1r, k2l, k2r in K1K2SB:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n self.check_two_qubit_weyl_specialization(\n k1 @ Ud(a + da, b + db, c + dc) @ k2,\n 0.999,\n TwoQubitWeylfSimabmbEquiv,\n {\"rx\": 7, \"ry\": 4, \"rxx\": 1, \"ryy\": 1, \"rzz\": 1},\n )", "def test_attempt_bc_correction_golay12(self):\r\n\r\n curr_bc = \"GGCAGCACTTGT\"\r\n all_bcs = [\"AACTCGTCGATG\", \"AGCAGCACTTGT\", \"ACAGAGTCGGCT\"]\r\n barcode_type = \"golay_12\"\r\n actual_bc, actual_errs = attempt_bc_correction(curr_bc,\r\n all_bcs, barcode_type)\r\n\r\n expected_bc = \"AGCAGCACTTGT\"\r\n expected_errs = 1\r\n self.assertEqual(actual_bc, expected_bc)\r\n self.assertEqual(actual_errs, expected_errs)" ]
[ "0.59690285", "0.5924145", "0.5829166", "0.5814814", "0.58086747", "0.5798797", "0.57984674", "0.5739989", "0.57389224", "0.5719257", "0.5700577", "0.56972617", "0.56966865", "0.56768215", "0.5673192", "0.56680053", "0.565017", "0.56335694", "0.562896", "0.5600559", "0.5596921", "0.558718", "0.5575096", "0.5561437", "0.55539334", "0.55455476", "0.5523504", "0.5504893", "0.5497274", "0.5488989" ]
0.68956906
0
Create 1D derivative operator from cellcenters to nodes this means we go from n to n+1
def ddxCellGrad(n, bc):
    bc = checkBC(bc)

    D = sp.spdiags((np.ones((n+1, 1))*[-1, 1]).T, [-1, 0], n+1, n,
                   format="csr")
    # Set the first side
    if(bc[0] == 'dirichlet'):
        D[0, 0] = 2
    elif(bc[0] == 'neumann'):
        D[0, 0] = 0
    # Set the second side
    if(bc[1] == 'dirichlet'):
        D[-1, -1] = -2
    elif(bc[1] == 'neumann'):
        D[-1, -1] = 0
    return D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ddxCellGradBC(n, bc):\n bc = checkBC(bc)\n\n ij = (np.array([0, n]), np.array([0, 1]))\n vals = np.zeros(2)\n\n # Set the first side\n if(bc[0] == 'dirichlet'):\n vals[0] = -2\n elif(bc[0] == 'neumann'):\n vals[0] = 0\n # Set the second side\n if(bc[1] == 'dirichlet'):\n vals[1] = 2\n elif(bc[1] == 'neumann'):\n vals[1] = 0\n D = sp.csr_matrix((vals, ij), shape=(n+1, 2))\n return D", "def DDG(self, n, e, r, f):\n pre = (-e[:, None] + np.divide.outer((n - 1), r))**2\n pre -= np.divide.outer((n - 1), r**2)\n return pre*f", "def _compute_nodes_1d(npts, ilbds1d): # pylint: disable=line-too-long\n if npts % 2 == 0:\n raise ValueError(\"Please enter odd npts\")\n ind = np.arange(1, npts + 1)\n nodes = 0.5 * (1 - np.cos(np.pi * ind / (npts + 1)))\n return nodes * (ilbds1d[1] - ilbds1d[0]) + ilbds1d[0]", "def createGridNodesinFRF(x0, y0, dx, dy, ni, nj):\n assert dx.shape[0] == ni-1, 'This function assumes that there are n-1 dx values'\n\n if np.mean(np.diff(dx)) != np.mean(dx): # vairable spacing cell array\n icoord = np.zeros(ni) # assume\n jcoord = np.zeros(nj)\n icoord[0] = x0\n jcoord[0] = y0\n for xx, dxx in enumerate(dx):\n icoord[xx+1] = icoord[xx] - dxx # assumes offshore origin\n for yy, dyy in enumerate(dy):\n jcoord[yy+1] = jcoord[yy] - dyy\n else:\n raise NotImplementedError\n\n return icoord, jcoord", "def derivative_ex(dirichl_space, neumann_space, ep_in, ep_ex, kappa, operator_assembler):\n phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)\n ep = ep_ex/ep_in\n\n dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n B = 1/ep * dF - dP\n\n F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n A = F - P\n\n ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)\n ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n D = 1/ep * (ddP - ddF)\n\n dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)\n dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n C = dF0 - 1.0/ep*dP0\n\n A_sys = bempp.api.BlockedOperator(2, 2)\n A_sys[0, 0] = (0.5*(1.0 + (1.0/ep))*phi_id) + B\n A_sys[0, 1] = -A\n A_sys[1, 0] = D\n A_sys[1, 1] = (0.5*(1.0 + (1.0/ep))*dph_id) - C\n\n return A_sys", "def dpdnc(x, y):\n ed = ED.EdData(x, y)\n dpdnc_ed(x, y, ed)\n ed.complete_path()\n ed.complete_reverse()\n ed.complete_indexes()\n return ed", "def nodalGrad(self):\n if getattr(self, '_nodalGrad', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n G = ddx(n[0])\n elif(self.dim == 2):\n D1 = sp.kron(speye(n[1]+1), ddx(n[0]))\n D2 = sp.kron(ddx(n[1]), speye(n[0]+1))\n G = sp.vstack((D1, D2), format=\"csr\")\n elif(self.dim == 3):\n D1 = kron3(speye(n[2]+1), speye(n[1]+1), ddx(n[0]))\n D2 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]+1))\n D3 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]+1))\n G = sp.vstack((D1, D2, D3), format=\"csr\")\n # Compute lengths of 
cell edges\n L = self.edge\n self._nodalGrad = sdiag(1/L)*G\n return self._nodalGrad", "def diffnc(X, dt):\n [traj_length, D] = X.shape\n XX = np.zeros((traj_length + 2, D))\n for d in range(D):\n XX[:, d] = np.convolve(X[:, d], np.array([1, 0, -1]) / 2.0 / dt)\n\n X = XX[1:traj_length + 1, :]\n X[0, :] = X[1, :]\n X[traj_length - 1, :] = X[traj_length - 2, :]\n\n return X", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot", "def calcDist(self):\n rhoOp = self.rhoOp\n s = np.array([[1,0,0],[0,-1,0],[0,0,1]])\n sAdj = s.conj().T \n symRhoOp = np.dot(s,np.dot(rhoOp,sAdj))\n self.dist = Node.S1(rhoOp, symRhoOp)", "def dndt2(jx, iy, h, n, u, v, dx, dy) :\n p5 = np.float32(0.5)\n depth_jm0im0 = h[jx, iy ]+n[jx, iy]\n depth_jp1im0 = h[jx+1,iy] +n[jx+1,iy]\n depth_jm1im0 = h[jx-1,iy] +n[jx-1,iy]\n depth_jm0ip1 = h[jx, iy+1]+n[jx, iy+1]\n depth_jm0im1 = h[jx, iy-1]+n[jx, iy-1]\n \n hx_jp1 = u[jx+1,iy]*(depth_jm0im0 + depth_jp1im0)*p5\n hx_jm0 = u[jx, iy]*(depth_jm1im0 + depth_jm0im0)*p5\n \n \n hy_ip1 = v[jx,iy+1]*(depth_jm0im0 + depth_jm0ip1)*p5\n hy_im0 = v[jx,iy ]*(depth_jm0im1 + depth_jm0im0)*p5\n \n # assume u and v are zero on edge\n dhx = (hx_jp1-hx_jm0)/dx#[jx,iy]\n dhy = (hy_ip1-hy_im0)/dy#[jx,iy]\n\n \n return ( -dhx-dhy )", "def dE_mdn(self, x, y, t, w1 = None, w2 = None):\n if w2 == None:\n w2 = self.w2\n M = int(self.M)\n # avoid underrun\n \n alpha, sigma, mu = self.getMixtureParams(y.T)\n #import pdb; pdb.set_trace()\n \n #T = t.T[None, None, :] # note: np.tile is slower than this notation\n T = t.T[None, :]\n \n phi = self._phi(T, mu, sigma)\n aphi = alpha*phi\n pi = aphi / np.sum(aphi, 0)\n \n # derivatives of E with respect to the output variables (s. Bishop 1995, chp. 
6.4)\n dE_dy_alpha = alpha - pi\n dE_dy_sigma = - 0.5 * pi * ((np.sum((T-mu)**2 , 1) / sigma) - self.c)\n dE_dy_mu = pi[:,np.newaxis,:] * (mu - T) / sigma[:,np.newaxis,:]\n\n dk = np.zeros([self.ny, x.shape[0]])\n dk[0:M,:] = dE_dy_alpha\n dk[M:2*M,:] = dE_dy_sigma\n \n dk[2*M:] = np.reshape(dE_dy_mu, [M*self.c, x.shape[0]])\n \n # back-propagate the dks\n #t0=datetime.now()\n dEnw1, dEnw2 = self.backward(x, dk, None, w2)\n #print 'eval of dE_mdn:' + str((datetime.now()-t0))\n #dj = (1 - self.z[1:]**2) * np.dot(w2[:,1:].T, dk)\n # evaluate derivatives with respect to the weights\n #dEnw1 = (dj[:,:,np.newaxis]*x[np.newaxis,:,:]).transpose(1,0,2)\n #dEnw2 = (dk[:,:,np.newaxis]*self.z.T[np.newaxis,:,:]).transpose(1,0,2)\n return dEnw1, dEnw2", "def reflect(d,n):\n\t# coefficent c, because easier\n\tc = 2 * dot(d,n)\n\treturn [di - c * ni for (di, ni) in zip(d,n)]", "def n1derivative_clee(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n \n a=compute_n1_py(clpp,norms,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclee.txt'.format(keys[k]),der)\n return derlist", "def derivative(self, *args):\n if self.i_dim == 0:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def gradient(f, *varargs):\n N = len(f.shape) # number of dimensions\n n = len(varargs)\n if n == 0:\n dx = [1.0]*N\n elif n == 1:\n dx = [varargs[0]]*N\n elif n == N:\n dx = list(varargs)\n else:\n raise SyntaxError, \"invalid number of arguments\"\n\n # use central differences on interior and first differences on endpoints\n\n outvals = []\n\n # create slice objects --- initially all are [:, :, ..., :]\n slice1 = [slice(None)]*N\n slice2 = [slice(None)]*N\n slice3 = [slice(None)]*N\n\n otype = f.dtype.char\n if otype not in ['f', 'd', 'F', 'D']:\n otype = 'd'\n\n for axis in range(N):\n # select out appropriate parts for this dimension\n out = zeros(f.shape, f.dtype.char)\n slice1[axis] = slice(1, -1)\n slice2[axis] = slice(2, None)\n slice3[axis] = slice(None, -2)\n # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0\n out[slice1] = (f[slice2] - f[slice3])/2.0\n slice1[axis] = 0\n slice2[axis] = 1\n slice3[axis] = 0\n # 1D equivalent -- out[0] = (f[1] - f[0])\n out[slice1] = (f[slice2] - f[slice3])\n slice1[axis] = -1\n slice2[axis] = -1\n slice3[axis] = -2\n # 1D equivalent -- out[-1] = (f[-1] - f[-2])\n out[slice1] = (f[slice2] - f[slice3])\n\n # divide by step size\n outvals.append(out / dx[axis])\n\n # reset the slice object in this dimension to \":\"\n slice1[axis] = slice(None)\n slice2[axis] = slice(None)\n slice3[axis] = slice(None)\n\n if N == 1:\n return 
outvals[0]\n else:\n return outvals", "def compute_ghost_series(self,terms):\n\t\tp=self.p\n\t\tcomp=self.comp\n\t\tf1=self.f1\n\t\tf2=self.f2\n\n\t\tghost_coefs = [[] for i in range(terms+1)]\n\n\n\t\tk=comp;\n\t\tif k==0 or k==1:\n\t\t\tk=k+p-1\n\n\t\t## Precompute the needed dimensions\n\t\tself.f1v = [f1(k)]\n\t\tself.f2v = [f2(k)]\n\t\tk = k + p-1\n\t\twhile self.f1v[len(self.f1v)-1] <= terms+1:\n\t\t\tself.f1v += [f1(k)]\n\t\t\tself.f2v += [f2(k)]\n\t\t\tk = k + p-1\n\t\t\n\t\t## Starting at weight 2, we run through weights in the component,\n\t\t## compute the associated indices, and then record the weights at those\n\t\t## indices with appropriate multiplicities\n\n\t\tk = comp;\n\t\tif k==0 or k==1:\n\t\t\tk=k+p-1\n\t\tn = 0\n\n\t\tinds = range(self.f1v[n]+1,self.f1v[n]+self.f2v[n])\n\t\twhile (len(inds)==0 or inds[0]<=terms+1):\n\t\t\t## This loops adds the weights to the appropriate indices with the appropriate multiplicities\n\t\t\tfor m in range(floor((len(inds)+1)/2)):\n\t\t\t\tif m < floor(len(inds)/2):\n\t\t\t\t\tif inds[m]<=terms:\n\t\t\t\t\t\tghost_coefs[inds[m]] += [(k,m+1)]\n\t\t\t\t\tif (inds[len(inds)-1-m]<=terms):\n\t\t\t\t\t\tghost_coefs[inds[len(inds)-1-m]] += [(k,m+1)]\n\t\t\t\telse:\n\t\t\t\t\tif inds[m]<=terms:\n\t\t\t\t\t\tghost_coefs[inds[m]] += [(k,m+1)]\n\t\t\tk = k + p-1\n\t\t\tn = n + 1\n\t\t\tinds = range(self.f1v[n]+1,self.f1v[n]+self.f2v[n])\n\t\tself.series=ghost_coefs", "def diff(x, n=1, axis=-1):\r\n return DiffOp(n=n, axis=axis)(x)", "def _create_neighbor_distances(self):\n # --------------------------------\n # Create Directions from Point\n # --------------------------------\n diff = [[0 for _ in range(self._dim)]]\n curr = diff[0][:]\n for i in range(self._dim):\n # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0.\n curr[i] = 1\n diff.append(curr[:])\n curr[i] = -1\n diff.append(curr[:])\n curr[i] = 0\n # Remove initial blank unit vector with all values at 0.\n diff.pop(0)\n del curr\n\n # --------------------------------\n # Breadth First Search\n # --------------------------------\n distances = []\n queue = [[0 for _ in range(self._dim)]]\n\n while queue:\n # Get latest distance\n curr = queue.pop()\n\n # The distance from any possible point should be less than or equal to the number of dimensions.\n # This can be shown using basic calculations.\n if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \\\n np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances:\n continue\n\n # Calculate all distances from child and add to queue\n queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))])\n\n # Add current distance to distances\n distances.append(curr)\n\n # Return all possible neighbor distances\n return np.array(distances, dtype=int)", "def disconnected_graph(n):\n g = nx.DiGraph()\n for i in range(0, n):\n g.add_node(i)\n return g", "def create_derivative_graph(f, xrange, n):\n plot_points = []\n for x in xrange:\n plot_points.append(nth_derivative(f, x, n))\n return plot_points", "def centderiv(A, dim='x', boundary='periodic'):\n return centdiff(A, dim=dim, boundary=boundary)/centspacing(A[dim])", "def n1derivative_clte(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n 
N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array1001[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array999[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n keys=['TT','EE','EB','TE','TB']\n\n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclte.txt'.format(keys[k]),der)\n return derlist", "def discretizespace(self):\n \n # Grid\n self.xgriddim = ( self.x0_n , self.x1_n )\n \n self.xd = [ None , None ]\n self.xd[0] = np.linspace( self.DS.x_lb[0] , self.DS.x_ub[0] , self.x0_n )\n self.xd[1] = np.linspace( self.DS.x_lb[1] , self.DS.x_ub[1] , self.x1_n )\n \n self.x_grid2node = np.zeros( ( self.x0_n , self.x1_n ) , dtype = int ) # grid of corresponding index\n \n # 1-D List of nodes\n self.nodes_n = self.x0_n * self.x1_n\n self.nodes_state = np.zeros(( self.nodes_n , self.DS.n ), dtype = float ) # Number of nodes x state dimensions\n self.nodes_index = np.zeros(( self.nodes_n , self.DS.n ), dtype = int ) # Number of nodes x state dimensions", "def ddx(n, dx, f):\n fx = np.zeros(n)\n for j in range(n):\n fx[j] = (f[get_index(j+1, n)]-f[get_index(j-1, n)])/(2*dx)\n return fx", "def cell_edges(self):", "def evaluate_shape_derivatives(self, locations, elements=None):\n # points = np.zeros((5, 4, self.n_cells, 3))\n # points[:, :, even_mask, :] = nodes[:, even_mask, :][self.tetra_mask_even, :, :]\n # points[:, :, ~even_mask, :] = nodes[:, ~even_mask, :][self.tetra_mask, :, :]\n\n # # changing order to points, tetra, nodes, coord\n # points = points.swapaxes(0, 2)\n # points = points.swapaxes(1, 2)\n if elements is None:\n elements = np.arange(0, self.n_elements, dtype=int)\n ps = self.nodes[\n self.elements, :\n ] # points.reshape(points.shape[0] * points.shape[1], points.shape[2], points.shape[3])\n # vertices = self.nodes[self.elements[col,:]]\n m = np.array(\n [\n [\n (ps[:, 1, 0] - ps[:, 0, 0]),\n (ps[:, 1, 1] - ps[:, 0, 1]),\n (ps[:, 1, 2] - ps[:, 0, 2]),\n ],\n [\n (ps[:, 2, 0] - ps[:, 0, 0]),\n (ps[:, 2, 1] - ps[:, 0, 1]),\n (ps[:, 2, 2] - ps[:, 0, 2]),\n ],\n [\n (ps[:, 3, 0] - ps[:, 0, 0]),\n (ps[:, 3, 1] - ps[:, 0, 1]),\n (ps[:, 3, 2] - ps[:, 0, 2]),\n ],\n ]\n )\n I = np.array(\n [[-1.0, 1.0, 0.0, 0.0], [-1.0, 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0, 1.0]]\n )\n m = np.swapaxes(m, 0, 2)\n element_gradients = np.linalg.inv(m)\n\n element_gradients = element_gradients.swapaxes(1, 2)\n element_gradients = element_gradients @ I\n\n return element_gradients[elements, :, :], elements", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = 
sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)" ]
[ "0.60936165", "0.6051758", "0.6035031", "0.5841803", "0.57565576", "0.5718515", "0.56391853", "0.5623003", "0.5616664", "0.5558391", "0.5462912", "0.54524446", "0.5431657", "0.5402466", "0.53735256", "0.5369108", "0.53685844", "0.53657275", "0.5364053", "0.5359304", "0.53534204", "0.534498", "0.5342869", "0.5334845", "0.53318524", "0.53316146", "0.5331596", "0.5320838", "0.5312872", "0.5312851" ]
0.62730515
0
Create 1D derivative operator from cellcenters to nodes this means we go from n to n+1
def ddxCellGradBC(n, bc):
    bc = checkBC(bc)

    ij = (np.array([0, n]), np.array([0, 1]))
    vals = np.zeros(2)

    # Set the first side
    if(bc[0] == 'dirichlet'):
        vals[0] = -2
    elif(bc[0] == 'neumann'):
        vals[0] = 0
    # Set the second side
    if(bc[1] == 'dirichlet'):
        vals[1] = 2
    elif(bc[1] == 'neumann'):
        vals[1] = 0
    D = sp.csr_matrix((vals, ij), shape=(n+1, 2))
    return D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ddxCellGrad(n, bc):\n bc = checkBC(bc)\n\n D = sp.spdiags((np.ones((n+1, 1))*[-1, 1]).T, [-1, 0], n+1, n,\n format=\"csr\")\n # Set the first side\n if(bc[0] == 'dirichlet'):\n D[0, 0] = 2\n elif(bc[0] == 'neumann'):\n D[0, 0] = 0\n # Set the second side\n if(bc[1] == 'dirichlet'):\n D[-1, -1] = -2\n elif(bc[1] == 'neumann'):\n D[-1, -1] = 0\n return D", "def DDG(self, n, e, r, f):\n pre = (-e[:, None] + np.divide.outer((n - 1), r))**2\n pre -= np.divide.outer((n - 1), r**2)\n return pre*f", "def _compute_nodes_1d(npts, ilbds1d): # pylint: disable=line-too-long\n if npts % 2 == 0:\n raise ValueError(\"Please enter odd npts\")\n ind = np.arange(1, npts + 1)\n nodes = 0.5 * (1 - np.cos(np.pi * ind / (npts + 1)))\n return nodes * (ilbds1d[1] - ilbds1d[0]) + ilbds1d[0]", "def createGridNodesinFRF(x0, y0, dx, dy, ni, nj):\n assert dx.shape[0] == ni-1, 'This function assumes that there are n-1 dx values'\n\n if np.mean(np.diff(dx)) != np.mean(dx): # vairable spacing cell array\n icoord = np.zeros(ni) # assume\n jcoord = np.zeros(nj)\n icoord[0] = x0\n jcoord[0] = y0\n for xx, dxx in enumerate(dx):\n icoord[xx+1] = icoord[xx] - dxx # assumes offshore origin\n for yy, dyy in enumerate(dy):\n jcoord[yy+1] = jcoord[yy] - dyy\n else:\n raise NotImplementedError\n\n return icoord, jcoord", "def derivative_ex(dirichl_space, neumann_space, ep_in, ep_ex, kappa, operator_assembler):\n phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)\n ep = ep_ex/ep_in\n\n dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n B = 1/ep * dF - dP\n\n F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n A = F - P\n\n ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)\n ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n D = 1/ep * (ddP - ddF)\n\n dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)\n dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n C = dF0 - 1.0/ep*dP0\n\n A_sys = bempp.api.BlockedOperator(2, 2)\n A_sys[0, 0] = (0.5*(1.0 + (1.0/ep))*phi_id) + B\n A_sys[0, 1] = -A\n A_sys[1, 0] = D\n A_sys[1, 1] = (0.5*(1.0 + (1.0/ep))*dph_id) - C\n\n return A_sys", "def dpdnc(x, y):\n ed = ED.EdData(x, y)\n dpdnc_ed(x, y, ed)\n ed.complete_path()\n ed.complete_reverse()\n ed.complete_indexes()\n return ed", "def nodalGrad(self):\n if getattr(self, '_nodalGrad', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n G = ddx(n[0])\n elif(self.dim == 2):\n D1 = sp.kron(speye(n[1]+1), ddx(n[0]))\n D2 = sp.kron(ddx(n[1]), speye(n[0]+1))\n G = sp.vstack((D1, D2), format=\"csr\")\n elif(self.dim == 3):\n D1 = kron3(speye(n[2]+1), speye(n[1]+1), ddx(n[0]))\n D2 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]+1))\n D3 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]+1))\n G = sp.vstack((D1, D2, D3), format=\"csr\")\n # Compute lengths of cell edges\n L = self.edge\n 
self._nodalGrad = sdiag(1/L)*G\n return self._nodalGrad", "def diffnc(X, dt):\n [traj_length, D] = X.shape\n XX = np.zeros((traj_length + 2, D))\n for d in range(D):\n XX[:, d] = np.convolve(X[:, d], np.array([1, 0, -1]) / 2.0 / dt)\n\n X = XX[1:traj_length + 1, :]\n X[0, :] = X[1, :]\n X[traj_length - 1, :] = X[traj_length - 2, :]\n\n return X", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot", "def calcDist(self):\n rhoOp = self.rhoOp\n s = np.array([[1,0,0],[0,-1,0],[0,0,1]])\n sAdj = s.conj().T \n symRhoOp = np.dot(s,np.dot(rhoOp,sAdj))\n self.dist = Node.S1(rhoOp, symRhoOp)", "def dndt2(jx, iy, h, n, u, v, dx, dy) :\n p5 = np.float32(0.5)\n depth_jm0im0 = h[jx, iy ]+n[jx, iy]\n depth_jp1im0 = h[jx+1,iy] +n[jx+1,iy]\n depth_jm1im0 = h[jx-1,iy] +n[jx-1,iy]\n depth_jm0ip1 = h[jx, iy+1]+n[jx, iy+1]\n depth_jm0im1 = h[jx, iy-1]+n[jx, iy-1]\n \n hx_jp1 = u[jx+1,iy]*(depth_jm0im0 + depth_jp1im0)*p5\n hx_jm0 = u[jx, iy]*(depth_jm1im0 + depth_jm0im0)*p5\n \n \n hy_ip1 = v[jx,iy+1]*(depth_jm0im0 + depth_jm0ip1)*p5\n hy_im0 = v[jx,iy ]*(depth_jm0im1 + depth_jm0im0)*p5\n \n # assume u and v are zero on edge\n dhx = (hx_jp1-hx_jm0)/dx#[jx,iy]\n dhy = (hy_ip1-hy_im0)/dy#[jx,iy]\n\n \n return ( -dhx-dhy )", "def dE_mdn(self, x, y, t, w1 = None, w2 = None):\n if w2 == None:\n w2 = self.w2\n M = int(self.M)\n # avoid underrun\n \n alpha, sigma, mu = self.getMixtureParams(y.T)\n #import pdb; pdb.set_trace()\n \n #T = t.T[None, None, :] # note: np.tile is slower than this notation\n T = t.T[None, :]\n \n phi = self._phi(T, mu, sigma)\n aphi = alpha*phi\n pi = aphi / np.sum(aphi, 0)\n \n # derivatives of E with respect to the output variables (s. Bishop 1995, chp. 
6.4)\n dE_dy_alpha = alpha - pi\n dE_dy_sigma = - 0.5 * pi * ((np.sum((T-mu)**2 , 1) / sigma) - self.c)\n dE_dy_mu = pi[:,np.newaxis,:] * (mu - T) / sigma[:,np.newaxis,:]\n\n dk = np.zeros([self.ny, x.shape[0]])\n dk[0:M,:] = dE_dy_alpha\n dk[M:2*M,:] = dE_dy_sigma\n \n dk[2*M:] = np.reshape(dE_dy_mu, [M*self.c, x.shape[0]])\n \n # back-propagate the dks\n #t0=datetime.now()\n dEnw1, dEnw2 = self.backward(x, dk, None, w2)\n #print 'eval of dE_mdn:' + str((datetime.now()-t0))\n #dj = (1 - self.z[1:]**2) * np.dot(w2[:,1:].T, dk)\n # evaluate derivatives with respect to the weights\n #dEnw1 = (dj[:,:,np.newaxis]*x[np.newaxis,:,:]).transpose(1,0,2)\n #dEnw2 = (dk[:,:,np.newaxis]*self.z.T[np.newaxis,:,:]).transpose(1,0,2)\n return dEnw1, dEnw2", "def reflect(d,n):\n\t# coefficent c, because easier\n\tc = 2 * dot(d,n)\n\treturn [di - c * ni for (di, ni) in zip(d,n)]", "def n1derivative_clee(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n \n a=compute_n1_py(clpp,norms,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclee.txt'.format(keys[k]),der)\n return derlist", "def gradient(f, *varargs):\n N = len(f.shape) # number of dimensions\n n = len(varargs)\n if n == 0:\n dx = [1.0]*N\n elif n == 1:\n dx = [varargs[0]]*N\n elif n == N:\n dx = list(varargs)\n else:\n raise SyntaxError, \"invalid number of arguments\"\n\n # use central differences on interior and first differences on endpoints\n\n outvals = []\n\n # create slice objects --- initially all are [:, :, ..., :]\n slice1 = [slice(None)]*N\n slice2 = [slice(None)]*N\n slice3 = [slice(None)]*N\n\n otype = f.dtype.char\n if otype not in ['f', 'd', 'F', 'D']:\n otype = 'd'\n\n for axis in range(N):\n # select out appropriate parts for this dimension\n out = zeros(f.shape, f.dtype.char)\n slice1[axis] = slice(1, -1)\n slice2[axis] = slice(2, None)\n slice3[axis] = slice(None, -2)\n # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0\n out[slice1] = (f[slice2] - f[slice3])/2.0\n slice1[axis] = 0\n slice2[axis] = 1\n slice3[axis] = 0\n # 1D equivalent -- out[0] = (f[1] - f[0])\n out[slice1] = (f[slice2] - f[slice3])\n slice1[axis] = -1\n slice2[axis] = -1\n slice3[axis] = -2\n # 1D equivalent -- out[-1] = (f[-1] - f[-2])\n out[slice1] = (f[slice2] - f[slice3])\n\n # divide by step size\n outvals.append(out / dx[axis])\n\n # reset the slice object in this dimension to \":\"\n slice1[axis] = slice(None)\n slice2[axis] = slice(None)\n slice3[axis] = slice(None)\n\n if N == 1:\n return outvals[0]\n else:\n return outvals", "def derivative(self, *args):\n if self.i_dim == 0:\n return np.ones_like(*args[0])\n 
else:\n return np.zeros_like(*args[0])", "def compute_ghost_series(self,terms):\n\t\tp=self.p\n\t\tcomp=self.comp\n\t\tf1=self.f1\n\t\tf2=self.f2\n\n\t\tghost_coefs = [[] for i in range(terms+1)]\n\n\n\t\tk=comp;\n\t\tif k==0 or k==1:\n\t\t\tk=k+p-1\n\n\t\t## Precompute the needed dimensions\n\t\tself.f1v = [f1(k)]\n\t\tself.f2v = [f2(k)]\n\t\tk = k + p-1\n\t\twhile self.f1v[len(self.f1v)-1] <= terms+1:\n\t\t\tself.f1v += [f1(k)]\n\t\t\tself.f2v += [f2(k)]\n\t\t\tk = k + p-1\n\t\t\n\t\t## Starting at weight 2, we run through weights in the component,\n\t\t## compute the associated indices, and then record the weights at those\n\t\t## indices with appropriate multiplicities\n\n\t\tk = comp;\n\t\tif k==0 or k==1:\n\t\t\tk=k+p-1\n\t\tn = 0\n\n\t\tinds = range(self.f1v[n]+1,self.f1v[n]+self.f2v[n])\n\t\twhile (len(inds)==0 or inds[0]<=terms+1):\n\t\t\t## This loops adds the weights to the appropriate indices with the appropriate multiplicities\n\t\t\tfor m in range(floor((len(inds)+1)/2)):\n\t\t\t\tif m < floor(len(inds)/2):\n\t\t\t\t\tif inds[m]<=terms:\n\t\t\t\t\t\tghost_coefs[inds[m]] += [(k,m+1)]\n\t\t\t\t\tif (inds[len(inds)-1-m]<=terms):\n\t\t\t\t\t\tghost_coefs[inds[len(inds)-1-m]] += [(k,m+1)]\n\t\t\t\telse:\n\t\t\t\t\tif inds[m]<=terms:\n\t\t\t\t\t\tghost_coefs[inds[m]] += [(k,m+1)]\n\t\t\tk = k + p-1\n\t\t\tn = n + 1\n\t\t\tinds = range(self.f1v[n]+1,self.f1v[n]+self.f2v[n])\n\t\tself.series=ghost_coefs", "def diff(x, n=1, axis=-1):\r\n return DiffOp(n=n, axis=axis)(x)", "def _create_neighbor_distances(self):\n # --------------------------------\n # Create Directions from Point\n # --------------------------------\n diff = [[0 for _ in range(self._dim)]]\n curr = diff[0][:]\n for i in range(self._dim):\n # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0.\n curr[i] = 1\n diff.append(curr[:])\n curr[i] = -1\n diff.append(curr[:])\n curr[i] = 0\n # Remove initial blank unit vector with all values at 0.\n diff.pop(0)\n del curr\n\n # --------------------------------\n # Breadth First Search\n # --------------------------------\n distances = []\n queue = [[0 for _ in range(self._dim)]]\n\n while queue:\n # Get latest distance\n curr = queue.pop()\n\n # The distance from any possible point should be less than or equal to the number of dimensions.\n # This can be shown using basic calculations.\n if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \\\n np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances:\n continue\n\n # Calculate all distances from child and add to queue\n queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))])\n\n # Add current distance to distances\n distances.append(curr)\n\n # Return all possible neighbor distances\n return np.array(distances, dtype=int)", "def disconnected_graph(n):\n g = nx.DiGraph()\n for i in range(0, n):\n g.add_node(i)\n return g", "def create_derivative_graph(f, xrange, n):\n plot_points = []\n for x in xrange:\n plot_points.append(nth_derivative(f, x, n))\n return plot_points", "def centderiv(A, dim='x', boundary='periodic'):\n return centdiff(A, dim=dim, boundary=boundary)/centspacing(A[dim])", "def n1derivative_clte(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n 
N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array1001[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,clee,clbb,array999[i],NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n keys=['TT','EE','EB','TE','TB']\n\n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclte.txt'.format(keys[k]),der)\n return derlist", "def cell_edges(self):", "def discretizespace(self):\n \n # Grid\n self.xgriddim = ( self.x0_n , self.x1_n )\n \n self.xd = [ None , None ]\n self.xd[0] = np.linspace( self.DS.x_lb[0] , self.DS.x_ub[0] , self.x0_n )\n self.xd[1] = np.linspace( self.DS.x_lb[1] , self.DS.x_ub[1] , self.x1_n )\n \n self.x_grid2node = np.zeros( ( self.x0_n , self.x1_n ) , dtype = int ) # grid of corresponding index\n \n # 1-D List of nodes\n self.nodes_n = self.x0_n * self.x1_n\n self.nodes_state = np.zeros(( self.nodes_n , self.DS.n ), dtype = float ) # Number of nodes x state dimensions\n self.nodes_index = np.zeros(( self.nodes_n , self.DS.n ), dtype = int ) # Number of nodes x state dimensions", "def ddx(n, dx, f):\n fx = np.zeros(n)\n for j in range(n):\n fx[j] = (f[get_index(j+1, n)]-f[get_index(j-1, n)])/(2*dx)\n return fx", "def evaluate_shape_derivatives(self, locations, elements=None):\n # points = np.zeros((5, 4, self.n_cells, 3))\n # points[:, :, even_mask, :] = nodes[:, even_mask, :][self.tetra_mask_even, :, :]\n # points[:, :, ~even_mask, :] = nodes[:, ~even_mask, :][self.tetra_mask, :, :]\n\n # # changing order to points, tetra, nodes, coord\n # points = points.swapaxes(0, 2)\n # points = points.swapaxes(1, 2)\n if elements is None:\n elements = np.arange(0, self.n_elements, dtype=int)\n ps = self.nodes[\n self.elements, :\n ] # points.reshape(points.shape[0] * points.shape[1], points.shape[2], points.shape[3])\n # vertices = self.nodes[self.elements[col,:]]\n m = np.array(\n [\n [\n (ps[:, 1, 0] - ps[:, 0, 0]),\n (ps[:, 1, 1] - ps[:, 0, 1]),\n (ps[:, 1, 2] - ps[:, 0, 2]),\n ],\n [\n (ps[:, 2, 0] - ps[:, 0, 0]),\n (ps[:, 2, 1] - ps[:, 0, 1]),\n (ps[:, 2, 2] - ps[:, 0, 2]),\n ],\n [\n (ps[:, 3, 0] - ps[:, 0, 0]),\n (ps[:, 3, 1] - ps[:, 0, 1]),\n (ps[:, 3, 2] - ps[:, 0, 2]),\n ],\n ]\n )\n I = np.array(\n [[-1.0, 1.0, 0.0, 0.0], [-1.0, 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0, 1.0]]\n )\n m = np.swapaxes(m, 0, 2)\n element_gradients = np.linalg.inv(m)\n\n element_gradients = element_gradients.swapaxes(1, 2)\n element_gradients = element_gradients @ I\n\n return element_gradients[elements, :, :], elements", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = 
sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian" ]
[ "0.62740684", "0.60518783", "0.60337484", "0.5841564", "0.5756277", "0.57178116", "0.56408596", "0.56202203", "0.5615585", "0.5557149", "0.5462184", "0.545099", "0.543022", "0.54015505", "0.53731513", "0.5367494", "0.53671384", "0.53652203", "0.5362306", "0.53591526", "0.5353351", "0.53449553", "0.5341936", "0.5334354", "0.5332984", "0.5332195", "0.5330996", "0.53213227", "0.5313412", "0.53132206" ]
0.60949314
1
Construct divergence operator in the x component (facestg to cellcentres).
def faceDivx(self):
    if getattr(self, '_faceDivx', None) is None:
        # The number of cell centers in each direction
        n = self.vnC
        # Compute faceDivergence operator on faces
        if(self.dim == 1):
            D1 = ddx(n[0])
        elif(self.dim == 2):
            D1 = sp.kron(speye(n[1]), ddx(n[0]))
        elif(self.dim == 3):
            D1 = kron3(speye(n[2]), speye(n[1]), ddx(n[0]))
        # Compute areas of cell faces & volumes
        S = self.r(self.area, 'F', 'Fx', 'V')
        V = self.vol
        self._faceDivx = sdiag(1/V)*D1*sdiag(S)
    return self._faceDivx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g", "def x(self):\n values = self._interpolate_table(\"x\")\n values += self._corrections((\"ortho_eop\", iers.ortho_eop, 0, 1e-6), (\"pmsdnut2\", iers.pmsdnut2, 0, 1e-6))\n return values", "def make_divergence(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n dr = bcs.grid.discretization[0]\n scale_r, scale_z = 1 / (2 * bcs.grid.discretization)\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))\n def divergence(arr, out=None):\n \"\"\"apply divergence operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n d_r = (arr[0, 1, j] + 3 * arr[0, 0, j]) * scale_r\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_r + d_z\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n d_r = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_r\n d_r += arr[0, i, j] / ((i + 0.5) * dr)\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_r + d_z\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n arr_r_h = value_outer(arr[0], (i, j))\n d_r = (arr_r_h - arr[0, i - 1, j]) * scale_r\n d_r += arr[0, i, j] / ((i + 0.5) * dr)\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_z + d_r\n\n return out\n\n return divergence # type: ignore", "def faceDiv(self):\n if getattr(self, '_faceDiv', None) is None:\n n = self.vnC\n # Compute faceDivergence operator on faces\n if(self.dim == 1):\n D = ddx(n[0])\n elif(self.dim == 2):\n D1 = sp.kron(speye(n[1]), ddx(n[0]))\n D2 = sp.kron(ddx(n[1]), speye(n[0]))\n D = sp.hstack((D1, D2), format=\"csr\")\n elif(self.dim == 3):\n D1 = kron3(speye(n[2]), speye(n[1]), ddx(n[0]))\n D2 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]))\n D3 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]))\n D = sp.hstack((D1, D2, D3), format=\"csr\")\n # Compute areas of cell faces & volumes\n S = self.area\n V = self.vol\n self._faceDiv = sdiag(1/V)*D*sdiag(S)\n return self._faceDiv", "def cellGradx(self):\n if getattr(self, '_cellGradx', None) is None:\n G1 = self._cellGradxStencil()\n # Compute areas of cell faces & volumes\n V = self.aveCC2F*self.vol\n L = self.r(self.area/V, 'F','Fx', 'V')\n self._cellGradx = sdiag(L)*G1\n return self._cellGradx", "def _derX(self, x, y):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdx = (\n (\n (1 - beta) * self.f_values[x_pos, y_pos - 1]\n + beta * self.f_values[x_pos, y_pos]\n )\n - (\n (1 - beta) * self.f_values[x_pos - 1, y_pos - 1]\n + beta * self.f_values[x_pos - 1, y_pos]\n )\n ) / 
(self.x_list[x_pos] - self.x_list[x_pos - 1])\n return dfdx", "def make_tensor_divergence(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n divergence_r = make_divergence(bcs.extract_component(0))\n divergence_z = make_divergence(bcs.extract_component(1))\n divergence_phi = make_divergence(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def tensor_divergence(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n divergence_r(arr[0], out=out[0])\n divergence_z(arr[1], out=out[1])\n divergence_phi(arr[2], out=out[2])\n return out\n\n return tensor_divergence # type: ignore", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def divergence(self, **kwargs) -> np.ndarray:\n\n if self._div is None:\n self._div = divergence(self.asym(), W=self.W, **kwargs)\n return self._div", "def __call__(self, x):\n\n np.subtract(x, self.d, out=x)\n np.divide(self.a, x, out=x)\n np.subtract(x, self.b, out=x)\n np.log(x, out=x)\n np.divide(x, -self.e, out=x)\n np.add(x, self.c, out=x)\n\n return x", "def x(self) -> int:\n return self.data.x_centre >> 4", "def x(self):\n return np.sum(self.bbox, 0)[0] / 2", "def __init__(self, x, y):\n if type(y) not in (int, float, Decimal):\n raise OperatorError(\"supported operand only in types int,float\"\n \" for Div Operator of Money\")\n y = Decimal(y)\n self.x, self.y = x, y", "def unit_x(cls):\n return cls(1, 0)", "def __call__(self, x):\n f_beam_gaussian = self.i / (np.sqrt(2 * constants.pi) * constants.e * self.sigma * self.w_z) * \\\n np.exp(-(x - self.x_c) ** 2 / (2 * self.sigma ** 2))\n\n # Convert the flux density unit atoms/nm^2s to atoms/cm^2s by multiplying with factor 1e14\n return f_beam_gaussian * 1e14", "def unit_x(cls):\n return cls(1, 0, 0)", "def __call__(self, x):\n f_beam_errf = (self.i / (2 * self.w_x * self.w_z * constants.e)) * \\\n (special.erf(-(x - self.x_2) / (np.sqrt(2) * self.sigma)) -\n special.erf(-(x - self.x_1) / (np.sqrt(2) * self.sigma)))\n\n # Convert the flux density unit from atoms/nm^2s to atoms/cm^2s by multiplying with factor 1e14\n return f_beam_errf * 1e14", "def cyl_x_unit_vector(gravity: sc.Variable, incident_beam: sc.Variable) -> sc.Variable:\n v_x = sc.cross(incident_beam, gravity)\n return v_x / sc.norm(v_x)", "def encode(self, x):\n\n try:\n xp = (np.atleast_2d(x) - self.xmin[None, :]) / self.xspan[None, :]\n return xp.T\n except:\n xp = (np.atleast_2d(x) - self.xmin[:, None]) / self.xspan[:, None]\n return xp", "def get_x0(self):\n from xfel.cxi.postrefine.mod_leastsqr import prep_input\n from xfel.cxi.postrefine.test_rs import calc_spot_radius\n\n a_star = sqr(self.orientation.reciprocal_matrix())\n miller_indices = self.miller_array.indices()\n spot_radius = calc_spot_radius(a_star, miller_indices, self.wavelength)\n x_init = [self.G, - 1 * self.minus_2B / 2, 0, 0,\n spot_radius, spot_radius, 0.0026]\n x_init.extend(self.uc)\n x0_all = np.array(x_init)\n x0 = prep_input(x0_all, self.crystal_system)\n return x0", "def xform_homog( self , 
xfrmMatx ):\r\n for i in xrange( 0 , len( self.vertices ) , 3 ):\r\n self.vertX[ i : i+4 ] = apply_homog( xfrmMatx , self.vertices[ i : i+4 ] )", "def __init__(self, expr1, expr2, variables=('both', 'both'),\n singularity_handling='raise', eps=None, name='divide'):\n super(DivisionExpression2D, self).__init__(e1=expr1, e2=expr2, name=name)\n self._v1, self._v2 = variables\n if self._v1 not in ('both', 0, 1) or self._v2 not in ('both', 0, 1):\n raise TypeError(\"Expressions can only depend on element 0, 1 or both.\")\n if singularity_handling not in (\"raise\", \"zero\", \"one\", \"+inf\", \"-inf\"):\n raise TypeError(\"Singularity handling must be one of \"\n \"'raise', 'zero', 'one', '+inf', '-inf'.\")\n self._sing_handling = singularity_handling\n self._eps = eps\n self.domain = self._get_domain()", "def xcoeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n return 0.5\n elif b == n or c == n:\n return -1.0\n elif cov > 0.0:\n return _div(cov, min(p1 * q2, p2 * q1))\n elif cov < 0.0:\n return _div(cov, min(n * c, n * b))\n else:\n return 0.0", "def x ( self ) :\n return self.xvar", "def dfdx(self, X):\n \n return 3*(X[0])**2", "def __init__(self, expr1, expr2, name='divide'):\n super().__init__(e1=expr1, e2=expr2, domain=expr1.domain, name=name)", "def x_nondim(self, x):\n x[0:4] /= self.r_scale\n return x", "def x(self):\n if self.es_elemento_neutro():\n raise AttributeError(\"El elemento neutro no tiene componente x\")\n else:\n return self._x", "def to_x_coordinates(self,ax):\n self.xx_cells = self._Mesh.cell2x(self.xx_cells)", "def cells_x(self):\n return self._cells[0]" ]
[ "0.5872187", "0.5829122", "0.5811972", "0.58066034", "0.5778079", "0.577153", "0.57689637", "0.5755432", "0.5672428", "0.56478095", "0.562732", "0.5525031", "0.5520553", "0.5520544", "0.551753", "0.5508819", "0.5459851", "0.5396037", "0.5354177", "0.53370064", "0.5333066", "0.53080624", "0.528458", "0.52711445", "0.5270202", "0.52468574", "0.52449346", "0.5240962", "0.52104056", "0.52076584" ]
0.6976973
0
The cell centered Gradient, takes you to cell faces.
def cellGrad(self):
    if getattr(self, '_cellGrad', None) is None:
        G = self._cellGradStencil()
        S = self.area  # Compute areas of cell faces & volumes
        V = self.aveCC2F*self.vol  # Average volume between adjacent cells
        self._cellGrad = sdiag(S/V)*G
    return self._cellGrad
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nodalGrad(self):\n if getattr(self, '_nodalGrad', None) is None:\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n G = ddx(n[0])\n elif(self.dim == 2):\n D1 = sp.kron(speye(n[1]+1), ddx(n[0]))\n D2 = sp.kron(ddx(n[1]), speye(n[0]+1))\n G = sp.vstack((D1, D2), format=\"csr\")\n elif(self.dim == 3):\n D1 = kron3(speye(n[2]+1), speye(n[1]+1), ddx(n[0]))\n D2 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]+1))\n D3 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]+1))\n G = sp.vstack((D1, D2, D3), format=\"csr\")\n # Compute lengths of cell edges\n L = self.edge\n self._nodalGrad = sdiag(1/L)*G\n return self._nodalGrad", "def getGradient(self,j):\n i = int(self.indicator['term'][j])\n r = int(self.indicator['row'][j])\n c = int(self.indicator['col'][j])\n rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])\n return rv", "def GradientAdjuster(self):\n pass", "def cells_center(self,refresh=False,mode='first3'):\n if refresh is True:\n to_update=slice(None)\n elif refresh is not False:\n to_update=refresh\n else:\n to_update = np.isnan(self.cells['_center'][:,0])\n\n if np.sum(to_update) > 0:\n if mode=='first3':\n p1,p2,p3 = [self.nodes['x'][self.cells['nodes'][to_update,i]] for i in [0,1,2]]\n self.cells['_center'][to_update] = circumcenter(p1,p2,p3)\n elif mode=='sequential':\n for c in np.arange(self.Ncells())[to_update]:\n points=self.nodes['x'][self.cell_to_nodes(c)]\n self.cells['_center'][c] = poly_circumcenter(points)\n \n return self.cells['_center']", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def gradient(self):\n functional = self\n\n class KLCrossEntCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\"\"\"\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)\n\n return KLCrossEntCCGradient()", "def center_of_gravity(self):\n weights = [self.stabilizer_h.weight, self.stabilizer_vright.weight, self.stabilizer_vleft.weight]\n cgs = [self.stabilizer_h.center_of_gravity, self.stabilizer_vright.center_of_gravity,\n self.stabilizer_vleft.center_of_gravity]\n total_weight = sum(weights)\n cg_x = sum([weights[i] * cgs[i].x for i in range(0, len(weights))]) / total_weight\n cg_y = sum([weights[i] * cgs[i].y for i in range(0, len(weights))]) / total_weight\n cg_z = sum([weights[i] * cgs[i].z for i in range(0, len(weights))]) / total_weight\n\n return Point(cg_x, cg_y, cg_z)", "def getCellCenter(self):\n x = np.zeros(self.nElements)\n for iElt in range(self.nElements):\n x[iElt] = Elements._all[iElt].center\n return x", "def center(self):\n return self._lower + 0.5 * (self._upper - self._lower)", "def cellGradz(self):\n if self.dim < 3:\n return None\n if getattr(self, '_cellGradz', None) is None:\n G3 = self._cellGradzStencil()\n # Compute areas of cell faces & volumes\n V = self.aveCC2F*self.vol\n L = self.r(self.area/V, 'F', 'Fz', 'V')\n self._cellGradz = sdiag(L)*G3\n return self._cellGradz", "def cellGradBC(self):\n if getattr(self, '_cellGradBC', None) is None:\n BC = self.setCellGradBC(self._cellGradBC_list)\n n = self.vnC\n if(self.dim == 1):\n G = ddxCellGradBC(n[0], BC[0])\n elif(self.dim == 2):\n G1 = sp.kron(speye(n[1]), ddxCellGradBC(n[0], BC[0]))\n G2 = sp.kron(ddxCellGradBC(n[1], BC[1]), 
speye(n[0]))\n G = sp.block_diag((G1, G2), format=\"csr\")\n elif(self.dim == 3):\n G1 = kron3(speye(n[2]), speye(n[1]), ddxCellGradBC(n[0], BC[0]))\n G2 = kron3(speye(n[2]), ddxCellGradBC(n[1], BC[1]), speye(n[0]))\n G3 = kron3(ddxCellGradBC(n[2], BC[2]), speye(n[1]), speye(n[0]))\n G = sp.block_diag((G1, G2, G3), format=\"csr\")\n # Compute areas of cell faces & volumes\n S = self.area\n V = self.aveCC2F*self.vol # Average volume between adjacent cells\n self._cellGradBC = sdiag(S/V)*G\n return self._cellGradBC", "def _UpdateGradient(self):\n self.mol.GetGradient('analytic')", "def get_centre(self):\n return self.c", "def cellGradx(self):\n if getattr(self, '_cellGradx', None) is None:\n G1 = self._cellGradxStencil()\n # Compute areas of cell faces & volumes\n V = self.aveCC2F*self.vol\n L = self.r(self.area/V, 'F','Fx', 'V')\n self._cellGradx = sdiag(L)*G1\n return self._cellGradx", "def _get_centre(self, gdf):\n bounds = gdf[\"geometry\"].bounds\n centre_x = (bounds[\"maxx\"].max() + bounds[\"minx\"].min()) / 2\n centre_y = (bounds[\"maxy\"].max() + bounds[\"miny\"].min()) / 2\n return centre_x, centre_y", "def center(self):\n return (self.upper_right + self.lower_left) * 0.5", "def gradient(self):\n functional = self\n\n class KLCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in points where one or more\n components are larger than or equal to one.\n \"\"\"\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)\n\n return KLCCGradient()", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def draw_border_gradient(img, center_color=(0, 0, 0, 255), \\\nouter_color=(255, 255, 255, 255), excluding_region=None):\n center = (img.size[0] / 2, img.size[1] / 2)\n # Used to calculate scalar\n max_distance = math.sqrt(center[0] ** 2 + center[1] ** 2)\n for x in range(img.size[0]):\n for y in range(img.size[1]):\n if excluding_region != None:\n if not (x < excluding_region[0] or x >= excluding_region[1] or \\\n y < excluding_region[2] or y >= excluding_region[3]):\n continue\n \n # This gets the delta between the current point and the center\n distance = math.sqrt((center[0] - x) ** 2 + (center[1] - y) ** 2)\n # We turn this into a scale from 0 to 1, with 1 meaning being on the\n # very outside, and 0 being on the center\n distance_scalar = float(distance) / max_distance\n \n r = (1 - distance_scalar) * center_color[0] + distance_scalar * \\\n outer_color[0]\n g = (1 - distance_scalar) * center_color[1] + distance_scalar * \\\n outer_color[1]\n b = (1 - distance_scalar) * center_color[2] + distance_scalar * \\\n outer_color[2]\n \n img.putpixel((x, y), (int(r), int(g), int(b), 255))", "def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide", "def Gradient(self):\n \n OutList = []\n for dim in range(0,Dimension):\n X = self._CreateSameType()\n \n for Ind in self.IndList():\n \n Factor = -1j * Ind[dim]\n \n X[Ind] = 
Factor * self[Ind]\n \n OutList.append(X)\n \n return BZO(array(OutList))", "def center(self):\n return self.centralizer(self)", "def gradient(self):\n return NotImplemented", "def cell_edges(self):", "def gradient(self):\n return ZeroOperator(self.domain)", "def get_clust_cent(self):\r\n\r\n return self.__clust_cent", "def GetFirstGradientColour(self):\r\n \r\n return self._firstcolour", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim = len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def get_face_barycenters(self, idx=-1):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v = np.vectorize(lambda x: self.vertices[x], signature='()->(n)')(self.faces[idx])\n return np.mean(v, axis=0)\n else:\n v = self.vertices\n f = self.faces\n return v[f.flatten()].reshape((-1, 3, 3)).mean(axis=1)" ]
[ "0.5621755", "0.5469282", "0.546437", "0.5444811", "0.5382192", "0.53690034", "0.535502", "0.5318761", "0.5255991", "0.52483857", "0.523278", "0.52231425", "0.52080166", "0.520761", "0.52018565", "0.5180706", "0.51670074", "0.51604354", "0.5160297", "0.51142305", "0.51004773", "0.50849515", "0.5063987", "0.5042605", "0.50226307", "0.50210416", "0.5006082", "0.50014704", "0.49946362", "0.4987154" ]
0.55576605
1
The weak form boundary condition projection matrices when a mixed boundary condition is used
def getBCProjWF_simple(self, discretization='CC'): if discretization is not 'CC': raise NotImplementedError('Boundary conditions only implemented' 'for CC discretization.') def projBC(n): ij = ([0, n], [0, 1]) vals = [0, 0] vals[0] = 1 vals[1] = 1 return sp.csr_matrix((vals, ij), shape=(n+1, 2)) def projDirichlet(n, bc): bc = checkBC(bc) ij = ([0, n], [0, 1]) vals = [0, 0] if(bc[0] == 'dirichlet'): vals[0] = -1 if(bc[1] == 'dirichlet'): vals[1] = 1 return sp.csr_matrix((vals, ij), shape=(n+1, 2)) BC = [['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet']] n = self.vnC indF = self.faceBoundaryInd if(self.dim == 1): Pbc = projDirichlet(n[0], BC[0]) B = projBC(n[0]) indF = indF[0] | indF[1] Pbc = Pbc*sdiag(self.area[indF]) elif(self.dim == 2): Pbc1 = sp.kron(speye(n[1]), projDirichlet(n[0], BC[0])) Pbc2 = sp.kron(projDirichlet(n[1], BC[1]), speye(n[0])) Pbc = sp.block_diag((Pbc1, Pbc2), format="csr") B1 = sp.kron(speye(n[1]), projBC(n[0])) B2 = sp.kron(projBC(n[1]), speye(n[0])) B = sp.block_diag((B1, B2), format="csr") indF = np.r_[(indF[0] | indF[1]), (indF[2] | indF[3])] Pbc = Pbc*sdiag(self.area[indF]) elif(self.dim == 3): Pbc1 = kron3(speye(n[2]), speye(n[1]), projDirichlet(n[0], BC[0])) Pbc2 = kron3(speye(n[2]), projDirichlet(n[1], BC[1]), speye(n[0])) Pbc3 = kron3(projDirichlet(n[2], BC[2]), speye(n[1]), speye(n[0])) Pbc = sp.block_diag((Pbc1, Pbc2, Pbc3), format="csr") B1 = kron3(speye(n[2]), speye(n[1]), projBC(n[0])) B2 = kron3(speye(n[2]), projBC(n[1]), speye(n[0])) B3 = kron3(projBC(n[2]), speye(n[1]), speye(n[0])) B = sp.block_diag((B1, B2, B3), format="csr") indF = np.r_[ (indF[0] | indF[1]), (indF[2] | indF[3]), (indF[4] | indF[5]) ] Pbc = Pbc*sdiag(self.area[indF]) return Pbc, B.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def assemble_matrices(self):\n \n #Pointer reassignment for convenience\n N = self.ngrids\n\n #Begin with a linked-list data structure for the transmissibilities,\n #and one-dimenstional arrays for the diagonal of B and the flux vector\n T = lil_matrix((N, N), dtype=np.double)\n B = np.zeros(N, dtype=np.double)\n Q = np.zeros(N, dtype=np.double)\n\n #Read in boundary condition types and values\n bcs = self.input_data['boundary conditions']\n bc_type_1 = bcs['left']['type'].lower()\n bc_type_2 = bcs['right']['type'].lower()\n bc_value_1 = bcs['left']['value']\n bc_value_2 = bcs['right']['value']\n \n #Loop over all grid cells\n for i in range(N):\n\n #Apply left BC\n if i == 0:\n T[i, i+1] = -self.compute_transmissibility(i, i + 1)\n\n if bc_type_1 == 'neumann':\n T[i, i] = T[i,i] - T[i, i+1]\n elif bc_type_1 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i,i] - T[i, i+1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_1\n else:\n pass #TODO: Add error checking here if no bc is specified\n\n #Apply right BC\n elif i == (N - 1):\n T[i, i-1] = -self.compute_transmissibility(i, i - 1)\n\n if bc_type_2 == 'neumann':\n T[i, i] = T[i,i] - T[i, i-1]\n elif bc_type_2 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i, i] - T[i, i-1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_2\n else:\n pass #TODO:Add error checking here if no bc is specified\n\n #If there is no boundary condition compute interblock transmissibilties\n else:\n T[i, i-1] = -self.compute_transmissibility(i, i-1)\n T[i, i+1] = -self.compute_transmissibility(i, i+1)\n T[i, i] = (self.compute_transmissibility(i, i-1) +\n self.compute_transmissibility(i, i+1))\n\n #Compute accumulations\n B[i] = self.compute_accumulation(i)\n\n #If constant-rate wells are present, add them to the flux vector\n if self.rate_well_grids is not None:\n Q[self.rate_well_grids] += self.rate_well_values\n\n \n #Return sparse data-structures\n return (T.tocsr(), \n csr_matrix((B, (np.arange(N), np.arange(N))), shape=(N,N)), \n Q)", "def getBCProjWF(self, BC, discretization='CC'):\n\n if discretization is not 'CC':\n raise NotImplementedError('Boundary conditions only implemented'\n 'for CC discretization.')\n\n if isinstance(BC, string_types):\n BC = [BC for _ in self.vnC] # Repeat the str self.dim times\n elif isinstance(BC, list):\n assert len(BC) == self.dim, 'BC list must be the size of your mesh'\n else:\n raise Exception(\"BC must be a str or a list.\")\n\n for i, bc_i in enumerate(BC):\n BC[i] = checkBC(bc_i)\n\n def projDirichlet(n, bc):\n bc = checkBC(bc)\n ij = ([0, n], [0, 1])\n vals = [0, 0]\n if(bc[0] == 'dirichlet'):\n vals[0] = -1\n if(bc[1] == 'dirichlet'):\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(n+1, 2))\n\n def projNeumannIn(n, bc):\n bc = checkBC(bc)\n P = 
sp.identity(n+1).tocsr()\n if(bc[0] == 'neumann'):\n P = P[1:, :]\n if(bc[1] == 'neumann'):\n P = P[:-1, :]\n return P\n\n def projNeumannOut(n, bc):\n bc = checkBC(bc)\n ij = ([0, 1], [0, n])\n vals = [0, 0]\n if(bc[0] == 'neumann'):\n vals[0] = 1\n if(bc[1] == 'neumann'):\n vals[1] = 1\n return sp.csr_matrix((vals, ij), shape=(2, n+1))\n\n n = self.vnC\n indF = self.faceBoundaryInd\n if(self.dim == 1):\n Pbc = projDirichlet(n[0], BC[0])\n indF = indF[0] | indF[1]\n Pbc = Pbc*sdiag(self.area[indF])\n\n Pin = projNeumannIn(n[0], BC[0])\n\n Pout = projNeumannOut(n[0], BC[0])\n\n elif(self.dim == 2):\n Pbc1 = sp.kron(speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = sp.kron(projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2), format=\"csr\")\n indF = np.r_[(indF[0] | indF[1]), (indF[2] | indF[3])]\n Pbc = Pbc*sdiag(self.area[indF])\n\n P1 = sp.kron(speye(n[1]), projNeumannIn(n[0], BC[0]))\n P2 = sp.kron(projNeumannIn(n[1], BC[1]), speye(n[0]))\n Pin = sp.block_diag((P1, P2), format=\"csr\")\n\n P1 = sp.kron(speye(n[1]), projNeumannOut(n[0], BC[0]))\n P2 = sp.kron(projNeumannOut(n[1], BC[1]), speye(n[0]))\n Pout = sp.block_diag((P1, P2), format=\"csr\")\n\n elif(self.dim == 3):\n Pbc1 = kron3(speye(n[2]), speye(n[1]), projDirichlet(n[0], BC[0]))\n Pbc2 = kron3(speye(n[2]), projDirichlet(n[1], BC[1]), speye(n[0]))\n Pbc3 = kron3(projDirichlet(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pbc = sp.block_diag((Pbc1, Pbc2, Pbc3), format=\"csr\")\n indF = np.r_[\n (indF[0] | indF[1]),\n (indF[2] | indF[3]),\n (indF[4] | indF[5])\n ]\n Pbc = Pbc*sdiag(self.area[indF])\n\n P1 = kron3(speye(n[2]), speye(n[1]), projNeumannIn(n[0], BC[0]))\n P2 = kron3(speye(n[2]), projNeumannIn(n[1], BC[1]), speye(n[0]))\n P3 = kron3(projNeumannIn(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pin = sp.block_diag((P1, P2, P3), format=\"csr\")\n\n P1 = kron3(speye(n[2]), speye(n[1]), projNeumannOut(n[0], BC[0]))\n P2 = kron3(speye(n[2]), projNeumannOut(n[1], BC[1]), speye(n[0]))\n P3 = kron3(projNeumannOut(n[2], BC[2]), speye(n[1]), speye(n[0]))\n Pout = sp.block_diag((P1, P2, P3), format=\"csr\")\n\n return Pbc, Pin, Pout", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def process_boundary_conditions(self, model):\n\n processed_bcs = {}\n\n # process and set pybamm.variables first incase required\n # in discrisation of other boundary conditions\n for key, bcs in model.boundary_conditions.items():\n processed_bcs[key] = {}\n\n # check if the boundary condition at the origin 
for sphere domains is other\n # than no flux\n for subdomain in key.domain:\n if (\n self.mesh[subdomain].coord_sys\n in [\"spherical polar\", \"cylindrical polar\"]\n and list(self.mesh.geometry[subdomain].values())[0][\"min\"] == 0\n ):\n if bcs[\"left\"][0].value != 0 or bcs[\"left\"][1] != \"Neumann\":\n raise pybamm.ModelError(\n \"Boundary condition at r = 0 must be a homogeneous \"\n \"Neumann condition for {} coordinates\".format(\n self.mesh[subdomain].coord_sys\n )\n )\n\n # Handle any boundary conditions applied on the tabs\n if any(\"tab\" in side for side in list(bcs.keys())):\n bcs = self.check_tab_conditions(key, bcs)\n\n # Process boundary conditions\n for side, bc in bcs.items():\n eqn, typ = bc\n pybamm.logger.debug(\"Discretise {} ({} bc)\".format(key, side))\n processed_eqn = self.process_symbol(eqn)\n processed_bcs[key][side] = (processed_eqn, typ)\n\n return processed_bcs", "def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. 
/ (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc", "def _get_proj_mat(self):\n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vec_handles)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, self.basis_vec_handles)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def set_internal_boundary_conditions(self, model):\n\n def boundary_gradient(left_symbol, right_symbol):\n pybamm.logger.debug(\n \"Calculate boundary gradient ({} and {})\".format(\n left_symbol, right_symbol\n )\n )\n left_domain = left_symbol.domain[0]\n right_domain = right_symbol.domain[0]\n\n left_mesh = self.spatial_methods[left_domain].mesh[left_domain]\n right_mesh = self.spatial_methods[right_domain].mesh[right_domain]\n\n left_symbol_disc = self.process_symbol(left_symbol)\n right_symbol_disc = self.process_symbol(right_symbol)\n\n return self.spatial_methods[left_domain].internal_neumann_condition(\n left_symbol_disc, right_symbol_disc, left_mesh, right_mesh\n )\n\n bc_keys = list(self.bcs.keys())\n\n internal_bcs = {}\n for var in model.boundary_conditions.keys():\n if isinstance(var, pybamm.Concatenation):\n children = var.orphans\n\n first_child = children[0]\n next_child = children[1]\n\n lbc = self.bcs[var][\"left\"]\n rbc = (boundary_gradient(first_child, next_child), \"Neumann\")\n\n if first_child not in bc_keys:\n internal_bcs.update({first_child: {\"left\": lbc, \"right\": rbc}})\n\n for current_child, next_child in zip(children[1:-1], children[2:]):\n lbc = rbc\n rbc = (boundary_gradient(current_child, next_child), \"Neumann\")\n if current_child not in bc_keys:\n internal_bcs.update(\n {current_child: {\"left\": lbc, \"right\": rbc}}\n )\n\n lbc = rbc\n rbc = self.bcs[var][\"right\"]\n if children[-1] not in bc_keys:\n internal_bcs.update({children[-1]: {\"left\": lbc, \"right\": rbc}})\n\n self.bcs.update(internal_bcs)", "def boundary(self):\n answer = self.zero()\n for k, v in self.items():\n for idx, cube in enumerate(k):\n acc_dim = sum((cube_l.dimension for cube_l in k[:idx]))\n for i in range(cube.dimension):\n for epsilon in (0, 1):\n new_cube = cube.face(i, epsilon)\n new_k = k[:idx] + (new_cube,) + k[idx + 1:]\n sign_exp = (acc_dim + i + epsilon) % 2\n answer += answer.create({new_k: v * (-1)**sign_exp})\n return answer", "def impose_boundary_conditions(self) -> _ImposeBoundaryConditionsResults:\n\n stiffness = self.get_stiffness_matrix()\n force_vector = self.get_force_vector()\n\n restrained_dofs = self.get_supported_dofs()\n\n for axis in range(2):\n stiffness = np.delete(\n stiffness,\n [dof for dof in restrained_dofs],\n axis=axis,\n )\n\n force_vector = np.delete(\n force_vector,\n [dof for dof in restrained_dofs],\n axis=0,\n )\n\n return _ImposeBoundaryConditionsResults(\n 
stiffness=stiffness,\n force=force_vector,\n )", "def invariant_bilinear_form(self):\n m = self.gap().InvariantBilinearForm()['matrix'].matrix()\n m.set_immutable()\n return m", "def bloch_matrix(self):\n if self.gf_r is None:\n self.gf()\n\n return -self.gf_r.dot(self.lead[1])", "def mw_boundaries(self):\n phi = np.arange(0., 2.0*np.pi, 0.1)\n theta_l = np.ones_like(phi)* 110 * np.pi / 180.\n theta_h = np.ones_like(phi)* 70 * np.pi / 180.\n ra_l, dec_l = self.gc2radec(phi, theta_l)\n ra_h, dec_h = self.gc2radec(phi, theta_h)\n return (ra_h, dec_h), (ra_l, dec_l)", "def base_projection_matrix(self, fiber):\n return matrix(ZZ, fiber.vertices()).right_kernel_matrix()", "def forward_problem(m):\n hm = om.HeadMat(m['geometry'])\n hm.invert() # invert in place (no copy)\n dsm = om.DipSourceMat(m['geometry'], m['dipsources'])\n return hm * dsm", "def _getBMat(self):\n\n \"\"\"B matrix is just a mass matrix, can be easily assembled\n through fenics. However, the ordering in Fenics is not the\n mesh ordering. So we build a temp matrix and then use the\n vertex to dof map to get the right ordering interms of our\n mesh nodes.\n \"\"\"\n\n # create function space of order 1. For KL, we only restrict\n # to first order spaces.\n V = FunctionSpace(self._mesh, \"CG\", 1)\n # Define basis and bilinear form\n u = TrialFunction(V)\n v = TestFunction(V)\n a = u * v * dx\n # assemble in a temp matrix\n B_temp = assemble(a)\n\n # create petsc matrix B\n B = PETSc.Mat().create()\n B.setType('aij')\n B.setSizes(self.domain.getNodes(), self.domain.getNodes())\n B.setUp()\n\n # store the value in a a temp array B_ij\n B_ij = B_temp.array()\n\n # get the vertex to dof map\n v_to_d_map = vertex_to_dof_map(V)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Mass Matrix '\n print '---------------------------'\n print '---------------------------'\n for node_i in range(0, self.domain.getNodes()):\n for node_j in range(node_i, self.domain.getNodes()):\n B_ij_nodes = B_ij[v_to_d_map[node_i], v_to_d_map[node_j]]\n if B_ij_nodes > 0:\n B.setValue(node_i, node_j, B_ij_nodes)\n B.setValue(node_j, node_i, B_ij_nodes)\n\n B.assemblyBegin()\n B.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Mass Matrix '\n print '---------------------------'\n print '---------------------------'\n return B", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def _solve_implicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n l_and_u = (1, 1)\n ab = np.empty((3, self.n_x))\n # main diagonal\n ab[1] = 1 + 2.0 * coeff\n # upper and lower diagonals\n ab[0] = ab[2] = -coeff\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n ab[0][1] = 0 # upper diagonal\n ab[1][0] = 1 # main diagonal\n elif self.left_bc_type == \"NEUMANN\":\n ab[0][1] = 1 # upper diagonal\n ab[1][0] = -1 # main diagonal\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = 0 # lower diagonal\n elif self.right_bc_type == \"NEUMANN\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = -1 # lower diagonal\n\n current_solution = initial_conditions\n solutions = []\n\n for t in self.t_grid:\n b = current_solution + self.tau * self.rhs(self.x_grid, t)\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n b[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n b[0] = self.h * self.left_bc(t)\n # right bc\n if 
self.right_bc_type == \"DIRICHLET\":\n b[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n b[-1] = self.h * self.right_bc(t)\n\n next_solution = solve_banded(l_and_u, ab, b)\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def m(self) -> np.ndarray:\n assert self._k is not None and self._r is not None and self._t is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k, r=self._r, t=self._t)", "def bcR(self, rng=None, aspect_ratio=1.0):\n if rng is None:\n rng = random.PRNGKey(1)\n\n x = self.bcmesh\n n = self.n\n n_y = equations.num_row(n, aspect_ratio)\n y = np.linspace(0, 1, num=n_y)\n if rng is not None:\n coeffs = random.multivariate_normal(rng, np.zeros(16),\n np.diag(np.ones(16)))\n else:\n key = random.randint(random.PRNGKey(1), (1,), 1, 1000)\n coeffs = random.multivariate_normal(\n random.PRNGKey(key[0]), np.zeros(16), np.diag(np.ones(16)))\n left = coeffs[0] * y**3 + coeffs[1] * y**2 + coeffs[2] * y + coeffs[3]\n right = coeffs[4] * y**3 + coeffs[5] * y**2 + coeffs[6] * y + coeffs[7]\n lower = coeffs[8] * x**3 + coeffs[9] * x**2 + coeffs[10] * x + coeffs[11]\n upper = coeffs[12] * x**3 + coeffs[13] * x**2 + coeffs[14] * x + coeffs[15]\n shape = 2 * x.shape\n source = onp.zeros(shape)\n source[0, :] = upper\n source[n_y - 1, :] = lower\n source[0:n_y, -1] = right\n source[0:n_y, 0] = left\n # because this makes the correct order of boundary conditions\n return source * (n + 1)**2", "def weak_repulsion_boundary(Cents,a,k, CV_matrix,n_c,n_C):\n CCW = np.dstack((roll_reverse(Cents[:,:,0]),roll_reverse(Cents[:,:,1])))#np.column_stack((Cents[:,1:3],Cents[:,0].reshape(-1,1,2)))\n CCW_displacement = Cents - CCW\n rij = np.sqrt(CCW_displacement[:,:,0]**2 + CCW_displacement[:,:,1]**2)\n norm_disp = (CCW_displacement.T/rij.T).T\n V_soft_mag = -k*(rij - 2*a)*(rij<2*a)\n V_soft_CCW = (V_soft_mag.T*norm_disp.T).T\n V_soft_CW = -(roll_forward(V_soft_mag).T*norm_disp.T).T\n V_soft = V_soft_CW + V_soft_CCW\n F_soft = np.zeros((n_c, 2))\n for i in range(3):\n F_soft += np.asfortranarray(CV_matrix[:, :, i])@np.asfortranarray(V_soft[:, i])\n F_soft[n_C:] = 0\n return F_soft", "def set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")", "def findStableState(L, boundaryConditions, Minv = None):\n\tn = L.shape[0]\n\tm = len(boundaryConditions)\n\tVb = np.zeros(m)\n\tpositions = {}\n\tfor i in range(m):\n\t\tcondition = boundaryConditions[i]\n\t\tVb[i] = condition[0]\n\t\tpositions[condition[0]] = condition[1]\n\tVb = np.sort(Vb)\n\tBPrime = np.zeros((m, n))\n\tYPrime = np.zeros((m, 3))\n\tfor i in range(m):\n\t\tBPrime[i][int(Vb[i])] = 1\n\t\tYPrime[i] = positions[Vb[i]]\n\n\tif Minv is None:\n\t\tzeroCorner = np.zeros((m, m))\n\t\tM = np.array(np.bmat([[L, -BPrime.T], [BPrime, zeroCorner]]))\n\t\tMinv = np.linalg.inv(M)\n\n\tXT = np.zeros((3, n))\n\t# find x coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[0]\n\tx = np.dot(Minv, y)\n\tXT[0] = x[:n]\n\t# find y coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[1]\n\tx = np.dot(Minv, y)\n\tXT[1] = x[:n]\n\t# find z coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[2]\n\tx = np.dot(Minv, y)\n\tXT[2] = x[:n]\n\n\treturn XT.T", "def use_blas(self):\r\n #the gemm version only support that case\r\n if self.out_mode == 'valid' and self.dx == 0 and self.dy == 0:\r\n #We use a faster version in those case.\r\n if (self.imshp != self.imshp_logical or\r\n self.kshp != self.kshp_logical or\r\n self.unroll_patch or\r\n self.unroll_batch > 0 or\r\n self.unroll_kern > 0):\r\n\r\n return False\r\n return True\r\n return False", "def invariant_bilinear_form(self):\n from sage.matrix.constructor import identity_matrix\n m = identity_matrix(self.base_ring(), self.degree())\n m.set_immutable()\n return m", "def full_matrix(self):\n # print(self.b,np.shape(self.b))\n # print(self.b_u[:, np.newaxis])\n # print(self.b_i[np.newaxis:, ],\"TTTT\")\n # print(self.P.dot(self.Q.T))\n if (self.type=='bias'):\n return self.b + self.b_u[:, np.newaxis] + self.b_i[np.newaxis:, ] + self.P.dot(self.Q.T)\n elif (self.type=='nonbias'):\n return self.P.dot(self.Q.T)", "def mesh_boundary(mesh):\n adja = edges_to_adjacency_matrix(mesh)\n r = sparse.extract.find(adja)\n li = r[0][np.where(r[2] == 1)]\n lj = r[1][np.where(r[2] == 1)]\n edges_boundary = np.vstack([li, lj]).T\n \"\"\"\n # alternative implementation based on edges and grouping from trimesh\n # instead of adjacency matrix\n from trimesh import grouping\n groups = grouping.group_rows(mesh.edges_sorted, require_count=1)\n # vertex_boundary = np.unique(open_mesh.edges_sorted[groups])\n edges_boundary = mesh.edges_sorted[groups]\n \"\"\"\n if li.size == 0:\n print('No holes in the surface !!!!')\n return np.array()\n else:\n return edges_to_boundary(edges_boundary)", "def getProjectionMatrix(sorted_eigvecs):\n matrix_w = np.vstack(sorted_eigvecs).transpose()\n return matrix_w", "def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. 
This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x" ]
[ "0.61380446", "0.605193", "0.59296656", "0.56866676", "0.5662838", "0.56362325", "0.56183934", "0.5609619", "0.55788034", "0.5507546", "0.54627687", "0.5442847", "0.54037845", "0.53641254", "0.5336187", "0.5328589", "0.53109026", "0.5291004", "0.5286625", "0.52734387", "0.5267653", "0.5249822", "0.523185", "0.5222699", "0.5204552", "0.51974183", "0.516918", "0.5169075", "0.51625144", "0.5157949" ]
0.6168079
0
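
The projection-matrix snippets in this row lean on a handful of sparse helpers (speye, sdiag, kron3, checkBC) that are never shown. As a minimal sketch, assuming they mirror the usual discretize-style mesh utilities, they could be built directly on scipy.sparse; the helper names come from the snippets above, but the bodies below are assumptions inferred from how they are called.

import numpy as np
import scipy.sparse as sp

def speye(n):
    # sparse n x n identity matrix
    return sp.identity(n, format="csr")

def sdiag(v):
    # sparse diagonal matrix built from a vector (e.g. face areas over cell volumes)
    return sp.diags(np.asarray(v).ravel(), format="csr")

def kron3(A, B, C):
    # three-way Kronecker product, used to lift 1D stencils onto 3D tensor meshes
    return sp.kron(sp.kron(A, B), C, format="csr")

def checkBC(bc):
    # normalise a boundary-condition spec to a two-sided list,
    # e.g. 'dirichlet' -> ['dirichlet', 'dirichlet']
    if isinstance(bc, str):
        bc = [bc, bc]
    assert len(bc) == 2
    return [side.lower() for side in bc]

With these in place, an expression such as sp.kron(speye(n[1]), projDirichlet(n[0], BC[0])) reproduces the 2D blocks assembled in getBCProjWF_simple above.
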
Upgrade workflow of timeslots and days
def upgrade_timeslot_workflow(context, logger=None): if logger is None: # Called as upgrade step: define our own logger. logger = logging.getLogger('uwosh.timeslot') #Run the workflow setup setup = getToolByName(context, 'portal_setup') setup.runImportStepFromProfile(PROFILE_ID, 'workflow') remap_workflow(context, type_ids=('Day', 'Time Slot'), chain='(Default)', state_map={'hidden': 'private'} ) remap_workflow(context, type_ids=('Day', 'Time Slot'), chain=('uwosh_timeslot_hidden_workflow',), state_map={'private': 'open', 'pending': 'open', 'published': 'open'} ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_workflows_restart(self):\n pass", "def test_cron_workflow_service_update_cron_workflow(self):\n pass", "def upgrade():\n\n op.execute(\"\"\"\n UPDATE task_group_tasks\n SET start_date = CURDATE(), end_date=CURDATE()\n WHERE (start_date IS NOT NULL AND start_date < \"1900-01-01\") OR\n (end_date IS NOT NULL AND end_date < \"1900-01-01\")\n \"\"\")", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()", "def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()", "def task_changed(old_task, diff, now_task):", "def updatepublicationtime(self, **kwargs):\n if 'workflow' not in kwargs or not kwargs['workflow']:\n raise InvalidParameter(\"Task name not found in the input parameters\")\n\n workflow = kwargs['workflow']\n authz_owner_match(self.api, [workflow], self.Task) #check that I am modifying my own workflow\n\n self.api.modify(self.Task.UpdatePublicationTime_sql, workflow=[workflow])\n\n return []", "def daily_sync_workflow(db_session: SessionLocal, project: Project):\n workflow_plugin = plugin_service.get_active_instance(\n db_session=db_session, project_id=project.id, plugin_type=\"workflow\"\n )\n if not workflow_plugin:\n log.warning(f\"No workflow plugin is enabled. 
ProjectId: {project.id}\")\n return\n\n incidents = incident_service.get_all(db_session=db_session, project_id=project.id).all()\n sync_workflows(db_session, project, workflow_plugin, incidents, notify=False)", "def do_up(self, arg):\n self.do_timesheet('update %s' % arg)", "def test_update_instances_schedule_state(self):\n pass", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def set_workflow_field(apps, schema_editor):\n ScheduledOperation = apps.get_model('ontask', 'ScheduledOperation')\n for sitem in ScheduledOperation.objects.all():\n if sitem.workflow:\n continue\n\n if not sitem.action:\n raise Exception('Unable to set workflow in ScheduledOperation')\n\n sitem.workflow = sitem.action.workflow\n sitem.save()", "def test_change_workflow_definition(self):\n pass", "def upgrade(self):", "def upgrade(self):", "def update_task(self, name, fields):\n pass", "def restart_interrupted_tasks(app, organization=None, team=None):\n pass", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_command__dont_update_old_activity(self):\n assignment = WorkflowCollectionAssignment.objects.get(id=self.assignment_2.id)\n self.assertEqual(assignment.status, \"IN_PROGRESS\")\n\n out = StringIO()\n call_command(\"assignment_terminator\", days_old=\"30\", type=\"SURVEY\", stdout=out)\n\n assignment.refresh_from_db()\n self.assertEqual(assignment.status, \"IN_PROGRESS\")", "def do_upm(self, arg):\n self.do_timesheet('update week')", "def do_upw(self, arg):\n self.do_timesheet('update week')", "def migrateUp(self):\n te = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending)\n if te is not None:\n self._transientSchedule(te.time, None)", "def deploy():\n update_treesheets()\n restart_treesheets()", "def stepETAUpdate(build, step, ETA, expectations):", "def test_deploy_workflow_definition(self):\n pass", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n 
end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def updateschedd(self, **kwargs):\n if 'scheddname' not in kwargs or not kwargs['scheddname']:\n raise InvalidParameter(\"Schedd name not found in the input parameters\")\n if 'workflow' not in kwargs or not kwargs['workflow']:\n raise InvalidParameter(\"Task name not found in the input parameters\")\n\n workflow = kwargs['workflow']\n authz_owner_match(self.api, [workflow], self.Task) #check that I am modifying my own workflow\n\n self.api.modify(self.Task.UpdateSchedd_sql, scheddname=[kwargs['scheddname']], workflow=[workflow])\n\n return []", "def test_relaunch_deployment_run(self):\n pass", "def _create_schedules(self):\n\n ''''''" ]
[ "0.6154629", "0.61379015", "0.60122925", "0.59856623", "0.579435", "0.57895106", "0.5563299", "0.55514747", "0.5502582", "0.5454234", "0.540223", "0.53500813", "0.5343529", "0.53403306", "0.53280693", "0.53280693", "0.53085685", "0.53028834", "0.52990687", "0.52546155", "0.52516", "0.5240073", "0.52371407", "0.5201355", "0.5190823", "0.5185894", "0.5181738", "0.51667726", "0.50849277", "0.5070267" ]
0.729469
0
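
The upgrade handler above calls remap_workflow without showing where it comes from. A hedged note: this is most likely the helper shipped with plone.app.workflow, and an isolated call would look like the sketch below; portal stands in for the Plone site object and is a hypothetical name here, not something taken from the source.

# Minimal sketch (assumption): remap_workflow as provided by plone.app.workflow.
from plone.app.workflow.remap import remap_workflow

remap_workflow(
    portal,                                    # hypothetical: the Plone site / context object
    type_ids=('Day', 'Time Slot'),
    chain=('uwosh_timeslot_hidden_workflow',),
    state_map={'private': 'open', 'pending': 'open', 'published': 'open'},
)
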
Add the new EHS notifications portal action
def upgrade_notifications_action(context, logger=None): #Run the workflow setup setup = getToolByName(context, 'portal_setup') setup.runImportStepFromProfile(PROFILE_ID, 'actions') return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_action(self, action):\n self.action = action", "def add_action(self, action):\n self.action = action", "def add_action(self, action):\n self._main_model.add_action(action)", "def registerAction(self, actionId, action): #$NON-NLS-1$\r", "def add_action(self, action: BaseAction):\n\n logger.debug(f'registering action {action}')\n self.actions.append(action)", "def add_action(self, action):\n self._actions.append(action)", "def add(self, action, debug=False):\n self.h.append(action)\n if debug:\n print(action.get_data())\n self.autosave and self.save_func()\n self._edited = True", "def addPostStartAction ( action ) :\n global __Bender_PostStart_Actions\n if action : __Bender_PostStart_Actions.append ( action ) \n return tuple(__Bender_PostStart_Actions)", "async def send_event_created(self, action_id: int):\n async with self.pg.acquire() as conn:\n data = await conn.fetchrow(\n \"\"\"\n SELECT a.company AS company_id, u.role AS host_role, u.id AS host_user_id,\n full_name(u.first_name, u.last_name, u.email) AS host_name,\n e.id AS event_id, e.name AS event_name,\n (e.start_ts AT TIME ZONE e.timezone)::date AS event_date,\n cat.name AS cat_name, cat.slug AS cat_slug,\n event_link(cat.slug, e.slug, e.public, $2) AS event_link\n FROM actions AS a\n JOIN users AS u ON a.user_id = u.id\n JOIN events AS e ON a.event = e.id\n JOIN categories AS cat ON e.category = cat.id\n WHERE a.id=$1\n \"\"\",\n action_id,\n self.settings.auth_key,\n )\n\n link = f'/dashboard/events/{data[\"event_id\"]}/'\n ctx = dict(\n summary='{host_name} created an event \"{event_name}\"'.format(**data),\n details=(\n 'Event \"{event_name}\" ({cat_name}) created by \"{host_name}\" ({host_role}), '\n 'click the link below to view the event.'\n ).format(**data),\n action_label='View Event',\n action_link=link,\n )\n users = [\n UserEmail(id=r['id'], ctx=ctx)\n for r in await conn.fetch(\"SELECT id FROM users WHERE role='admin' AND company=$1\", data['company_id'])\n ]\n await self.send_emails.direct(data['company_id'], Triggers.admin_notification, users)\n if data['host_role'] != 'admin':\n ctx = {\n 'event_link': data['event_link'],\n 'event_dashboard_link': link,\n 'event_name': data['event_name'],\n 'event_date': format_dt(data['event_date']),\n 'category_name': data['cat_name'],\n is_cat(data['cat_slug']): True,\n }\n await self.send_emails.direct(\n data['company_id'], Triggers.event_host_created, [UserEmail(data['host_user_id'], ctx)]\n )", "def publish_action(self, action):\n raise NotImplementedError", "def append(self, action: Action):\n self.actions.append(action)", "def _createAction(self, item, actionString):\n action = {\n \"action\": actionString,\n \"item_id\": item.item_id\n }\n\n pocketLogger.debug(\"Action\" + str(action))\n\n self.actions.append(action)", "def addPostRunAction ( action ) :\n global __Bender_PostRun_Actions\n if action : __Bender_PostRun_Actions.append ( action )\n return tuple(__Bender_PostRun_Actions)", "def send_actions(self, actions):\n pass", "def add_action_template(self, action_template: ActionTemplate) -> None:\n self._action_templates.append(action_template)", "def add_action(self, talk_action):\n self.action_queue.put(talk_action)", "def create_notification(self, notifying_href, notifying_action, notified_href, owner):\n if self.id == owner.id:\n return\n new_notification = Notification()\n new_notification.eid = make_uuid()\n new_notification.notifier = self\n new_notification.notifying_href = notifying_href\n new_notification.notifying_action = notifying_action\n 
new_notification.notified_href = notified_href\n new_notification.owner = owner\n new_notification.save()", "def addActions (self, actions) :\r\n self.action_buffer.extend(actions)", "def add_alerts(self):", "def configureCMFNotification(portal,logger):\n ntool = getToolByName(portal, NTOOL_ID)\n changeProperty = lambda key, value: \\\n ntool.manage_changeProperties(**{key: value})\n \n if not ntool.isExtraSubscriptionsEnabled():\n changeProperty('extra_subscriptions_enabled',True)\n #enable notification on Item creation\n \n changeProperty('item_creation_notification_enabled', True)\n changeProperty('on_item_creation_mail_template',['* :: string:creation_mail_notification'])\n logger.info(\"On Item Creation Notification has been enabled.\")\n \n #enable notification on Item modification\n changeProperty('item_modification_notification_enabled', True)\n changeProperty('on_item_modification_mail_template',['* :: string:modification_mail_notification'])\n logger.info(\"On Item Modification Notification has been enabled.\")\n \n #enable notification on Work Flow Transition\n changeProperty('wf_transition_notification_enabled', True)\n changeProperty('on_wf_transition_mail_template',['* :: string:workflow_mail_notification'])\n logger.info(\"On Workflow transition Notification has been enabled.\")\n \n #enable notification on Discussion Item Creation\n changeProperty('discussion_item_creation_notification_enabled',True)\n changeProperty('on_discussion_item_creation_mail_template',['* :: string:discussion_mail_notification'])\n logger.info(\"On Discussion Item Creation Notification has been enabled.\")", "def action_defined(sender, instance, created, raw, using, **kwargs):\n if created:\n raw_hook_event.send(\n sender=None,\n event_name=\"action_defined\",\n instance=instance,\n payload=ActionSerializer(instance).data,\n user=instance.team,\n )", "def post(self, request, pk):\n action_key = request.POST.get(\"action\")\n _, method = self.actions[action_key]\n getattr(self, method)()\n return HttpResponseRedirect(reverse(\"event_admin\", kwargs={\"pk\": pk}))", "def addPostInitAction ( action ) :\n global __Bender_PostInit_Actions\n if action : __Bender_PostInit_Actions.append ( action ) \n return tuple(__Bender_PostInit_Actions)", "def add_object_created_notification(self, dest: \"IBucketNotificationDestination\", *filters: \"NotificationKeyFilter\") -> None:\n return jsii.invoke(self, \"addObjectCreatedNotification\", [dest, *filters])", "def action_invite(self):\n self.ensure_one()\n\n if not self.env.user.email:\n raise UserError(_(\"Unable to post message, please configure the sender's email address.\"))\n\n mail_values = []\n for partner_id in self.partner_ids:\n slide_channel_partner = self.channel_id._action_add_members(partner_id)\n if slide_channel_partner:\n mail_values.append(self._prepare_mail_values(slide_channel_partner))\n\n # TODO awa: change me to create multi when mail.mail supports it\n for mail_value in mail_values:\n self.env['mail.mail'].sudo().create(mail_value)\n\n return {'type': 'ir.actions.act_window_close'}", "def notification(request):\n return HttpResponseRedirect('/settings/notifications/')", "def admin_actions():\n\n create_default_admin()\n return response('Admin account has been created', 201)", "def add_action(self, action):\n if action in self.actions:\n return\n elif self.actions == Action.actor_idle:\n self.actions = [action]\n elif action == Action.idle:\n self.actions = Action.actor_idle\n elif action not in self.actions:\n self.actions += [action]", "def 
addAction(self,name,params):\n\t\tnewaction = abstractAction.getAction(len(self._actionList)+1,self,name,params)\n\t\tself._actionList.append(newaction)\n\t\treturn newaction", "def setupActions(obj):\n at = getToolByName(obj, 'portal_actions')\n ait = getToolByName(obj, 'portal_actionicons')\n for action in at.listActions():\n if action.getId() == 'atom':\n break\n else:\n at.addAction(id = 'atom',\n name = \"Atom feed of this folder's contents\",\n action = \"string:$object_url/ATOM\",\n condition = \"python:portal.portal_syndication.isSyndicationAllowed(object)\",\n permission = CMFCorePermissions.View,\n category = 'document_actions',\n visible = True)\n for ai in ait.listActionIcons():\n if ai.getActionId() == 'atom':\n break\n else:\n ait.addActionIcon(category = 'plone',\n action_id = 'atom',\n icon_expr = 'atom.gif',\n title = 'Atom Syndication')" ]
[ "0.61897093", "0.61897093", "0.5927142", "0.5921507", "0.5749052", "0.57384455", "0.5704734", "0.568285", "0.5602667", "0.55947185", "0.5563545", "0.5556106", "0.55510795", "0.55410975", "0.5507621", "0.54634005", "0.5441426", "0.5424937", "0.53629506", "0.5347637", "0.5327337", "0.5285283", "0.5280147", "0.52569854", "0.5238247", "0.5234624", "0.5213418", "0.51817757", "0.51807916", "0.5171886" ]
0.6326241
0
Get the registered hash value of an image id, or None if not found
def search_image_hash(self, image_id: str) -> int: return self._id_to_hash.get(image_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_old_hash(img):\n try:\n old_hash = seals_data[img.split('.')[0]]['hash']\n except KeyError:\n old_hash = None\n return old_hash", "def image_hash(image_location):\n image = pygame.image.load(image_location)\n grey = greyscale(image)\n avg = average_image_value(grey)\n\n bitstring = ''\n for pixels in get_pixels(grey):\n if pixels[0] < avg:\n bitstring += '1'\n else: bitstring += '0'\n hash = int(bitstring, 2).__format__('016x').upper()\n return hash", "def existing_hash(self, id):\r\n return self._read_sha_by_id(id)", "def avatar_hash(self) -> undefined.UndefinedNoneOr[str]:", "def search_image_id(self, image_path: str) -> str:\n try:\n with open(image_path, \"rb\") as f:\n content = f.read()\n if self._check_corrupted:\n nparr = np.frombuffer(content, np.uint8)\n img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n if img_np is None:\n msg = \"Decode image {} failed\".format(image_path)\n raise ValueError(msg)\n except EnvironmentError as err:\n raise EnvironmentError(\"Load image {} failed\".format(image_path))\n\n hash_value = self._hash(content)\n\n hash_check = self._hash_to_id.get(hash_value, [])\n for image_id, path in hash_check:\n if filecmp.cmp(image_path, path):\n registered_id = image_id\n registered_path = path\n break\n else:\n registered_id = None\n registered_path = None\n\n return registered_id, registered_path, hash_value", "def get_hashid_and_urlid(url):\n existing = db.select('id, hashid', 'ImageURLs', 'url LIKE \"%s\"' % clean_url(url))\n if existing:\n urlid = existing[0][0]\n hashid = existing[0][1]\n return hashid, urlid, False\n\n # Download image\n if url.startswith('//'):\n url = 'http:%s' % url\n logger.debug('Downloading %s ...' % url)\n try:\n image_buffer = web.download(url)\n except Exception as e:\n logger.debug('Failed')\n raise Exception('Unable to download image at %s: %s' % (url, e))\n\n # Get image hash\n try:\n logger.debug('Hashing ...')\n image = image_from_buffer(image_buffer)\n (width, height) = image.size\n image_hash = str(avhash(image))\n except Exception as e:\n logger.debug('Failed')\n raise e\n logger.debug('Indexing ... 
')\n\n # Insert image hash into Hashes table\n hashid = db.insert('Hashes', (None, image_hash))\n if hashid == -1:\n # Already exists, need to lookup existing hash\n hashids = db.select('id', 'Hashes', 'hash = \"%s\"' % (image_hash,))\n if not hashids:\n raise Exception('unable to add hash to table, or find hash (wtf?)')\n hashid = hashids[0][0]\n\n # Image attributes\n try:\n filesize = len(image_buffer)\n url = clean_url(url)\n urlid = db.insert('ImageURLs', (None, url, hashid, width, height, filesize))\n create_thumb(image, urlid)\n logger.debug('Done')\n except Exception as e:\n raise e\n return hashid, urlid, True", "def get_hash_from_file(img):\n with open(img, 'rb') as f:\n return hashlib.sha256(f.read()).hexdigest()", "def image_digest(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_digest\")", "def avatar_hash(self) -> typing.Optional[str]:", "def banner_hash(self) -> undefined.UndefinedNoneOr[str]:", "def get_hash(self, data: Optional[bytes] = None) -> str:\n return self.__handle__.hash", "def _extract_image_short_id(scan_result: dict[str, Any]) -> str:\n\n if \"id\" not in scan_result:\n return \"sha256:unknown\"\n\n image_id: str = scan_result[\"id\"]\n\n if image_id.startswith(\"sha256:\"):\n return image_id[:17]\n return image_id[:10]", "def get_hash(self):\n return self.__hash", "async def get_hash(identifier):\n return hashlib.md5(identifier.encode('utf8')).hexdigest()", "def get_hash(file_url):\n file_extension = os.path.splitext(file_url)[1]\n return str(HASHES.get(file_extension))", "def get_average_hash(image_path):\n\n return imagehash.average_hash(Image.open(image_path))", "def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")", "def compute_phash(im):\n return imagehash.phash(ensure_pil(im))", "def compute_dhash(im):\n return imagehash.dhash(ensure_pil(im))", "def get_hash(self):\r\n return", "def get_hash(self, params):\n return self.sha", "def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']", "def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")", "def _calc_hash(self) -> None:\n self.image = Image.open(self.path)\n self.image = self.image.convert(\"L\")\n self.image = self.image.resize((self.width, self.height), Image.ANTIALIAS)\n lpixels = list(self.image.getdata())\n self.hash = \"0b\"\n for i, pixel in enumerate(lpixels):\n if (i + 1) % self.width == 0 and i != 0:\n continue\n if pixel < lpixels[i + 1]:\n self.hash += \"1\"\n continue\n self.hash += \"0\"\n self.hash_hex = DHash.bin2hex(self.hash)", "def _get_image_checksum(self):\n\t\tignoreKeyError = True\n\n\t\tmd5Checksum = None\n\t\ttry:\n\t\t\tmd5Checksum = self.labels['IMAGE'][\"MD5_CHECKSUM\"]\n\t\texcept KeyError:\n\t\t\tif self.log: self.log.debug(\"Did not find md5 checksum\")\n\t\t\tif not ignoreKeyError:\n\t\t\t\traise\n\t\t\tpass\n\t\telse:\n\t\t\tif self.log: self.log.debug(\"Found md5 checksum\")\n\t\t\tmd5Checksum = md5Checksum[1:-1]\n\n\t\treturn md5Checksum", "def get_key(self, obj):\n if hasattr(obj, \"id\"):\n hashed_id = hashlib.md5(str(obj.id).encode(\"utf-8\")).hexdigest()\n return hashed_id\n else:\n return None", "def get_best_hash(self):\n if not self.hashes:\n return None\n try:\n return (\"SHA1\", self.hashes['SHA1'])\n except KeyError:\n pass\n try:\n 
return (\"MD5\", self.hashes['MD5'])\n except KeyError:\n pass\n return self.hashes.items()[0]", "def calculate_hash_id(self):\n return get_md5_hash(f'{self.type}{self.get_primary_id()}')" ]
[ "0.7397036", "0.71517426", "0.71407926", "0.6864395", "0.67980075", "0.6432143", "0.642891", "0.6412715", "0.6400453", "0.6383878", "0.63607794", "0.62991333", "0.6290454", "0.627996", "0.6270038", "0.6223887", "0.6196117", "0.6196117", "0.61933583", "0.61728746", "0.61723137", "0.6167904", "0.6160762", "0.6129171", "0.6129171", "0.6127649", "0.610278", "0.6074436", "0.606307", "0.60604024" ]
0.792045
0
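
The lookup methods in this row read from two registries, _id_to_hash and _hash_to_id, that have to be populated elsewhere. As a minimal sketch, under the assumption that registration simply hashes the raw file bytes and records both directions, a register method could look like the one below; the class name, the sha256 choice for _hash, and the method name register_image are all hypothetical.

import hashlib

class ImageRegistry:
    def __init__(self):
        self._id_to_hash = {}   # image_id -> hash value
        self._hash_to_id = {}   # hash value -> list of (image_id, image_path) pairs

    def _hash(self, content):
        # hypothetical choice: the snippets never show which hash function is used
        return hashlib.sha256(content).hexdigest()

    def register_image(self, image_id, image_path):
        with open(image_path, "rb") as f:
            content = f.read()
        hash_value = self._hash(content)
        self._id_to_hash[image_id] = hash_value
        self._hash_to_id.setdefault(hash_value, []).append((image_id, image_path))
        return hash_value

    def search_image_hash(self, image_id):
        # mirrors the document above: None when the id was never registered
        return self._id_to_hash.get(image_id)

Collision handling is then left to search_image_id in the next row, which compares candidate files byte-for-byte with filecmp.cmp for every entry that shares a hash value.
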
Return the registered image id of an image's content. This function will match images by content and content only
def search_image_id(self, image_path: str) -> str: try: with open(image_path, "rb") as f: content = f.read() if self._check_corrupted: nparr = np.frombuffer(content, np.uint8) img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR) if img_np is None: msg = "Decode image {} failed".format(image_path) raise ValueError(msg) except EnvironmentError as err: raise EnvironmentError("Load image {} failed".format(image_path)) hash_value = self._hash(content) hash_check = self._hash_to_id.get(hash_value, []) for image_id, path in hash_check: if filecmp.cmp(image_path, path): registered_id = image_id registered_path = path break else: registered_id = None registered_path = None return registered_id, registered_path, hash_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']", "def search_id(self,obj):\r\n ##### create the new id ###########\r\n #for x in self.objectValues('Image'):\r\n for x in obj:\r\n liste_id.append(str(x.id())[0:6])\r\n for digit0 in liste_digit:\r\n for digit1 in liste_digit:\r\n for digit2 in liste_digit:\r\n for digit3 in liste_digit:\r\n for digit4 in liste_digit:\r\n for digit5 in liste_digit:\r\n searched_dict=0\r\n searched=str(digit0)+str(digit1)+str(digit2)+str(digit3)+str(digit4)+str(digit5)\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n for x in self_val:\r\n liste_val.append('_'+x+'.jpeg')\r\n for extension in liste_val:\r\n searched_extension=str(searched)\r\n if searched_extension in liste_id:\r\n searched_dict=searched_dict+1\r\n if searched_dict==0:\r\n return searched\r\n return ''", "def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id", "def look_for_img(txt):\n global images_to_add\n global REL_PATH_IMAGES\n to_look = \"\\<img src='\" + REL_PATH_IMAGES + \"\\/(.)*\\>\"\n match = re.search(r\"\"+to_look, txt)\n if match:\n to_sub = \"<img src='\" + REL_PATH_IMAGES\n #print(to_sub)\n img = re.sub(to_sub, \"\", re.search(r\"\"+to_look, txt).group())\n img_name = img.split(\"'\")\n images_to_add += [img_name[0].replace(to_sub, \"\")]\n #print(images_to_add)\n return 1\n else:\n return 0", "def search_image_hash(self, image_id: str) -> int:\n return self._id_to_hash.get(image_id)", "def getImageIdentifier(_session, _el):\n import sc_core.constants as sc_constants\n import sc_core.pm as sc\n import ogre.renderer.OGRE as ogre\n \n addr = _el\n assert addr is not None\n icon_name = \"image_%s\" % str(addr.this)\n \n # check if icon already loaded\n if ogre.TextureManager.getSingleton().getByName(icon_name) is not None:\n return icon_name\n \n icon = None\n idtf_set = searchOneShotBinPairAttrToNode(_session, addr, keynodes.common.nrel_identification, sc.SC_CONST)\n if idtf_set is not None:\n \n it1 = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n idtf_set,\n sc.SC_A_CONST,\n sc.SC_N_CONST), True)\n while not it1.is_over():\n if checkIncToSets(_session, it1.value(2), [keynodes.common.group_image], sc.SC_CONST):\n icon = it1.value(2)\n break \n it1.next()\n \n if icon is None:\n return None\n \n _fmt = getContentFormat(_session, icon)\n assert _fmt is not None\n\n _cont = _session.get_content_const(icon)\n assert _cont is not None\n\n _cont_data = _cont.convertToCont()\n\n data = _cont.get_data(_cont_data.d.size)\n stream = ogre.MemoryDataStream(\"%s\" % str(addr.this), _cont_data.d.size, False)\n stream.setData(data)\n\n try:\n img = ogre.Image()\n img.load(stream, ogre.Image.getFileExtFromMagic(stream))\n except:\n import sys, traceback\n print \"Error:\", sys.exc_info()[0]\n traceback.print_exc(file=sys.stdout)\n \n ogre.TextureManager.getSingleton().loadImage(icon_name, \"General\", img)\n return icon_name\n \n return None", "def get_legacy_image_ids(self, content_retriever):\n pass", "def get_legacy_image_ids(self, content_retriever):\n pass", "def get_imageId_from_fackmask(filename):\n filename = os.path.splitext(filename)[0]\n regex = re.compile(r'\\d+')\n iid = regex.search(filename).group(0)\n image_id = int(iid)\n if filename.isdigit():\n return 
int(filename)\n return image_id", "def image_reference(self, image_id):\n\n info = self.image_info[image_id]\n if info[\"source\"] == \"openimage\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def _get_image_url_in_content(self, content):\n begin_token = 'src=\"'\n begin = content.find(begin_token)\n if begin == -1:\n return None\n\n # Acrescentamos o tamanho do 'begin_token' no 'begin'\n begin += len(begin_token)\n end = content.find('\"', begin)\n url = content[begin:end]\n return url.split('?')[0]", "def get_image_id(self, image_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \"/images/detail\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n _result = self.request(\"GET\", _url, _headers, _body)\n if _result is None:\n LOG_OBJ.error(\"No response from server while getting images.\")\n return\n if _result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get image ID Failed with status %s \" %\n _result.status)\n return _result.status\n\n _output = json.loads(_result.data)\n for _images in _output['images']:\n if _images['name'].lower() == image_name.lower():\n LOG_OBJ.info(\"Image Name: %s, Image ID : %s \" %\n (image_name, _images['id']))\n return _images['id']\n LOG_OBJ.error(\"The image: %s is NOT found\" % image_name)", "def get_a_similar_img(self, imgID, db = \"train\"):\n if db==\"train\":\n the_landmark_id = self.get_landmark_id(imgID)\n subset = self.train_data.loc[self.train_data.landmark_id == the_landmark_id, \"id\"].values \n return self.choose_an_imgID(subset, imgID)\n \n return None", "def parse_image_id(image_ref):\n temp = image_ref.rsplit('/')\n #Return the last item, which is the image id\n return temp[len(temp) - 1]", "def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"image_id\")", "def test_image_id(self):\n result = self.test_client.image_id\n\n assert result == \"1238012\"", "def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")", "def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")", "def image(image_id):\n\n found = False\n img = None\n \n try:\n for img in api.get_all_images():\n if img.id == image_id:\n found = True\n break\n except Exception:\n logging.error(\"Cannot make API connection to retrieve image info!\")\n\n if not found:\n return None\n\n return img", "def find(self, source_img, element_img, cachename=\"\"):\n if cachename != \"\" and cachename in self.db:\n s = self.db[cachename]\n else:\n s = self.complexImageMatch(source_img, element_img)[0]\n if cachename != \"\":\n self.db[cachename] = s\n return s", "def test_get_image_id_by_name_in_uuid(self):\n img_id = str(uuid.uuid4())\n img_name = str(uuid.uuid4())\n self.my_image.id = img_id\n self.my_image.name = img_name\n self.sahara_client.images.get.side_effect = [\n sahara_base.APIException(error_code=400,\n error_name='IMAGE_NOT_REGISTERED')]\n\n self.sahara_client.images.find.return_value = [self.my_image]\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))\n\n self.sahara_client.images.get.assert_called_once_with(img_name)\n 
self.sahara_client.images.find.assert_called_once_with(name=img_name)", "def verify_image_id(name: str, img_text: str) -> bool:\r\n fst, snd = name.lower().split()\r\n text = img_text.lower()\r\n return fst in text and snd in text", "def image_id_at(self, i):\n return i", "def get_imageId_from_fileName(filename):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def get_image_ref() -> str:\n images_rq = request(\n method=\"GET\", url=app.config[\"IMAGE_REF\"], headers=build_header(),\n )\n if not images_rq.ok:\n HTTPError(f\"Can not get image id for virtual machine: {images_rq.status_code}\")\n\n [image] = images_rq.json()[\"images\"]\n return image[\"id\"]", "def find_identity(frame, x1, y1, x2, y2):\n height, width, channels = frame.shape\n # The padding is necessary since the OpenCV face detector creates the bounding box around the face and not the head\n part_image = frame[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]\n \n return who_is_it(part_image, database, FRmodel)" ]
[ "0.6564013", "0.6499344", "0.64345217", "0.64091", "0.6397784", "0.6262968", "0.61659104", "0.61129624", "0.61129624", "0.607925", "0.6077456", "0.60461605", "0.6043515", "0.6004683", "0.5994362", "0.59930915", "0.59930915", "0.59668595", "0.5921665", "0.5921665", "0.5900386", "0.5900386", "0.58979636", "0.58946514", "0.5850443", "0.58414215", "0.5795593", "0.5767142", "0.5766826", "0.5765352" ]
0.7357817
0
Simple function to start report manager (if any)
def _start_report_manager(self, start_time=None): if self.report_manager is not None: if start_time is None: self.report_manager.start() else: self.report_manager.start_time = start_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self, report):\r\n self.report = report\r\n self.report.open()\r\n\r\n self._main_root_workunit = WorkUnit(run_tracker=self, parent=None, labels=[],\r\n name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)\r\n self.register_thread(self._main_root_workunit)\r\n self._main_root_workunit.start()\r\n self.report.start_workunit(self._main_root_workunit)", "def show_report(*args):\n for report in args:\n os.startfile(report)", "def _open_report(self):\n global last_report_path\n subprocess.Popen([last_report_path], shell=True)", "def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)", "def reports_cli():", "def launch_reporteditor():\r\n import sys\r\n from PyQt4 import QtGui\r\n from freeseer.frontend.reporteditor.reporteditor import ReportEditorApp\r\n\r\n profile = settings.profile_manager.get()\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig,\r\n storage_args=['Global'], read_only=True)\r\n db = profile.get_database()\r\n\r\n app = QtGui.QApplication(sys.argv)\r\n main = ReportEditorApp(config, db)\r\n main.show()\r\n sys.exit(app.exec_())", "def start_command2(self):\n app_manager.start_application(CharReportApp)", "def initial_reporting(config, run_tracker):\r\n reports_dir = config.get('reporting', 'reports_dir',\r\n default=os.path.join(config.getdefault('pants_workdir'), 'reports'))\r\n link_to_latest = os.path.join(reports_dir, 'latest')\r\n if os.path.exists(link_to_latest):\r\n os.unlink(link_to_latest)\r\n\r\n run_id = run_tracker.run_info.get_info('id')\r\n if run_id is None:\r\n raise ReportingError('No run_id set')\r\n run_dir = os.path.join(reports_dir, run_id)\r\n safe_rmtree(run_dir)\r\n\r\n html_dir = os.path.join(run_dir, 'html')\r\n safe_mkdir(html_dir)\r\n os.symlink(run_dir, link_to_latest)\r\n\r\n report = Report()\r\n\r\n # Capture initial console reporting into a buffer. We'll do something with it once\r\n # we know what the cmd-line flag settings are.\r\n outfile = StringIO()\r\n capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,\r\n color=False, indent=True, timing=False,\r\n cache_stats=False)\r\n capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)\r\n report.add_reporter('capturing', capturing_reporter)\r\n\r\n # Set up HTML reporting. 
We always want that.\r\n template_dir = config.get('reporting', 'reports_template_dir')\r\n html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,\r\n html_dir=html_dir,\r\n template_dir=template_dir)\r\n html_reporter = HtmlReporter(run_tracker, html_reporter_settings)\r\n report.add_reporter('html', html_reporter)\r\n\r\n # Add some useful RunInfo.\r\n run_tracker.run_info.add_info('default_report', html_reporter.report_path())\r\n port = ReportingServerManager.get_current_server_port()\r\n if port:\r\n run_tracker.run_info.add_info('report_url', 'http://localhost:%d/run/%s' % (port, run_id))\r\n\r\n return report", "def _sessionstart(self, item):\n self.class_logger.info(\"Configuring reporting server...\")\n self.server_cmd(\"open\", [self.self_name])\n for _var in MODULES:\n if \"reports_conf.\" in _var:\n commands = MODULES[_var].ReportingServerConfig._sessionstart( # pylint: disable=protected-access\n self.class_logger, item, self.self_name, self.buildname(item.config.env.env_prop))\n for comm in commands:\n self.server_cmd(*comm)\n # Order TM reporting to server.\n\n # Order and configure XML report to server.", "def report():\n pass", "def gReport(self, event):\n \n reports.createReports()", "def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))", "def StartReportingThread(self):\n self.ReportQueue = Queue.Queue()\n self.ReportingThread = threading.Thread(\n target=self.ReportingThreadMain,\n name='ReportingThread'\n )\n self.ReportingThread.start()\n return", "def main():\n config_file = get_conf(get_config_name())\n if not config_file:\n sys.exit(1)\n log = get_last_file(config_file[\"LOG_DIR\"])\n MAIN_LOGGER.info(\"we've got log file named %s\", log.path)\n file_name = os.path.join(os.path.dirname(__file__), config_file['REPORT_DIR'],\n \"report-{}.html\".format(log.date))\n if os.path.exists(file_name):\n MAIN_LOGGER.info(\"%s already exists\", file_name)\n sys.exit()\n res = gen_parse_log(log, config_file['PERCENT_FAILS'])\n if not res:\n sys.exit(1)\n MAIN_LOGGER.info(\"log parsed\")\n report = []\n for _ in range(int(config_file[\"REPORT_SIZE\"])):\n try:\n report.append(next(res))\n except StopIteration:\n pass\n MAIN_LOGGER.info(\"report file name %s\", file_name)\n\n if report:\n save_report(report, config_file['TEMPLATE_FILE'], file_name)", "def test_basic_usage(self):\n self._test_reports_helper({}, [\"report.txt\"])", "def main():\n r = ReportHelper()\n today = dt.today()\n\n start_date = (today - timedelta(days=1)).strftime('%Y-%m-%d')\n end_date = today.strftime('%Y-%m-%d')\n response, ingestion_results = r.get_report(start_date, end_date, 'daily')\n logger.debug('Daily report data from {s} to {e}'.format(s=start_date, e=end_date))\n logger.debug(json.dumps(response, indent=2))\n logger.debug(json.dumps(ingestion_results, indent=2))\n\n if time_to_generate_monthly_report(today):\n last_day_of_prev_month = date(today.year, today.month, 1) - timedelta(days=1)\n last_month_first_date = last_day_of_prev_month.strftime('%Y-%m-01')\n last_month_end_date = last_day_of_prev_month.strftime('%Y-%m-%d')\n response, ingestion_results = r.get_report(last_month_first_date,\n last_month_end_date,\n 'monthly')\n logger.debug('Monthly report data from {s} to {e}'.format(s=start_date, e=end_date))\n logger.debug(json.dumps(response, indent=2))\n\n return response", "def do(self):\r\n self.dlCsvReport()\r\n self.dlXlsReport()", "def 
initialize_report(output_dir,\n subject_name='Subject',\n log=True,\n filename='report',\n prepreproc_undergone=\"\",\n dcm2nii=False,\n deleteorient=False,\n fwhm=None, anat_fwhm=None,\n slice_timing=False,\n realign=False,\n coregister=False,\n coreg_func_to_anat=False,\n segment=False,\n normalize=False,\n dartel=False,\n command_line=None,\n has_func=True\n ):\n report_outfile = os.path.join(output_dir, '{}.html'.format(filename))\n\n report_dict = {}\n report_dict['preproc_undergone'] = generate_preproc_steps_docstring(\n dcm2nii=dcm2nii,\n deleteorient=deleteorient,\n slice_timing=slice_timing,\n realign=realign,\n coregister=coregister,\n segment=segment,\n normalize=normalize,\n fwhm=fwhm, anat_fwhm=anat_fwhm,\n dartel=dartel,\n coreg_func_to_anat=coreg_func_to_anat,\n prepreproc_undergone=prepreproc_undergone,\n has_func=has_func\n )\n report_dict['subject_name'] = subject_name\n report_dict['start_time'] = strftime(\"%d-%b-%Y %H:%M:%S\", gmtime())\n report_dict['end_time'] = \"STILL RUNNING...\"\n report_text = embed_in_HTML('report_template.html', report_dict)\n report_HTML = HTMLDocument(report_text).save_as_html(report_outfile)\n\n if log:\n # create a separate HTML with all the logs\n log_outfile = os.path.join(output_dir, '{}_log.html'.format(filename))\n log_HTML = HTMLDocument(\"<html><body>\").save_as_html(log_outfile)\n return report_outfile, log_outfile\n else:\n return report_outfile, None", "def main():\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])", "def run(self):\n report_details = self.report_client.get(self.csv_report)\n print(\"Report Details - \", report_details)", "def buildReports(self):\n pass", "def start(self):\n\t\tself.app.printflush('Sitemap: ' + self.sitemap_url)\n\t\tself.getUrlsList()\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count))\n\t\tself.app.printflush('Processes: ' + str(self.processes))\n\t\tself.CheckURLs()\n\t\tself.printReport()", "def initialize_reporting(self):\n reporting_params = self.reporting_params\n reporting_params[\"heartbeat_path\"] = self.result_paths[\"current_heartbeat\"]\n reporting_handler = ReportingHandler(**reporting_params)\n\n #################### Make Unified Logging Globally Available ####################\n G.log = reporting_handler.log\n G.debug = reporting_handler.debug\n G.warn = reporting_handler.warn", "def report(db, openfile):\n pass", "def display_reports(self, layout): # pylint: disable=arguments-differ", "def launch_report_thread(self, event):\n # Do not launch the reporting thread if an old one is still alive.\n # To guard against a zombie thread (alive, but doing nothing) launch\n # anyway if enough time has passed.\n if self.thread and self.thread.isAlive() and time.time()-self.launch_time < self.max_wait:\n return\n \n try:\n self.thread = weewx.reportengine.StdReportEngine(self.config_dict,\n self.engine.stn_info,\n first_run= not self.launch_time) \n self.thread.start()\n self.launch_time = time.time()\n except thread.error:\n syslog.syslog(syslog.LOG_ERR, \"Unable to launch report thread.\")\n self.thread = None", "def report(self, *reporters):\n if len(reporters) == 0:\n 
reporters = [c() for c in dexy.reporter.Reporter.plugins if c.ALLREPORTS]\n\n for reporter in reporters:\n self.log.debug(\"Running reporter %s\" % reporter.ALIASES[0])\n reporter.run(self)", "def start(self):\n if self.driver:\n eventlet.spawn_n(self.driver.monitor_events)", "def run(ctx, report_ids):\n client = ctx.obj[\"client\"]\n for report_id in report_ids:\n report = client.run_report(report_id)\n click.secho(f\"Running {report}\")", "def setup_parser_report(subparsers):\r\n subparsers.add_parser('report', help='Freeseer reporting functions')" ]
[ "0.7185138", "0.69041795", "0.6595344", "0.65820175", "0.6453679", "0.6412547", "0.64120275", "0.63057476", "0.6291027", "0.6269089", "0.6235882", "0.6231339", "0.6214937", "0.62096953", "0.6059954", "0.60499275", "0.6017881", "0.59796536", "0.5939616", "0.5906054", "0.5880803", "0.57598287", "0.57140994", "0.57087535", "0.56917864", "0.56849647", "0.568457", "0.5667001", "0.5646525", "0.56270176" ]
0.74751675
0
Simple function to report training stats (if report_manager is set) see `onmt.utils.ReportManagerBase.report_training` for doc
def _maybe_report_training(self, step, num_steps, learning_rate, report_stats): if self.report_manager is not None: return self.report_manager.report_training( step, num_steps, learning_rate, report_stats, multigpu=self.n_gpu > 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_training_metrics(self) -> None:\n self.trainer_metrics.write_training_metrics()", "def report_func(opt, global_step, epoch, batch, num_batches,\n start_time, lr, report_stats):\n if batch % opt.steps_per_stats == -1 % opt.steps_per_stats:\n report_stats.print_out(epoch, batch+1, num_batches, start_time)\n report_stats = nmt.Statistics()\n\n return report_stats", "def log(self, report, epoch):\n train_return_values = np.asarray([trajectory['reward'].sum()\n for trajectory in report['training_trajectories']])\n trajectories_infos = [trajectory['info'] for trajectory in report.pop('training_trajectories')]\n sum_costs = np.asarray([sum(list(map(lambda info: info.get('cost', 0.0), trajectory)))\n for trajectory in trajectories_infos])\n report.update(dict(\n training_rl_objective=train_return_values.mean(),\n sum_rewards_stddev=train_return_values.std(),\n mean_sum_costs=sum_costs.mean()\n ))\n training_step = report.pop('total_training_steps')\n for key, value in report.items():\n self.training_logger.log_scalar(value, key, training_step)\n self.training_logger.flush()", "def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')", "def log_training(self, batch, total_batches, result):\n metrics = [\"loss\", \"accuracy\"]\n for metric in metrics:\n if metric not in self.logs:\n self.logs[metric] = []\n self.logs[metric].append(result[metric])\n if batch % self.log_frequency == 0 or batch + 1 == total_batches:\n print(\"Batch {} / {} = {:.2f} %\".format(batch, total_batches, 100 * batch / total_batches))\n print(\"{:20}: {}\".format(\"Global step\", result[\"global_step\"]))\n print(\"{:20}: {:.4e}\".format(\"Learning rate\", result[\"learning_rate\"]))\n for metric in metrics:\n metric_logs = self.logs[metric]\n average = sum(metric_logs) / len(metric_logs)\n print(\"{:20}: {:.4}\".format(\"Training \" + metric, average))\n self.logs[metric] = []\n val_metrics = self.evaluate(self.batches_valid)\n for k, v in val_metrics.items():\n print(\"{:20}: {:.4}\".format(\"Validation \" + k, v))", "def before_epoch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'metrics.txt'), 'a+') as fout:\n if hasattr(self.trainer, '_metrics'):\n fout.write(\n str(self.trainer._epoch - 1) + '\\t' +\n str(self.trainer._metrics) + '\\n')", "def write_training_summaries(self):\n for metric, epochs in self._training_summaries.items():\n self._write_scalar_to_tensorboard(\n name=f\"{self._Sections.SUMMARY}/training_{metric}\",\n value=epochs[-1],\n step=self._epochs,\n )", "def train(oa, network, oaName, training_ints, testing_ints, measure, training_iterations, out_path):\n print(\"\\nError results for %s\\n---------------------------\".format(oaName))\n times = [0]\n for iteration in xrange(training_iterations):\n start = time.clock()\n oa.train()\n elapsed = time.clock()-start\n \ttimes.append(times[-1]+elapsed)\n if iteration % 10 == 0:\n \t MSE_trg, acc_trg = errorOnDataSet(network,training_ints,measure)\n MSE_tst, acc_tst = errorOnDataSet(network,testing_ints,measure)\n txt = '{},{},{},{},{},{}\\n'.format(iteration,MSE_trg,MSE_tst,acc_trg,acc_tst,times[-1])\n print txt\n with open(out_path,'a+') as f:\n f.write(txt)", "def on_epoch(\n self,\n trainer: \"GradientDescentTrainer\",\n 
metrics: Dict[str, Any],\n epoch: int,\n is_primary: bool = True,\n **_: Any,\n ) -> None:\n if not is_primary:\n return None\n\n value = metrics.get(self._monitor)\n if value is None:\n return\n\n self._trial.report(float(value), epoch)\n if self._trial.should_prune():\n raise TrialPruned()", "def measure_stats(sess, loss, accuracy, epoch, x, y,\n X_train, Y_train, X_valid, Y_valid):\n t_c = sess.run(loss, feed_dict={x: X_train, y: Y_train})\n t_a = sess.run(accuracy, feed_dict={x: X_train, y: Y_train})\n v_c = sess.run(loss, feed_dict={x: X_valid, y: Y_valid})\n v_a = sess.run(accuracy, feed_dict={x: X_valid, y: Y_valid})\n print('After {epoch} epochs:'.format(epoch=epoch))\n print('\\tTraining Cost: {cost}'.format(cost=t_c))\n print('\\tTraining Accuracy: {accuracy}'.format(accuracy=t_a))\n print('\\tValidation Cost: {cost}'.format(cost=v_c))\n print('\\tValidation Accuracy: {accuracy}'.format(accuracy=v_a))", "def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)", "def generate_report(self, output_path):\n generated_on = str(datetime.datetime.now())\n checkpoint_name = self._get_checkpoint_name()\n dataset_name = Path(self._train_dataset_path).resolve().name\n train_features, train_labels = self.get_features_and_labels(\n self._read_dataset(self._train_dataset_path)\n )\n test_features, test_lables = self.get_features_and_labels(\n self._read_dataset(self._test_dataset_path)\n )\n training_accuracy = self.get_model_accuracy(\n train_features,\n train_labels\n )\n test_accuracy = self.get_model_accuracy(\n test_features,\n test_lables,\n )\n with open(output_path, 'a') as f:\n report = (\n \"*****************************************************\\n\"\n \"Report generated on: {}\\n\"\n \"Training dataset: {}\\n\"\n \"Model checkpoint: {}\\n\"\n \"---\\n\"\n \"Accuracy on training data: {}\\n\"\n \"Accuracy on testing data: {}\\n\"\n \"\\n\"\n ).format(\n generated_on,\n dataset_name,\n checkpoint_name,\n training_accuracy,\n test_accuracy,\n )\n f.writelines(report)", "def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)", "def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # \t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);", "def start_training(self, logdir: str, **info):\n pass", "def training_info(self):\n pass", "def training_metrics(self):\r\n if 
self._training_metrics is None:\r\n # Builds the per-task metrics and losses.\r\n self._training_metrics = {}\r\n for name, task in self.multi_task.tasks.items():\r\n self._training_metrics[name] = task.build_metrics(training=True)\r\n return self._training_metrics", "def test(self):\n statistics = self.__progress(self.testing, self.__val_fn)\n print('Loss: {}'.format(statistics[0]))\n print('Precision: {:.3%}'.format(statistics[1]))\n print('Recall: {:.3%}'.format(statistics[2]))\n print('Accuracy: {:.3%}'.format(statistics[3]))\n self.report['test_loss'] = statistics[0]\n self.report['test_precision'] = statistics[1]\n self.report['test_recall'] = statistics[2]\n self.report['test_accuracy'] = statistics[3]", "def generate_learning_reports(self) -> None:\n\n # Generate a report consisting of the evaluation metrics for\n # each sub-experiment comprising each cross-validation\n # experiment for each learner\n (pd.DataFrame(list(chain(*self.cv_learner_stats_)))\n .to_csv(self.stats_report_path_,\n index=False))\n\n # Generate a report consisting of the aggregated evaluation\n # metrics from each cross-validation experiment with each\n # learner\n (self.training_cv_aggregated_stats_\n .to_csv(self.aggregated_stats_report_path_,\n index=False))", "def report_func(epoch, batch, num_batches, start_time, report_stats,\n report_every):\n if batch % report_every == -1 % report_every:\n report_stats.output(epoch, batch + 1, num_batches, start_time)\n report_stats = utils.Statistics()\n\n return report_stats", "def training_report_view(request, application_slug):\n return training_report(request, application_slug, attach=False)", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def logging_loop(self, num_gpus):\n # Launch the test worker to get performance metrics\n self.test_worker = self_play.SelfPlay.options(\n num_cpus=0, num_gpus=num_gpus,\n ).remote(\n self.checkpoint,\n self.Game,\n self.config,\n self.config.seed + self.config.num_workers,\n )\n self.test_worker.continuous_self_play.remote(\n self.shared_storage_worker, None, True\n )\n\n # Write everything in TensorBoard\n writer = SummaryWriter(self.config.results_path)\n\n print(\n \"\\nTraining...\\nRun tensorboard --logdir ./results and go to http://localhost:6006/ to see in real time the training performance.\\n\"\n )\n\n # Save hyperparameters to TensorBoard\n hp_table = [\n f\"| {key} | {value} |\" for key, value in self.config.__dict__.items()\n ]\n writer.add_text(\n \"Hyperparameters\",\n \"| Parameter | Value |\\n|-------|-------|\\n\" + \"\\n\".join(hp_table),\n )\n # Save model representation\n writer.add_text(\n \"Model summary\", self.summary,\n )\n # Loop for updating the training 
performance\n counter = 0\n keys = [\n \"total_reward\",\n \"wormzero_reward\",\n \"opponent_reward\",\n \"episode_length\",\n \"mean_value\",\n \"training_step\",\n \"lr\",\n \"total_loss\",\n \"value_loss\",\n \"policy_loss\",\n \"num_played_games\",\n \"num_played_steps\",\n \"num_reanalysed_games\",\n ]\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n try:\n while info[\"training_step\"] < self.config.training_steps:\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n writer.add_scalar(\n \"1.Total_reward/1.Total_reward\", info[\"total_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/2.Mean_value\", info[\"mean_value\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/3.Episode_length\", info[\"episode_length\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/4.WormZero_reward\", info[\"wormzero_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/5.Opponent_reward\",\n info[\"opponent_reward\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/1.Self_played_games\", info[\"num_played_games\"], counter,\n )\n writer.add_scalar(\n \"2.Workers/2.Training_steps\", info[\"training_step\"], counter\n )\n writer.add_scalar(\n \"2.Workers/3.Self_played_steps\", info[\"num_played_steps\"], counter\n )\n writer.add_scalar(\n \"2.Workers/4.Reanalysed_games\",\n info[\"num_reanalysed_games\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/5.Training_steps_per_self_played_step_ratio\",\n info[\"training_step\"] / max(1, info[\"num_played_steps\"]),\n counter,\n )\n writer.add_scalar(\"2.Workers/6.Learning_rate\", info[\"lr\"], counter)\n writer.add_scalar(\n \"3.Loss/1.Total_weighted_loss\", info[\"total_loss\"], counter\n )\n writer.add_scalar(\"3.Loss/Value_loss\", info[\"value_loss\"], counter)\n writer.add_scalar(\"3.Loss/Policy_loss\", info[\"policy_loss\"], counter)\n print(\n f'Last test reward: {info[\"total_reward\"]:.2f}. Training step: {info[\"training_step\"]}/{self.config.training_steps}. Played games: {info[\"num_played_games\"]}. 
Loss: {info[\"total_loss\"]:.2f}',\n end=\"\\r\",\n )\n counter += 1\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n self.terminate_workers()\n\n if self.config.save_model:\n # Persist replay buffer to disk\n print(\"\\n\\nPersisting replay buffer games to disk...\")\n pickle.dump(\n {\n \"buffer\": self.replay_buffer,\n \"num_played_games\": self.checkpoint[\"num_played_games\"],\n \"num_played_steps\": self.checkpoint[\"num_played_steps\"],\n \"num_reanalysed_games\": self.checkpoint[\"num_reanalysed_games\"],\n },\n open(os.path.join(self.config.results_path, \"replay_buffer.pkl\"), \"wb\"),\n )", "def show_learning_stats(track, train_loss, train_acc, valid_acc, test_acc):\n\n if track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc, test_acc))\n\n if track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc))\n\n if not track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, test_acc))\n\n if not track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} \".format(\n train_loss, train_acc))", "def report(LOGDIR, epoch, e_dict, saver, sess, fh_log):\n # print loss\n print (\"Epoch: %i; Loss: %f; KLd: %f; CE %f\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))\n fh_log.write(\"%i\\t%0.5e\\t%0.5e\\t%0.5e\\n\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))", "def train_stats(opt, model, dset):\n stats = {}\n if isinstance(model, SVIModel):\n # Stats collected during traning use a single sample from the\n # posterior. 
Therefore we check the accuracy once more using\n # the same no of samples as on the validation set.\n stats[\"lossMC\"], stats[\"accMC\"] = validate(\n DataLoader(dset, **vars(opt.val_loader)), model, opt.tst_mcs\n )\n if hasattr(opt, \"log\") and opt.log.train_no_aug:\n # We also look at the accuracy on un-augmented training data.\n # This is done on both MLE and SVI\n rlog.info(\"Compute accuracy on un-augmented train data.\")\n mc_samples = opt.tst_mcs if isinstance(model, SVIModel) else 0\n stats[\"lossNoAug\"], stats[\"accNoAug\"] = validate(\n DataLoader(get_unaugmented(dset), **vars(opt.val_loader)),\n model,\n mc_samples,\n )\n if hasattr(opt, \"log\") and opt.log.mle_ish:\n # Use the means of the posterior to set a pseudo-MLE model.\n assert isinstance(\n model, SVIModel\n ), \"This stat only makes sense for SVI models.\"\n model.sync_mle_model()\n rlog.info(\"Synced MLE model using means from posterior.\")\n rlog.info(\"Compute accuracy with a pseudo-MLE model.\")\n stats[\"lossMLE\"], stats[\"accMLE\"] = validate(\n DataLoader(dset, **vars(opt.val_loader)),\n model._mle_model, # pylint: disable=protected-access\n 0,\n )\n return stats", "def test_train(self):\n print \"x=\",self.trainer.train()", "def print_metrics(mva, df_train, df_test,\n y_train, y_test,\n mva_response_train=None, mva_response_test=None,\n w_train=None, w_test=None):\n\n train_prediction = mva.predict(df_train)\n test_prediction = mva.predict(df_test)\n\n if mva_response_train is None:\n mva_response_train = classifiers.evaluate_mva(df_train, mva)\n if mva_response_test is None:\n mva_response_test = classifiers.evaluate_mva(df_test, mva)\n\n print(\"\\nClassification Reports:\")\n print(\"Test sample:\")\n print(classification_report(y_test, test_prediction,\n target_names=[\"background\", \"signal\"]))\n print(\"Training sample:\")\n print(classification_report(y_train, train_prediction,\n target_names=[\"background\", \"signal\"]))\n\n print(\"Confusion matrix:\")\n print(\"Test sample:\")\n print(confusion_matrix(y_test, test_prediction))\n print(\"Training sample:\")\n print(confusion_matrix(y_train, train_prediction))\n print()\n\n print(\"KS Test p-value:\")\n print(\"Signal:\")\n print(ks_2samp(mva_response_train[y_train == 1],\n mva_response_test[y_test == 1],\n None if w_train is None else w_train[y_train == 1],\n None if w_test is None else w_test[y_test == 1])[1])\n print(\"Background:\")\n print(ks_2samp(mva_response_train[y_train == 0],\n mva_response_test[y_test == 0],\n None if w_train is None else w_train[y_train == 0],\n None if w_test is None else w_test[y_test == 0])[1])\n print()\n\n # Try really hard to get the feature importances\n feature_importances = []\n try:\n feature_importances = mva.feature_importances_\n except AttributeError:\n pass\n try: # last step of a pipeline?\n feature_importances = mva.steps[-1][1].feature_importances_\n except AttributeError:\n pass\n try: # grid search?\n feature_importances = mva.best_estimator_.feature_importances_\n except AttributeError:\n pass\n try: # grid search last step of pipeline?\n feature_importances = \\\n mva.steps[-1][1].best_estimator_.feature_importances_\n except AttributeError:\n pass\n try: # grid search of a pipeline?\n feature_importances = \\\n mva.best_estimator_.steps[-1][0].feature_importances_\n except AttributeError:\n pass\n\n if len(feature_importances):\n print(\"Feature importance:\")\n for var, importance in sorted(\n zip(list(df_train), feature_importances),\n key=lambda x: x[1],\n reverse=True):\n print(\"{0:15} 
{1:.3E}\".format(var, importance))\n else:\n pass\n print()", "def epoch_train(tools, **kwargs):\n sess = tools.sess\n optimizer = tools.optimizer\n\n feed_dict = kwargs.get(\"feed_dict\", {})\n\n infos, summary, e, _ = sess.run(tools.infos, feed_dict=feed_dict)\n if config.VERBOSE_EACH:\n if not int(e) % config.VERBOSE_EACH:\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n else:\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n\n tools.reporter(summary, e)\n\n try:\n if not feed_dict:\n while True:\n sess.run(optimizer)\n else:\n while True:\n sess.run(optimizer, feed_dict=feed_dict)\n except tf.errors.OutOfRangeError:\n pass\n return infos", "def report(param):\n featurecount_dir = param['working_dir']+'report/featureCount/'\n if not os.path.exists(featurecount_dir):\n os.makedirs(featurecount_dir)\n \n #report only if there were actually results\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n if os.path.exists(out_file):\n param['report'].write('<center><br><br><h2>FeatureCount statistics</h2>')\n table = process_stat_files(param)\n MODULE_HELPER.create_sub_report(param, out_file, table, 'featureCount', 'FeatureCount') \n MODULE_HELPER.plot_count_overview(param, 'featureCount', table)" ]
[ "0.683947", "0.6739739", "0.66196144", "0.6327586", "0.61808234", "0.6113651", "0.61012864", "0.6088131", "0.6067345", "0.60212106", "0.59971434", "0.59587336", "0.59487474", "0.591192", "0.5910844", "0.5906236", "0.58908254", "0.58837074", "0.58761394", "0.58608997", "0.5854817", "0.58419865", "0.58070415", "0.5790767", "0.57895106", "0.57699436", "0.5740826", "0.5739962", "0.57326376", "0.57125866" ]
0.75158477
0
Simple function to report stats (if report_manager is set) see `onmt.utils.ReportManagerBase.report_step` for doc
def _report_step(self, learning_rate, step, train_stats=None, valid_stats=None): if self.report_manager is not None: return self.report_manager.report_step( learning_rate, step, train_stats=train_stats, valid_stats=valid_stats)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report_func(opt, global_step, epoch, batch, num_batches,\n start_time, lr, report_stats):\n if batch % opt.steps_per_stats == -1 % opt.steps_per_stats:\n report_stats.print_out(epoch, batch+1, num_batches, start_time)\n report_stats = nmt.Statistics()\n\n return report_stats", "def report():\n pass", "def report_func(epoch, batch, num_batches, start_time, report_stats,\n report_every):\n if batch % report_every == -1 % report_every:\n report_stats.output(epoch, batch + 1, num_batches, start_time)\n report_stats = utils.Statistics()\n\n return report_stats", "def _maybe_report_training(self, step, num_steps, learning_rate,\n report_stats):\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, learning_rate, report_stats,\n multigpu=self.n_gpu > 1)", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def report(self, **options):\n pass", "def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])", "def report_step_progress(self, step):\n pass", "def report_scenario_progress(self):\n pass", "def report():\n _get_cached_tracker().report()", "def pytest_runtest_logreport(self, report):\n status = None\n if report.passed:\n # ignore setup/teardown\n if report.when == \"call\":\n status = \"Passed\"\n elif report.failed:\n if report.when in [\"setup\", \"teardown\"]:\n status = \"Error\"\n else:\n status = \"Failed\"\n elif report.skipped:\n status = \"Skipped\"\n # status = \"Blocked\"\n if not status and hasattr(report, 'monitor'):\n status = \"Monitor\"\n if status is not None:\n _report = {}\n _report['longrepr'] = \"\"\n _report['when'] = report.when\n if hasattr(report, \"longrepr\"):\n # Remove all bash escape sequences\n _report['longrepr'] = xml_escape(re_sub(r\"\\x1b.*?m\", \"\", str(report.longrepr)))\n # longrepr = xml_unescape(re_sub(r\"\\x1b.*?m\", \"\", report['longrepr']))\n if hasattr(report, \"keywords\") and \"xfail\" in report.keywords:\n _report['keywords'] = {}\n # TODO: check xfail in keywords because now it's number\n _report['keywords']['xfail'] = report.keywords['xfail']\n if hasattr(report, \"sections\"):\n _report['sections'] = []\n for i in range(len(report.sections)):\n if isinstance(report.sections[i], str):\n _report['sections'].append(xml_escape(report.sections[i]))\n # _report['sections'] = report.sections\n if hasattr(report, \"duration\"):\n if not self._opts.tc_duration and self.detailed_duration.get(report.nodeid) and self.detailed_duration.get(report.nodeid).get('call'):\n _report['detailed_duration'] = dict()\n _report['detailed_duration']['setup'] = \\\n self.detailed_duration.get(report.nodeid).get('call') - self.detailed_duration.get(report.nodeid).get('setup')\n _report['detailed_duration']['longrepr'] = time.time() - self.detailed_duration.get(report.nodeid).get('call') - report.duration\n _report['duration'] = report.duration\n if hasattr(report, \"retval\"):\n _report['retval'] = report.retval\n if hasattr(report, \"monitor\"):\n _report['monitor'] = report.monitor\n tc_name = get_tcname(report)\n suite_name = get_suite_name(report.nodeid)\n if self.buildname() is not None:\n 
self.server_cmd(\"post\", [self.self_name, self.buildname(), suite_name, tc_name, status, _report, \"\", self._get_build_info()])\n else:\n self.post_queue.append([self.self_name, self.buildname(), suite_name, tc_name, status, _report])", "def report(self, output_dir):", "def report(self, report_options=None):\n raise NotImplementedError()", "def stat(**kwargs):\n print(\"output stats\")", "def pytest_runtest_logreport(report):\n\n report_test_status(logger, report)", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)", "def report(self):\r\n print(\"\".join(self.memory), self.error, self.steps)", "def report():\n Robot.report()", "def report(self, stream):\n from collections import OrderedDict\n self.stats['total'] = sum(self.stats.values())\n for group in self.report_data.values():\n group.stats['total'] = sum(group.stats.values())\n self.report_file.write(self.jinja.get_template('report.html').render(\n report=OrderedDict(sorted(self.report_data.items())),\n stats=self.stats,\n ))\n self.report_file.close()\n if self.config.verbosity > 1:\n stream.writeln(\"-\" * 70)\n stream.writeln(\"HTML: %s\" % self.report_file.name)", "def dump_step(self,status):\n\n L = self.level\n stats.add_to_stats(step=status.step, time=status.time, type='timing_step', value=time.time()-self.t0)\n stats.add_to_stats(step=status.step, time=status.time, type='niter', value=status.iter)\n stats.add_to_stats(step=status.step, time=status.time, type='residual', value=L.status.residual)\n\n pass", "def report_trial(self):\n pass", "def report(self) -> computation_base.Computation:\n return self._report_fn", "def report(param):\n featurecount_dir = param['working_dir']+'report/featureCount/'\n if not os.path.exists(featurecount_dir):\n os.makedirs(featurecount_dir)\n \n #report only if there were actually results\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n if os.path.exists(out_file):\n param['report'].write('<center><br><br><h2>FeatureCount statistics</h2>')\n table = process_stat_files(param)\n MODULE_HELPER.create_sub_report(param, out_file, table, 'featureCount', 'FeatureCount') \n MODULE_HELPER.plot_count_overview(param, 'featureCount', table)", "def gen_one_step_report(self, step, steps, path):\r\n num_total = self.graph.GetNodes()\r\n num_infected = len(self.infected_list)\r\n num_recovered = len(self.recovered_list)\r\n num_susceptible = num_total - num_infected - num_recovered\r\n precent_infected = round(float(num_infected) / num_total, 2)\r\n precent_recovered = round(float(num_recovered) / num_total, 2)\r\n percent_susceptible = round(float(num_susceptible) / num_total, 2)\r\n if SHOW_DETAIL_LOG:\r\n output = [\r\n f\"Step {step}/{steps}\",\r\n f\"Number of susceptible: {num_susceptible}, Percentage of susceptible: {percent_susceptible}\",\r\n f\"Number of infected: {num_infected}, Percentage of infected: {precent_infected}\",\r\n f\"Number of recovered: {num_recovered}, Percentage of recovered: {precent_recovered}.\"\r\n ]\r\n 
[self.system.print_to(line, path) for line in output]", "def report(self, report_options=None):\n if not report_options:\n report_options = {\n \"output_format\": None,\n \"omit_keys\": None,\n }\n\n output_format = report_options.get(\"output_format\", None)\n omit_keys = report_options.get(\"omit_keys\", None)\n\n report = OrderedDict([\n (\"global_stats\", {\n \"samples_used\": self.total_samples,\n \"empty_line_count\": self._empty_line_count,\n \"file_type\": self.file_type,\n \"encoding\": self.encoding,\n \"memory_size\": self.memory_size,\n \"times\": self.times,\n }),\n (\"data_stats\", OrderedDict()),\n ])\n report[\"data_stats\"] = self._profile.profile\n return _prepare_report(report, output_format, omit_keys)", "def get_report(self) -> str:\n if self.total_batches == 0 or self.total_cuts == 0:\n return (\n \"Sampling statistics unavailable: the SamplerDiagnostics received no cuts or batches. \"\n \"If this is unexpected, and you're using a custom sampler, ensure that the sampler \"\n \"is registering the batches in SamplerDiagnostics.\"\n )\n return (\n f\"Sampling statistics: \\n\"\n f\"Kept {self.kept_stats.num_cuts:d}/{self.total_cuts:d} \"\n f\"({self.kept_stats.num_cuts / self.total_cuts:.2%}) cuts \"\n f\"({self.discarded_stats.num_cuts:d} cuts discarded).\\n\"\n f\"Kept {self.num_kept_batches:d}/{self.total_batches:d} \"\n f\"({self.num_kept_batches / self.total_batches:.2%}) batches \"\n f\"({self.num_discarded_batches:d} batches discarded).\\n\"\n f\"Overall, {round(self.discarded_stats.current):d} seconds of supervision were discarded.\"\n )", "def report_performance(self):\n performance = self.amygdala.visualize(self.timestep, \n self.name, \n self.log_dir)\n print('Final performance is {0:.3}'.format(performance))\n self.backup()\n return performance", "def test_basic_usage(self):\n self._test_reports_helper({}, [\"report.txt\"])", "def run_report(self) -> None:\n t1 = self.t1 or time.time()\n\n dt = t1 - self.t0\n\n if dt and self.max_tasks:\n speed = len(self.statistics) / dt / self.max_tasks\n else:\n speed = 0\n\n LOGGER.info('CRAWLER STATISTICS REPORT')\n\n show = list(self.statistics)\n show.sort(key=lambda stat: str(stat.url))\n\n for stat in show:\n self.log_url_metadata(stat)\n\n LOGGER.info(\n f'Completed parsing {len(self.statistics)} urls in {dt} secs; (max_tasks={self.max_tasks}) ({speed} urls per second per task)', # pylint: disable=C0301 # noqa: E501\n )\n\n LOGGER.info(f'Remaining: {self.queue.qsize()}')\n LOGGER.info(f'Total Statistics: {len(self.statistics)}')\n LOGGER.info(f'Datetime: {time.ctime()} local time')", "def report(self) -> Any:" ]
[ "0.73522985", "0.65985715", "0.6561438", "0.65518916", "0.6421544", "0.6374874", "0.6304178", "0.62417376", "0.6236645", "0.6230698", "0.6213709", "0.6194249", "0.6160915", "0.6147586", "0.61226684", "0.6106042", "0.6069683", "0.60593307", "0.60544866", "0.6028478", "0.60205936", "0.60152185", "0.59816384", "0.5978619", "0.59755576", "0.59178454", "0.591665", "0.5892449", "0.58656514", "0.58595365" ]
0.75299054
0
Save the model if a model saver is set
def _maybe_save(self, step): if self.model_saver is not None: self.model_saver.maybe_save(step)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self):\n if self.model:\n self.model.save(self.config[\"model_path\"])", "def save_model(self):\n pass", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save(path_to_model):\n pass", "def save(self):\n print(\"==> Saving model to\", self.model_dir)\n self.model.save(self.model_dir)", "def save_model(self, model_path: str):", "def save_model(self, fname):\n self.get_booster().save_model(fname)", "def saveModel(self):\n log.info(\"Saving model to %s...\" % self.savedModelsPath)\n self.model.save(self.savedModelsPath)", "def save(\n self,\n modelSavePath\n ):\n pass", "def saveModel(self, savePath=\"DataStore/SavedModels/Forecasters/\"):\n projectRoot = self.getProjectRoot()\n ds = projectRoot + \"DataStore/\"\n savePath = projectRoot + savePath\n if \"SavedModels\" not in os.listdir(ds):\n os.mkdir(ds + \"SavedModels\")\n os.mkdir(ds + \"SavedModels/Agents\")\n os.mkdir(ds + \"SavedModels/Forecasters\")\n modelName = self.__class__.__name__\n if modelName not in os.listdir(savePath):\n os.mkdir(savePath + modelName)\n savePath = savePath + modelName\n\n saveDateTime = str(datetime.datetime.now())[:-10].replace(\" \", \"@\")\n if saveDateTime in os.listdir(savePath):\n message = \"model already exists for this datetime\"\n raise Exception(message)\n savePath = \"{}/{}/\".format(savePath, saveDateTime)\n os.mkdir(savePath)\n with open(savePath + \"modelSummary.txt\", \"w+\") as f:\n self.model.summary(print_fn=lambda x: f.write(x + '\\n'))\n with open(savePath + \"modelConfig.json\", \"w+\") as f:\n f.write(self.model.to_json())\n with open(savePath + \"history.pickle\", \"wb+\") as f:\n pickle.dump(self.history, f)\n self.model.save(savePath + \"model\", save_format=\"tf\")", "def save_model(model, model_filepath):", "def save_model(self, *args, **kwargs):\n raise NotImplementedError", "def save_model(self, path):\n pass", "def save_models(self):\n\n if self.keep_models_fixed:\n return\n\n super().save_models(os.path.join(MODELS_PATH, self.name))\n if not hasattr(self, 'train_checkpointer'):\n self._create_train_checkpointer()\n self.train_checkpointer.save(0)", "def save_model(learn, name):\n# callback_fns = learn.callback_fns # preserve wandb callback and others\n# callbacks = learn.callbacks\n \n# learn.callback_fns = [] # clean callbacks\n# learn.callbacks = []\n \n learn.save(PATH_TO_MODELS / name) # save only weights, adds .pth automatically\n learn.export(PATH_TO_MODELS / f\"{name}.pkl\") # serialize entire model, need to add .pkl", "def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)", "def save_model(self, filename):\r\n pass", "def save_model(model):\n # ***\n # Please remove the comment to enable model save.\n # However, it will overwrite the baseline model we provided.\n # ***\n model.save(\"model/model.h5\")\n print(\"Model Saved Successfully.\")", "def saveModel(self):\n self._model.save_weights('./my_model')\n return None", "def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)", "def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)", "def save_model(self, file_name=None):\n try:\n if file_name:\n self.agent.save_model(file_name)\n else:\n self.agent.save_model()\n print('Model saved successfully')\n return 1\n except:\n print('Failed to save model')\n return 0", "def save_model(cls, 
vocab, path, filename):\n return super().save_model(vocab, path, filename)", "def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):\n if model_learnable:\n if fl_ctx.get_prop(AppConstants.CURRENT_ROUND) == fl_ctx.get_prop(AppConstants.NUM_ROUNDS) - 1:\n self.logger.info(f\"Saving received model to {os.path.abspath(self.save_path)}\")\n # save 'weights' which contains model parameters\n model = model_learnable[ModelLearnableKey.WEIGHTS]\n dump(model, self.save_path, compress=1)", "def _save_model_and_checkpoint(self, save_model_class=False):\n import os\n\n try:\n import cloudpickle\n except ImportError:\n cloudpickle = None\n\n logger.info(\"Saving model...\")\n output_dir = os.path.join(\n self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n )\n\n # save model parameters\n self._save_checkpoint(self.model, trial=None, metrics=None)\n # save the serialized model\n if save_model_class:\n # TODO : fix serialization of DatasetSchema object\n if cloudpickle is None:\n raise ValueError(\"cloudpickle is required to save model class\")\n\n with open(os.path.join(output_dir, \"model_class.pkl\"), \"wb\") as out:\n cloudpickle.dump(self.model.module, out)", "def save_model(self, file=None):\n return None", "def saveModel(self, fileName):\n\n if self.saver is None:\n self.saver = tf.train.Saver()\n self.saver.save(self.sess, fileName)\n else:\n self.saver.save(self.sess, fileName)", "def save_model(program, model_path):\n fluid.save(program, model_path)\n logger.info(\"Already save model in {}\".format(model_path))", "def save(self,model_path):\n pass\n # filename = \"Models/\"+model_path+\"1.sav\"\n # pickle.dump(self.crf_model, open(filename, 'wb'))", "def save(self, model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))" ]
[ "0.7979706", "0.7732025", "0.7690536", "0.7415625", "0.73621875", "0.7316188", "0.7281133", "0.72715974", "0.72543734", "0.72068334", "0.7184453", "0.71583945", "0.71555865", "0.70722026", "0.7051326", "0.7045108", "0.7034688", "0.7021703", "0.699732", "0.69931716", "0.6992812", "0.698004", "0.69727284", "0.6958941", "0.69468063", "0.69467586", "0.69333965", "0.68920845", "0.6888585", "0.6887361" ]
0.77783674
1
Clean EXIF metadata using exiv2
def clean_exif(self, path): try: args = ['exiv2', 'rm', path] check_call(args, shell=False, stdout=DEVNULL, stderr=DEVNULL) if self.verbose: print('File %s cleaned' % path) except FileNotFoundError: print('exiv2 not found. Please install it!') sys.exit(-1) except CalledProcessError as e: if self.verbose: print('Error cleaning EXIF in %s' % path) if path not in self.errors: self.errors.append(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_exif(self,img):\n data = list(img.getdata())\n image_without_exif = PIL.Image.new(img.mode, img.size)\n image_without_exif.putdata(data)\n return image_without_exif", "def CleanEpi(self):\n for entry in self.info.keys():\n info = self.info[entry]\n if info['psdname'] == 'epi':\n for tag in ('imgfile', 'imgfile_m', 'imgfile_mf', 'imgfile_t'):\n if info.has_key(tag) and info[tag] is not None and \\\n os.path.exists(info[tag]):\n print 'Deleting %s*' % (info[tag], info['suffix'])\n cmd = '/bin/rm %s%s*' % (info[tag], info['suffix'])\n self.ExecCmd(cmd)\n if '.BRIK' in info['suffix']:\n cmd = '/bin/rm %s%s*' % (info[tag], \\\n info['suffix'].replace('.BRIK','.HEAD'))\n self.ExecCmd(cmd)", "def test_exif_data_removed_from_added_thumbnail(self):\n\n # The image that has GPS data:\n path = \"tests/core/fixtures/images/tester_exif_gps.jpg\"\n\n # Double-check the original image does have some GPS data:\n exif_dict = piexif.load(path)\n self.assertEqual(len(exif_dict[\"GPS\"].keys()), 15)\n\n pub = PublicationFactory(thumbnail__from_path=path)\n\n exif_dict = piexif.load(pub.thumbnail.path)\n self.assertEqual(exif_dict[\"GPS\"], {})\n\n # Tidy up:\n pub.thumbnail.delete()", "def test_format_files_with_exif(self):\n # File contains exif data\n shutil.copytree(\"testimages/\", \"testimages_to_format/\")\n os.chdir(\"testimages_to_format\")\n self.vimiv.quit()\n self.init_test([\"arch_001.jpg\"])\n self.vimiv.paths = [os.path.abspath(\"arch_001.jpg\")]\n self.vimiv[\"fileextras\"].format_files(\"formatted_%Y_\")\n self.assertIn(\"formatted_2016_001.jpg\", os.listdir())\n # File does not contain exif data\n self.vimiv.paths = [os.path.abspath(\"arch-logo.png\")]\n self.vimiv[\"fileextras\"].format_files(\"formatted_%Y_\")\n message = self.vimiv[\"statusbar\"].left_label.get_text()\n self.assertIn(\"No exif data for\", message)", "def test_exif_data_removed_from_updated_thumbnail(self):\n\n # The image that has GPS data:\n path = \"tests/core/fixtures/images/tester_exif_gps.jpg\"\n\n # Double-check it does have some GPS data:\n exif_dict = piexif.load(path)\n self.assertEqual(len(exif_dict[\"GPS\"].keys()), 15)\n\n # Add an initial image that nas no GPS data:\n pub = PublicationFactory(thumbnail__filename=\"tester.jpg\")\n # Double-check that:\n exif_dict = piexif.load(pub.thumbnail.path)\n self.assertEqual(exif_dict[\"GPS\"], {})\n\n # Save the path so we can delete the file at the end:\n old_thumbnail_path = pub.thumbnail.path\n\n # Change the thumbnail to the one with GPS EXIF data:\n pub.thumbnail.save(os.path.basename(path), File(open(path, \"rb\")))\n\n pub.refresh_from_db()\n\n # Check it does have the new image that had GPS data:\n self.assertEqual(os.path.basename(pub.thumbnail.name), os.path.basename(path))\n\n # Check the GPS data has now gone:\n exif_dict = piexif.load(pub.thumbnail.path)\n self.assertEqual(exif_dict[\"GPS\"], {})\n\n # Tidy up:\n pub.thumbnail.delete()\n os.remove(old_thumbnail_path)", "def process_file(filename, exiftool_path):\n exiftool = ExifTool(filename, exiftool=exiftool_path)\n exifdict = exiftool.asdict()\n\n # ExifTool returns dict with tag group names (e.g. 
IPTC:Keywords)\n # also add the tag names without group name\n exif_no_group = {}\n for k, v in exifdict.items():\n k = re.sub(r\".*:\", \"\", k)\n exif_no_group[k] = v\n exifdict.update(exif_no_group)", "def test_jpeg_exif(h, f):\n if h[6:10].lower() == 'exif':\n return 'jpeg'", "def test_exif_broken(self):\n user = UserFactory.create()\n file_path = os.path.join(os.path.dirname(__file__), \"broken_exif.jpg\")\n self._upload_photo(user, file_path)", "def fetch_exif_tags(image, s3bucket):\n\n s3client = boto3.client('s3', region_name=AWS_REGION)\n\n useful_exif_tags = [ # List of useful EXIF tags as presented in ExifRead\n 'Image Make',\n 'Image Model',\n 'Image DateTime',\n # 'Image Orientation',\n 'EXIF LensModel',\n 'EXIF ISOSpeedRatings',\n 'EXIF ExposureTime',\n 'EXIF FNumber',\n 'EXIF ExposureProgram',\n # 'EXIF ExposureMode'\n 'EXIF FocalLength',\n # 'EXIF ExifImageWidth',\n # 'EXIF ExifImageLength',\n 'GPS GPSAltitude',\n 'GPS GPSLatitude',\n 'GPS GPSLatitudeRef',\n 'GPS GPSLongitude',\n 'GPS GPSLongitudeRef',\n ]\n\n try:\n temp_file = '/tmp/tmpimage.jpg'\n\n with open(temp_file, 'wb') as data:\n s3client.download_fileobj(s3bucket, image, data)\n\n tf = open(temp_file, 'rb')\n exif_tags = exif.process_file(tf, details=False)\n\n exifs_dict = {}\n\n for tag in exif_tags.keys():\n if tag in useful_exif_tags: # Filtering whole EXIF array to select only list of useful\n\n if tag == 'Image DateTime': # Creating datetime in ISO format\n shoot_date = datetime.datetime.strptime(exif_tags[tag].printable,\n \"%Y:%m:%d %H:%M:%S\").isoformat()\n exifs_dict.update({'ShootingTime': shoot_date})\n\n elif tag.startswith('EXIF'):\n exif_tag_str = tag.lstrip('EXIF')\n exifs_dict.update({exif_tag_str.lstrip(): exif_tags[tag].printable})\n\n elif tag.startswith('GPS'):\n exif_tag_str = tag.lstrip('GPS')\n exifs_dict.update({exif_tag_str.lstrip(): exif_tags[tag].printable})\n\n else:\n exifs_dict.update({tag: exif_tags[tag].printable})\n\n return exifs_dict\n\n except Exception as e:\n print(\"EXIF tags fetching failed because of : \", e)", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def exif(filename):\n clef = ['Exif.Image.Make',\n 'Exif.Image.Model',\n 'Exif.Image.DateTime',\n 'Exif.Photo.ExposureTime',\n 'Exif.Photo.FNumber',\n 'Exif.Photo.DateTimeOriginal',\n 'Exif.Photo.DateTimeDigitized',\n 'Exif.Photo.ShutterSpeedValue',\n 'Exif.Photo.ApertureValue',\n 'Exif.Photo.ExposureBiasValue',\n 'Exif.Photo.Flash',\n 'Exif.Photo.FocalLength',\n 'Exif.Photo.ISOSpeedRatings'\n]\n data = {}\n image_exif = Exif(filename)\n image_exif.read()\n comment = image_exif.comment\n\n for i in clef:\n try:\n data[i] = image_exif.interpretedExifValue(i)\n except:\n data[i] = \"\"\n return data, comment", "def remove_office_metadata(file_name):\n\tns = {\n\t\t'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',\n\t\t'dc': 'http://purl.org/dc/elements/1.1/',\n\t\t'dcterms': 'http://purl.org/dc/terms/',\n\t\t'dcmitype': 'http://purl.org/dc/dcmitype/',\n\t\t'xsi': 'http://www.w3.org/2001/XMLSchema-instance'\n\t}\n\tfor prefix, uri in ns.items():\n\t\tElementTree.register_namespace(prefix, uri)\n\n\t_, file_ext = os.path.splitext(file_name)\n\ttmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name), suffix=file_ext)\n\tos.close(tmpfd)\n\twith zipfile.ZipFile(file_name, 'r') as zin:\n\t\twith zipfile.ZipFile(tmpname, 'w') as 
zout:\n\t\t\tzout.comment = zin.comment\n\t\t\tfor item in zin.infolist():\n\t\t\t\tdata = zin.read(item.filename)\n\t\t\t\tif item.filename == 'docProps/core.xml':\n\t\t\t\t\troot = ElementTree.fromstring(data)\n\t\t\t\t\troot.clear()\n\t\t\t\t\tdata = ElementTree.tostring(root, 'UTF-8')\n\t\t\t\tzout.writestr(item, data)\n\tos.remove(file_name)\n\tos.rename(tmpname, file_name)", "def clean_me(args):\n with open(args.input, 'rb') as infile:\n with open(args.output, 'wb') as outfile:\n \n for line in infile:\n if not 'xsi:nil=\"true\"' in line:\n outfile.write(line)\n else:\n print \"Removing %s\" % line", "def clean_metadata_from_xml(cls, xml_object):\r\n for attr in cls.metadata_attributes:\r\n if xml_object.get(attr) is not None:\r\n del xml_object.attrib[attr]", "def test_delete_image_signature(self):\n pass", "def extract_exif(fname):\n\n try:\n # check if file has EXIF date, exception if not\n exif_data = fileops.get_exif_datetimeorig_tag(fname)\n\n # extract the date/time string from EXIF, exception if\n # not the proper format\n datetimestr = exif_to_datetimestr(exif_data)\n\n logging.debug(\"Found EXIF Tag %r for file %r\", datetimestr, \n os.path.basename(fname))\n\n return datetimestr\n\n except fileops.EXIFTagError:\n logging.warning(\"%r does not have a proper EXIF tag\",\n os.path.basename(fname))\n return \"\";\n\n except DateStrError:\n logging.warning(\"%r EXIF tag not the right format\",\n os.path.basename(fname))\n return \"\";", "def test_write_metadata():\n img = Image.open('Tests/images/lena.tif')\n\n f = tempfile('temp.tiff')\n img.save(f, tiffinfo = img.tag)\n\n loaded = Image.open(f)\n\n original = img.tag.named()\n reloaded = loaded.tag.named()\n\n ignored = ['StripByteCounts', 'RowsPerStrip', 'PageNumber', 'StripOffsets']\n \n for tag, value in reloaded.items():\n if tag not in ignored:\n assert_equal(original[tag], value, \"%s didn't roundtrip\" % tag)\n\n for tag, value in original.items():\n if tag not in ignored: \n assert_equal(value, reloaded[tag], \"%s didn't roundtrip\" % tag)", "def get_EXIF_features(mistery_photo, features='default', verbose=False):\n exif_data = dict()\n\n f = os.path.join(mistery_photo['dir'], mistery_photo['filename'])\n # open in binary mode\n photo = open(f, 'rb')\n # Read EXIF data\n tags = exifread.process_file(photo, details=False)\n # Extract time\n\n # Quick&Dirty to extract month\n # NEED TO BE IMPROVED TO SUPORT year\n try:\n timestamp = tags['EXIF DateTimeOriginal'].values\n d, h = timestamp.split()\n exif_data['day'] = d.split(':')[2].strip()\n exif_data['month'] = d.split(':')[1].strip()\n exif_data['year'] = d.split(':')[0].strip()\n\n exif_data['hour'] = h.split(':')[0].strip()\n exif_data['min'] = h.split(':')[1].strip()\n exif_data['sec'] = h.split(':')[2].strip()\n\n except:\n # add log\n # configuration file default photos\n if verbose:\n print('error with {}'.format(f))\n\n exif_data['year'] = None\n\n return exif_data", "def test_im_file_deinterlace(self):\n im = IMBackend()\n path = im.deinterlace(self.IMG_225x225)\n cmd = im.identify_cmd + [\n '-format', '%[interlace]', syspath(path, prefix=False),\n ]\n out = command_output(cmd).stdout\n self.assertTrue(out == b'None')", "def remove_data(writer: UFOWriter, filename: str) -> None:\n writer.removeImage(filename)", "def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()", "def 
photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def _update_metadata_imagedata(metadata, out_filebase, i):\n metadata['FITSImageFilename'] = [out_filebase + FITS_EXT]\n metadata['PNGImageFileName'] = [out_filebase + PNG_EXT]\n metadata['PNGThumbNailFileName'] = [out_filebase + '_tnail' + PNG_EXT]\n\n image_keys = [\"IntegrationTime\", \"RightAscension\", \"Declination\",\n \"DecRa\", \"Targets\", \"KatpointTargets\"]\n for key in image_keys:\n metadata[key] = [metadata[key][i]]", "def cleanup(segment):\n cnt = ''.join(segment.file_content)\n index = cnt.find('\\\\annotate')\n if index < 0:\n return\n while index >= 0:\n cnt, new_ind = parse_annotation(cnt, index)\n index = cnt.find('\\\\annotate', new_ind)\n f = codecs.open(segment.filename, 'w', 'utf-8')\n f.write(cnt)\n f.close()\n info('Updated: {} {}'.format(segment.voice_name, segment.name))", "def handleCleanMetadataRecon(self):\n logging.debug(\"Removing compromising personal info and remaking the file...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n fileType = self.filesList.getFileObj(filePath).type\n self.printPdfPersonalData(filePath, \n fileType,\n AddedFile.changeBase(filePath, self.outputPath))\n self.tabArea.setCurrentIndex(1)\n self.changeCursor()\n self.filesList.getFileObj(filePath).reconMetaCleaned = True", "def _metadata_png(self, metadata_file_path):\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try:\n img = Image.open(self.src_path)\n for tag in sorted(img.info.keys()):\n # These are long and obnoxious/binary\n if tag not in ('icc_profile'):\n with open(metadata_file_path, 'w+') as metadata_file:\n metadata_file.write(\"Key: {}\\tValue: {}\\n\".format(tag, img.info[tag]))\n # LOG: handle metadata\n self.set_property('metadata', 'png')\n img.close()\n # Catch decompression bombs\n except Exception as e:\n # TODO: only catch DecompressionBombWarnings here?\n self.add_error(e, \"Caught exception processing metadata for {}\".format(self.src_path))\n self.make_dangerous('exception processing metadata')\n return False", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def applyMorphologicalCleaning(self, image):", "def process_image(\n src: str,\n dest: str,\n desc: str,\n timestamp: datetime.datetime,\n geo: Tuple[float, float] = None,\n altitude: float = None,\n tags: Iterable[str] = None,\n) -> None:\n\n shutil.copy2(src, dest)\n exif_dict = piexif.load(src)\n\n if geo:\n exif_dict[\"GPS\"] = create_gps_block(lat=geo[0], lng=geo[1], altitude=altitude)\n\n if desc:\n exif_dict[\"0th\"][piexif.ImageIFD.ImageDescription] = desc\n\n if timestamp:\n exif_time = timestamp.strftime(\"%Y:%m:%d %H:%M:%S\")\n exif_dict[\"Exif\"][piexif.ExifIFD.DateTimeOriginal] = exif_time\n exif_dict[\"0th\"][piexif.ImageIFD.DateTime] = exif_time\n if timestamp.tzinfo:\n tzoffset = timestamp.tzinfo.utcoffset(timestamp)\n if tzoffset:\n exif_dict[\"1st\"][piexif.ImageIFD.TimeZoneOffset] = int(\n tzoffset.total_seconds()\n )\n\n if tags:\n keywords = \",\".join(tags)\n print(keywords)\n exif_dict[\"0th\"][piexif.ImageIFD.XPKeywords] = keywords.encode(\"utf-16le\")\n\n exif_bytes = piexif.dump(exif_dict)\n piexif.insert(exif_bytes, dest)", "def test_remove_existing_images(self):\n disk.merge_datasets(self.input_datasets,\n self.output_dataset, remove_existing_images=True)\n 
self.assertEqual(5, len(self.output_dataset.metadata()))\n\n overwritten_image_metadata = self.output_dataset.image_metadata(\n \"loc1\", \"src0\")\n self.assertEqual(\n {\"dataset\": 2}, overwritten_image_metadata[\"metadata\"])" ]
[ "0.69053125", "0.63573146", "0.63087946", "0.6254756", "0.6251132", "0.61521065", "0.5934415", "0.59326994", "0.5930887", "0.57566583", "0.5745966", "0.57040155", "0.5628695", "0.56155694", "0.55910784", "0.556056", "0.555619", "0.5512483", "0.5477117", "0.5393206", "0.5386504", "0.53465635", "0.53321165", "0.53120446", "0.5305982", "0.52853525", "0.5282335", "0.5273232", "0.5252091", "0.521774" ]
0.7290019
0
Check the EXIF metadata presence in a given file
def check_exif_presence(self, path):
        rc = False
        try:
            args = ['exiv2', 'pr', path]
            check_call(args, shell=False, stdout=DEVNULL, stderr=DEVNULL)
            rc = True  # File has exif: exiv2 exits with 0
        except CalledProcessError as e:  # check_call raises CalledProcessError on a non-zero exit
            if e.returncode == 253:
                pass  # File has no exif: exiv2 exits with 253
            else:
                raise
        return rc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _metadata_exif(self, metadata_file_path):\n # TODO: this method is kind of long, can we shorten it somehow?\n img = open(self.src_path, 'rb')\n tags = None\n try:\n tags = exifread.process_file(img, debug=True)\n except Exception as e:\n self.add_error(e, \"Error while trying to grab full metadata for file {}; retrying for partial data.\".format(self.src_path))\n if tags is None:\n try:\n tags = exifread.process_file(img, debug=True)\n except Exception as e:\n self.add_error(e, \"Failed to get any metadata for file {}.\".format(self.src_path))\n img.close()\n return False\n for tag in sorted(tags.keys()):\n # These tags are long and obnoxious/binary so we don't add them\n if tag not in ('JPEGThumbnail', 'TIFFThumbnail'):\n tag_string = str(tags[tag])\n # Exifreader truncates data.\n if len(tag_string) > 25 and tag_string.endswith(\", ... ]\"):\n tag_value = tags[tag].values\n tag_string = str(tag_value)\n with open(metadata_file_path, 'w+') as metadata_file:\n metadata_file.write(\"Key: {}\\tValue: {}\\n\".format(tag, tag_string))\n # LOG: how do we want to log metadata?\n self.set_property('metadata', 'exif')\n img.close()\n return True", "def test_jpeg_exif(h, f):\n if h[6:10].lower() == 'exif':\n return 'jpeg'", "def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)", "def test_format_files_with_exif(self):\n # File contains exif data\n shutil.copytree(\"testimages/\", \"testimages_to_format/\")\n os.chdir(\"testimages_to_format\")\n self.vimiv.quit()\n self.init_test([\"arch_001.jpg\"])\n self.vimiv.paths = [os.path.abspath(\"arch_001.jpg\")]\n self.vimiv[\"fileextras\"].format_files(\"formatted_%Y_\")\n self.assertIn(\"formatted_2016_001.jpg\", os.listdir())\n # File does not contain exif data\n self.vimiv.paths = [os.path.abspath(\"arch-logo.png\")]\n self.vimiv[\"fileextras\"].format_files(\"formatted_%Y_\")\n message = self.vimiv[\"statusbar\"].left_label.get_text()\n self.assertIn(\"No exif data for\", message)", "def images_are_present(file_info):\n currentdir = os.path.join(WORKDIR, file_info['folder'])\n if not os.path.exists(currentdir):\n return False\n count = len([x for x in os.listdir(currentdir) if x.endswith('.png')])\n if count != file_info['size']:\n print([x for x in os.listdir(currentdir) if x.endswith('.png')])\n print('Count does not match')\n print(count)\n print(file_info['size'])\n return False\n return True", "def has_metadata(self):\n if self.mimetype in Config.mimes_metadata:\n return True\n return False", "def exif_date(path):\n\n # gets the exif data for jpg files\n ext = path.split(\".\")[-1].lower()\n if ext != \"jpg\":\n return False\n\n with open(path, \"rb\") as image_file:\n my_image = Image(image_file)\n\n if my_image.has_exif:\n try:\n date = datetime.datetime.strptime(my_image.datetime, \"%Y:%m:%d %H:%M:%S\")\n print(\"exif date\", path, date)\n except AttributeError:\n print(\"attribute error on\", path)\n return False\n return date\n\n return False", "def __check_metadata(s3client, key, bucket_name):\n response = s3client.head_object(Bucket=bucket_name, Key=key)\n if 'status' in response['Metadata']:\n return 
response['Metadata']['status'] == 'uploaded'\n return False", "def CheckSupportedFormat(cls, path):\n try:\n zip_file = zipfile.ZipFile(\n path, mode='r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)\n\n with zip_file.open('metadata.txt', mode='r') as file_object:\n stream_data = file_object.read()\n\n storage_metadata_reader = _StorageMetadataReader()\n storage_metadata = storage_metadata_reader.Read(stream_data)\n\n cls._CheckStorageMetadata(storage_metadata)\n\n zip_file.close()\n result = True\n\n except (IOError, KeyError, zipfile.BadZipfile):\n result = False\n\n return result", "def _detect_files(data):\n return any(attr[\"extra\"].get(\"files\")\n for attr in data[\"attributes\"] if attr[\"extra\"])", "def getfilemeta(path):\n if os.path.isfile(path):\n meta = os.stat(path)\n return (meta)\n else:\n raise Exception('File not exist')", "def verifyFileInfo(file_dict, guid):\n\n ec = 0\n error = PilotErrors()\n\n # does the file info dictionary have the correct file info? (non-zero and non-empty string)\n if file_dict.has_key(guid):\n if file_dict[guid] != \"\" and file_dict[guid] != \"0\":\n tolog(\"Valid file for guid %s: %s\" % (guid, file_dict[guid]))\n else:\n ec = error.ERR_NOPFC\n else:\n ec = error.ERR_NOPFC\n\n return ec", "def fits_file_exists (filepath):\n return validate_file_path(filepath, FITS_EXTENTS)", "def check_image_file_header(filename):\n with tf.gfile.Open(filename, 'rb') as f:\n magic = read32(f)\n read32(f) # num_images, unused\n rows = read32(f)\n cols = read32(f)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n f.name))\n if rows != 28 or cols != 28:\n raise ValueError(\n 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %\n (f.name, rows, cols))", "def check_image_file_header(filename):\n with tf.gfile.Open(filename, 'rb') as f:\n magic = read32(f)\n read32(f) # num_images, unused\n rows = read32(f)\n cols = read32(f)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,\n f.name))\n if rows != 28 or cols != 28:\n raise ValueError(\n 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %\n (f.name, rows, cols))", "def get_exif_data(fname):\n ret = {}\n try:\n img = Image.open(fname)\n if hasattr( img, '_getexif' ):\n exifinfo = img._getexif()\n if exifinfo != None:\n for tag, value in exifinfo.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n except IOError:\n print ('IOERROR ' + fname)\n return ret", "def identify_file(self, file):", "def is_file_present(file):\n\n return os.path.isfile(file)", "def get_exif_data(fname):\n ret = {}\n try:\n img = Image.open(StringIO.StringIO(fname))\n if hasattr( img, '_getexif' ):\n exifinfo = img._getexif()\n if exifinfo != None:\n for tag, value in exifinfo.items():\n decoded = TAGS.get(tag, tag)\n ret[decoded] = value\n except IOError:\n print 'IOERROR ' + fname\n return ret", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects 
= basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def verify(path):\n valid = False\n try:\n zf = zipfile.ZipFile(path)\n except (zipfile.BadZipfile, IsADirectoryError):\n pass\n else:\n names = sorted(zf.namelist())\n names = [nn for nn in names if nn.endswith(\".tif\")]\n names = [nn for nn in names if nn.startswith(\"SID PHA\")]\n for name in names:\n with zf.open(name) as pt:\n fd = io.BytesIO(pt.read())\n if SingleTifPhasics.verify(fd):\n valid = True\n break\n zf.close()\n return valid", "def is_file_exists(self):\n pass", "def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. 
This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True", "def extract_exif(fname):\n\n try:\n # check if file has EXIF date, exception if not\n exif_data = fileops.get_exif_datetimeorig_tag(fname)\n\n # extract the date/time string from EXIF, exception if\n # not the proper format\n datetimestr = exif_to_datetimestr(exif_data)\n\n logging.debug(\"Found EXIF Tag %r for file %r\", datetimestr, \n os.path.basename(fname))\n\n return datetimestr\n\n except fileops.EXIFTagError:\n logging.warning(\"%r does not have a proper EXIF tag\",\n os.path.basename(fname))\n return \"\";\n\n except DateStrError:\n logging.warning(\"%r EXIF tag not the right format\",\n os.path.basename(fname))\n return \"\";", "def exif(filename):\n clef = ['Exif.Image.Make',\n 'Exif.Image.Model',\n 'Exif.Image.DateTime',\n 'Exif.Photo.ExposureTime',\n 'Exif.Photo.FNumber',\n 'Exif.Photo.DateTimeOriginal',\n 'Exif.Photo.DateTimeDigitized',\n 'Exif.Photo.ShutterSpeedValue',\n 'Exif.Photo.ApertureValue',\n 'Exif.Photo.ExposureBiasValue',\n 'Exif.Photo.Flash',\n 'Exif.Photo.FocalLength',\n 'Exif.Photo.ISOSpeedRatings'\n]\n data = {}\n image_exif = Exif(filename)\n image_exif.read()\n comment = image_exif.comment\n\n for i in clef:\n try:\n data[i] = image_exif.interpretedExifValue(i)\n except:\n data[i] = \"\"\n return data, comment", "def check_etag(manifest_filename: str) -> bool:\n if manifest_filename is None:\n return False\n\n # The first line has the headers\n header = get_first_line(manifest_filename)\n if 'ETAG' in header:\n return True\n\n return False", "def check_magic(self, target: str):\n\t\twith open(target, \"rb+\") as archive:\n\t\t\tmagic = archive.read(4)\n\t\t\tif magic == struct.pack(\"I\", self.magic):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def fetch_exif_tags(image, s3bucket):\n\n s3client = boto3.client('s3', region_name=AWS_REGION)\n\n useful_exif_tags = [ # List of useful EXIF tags as presented in ExifRead\n 'Image Make',\n 'Image Model',\n 'Image DateTime',\n # 'Image Orientation',\n 'EXIF LensModel',\n 'EXIF ISOSpeedRatings',\n 'EXIF ExposureTime',\n 'EXIF FNumber',\n 'EXIF ExposureProgram',\n # 'EXIF ExposureMode'\n 'EXIF FocalLength',\n # 'EXIF ExifImageWidth',\n # 'EXIF ExifImageLength',\n 'GPS GPSAltitude',\n 'GPS GPSLatitude',\n 'GPS GPSLatitudeRef',\n 'GPS GPSLongitude',\n 'GPS GPSLongitudeRef',\n ]\n\n try:\n temp_file = '/tmp/tmpimage.jpg'\n\n with open(temp_file, 'wb') as data:\n s3client.download_fileobj(s3bucket, image, data)\n\n tf = open(temp_file, 'rb')\n exif_tags = exif.process_file(tf, details=False)\n\n exifs_dict = {}\n\n for tag in exif_tags.keys():\n if tag in useful_exif_tags: # Filtering whole EXIF array to select only list of useful\n\n if tag == 'Image DateTime': # Creating datetime in ISO format\n shoot_date = datetime.datetime.strptime(exif_tags[tag].printable,\n \"%Y:%m:%d %H:%M:%S\").isoformat()\n exifs_dict.update({'ShootingTime': shoot_date})\n\n elif tag.startswith('EXIF'):\n exif_tag_str = tag.lstrip('EXIF')\n exifs_dict.update({exif_tag_str.lstrip(): exif_tags[tag].printable})\n\n elif tag.startswith('GPS'):\n exif_tag_str = tag.lstrip('GPS')\n exifs_dict.update({exif_tag_str.lstrip(): exif_tags[tag].printable})\n\n else:\n exifs_dict.update({tag: exif_tags[tag].printable})\n\n return exifs_dict\n\n except Exception as e:\n print(\"EXIF tags fetching failed because of : \", e)" ]
[ "0.7204896", "0.6898728", "0.6584521", "0.6531383", "0.6483777", "0.64496535", "0.6253193", "0.6245931", "0.6216941", "0.612987", "0.61224896", "0.6114118", "0.61132056", "0.61092824", "0.61092824", "0.61076564", "0.61012423", "0.60833186", "0.60606617", "0.60033613", "0.5971878", "0.596818", "0.59531045", "0.59378415", "0.5935459", "0.5920662", "0.5890558", "0.58616567", "0.58572286", "0.58531106" ]
0.7412544
0
Show the errors after execution
def show_errors(self):
        if self.errors:
            print('Clean error in:')
            for file in self.errors:
                print(' %s' % file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showerrors():\n errorMessages = middleware.ixn.showErrorMessage(silentMode=True)\n if errorMessages:\n print(errorMessages)\n print()", "def display(self):\n print self.careErrors\n\n\n return self.exec_()", "def show_error(self):\n print('LSE Error : {}'.format(self._error))", "def show_error(self):\n if self.error is None:\n return\n from PartSeg.common_gui.error_report import ErrorDialog\n\n if isinstance(self.error, TiffFileException):\n mess = QMessageBox()\n mess.setIcon(QMessageBox.Critical)\n mess.setText(\"During read file there is an error: \" + self.error.args[0])\n mess.setWindowTitle(\"Tiff error\")\n mess.exec()\n return\n if isinstance(self.error, SegmentationLimitException):\n mess = QMessageBox()\n mess.setIcon(QMessageBox.Critical)\n mess.setText(\"During segmentation process algorithm meet limitations:\\n\" + \"\\n\".join(self.error.args))\n mess.setWindowTitle(\"Segmentation limitations\")\n mess.exec()\n return\n dial = ErrorDialog(self.error, \"Exception during program run\")\n # TODO check\n # dial.moveToThread(QApplication.instance().thread())\n dial.exec()", "def show_errors(self, view, output):\n errors = self.compiler_variant.errors_from_output(output)\n self.error_vis.generate(view, errors)\n self.error_vis.show_regions(view)", "def error(self, message=None, show_help=True):", "def error(self):\n ...", "def _on_error(self, error):\n print(error + \" for \" + self.session_name)", "def print_error():\n print(\"Incorrect Selection, try again!\\n\")", "def error(self, message):\n print message", "def __showError(self, out):\n self.errorGroup.show()\n self.errors.insertPlainText(Utilities.filterAnsiSequences(out))\n self.errors.ensureCursorVisible()\n \n QCoreApplication.processEvents()", "def error(self):\n pass", "def on_error(self, exception):\n traceback.print_exc()", "def _on_error(self, type, value, tb):\n \n # get exception\n exception = traceback.format_exception(type, value, tb)\n exception = '\\n'.join(exception)\n \n # show error message\n dlg = ErrorDlg(self, exception)\n dlg.ShowModal()\n dlg.Destroy()", "def error_mess():\n print(\"Sorry, I didn't understand that.\")", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def indicate_error(self):\n pass", "def display_error():\n clear_screen()\n line = '#' * 20\n print(f'{line}\\n# CONNECTION ERROR #\\n{line}')\n exit(1)", "def run_error_messages(self):\r\n self.error = \"\"\r\n #while self.error_queue:\r\n #self.error += (self.error_messages.get(\r\n # self.error_queue.popleft, None\r\n # ) + \" \")\r\n #self.error += self.I_source.query(\"STAT:QUE?\")\r\n #print(self.error)\r\n #self.I_source.write(\"STAT:QUE:CLE\")\r\n #self.message_box.setText(self.error)\r\n #self.message_box.exec_()\r", "def oops(self):\n QMessageBox.information(self, 'Error', \"Ada yang salah...\", QMessageBox.Ok, QMessageBox.Ok)", "def _error(self, *args, **kwargs):\n print(\"[{}]\".format(self.type), *args, file=sys.stderr, **kwargs)\n sys.exit(1)", "def show_error(title, message, print_message=False):\n\n pass", "def print_std_error(self):\n print(self.std_error)\n sys.exit()", "def output_error(text):\n if conf.eval_output:\n info_dict = {'type':'error', 'text' : text}\n output_result_eval(info_dict)\n else:\n output_result('[ERROR] ' + text)", "def finalize_error():\n print('')\n exit(-1)", "def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)", "def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n 
self.print_lines(self.colored(('red', 'bold'), lines))", "def do_get_error(self):\n if self._last_exception is None:\n print('no errors')\n else:\n traceback.print_exception(*self._last_exception)", "def error(self, *args, **kwargs):\n if len(args) == 3:\n print(f\"ERROR: {args[1]}\")\n else:\n print(f\"ERROR: {args[0]}\")", "def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)" ]
[ "0.77923447", "0.75230545", "0.7374107", "0.7190836", "0.7086748", "0.7050644", "0.70011866", "0.6963619", "0.69599324", "0.693487", "0.69319034", "0.69010955", "0.68853796", "0.68728274", "0.6846897", "0.6839558", "0.6790114", "0.67648125", "0.671032", "0.6692815", "0.6667575", "0.6665649", "0.66650224", "0.66628844", "0.66575354", "0.66408736", "0.663584", "0.66304785", "0.66083896", "0.66059744" ]
0.77545685
1