Dataset columns:
  query            string  (length 9 to 9.05k)
  document         string  (length 10 to 222k)
  metadata         dict
  negatives        list    (30 items)
  negative_scores  list    (30 items)
  document_score   string  (length 4 to 10)
  document_rank    string  (2 classes)
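A minimal sketch of how rows with this schema could be consumed, assuming the data is published as a Hugging Face dataset; the repository id below is a placeholder, not the real one. Each row pairs a natural-language query with one positive code snippet and 30 scored negatives, and the metadata's "triplet" objective suggests use in a contrastive / triplet-style retrieval setup.

```python
# Sketch only: the dataset path is hypothetical, chosen for illustration.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")  # hypothetical repo id

for row in ds:
    query = row["query"]              # natural-language description of the code
    positive = row["document"]        # code snippet that matches the query
    negatives = row["negatives"]      # 30 non-matching code snippets
    neg_scores = row["negative_scores"]  # one similarity score per negative
    pos_score = float(row["document_score"])  # score of the positive document

    # Per the metadata objective, a training example is the triplet
    # (query, positive document, negatives).
    triplet = (query, positive, negatives)
```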
add words to the hook, the freq can be zero
def add_word(self, word, freq=None): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hook(self, sentence, words):\n pass", "def add_word(self, word):\r\n word = word.strip().lower()\r\n if word in self.builtin_words:\r\n return\r\n if word not in self.word_count:\r\n self.word_count[word] = 1\r\n else:\r\n self.word_count[word] += 1", "def _add_word(self, word):\n if not word in self._word2idx.keys():\n self._word2idx[word] = self.vocab_size\n self.freqs[word] = 0\n self._idx2word[self.vocab_size] = word\n self.vocab_size += 1\n self.freqs[word] += 1", "def update(self, word, freq):\n if word in self.dict:\n self.dict[word] = freq", "def add_words_from_dict(self, kind: str, fn: str, words: Any) -> None:\n for word in words or []:\n self.words.add(word)\n self.words.add(word.lower())", "def add_words(self, words):\r\n for word in words:\r\n self.add(word)", "def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token", "def rebuild_by_freq(self, thd=3):\n self.word2idx = {'<unk>': 0, '<pad>': 1, '<mask>': 2}\n self.idx2word = ['<unk>', '<pad>', '<mask>']\n\n for k, v in self.word2frq.items():\n if v >= thd and (k not in self.idx2word):\n self.idx2word.append(k)\n self.word2idx[k] = len(self.idx2word) - 1\n\n print('Number of words:', len(self.idx2word))\n return len(self.idx2word)", "def add_word(words, summary):\n for word in words:\n if word not in summary:\n summary[word] = 1 # If not a word exists, add the word and set value as 1\n else:\n summary[word] += 1 # If a word exists, just increase value by 1", "def add_words(self, words: List[str], **kwargs: Any) -> None:\n kwargs.setdefault('check_density', False)\n kwargs.setdefault('check_count', False)\n for word in words:\n with suppress(WordLengthError):\n self.add_word(word, **kwargs)", "def add(self, tokens):\n\n for token in tokens:\n self.vocab.add(token)\n\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.size != 1):\n if leader not in self.frequencies:\n self.frequencies[leader] = Frequency()\n\n self.frequencies[leader].add(token)", "def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1", "def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def iterate_words(counter, li, all_dict, emotion_dict):\n\n counter += 1\n # iterate through the words in the list\n for word in li:\n # if word not in the dict of all words add it with frequency 1, else increase its frequency by 1\n if word not in all_dict:\n all_dict[word] = 1\n else:\n all_dict[word] += 1\n # if word not in the dict of words with certain emotion add it with frequency 1, 
else increase its frequency by 1\n if word not in emotion_dict:\n emotion_dict[word] = 1\n else:\n emotion_dict[word] += 1\n\n return counter", "def add_word_tag(self, token, label):\n # Add total count for label\n self.label_counts[label] += 1\n # Add count for word given label\n if token not in self.words_labels_counts[label]:\n self.words_labels_counts[label][token] = 1\n else:\n self.words_labels_counts[label][token] += 1", "def words(self, word):\n pass", "def add_word(self, word):\n word = word.lower()\n if word in self.word_list:\n self.word_list[word] += 1\n else:\n self.word_list[word] = 1", "def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1", "def addWord(wmap, tok, lem):\n\n if (not tok in tt.setStopWords) and (not (tok.isupper() and tok.lower() in tt.setStopWords)): #Don't add stopwords - but be carful US vs us\n olem = lem\n lem = lem.lower() # makes many things simpler\n if tok in wmap: # tok is mapped already..., this is needed, sometimes the lemmatizing is inconsistent, eg. \"prototyping\" might go to \"prototyping\" or \"prototype\"\n if wmap[tok] != lem: #token exists in map, but is mapped differently\n clem = wmap[tok]\n if len(lem) < len(clem): ##new word is shorter (usually this means no plural form or so), eg. houses vs house\n if not clem in wmap or wmap[clem] == clem: #if not exists, add new mapping from old lemma of word to new lemma,eg. 
if mwords[Houses]=houses then we add mwords[houses]=house\n wmap[clem] = lem\n else:\n if not lem in wmap or wmap[lem] == lem: #existing lemma is shorter, we map to new lemma to the existing one\n wmap[lem] = wmap[tok]\n lem = wmap[tok]\n wmap[tok] = lem\n wmap[lem] = lem # a lemma maps to itself (maybe difference in capitalization)\n if olem != lem: wmap[olem] = lem # a lemma maps to itself\n if len(tok) > len(lem) and not tok.islower(): #if have Responsibilities -> responsibility, than add responsibilities -> responsibility, the \">=\" might be changed to \">\" without much loss\n addWord(wmap,tok.lower(),lem)", "def countWords(text):\r\n\r\n\tlistOfWord = []\r\n\tlistOfFrequency = []\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghitung frekuensi kata\r\n if word == '':\r\n pass\r\n elif word not in listOfWord:\t\t\t\t\t# menyimpan kata ke dalam list\r\n listOfWord.append(word)\r\n listOfFrequency.append(1)\r\n else:\r\n index = listOfWord.index(word)\r\n listOfFrequency[index] = listOfFrequency[index] + 1 # menambah frekuensi kata yang sudah ada\r\n\r\n\r\n\tlst = [listOfWord, listOfFrequency]\r\n\r\n\treturn lst", "def lemmas_freq_doc2(doc_id):\n db.doc_apply(doc_id, lemmas_freq_doc)", "def addWord(self, word):\n if word:\n self.word_dict[len(word)].append(word)", "def update_frequencies():\n pass", "def add(self, key, idx=None, count=1):\n key = self.lower_(key)\n if idx is not None:\n self.index2word[idx] = key\n self.word2index[key] = idx\n else:\n if key not in self.word2index:\n idx = len(self.word2index)\n self.index2word[idx] = key\n self.word2index[key] = idx\n\n if key not in self.word_count:\n self.word_count[key] = count\n else:\n self.word_count[key] += count", "async def wordfilter_add(self, ctx, *, phrase):\n phrase = phrase.lower()\n await self.bot.redis.rpush('wordfilter', phrase)\n self.words.append(phrase)\n await ctx.send(f'Added `{phrase}` to the filtered words')", "def add_string(self, s):\n\n #sent_lengths\n sent_len = sentence_length(s)\n for sentences in sent_len:\n if sentences not in self.sentence_lengths:\n self.sentence_lengths[sentences] = 1\n elif sentences in self.sentence_lengths:\n self.sentence_lengths[sentences] += 1\n \n s = clean_text(s)\n word_list = s.split(' ')\n\n for w in word_list:\n self.numwords += 1\n # frequency of words\n if w not in self.words:\n self.words[w] = 1\n elif w in self.words:\n self.words[w] += 1\n # freqency of length of words\n if len(w) not in self.word_lengths:\n self.word_lengths[len(w)] = 1\n elif len(w) in self.word_lengths:\n self.word_lengths[len(w)] += 1\n #word stemming\n word_stem = stem(w)\n if word_stem not in self.stems:\n self.stems[word_stem] = 1\n elif word_stem in self.stems:\n self.stems[word_stem] += 1\n\n # ten most common words\n a = list(self.words)\n maximum_count = self.words[a[0]] \n for word in a:\n if self.words[word] > maximum_count:\n maximum_count = self.words[word]\n count = 1\n cw_list = []\n while count <= maximum_count:\n for word in a:\n if self.words[word] == count:\n cw_list = [word] + cw_list\n count += 1\n\n self.common_word = cw_list[:10]\n \n #simplify stemlist\n a = list(self.stems)\n for x in range(len(a)):\n for y in a[x+1:]:\n if y[:4] == a[x][:4]:\n self.stems[a[x]] += self.stems[y]\n del self.stems[y]\n a.remove(y)", "def get_words_with_nplus_frequency(tokenized_sentences, count_threshold):\r\n\r\n closed_vocab = []\r\n \r\n\r\n word_counts = count_words(tokenized_sentences)\r\n \r\n\r\n for word, cnt in word_counts.items(): # complete this line\r\n \r\n\r\n if cnt >= 
count_threshold:\r\n \r\n # append the word to the list\r\n closed_vocab.append(word)\r\n \r\n return closed_vocab", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def add_vocab_words(self, words):\n for word in words:\n word = word.strip()\n if word:\n self.add_vocab_word(word)" ]
[ "0.69517446", "0.6657809", "0.6642947", "0.66001993", "0.65204823", "0.65122044", "0.6511084", "0.64812547", "0.64495873", "0.6446654", "0.64199805", "0.6322514", "0.6322514", "0.63023627", "0.6289761", "0.6264287", "0.625373", "0.6221482", "0.62176096", "0.6214713", "0.61868274", "0.616173", "0.61283326", "0.6119272", "0.6068827", "0.6052138", "0.60331404", "0.60299826", "0.60083723", "0.5997705" ]
0.7625774
0
hook to the new words
def hook(self, sentence, words): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onWord(self, name, location, length):\n logging.debug(\"onWord...\")", "def words(self, word):\n pass", "def new_match(self, new_word): \n self.rhyming_words.append(new_word)", "def replace_words_fun(self):\n\n cleaned_doc = []\n for word in str(self.doc).split():\n if word.lower() in self.replacement_list.keys():\n cleaned_doc.append(self.replacement_list[word.lower()])\n else:\n cleaned_doc.append(word)\n self.doc = ' '.join(cleaned_doc)", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def _get_new_words(self, current_text, index):\n raise NotImplementedError()", "def words(self, words):\n self._words = words", "def setWord(self,newword):\n\t\tself.word = newword;", "def next_word(self):\n self.append = self.add_new_word", "def update_word(self, word):\n self.word = word", "def add_word(self, word):\r\n word = word.strip().lower()\r\n if word in self.builtin_words:\r\n return\r\n if word not in self.word_count:\r\n self.word_count[word] = 1\r\n else:\r\n self.word_count[word] += 1", "def add_words(self, words):\r\n for word in words:\r\n self.add(word)", "def add_words_from_dict(self, kind: str, fn: str, words: Any) -> None:\n for word in words or []:\n self.words.add(word)\n self.words.add(word.lower())", "def addWord(wmap, tok, lem):\n\n if (not tok in tt.setStopWords) and (not (tok.isupper() and tok.lower() in tt.setStopWords)): #Don't add stopwords - but be carful US vs us\n olem = lem\n lem = lem.lower() # makes many things simpler\n if tok in wmap: # tok is mapped already..., this is needed, sometimes the lemmatizing is inconsistent, eg. \"prototyping\" might go to \"prototyping\" or \"prototype\"\n if wmap[tok] != lem: #token exists in map, but is mapped differently\n clem = wmap[tok]\n if len(lem) < len(clem): ##new word is shorter (usually this means no plural form or so), eg. houses vs house\n if not clem in wmap or wmap[clem] == clem: #if not exists, add new mapping from old lemma of word to new lemma,eg. 
if mwords[Houses]=houses then we add mwords[houses]=house\n wmap[clem] = lem\n else:\n if not lem in wmap or wmap[lem] == lem: #existing lemma is shorter, we map to new lemma to the existing one\n wmap[lem] = wmap[tok]\n lem = wmap[tok]\n wmap[tok] = lem\n wmap[lem] = lem # a lemma maps to itself (maybe difference in capitalization)\n if olem != lem: wmap[olem] = lem # a lemma maps to itself\n if len(tok) > len(lem) and not tok.islower(): #if have Responsibilities -> responsibility, than add responsibilities -> responsibility, the \">=\" might be changed to \">\" without much loss\n addWord(wmap,tok.lower(),lem)", "def onWordRecognised(self, *_args):\n # Unsubscribe to the event when talking,\n # to avoid repetitions\n memory.unsubscribeToEvent(\"WordRecognized\",\"AudioRecognition\")\n\n # We access to the word recognised in the memory\n word = memory.getData(\"WordRecognized\")\n\n # Debug : Print the word recognised\n print(\"Mot :\")\n print(word[0])\n print(\"Indice de confiance :\")\n print(word[1])\n print\n\n\n # We acknoledge a word if the trust is high enough\n if (word[1] > 0.28):\n self.mot = word[0]\n #self.tts.say(\"Le mot reconnu est :\"+self.mot)\n StateManager(self)\n \n\n # Subscribe again to the event\n memory.subscribeToEvent(\"WordRecognized\",\n \"AudioRecognition\",\n \"onWordRecognised\")", "def setWordKnown(self):\n self.wordKnown = ''.join(['_ ' if w not in self.guessedRight else w for w in self.getWord()])", "def process(self, message, **kwargs):\n\n spc = SpellChecker()\n res = word_tokenize(message.text)\n [print(\"before -> \", word) for word in res]\n\n new_words = []\n for word in res:\n new_words.append(spc.correction(word))\n [print(\"after -> \", spc.correction(word)) for word in res]\n\n message.text = untokenize(new_words)\n message.set(\"text\", message.text, True)\n print(\"The corrected sentence -> \", untokenize(new_words))", "def this_word(self):\n self.append = self.add_to_current_word", "def _do_word(self, word, lemma=None, type=None):\n # Improve 3rd person singular \"'s\" lemma to \"be\", e.g., as in \"he's fine\".\n if lemma == \"'s\" and type in (\"VB\", \"VBZ\"):\n lemma = \"be\"\n self.words.append(Word(self, word, lemma, type, index=len(self.words)))", "def _add_text(self, elem):\n words = WORD_SEPARATORS.split(elem.string.lower())\n for word in words:\n word = word.strip()\n if word in self._ignored_words:\n continue\n self._curr_words.append((self.crawler.word_id(word), self._font_size))", "def add(self, event: Event = None) -> None:\n if self.loaded:\n w = self.currentWord\n if w:\n self.spellController.add(w)\n self.tab.onFindButton()", "def words(self):\n pass", "def _add_text(self, elem):\n words = WORD_SEPARATORS.split(elem.string.lower())\n for word in words:\n word = word.strip()\n if word in self._ignored_words:\n continue\n self._curr_words.append((self.word_id(word), self._font_size))\n\n \"\"\" Update inverted index \"\"\"\n if self.word_id(word) in self._inverted_index:\n self._inverted_index[self.word_id(word)].add(self._curr_doc_id)\n self._resolved_inverted_index[word].add(self._curr_url)\n\n else:\n self._inverted_index[self.word_id(word)] = {self._curr_doc_id}\n self._resolved_inverted_index[word] = {self._curr_url}", "def add_vocab_word(self, word):\n # If it's a special token, it'll be separatelly processed during saving file. Skip here.\n if word in special_tokens:\n return\n # Check each character in the word. 
We don't want none-character (control code) in the vocaburary.\n for char in word:\n if cu.is_none_char(char):\n return\n # If it's a new word, store it.\n if (not word in self.words_ext) and (not word in self.words_new):\n self.words_new.append(word)", "def add(self, word: str) -> None:\n self.d.add(word)\n self.d.add(word.lower())\n self.save_user_dict()", "def testAddWords(self):\n\n\t\t\t\twords = ['mac', 'tips', 'tricks', 'macintosh', 'help', 'hack']\n\t\t\t\tspinner.Word.objects.add(words)\n\t\t\t\tfor word in words:\n\t\t\t\t\t\tword = spinner.Word.objects.get(name=word)\n\t\t\t\t\t\tword.delete()", "def add_word(self):\n word = self.word # easier to call word now\n\n wordlist_path = self.get_wordlist_path()\n with open(wordlist_path) as f:\n data = json.load(f)\n\n if exists_already(data,word):\n exit()\n\n next_index = int(data[\"cur_index\"]) + 1 # new index\n data[\"words\"][next_index] = word # update wordlist\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1])) # alphabetisize\n data[\"cur_index\"] = next_index # update index\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n\n print(f\"[{word}] added to [{self.pos}]. This is the [{next_index}] indexed word added.\")", "def add_word(self, word, freq=None):\n pass", "def botentry(msg, state):\n if 'words' not in state:\n # expensive initialization, do ALAP\n log.info(\"Loading word corpus...\")\n state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]\n #cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))\n\n entry = []\n for t in state['textshape']:\n log.debug(\"Searching for replacement for {0} ({1})\".format(\n state['doc'][t], state['doc'][t].pos_\n ))\n try:\n state['words'].sort(key=lambda w: \n w.similarity(state['doc'][t]),\n reverse=True\n )\n #cosine(w.vector, state['doc'][t].vector)\n state['words'].reverse\n except TypeError:\n # perhaps our word lacks a vector?\n pass\n\n if state['options']['matchpos']:\n sent = [x.string for x in list(state['doc'])]\n pos = state['doc'][t].pos_\n for ctr in range(10):\n # TODO: Parametrize the bounds on random here\n newword = state['words'][random.randint(50, 500)]\n log.debug(\"Trying \" + newword.orth_.lower())\n sent[t] = newword.orth_.lower() + \" \"\n newsent = nlp.nlp(\"\".join(sent))\n if newsent[t].pos_ == pos:\n break\n entry.append(newword.orth_.lower())\n log.debug(\"Word found: {0} ({1})\".format(\n entry[-1], newsent[t].pos_\n ))\n else:\n entry.append(\n state['words'][random.randint(50, 500)].orth_.lower()\n )\n log.debug(\"Word found: \" + entry[-1])\n\n log.info(\"Bot enters: \" + \", \".join(entry))\n state['entries'].append((config.nick, entry, 0))\n # no entry in state['votes']", "def register(cls, category, words):\n cls.correction_dict[category] = words" ]
[ "0.73335236", "0.70898795", "0.6857864", "0.6740064", "0.6655703", "0.66475874", "0.66398466", "0.65910035", "0.6578497", "0.65458024", "0.6518663", "0.6515241", "0.6465367", "0.6461474", "0.64537036", "0.643757", "0.64302313", "0.6394466", "0.6372546", "0.6360671", "0.63558465", "0.63247234", "0.6303381", "0.6281997", "0.6279032", "0.62551904", "0.62531894", "0.6196023", "0.6192379", "0.61839795" ]
0.8223545
0
Get the value of the bracket_as_entity option.
def bracket_as_entity(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _entity_as_text(self):\n return str(self.value)", "def tag_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tag_value\")", "def value_expression(self) -> Optional[str]:\n return pulumi.get(self, \"value_expression\")", "def value_expression(self) -> str:\n return pulumi.get(self, \"value_expression\")", "def value(self) -> str:\n return self[\"Value\"]", "def get_interact_value(self):\n return self.value", "def get_entity(obj):\n return obj.or_expression.and_expression.cmp_expression.arith_expression. \\\n mul_expression.unary_expression.pow_expression.primary_expression. \\\n entity", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> Optional[Expression]:\n return self.__value", "def entity(self):\n return self._entity", "def entity(self):\n return self._entity", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")" ]
[ "0.5723108", "0.546387", "0.5426668", "0.5333664", "0.53097063", "0.5231422", "0.52310586", "0.52190936", "0.52190936", "0.52109015", "0.52036643", "0.52036643", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114", "0.51383114" ]
0.6956395
0
Get the value of the en_quote_as_entity option.
def en_quote_as_entity(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _entity_as_text(self):\n return str(self.value)", "def zh_quote_as_entity(self):\n pass", "def get_quotes(self):\n # However ignore the 'true' autodetection setting.\n jscs_quotes = self.jscs_options.get('validateQuoteMarks')\n if isinstance(jscs_quotes, dict):\n jscs_quotes = jscs_quotes.get('mark')\n if jscs_quotes and jscs_quotes is not True:\n return jscs_quotes\n\n # Use whatever quote type is set in preferences\n return get_quotes()", "def quote_id(self) -> Optional[str]:\n return pulumi.get(self, \"quote_id\")", "def get_enable_sgxquotehelper(self) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_CONFCOM_ADDON_NAME = addon_consts.get(\"CONST_CONFCOM_ADDON_NAME\")\n CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get(\"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\")\n\n # read the original value passed by the command\n enable_sgxquotehelper = self.raw_param.get(\"enable_sgxquotehelper\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None\n ):\n enable_sgxquotehelper = self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_sgxquotehelper", "def include_quote(self) -> Optional[bool]:\n return pulumi.get(self, \"include_quote\")", "def value_as_text(self):\n property_name = \"_%s_as_text\" % self.attribute.type\n return getattr(self, property_name, self.value)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def get_entity_value(\n self,\n entity_type: str,\n default: Any = None\n ) -> Any:\n text = self.get_text()\n entities = self.get_entities()\n value = default\n checkers = {\n \"bot_command\": lambda entity: entity.is_bot_command(),\n \"url\": lambda entity: entity.is_url()\n }\n is_valid = checkers.get(entity_type)\n\n if is_valid is None:\n raise ValueError(\"Entity type not supported\")\n\n for entity in entities:\n if not is_valid(entity):\n continue\n\n offset = entity.offset\n length = entity.length\n value = text[offset:offset + length]\n\n # next values will be ignored\n break\n\n return value", "def encoded_value(self):\n return self._encoded_value", "def __str__(self):\n\n if self._s == '':\n return ''\n\n if len(self.quote) == 1:\n s = self.to_short()\n else:\n s = self.to_long()\n\n try:\n eval(self.quote + s + self.quote)\n except UnicodeDecodeError:\n if self._safe_mode:\n raise\n\n self._safe_mode = True\n\n assert eval(self.quote + s + self.quote) == self._s\n\n return s", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def quotes(self):\n return self._quotes", "def 
quotes(self):\n return self._quotes", "def entity_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"entity_path\")", "def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> str:\n\n return self._value", "def entity_guid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entity_guid\")", "def serialize_quote(self):\n partofs = PartOfQuote.objects.filter(part_of=self)\n quote = self.text\n for x in partofs:\n quote = quote.replace(x.text, create_tag(x))\n return quote", "def entity_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entity_name\")", "def entity_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entity_name\")", "def value_expression(self) -> str:\n return pulumi.get(self, \"value_expression\")", "def entity_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entity_type\")", "def value_expression(self) -> Optional[str]:\n return pulumi.get(self, \"value_expression\")", "def entity_id(self) -> str:\n return self._entity_id", "def value(self) -> str:\n return pulumi.get(self, \"value\")" ]
[ "0.675374", "0.64509875", "0.57066256", "0.56627864", "0.5602044", "0.55100125", "0.53501636", "0.53001523", "0.53001523", "0.52353424", "0.5217777", "0.520476", "0.5186793", "0.51678133", "0.51678133", "0.51678133", "0.51433784", "0.51433784", "0.5109276", "0.50877744", "0.5085243", "0.5077704", "0.506783", "0.5065544", "0.5065544", "0.50649816", "0.5051937", "0.5034583", "0.5033274", "0.5009421" ]
0.7657667
0
Get the value of the use_en option.
def use_en(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_locale():\n setting = Setting.query.filter(Setting.name == 'default_language').first()\n\n if setting is not None:\n return setting.value\n\n # Return default language when none found\n return 'en'", "def get_language(self):\n return self.lang", "def get_lang(self):\n return self.langs.lang", "def get_locale():\n return \"he\"", "def get_language(self):\r\n return self.language", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def Language(self, default=None):\n return self.data.get('language', default)", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n return article.meta_lang[:2]\r\n return self.config.target_language", "def get_language(self) -> str:\n return self.language", "def language(self):\r\n return self._get('language', {})", "def lang(self):\n return self._lang", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None", "def get_locale(self):\n return self.locale", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def label(self) -> Optional[str]:\n return self._itempage.labels.get(\"en\", None)", "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def language(self):\n if self.consent:\n self.consent.language\n translation.activate(self.consent.language)\n self._language = translation.get_language()\n else:\n self._language = settings.LANGUAGE_CODE\n return self._language", "def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language", "def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)", "def language(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"language\")", "def get_locale(self):\n\n return to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\")", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def srclang(self):\n return self.__srclang", "def language_code(self) -> str:\n return pulumi.get(self, \"language_code\")", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n self.language = article.meta_lang[:2]\r\n self.language = self.config.target_language", "def getIsMultilingual(self):\n return self.getOrDefault(self.isMultilingual)", "def to_language(self):\n return self.language()", "def default_locale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_locale\")" ]
[ "0.6574013", "0.6493536", "0.6492421", "0.64415383", "0.64187646", "0.6410013", "0.63214946", "0.6320696", "0.62423205", "0.62373483", "0.61986905", "0.6169279", "0.6161956", "0.6158396", "0.61440474", "0.61357933", "0.61304784", "0.61289316", "0.6108886", "0.6078264", "0.60595584", "0.6040994", "0.6033561", "0.6021548", "0.60000134", "0.5953721", "0.59452116", "0.59305185", "0.5928907", "0.5920228" ]
0.7486109
0
Get the value of the use_zh option.
def use_zh(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def t(eng, chinese):\n return chinese if 'zh' in get_info().user_language else eng", "def use_chinese_lang():\n with patch.object(seafes_config, 'lang', 'chinese'):\n yield", "def get_language_of_horizon_url(self) -> str:\n if 'horizons-mag' in self.url_h:\n self.language = 'en'\n elif 'horizonte-magazin' in self.url_h:\n self.language = 'de'\n elif 'revue-horizons' in self.url_h:\n self.language = 'fr'", "def srclang(self):\n return self.__srclang", "def get_locale():\n return \"he\"", "def get_language(self):\n return self.lang", "def getOption(self, *args):\n return _libsbml.ConversionProperties_getOption(self, *args)", "def get_language(self):\r\n return self.language", "def get_lang(self):\n return self.langs.lang", "def get_meta_lang(self):\n # we have a lang attribute in html\n attr = self.parser.getAttribute(self.article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n items = [\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\n ]\n for item in items:\n meta = self.parser.getElementsByTag(self.article.doc, **item)\n if meta:\n attr = self.parser.getAttribute(meta[0], attr='content')\n break\n\n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n\n return None", "def getVKBLanguage(self):\r\n\r\n return self.phone.sx('(send (send (get-input-locale-manager) get-current-locale) get-iso)', convertToString=False)", "def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None", "def get_option(self, option):\n\t\treturn self.options[option]", "def get_autohsts_value(self, vh_path):\n header_path = self.config.parser.find_dir(\"Header\", None, vh_path)\n if header_path:\n pat = '(?:[ \"]|^)(strict-transport-security)(?:[ \"]|$)'\n for head in header_path:\n if re.search(pat, self.config.parser.aug.get(head).lower()):\n return self.config.parser.aug.get(\n head.replace(\"arg[3]\", \"arg[4]\"))\n return None # pragma: no cover", "def rotor_setting(self):\n return self._charset[self._rot_offset]", "def subtitle_language(self):\n # type: () -> string_types\n return self._subtitle_language", "def get(self, key):\n try:\n if key == key.upper():\n return self.config[key]\n return self.options[key]\n except KeyError:\n return None", "def lang(self):\n return self._lang", "def get_language(self) -> str:\n return self.language", "def options(self): # 获取火车票查询选项 ex: iquary -dgktz 上海 北京 返回dgktz\n arg = self.get(0) # -dgktz\n if arg.startswith('-') and not self.is_asking_for_help:\n return arg[1:] # dgktz\n return ''.join(x for x in arg if x in 'dgktz')", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE", "def getLanguage(self):\n return self.getOrDefault(self.language)", "def get_locale():\n if (session.get(\"language\") is not None):\n return session.get('language')['charcode']\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def use_en(self):\n pass", "def language(self):\r\n return self._get('language', {})", "def audio_language(self):\n # type: () -> string_types\n return self._audio_language", "def get_language(self):\n return self.language if self.language is not None else get_language()", "def get_locale():\n localLang = request.args.get('locale')\n supportLang = app.config['LANGUAGES']\n if localLang in supportLang:\n return localLang\n userId = request.args.get('login_as')\n if userId:\n localLang = users[int(userId)]['locale']\n if localLang in 
supportLang:\n return localLang\n localLang = request.headers.get('locale')\n if localLang in supportLang:\n return localLang\n return request.accept_languages.best_match(app.config['LANGUAGES'])", "def hvac_mode(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"hvac_mode\"))\r\n return self._hvac_mode", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang" ]
[ "0.6717505", "0.56976795", "0.5680074", "0.55777305", "0.55127275", "0.5499445", "0.54354364", "0.5415517", "0.53936756", "0.5374183", "0.5350128", "0.533878", "0.5309943", "0.5309798", "0.5280812", "0.52690613", "0.5253077", "0.5231036", "0.52178603", "0.52027667", "0.51873636", "0.51732427", "0.51623285", "0.51395625", "0.5111741", "0.51115555", "0.5099094", "0.5089853", "0.5081166", "0.5075849" ]
0.74061096
0
Get the value of the zh_quote_as_entity option.
def zh_quote_as_entity(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def en_quote_as_entity(self):\n pass", "def quote_id(self) -> Optional[str]:\n return pulumi.get(self, \"quote_id\")", "def _entity_as_text(self):\n return str(self.value)", "def include_quote(self) -> Optional[bool]:\n return pulumi.get(self, \"include_quote\")", "def get_quotes(self):\n # However ignore the 'true' autodetection setting.\n jscs_quotes = self.jscs_options.get('validateQuoteMarks')\n if isinstance(jscs_quotes, dict):\n jscs_quotes = jscs_quotes.get('mark')\n if jscs_quotes and jscs_quotes is not True:\n return jscs_quotes\n\n # Use whatever quote type is set in preferences\n return get_quotes()", "def get_enable_sgxquotehelper(self) -> bool:\n # determine the value of constants\n addon_consts = self.get_addon_consts()\n CONST_CONFCOM_ADDON_NAME = addon_consts.get(\"CONST_CONFCOM_ADDON_NAME\")\n CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get(\"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\")\n\n # read the original value passed by the command\n enable_sgxquotehelper = self.raw_param.get(\"enable_sgxquotehelper\")\n # try to read the property value corresponding to the parameter from the `mc` object\n if (\n self.mc and\n self.mc.addon_profiles and\n CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and\n self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None\n ):\n enable_sgxquotehelper = self.mc.addon_profiles.get(\n CONST_CONFCOM_ADDON_NAME\n ).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == \"true\"\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return enable_sgxquotehelper", "def get_valueTransferEncoding(self):\n return \"utf-8\"", "def unicode_quote(value):\n return quote(value.encode('utf-8'))", "def standardise_quotes(self, val):\n if val.startswith(self.altquote) and val.endswith(self.altquote):\n middle = val[1:-1]\n val = \"%s%s%s\" % (self.quote, middle, self.quote)\n\n val = self.escape_quotes(val)\n\n return val", "def native_value(self) -> str | datetime | int | float | None:\n if (key := self.entity_description.value_key) is None:\n return None\n return self.coordinator.data[key]", "def entity_url_template(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"entity_url_template\")", "def queryValue(self):\n return utf8decoder(self.payload)[0]", "def encoded_value(self):\n return self._encoded_value", "def getValue(self):\n return _libsbml.ConversionOption_getValue(self)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def unquote(cls, value):\n if six.PY2:\n return unquote(value).decode(\"utf8\")\n else:\n return unquote(value.decode(\"ascii\"))", "def __get_place_of_trade(self):\n return self.get_random_row('exchanges')['exchange_code']", "def quotes(self):\n return self._quotes", "def quotes(self):\n return self._quotes", "def quote(value, *args, **kwargs):\n return parse.quote(encode(value, *args, **kwargs))", "def entity_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"entity_path\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", 
"def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[str]:\n return pulumi.get(self, \"value\")" ]
[ "0.6517354", "0.5597231", "0.5549282", "0.5526326", "0.5342145", "0.52442676", "0.5122187", "0.48057586", "0.47615916", "0.47427595", "0.47066078", "0.46741903", "0.4667675", "0.46673623", "0.4613167", "0.4613167", "0.4602033", "0.45982847", "0.45765555", "0.45765555", "0.4566633", "0.4555786", "0.4518783", "0.4518783", "0.4518783", "0.4518783", "0.4518783", "0.4518783", "0.4518783", "0.4518783" ]
0.7811794
0
Get the email addresses collected between startdate and enddate.
def get_email_addresses(startdate, enddate, user, password): emails = [] page = 1 more_pages = True while more_pages: response = requests.get( 'https://restapi.surveygizmo.com/v2/survey/{survey}' '/surveyresponse?' 'filter[field][0]=datesubmitted' '&filter[operator][0]=>=&filter[value][0]={start}+0:0:0' '&filter[operator][1]=<&filter[value][1]={end}+0:0:0' '&filter[field][1]=status&filter[operator][1]==' '&filter[value][1]=Complete' '&resultsperpage=500' '&page={page}' '&user:pass={user}:{password}'.format( survey=EMAIL_COLLECTION_SURVEY_ID, start=startdate, end=enddate, page=page, user=user, password=password)) results = json.loads(response.content) total_pages = results['total_pages'] more_pages = page < total_pages emails = emails + [r['[question(13)]'] for r in results['data']] return emails
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email_addresses(survey, startdatetime, enddatetime):\n token = settings.SURVEYGIZMO_API_TOKEN\n secret = settings.SURVEYGIZMO_API_TOKEN_SECRET\n emails = []\n page = 1\n more_pages = True\n survey_id = SURVEYS[survey][\"email_collection_survey_id\"]\n dtfmt = \"%Y-%m-%d+%H:%M:%S\"\n\n # Can't do anything without credentials.\n if token is None or secret is None:\n return emails\n\n while more_pages:\n response = requests.get(\n \"https://restapi.surveygizmo.com/v2/survey/{survey}\"\n \"/surveyresponse?\"\n \"filter[field][0]=datesubmitted\"\n \"&filter[operator][0]=>=&filter[value][0]={start}\"\n \"filter[field][1]=datesubmitted\"\n \"&filter[operator][1]=<&filter[value][1]={end}\"\n \"&filter[field][2]=status&filter[operator][2]==\"\n \"&filter[value][2]=Complete\"\n \"&resultsperpage=500\"\n \"&page={page}\"\n \"&api_token={token}\"\n \"&api_token_secret={secret}\".format(\n survey=survey_id,\n start=startdatetime.strftime(dtfmt),\n end=enddatetime.strftime(dtfmt),\n page=page,\n token=token,\n secret=secret,\n ),\n timeout=300,\n )\n\n results = json.loads(response.content)\n total_pages = results.get(\"total_pages\", 1)\n more_pages = page < total_pages\n emails = emails + [r[\"[question(13)]\"] for r in results[\"data\"]]\n page += 1\n\n valid_emails = []\n for email in emails:\n try:\n validate_email(email)\n except ValidationError:\n pass\n else:\n valid_emails.append(email)\n\n return valid_emails", "def get_events(self, start_date: datetime, end_date: datetime):\n\n events = []\n # Iterate through all events over the given\n for event_string in self._calendar.date_search(start_date, end_date):\n events.append(Event(event_string))\n return events", "def get_emails(self):\n email_ids = self.get_email_ids()\n Email = get_email_class()\n return [email for email in Email.objects.filter(pk__in=email_ids)]", "def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'):\n\n print 'Date range query for events on Primary Calendar: %s to %s' % (\n start_date, end_date,)\n query = gdata.calendar.client.CalendarEventQuery(start_min=start_date, start_max=end_date)\n feed = self.cal_client.GetCalendarEventFeed(q=query)\n for i, an_event in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. 
%s' % (i, an_event.title.text,)\n for a_when in an_event.when:\n print '\\t\\tStart time: %s' % (a_when.start,)\n print '\\t\\tEnd time: %s' % (a_when.end,)", "def filter_meetings_by_date(self, start_date, end_date):\n db_connection = DbConnection()\n\n try:\n connection = db_connection.get_connection()\n\n cursor = connection.cursor()\n cursor.execute(self.select_sql, (start_date, end_date))\n rows = cursor.fetchall()\n\n cursor.close()\n db_connection.close_connection()\n except Exception:\n raise\n\n else:\n\n return rows", "def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list", "def get_emails(self, is_verified=True, include_primary=True):\n if include_primary:\n emails = self.associated_emails.filter(is_verified=is_verified)\n else:\n emails = self.associated_emails.filter(is_verified=is_verified,\n is_primary_email=False)\n return [ae.email for ae in emails]", "def getEmail(self, data):\r\n\t\tprint('test')\r\n\t\t# Empty array to hold unique emails\r\n\t\tno_dp_email = []\r\n\r\n\t\t# Loop through each row in the dataframe...\r\n\t\tfor row in data.itertuples():\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# Parse through the row's keywords string for emails...\r\n\t\t\temails = re.findall(\"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\", row.keywords)\r\n\t\t\tprint(emails)\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# For each email in the array...\r\n\t\t\tfor email in emails:\r\n\t\t\t\tprint('test')\r\n\r\n\t\t\t\temail = str(email)\r\n\r\n\t\t\t\t# Append this email onto the array if it is not a repeat\r\n\t\t\t\tif email not in no_dp_email:\r\n\t\t\t\t\tprint('test')\r\n\r\n\t\t\t\t\tno_dp_email.append(email)\r\n\t\t\r\n\t\t# return array of unique emails\r\n\t\treturn no_dp_email", "def get_emails():\n\n # generate the gmail api service\n service = build_gmail_api_v1()\n\n # compute date for one year ago\n today = date.today()\n one_year_ago = today - timedelta(days=365.25)\n start = one_year_ago - timedelta(days=1)\n end = one_year_ago + timedelta(days=1)\n start_string = start.strftime(\"%Y/%m/%d\")\n end_string = end.strftime(\"%Y/%m/%d\")\n query_string = f'after:{start_string} before:{end_string}'\n\n # generate the gmail api request (get list of messages from one year ago)\n request = service.users().messages().list(userId='me', q=query_string)\n\n # try to get the api response\n try:\n response = request.execute()\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n return []\n\n # get list of message ids from the api response\n messages = list(response[\"messages\"])\n ids = [message[\"id\"] for message in messages]\n\n # store all emails in a 
list\n data_to_display = []\n\n # loop through each message id\n for id in ids:\n\n try:\n # store email data in a dict\n email = {}\n\n # get message data by querying gmail api using message id\n request = service.users().messages().get(userId='me', id=id)\n response = request.execute()\n\n # get date, subject, from, to, etc from message header\n headers = list(response[\"payload\"][\"headers\"])\n looking_for = [\"Date\", \"Subject\", \"From\", \"To\"]\n for header in headers:\n if header[\"name\"] in looking_for:\n email[header[\"name\"]] = header[\"value\"]\n\n # try to get message body (base64) from response\n # the json structure varies a lot so that is why there are no many try/except\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][1][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"parts\"][0][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n try:\n base64_message = response[\"payload\"][\"body\"][\"data\"]\n except (KeyError, TypeError, IndexError) as e:\n base64_message = \"Ti9B\"\n\n # decode the email body\n email[\"body\"] = base64.urlsafe_b64decode(\n base64_message).decode('utf-8')\n\n # populate list with email\n data_to_display.append(email)\n\n except HTTPError as e:\n print('Error response status code : {0}, reason : {1}'.format(\n e.resp.status, e.error_details))\n\n return data_to_display", "def on_call_email_addresses(self):\n if self._on_call_email_addresses is not None:\n return self._on_call_email_addresses\n\n url = 'https://{}.pagerduty.com/api/v1/users/on_call'.format(self.pager_duty_domain_prefix)\n on_call = self._make_request(url, headers={'Authorization': 'Token token=' + self.pager_duty_token})\n users = set() # users can be in multiple schedule, this will de-dupe\n\n for user in on_call['users']:\n for schedule in user['on_call']:\n if schedule['level'] <= self.escalation_level:\n users.add(user['email'])\n\n log.info('Found %d users on-call', len(users))\n self._on_call_email_addresses = users\n return users", "def get_email_addresses(r):\n email_match = re.findall(r'[\\w.-]+@[\\w.-]+.\\w+', r)\n email_list = []\n if email_match:\n for match in email_match:\n if match not in email_list:\n email_list.append(match)\n email_list = set(email_list)\n return email_list", "def subscriber_email_addresses(self) -> Sequence[str]:\n return pulumi.get(self, \"subscriber_email_addresses\")", "def get_dates(self, candidates=None, start=None, end=None):\n if candidates is not None:\n return [date for date in candidates if date in self.data]\n if start is None:\n start = self.first_date\n if end is None:\n end = self.last_date\n return [date for date in self.data if start <= date <= end]", "def get_emails_from_addressbook(self, id, limit=0, offset=0):\n logger.info(\"Function call: get_emails_from_addressbook: '{}'\".format(id, ))\n return self.__handle_error(\"Empty addressbook id\") if not id else self.__handle_result(self.__send_request('addressbooks/{}/emails'.format(id), 'GET', {'limit': limit or 0, 'offset': offset or 0}))", "def _date_range(start: str, end: str) -> List[str]:\n start_dt = _parse_ISO8601_date(start)\n end_dt = _parse_ISO8601_date(end)\n if start_dt > end_dt:\n raise ValidationError(\n \"Start date needs to be greater than or equal end date.\"\n )\n if (\n start_dt < _parse_ISO8601_date('1900') or\n end_dt > 
datetime.datetime.now().astimezone()\n ):\n raise ValidationError(\n \"Start date needs to be less than 1900-01-01T00:00:00Z and end\"\n \" date can't be from the feature.\"\n )\n return map(lambda date: date.isoformat(), rrule(\n freq=DAILY,\n dtstart=start_dt,\n until=end_dt,\n cache=True\n ))", "def dates_inbetween(self, start, end):\n\n return [start + timedelta(days=i) for i in xrange((end - start).days + 1)]", "def temp_daterange(start_date,end_date):\r\n # Query\r\n mam_temp_dr_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for date range with specific start_date and end_date\r\n mam_temp_start_end = list(np.ravel(mam_temp_dr_results))\r\n return jsonify(mam_temp_start_end)", "def get_emails(params, start_response):\n custodian = params.getfirst('custodian')\n date = params.getfirst('date')\n tfidf = params.getfirst('tfidf')\n out = json.dumps(documents_out(custodian, date, tfidf))\n status = '200 OK'\n response_headers = [('Content-type', 'application/json'),\n ('Access-Control-Allow-Origin', '*'),\n ('Content-Length', str(len(out)))]\n start_response(status, response_headers)\n return [out]", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def get_from_email(column_names, start_dates, end_dates, mail_server,\n account, sender, password):\n time_flag = None\n dfs = {test: pd.DataFrame(columns=column_names[test]) \\\n for test in [\"covid_ag\", \"flu_ag\"]}\n start_date = compare_dates(start_dates[\"covid_ag\"],\n start_dates[\"flu_ag\"], \"s\")\n end_date = compare_dates(end_dates[\"covid_ag\"],\n end_dates[\"flu_ag\"], \"l\")\n\n with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:\n for search_date in [start_date + timedelta(days=x)\n for x in range((end_date - start_date).days + 1)]:\n for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):\n for att in message.attachments:\n name = att.filename\n\n # Check the test type\n if \"Sars\" in name:\n test = \"covid_ag\"\n elif \"Flu\" in name:\n test = \"flu_ag\"\n else:\n continue\n\n # Check whether we pull the data from a valid time range\n whether_in_range = check_whether_date_in_range(\n search_date, start_dates[test], end_dates[test])\n if not whether_in_range:\n continue\n\n print(f\"Pulling {test} data received on %s\"%search_date.date())\n toread = io.BytesIO()\n toread.write(att.payload)\n toread.seek(0) # reset the pointer\n newdf = pd.read_excel(toread) # now read to dataframe\n newdf = regulate_column_names(newdf, test)\n dfs[test] = dfs[test].append(newdf)\n time_flag = search_date\n return dfs, time_flag", "def recipients(self) -> ty.List[str]:", "def getMatchesInDateRange(self, startDate=None, endDate=None):\n return None", "def get_emails(parsed_data):\n result = []\n known_values = []\n contacts = {'registrant_contact': [], 'administrative_contact': [], 'technical_contact': [],\n 'domain_registrar': []}\n if 'registrant_contact' in parsed_data:\n 
contacts['registrant_contact'].append(parsed_data['registrant_contact'])\n if 'administrative_contact' in parsed_data:\n contacts['administrative_contact'].append(parsed_data['administrative_contact'])\n if 'technical_contact' in parsed_data:\n contacts['technical_contact'].append(parsed_data['technical_contact'])\n if 'domain_registrar' in parsed_data:\n contacts['domain_registrar'].append(parsed_data['domain_registrar'])\n # parsing email address from contact block\n\n for contact, info in contacts.items():\n if info is not None:\n d = {'type': 2, 'data': '', 'properties': {}, 'special_properties': {}, 'is_valid': False, 'ref': {}}\n # properties dictionary\n is_valid = {}\n owner = {'owner': '', 'type': 11}\n organization = {'organization': '', 'type': 11}\n local_address = {'local_address': '', 'type': 5}\n domain_name = {'domain_name': '', 'type': 12}\n properties_list = []\n special_properties_list = []\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': ''}})\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n\n for name in info:\n if \"email_address\" in name:\n if name['email_address'] in known_values:\n break\n for feature in name.keys():\n if feature == \"email_address\":\n d['data'] = name['email_address']\n known_values.append(name['email_address'])\n\n if feature == \"full_name\":\n owner['owner'] = name['full_name']\n properties_list.append(owner)\n\n if feature == \"city_name\":\n organization['organization'] = name['city_name']\n properties_list.append(organization)\n\n d['is_valid'] = ''\n is_valid = {'isvalid': '', 'type': 0}\n\n # prevent from create result if phone number of contact is not available\n if d['data'] == '':\n continue\n try:\n domain_name['domain_name'] = d['data'].split('@')[1]\n local_address['local_address'] = d['data'].split('@')[0]\n properties_list.append(domain_name)\n properties_list.append(local_address)\n except:\n\n domain_name['domain_name'] = ''\n local_address['local_address'] = d['data']\n properties_list.append(domain_name)\n properties_list.append(local_address)\n\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': '', 'label': ''}})\n d['ref']['label'] = \"%s_name\" % contact\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n d['properties'] = properties_list\n special_properties_list.append(is_valid)\n d['special_properties'] = special_properties_list\n result.append(d)\n\n return result", "def email_all():\n\tSubscribtion = session.query(email).all()\n\treturn subscribtion_object", "def email_list(self) -> Sequence[str]:\n return pulumi.get(self, \"email_list\")", "def emails(self):\r\n return emails.Emails(self)", "def select_by_sent_date(begin_date, end_date):\n sql = \"SELECT * FROM dostawy.przesylki WHERE przesylka_dataNadania > %s AND przesylka_dataNadania < %s;\"\n val = (begin_date, end_date)\n rows = DBconnector.fetch_query_parameters(sql, val)\n return _wrap_in_parcel_list(rows)", "def emails(self):\r\n url = api_base + 'emails/'\r\n return json.loads(self.load_url(url))", "def get_email_addresses(user_ids: Set[UserID]) -> Set[Tuple[UserID, str]]:\n return db.session \\\n .query(\n DbUser.id,\n DbUser.email_address,\n ) \\\n .filter(DbUser.id.in_(user_ids)) \\\n 
.all()", "def get_events(self, from_date=None, to_date=None, owner=None):\n kwargs = {}\n\n if from_date and to_date:\n kwargs['start_datetime__range'] = [from_date, to_date]\n\n if owner:\n if isinstance(owner, Iterable):\n kwargs['owner__in'] = owner\n else:\n kwargs['owner'] = owner\n\n return self.model.objects.filter(**kwargs)" ]
[ "0.73143333", "0.6333139", "0.6218128", "0.60530376", "0.6030352", "0.59828067", "0.59139085", "0.5899869", "0.5890026", "0.584429", "0.5836746", "0.57966304", "0.57616365", "0.5750106", "0.5744556", "0.57397467", "0.57338107", "0.5727751", "0.5707086", "0.57034266", "0.56868106", "0.56773853", "0.5651271", "0.5650166", "0.5647068", "0.5635123", "0.5620741", "0.56136835", "0.5579764", "0.55567056" ]
0.77689946
0
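The whois snippet at the head of the negatives list above derives the email's domain and local part by splitting the address on '@' inside a bare try/except, falling back to an empty domain when the split fails. A minimal, self-contained sketch of that split (the function name and return shape are illustrative, not taken from the dataset):

def split_email(address):
    # Mirror the snippet above: local part before '@', domain after it;
    # if there is no '@', keep the whole string as the local part.
    local, sep, domain = address.partition("@")
    if not sep:
        return address, ""
    return local, domain

print(split_email("admin@example.com"))  # ('admin', 'example.com')
print(split_email("not-an-email"))       # ('not-an-email', '')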
Checks that the default parameters of an ExtrudeCircleShape are correct.
def test_default_parameters(self):

    assert self.test_shape.rotation_angle == 360
    assert self.test_shape.extrude_both
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_parameters(self):\n\n assert self.test_shape.center_coordinate == (0.0, 0.0, 0.0)", "def test_default_parameters(self):\n\n # assert self.test_shape.rotation_angle == 360\n assert self.test_shape.start_angle == 0", "def test_default_parameters(self):\n\n assert self.test_shape.rotation_angle == 360", "def _check_input(self, **kwargs):\n\n combi_a = ['nxny', 'dxdy', 'ul_corner']\n combi_b = ['nxny', 'dxdy', 'll_corner']\n if all(kwargs[k] is not None for k in combi_a):\n nx, ny = kwargs['nxny']\n dx, dy = kwargs['dxdy']\n x0, y0 = kwargs['ul_corner']\n if (dx <= 0.) or (dy >= 0.):\n raise ValueError('dxdy and input params not compatible')\n origin = 'upper-left'\n elif all(kwargs[k] is not None for k in combi_b):\n nx, ny = kwargs['nxny']\n dx, dy = kwargs['dxdy']\n x0, y0 = kwargs['ll_corner']\n if (dx <= 0.) or (dy <= 0.):\n raise ValueError('dxdy and input params not compatible')\n origin = 'lower-left'\n else:\n raise ValueError('Input params not compatible')\n\n self._nx = int(nx)\n self._ny = int(ny)\n if (self._nx <= 0) or (self._ny <= 0):\n raise ValueError('nxny not valid')\n self._dx = float(dx)\n self._dy = float(dy)\n self._x0 = float(x0)\n self._y0 = float(y0)\n self._origin = origin\n\n # Check for pixel ref\n self._pixel_ref = kwargs['pixel_ref'].lower()\n if self._pixel_ref not in ['corner', 'center']:\n raise ValueError('pixel_ref not recognized')", "def validation(self):\r\n\r\n if self.__radius <= 0:\r\n raise ValueError(\"the input radius must be a positive number\")", "def _verify_arguments(self, kwargs: dict[str, Any]):\n geom_stat_args = kwargs.keys() | self._stat._kwargs.keys()\n unknown = (\n geom_stat_args\n - self.aesthetics()\n - self.DEFAULT_PARAMS.keys() # geom aesthetics\n - self._stat.aesthetics() # geom parameters\n - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics\n - { # stat parameters\n \"data\",\n \"mapping\",\n \"show_legend\", # layer parameters\n \"inherit_aes\",\n \"raster\",\n }\n ) # layer parameters\n if unknown:\n msg = (\n \"Parameters {}, are not understood by \"\n \"either the geom, stat or layer.\"\n )\n raise PlotnineError(msg.format(unknown))", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecGnomv0_1.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataQ, \"Scattering vector values are missing\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataValues, \"Experimental intensity values are missing\")", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatGnomv1_0.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.inputCurve, \"No input Curve file provided\")", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('r', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Radius r must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Radius r must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n 
if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n # Check piezometer depth\n elif 'z' in self.parameters:\n z = self.parameters.get('z', -1)\n if type(z) not in [int, float]:\n flag = False\n warnings += \"Depth of piezometer must be a float value\\n\"\n else:\n if z < 0:\n flag = False\n warnings += \"Depth z must be higher than 0\\n\"\n else:\n flag = False\n warnings += \"Well don't contain well depth attributes\\n\"\n return(flag, warnings) # End Function", "def _checkSettings(self):\n geomsThatNeedMeshSize = (\"1D slab\", \"1D cylinder\")\n if self.geometry in geomsThatNeedMeshSize:\n if self.meshSubdivisionsPerCm is None:\n raise ValueError(\n \"{} geometry requires `mesh points per cm` to be defined in cross sections.\".format(\n self.geometry\n )\n )\n if self.criticalBuckling != False:\n raise ValueError(\n \"{} geometry cannot model critical buckling. Please disable\".format(\n self.geometry\n )\n )", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def _validate_usage_of_optional(self) -> None:\n # Because None can be the default value, None cannot be used to to indicate no default. This is why we need the optional field. This check prevents users of InputSpec from setting these two values to an inconsistent state, forcing users of InputSpec to be explicit about optionality.\n if self.optional is False and self.default is not None:\n raise ValueError(\n f'`optional` argument to {self.__class__.__name__} must be True if `default` is not None.'\n )", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"uniform\", -3, 2, default_value=5)", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_validate_non_default_follows_default_raises(self, constructor):\n default = constructor('d', default=None)\n nondefault = constructor('nd')\n fsig = FSignature(\n [default, nondefault],\n __validate_parameters__=False,\n )\n with pytest.raises(SyntaxError) as excinfo:\n fsig.validate()\n assert excinfo.value.args[0] == (\n 'non-default parameter follows default parameter'\n )", "def test_centroid_com_mask_shape():\n with pytest.raises(ValueError):\n mask = np.zeros((2, 2), dtype=bool)\n centroid_com(np.zeros((4, 4)), mask=mask)", "def test_basic1(self):\r\n self.validate((2, 2, 3, 3), (2, 2, 2, 2), 'valid', verify_grad=False)", "def test_no_default_value(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def validate_params(cls, args):\n if not (len(args) == 3 or len(args) == 5 or len(args) == 7):\n sys.exit(\n 'Execute o script passando o caminho do diretório das'\n ' imagens, ou apenas o path de uma imagem e decida se'\n ' deseja mover ou não'\n )\n args_dict = cls.__make_params(args)\n keys_args_set = set(args_dict.keys())\n if keys_args_set.difference(KEYS_DEFAULT_AS_SET) != set():\n sys.exit(\n 
'Verifique a passagem de parâmetros.'\n ' Foi encontrado parâmetros desconhecidos.'\n )\n\n return cls.__check_args(args_dict)", "def test_no_default_value(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def test_init_attr(self):\n \n for k in (0, -1, 1):\n s = space(curvature=k)\n self.assertTrue(isclose(\n s.curvature,\n k\n ))\n \n for k in (1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(curvature=k)\n self.assertTrue(s.curvature == k)\n\n for fk in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=fk)\n self.assertTrue(isclose(\n s.curvature,\n fk * abs(fk)\n ))\n\n for r in (1, 2, 1j, 2j, float('inf')):\n s = space(radius=r)\n self.assertTrue(s.curvature == 1/r**2)", "def _check_params(self):\n if self.k_initial <= 0 :\n raise ValueError('Initial K should be 1 or more.')", "def test_init_with_default_value(self):\n with pytest.raises(NotImplementedError):\n Dimension(\"yolo\", \"uniform\", -3, 4, default_value=4)", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('rw', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Well radius rw must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Well radius rw must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n return(flag, warnings) # End Function", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatcmpv2_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().inputCurve, \"No input 1D curves file provided\")", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def validate(cls, params):\n if np.isnan(params['loc']).sum():\n raise InvalidParamsError(\n \"Real location (mu) values are required for\"\n \" lognormal uncertainties.\"\n )\n if np.isnan(params['scale']).sum() or (params['scale'] <= 0).sum():\n raise InvalidParamsError(\n \"Real, positive scale (sigma) values are required for\"\n \" lognormal uncertainties.\"\n )", "def __init__(\n self,\n newE0=None,\n newE1=None,\n newE2=None,\n newMaximumHorizontalProjection=None,\n newMaximumVerticalProjection=None,\n newEquivalentHorizontalRadius=None,\n ):\n\n # Required Keys\n if newE0 is not None:\n self.e0 = newE0\n else:\n self.e0 = processingformats.errorEllipseAxis.ErrorEllipseAxis()\n\n if newE1 is not None:\n self.e1 = newE1\n else:\n self.e1 = processingformats.errorEllipseAxis.ErrorEllipseAxis()\n\n if newE2 is not None:\n self.e2 = newE2\n else:\n self.e2 = processingformats.errorEllipseAxis.ErrorEllipseAxis()\n\n if newMaximumHorizontalProjection is not None:\n self.maximumHorizontalProjection = newMaximumHorizontalProjection\n\n if newMaximumVerticalProjection is not None:\n self.maximumVerticalProjection = newMaximumVerticalProjection\n\n if 
newEquivalentHorizontalRadius is not None:\n self.equivalentHorizontalRadius = newEquivalentHorizontalRadius", "def test_default():\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 2])\n slide = _get_empty_slide()\n text = ax.set_title(\"TITLE TEXT\")\n ax.set_xlabel(\"X_LABEL\")\n ax.set_ylabel(\"Y_LABEL\")\n shape = figpptx.send(fig, slide=slide)\n assert get_typename(shape) == \"Shape\"\n shapes = _get_shapes(slide, individual=True)\n assert {shape.Type for shape in shapes} == {constants.msoPicture, constants.msoTextBox}" ]
[ "0.7234929", "0.6976932", "0.6569412", "0.6049638", "0.5911374", "0.58454925", "0.55786914", "0.55746585", "0.55698836", "0.55458", "0.5545174", "0.54440904", "0.54202956", "0.5415855", "0.5415855", "0.541363", "0.5402548", "0.5379901", "0.5365727", "0.53643066", "0.53586715", "0.535284", "0.5346959", "0.5326648", "0.53111345", "0.5304439", "0.5300108", "0.52884084", "0.527861", "0.52701735" ]
0.7183801
1
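The test in the row above runs against a pre-built self.test_shape fixture. A minimal sketch of what such a fixture plausibly looks like, using the paramak-style ExtrudeCircleShape(points, radius, distance) constructor that appears in the neighbouring rows (the import path and fixture values are assumptions inferred from those rows, not part of this dataset):

import unittest

from paramak import ExtrudeCircleShape  # assumed import path


class TestExtrudeCircleShape(unittest.TestCase):
    def setUp(self):
        # A circle of radius 10 centred at (30, 0), extruded over a distance of 30.
        self.test_shape = ExtrudeCircleShape(points=[(30, 0)], radius=10, distance=30)

    def test_default_parameters(self):
        # Defaults asserted by the row above.
        assert self.test_shape.rotation_angle == 360
        assert self.test_shape.extrude_both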
Creates an ExtrudeCircleShape with another ExtrudeCircleShape cut out and checks that the volume is correct.
def test_cut_volume(self):

    shape_with_cut = ExtrudeCircleShape(points=[(30, 0)], radius=20, distance=40, cut=self.test_shape)

    assert shape_with_cut.volume() == pytest.approx((math.pi * (20**2) * 40) - (math.pi * (10**2) * 30))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_intersect_volume(self):\n\n intersect_shape = ExtrudeCircleShape(points=[(30, 0)], radius=5, distance=50)\n\n intersected_shape = ExtrudeCircleShape(\n points=[(30, 0)],\n radius=10,\n distance=50,\n intersect=[self.test_shape, intersect_shape],\n )\n\n assert intersected_shape.volume() == pytest.approx(math.pi * 5**2 * 30)", "def __sub__(self, other):\n new_circle = None\n new_area = self.area - other.area\n if new_area > 0:\n new_radius = math.sqrt(new_area/math.pi)\n new_circle = Circle(new_radius)\n\n return new_circle", "def test_extrude_both(self):\n\n test_volume_extrude_both = self.test_shape.volume()\n self.test_shape.extrude_both = False\n assert self.test_shape.volume() == pytest.approx(test_volume_extrude_both)", "def calculate_shape(self):\n\n # error handling\n if self.radius <= 0: raise ValueError(\"Radius must be positive.\")\n if self.inner_radius < 0: raise ValueError(\"Inner radius must not be negative\")\n if self.inner_radius > self.radius: raise ValueError(\"Inner radius must be smaller than radius\")\n if self.thickness <= 0: raise ValueError(\"Thickness must be positive\")\n\n self.area = pi * self.radius ** 2\n self.area -= pi * self.inner_radius ** 2\n\n self.volume = self.area * self.thickness", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def cutout(self, centre, radius):", "def createCylinder( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' 
)\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n \r\n faces = [ range(0,samples), range(samples,2*samples) ]\r\n for i in range(0,2*samples,2) :\r\n base = 2*samples\r\n size = 2*samples\r\n faces.append( (base+i, base+i+1, base+(i+3)%size, base+(i+2)%size ) )\r\n \r\n return create( vertices, faces, colour )", "def createCone( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' 
)\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint )\r\n \r\n faces = [ range(0,samples) ]\r\n for i in range(0,samples) :\r\n base = samples\r\n size = samples\r\n faces.append( (base+i, base+(i+1)%size, 2*samples ) )\r\n \r\n return create( vertices, faces, colour )", "def objects_radius(self, centre, radius):", "def create_cylinder(self, pt1, pt2, radius):\n ## Compute the cylinder axis and center.\n axis = [pt1[i] - pt2[i] for i in range(0,3)]\n length = vtk.vtkMath.Normalize(axis)\n center = [(pt1[i] + pt2[i])/2.0 for i in range(0,3)]\n\n # Determine angle to rotate cylinder into given axis.\n vec = [ 0.0, 1.0, 0.0 ]\n rotate_axis = 3*[0.0]\n tmp_cross = 3*[0.0]\n vtk.vtkMath.Cross(vec, axis, rotate_axis)\n radangle = math.atan2(vtk.vtkMath.Norm(rotate_axis), vtk.vtkMath.Dot(axis,vec))\n degangle = vtk.vtkMath.DegreesFromRadians(radangle)\n\n # Create cylinder.\n cylinder = vtk.vtkCylinderSource()\n cylinder.SetCenter(0.0,0.0,0.0);\n cylinder.SetHeight(length);\n cylinder.SetRadius(radius);\n cylinder.SetResolution(32)\n cylinder.Update()\n\n # Transform.\n transformer = vtk.vtkTransform()\n transformer.Translate(center[0], center[1], center[2])\n transformer.RotateWXYZ(degangle,rotate_axis)\n\n # Get the polydata (polygon mesh) for the transformed cylinder.\n polyDataTransformer = vtk.vtkTransformPolyDataFilter()\n polyDataTransformer.SetInputData(cylinder.GetOutput())\n polyDataTransformer.SetTransform(transformer)\n polyDataTransformer.Update()\n return polyDataTransformer.GetOutput()", "def HollowCylinder(self,center=(0,0,0),inner_radius=1.0,outer_radius=2.,\n element_type='hex',isotropic=True,nrad=5,ncirc=10, nlong=20,length=10):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.HollowCircle(center=(center[0],center[1]), inner_radius=inner_radius,\n outer_radius=outer_radius, element_type=\"quad\",\n isotropic=isotropic, nrad=nrad, ncirc=ncirc)\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def __add__(self, other):\n new_area = self.area + other.area\n new_radius = math.sqrt(new_area/math.pi)\n new_circle = Circle(new_radius)\n\n return new_circle", "def isCrossingCircle(self, other):\n vector = Vector.createFromTwoPoints(self.center, other.center)\n return vector.norm < self.radius + other.radius", "def test_compound_stiffened_isection():\n uc = 
steel_sections.i_section(d=400, b=400, t_f=25, t_w=25, r=30, n_r=8)\n plate1 = (\n sections.rectangular_section(b=500, d=10).align_center(uc).align_to(uc, \"top\")\n )\n plate2 = (\n sections.rectangular_section(b=500, d=10)\n .align_center(uc)\n .align_to(uc, \"bottom\")\n )\n geom = uc + plate1 + plate2\n\n new_geom = geom.offset_perimeter(-9)\n new_geom.create_mesh([100])\n section = Section(new_geom)\n\n new_geom = geom.offset_perimeter(-10)\n new_geom.create_mesh([100])\n section = Section(new_geom)\n\n new_geom = geom.offset_perimeter(-11)\n new_geom.create_mesh([100])\n section = Section(new_geom)", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def test_absolute_shape_volume(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() == pytest.approx(math.pi * (10**2) * 30)", "def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)", "def cylinder_circles(node_a, node_b, radius, element_number=10):\n\n vector = (np.array(node_a) - np.array(node_b)).tolist()\n pts_a = circle(node_a, vector, radius, element_number)\n pts_b = circle(node_b, vector, radius, element_number)\n\n return pts_a, pts_b", "def __circle_collision(self, circle):\n raise Exception('--- This methods have not been implemented yet! Use circle_collider instead ---')", "def _add_cylinder_basic(self, point1, point2, radius):\n\n self._cylinder_end_nodes.append(point1)\n self._cylinder_end_nodes.append(point2)\n self._cylinder_end_nodes_radiuses.append(radius)\n self._cylinder_end_nodes_radiuses.append(radius)\n self.object_number += 1", "def __init__(self, ax, onselect, minspan=None, useblit=False, circprops=None):\n if circprops is None:\n circprops = dict(fc='w', alpha=0.5) \n \n self.ax = ax\n self.visible = True\n self.canvas = ax.figure.canvas\n self.canvas.mpl_connect('motion_notify_event', self.onmove)\n self.canvas.mpl_connect('button_press_event', self.press)\n self.canvas.mpl_connect('button_release_event', self.release)\n self.canvas.mpl_connect('draw_event', self.update_background)\n\n self.circ = None\n self.background = None\n\n self.circprops = circprops\n self.onselect = onselect\n self.useblit = useblit\n self.minspan = minspan\n\n self.circ = Circle( (0,0), 1, **self.circprops)\n \n\tself.unit_verts = [v for v in self.circ.verts]\n\tself.circ.set_visible(False)\n\n if not self.useblit: self.ax.add_patch(self.circ)\n self.pressx = None", "def patch(self, **kwargs):\n return patches.Ellipse(self.center, 2.0 * self.a, 2.0 * self.b,\n angle=numpy.rad2deg(self.tilt), **kwargs)", "def cylinder_intersection_check(r0, step, orientation, radius): \n A = 1 - (step[0]*orientation[0]+step[1]*orientation[1]+step[2]*orientation[2])**2\n B = 2 * (r0[0]*step[0]+r0[1]*step[1]+r0[2]*step[2] - (r0[0]*orientation[0]+r0[1]*orientation[1]+r0[2]*orientation[2]) * (step[0]*orientation[0]+step[1]*orientation[1]+step[2]*orientation[2]))\n C = r0[0]**2+r0[1]**2+r0[2]**2 - radius**2 -(r0[0]*orientation[0]+r0[1]*orientation[1]+r0[2]*orientation[2])**2 \n t = (-B + math.sqrt(B**2 - 4*A*C)) / (2*A)\n return 
t", "def create_ellipse(self, ratio):\n circ = Point(self.center).buffer(1.0)\n ell = affinity.scale(circ, float(\n self.lengths[0]*ratio), float(self.lengths[1]*ratio))\n ellr = affinity.rotate(ell, self.angle)\n return ellr", "def __init__(self, shape, r=2, d=-1):\n self.radius = r\n if d == -1:\n self.stride = 2*r+1\n else:\n self.stride = d\n self.image_shape = shape\n self.patch_shape = ( r*2+1, 2*r+1 )", "def __init__(self, pos, heading=vector(0,0,1)):\n # list of vPython 3D shapes that make up this player\n self.parts = []\n \n self.pos = vector(pos)\n # Direction in which robot is moving, normalized to unit length\n self.heading = norm(heading)\n\n self.radius = 1.0\n\n self.velocity = vector(0,0,0)\n\n face = cylinder(pos=self.pos, axis = (0,1.5,0), radius=.75,\n color=color.white, material = materials.chrome)\n self.parts += [face] \n\n self.head = sphere(pos=self.pos, radius = .75, color = color.white, material = materials.chrome)\n self.parts += [self.head]\n\n \n left_eye = sphere(pos=self.pos+vector(.35,.4,.6), \n radius=0.36, color=color.blue, material = materials.emissive)\n self.parts += [left_eye]\n right_eye = sphere(pos=self.pos+vector(-.35,.4,.6),\n radius=0.36, color=color.blue, material = materials.emissive)\n self.parts += [right_eye]\n\n neck = cylinder(pos=self.pos+vector(0,-1,0), axis = (0,.5,0), radius = .05, color=color.white)\n self.parts += [neck]\n\n self.body = cylinder(pos=self.pos+vector(0,-1.75,0),axis = (0,.75,-.2), radius = .35, color=color.white, material = materials.chrome)\n self.parts += [self.body]\n\n bottom = sphere(pos=self.pos+vector(0,-1.75,0), radius =.35, color = color.white, material = materials.chrome)\n self.parts += [bottom]\n\n right_shoulder = sphere(pos = self.pos+vector(-.35,-1,0), radius = .20, color = color.blue, material = materials.chrome)\n self.parts += [right_shoulder]\n\n left_shoulder = sphere(pos= self.pos+vector(.35,-1,0), radius = .20, color = color.blue, material = materials.chrome)\n self.parts += [left_shoulder]\n\n right_arm = cone(pos = self.pos+vector(-.36, -1.1, 0), axis = (-.2, -.7, -.4), radius = .12, color = color.white, material = materials.chrome)\n self.parts += [right_arm]\n\n left_arm = cone(pos = self.pos+vector(.36, -1.1, 0), axis = (.2, -.7, -.4), radius = .12, color = color.white, material = materials.chrome)\n self.parts += [left_arm]\n\n right_leg = cone(pos = self.pos+vector(-.32, -2.85, 0), axis = (.1, .8, .1), radius = .2, color = color.white, material = materials.chrome)\n self.parts += [right_leg]\n\n left_leg = cone(pos = self.pos+vector(.32,-2.15,.8), axis = (-.1, .1, -.8), radius = .2, color = color.white, material = materials.chrome)\n self.parts += [left_leg]", "def ArcCylinder(self, center=(0.,0.,0.), radius=1., start_angle=0, end_angle=np.pi/2.,\n length=10., nrad=16, ncirc=40, nlong=50, element_type=\"hex\"):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.Arc(center=(center[0],center[1]), radius=radius, start_angle=start_angle,\n end_angle=end_angle, nrad=nrad, ncirc=ncirc, element_type=\"quad\")\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def __init__(self, size:Point, **kwargs):\n 
PhysicsEntity.__init__(self, **kwargs)\n self.size = size\n self.collision_shape = to_collision_rect(self.size)", "def tube(outside_diam, inside_diam, height):\n global _cmds, fragments\n r1 = outside_diam / 2\n r2 = inside_diam / 2\n _cmds = (\n \"difference(){\\n\"\n f\"cylinder(h={height},r1={r1},r2={r1},\"\n f\"center=false,$fn={fragments});\\n\"\n f\"cylinder(h={height*3},r1={r2},r2={r2},\"\n f\"center=true,$fn={fragments});\\n\"\n \"}\\n\") + _cmds" ]
[ "0.7168453", "0.619414", "0.5716917", "0.56402206", "0.5580943", "0.5580943", "0.55319566", "0.5295469", "0.52860016", "0.52201635", "0.5207854", "0.51808774", "0.5158859", "0.5153207", "0.5115594", "0.51125956", "0.51054686", "0.50718325", "0.5044905", "0.50279474", "0.49805546", "0.49528223", "0.49490973", "0.48958465", "0.48892346", "0.48819572", "0.48803875", "0.4835122", "0.4834773", "0.48159912" ]
0.7148607
1
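The expected value in the row above is plain cylinder arithmetic: an extruded circle of radius r and extrusion distance d has volume pi * r**2 * d, so the shape being cut (radius 20, distance 40) loses the volume of the fixture shape (radius 10, distance 30). A short, dependency-free check of those numbers:

import math

outer = math.pi * 20**2 * 40      # extruded circle being cut: radius 20, distance 40
cut = math.pi * 10**2 * 30        # self.test_shape from the fixture: radius 10, distance 30
expected = outer - cut

print(round(outer, 2))     # 50265.48
print(round(cut, 2))       # 9424.78
print(round(expected, 2))  # 40840.7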
Creates ExtrudeCircleShapes with other ExtrudeCircleShapes intersected and checks that their volumes are correct.
def test_intersect_volume(self):

    intersect_shape = ExtrudeCircleShape(points=[(30, 0)], radius=5, distance=50)

    intersected_shape = ExtrudeCircleShape(
        points=[(30, 0)],
        radius=10,
        distance=50,
        intersect=[self.test_shape, intersect_shape],
    )

    assert intersected_shape.volume() == pytest.approx(math.pi * 5**2 * 30)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def test_cut_volume(self):\n\n shape_with_cut = ExtrudeCircleShape(points=[(30, 0)], radius=20, distance=40, cut=self.test_shape)\n\n assert shape_with_cut.volume() == pytest.approx((math.pi * (20**2) * 40) - (math.pi * (10**2) * 30))", "def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))", "def test_compound_stiffened_isection():\n uc = steel_sections.i_section(d=400, b=400, t_f=25, t_w=25, r=30, n_r=8)\n plate1 = (\n sections.rectangular_section(b=500, d=10).align_center(uc).align_to(uc, \"top\")\n )\n plate2 = (\n sections.rectangular_section(b=500, d=10)\n .align_center(uc)\n .align_to(uc, \"bottom\")\n )\n geom = uc + plate1 + plate2\n\n new_geom = geom.offset_perimeter(-9)\n new_geom.create_mesh([100])\n section = Section(new_geom)\n\n new_geom = geom.offset_perimeter(-10)\n new_geom.create_mesh([100])\n section = Section(new_geom)\n\n new_geom = geom.offset_perimeter(-11)\n new_geom.create_mesh([100])\n section = Section(new_geom)", "def test_extrude_both(self):\n\n test_volume_extrude_both = self.test_shape.volume()\n self.test_shape.extrude_both = False\n assert self.test_shape.volume() == pytest.approx(test_volume_extrude_both)", "def find_intersections_circle_circle(circle1: Circle, circle2: Circle) -> {Point}:\n # Determine some constants, for easy access\n center1 = circle1.center\n r1 = circle1.radius\n center2 = circle2.center\n r2 = circle2.radius\n diff_between_centers = center2 - center1\n distance_between_centers = abs(diff_between_centers)\n if 
hasattr(distance_between_centers, 'evalf'):\n dis_evalf = distance_between_centers.evalf()\n # print(distance_between_centers, dis_evalf)\n if not hasattr(dis_evalf, '__float__'):\n raise ValueError(f'dis_evalf has no float attribute: {type(dis_evalf)} {dis_evalf}')\n distance_between_centers_float = float(dis_evalf)\n else:\n print(f'{distance_between_centers} has no evalf property.')\n distance_between_centers_float = float(distance_between_centers)\n\n # Determine if the circles even do intersect. There are four cases:\n # 1. Centers are the same => Cannot intersect (either coincident or one contained in other)\n # 2. Circles are further apart than the sum of their radius => Cannot intersect (too far apart)\n # 3. Circles are close than the absolute value of the difference of radius => Cannot intersect(one inside other)\n # 4. Otherwise => Circles intersect\n # Note that in the 4th case, we can find two sub-cases, where the circles are tangent (and thus intersect once)\n # or where the circles intersect twice. Technically, we could just handle the second sub-case, but we separate\n # them here for computational speed.\n if distance_between_centers == 0:\n # Circles that have same center are either coincident or one is contained within the other\n return set()\n elif distance_between_centers_float > float((r1 + r2).evalf()):\n # Circles are too far apart to intersect\n return set()\n elif distance_between_centers_float < float(abs(r1 - r2).evalf()):\n # One circle contained in other\n return set()\n else:\n # For certain, our circles intercept.\n # Calculate the distance to the intersection area center.\n distance_recip = (1 / distance_between_centers)\n dis_to_area_center = (r1 ** 2 - r2 ** 2 + distance_between_centers ** 2) / (2 * distance_between_centers)\n # Calculate the center of the intersection area\n center_of_intersection_area = center1 + dis_to_area_center * diff_between_centers * distance_recip\n if dis_to_area_center == r1:\n # The two circles are tangent and thus intersect at exactly one point\n # Technically this check is unnecessary, since the below computation will return two equal points.\n # But to save on speed, we can just return the center point, since we know that is the single\n # intersection point\n return {center_of_intersection_area}\n else:\n # Two circles intersect at two points\n height = sqrt(r1 ** 2 - dis_to_area_center ** 2)\n x2 = center_of_intersection_area.x\n y2 = center_of_intersection_area.y\n diff_y = center2.y - center1.y\n diff_x = center2.x - center1.x\n height_times_distance_recip = height * distance_recip\n y_displacement = diff_y * height_times_distance_recip\n x_displacement = diff_x * height_times_distance_recip\n\n x3 = x2 + y_displacement\n y3 = y2 - x_displacement\n x4 = x2 - y_displacement\n y4 = y2 + x_displacement\n return {Point(x3, y3), Point(x4, y4)}", "def create_solids(self):\n\n plasma = self.create_plasma()\n pf_coils = self.create_pf_coils()\n tf_coil = self.create_tf_coils()\n vessel = self.create_vessel_components()\n\n shapes_and_components = plasma + pf_coils + vessel[:-1] + tf_coil\n self.shapes_and_components = shapes_and_components\n\n return shapes_and_components", "def drawShapes(self):\n self.draw_polygon(self.poly3.get_points() , color = \"#000\")\n self.draw_polygon(self.poly2.get_points() , color = \"#000\")\n self.draw_polygon(self.poly1.get_points() , color = \"#000\")\n self.draw_rect(0, 0, self.width, self.height, color= \"#000\")\n \"\"\"These statements are used to determine if a point is inside any of 
the\n 3 polygons and if so changes the point's color\"\"\"\n if (self.poly2.point_inside_polygon(self.p1) or self.poly1.point_inside_polygon(self.p1)\n or self.poly3.point_inside_polygon(self.p1)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p1.x, self.p1.y, 7, 7, color)\n\n if (self.poly2.point_inside_polygon(self.p2) or self.poly1.point_inside_polygon(self.p2)\n or self.poly3.point_inside_polygon(self.p2)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p2.x, self.p2.y, 7, 7, color)\n if (self.poly2.point_inside_polygon(self.p3) or self.poly1.point_inside_polygon(self.p3)\n or self.poly3.point_inside_polygon(self.p3)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p3.x, self.p3.y, 7, 7, color)", "def compose(self):\n # Stack the spheres on top of one another\n self.child_nodes[0].translate(0, -0.6, 0)\n self.child_nodes[1].translate(0, 0.1, 0)\n self.child_nodes[2].translate(0, 0.75, 0)\n\n # Shrink the middle sphere to 80%\n self.child_nodes[1].scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([0.8, 0.8, 0.8])\n )\n\n # Scale the top sphere to 70%\n self.child_nodes[2].scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([0.7, 0.7, 0.7])\n )\n\n # Color the spheres white\n for child in self.child_nodes:\n child.color_index = color.MIN_COLOR\n\n # Create a bounding box around the shapes\n self.aabb = AABB([0.0, 0.0, 0.0], [0.5, 1.1, 0.5])", "def intersect(self, rays):\n has_segments = bool(self._merged_segments)\n has_arcs = bool(self._merged_arcs)\n \n seg = {}\n arc = {}\n \n if has_segments:\n # do segment intersection\n seg[\"x\"], seg[\"y\"], seg[\"valid\"], seg[\"ray_u\"], seg[\"segment_u\"], \\\n seg[\"gather_ray\"], seg[\"gather_segment\"] = self._segment_intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n self._merged_segments[\"x_start\"],\n self._merged_segments[\"y_start\"],\n self._merged_segments[\"x_end\"],\n self._merged_segments[\"y_end\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n seg[\"norm\"] = tf.gather(\n tf.atan2(\n self._merged_segments[\"y_end\"] - self._merged_segments[\"y_start\"],\n self._merged_segments[\"x_end\"] - self._merged_segments[\"x_start\"]\n ) + PI/2.0,\n seg[\"gather_segment\"]\n )\n \n if has_arcs:\n # do arc intersection\n arc[\"x\"], arc[\"y\"], arc[\"valid\"], arc[\"ray_u\"], arc[\"arc_u\"], \\\n arc[\"gather_ray\"], arc[\"gather_arc\"] = self._arc_intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n self._merged_arcs[\"x_center\"],\n self._merged_arcs[\"y_center\"],\n self._merged_arcs[\"angle_start\"],\n self._merged_arcs[\"angle_end\"],\n self._merged_arcs[\"radius\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n arc[\"norm\"] = self._get_arc_norm(\n self._merged_arcs[\"radius\"], arc[\"arc_u\"], arc[\"gather_arc\"]\n )\n \n if has_segments and has_arcs:\n # has arcs and segments, so we need to chooose between segment and arc \n # intersections.\n seg[\"valid\"], arc[\"valid\"] = self._seg_or_arc(\n seg[\"ray_u\"], arc[\"ray_u\"], seg[\"valid\"], arc[\"valid\"]\n )\n \n return seg, arc", "def add_distractor_objects(self, env, block_obj_info,\n bowl_obj_info, select_colors, width, height):\n n_distractors = 0\n max_distractors = self.n_distractors\n distractor_obj_info = []\n while n_distractors < max_distractors:\n is_block = np.random.rand() > 0.35\n urdf = block_obj_info[\"urdf\"] if is_block else 
bowl_obj_info[\"urdf\"]\n size = block_obj_info[\"size\"] if is_block else bowl_obj_info[\"size\"]\n colors = select_colors(is_block)\n pose = self.get_random_pose(env, size)\n if not pose:\n continue\n obj_id = env.add_object(urdf, pose)\n color = np.random.choice(colors)\n if not obj_id:\n continue\n pb.changeVisualShape(obj_id, -1, rgbaColor=utils.COLORS[color] + [1])\n n_distractors += 1\n object_pix = utils.xyz_to_pix(pose[0], self.bounds, self.pix_size)\n distractor_obj_info.append({\n \"obj_id\":\n obj_id,\n \"pose\":\n pose,\n \"size\":\n size,\n \"urdf\":\n urdf,\n \"color\":\n color,\n \"unknown_color\":\n color in utils.EVAL_COLORS,\n \"pix\":\n object_pix,\n \"region\":\n determine_region(object_pix[0], object_pix[1], width,\n height), # hmap is transposed.\n })\n return distractor_obj_info", "def test_create_shapes(data_dir):\n dataset.create_shapes(10, 10, 1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10, 4)", "def test_circle_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1),\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle3.png')", "def add_triangles(t1, t2):\n solutions = []\n for i in range(3):\n for j in range(3):\n # See if t1 angle 0 and t2 angle i can be merged\n if eq(t1.angles[i] + t2.angles[j], math.pi):\n # The two angles (t1[i] and t2[j]) fit together to form a straight\n # line. 
Now we just need to make sure that the sides that are\n # merging are the same length\n if eq(t1.sides[(i + 1) % 3], t2.sides[(j + 2) % 3]):\n # Calculate the dx and dy on the side of t1 that's being \"extended\"\n dx = t1.vertices[i][0] - t1.vertices[(i + 1) % 3][0]\n dy = t1.vertices[i][1] - t1.vertices[(i + 1) % 3][1]\n\n v3x = t1.vertices[i][0] + dx * t2.sides[(j + 1) % 3] / t1.sides[(i + 2) % 3]\n v3y = t1.vertices[i][1] + dy * t2.sides[(j + 1) % 3] / t1.sides[(i + 2) % 3]\n solutions.append(Triangle([t1.vertices[(i + 1) % 3],\n t1.vertices[(i + 2) % 3],\n (v3x, v3y)]))\n\n if eq(t1.sides[(i + 2) % 3], t2.sides[(j + 1) % 3]):\n # Calculate the dx and dy on the side of t1 that's being \"extended\"\n dx = t1.vertices[i][0] - t1.vertices[(i + 2) % 3][0]\n dy = t1.vertices[i][1] - t1.vertices[(i + 2) % 3][1]\n\n v3x = t1.vertices[i][0] + dx * t2.sides[(j + 2) % 3] / t1.sides[(i + 1) % 3]\n v3y = t1.vertices[i][1] + dy * t2.sides[(j + 2) % 3] / t1.sides[(i + 1) % 3]\n solutions.append(Triangle([t1.vertices[(i + 1) % 3],\n t1.vertices[(i + 2) % 3],\n (v3x, v3y)]))\n\n return solutions", "def exportCircles(self):\n # Remember to compute circumcircles if not done before\n # for t in self.triangles:\n # self.circles[t] = self.circumcenter(t)\n\n # Filter out triangles with any vertex in the extended BBox\n # Do sqrt of radius before of return\n return [(self.circles[(a, b, c)][0], sqrt(self.circles[(a, b, c)][1]))\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]", "def main():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('-s', '--side', required=True)\n arg_parser.add_argument('-r', '--radius', required=True)\n arg_parser.add_argument('-i', '--images', required=True)\n arg_parser.add_argument('-d', '--dots', required=True)\n args = arg_parser.parse_args(sys.argv[1:])\n\n side_px = int(args.side)\n\n radius_px = int(args.radius)\n max_overlap_squared = (1 * radius_px)**2\n\n nb_dots = int(args.dots)\n nb_images = int(args.images)\n\n img_prefix = f'dots_radius_{radius_px}_count_{nb_dots:02d}'\n pos_prefix = f'positions_radius_{radius_px}_count_{nb_dots:02d}'\n\n for i in range(nb_images):\n surf = cr.ImageSurface(cr.FORMAT_RGB24, side_px, side_px)\n ctx = cr.Context(surf)\n\n ctx.set_source_rgb(1, 1, 1)\n ctx.rectangle(0, 0, side_px, side_px)\n ctx.fill()\n\n ctx.set_source_rgb(0, 0, 0)\n generated_dots = []\n for _ in range(nb_dots):\n sanity_max_tries = 500\n nb_tries = 0\n good_position = False\n while not good_position and nb_tries < sanity_max_tries:\n centre_x = random.uniform(radius_px, side_px - radius_px - 1)\n centre_y = random.uniform(radius_px, side_px - radius_px - 1)\n\n all_good = True\n for dot in generated_dots:\n dist_sq = dist_squared(centre_x, centre_y, dot[0], dot[1])\n if dist_sq < max_overlap_squared:\n all_good = False # too close\n break\n good_position = all_good\n nb_tries += 1\n\n if not good_position:\n print(\"Error: could not place disk without excessive overlap.\")\n print(f\"Tried {nb_tries} times. 
Exiting.\")\n sys.exit(1)\n else:\n generated_dots.append((centre_x, centre_y))\n\n ctx.new_sub_path()\n ctx.arc(centre_x, centre_y, radius_px, 0, 2 * pi)\n ctx.fill()\n\n # Save image out.\n surf.write_to_png(f'{img_prefix}_{i:05d}.png')\n\n # Also save out generated dot centres.\n with open(f'{pos_prefix}_{i:05d}.csv', 'wt') as pos_out:\n for dot in generated_dots:\n pos_out.write(f'{dot[0]}, {dot[1]}\\n')", "def cylinder_circles(node_a, node_b, radius, element_number=10):\n\n vector = (np.array(node_a) - np.array(node_b)).tolist()\n pts_a = circle(node_a, vector, radius, element_number)\n pts_b = circle(node_b, vector, radius, element_number)\n\n return pts_a, pts_b", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def __init__(self, factory, radii, heights, layers_lcs, transform_data,\n layers_physical_names, transfinite_r_data, transfinite_h_data,\n transfinite_phi_data, straight_boundary=None,\n layers_surfaces_names=None, surfaces_names=None,\n volumes_names=None):\n primitives = []\n k = 1 / 3.0 # inner quadrangle part of the first layer radius\n transfinite_types = [0, 0, 0, 1, 3]\n h_cnt = 0.0 # height counter\n if layers_lcs is None:\n layers_lcs = [[1 for _ in radii] for _ in heights]\n if surfaces_names is None:\n surfaces_names = [['NX', 'X', 'NY', 'Y', 'NZ', 'Z']]\n if layers_surfaces_names is None:\n layers_surfaces_names = [[0 for _ in radii] for _ in heights]\n if volumes_names is not None:\n new_layers_physical_names = [[volumes_names[x] for x in y]\n for y in layers_physical_names]\n layers_physical_names = new_layers_physical_names\n for i, h in enumerate(heights):\n c = radii[0] / math.sqrt(2.0)\n kc = k * radii[0] / math.sqrt(2.0)\n bottom_h = h_cnt # primitive bottom h\n top_h = h_cnt + h # primitive top h\n h_cnt += h\n if straight_boundary is None:\n # Core center\n primitives.append(Primitive(\n factory,\n [\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [[], [], [], [], [], [], [], [], [], [], [], []],\n [\n transfinite_phi_data,\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[0],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core X\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]], [], [], [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[0],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core Y\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-kc, kc, 
bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]], [[0, 0, top_h, 1]], [], [],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[0],\n transfinite_h_data[i],\n ],\n transfinite_types[2],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NX\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n [\n [], [], [], [],\n [], [[0, 0, bottom_h, 1]], [[0, 0, top_h, 1]], [],\n [], [], [], []\n ],\n [\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NY\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [], [], [[0, 0, top_h, 1]], [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_h_data[i],\n ],\n transfinite_types[4],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Layers\n for j in range(1, len(radii)):\n c1 = radii[j - 1] / math.sqrt(2.0)\n c2 = radii[j] / math.sqrt(2.0)\n # Layer X\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[j],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer Y\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, 
layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[j],\n transfinite_h_data[i]\n ],\n transfinite_types[2],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NX\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[j][0], transfinite_r_data[j][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NY\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[j][0], transfinite_r_data[j][1],\n rc],\n transfinite_h_data[i]\n ],\n transfinite_types[4],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n else:\n if straight_boundary[0] == 0:\n curve_types = {\n 'C': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[0] == 1:\n curve_types = {\n 'C': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[0] == 2:\n curve_types = {\n 'C': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n else:\n curve_types = {\n 'C': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n 'X': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n # Core center\n primitives.append(Primitive(\n factory,\n [\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['C'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []],\n [\n transfinite_phi_data,\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[0],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core X\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['X'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[0],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][0]]\n ))\n # Core Y\n primitives.append(Primitive(\n factory,\n [\n [c, c, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [kc, kc, bottom_h, layers_lcs[i][0]],\n [c, c, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [kc, kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['Y'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[0],\n transfinite_h_data[i],\n ],\n transfinite_types[2],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NX\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [-kc, kc, bottom_h, layers_lcs[i][0]],\n [-c, c, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, kc, top_h, layers_lcs[i][0]],\n [-c, c, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['NX'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Core NY\n if transfinite_r_data[0][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[0][2]\n else:\n rc = 
transfinite_r_data[0][2]\n primitives.append(Primitive(\n factory,\n [\n [kc, -kc, bottom_h, layers_lcs[i][0]],\n [-kc, -kc, bottom_h, layers_lcs[i][0]],\n [-c, -c, bottom_h, layers_lcs[i][0]],\n [c, -c, bottom_h, layers_lcs[i][0]],\n [kc, -kc, top_h, layers_lcs[i][0]],\n [-kc, -kc, top_h, layers_lcs[i][0]],\n [-c, -c, top_h, layers_lcs[i][0]],\n [c, -c, top_h, layers_lcs[i][0]]\n ],\n transform_data,\n curve_types['NY'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[0][0], transfinite_r_data[0][1],\n rc],\n transfinite_h_data[i],\n ],\n transfinite_types[4],\n layers_physical_names[i][0],\n surfaces_names=surfaces_names[layers_surfaces_names[i][0]]\n ))\n # Layers\n for j in range(1, len(radii)):\n if straight_boundary[j] == 0:\n curve_types = {\n 'X': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[j] == 1:\n curve_types = {\n 'X': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'NY': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n elif straight_boundary[j] == 2:\n curve_types = {\n 'X': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],\n 'Y': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n else:\n curve_types = {\n 'X': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'Y': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'NX': [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n 'NY': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n c1 = radii[j - 1] / math.sqrt(2.0)\n c2 = radii[j] / math.sqrt(2.0)\n # Layer X\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['X'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n transfinite_r_data[j],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[1],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer Y\n primitives.append(Primitive(\n factory,\n [\n [c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [c1, c1, bottom_h, layers_lcs[i][j]],\n [c2, c2, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [c1, c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['Y'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n transfinite_r_data[j],\n transfinite_h_data[i]\n ],\n transfinite_types[2],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NX\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n 
primitives.append(Primitive(\n factory,\n [\n [-c1, c1, bottom_h, layers_lcs[i][j]],\n [-c2, c2, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, c1, top_h, layers_lcs[i][j]],\n [-c2, c2, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['NX'],\n [\n [], [], [], [],\n [[0, 0, bottom_h, 1]],\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [], [], [], []\n ],\n [\n [transfinite_r_data[j][0],\n transfinite_r_data[j][1], rc],\n transfinite_phi_data,\n transfinite_h_data[i]\n ],\n transfinite_types[3],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n # Layer NY\n if transfinite_r_data[j][\n 1] == 0: # If type is Progression then reverse coefficient\n rc = 1.0 / transfinite_r_data[j][2]\n else:\n rc = transfinite_r_data[j][2]\n primitives.append(Primitive(\n factory,\n [\n [c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c1, -c1, bottom_h, layers_lcs[i][j]],\n [-c2, -c2, bottom_h, layers_lcs[i][j]],\n [c2, -c2, bottom_h, layers_lcs[i][j]],\n [c1, -c1, top_h, layers_lcs[i][j]],\n [-c1, -c1, top_h, layers_lcs[i][j]],\n [-c2, -c2, top_h, layers_lcs[i][j]],\n [c2, -c2, top_h, layers_lcs[i][j]]\n ],\n transform_data,\n curve_types['NY'],\n [\n [[0, 0, bottom_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, top_h, 1]],\n [[0, 0, bottom_h, 1]],\n [], [], [], [],\n [], [], [], []\n ],\n [\n transfinite_phi_data,\n [transfinite_r_data[j][0],\n transfinite_r_data[j][1], rc],\n transfinite_h_data[i]\n ],\n transfinite_types[4],\n layers_physical_names[i][j],\n surfaces_names=surfaces_names[\n layers_surfaces_names[i][j]]\n ))\n Complex.__init__(self, factory, primitives)", "def main():\n\n print \"************************************************************************************\"\n print \" This test shows various operators within Dihedral and DihedralContainer classes \"\n print \" Also shows memory management structure and access methods \"\n print \"************************************************************************************ \\n\"\n\n p1 = Particle( [1.1, 1.1, 1.1], \"Si\", 2.0, 1.23)\n p2 = Particle( [2.2, 2.2, 2.2], \"Si\", 1.0, 2.34)\n p3 = Particle( [3.3, 3.3, 3.3], \"Si\", 1.0, 2.34)\n p4 = Particle( [4.4, 4.4, 4.4], \"Si\", 1.0, 2.34)\n #\n p5 = Particle( [5.5, 5.5, 5.5], \"C\", 1.0, 2.34)\n p6 = Particle( [6.6, 6.6, 6.6], \"C\", 1.0, 2.34)\n p7 = Particle( [7.7, 7.7, 7.7], \"C\", 1.0, 2.34)\n p8 = Particle( [8.8, 8.8, 8.8], \"C\", 1.0, 2.34)\n\n b1 = Bond( 1, 2, 1.11, \"hooke\")\n b2 = Bond( 2, 3, 2.22, \"hooke\")\n #\n b3 = Bond( 1, 2, 3.33, \"hooke\")\n b4 = Bond( 1, 2, 4.44, \"hooke\")\n\n d1 = Dihedral(1, 2, 3, 4, 1.11, \"stiff\")\n #\n d2 = Dihedral(1, 2, 3, 4, 2.22, \"stiff\")\n\n atoms1 = ParticleContainer()\n atoms2 = ParticleContainer()\n atoms1.put(p1)\n atoms1.put(p2)\n atoms1.put(p3)\n #\n atoms2.put(p4)\n atoms2.put(p5)\n atoms2.put(p6)\n\n bonds1 = BondContainer()\n bonds2 = BondContainer()\n bonds1.put(b1)\n bonds1.put(b2)\n bonds2.put(b3)\n bonds2.put(b4)\n\n dihedrals1 = DihedralContainer()\n dihedrals2 = DihedralContainer()\n dihedrals1.put(d1)\n dihedrals2.put(d2)\n\n del p1, p2, p3, p4, p5, p6, b1, b2, b3, b4, d1, d2\n print \"\\n Cleaning memory for initial objects \\n\" \n\n print \"dihedral1 container\"\n print dihedrals1\n\n print \" \"\n print \"dihedral2 container\"\n print dihedrals2\n\n print \"Testing 'in' operator (1 in dihedrals1)\"\n 
if (1 in dihedrals1):\n print \"dihedrals1 contains gid 1\"\n else:\n print \"key not found in dihedrals1\"\n\n print \"Testing 'in' operator (5 in dihedrals1)\"\n if (5 in dihedrals1):\n print \"dihedrals1 contains gid 5\"\n else:\n print \"key not found in dihedrals1\"\n\n print \" \"\n dihedrals1 += dihedrals2\n print \"Will print the new dihedrals1 after adding dihedrals1 += dihedrals2\"\n print dihedrals1\n\n\n print \"Check for pre-existing dihedral\"\n if dihedrals1.hasDihedral([4,3,2,1]):\n print \"dihedral 1--2--3--4 exists\"\n else:\n print \"dihedral 1--2--3--4 does NOT exists\"\n\n print \"Check for pre-existing dihedral\"\n if dihedrals1.hasDihedral([2,3,1,4]):\n print \"dihedral 2--3--1--4 exists\"\n else:\n print \"dihedral 2--3--1--4 does NOT exists\"", "def __circle_collision(self, circle):\n raise Exception('--- This methods have not been implemented yet! Use circle_collider instead ---')", "def defineCircleLayout(self):\n # Define a 2-D array representing the position of each mesh point\n self.xPoints = self.frange(0,self.R,self.h)\n self.yPoints = self.frange(0,self.R,self.h)\n\n # Position of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints for j in self.yPoints if (i - self.R)**2 + (j - self.R)**2 < self.R^2] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = xPoints.index(x)\n yLabel = yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Position of the boundary points\n # Find the intersection of each mesh line with the circle\n # For a given vertical mesh line: \n # y = R - sqrt(R^2 - (x-R)^2) & y = R + sqrt(R^2 - (x-R)^2)\n # For a given horizontal mesh line: \n # x = R - sqrt(R^2 - (y-R)^2) & x = R + sqrt(R^2 - (y-R)^2)\n boundary_xyCoord = [(0,self.R),(self.R,0),(self.R,2*self.R),(2*self.R,self.R)] + [(x,self.R - math.sqrt(self.R**2 - (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(x,self.R - math.sqrt(self.R**2 + (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(self.R - math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] + [(self.R + math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n [xLabel,yLabel] = self.findLabel(x,y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def test_ellipse_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n color=(0, 0, 1, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n color=(0, 0, 1, 1),\n border_color=(1, 0, 0, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n border_color=(1, 0, 0, 1))\n ellipse.transform = transforms.STTransform(scale=(2.0, 3.0))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/ellipse3.png')", "def _collide(self):\n\n collisions = self._get_collisions()\n for collision in 
collisions:\n self._update_excitation(collision)\n atom1 = self.atoms[collision[0]]\n atom2 = self.atoms[collision[1]]\n\n r = atom1.pos-atom2.pos\n r_mag = np.linalg.norm(r)\n r_hat = r/r_mag\n\n v_1_r = np.dot(atom1.vel, r_hat)\n v_2_r = np.dot(atom2.vel, r_hat)\n\n v_1_r_f = (atom1.mass-atom2.mass)*v_1_r/(atom1.mass + atom2.mass)\\\n + 2*atom2.mass*v_2_r/(atom1.mass + atom2.mass)\n v_2_r_f = (atom2.mass-atom1.mass)*v_2_r/(atom1.mass + atom2.mass)\\\n + 2*atom1.mass*v_1_r/(atom1.mass + atom2.mass)\n\n delta_v_1 = (v_1_r_f - v_1_r)*r_hat\n delta_v_2 = (v_2_r_f - v_2_r)*r_hat\n\n self.atoms[collision[0]].vel += delta_v_1\n self.atoms[collision[1]].vel += delta_v_2", "def test_shapes_and_exceptions(self):\n output_path = FLAGS.test_tmpdir\n output_name = 'temp'\n equation_name = 'advection_diffusion'\n discretization = 'finite_volume'\n dataset_type = 'all_derivatives'\n high_resolution = 125\n low_resolution = 25\n shards = 2\n example_num_time_steps = 3\n batch_size = 4\n diffusion_coefficient = 0.3\n\n expected_equation = advection_equations.FiniteVolumeAdvectionDiffusion(\n diffusion_coefficient=diffusion_coefficient)\n\n # create a temporary dataset\n with flagsaver.flagsaver(\n dataset_path=output_path,\n dataset_name=output_name,\n equation_name=equation_name,\n discretization=discretization,\n simulation_grid_size=high_resolution,\n output_grid_size=low_resolution,\n equation_kwargs=str(dict(diffusion_coefficient=diffusion_coefficient)),\n dataset_type=dataset_type,\n num_shards=shards,\n total_time_steps=10,\n example_num_time_steps=example_num_time_steps,\n time_step_interval=5,\n num_seeds=4,\n ):\n create_training_data.main([], runner=beam.runners.DirectRunner())\n\n metadata_path = os.path.join(output_path, output_name + '.metadata.json')\n self.assertTrue(gfile.exists(metadata_path))\n dataset_metadata = readers.load_metadata(metadata_path)\n low_res_grid = readers.get_output_grid(dataset_metadata)\n high_res_grid = readers.get_simulation_grid(dataset_metadata)\n equation = readers.get_equation(dataset_metadata)\n\n self.assertEqual(low_res_grid.size_x, low_resolution)\n self.assertEqual(low_res_grid.size_y, low_resolution)\n self.assertEqual(high_res_grid.size_x, high_resolution)\n self.assertEqual(high_res_grid.size_y, high_resolution)\n self.assertAlmostEqual(high_res_grid.step, 2 * np.pi / high_resolution)\n self.assertAlmostEqual(\n equation.diffusion_coefficient, diffusion_coefficient)\n self.assertIs(type(equation), type(expected_equation))\n\n state_keys = expected_equation.key_definitions\n valid_data_keys = ((state_keys['concentration'].exact(),),\n (state_keys['concentration_edge_x'].exact(),\n state_keys['concentration_y_edge_y'].exact()))\n invalid_data_keys = ((state_keys['concentration'],\n state_keys['concentration_edge_x']),\n (state_keys['concentration_edge_x'],))\n valid_data_grids = (low_res_grid, low_res_grid)\n invalid_data_grids = (low_res_grid, high_res_grid)\n\n with self.assertRaises(ValueError):\n readers.initialize_dataset(\n dataset_metadata, invalid_data_keys, valid_data_grids)\n with self.assertRaises(ValueError):\n readers.initialize_dataset(\n dataset_metadata, valid_data_keys, invalid_data_grids)\n with self.assertRaises(ValueError):\n readers.initialize_dataset(\n dataset_metadata, invalid_data_keys, invalid_data_grids)\n\n dataset = readers.initialize_dataset(\n dataset_metadata, valid_data_keys, valid_data_grids)\n dataset = dataset.repeat()\n dataset = dataset.batch(batch_size)\n\n [(first_state, second_state)] = dataset.take(1)\n 
self.assertEqual(set(first_state.keys()), set(valid_data_keys[0]))\n self.assertEqual(set(second_state.keys()), set(valid_data_keys[1]))\n first_state_shape = np.shape(first_state[valid_data_keys[0][0]])\n second_state_shape = np.shape(second_state[valid_data_keys[1][0]])\n expected_shape = (\n batch_size, example_num_time_steps, low_resolution, low_resolution)\n self.assertEqual(first_state_shape, expected_shape)\n self.assertEqual(second_state_shape, expected_shape)", "def example2_concatenated_RE(GeomCA_parameters):\n num_pts = 100\n GeomCA_parameters['experiment_filename_prefix'] = 'problematic_overlap_'\n subfolder = 'problematic_overlap'\n GeomCA_parameters['comp_consistency_threshold'] = 0.0\n GeomCA_parameters['comp_quality_threshold'] = 0.3\n R = np.concatenate([circle(n=num_pts, r=1.5), circle(n=int(num_pts/5), r=0.3)])\n E = np.concatenate([circle(n=num_pts, r=1.2), circle(n=int(num_pts/5), r=0.32)])\n return run_GeomCA_and_visualize(R, E, subfolder, GeomCA_parameters)", "def checkCrossingEdges(face1Orig, face1Vec, face2Orig, face2Vec):\n face1Size = len(face1Orig)\n face2Size = len(face2Orig)\n for i in xrange(0, face1Size, 2):\n o1x = face1Orig[i]\n o1y = face1Orig[i+1]\n v1x = face1Vec[i]\n v1y = face1Vec[i+1]\n n1x = v1y\n n1y = -v1x\n for j in xrange(0, face2Size, 2):\n # Given ray1(O1, V1) and ray2(O2, V2)\n # Normal of ray1 is (V1.y, V1.x)\n o2x = face2Orig[j]\n o2y = face2Orig[j+1]\n v2x = face2Vec[j]\n v2y = face2Vec[j+1]\n n2x = v2y\n n2y = -v2x\n \n # Find t for ray2\n # t = [(o1x-o2x)n1x + (o1y-o2y)n1y] / (v2x * n1x + v2y * n1y)\n denum = v2x * n1x + v2y * n1y\n # Edges are parallel if denum is close to 0.\n if math.fabs(denum) < 0.000001: continue\n t2 = ((o1x-o2x)* n1x + (o1y-o2y) * n1y) / denum\n if (t2 < 0.00001 or t2 > 0.99999): continue\n \n # Find t for ray1\n # t = [(o2x-o1x)n2x + (o2y-o1y)n2y] / (v1x * n2x + v1y * n2y)\n denum = v1x * n2x + v1y * n2y\n # Edges are parallel if denum is close to 0.\n if math.fabs(denum) < 0.000001: continue\n t1 = ((o2x-o1x)* n2x + (o2y-o1y) * n2y) / denum\n \n # Edges intersect\n if (t1 > 0.00001 and t1 < 0.99999): return 1\n \n return 0", "def generate(self, objects):\n\n 'Initialize/reset each target volume with zeros'\n self.classification_mask = np.zeros([self.num_classes, self.target_height, self.target_width])\n self.bbox_mask = np.zeros([self.num_coords, self.target_height, self.target_width])\n self.depth_mask = np.zeros([self.num_depth, self.target_height, self.target_width])\n\n 'Set entire mask as background until objects are processed'\n self.classification_mask[0, :, :] = 1\n\n for obj in objects:\n obj_class = self._get_class(obj)\n shrink_factor = self._get_shrink_factor(obj_class)\n mask_coords = self._get_mask_coords(obj, shrink_factor)\n\n self._update_classification_mask(obj_class, mask_coords)\n self._update_bbox_mask(obj.bounding_box, mask_coords)\n self._update_depth_mask(obj.bounding_box, mask_coords)\n\n \"Suppress background and dont care classes\"\n suppress_mask = self._suppress_bg_dc()\n\n return np.copy(self.classification_mask), np.copy(self.bbox_mask), \\\n np.copy(self.depth_mask), np.copy(suppress_mask)", "def cylinder_collision_detection(\n point_a1, point_a2, radius_a, point_b1, point_b2, radius_b, bbox_a=None, bbox_b=None\n):\n\n if bbox_a is None:\n bbox_a = get_bbox([point_a1, point_a2], margin=radius_a)\n if bbox_b is None:\n bbox_b = get_bbox([point_b1, point_b2], margin=radius_b)" ]
[ "0.54239005", "0.5234049", "0.52122986", "0.5175614", "0.5172687", "0.51635194", "0.51555014", "0.5080032", "0.5074395", "0.50595355", "0.50466657", "0.50424504", "0.5032449", "0.5030777", "0.50256217", "0.501177", "0.5008883", "0.50016856", "0.50016856", "0.49965402", "0.49896276", "0.49815482", "0.4978592", "0.49544573", "0.49437985", "0.49396878", "0.4936221", "0.49339634", "0.49230528", "0.49084294" ]
0.71537566
0
Creates an ExtrudeCircleShape with extrude_both = True and False and checks that the volumes are correct.
def test_extrude_both(self):
    test_volume_extrude_both = self.test_shape.volume()
    self.test_shape.extrude_both = False
    assert self.test_shape.volume() == pytest.approx(test_volume_extrude_both)
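The test above references a self.test_shape fixture that this record does not include, and ExtrudeCircleShape appears without its package. A minimal sketch of how such a fixture might be set up, assuming the class comes from the paramak package (not named here) and reusing the points/radius/distance values that appear in this record's negatives:

import pytest
from paramak import ExtrudeCircleShape  # assumed package; the record never names it


class TestExtrudeCircleShape:
    def setup_method(self, method):
        # hypothetical fixture: radius=10 and distance=30 match the pi * 10**2 * 30
        # volume used by the cut/intersect tests listed in this record's negatives
        self.test_shape = ExtrudeCircleShape(points=[(30, 0)], radius=10, distance=30)

    def test_extrude_both(self):
        # extruding the full distance symmetrically or to one side encloses the
        # same volume, so toggling extrude_both should leave volume() unchanged
        test_volume_extrude_both = self.test_shape.volume()
        self.test_shape.extrude_both = False
        assert self.test_shape.volume() == pytest.approx(test_volume_extrude_both)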
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_intersect_volume(self):\n\n intersect_shape = ExtrudeCircleShape(points=[(30, 0)], radius=5, distance=50)\n\n intersected_shape = ExtrudeCircleShape(\n points=[(30, 0)],\n radius=10,\n distance=50,\n intersect=[self.test_shape, intersect_shape],\n )\n\n assert intersected_shape.volume() == pytest.approx(math.pi * 5**2 * 30)", "def test_cut_volume(self):\n\n shape_with_cut = ExtrudeCircleShape(points=[(30, 0)], radius=20, distance=40, cut=self.test_shape)\n\n assert shape_with_cut.volume() == pytest.approx((math.pi * (20**2) * 40) - (math.pi * (10**2) * 30))", "def HollowCylinder(self,center=(0,0,0),inner_radius=1.0,outer_radius=2.,\n element_type='hex',isotropic=True,nrad=5,ncirc=10, nlong=20,length=10):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.HollowCircle(center=(center[0],center[1]), inner_radius=inner_radius,\n outer_radius=outer_radius, element_type=\"quad\",\n isotropic=isotropic, nrad=nrad, ncirc=ncirc)\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def patch(self, **kwargs):\n return patches.Ellipse(self.center, 2.0 * self.a, 2.0 * self.b,\n angle=numpy.rad2deg(self.tilt), **kwargs)", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def Circle(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,\n element_type=\"tri\", refinement=False, refinement_level=2, algorithm=\"standard\"):\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the circle should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if algorithm == \"midpoint_subdivision\":\n from Florence.MeshGeneration.CustomMesher import SubdivisionCircle\n mesh = SubdivisionCircle(center=center, radius=radius, nrad=nrad, ncirc=ncirc,\n element_type=element_type, refinement=refinement, refinement_level=refinement_level)\n self.__update__(mesh)\n return\n\n if refinement:\n ndivider = refinement_level\n if nrad==1: nrad=2\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider)\n\n\n if ncirc % 8 != 0 or ncirc < 8:\n ncirc = (ncirc // 8)*8 + 8\n\n radii = radius\n\n radius = np.linspace(0,radii,nrad+1)[1:]\n t = np.linspace(0,2*np.pi,ncirc+1)\n x = radius[0]*np.sin(t)[::-1][:-1]\n y = radius[0]*np.cos(t)[::-1][:-1]\n\n points = np.zeros((ncirc+1,2),dtype=np.float64)\n points[0,:] = [0.,0.]\n points[1:,:] = np.array([x,y]).T\n\n\n self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)\n aranger = np.arange(ncirc // 2)\n self.elements[:,1] = 2*aranger + 1\n self.elements[:,2] = 2*aranger + 2\n self.elements[:,3] = 2*aranger + 3\n self.elements[-1,-1] = 1\n\n for i in range(1,nrad):\n t = np.linspace(0,2*np.pi,ncirc+1);\n x = radius[i]*np.sin(t)[::-1][:-1];\n y = radius[i]*np.cos(t)[::-1][:-1];\n points = np.vstack((points,np.array([x,y]).T))\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(1,nrad):\n aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)\n elements[:,0] = aranger\n elements[:,1] = aranger + ncirc\n elements[:,2] = 
np.append((aranger + 1 + ncirc)[:-1],i*ncirc+1)\n elements[:,3] = np.append((aranger + 1)[:-1],1+(i-1)*ncirc)\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n\n makezero(points)\n self.points = points\n self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n # SECOND LEVEL OF REFINEMENT IF NEEDED\n # mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=2)\n # for i in range(1,self.nelem):\n # mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=2)\n # self.__update__(mesh)\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__", "def vector_feild(dx,dy, shapes = False, curve = False):\n x,y = np.linspace(-5,5,10), np.linspace(-5,5,10)\n X,Y = np.meshgrid(x,y)\n\n # fx = 4*X + 3*X*Y\n # fy = (X**2) +2*Y\n fx = dx\n fy = dy\n graph = plt.quiver(X,Y,fx,fy)\n if curve == True:\n curves(0,1)\n if shapes != False:\n if shapes == 'c':\n geoshapes(c= True)\n elif shapes == 'r':\n geoshapes(r = True)\n elif shapes == 't':\n geoshapes(t = True)\n plt.show()", "def createCylinder( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' 
)\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n \r\n faces = [ range(0,samples), range(samples,2*samples) ]\r\n for i in range(0,2*samples,2) :\r\n base = 2*samples\r\n size = 2*samples\r\n faces.append( (base+i, base+i+1, base+(i+3)%size, base+(i+2)%size ) )\r\n \r\n return create( vertices, faces, colour )", "def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)", "def createCone( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' 
)\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint )\r\n \r\n faces = [ range(0,samples) ]\r\n for i in range(0,samples) :\r\n base = samples\r\n size = samples\r\n faces.append( (base+i, base+(i+1)%size, 2*samples ) )\r\n \r\n return create( vertices, faces, colour )", "def tilted_ellipse(s, pos1, pos2, size_x, size_y, color, angle):\n surface = pygame.Surface((150, 150), pygame.SRCALPHA, 32).convert_alpha()\n ellipse(surface, color, (0, 0, size_x, size_y))\n surface2 = pygame.transform.rotate(surface, angle)\n return s.blit(surface2, (pos1, pos2))", "def createEllipsoid( position=(0,0,0), radius=(1,1,1), colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20, exponentBottom = 2, exponentTop = 2, exponentSide = 2 ):\r\n \r\n if exponentBottom < 2.0 or exponentTop < 2.0 or exponentSide < 2.0 :\r\n raise ValueError( 'Exponents for ellipsoid must all be under 2.0!' )\r\n \r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n for i in range(1,samplesY):\r\n thetaI = i*math.pi/float(samplesY)\r\n if i < samplesY / 2 : \r\n n = exponentTop\r\n else:\r\n n = exponentBottom\r\n cos = math.cos(thetaI) \r\n y = cos * radius[1]\r\n scaleXZ = math.pow( 1-math.pow(math.fabs(cos),n), 1.0/float(n) )\r\n for j in range(0,samplesXZ):\r\n thetaJ = j*2.0*math.pi/float(samplesXZ)\r\n n = exponentSide\r\n cos = math.cos(thetaJ)\r\n x = cos * scaleXZ * radius[0]\r\n z = math.pow( 1-math.pow(math.fabs(cos),n), 1.0/float(n) ) * math.copysign(1, math.sin(thetaJ)) * scaleXZ * radius[2]\r\n vertices.append( position + Vector3d(x,y,z) )\r\n vertices.append( position + Vector3d(0,radius[1],0) )\r\n vertices.append( position + Vector3d(0,-radius[1],0) ) \r\n\r\n faces = []\r\n for i in range(0,(samplesY-2)*samplesXZ,samplesXZ) :\r\n for j in range(0,samplesXZ) :\r\n faces.append( (i+j, i+(j+1)%samplesXZ, i+samplesXZ+(j+1)%samplesXZ, i+samplesXZ+j) ) \r\n\r\n for i in range(0,samplesXZ) :\r\n base = (samplesY-2)*samplesXZ\r\n faces.append( ((i+1)%samplesXZ, i, (samplesY-1)*samplesXZ) ) \r\n faces.append( (base+i, base+(i+1)%samplesXZ, (samplesY-1)*samplesXZ+1) ) \r\n\r\n \r\n return create( vertices, faces, colour )", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def add_circle(self, center: Point, point2: Point,\n counts_as_step: bool = True, interesting: bool = False) -> Circle:\n circle = Circle(center=center, point2=point2)\n self.add_step_premade(circle, counts_as_step=counts_as_step, interesting=interesting)\n return circle", "def 
test_cyclohexane(self):\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]]\n ]),\n drawer=lambda image: image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n )", "def calculate_shape(self):\n\n # error handling\n if self.radius <= 0: raise ValueError(\"Radius must be positive.\")\n if self.inner_radius < 0: raise ValueError(\"Inner radius must not be negative\")\n if self.inner_radius > self.radius: raise ValueError(\"Inner radius must be smaller than radius\")\n if self.thickness <= 0: raise ValueError(\"Thickness must be positive\")\n\n self.area = pi * self.radius ** 2\n self.area -= pi * self.inner_radius ** 2\n\n self.volume = self.area * self.thickness", "def createCircularMask(shape, radius=4, center=None):\n w = shape[0]\n h = shape[1]\n if center is None: \n center = [int(w/2), int(h/2)]\n if radius is None:\n radius = min(center[0], center[1], w-center[0], h-center[1])\n X, Y = np.ogrid[:w, :h]\n dist2 = (X - center[0])**2 + (Y-center[1])**2\n mask = dist2 <= radius**2\n return mask", "def cylinder_circles(node_a, node_b, radius, element_number=10):\n\n vector = (np.array(node_a) - np.array(node_b)).tolist()\n pts_a = circle(node_a, vector, radius, element_number)\n pts_b = circle(node_b, vector, radius, element_number)\n\n return pts_a, pts_b", "def create_cylinders_volume(\n shape: (int, int, int),\n cylinders_list: list,\n foreground=1,\n dtype=np.uint8):\n volume = np.zeros(shape, dtype=bool)\n for point1, point2, radius in cylinders_list:\n volume = add_cylinder_px(volume, point1, point2, radius)\n return volume_bool_to_dtype(volume, fg=foreground, dtype=dtype)", "def ArcCylinder(self, center=(0.,0.,0.), radius=1., start_angle=0, end_angle=np.pi/2.,\n length=10., nrad=16, ncirc=40, nlong=50, element_type=\"hex\"):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.Arc(center=(center[0],center[1]), radius=radius, start_angle=start_angle,\n end_angle=end_angle, nrad=nrad, ncirc=ncirc, element_type=\"quad\")\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def geoshapes(c = False, r = False, t = False):\n if c == True:\n circle = plt.Circle((0,0), radius = 3, fill = None, edgecolor = 'y')\n plt.gca().add_patch(circle)\n elif r == True:\n rectangle = plt.Rectangle((0, 0), 5, 5, fill = None, edgecolor='r')\n plt.gca().add_patch(rectangle)\n elif t == True:\n points = [[0,0], [0,3], [3,3]]\n triangle = plt.Polygon(points, fill = None, edgecolor = 'r')\n plt.gca().add_patch(triangle)\n else:\n print('Error: please input a True value for either c, r , or t')", "def _draw_ellipse(data, obj, draw_options):\n if isinstance(obj, mpl.patches.Circle):\n # circle specialization\n return _draw_circle(data, obj, draw_options)\n x, y = obj.center\n ff = data[\"float format\"]\n\n if obj.angle != 0:\n draw_options.append(\n f\"rotate around={{{obj.angle:{ff}}:(axis cs:{x:{ff}},{y:{ff}})}}\"\n )\n\n do = \",\".join(draw_options)\n content = (\n f\"\\\\draw[{do}] (axis cs:{x:{ff}},{y:{ff}}) ellipse \"\n f\"({0.5 * obj.width:{ff}} and {0.5 * obj.height:{ff}});\\n\"\n )\n content += 
_patch_legend(obj, draw_options, \"area legend\")\n\n return data, content", "def defineCircleLayout(self):\n # Define a 2-D array representing the position of each mesh point\n self.xPoints = self.frange(0,self.R,self.h)\n self.yPoints = self.frange(0,self.R,self.h)\n\n # Position of internal mesh points\n internal_xyCoord = [(i,j) for i in self.xPoints for j in self.yPoints if (i - self.R)**2 + (j - self.R)**2 < self.R^2] \n\n # Define the dictionary containing internal points\n for k in internal_xyCoord:\n x = k[0]\n y = k[1]\n xLabel = xPoints.index(x)\n yLabel = yPoints.index(y)\n self.internalPoints[(xLabel,yLabel)] = meshPoint(type = 'internal',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n\n # Position of the boundary points\n # Find the intersection of each mesh line with the circle\n # For a given vertical mesh line: \n # y = R - sqrt(R^2 - (x-R)^2) & y = R + sqrt(R^2 - (x-R)^2)\n # For a given horizontal mesh line: \n # x = R - sqrt(R^2 - (y-R)^2) & x = R + sqrt(R^2 - (y-R)^2)\n boundary_xyCoord = [(0,self.R),(self.R,0),(self.R,2*self.R),(2*self.R,self.R)] + [(x,self.R - math.sqrt(self.R**2 - (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(x,self.R - math.sqrt(self.R**2 + (x-self.R)**2)) for x in self.xPoints[1:len(self.xPoints)-1]] + [(self.R - math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] + [(self.R + math.sqrt(self.R**2 - (y-self.R)**2),y) for y in self.yPoints[1:len(yPoints)-1]] \n\n # Define the dictionary containing boundary points\n for k in boundary_xyCoord:\n x = k[0]\n y = k[1]\n [xLabel,yLabel] = self.findLabel(x,y)\n self.boundaryPoints[(xLabel,yLabel)] = meshPoint(type = 'boundary',x = x, y = y, xLabel = xLabel, yLabel = yLabel) \n \n # Now that we have assigned the labels we can define fE, fW, fN and fS\n self.fCalc()", "def tail_shaft_circle(self):\n _profile = Circle(position=self.stabilizer_vright.position, radius=(self.critical_thickness / 2.0) * 1.5)\n _extrude = ExtrudedSolid(island=_profile, distance=self.stabilizer_h.root_chord)\n return _profile, _extrude", "def shape_type(self):\n return \"circle\"", "def HollowCircle(self,center=(0,0),inner_radius=1.0,outer_radius=2.,element_type='tri',isotropic=True,nrad=5,ncirc=10):\n\n # FOR SAFETY, RESET THE CLASS\n self.__reset__()\n\n if np.allclose(inner_radius,0):\n raise ValueError('inner_radius cannot be zero')\n\n t = np.linspace(0,2*np.pi,ncirc+1)\n if isotropic is True:\n radii = np.linspace(inner_radius,outer_radius,nrad+1)\n else:\n base = 3\n radii = np.zeros(nrad+1,dtype=np.float64)\n mm = np.linspace(np.power(inner_radius,1./base),np.power(outer_radius,1./base),nrad+1)\n for i in range(0,nrad+1):\n radii[i] = mm[i]**base\n\n\n # base = 3\n # mm = np.linspace(np.power(inner_radius,1./base),np.power(2.,1./base),nrad+1)\n # mm = np.append(mm,np.linspace(2,outer_radius,nrad+1))\n # radii = np.zeros(mm.shape[0],dtype=np.float64)\n # for i in range(0,mm.shape[0]):\n # radii[i] = mm[i]**base\n\n\n # dd = np.logspace(inner_radius,outer_radius,nrad+1,base=2)/2**np.linspace(inner_radius,outer_radius,nrad+1)\n # print dd*np.linspace(inner_radius,outer_radius,nrad+1)\n # print np.logspace(0,1.5,nrad+1,base=2)\n\n\n xy = np.zeros((radii.shape[0]*t.shape[0],2),dtype=np.float64)\n for i in range(0,radii.shape[0]):\n xy[i*t.shape[0]:(i+1)*t.shape[0],0] = radii[i]*np.cos(t)\n xy[i*t.shape[0]:(i+1)*t.shape[0],1] = radii[i]*np.sin(t)\n\n\n # REMOVE DUPLICATES GENERATED BY SIN/COS OF LINSPACE\n xy = xy[np.setdiff1d( np.arange(xy.shape[0]) , 
np.linspace(t.shape[0]-1,xy.shape[0]-1,radii.shape[0]).astype(int) ),:]\n\n connec = np.zeros((1,4),dtype=np.int64)\n\n for j in range(1,radii.shape[0]):\n for i in range((j-1)*(t.shape[0]-1),j*(t.shape[0]-1)):\n if i<j*(t.shape[0]-1)-1:\n connec = np.concatenate((connec,np.array([[i,t.shape[0]-1+i,t.shape[0]+i,i+1 ]])),axis=0)\n # connec = connec + ((i,t.shape[0]-1+i,t.shape[0]+i,i+1),)\n else:\n connec = np.concatenate((connec,np.array([[i,t.shape[0]-1+i,j*(t.shape[0]-1),(j-1)*(t.shape[0]-1) ]])),axis=0)\n # connec = connec + ((i,t.shape[0]-1+i,j*(t.shape[0]-1),(j-1)*(t.shape[0]-1)),)\n\n connec = connec[1:,:]\n # connec = np.asarray(connec[1:])\n\n\n if element_type == 'tri':\n connec_tri = np.zeros((2*connec.shape[0],3),dtype=np.int64)\n for i in range(connec.shape[0]):\n connec_tri[2*i,:] = np.array([connec[i,0],connec[i,1],connec[i,3]])\n connec_tri[2*i+1,:] = np.array([connec[i,2],connec[i,3],connec[i,1]])\n\n self.elements = connec_tri\n self.nelem = self.elements.shape[0]\n self.element_type = element_type\n # OBTAIN MESH EDGES\n self.GetBoundaryEdgesTri()\n\n elif element_type == 'quad':\n self.elements = connec\n self.nelem = self.elements.shape[0]\n self.element_type = element_type\n self.GetBoundaryEdgesQuad()\n\n # ASSIGN NODAL COORDINATES\n self.points = xy\n # IF CENTER IS DIFFERENT FROM (0,0)\n self.points[:,0] += center[0]\n self.points[:,1] += center[1]\n # ASSIGN PROPERTIES\n self.nnode = self.points.shape[0]", "def circlePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n \n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def HollowArc(self, center=(0.,0.), inner_radius=1., outer_radius=2., nrad=16, ncirc=40,\n start_angle=0., end_angle=np.pi/2., element_type=\"tri\", refinement=False, refinement_level=2):\n\n # CHECK FOR ANGLE\n PI = u\"\\u03C0\".encode('utf-8').strip()\n EPS = np.finfo(np.float64).eps\n if np.abs(start_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The starting angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n if np.abs(end_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The end angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n\n\n if np.sign(start_angle) == np.sign(end_angle):\n total_angle = np.abs(end_angle - start_angle)\n if 
np.isclose(total_angle,0.) or total_angle > 2.*np.pi:\n self.Circle(center=center, radius=radius, nrad=nrad, ncirc=ncirc, element_type=element_type)\n return\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the arc should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if refinement:\n ndivider = refinement_level\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider) + 2\n\n if ncirc % 2 != 0 or ncirc < 2:\n ncirc = (ncirc // 2)*2 + 2\n\n # radius = np.linspace(inner_radius,outer_radius,nrad)\n # points = np.zeros((1,2),dtype=np.float64)\n # for i in range(nrad):\n # t = np.linspace(start_angle,end_angle,ncirc+1)\n # x = radius[i]*np.cos(t)[::-1]\n # y = radius[i]*np.sin(t)[::-1]\n # points = np.vstack((points,np.array([x,y]).T))\n # points = points[ncirc+2:,:]\n\n radius = np.linspace(inner_radius,outer_radius,nrad-1)\n points = np.zeros((1,2),dtype=np.float64)\n for i in range(nrad-1):\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[i]*np.cos(t)[::-1]\n y = radius[i]*np.sin(t)[::-1]\n points = np.vstack((points,np.array([x,y]).T))\n points = points[1:,:]\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n makezero(points)\n self.points = points\n\n self.elements = np.zeros((1,4),dtype=np.int64)\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(nrad-2):\n aranger = np.arange(ncirc*i,ncirc*(i+1))\n elements[:,0] = aranger + i\n elements[:,1] = aranger + i + ncirc + 1\n elements[:,2] = aranger + i + ncirc + 2\n elements[:,3] = aranger + i + 1\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n self.elements = self.elements[1:,:]\n\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__\n self.CheckNodeNumbering(change_order_to=\"anti-clockwise\", verbose=False)\n\n self.points = np.ascontiguousarray(self.points)", "def __init__(self, shape, r=2, d=-1):\n self.radius = r\n if d == -1:\n self.stride = 2*r+1\n else:\n self.stride = d\n self.image_shape = shape\n self.patch_shape = ( r*2+1, 2*r+1 )" ]
[ "0.6457117", "0.53838617", "0.5200422", "0.51258683", "0.50411856", "0.50411856", "0.49835268", "0.4979422", "0.49603626", "0.4946774", "0.490671", "0.48764306", "0.48548558", "0.4835276", "0.48327264", "0.4805391", "0.4801365", "0.4793305", "0.47818324", "0.47760746", "0.4771546", "0.4755739", "0.47446305", "0.4743991", "0.47410545", "0.47394645", "0.47267845", "0.47231856", "0.47218487", "0.47169846" ]
0.59421295
1
Exports an stp file with mode = solid and wire and checks that the outputs exist and relative file sizes are correct.
def test_export_stp(self):
    os.system("rm test_solid.stp test_solid2.stp test_wire.stp")
    self.test_shape.export_stp("test_solid.stp", mode="solid")
    self.test_shape.export_stp("test_solid2.stp")
    self.test_shape.export_stp("test_wire.stp", mode="wire")
    assert Path("test_solid.stp").exists() is True
    assert Path("test_solid2.stp").exists() is True
    assert Path("test_wire.stp").exists() is True
    assert Path("test_solid.stp").stat().st_size == Path("test_solid2.stp").stat().st_size
    assert Path("test_wire.stp").stat().st_size < Path("test_solid2.stp").stat().st_size
    os.system("rm test_solid.stp test_solid2.stp test_wire.stp")
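As in the previous record, self.test_shape is an unseen fixture; the assertions themselves show that export_stp defaults to a solid export (the two solid files have equal size) and that a wire export is smaller. A sketch of the same size check as a standalone helper, assuming the same paramak-style ExtrudeCircleShape constructor and writing to a temporary directory instead of the working directory:

import tempfile
from pathlib import Path

from paramak import ExtrudeCircleShape  # assumed package; not named in the record


def compare_stp_sizes():
    """Export one shape as solid and wire STP files and compare their sizes."""
    shape = ExtrudeCircleShape(points=[(30, 0)], radius=10, distance=30)
    with tempfile.TemporaryDirectory() as tmp_dir:
        solid_path = Path(tmp_dir) / "solid.stp"
        wire_path = Path(tmp_dir) / "wire.stp"
        shape.export_stp(str(solid_path), mode="solid")  # "solid" also appears to be the default
        shape.export_stp(str(wire_path), mode="wire")
        # a wire STP stores only edges, so it should be the smaller of the two files
        assert solid_path.exists() and wire_path.exists()
        assert wire_path.stat().st_size < solid_path.stat().st_size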
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_placement(mol_params):\n snapshot = init_from_none(mol_params)\n context = hoomd.context.initialize(mol_params.hoomd_args)\n production(snapshot, context, mol_params, dynamics=True)\n\n params = {\n \"molecule\": mol_params.molecule,\n \"pressure\": mol_params.pressure,\n \"temperature\": mol_params.temperature,\n }\n outdir = Path(mol_params.output)\n print(list(outdir.glob(\"*\")))\n base_filename = \"{molecule}-P{pressure:.2f}-T{temperature:.2f}.gsd\".format(**params)\n assert (outdir / base_filename).is_file()\n assert (outdir / (\"dump-\" + base_filename)).is_file()\n assert (outdir / (\"thermo-\" + base_filename.replace(\".gsd\", \".log\"))).is_file()\n assert (outdir / (\"trajectory-\" + base_filename)).is_file()", "def test_st_final00102m4_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m4.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m4_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00101m4_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m4.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m4_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def testFilledExporter(self):\n filename_a = os.path.join(FLAGS.test_tmpdir, 'test_a.far')\n filename_b = os.path.join(FLAGS.test_tmpdir, 'test_b.far')\n FLAGS.outputs = 'a=' + filename_a + ',b=' + filename_b\n with self.assertRaises(SystemExit):\n multi_grm.run(generator_method)\n\n stored_fsts_a = _read_fst_map(filename_a)\n self.assertLen(stored_fsts_a, 1)\n self.assertTrue(stored_fsts_a['FST1'])\n\n stored_fsts_b = _read_fst_map(filename_b)\n self.assertLen(stored_fsts_b, 2)\n self.assertTrue(stored_fsts_b['FST2'])\n self.assertTrue(stored_fsts_b['FST3'])", "def test_st_final00103m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00103m/ST_final00103m2.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00103m/ST_final00103m2_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00102m5_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m5.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m5_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00103m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00103m/ST_final00103m3.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00103m/ST_final00103m3_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00102m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m3.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m3_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n 
mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_to_file_assert_filetype():\n output_file = \"./out.shp\"", "def test_st_final00101m5_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m5.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m5_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00102m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m2.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m2_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00101m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m3.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m3_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00101m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m2.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m2_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_final00102m6_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m6.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00102m/ST_final00102m6_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def export(self, output_path: str, export_format: str = 'csv', z_positive_up: bool = True, **kwargs):\n strt = perf_counter()\n print('****Exporting surface data to {}****'.format(export_format))\n fmt = export_format.lower()\n if os.path.exists(output_path):\n tstmp = datetime.now().strftime('%Y%m%d_%H%M%S')\n foldername, filname = os.path.split(output_path)\n filnm, filext = os.path.splitext(filname)\n output_path = os.path.join(foldername, '{}_{}{}'.format(filnm, tstmp, filext))\n\n if fmt == 'csv':\n self._export_csv(output_path, z_positive_up=z_positive_up)\n elif fmt == 'geotiff':\n self._export_geotiff(output_path, z_positive_up=z_positive_up)\n elif fmt == 'bag':\n self._export_bag(output_path, z_positive_up=z_positive_up, **kwargs)\n else:\n raise ValueError('fqpr_surface_v3: Unrecognized format {}'.format(fmt))\n end = perf_counter()\n print('****Export complete: {}s****'.format(round(end - strt, 3)))", "def psdExport(*args, alphaChannelIdx: Union[int, bool]=0, bytesPerChannel: Union[int, bool]=0,\n emptyLayerSet: bool=True, format: Union[AnyStr, bool]=\"\", layerName: Union[AnyStr,\n bool]=\"\", layerSetName: Union[AnyStr, bool]=\"\", outFileName: Union[AnyStr,\n bool]=\"\", preMultiplyAlpha: bool=True, psdFileName: Union[AnyStr, bool]=\"\",\n q=True, query=True, **kwargs)->Union[None, Any]:\n pass", "def test_st_final00101m6_positive(mode, save_output, output_format):\n assert_bindings(\n 
schema=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m6.xsd\",\n instance=\"sunData/SType/ST_final/ST_final00101m/ST_final00101m6_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()", "def write_mesh_file(allxyz, beck_bed):\n if SAVEMESH:\n print('+> Saving finite element mesh files...', end='')\n fname = FNAME.rsplit('.', 1)[0]\n ncol = beck_bed[0,:].size\n nrow = beck_bed[:,0].size\n nele = (nrow-1)*(ncol-1)*2\n d = compute_mesh(nrow, ncol, nele)\n h = ':NodeCount ' + str(allxyz[:,0].size) + '\\n:ElementCount ' \\\n + str(nele) + '\\n#\\n:EndHeader\\n'\n with open(fname + '_mesh.t3s', 'w') as f: \n f.write(h)\n with open(fname + '_mesh.t3s', 'a') as f:\n np.savetxt(f, allxyz, fmt='%.6e')\n np.savetxt(f, d, fmt='%d')\n f.write('\\n\\n')\n h = 'TITLE = \\\"' + fname \\\n + '_mesh\\\"\\nVARIABLES = \\\"X\\\", \\\"Y\\\", \\\"' + fname \\\n + '_mesh\\\"\\nZONE NODES=' + str(allxyz[:,0].size) + ', ELEMENTS=' \\\n + str(nele) + ', DATAPACKING=POINT, ZONETYPE=FETRIANGLE\\n'\n with open(fname + '_mesh.dat', 'w') as f: \n f.write(h)\n with open(fname + '_mesh.dat', 'a') as f:\n np.savetxt(f, allxyz, fmt='%.6e')\n np.savetxt(f, d, fmt='%d')\n f.write('\\n\\n')\n inlet = np.zeros((ncol,), dtype=int)\n outlet = np.zeros((ncol,), dtype=int)\n for i in range(ncol):\n inlet[i] = 1 + i*nrow\n outlet[i] = (1 + i)*nrow\n left = np.zeros((nrow-2,), dtype=int)\n right = np.zeros((nrow-2,), dtype=int)\n for i in range(1, nrow-1):\n left[i-1] = (ncol-2)*nrow + i + 1\n right[i-1] = (ncol-1)*nrow + i + 1\n cli = np.zeros((2*(nrow+ncol-2), 13))\n cli[:,:2] = 2\n cli[:,7] = 2\n cli[:,11] = np.concatenate((inlet, outlet, left, right))\n cli[:,12] = np.arange(2*(nrow+ncol-2)) + 1\n cli[:ncol,0] = 4\n cli[:ncol,1] = 5\n cli[:ncol,2] = 5\n cli[:ncol,7] = 4\n cli[ncol:2*ncol,0] = 5\n cli[ncol:2*ncol,1] = 4\n cli[ncol:2*ncol,2] = 4\n cli[ncol:2*ncol,7] = 4\n np.savetxt(fname + '_BC_tmp.cli', cli, fmt='%d')\n with open(fname + '_BC.cli', 'w') as out_f:\n with open(fname + '_BC_tmp.cli', 'r') as in_f:\n for i, line in enumerate(in_f):\n if i < ncol:\n s = ' #Inlet'\n elif i >= ncol and i < 2*ncol:\n s = ' #Outlet'\n else:\n s = ' #'\n out_f.write(line.rstrip('\\n') + s + '\\n')\n out_f.write('\\n')\n os.remove(fname + '_BC_tmp.cli')\n h = ':FileType bc2 ASCII EnSim 1.0' \\\n + '\\n:NodeCount ' + str(allxyz[:,0].size) \\\n + '\\n:ElementCount ' + str(nele) \\\n + '\\n:ElementType T3' \\\n + '\\n:BoundarySegmentCount 2' \\\n + '\\n# id code sectionCount startNode1 endNode1 startNode2 endNode2 tracerCode name' \\\n + '\\n:BoundarySegment 1 455 1 1 ' + str(ncol) + ' 1 1 4 \\\"Inlet\\\"' \\\n + '\\n:BoundarySegment 2 544 1 ' + str(ncol+1) + ' ' + str(2*ncol) + ' 1 1 4 \\\"Outlet\\\"' \\\n + '\\n:ShorelineCount 1' \\\n + '\\n:ShorelineNodeCount ' + str(2*(nrow+ncol-2)) \\\n + '\\n:EndHeader' \\\n + '\\n:BeginNodes ' + str(allxyz[:,0].size) + '\\n'\n with open(fname + '_BC.bc2', 'w') as f: \n f.write(h)\n with open(fname + '_BC.bc2', 'a') as f:\n xyz = np.copy(allxyz)\n xyz[:,2] = 0\n np.savetxt(f, xyz, fmt='%.6e')\n f.write(':EndNodes\\n:BeginElements ' + str(nele) + '\\n')\n np.savetxt(f, d, fmt='%d')\n f.write(':EndElements\\n:BeginTable ' + str(2*(nrow+ncol-2)) + ' 15\\n')\n with open(fname + '_BC.cli', 'r') as g:\n lines = g.read()\n f.write(lines[:-1])\n 
f.write(':EndTable\\n\\n')\n print(' [done]')", "def test_atm_psf_save_file(self):\n psf_file = os.path.join(self.test_dir, 'save_atm_psf.pkl')\n config = {\n 'psf': {\n 'type': 'AtmosphericPSF'\n },\n 'input': {\n 'atm_psf': {\n 'airmass': self.opsim_data['airmass'],\n 'rawSeeing': self.opsim_data['rawSeeing'],\n 'band': self.opsim_data['band'],\n 'screen_scale': 6.4,\n 'boresight': {\n 'type': 'RADec',\n 'ra': { 'type': 'Degrees', 'theta': self.opsim_data['rightascension'], },\n 'dec': { 'type': 'Degrees', 'theta': self.opsim_data['declination'], }\n },\n 'save_file': psf_file\n }\n },\n 'image_pos': galsim.PositionD(0,0), # This would get set appropriately during\n # normal config processing.\n 'image' : {\n 'random_seed': 1234,\n 'wcs': {\n 'type' : 'Tan',\n 'dudx' : 0.2,\n 'dudy' : 0.,\n 'dvdx' : 0.,\n 'dvdy' : 0.2,\n 'ra' : '@input.atm_psf.boresight.ra',\n 'dec' : '@input.atm_psf.boresight.dec',\n }\n }\n }\n\n if os.path.isfile(psf_file):\n os.remove(psf_file)\n\n config['wcs'] = galsim.config.BuildWCS(config['image'], 'wcs', config)\n config1 = galsim.config.CopyConfig(config)\n config2 = galsim.config.CopyConfig(config)\n\n # The first time, it will build the psf from scratch and save the screens.\n t0 = time.time()\n galsim.config.ProcessInput(config1)\n t1 = time.time()\n\n assert os.path.isfile(psf_file)\n\n # The second time, it will be faster, since it loads the screens from the file.\n t2 = time.time()\n galsim.config.ProcessInput(config2)\n t3 = time.time()\n\n print('Times = ',t1-t0,t3-t2)\n assert t1-t0 > t3-t2\n\n # Both input objects will make the same PSF at the same location:\n psf1 = galsim.config.BuildGSObject(config1, 'psf')[0]\n psf2 = galsim.config.BuildGSObject(config2, 'psf')[0]\n assert psf1 == psf2", "def valid_and_export(template, dashname):\n\n if not json_validation(template):\n print('Bad json format for ' + dashname + ' grafana dashboard')\n else:\n if export_file(template, dashname + '.json'):\n print('Successfully generated dashboard: ' + dashname)\n else:\n print('Error during export dashboard: ' + dashname)", "def create_output(filename):\n if not os.path.isfile(filename):\n pd.DataFrame(columns=pre.pcap.COLUMNS).set_index(pre.pcap.INDEX) \\\n .to_csv(ARGS.out, compression='gzip')\n print(\"Created empty output file {}\".format(ARGS.out))\n else:\n size = os.path.getsize(filename) / 1.0e+6\n print(\"Found existing output file which will be appended {} ({:.2F} MB)\"\n .format(ARGS.out, size))", "def test_export():\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.export(\"test_export.py\")\n assert False # Should be unreachable\n except ValueError:\n pass", "def save(sans, describer, minParams, minPars, stats, location, fitInfo, description):\n\n while path.exists(location) == False:\n print('error: file path does not exist. 
Please input a valid file path')\n location = input('file path: ')\n\n # for idx, char in enumerate(sans.expData.shear[0]):\n # if char != ' ':\n # continue\n # else:\n # shearIdx = idx\n # break\n\n # Build name for modelled scattering data\n # shear = sans.expData.shear[0][0:shearIdx]\n shear = sans.expData.shear[0]\n\n name = sans.expData.sample[0] + '_' + shear + 'ps'\n post1 = '_sim'\n type1 = '.dat'\n\n saveName1 = name + post1 + describer + '_'\n # versionNum1 = input(\"Input a version number: \" )\n versionNum1 = description\n\n # Write modelled scattering data to 3 column dat file\n write_3_column(location + saveName1 + versionNum1 + type1, sans)\n\n # Build name for modelled scattering data statistics\n post2 = '_simInfo'\n type2 = '.txt'\n\n saveName2 = name + post2 + describer + '_'\n\n output = []\n\n # Build output file\n output.append('qmin = ' + str(sans.qmin))\n output.append('ftol = ' + str(fitInfo[0]))\n output.append('method = ' + str(fitInfo[1]))\n output.append(' ')\n\n for key, val in minParams.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n output.append(' ')\n\n output.append(' static parameters ')\n for key, val in sans.staticPars.items():\n if type(val) == str:\n output.append(str(key) + '=' + str(val) + ',')\n else:\n output.append(str(key) + '=' + str(round(val, sans.dp)) + ',')\n\n output.append(' ')\n\n output.append('Fitting_performed_over_the_following_parameters:')\n for key in minPars.keys():\n output.append(str(key))\n\n output.append('Returned_the_following_goodness_of_fit_measures:')\n output = output + stats\n output.append(str(datetime.datetime.now()))\n\n # Write output to txt file\n with open(location + saveName2 + versionNum1 + type2, 'w') as file:\n for lines in output:\n file.write(lines)\n file.write(\"\\n\")\n\n print('file was saved with filename: ' + saveName1 + versionNum1 + type1)\n return", "def test_export(filename, folder, space_type):\n grid = bempp.api.shapes.cube(h=0.5)\n space = bempp.api.function_space(grid, *space_type)\n function = bempp.api.GridFunction(\n space, coefficients=np.random.rand(space.global_dof_count)\n )\n bempp.api.export(os.path.join(folder, filename), grid_function=function)", "def export_to_file(self):\r\n return True", "def test_st_name00401m_st_name00401m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_name/ST_name00401m/ST_name00401m.xsd\",\n instance=\"sunData/SType/ST_name/ST_name00401m/ST_name00401m1_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def check_file_valid_output(config, modname, fname, fdict, indent=''):\n\n cnts = [0] * NUMCNTS\n msginfo = \"module %s, %s, %s\" % \\\n (modname, pfwdefs.SW_OUTPUTS, fname)\n\n # check that it has pfwdefs.DIRPAT : err\n # can I check that all values for pfwdefs.DIRPAT exist?\n if pfwdefs.DIRPAT not in fdict:\n error(indent, \"%s - Missing %s\" % (msginfo, pfwdefs.DIRPAT))\n cnts[ERRCNT_POS] += 1\n else:\n # todo: check that all values for pfwdefs.DIRPAT exist\n pass\n\n # check that it has filepat, filename (required)\n if pfwdefs.SW_FILEPAT not in fdict and \\\n pfwdefs.FILENAME not in fdict and \\\n 'fullname' not in fdict:\n error(indent, \"%s - Missing terms needed to determine output filename\" % \\\n (msginfo))\n cnts[ERRCNT_POS] += 1\n else:\n\n # check that any given filename pattern has a 
definition\n if pfwdefs.SW_FILEPAT in fdict:\n cnts2 = check_filepat_valid(config, fdict[pfwdefs.SW_FILEPAT],\n modname, fname, indent + ' ')\n cnts = [x + y for x, y in zip(cnts, cnts2)] # increment counts\n\n # check that it has filetype : err\n if pfwdefs.FILETYPE not in fdict:\n error(indent, \"%s - Missing %s\" % (msginfo, pfwdefs.FILETYPE))\n cnts[ERRCNT_POS] += 1\n elif fdict[pfwdefs.FILETYPE] not in config[fmdefs.FILETYPE_METADATA]:\n error(indent, \"%s - Invalid %s (%s)\" % \\\n (msginfo, pfwdefs.FILETYPE, fdict[pfwdefs.FILETYPE]))\n cnts[ERRCNT_POS] += 1\n\n return cnts", "def test_st_basetd00201m_st_base_td00201m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_baseTD/ST_baseTD00201m/ST_baseTD00201m.xsd\",\n instance=\"sunData/SType/ST_baseTD/ST_baseTD00201m/ST_baseTD00201m1_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def makeSpkSetupFile(leapSecondFilePath, outputPath):\n\n # If the file already exists, delete it and rewrite it.\n if os.path.exists(outputPath):\n os.remove(outputPath)\n\n# print 'Generating LRONAC compatible .pvl file ' + halfResFilePath\n f = open(outputPath, 'w')\n f.write(\"\\\\begindata\\n\")\n f.write(\"INPUT_DATA_TYPE = 'STATES'\\n\")\n f.write(\"OUTPUT_SPK_TYPE = 13\\n\")\n f.write(\"OBJECT_ID = -85\\n\") # LRO\n f.write(\"CENTER_ID = 301\\n\") # Moon\n f.write(\"REF_FRAME_NAME = 'J2000'\\n\")\n f.write(\"PRODUCER_ID = 'Lronac Pipeline'\\n\")\n f.write(\"DATA_ORDER = 'epoch x y z vx vy vz'\\n\")\n f.write(\"DATA_DELIMITER = ','\\n\")\n f.write(\"LEAPSECONDS_FILE = '\" + leapSecondFilePath + \"'\\n\")\n f.write(\"LINES_PER_RECORD = 1\\n\")\n f.write(\"TIME_WRAPPER = '# ETSECONDS'\\n\")\n #f.write(\"EPOCH_STR_LENGTH = 16\\n\")\n f.write(\"INPUT_DATA_UNITS = ('ANGLES=DEGREES' 'DISTANCES=km')\\n\")\n f.write(\"POLYNOM_DEGREE = 11\\n\")\n f.write(\"SEGMENT_ID = 'SPK_STATES_13'\\n\")\n# f.write(\"INPUT_DATA_FILE = 'spkDataFile.txt'\")\n# f.write(\"OUTPUT_SPK_FILE = '/home/smcmich1/testSpkFile.bsp'\")\n f.write(\"\\\\begintext\\n\")\n f.close()" ]
[ "0.574924", "0.56949127", "0.56544244", "0.56111264", "0.55992764", "0.5592849", "0.5569697", "0.5554061", "0.5532291", "0.5523748", "0.55029756", "0.55000603", "0.54876405", "0.5481802", "0.5458784", "0.5409814", "0.5402028", "0.5396599", "0.5384231", "0.5362747", "0.53096676", "0.5288716", "0.52872175", "0.523987", "0.5238897", "0.5203372", "0.5201954", "0.5193864", "0.5175276", "0.51521516" ]
0.7696644
1
Matches if all of the given matchers are satisfied by any elements of the sequence.
def has_items(*items): matchers = [] for item in items: matchers.append(wrap_matcher(item)) return IsSequenceContainingEvery(*matchers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def any_of(*args):\n class AnyOfMatcher:\n def __init__(self, values):\n self.values = values\n\n def __eq__(self, other):\n return any(map(lambda v: v == other, self.values))\n\n def __ne__(self, other):\n return all(map(lambda v: v != other, self.values))\n\n if not args:\n raise ValueError(\n \"at least one argument should be provided for any_of matcher\")\n return AnyOfMatcher(args)", "def ANY(*R):\n return lambda l, i: any(r(l, i) for r in R)", "def _match_regex_list(subject, expressions):\n for expr in expressions:\n if re.search(expr, subject):\n return True\n return False", "def all(seq, pred=None):\n for elem in itertoos.ifilterfalse(pred, seq):\n return False\n return True", "def any(seq, pred=None):\n for elem in itertools.ifilter(pred, seq):\n return True\n return False", "def forall(seq,cond):\n for x in seq:\n if not cond(x): return False\n return True", "def forall(seq,cond):\n for x in seq:\n if not cond(x): return False\n return True", "def contains_any(self, *items):\n return any(item in self for item in items)", "def contains_all(self, *items):\n return all(item in self for item in items)", "def any_values(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield any(v)", "def match(self, item):\n if not self._unused_matchers:\n raise RuntimeError('Matcher exhausted, no more matchers to use')\n\n for matcher in self._unused_matchers:\n if matcher(item):\n self._used_matchers.append(matcher)\n break\n\n if not self._unused_matchers:\n # All patterns have been matched\n return True\n\n return False", "def _match_all(abs_dir, matching, not_matching):\n num_not_matching = 0\n\n for expression in matching:\n if not fnmatch.fnmatch(abs_dir, expression):\n num_not_matching += 1\n\n if num_not_matching == len(matching):\n return False\n\n for expression in not_matching:\n if fnmatch.fnmatch(abs_dir, expression):\n return False\n\n return True", "def any_of(*conditions):\n def check():\n for c in conditions:\n if c():\n return True\n return False\n return check", "def anyof(*what):\n return P(lambda y: y in what)", "def all(*args):\n if not args:\n raise ValueError(\"Any must take at least 1 argument\")\n if len(args) == 1:\n return args[0]\n ret = _make.And(args[0], args[1])\n for i in range(2, len(args)):\n ret = _make.And(ret, args[i])\n return ret", "def containsAll(self, *args):\n pass", "def search(self, *queries):\n haystack = self.make_haystack()\n # re.search() returns a match object or None. 
The call to any() will\n # cast everything to booleans.\n return [any([re.search(query, data) for data in haystack])\n for query in queries]", "def any(self, values):\n return self.aggregate(values, \"any\")", "def match(self, *ial):\n for b, c in ial:\n assert len(b) == len(c), \"parameter length mismatch\"\n if self._.d != len(b):\n continue\n if len(self._match(b, c)) > 0:\n return True\n return False", "def all_of(*conditions):\n def check():\n for c in conditions:\n if not c():\n return False\n return True\n return check", "def all(selectors, subitem): #pylint: disable=redefined-builtin\n for sel in selectors:\n if isinstance(sel, list):\n passed = False\n for subsel in sel:\n if subsel(subitem):\n passed = True\n break\n if not passed:\n return False\n elif not sel(subitem):\n return False\n return True", "def any_of(*args:List[str]) -> str:\n return group(\"|\".join(args))", "def fts_match_all(self, fts, inv, normalize=True):\n return all([self.fts(s, normalize) >= fts for s in inv])", "def any_yields(functions, value):\n return any(f(value) for f in functions)", "def fts_match_any(self, fts, inv, normalize=True):\n return any([self.fts(s, normalize) >= fts for s in inv])", "def intersection(*seqs):\n return (item for item in seqs[0]\n if all(item in seq for seq in seqs[1:]))", "def match_all(self):\n return self._match_all", "def all(self, predicate):\n return all(predicate(item) for item in self)", "def _check_items(cls, sequence):\n all([cls._check_item(x) for x in sequence])", "def matches(self):\n pass" ]
[ "0.6793648", "0.62049425", "0.6130358", "0.61209214", "0.60859215", "0.5958338", "0.5958338", "0.5905211", "0.58609545", "0.58154076", "0.58081245", "0.5749853", "0.5748864", "0.5746182", "0.5741673", "0.5715509", "0.5714579", "0.57087773", "0.56990945", "0.56659824", "0.5553601", "0.55339086", "0.5512037", "0.55088425", "0.5497342", "0.5475448", "0.5436372", "0.5435709", "0.5428755", "0.5421259" ]
0.7360219
0
Gets the access_window of this PopSettings. The range of messages which are accessible via POP.
def access_window(self): return self._access_window
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def access_window(self, access_window):\n allowed_values = [\"accessWindowUnspecified\", \"allMail\", \"disabled\", \"fromNowOn\"]\n if access_window not in allowed_values:\n raise ValueError(\n \"Invalid value for `access_window` ({0}), must be one of {1}\"\n .format(access_window, allowed_values)\n )\n\n self._access_window = access_window", "def cutover_window(self):\n return self._cutover_window", "def GetWindow(self):\r\n\r\n return self.window", "def window(self):\n return self._window", "def window(self):\n return self._window", "def GetWindow(self):\r\n\r\n return self._wnd", "def settings_access_level(self):\n return self._settings_access_level", "def GetWindow(self):\n\n return self._window", "def window(self):\n\tif getattr(self.android.settings, 'LV_AVOID_FOCUSED_COMMAND',\n\t\t\t\tself.android.internal.device.google_experience):\n\t\treturn window.previous(self)\n\n def fallback_window_command():\n try:\n w=self.android.internal.transport.view_server_query( 'FOCUSED\\n' )[0]\n except:\n w=\"\"\n return w\n\n try:\n # can't use GET_FOCUS command in secure builds, so fall back to FOCUSED command\n if self.android.device.is_secure_build():\n raise Exception()\n\t w=self.android.internal.transport.view_server_query('GET_FOCUS\\n')[0].split()[1]\n except:\n w = fallback_window_command()\n\n\tself.android.log.verbose(android.ui.TAG, \"Current window: '%s'\" % w)\n\treturn w", "def get_window_property(self, connection, window, atom):\n self.logger.debug(\"Getting property %s from window %s\", atom, window)\n cookie = connection.core.GetProperty(\n False,\n window,\n atom,\n GetPropertyType.Any,\n 0,\n 2 ** 32 - 1\n )\n reply = cookie.reply()\n return self.get_property_value(reply)", "def get_account_limits(self, receive_window: Optional[int] = None):\n api_params = {\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/account/limits', params=api_params)", "def get_window(self):\n if self.isWindow:\n return self\n else:\n return self.window", "def chat(self):\n return self._get(\"chat\")", "def wm(self):\n return self.position", "def window(self) -> Optional[pulumi.Input['MaintenanceWindowArgs']]:\n return pulumi.get(self, \"window\")", "def access(self):\n return self._access", "def access(self):\n return self._access", "def size_with_window(self):\n return self.container['size_with_window']", "def win(self):\n return self._get(\"win\")", "def refrwindow(self):\n return self.attribute('RW')", "def current_window(self):\n return self._impl.get_current_window().interface", "def IncludeMaximumEndPopMsd(self):\r\n\t\treturn self._get_attribute('includeMaximumEndPopMsd')", "def get_window(self):\n \n # get window from browser\n return self.browser.get_window()", "def __window_GetAttention(self):\n pass", "def get_current_viewport(self,*args):\n vp_string = self.run_cmd(['xprop', '-root', \n '-notype', '_NET_DESKTOP_VIEWPORT'])\n vp_list=vp_string.decode().strip().split('=')[1].split(',')\n return tuple( int(i) for i in vp_list )", "def GetWindow(self, idx):\n assert idx < len(self._windows)\n return self._windows[idx]", "def get_window_size(self):\n return self.__window_size", "def window(self) -> Optional[pulumi.Input['TimeWindowArgs']]:\n return pulumi.get(self, \"window\")", "def inclusive_maximum(self):\n\n return self._inclusive_maximum", "def get_below(self):\n current_index = ALL_WINDOWS.index(self)\n if current_index == 0:\n return BASE_SCREEN\n\n return 
ALL_WINDOWS[current_index - 1]" ]
[ "0.5653705", "0.53341067", "0.5250064", "0.51709527", "0.51709527", "0.5137791", "0.5124928", "0.502916", "0.50011027", "0.49883795", "0.49689424", "0.49561146", "0.49306852", "0.49140164", "0.48545784", "0.48499143", "0.48499143", "0.48053774", "0.48042098", "0.47514024", "0.47494546", "0.47439954", "0.4735316", "0.4728942", "0.4724215", "0.47235528", "0.47221494", "0.46850473", "0.46635664", "0.46586397" ]
0.6474205
0
Sets the access_window of this PopSettings. The range of messages which are accessible via POP.
def access_window(self, access_window): allowed_values = ["accessWindowUnspecified", "allMail", "disabled", "fromNowOn"] if access_window not in allowed_values: raise ValueError( "Invalid value for `access_window` ({0}), must be one of {1}" .format(access_window, allowed_values) ) self._access_window = access_window
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_window_rect(self, value: bool):\n self._caps['setWindowRect'] = value", "def SetWindow(self, w):\r\n\r\n self.window = w", "def handleMonitorSettings(self):\n winPos = self.mainWindow.pos()\n popPos = QtCore.QPoint(winPos.x() + (self.mainWindow.width() - self.settingsPopup.width()) / 2, \n winPos.y() + self.mainWindow.height() / 2)\n self.monitorPopUp.move(popPos)\n self.monitorPopUp.show()", "def access_window(self):\n return self._access_window", "def settings_access_level(self, settings_access_level):\n allowed_values = [\"SYSTEM\", \"SHOONYA\", \"NONE\"]\n if settings_access_level not in allowed_values:\n raise ValueError(\n \"Invalid value for `settings_access_level` ({0}), must be one of {1}\"\n .format(settings_access_level, allowed_values)\n )\n\n self._settings_access_level = settings_access_level", "def DoSetPopupControl(self, popup):\n pass", "def window_size(self, window_size):\n\n self._window_size = window_size", "def set_window_position(self, left, top, right, bottom, state, is_floating):\n self._set_window_position(left, top, right, bottom, state, is_floating)", "def set_connectionwindow(value):\n environ[\"XRD_CONNECTIONWINDOW\"] = str(value)", "def set_window_status(window, start):\r\n window['-FOLDER-'](disabled=not start)\r\n window['-BROWSE-'](disabled=not start)\r\n window['-ADDNOTE-'](disabled=start)\r\n window['-START-'](disabled=not start)\r\n window['-STOP-'](disabled=start)\r\n\r\n if start:\r\n window['-TOPIC-']('', disabled=not start)\r\n window['-STARTH-']('', disabled=not start)\r\n window['-STARTM-']('', disabled=not start)\r\n window['-STOPH-']('', disabled=start)\r\n window['-STOPM-']('', disabled=start)\r\n else:\r\n window['-TOPIC-'](disabled=not start)\r\n window['-STARTH-'](disabled=not start)\r\n window['-STARTM-'](disabled=not start)\r\n window['-STOPH-'](disabled=start)\r\n window['-STOPM-'](disabled=start)", "def set_window(window: Optional[\"Window\"]) -> None:\n global _window\n _window = window", "def set_access_category(self, access_category):\n if access_category == \"video\":\n self.txop_limit = 4\n self.aifs = 34\n self.n = 8\n self.kp1 = 9\n self.mind = 32\n self.maxd = 95\n elif access_category == \"best_effort\":\n \tself.txop_limit = 6\n \tself.aifs = 43\n \tself.n = 16\n \tself.kp1 = 17\n \tself.mind = 41\n \tself.maxd = 176\n elif access_category == \"background\":\n \tself.txop_limit = 6\n \tself.aifs = 79\n \tself.n = 16\n \tself.kp1 = 17\n \tself.mind = 77\n \tself.maxd = 212\n elif access_category == \"voice\":\n \tself.txop_limit = 2\n \tself.aifs = 34\n \tself.n = 4\n \tself.kp1 = 5\n \tself.mind = 32\n \tself.maxd = 59\n else:\n raise Exception(\"Unknown access category {}\".format(access_category))\n self.access_category = access_category\n\n self.p_max = np.asarray(np.zeros(self.kp1))\n self.p_max[0] = 0.05\n self.p_max[self.kp1 - 1] = 1\n\n if self.access_category == \"voice\":\n \tfor i in range(1, 4):\n \t\tself.p_max[i] = self.p_max[0] + i * 0.25\n elif self.access_category == \"video\":\n \tself.p_max[1] = 0.18\n \tfor i in range(2, 7):\n \t\tself.p_max[i] = self.p_max[1] + (i - 1) * 0.125\n \tself.p_max[self.kp1 - 2] = 1\n elif self.access_category == \"best_effort\" or self.access_category == \"background\":\n \tself.p_max[1] = 0.12\n \tfor i in range(2, 16):\n \t\tself.p_max[i] = self.p_max[1] + (i - 1) * 0.0625\n else:\n raise Exception(\"Unknown access category {}\".format(access_category))\n print(\"Running analysis for access category {}\".format(self.access_category))", "def set_window(self, handle):\n 
pass", "def set_popup_mode(self, mode: str):\n if mode not in POPUP_MODES:\n raise ValueError(\"Invalid mode.\")\n self.setPopupMode(POPUP_MODES[mode])", "def setAllowScreenReaders(self,value):\n self.PDFreactorConfiguration.in1[\"allowScreenReaders\"] = value", "def windows(self, windows):\n\n self._windows = windows", "def showPDataSettings(self):\n winPos = self.mainWindow.pos()\n popPos = QtCore.QPoint(winPos.x() + (self.mainWindow.width() - self.settingsPopup.width()) / 2, \n winPos.y() + self.mainWindow.height() / 2)\n self.settingsPopup.move(popPos)\n self.settingsPopup.show()", "def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value >= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")", "def open(self, position=MAX_POSITION):\n self.gripper_io.set_signal_value(\"position_m\", position)", "def setPop(self, pop: Population):\n self.population = pop\n self.population_size = pop.size()", "def _setwin(self, win):\n\t\tself.win = win", "def open_settings_window(self):\n self.screen_blank_timer.stop()\n self.settings_window.show()\n # Ensure the window is raised in top, useful when main window is fullscreened\n # and settings window is accidentally sent to the background\n getattr(self.settings_window, \"raise\")()\n self.settings_window.activateWindow()\n event_logger.debug(\"Settings window opened\")", "def GetAccessApprovalSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def UpdateAccessApprovalSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def set_window_id(self, window_id):\r\n self.window_id = window_id", "def SetWindowSize(self, size):\n self.WINDOW_SIZE = size", "def network_access(self, network_access):\n\n self._network_access = network_access", "def set_pop_cap(self):\n v = simpledialog.askinteger(\n \"Settings\", \"Population cap:\",\n initialvalue=self.lnp.settings.popcap, parent=self.root)\n if v is not None:\n self.lnp.set_option('popcap', v)\n self.update_displays()", "def setwin(self, win):\n\t\tself._setwin(win)", "def Window(self, w):\r\n\r\n self.window = w\r\n return self" ]
[ "0.486651", "0.48349455", "0.47570282", "0.46907958", "0.4688979", "0.45993996", "0.4583975", "0.45784625", "0.4382934", "0.43772432", "0.43756816", "0.43718892", "0.4349676", "0.43433076", "0.43346232", "0.43279564", "0.43230143", "0.43029824", "0.43011537", "0.42961788", "0.4289692", "0.42562652", "0.42365748", "0.4230365", "0.42290032", "0.4219549", "0.42130846", "0.4196994", "0.4191813", "0.41691247" ]
0.73256314
0
Gets the disposition of this PopSettings. The action that will be executed on a message after it has been fetched via POP.
def disposition(self): return self._disposition
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disposition(self, disposition):\n allowed_values = [\"archive\", \"dispositionUnspecified\", \"leaveInInbox\", \"markRead\", \"trash\"]\n if disposition not in allowed_values:\n raise ValueError(\n \"Invalid value for `disposition` ({0}), must be one of {1}\"\n .format(disposition, allowed_values)\n )\n\n self._disposition = disposition", "def disposition_time(self) -> str:\n return pulumi.get(self, \"disposition_time\")", "def get_popup_mode(self) -> str:\n return POPUP_MODES.inv[self.popupMode()]", "def post(self):\n return self.get_request_handler(request.headers).create_new_disposition(request)", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def get_action(self):\n return self.__action", "def action(self):\n return self._get_field(\"action\")", "def get(self):\n\n return self.get_request_handler(request.headers).get_all_dispositions()", "def prefect_mandate(self):\n return self._prefect_mandate", "def comeback(self):\n return self._get(\"comeback\")", "def message_type(self):\n return self._message_type", "def message_type(self):\n return self._message_type", "def Mime(self):\n if self.force_auto_sync:\n self.get('Mime')\n return self._Mime", "def _deliveryPref(self):\n # If the level of the object is below the Preference level,\n # recursively calls base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(DELIVERY_PREF, self._delivery_pref.level)[0]", "def get_action(self):\n return self.current_action", "def get_message_type(self):\n return self.message_type", "def get_last_action_proba(self):\n return self._action_list[-1]['proba']", "def action_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"action_type\")", "def delivery(self):\n return self._delivery", "def message_type(self):\n return self.type", "def message_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"message_type\")", "def get_message_triggered_status(self, message):\n if message in self.__messages_to_trigger:\n return self.__messages_to_trigger[message]\n else:\n return None", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")" ]
[ "0.58163434", "0.558854", "0.51367563", "0.49677005", "0.4939197", "0.4939197", "0.4939197", "0.4939197", "0.4939197", "0.4939197", "0.49037772", "0.4865473", "0.48381123", "0.47464013", "0.47021428", "0.46780324", "0.46780324", "0.46662363", "0.46597967", "0.46557334", "0.46361864", "0.46296757", "0.4624202", "0.46061504", "0.46019286", "0.45768046", "0.4541336", "0.45383617", "0.45383617", "0.45383617" ]
0.7230888
0
Sets the disposition of this PopSettings. The action that will be executed on a message after it has been fetched via POP.
def disposition(self, disposition): allowed_values = ["archive", "dispositionUnspecified", "leaveInInbox", "markRead", "trash"] if disposition not in allowed_values: raise ValueError( "Invalid value for `disposition` ({0}), must be one of {1}" .format(disposition, allowed_values) ) self._disposition = disposition
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disposition(self):\n return self._disposition", "def set_popup_mode(self, mode: str):\n if mode not in POPUP_MODES:\n raise ValueError(\"Invalid mode.\")\n self.setPopupMode(POPUP_MODES[mode])", "def delivery_mode(self, delivery_mode):\n\n self._delivery_mode = delivery_mode", "def post(self):\n return self.get_request_handler(request.headers).create_new_disposition(request)", "def prefect_mandate(self, prefect_mandate):\n\n self._prefect_mandate = prefect_mandate", "def disposition_time(self) -> str:\n return pulumi.get(self, \"disposition_time\")", "async def deliver_order(self, behaviour):\n\n assert self.loaded_order is not None\n assert self.msg_order is not None\n assert self.order is not None\n\n reply = self.msg_order.make_reply()\n reply.set_metadata('performative', 'inform')\n await send(behaviour, reply)\n self.loaded_order = None\n self.msg_order = None\n self.order = None\n self.idle = True", "def set_popup_mode(self, mode):\n widget = self.widget\n q_mode = POPUP_MODES[mode]\n if q_mode == widget.popupMode():\n return\n with self.geometry_guard():\n widget.setPopupMode(q_mode)\n widget.setIcon(widget.icon()) # force-resets the internal cache", "def set_cover_position(self, **kwargs):\n self.action(\"REVEAL\", percentage=kwargs.get(ATTR_POSITION))", "def content_disposition_header(\n disptype: str, quote_fields: bool = True, _charset: str = \"utf-8\", **params: str\n) -> str:\n if not disptype or not (TOKEN > set(disptype)):\n raise ValueError(\"bad content disposition type {!r}\" \"\".format(disptype))\n\n value = disptype\n if params:\n lparams = []\n for key, val in params.items():\n if not key or not (TOKEN > set(key)):\n raise ValueError(\n \"bad content disposition parameter\" \" {!r}={!r}\".format(key, val)\n )\n if quote_fields:\n if key.lower() == \"filename\":\n qval = quote(val, \"\", encoding=_charset)\n lparams.append((key, '\"%s\"' % qval))\n else:\n try:\n qval = quoted_string(val)\n except ValueError:\n qval = \"\".join(\n (_charset, \"''\", quote(val, \"\", encoding=_charset))\n )\n lparams.append((key + \"*\", qval))\n else:\n lparams.append((key, '\"%s\"' % qval))\n else:\n qval = val.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n lparams.append((key, '\"%s\"' % qval))\n sparams = \"; \".join(\"=\".join(pair) for pair in lparams)\n value = \"; \".join((value, sparams))\n return value", "def set_attachment_content_disposition(request, filename, file=None):\n if not filename:\n return\n\n if file:\n contenttype = get_contenttype(file)\n request.response.setHeader(\"Content-Type\", contenttype)\n request.response.setHeader(\"Content-Length\", file.getSize())\n\n user_agent = request.get('HTTP_USER_AGENT', '')\n if 'MSIE' in user_agent:\n filename = quote(filename)\n request.response.setHeader(\n \"Content-disposition\", 'attachment; filename=%s' % filename)\n\n else:\n request.response.setHeader(\n \"Content-disposition\", 'attachment; filename=\"%s\"' % filename)", "def set_pop_cap(self):\n v = simpledialog.askinteger(\n \"Settings\", \"Population cap:\",\n initialvalue=self.lnp.settings.popcap, parent=self.root)\n if v is not None:\n self.lnp.set_option('popcap', v)\n self.update_displays()", "def SetPop(self, fname, var):\n\n\t\tself._pop_fname = fname\n\t\tself._pop_var = var", "def set_emissivity(self, emis):\n message = b'SEP ' + str(emis).encode() + self.end_mess_bytes\n if self.gui_message is not None:\n self.gui_message.message('[LSP] Setting emissivity: {}'.format(emis))\n else:\n print('[LSP] Setting emissivity: {}'.format(emis))\n 
self.sock.sendall(message)", "def presentation(self, presentation):\n\n self._presentation = presentation", "def set_presentation(self, presentation):\n self.presentation = presentation", "def on_accepted(self, delivery, message, disposition):\n filename = self.abspath('deliveries', '%s.dstate' % delivery.tag)\n os.unlink(filename)", "def handle_preset_mode_received(msg: ReceiveMessage) -> None:\n preset_mode = self.render_template(msg, CONF_PRESET_MODE_VALUE_TEMPLATE)\n if preset_mode in [PRESET_NONE, PAYLOAD_NONE]:\n self._attr_preset_mode = PRESET_NONE\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)\n return\n if not preset_mode:\n _LOGGER.debug(\"Ignoring empty preset_mode from '%s'\", msg.topic)\n return\n if (\n not self._attr_preset_modes\n or preset_mode not in self._attr_preset_modes\n ):\n _LOGGER.warning(\n \"'%s' received on topic %s. '%s' is not a valid preset mode\",\n msg.payload,\n msg.topic,\n preset_mode,\n )\n else:\n self._attr_preset_mode = str(preset_mode)\n\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)", "def content_header_disposition(self, query, http_s_obj):\n if (query and (query.find('inline') != -1)) or (http_s_obj.getStatusCode() != 200):\n http_s_obj.setHeader('Content-Disposition', 'inline')", "def set_autofeed_mode(self, mode):\n self._info(\"set_autofeed_mode\")\n self.parent.controller.set_autofeed_mode(mode)", "async def async_set_cover_position(self, **kwargs):\n position = kwargs.get(ATTR_POSITION)\n\n async with aiohttp.ClientSession() as session:\n await session.post(\n self._url + \"/actions/set_position\",\n json={\"set_position\": {\"input\": {\"position\": position}}},\n )", "def setAction(self, value):\n return self._set(action=value)", "def send_by_email(self):\r\n ir_model_data = self.env['ir.model.data']\r\n try:\r\n template_id = ir_model_data.get_object_reference(\r\n 'ng_church', 'email_template_church_pledge_report')[1]\r\n except ValueError:\r\n template_id = False\r\n try:\r\n compose_form_id = ir_model_data.get_object_reference(\r\n 'mail', 'email_compose_message_wizard_form')[1]\r\n except ValueError:\r\n compose_form_id = False\r\n ctx = dict(self._context)\r\n ctx.update({\r\n 'default_model': 'church.pledge',\r\n 'default_res_id': self._ids[0],\r\n 'default_use_template': bool(template_id),\r\n 'default_template_id': template_id,\r\n 'default_composition_mode': 'comment',\r\n })\r\n return {\r\n 'name': _('Compose Email'),\r\n 'type': 'ir.actions.act_window',\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'mail.compose.message',\r\n 'views': [(compose_form_id, 'form')],\r\n 'view_id': compose_form_id,\r\n 'target': 'new',\r\n 'context': ctx,\r\n }", "def delivery(self, value: dict):\n self._delivery = value\n # Ensure the correct key is updated and object is set as dirty\n flag_modified(self, '_delivery')", "def mode(self, mode):\n self.set_mode(mode)", "def set_receive_mail(self):\n self.__mail = True", "def mode_manual(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Piloting Bot\")\n self.__check_move()", "def set_message(msg, m_type):\n cherrypy.session.acquire_lock()\n cherrypy.session[KEY_MESSAGE] = msg\n cherrypy.session[KEY_MESSAGE_TYPE] = m_type\n cherrypy.session.release_lock()", "def message_delivery(self, delivery: MessageDelivery):\n self._message_delivery = delivery", "def set_probability_mode(self, mode):\n self.grid.probability_mode = mode" ]
[ "0.62285453", "0.50006455", "0.4993129", "0.4591924", "0.4530001", "0.45073426", "0.4486179", "0.44548517", "0.4436408", "0.43279582", "0.4305703", "0.42922822", "0.42594072", "0.42561954", "0.41788253", "0.4150163", "0.41420588", "0.41411808", "0.40983132", "0.40756333", "0.40745032", "0.40386388", "0.4004753", "0.39633054", "0.39619952", "0.3951611", "0.39426467", "0.39410147", "0.394096", "0.39263678" ]
0.7115136
0
Given the urls already in the correct order, downloads each image into the given directory. Gives the images local filenames img0, img1, and so on. Creates an index.html in the directory with an img tag to show each local image file. Creates the directory if necessary.
def download_images(img_urls, dest_dir): # Creating the directory if the directory does not already exist if not os.path.exists(str(dest_dir)): os.mkdir(dest_dir) print ('Retrieving...') with open(str(dest_dir) + '/index.html', 'w') as f: f.write("<html>\n<body>\n") for index, url in enumerate(img_urls): img_name = 'img' + str(index + 1) urllib.urlretrieve("https://code.google.com" + url, filename=str(dest_dir) + '/' + img_name +'.jpg') print ('Downloaded ' + url[-10:] + ": " + \ str(index + 1) + " images downloaded") f.write("<img src=" + '"' + img_name +".jpg" +'">') f.write("\n</html>\n</body>") print ('Download Complete!') pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_images(img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n # If the directory doesn't exist, create it\n os.mkdir(dest_dir)\n count = 0\n img_string = ''\n # Copies each file from the url provided to the directory provided\n for file in img_urls:\n new_filename = '{}/img{}.jpg'.format(dest_dir, count)\n print \"Retrieving {}\".format(file)\n urllib.urlretrieve(file, new_filename)\n img_string += \"<img src = 'img{}.jpg'>\".format(count)\n count += 1\n print \"Retrieved {} files\".format(count)\n # Creates an html file to display the completed image\n with open('{}/index.html'.format(dest_dir), 'w') as f:\n f.write(\n '<html>\\n<body>\\n{}\\n</body>\\n</html>'.format(img_string)\n )\n pass", "def download_images(img_urls, dest_dir):\n # +++your code here+++\n (errcode, statusmsg) = check_create_dir(dest_dir)\n if errcode:\n print statusmsg\n sys.exit(errcode)\n else: print statusmsg\n # retrieve images and generate html code for files\n html_str = '<html>\\n<body>\\n' # opening html file tags\n i = 0\n for img in img_urls:\n img_filename = 'img' + str(i)\n full_filepath = os.path.join(dest_dir, img_filename) \n print 'Retrievieng ' + img + ' to ' + full_filepath + ' file..'\n urllib.urlretrieve(img, full_filepath)\n html_str += '<img src=\\\"' + img_filename + '\\\">'\n i += 1\n html_str += '\\n</html>\\n</body>' # closing html file tags\n # create html file\n html_filename = os.path.join(dest_dir, 'index.html')\n f = open(html_filename, 'w')\n f.write(html_str) \n f.close()\n print 'File ' + html_filename + ' was created.'", "def download_images(img_urls, dest_dir, base_url=\"http://code.google.com\"):\n create_dir(dest_dir)\n img_tags = fetch_call(img_urls, dest_dir)\n create_html(dest_dir, img_tags)", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. 
\n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def download_images(urlList):\n fileNumber = 1;\n fileName = \"\"\n\n # urlList[0] is just titles, so we start at 1\n for url in urlList[1:]:\n sys.stdout.write(\"\\rFile number %i of %i \" % (fileNumber+1, len(urlList)))\n\n sys.stdout.flush()\n\n try:\n fileName = str(fileNumber) + \".png\"\n # Download the file from `url` and save it locally under `fileName`:\n # I append png to the end of the file to \"make it\" png, but there's definitely a better way\n with urllib.request.urlopen(url) as response, open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n sys.stdout.flush()\n print(\"\\r %s is not a downloadable image. Skipping to next url...\" % url)\n \n fileNumber += 1;\n\n sys.stdout.write(\"\\r\\nDone!\")\n sys.stdout.flush()\n sys.stdout.write(\"\\r\\n\")", "def download_images(img_urls, dest_dir):\n if len(img_urls) > 0 :\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n # save each images file name\n image_names = []\n # Iterate over each image url, downloading the image to a local file\n img_ctr = 0\n for url in img_urls :\n file_name = 'img' + str(img_ctr) + '.jpg'\n image_names.append(file_name)\n full_name = dest_dir + '/' + file_name\n print('Writing file: %s from %s' % (full_name, url) )\n # When calling the SSLContext constructor directly, CERT_NONE is the default.\n # Since it does not authenticate the other peer it can be insecure\n # Beyond the scope of this exercise (emoji holding my nose)\n unsecure_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n with urllib.request.urlopen(url, context=unsecure_context) as response, open(full_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n img_ctr += 1\n return image_names", "def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with 
open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)", "def _download_images(self, image_urls: typing.List[str], save_dir: str) -> typing.List[str]:\n\n\t\timage_paths = []\n\n\t\tfor i, url in enumerate(image_urls):\n\t\t\timage = self.send_request_image(url)\n\n\t\t\timage_ext = url.split(\".\")[-1]\n\n\t\t\timage_dst_path = os.path.join(save_dir, f\"{i}.{image_ext}\")\n\n\t\t\tif image is not None:\n\t\t\t\twith open(image_dst_path, \"wb\") as fh:\n\n\t\t\t\t\t# Magic boolean which makes it work\n\t\t\t\t\timage.raw.decode_content = True\n\n\t\t\t\t\t# noinspection PyBroadException\n\n\t\t\t\t\t# Attempt to download the image from the URL\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfileobj(image.raw, fh)\n\n\t\t\t\t\t# We should reduce the scope\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\t# We downloaded the image without any errors\n\t\t\t\t\telse:\n\t\t\t\t\t\timage_paths.append(image_dst_path)\n\n\t\treturn image_paths", "def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)", "def download_pics(pic_urls, directory):\r\n print(\"downloading pictures...\")\r\n for url in pic_urls:\r\n name = url.split(\"/\")[-1]\r\n if len(name) >= 20:\r\n name = name[len(name)-20:]\r\n \r\n print('from:', url)\r\n pic_path = directory + name\r\n if not os.path.exists(pic_path):\r\n print(\"downloading ->\", pic_path)\r\n try:\r\n urllib.request.urlretrieve(url, pic_path)\r\n except ValueError:\r\n # 'http://' missing from link\r\n urllib.request.urlretrieve(\"http://\" + url, pic_path)\r\n except urllib.error.HTTPError:\r\n # access forbidden\r\n # ex: http://puu.sh/n2zPL/2491975ef3.jpg\r\n print(\"URL skipped due to HTTPError\", url)\r\n else:\r\n print(\"already downloaded ->\", pic_path)\r\n print(\"Downloads Finished\")", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched", "def download_images(urls: List[str] = None):\n are_images = [is_url_image(url) for url in urls]\n if not are_images[: sum(are_images)]:\n raise NotImplementedError('Only images are supported')\n downloads = [requests.get(url) for url in urls]\n images = [load_image(io.BytesIO(download.content)) for download in downloads]\n return images", "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} 
class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def create_images(jsons_info, image_dir, photos_info_dict, num_images=200):\n for item in jsons_info:\n json_info = json.load(open(item, \"r\"))\n category_dir = os.path.join(image_dir, os.path.splitext(os.path.basename(item))[0])\n print(\"Downloading in -- \", category_dir)\n if not os.path.exists(category_dir):\n os.makedirs(category_dir)\n count = 0\n i = 0\n while count < num_images:\n photo_id = json_info[i][\"photo\"]\n link = photos_info_dict[f'{photo_id:09}']\n try:\n urllib.request.urlretrieve(link, f\"{category_dir}/{count}.jpg\")\n count = count + 1\n i = i + 1\n except:\n i = i + 1\n print(\"Image - Downloaded\")", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def test_z_download_images(self):\n #img_urls = logpuzzle.read_urls('place_code.google.com')\n img_urls = logpuzzle.read_urls('animal_code.google.com')\n dest_dir = './puzzle_images'\n logpuzzle.download_images(img_urls, dest_dir)\n\n result = os.listdir(dest_dir)\n expected_result = ['img0.jpg', 'img1.jpg', 'img10.jpg', 'img11.jpg', 'img12.jpg', 'img13.jpg', 'img14.jpg', 'img15.jpg', 'img16.jpg', 'img17.jpg', 'img18.jpg', 'img19.jpg', 'img2.jpg', 'img3.jpg', 'img4.jpg', 'img5.jpg', 'img6.jpg', 'img7.jpg', 'img8.jpg', 'img9.jpg']\n self.assertEqual(expected_result, result,\n 'write_index_file() expected {} but got {}'.format(expected_result, result))", "def create_image_urls(self):\n self._image_urls = []\n while True:\n image_url = self._create_random_url()\n request = urllib2.Request(image_url)\n opener = urllib2.build_opener(NoRedirection)\n try:\n response = opener.open(request)\n code = response.code\n except urllib2.HTTPError as error:\n code = error.code\n if code == 200:\n print \"Found a successful url!\"\n self._image_urls.append(image_url)\n if len(self._image_urls) > 100:\n break\n print self._image_urls\n image_url_file = 
open(self._image_urls_file_name, 'w')\n for image_url in self._image_urls:\n image_url_file.write(image_url + '\\n')\n image_url_file.close()", "def _download_images(self, url_file, destination_dir, log_file):\n logger = self.setup_log(log_file)\n logger.info(config.LOG_INITIAL_MESSAGE % (url_file, destination_dir))\n\n with open(url_file) as urls:\n for i, l in enumerate(urls):\n pass\n bar = progressbar.ProgressBar(i + 1)\n\n download_count = 0\n\n # opening the url file and reading the urls\n with open(url_file, 'r') as urls:\n for i, url in enumerate(urls):\n bar.set(i)\n\n url = url.strip()\n components = urllib.parse.urlparse(url)\n if not (components.scheme and components.netloc and components.path):\n logger.error('%s: \"%s\"' % (config.LOG_URL_INVALID, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # check whether the robots.txt allows us to crawl this URL\n try:\n can_fetch = self.download_allowed(url, components.scheme, components.netloc)\n except (AttributeError, urllib.error.URLError, ValueError):\n logger.error('%s: %s' % (config.LOG_ERROR_ROBOTS, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # log that image download is disallowed\n if not can_fetch:\n logger.error('%s: %s' % (config.LOG_DISALLOWED, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # open image url\n try:\n url_response = urllib.request.urlopen(url)\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_OPENING, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # check whether the URL content is an image \n if url_response.info().get_content_maintype().lower() != config.IMAGE_MIMETYPE:\n logger.error('%s: %s' % (config.LOG_NOT_AN_IMAGE, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # retrieve the content and store in the destination directory\n os.makedirs(destination_dir, exist_ok=True) \n image_name = '%s_%s' % (download_count + 1, os.path.basename(url))\n with open(os.path.join(destination_dir, image_name), 'wb') as image_file:\n try:\n image_file.write(url_response.read())\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_DOWNLOADING, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # log download and increment the counter\n logger.info('%s %s, url: %s' % (config.LOG_DOWNLOADED, self.truncate_middle(image_name, config.MAX_FILE_NAME), self.truncate_middle(url, config.MAX_URL)))\n download_count += 1\n\n # set the progress bar to 100 percent and print a comment and new line for the returning prompt\n bar.complete('completed')\n\n # release the logger handles\n self.shutdown_log(logger)", "def get_images(outputdir, parent_key, key, searchurl, maximum, json_path):\n body, browser = build_browser(searchurl)\n\n urls = []\n\n while len(urls) < maximum:\n try:\n page_source = browser.page_source\n\n soup = BeautifulSoup(page_source, 'lxml')\n\n search_result_soup = get_div_child(soup.body, \"islrg\")\n images = search_result_soup.find_all('img')\n urls = get_url_from_images(images)\n print(urls)\n\n for i in range(50):\n scroll_down(body)\n # browser.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div')\n browser.find_element_by_class_name(\"mye4qd\").click()\n print(len(urls) < maximum)\n except ElementNotInteractableException as e: # There is no next page\n print(e)\n break\n\n\n\n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n\n write_urls(json_path, parent_key, key, urls)\n\n # download_urls(urls, outputdir)\n browser.close()", "def 
fetch_files_from_urls(urls, dir):\n makedir(dir)\n try:\n pool = []\n for url in urls:\n p = Process(target=download, args=(url, dir,))\n p.start()\n pool.append(p)\n for p in pool:\n p.join()\n except KeyboardInterrupt:\n print \"Shutdown requested...exiting\"\n # except Exception:\n # traceback.print_exc(file=sys.stdout)\n\n # print(\"removing temporary files from current directory\")\n map(os.remove, glob.glob(\"*.tmp\"))", "def download(input_file, img_dir):\n xml_parser = OvenXMLParser()\n xml_parser.load_file(opts.input_file)\n downloader = IMGDownloader()\n for item in xml_parser.item_generator():\n ad_id = item.find(conf.AD_ID_KEY).text\n output_dir = \"%s/%s\" % (img_dir, ad_id)\n img_sources = [img.find(\"src\").text for\n img in item.find(conf.IMGS_KEY).findall(\"value\")]\n if img_sources and not os.path.exists(output_dir):\n os.makedirs(output_dir, 0755)\n for src in img_sources:\n filename = ntpath.basename(src)\n outpath = \"%s/%s\" % (output_dir, filename)\n if not os.path.exists(outpath):\n downloader.download_img(src, outpath)\n else:\n print(\"Img file already exists: %s (not overwriting)\" % outpath)", "def read_from_server(url_base=\"http://10.200.102.18/\", url_dir=\"G179-dataset/\"):\n\n all_images = urllib2.urlopen(url_base + url_dir).read()\n\n parser = ImagesHTMLParser()\n parser.feed(all_images)\n data = parser.data\n imgs = []\n\n print(\"Found %d images!\" % len(data))\n print(\"Started Download!\")\n i = 1\n\n for d in data:\n print(\"\\rProgress: %d/%d \" % (i, len(data)), end='')\n dl_img = urllib2.urlopen(url_base + url_dir + d).read()\n asd = cStringIO.StringIO(dl_img)\n img = Image.open(asd)\n imgs.append(np.array(img))\n i = i + 1\n\n return imgs", "def download_images(self, url_file, destination_dir, log_file):\n try:\n self._download_images(url_file, destination_dir, log_file)\n except IOError as error:\n sys.stderr.write(str(error))\n sys.exit(error.errno)\n except Exception as error:\n sys.stderr.write('[Unknown error] %s' % str(error))\n sys.exit(1)", "def download_images(main_keyword, supplemented_keywords, download_dir): \n image_links = set()\n print('Process {0} Main keyword: {1}'.format(os.getpid(), main_keyword))\n\n # create a directory for a main keyword\n img_dir = download_dir + main_keyword + '/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n for j in range(len(supplemented_keywords)):\n print('Process {0} supplemented keyword: {1}'.format(os.getpid(), supplemented_keywords[j]))\n search_query = quote(main_keyword + ' ' + supplemented_keywords[j])\n # url = 'https://www.google.com/search?q=' + search_query + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'\n url = 'https://www.google.com/search?q=' + search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n print('Process {0} get {1} links so far'.format(os.getpid(), len(image_links)))\n time.sleep(2)\n print (\"Process {0} get totally {1} links\".format(os.getpid(), len(image_links)))\n\n print (\"Start downloading...\")\n count = 1\n for link in image_links:\n try:\n req = urllib.request.Request(link, headers = {\"User-Agent\": generate_user_agent()})\n response = urllib.request.urlopen(req)\n data = response.read()\n file_path = img_dir + '{0}.jpg'.format(count)\n with open(file_path,'wb') as wf:\n wf.write(data)\n print('Process {0} fininsh image {1}/{2}.jpg'.format(os.getpid(), main_keyword, count))\n count += 1\n except urllib.error.URLError as e:\n logging.error('URLError while 
downloading image {0}\\nreason:{1}'.format(link, e.reason))\n continue\n except urllib.error.HTTPError as e:\n logging.error('HTTPError while downloading image {0}\\nhttp code {1}, reason:{2}'.format(link, e.code, e.reason))\n continue\n except Exception as e:\n logging.error('Unexpeted error while downloading image {0}\\nerror type:{1}, args:{2}'.format(link, type(e), e.args))\n continue\n\n print(\"Finish downloading, total {0} errors\".format(len(image_links) - count))" ]
[ "0.8684506", "0.83942056", "0.83148164", "0.79432535", "0.7850097", "0.7684212", "0.7544121", "0.7534679", "0.7316021", "0.7306084", "0.7305959", "0.7237736", "0.716367", "0.71477824", "0.69875336", "0.6962605", "0.69536203", "0.69242907", "0.6902611", "0.6824033", "0.6726385", "0.67220694", "0.6700179", "0.66364044", "0.65989596", "0.6597569", "0.6596402", "0.6546427", "0.6468078", "0.64387506" ]
0.8439321
1
Parse args, scan for urls, get images from urls
def main(args): parser = create_parser() if not args: parser.print_usage() sys.exit(1) parsed_args = parser.parse_args(args) img_urls = read_urls(parsed_args.logfile) if parsed_args.todir: download_images(img_urls, parsed_args.todir) else: print('\n'.join(img_urls))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n img_urls = read_urls(parsed_args.logfile)\n if parsed_args.todir:\n download_images(img_urls, parsed_args.todir)\n else:\n print('\\n'.join(img_urls))", "async def getImageURLS(self, tags, fuzzy=False, singlePage=False):\n if fuzzy:\n tags = tags.split(\" \")\n for tag in tags:\n tag = tag + \"~\"\n temp = \" \"\n tags = temp.join(tags)\n print(tags)\n num = await self.totalImages(tags)\n if num != 0:\n PID = 0\n imgList = []\n XML = None\n t = True\n tempURL = self.urlGen(tags=tags, PID=PID)\n while t:\n with async_timeout.timeout(10):\n async with self.session.get(url=tempURL) as XML:\n XML = await XML.read()\n XML = ET.XML(XML)\n XML = self.ParseXML(XML)\n if XML is None:\n return None\n if len(imgList) >= int(XML['posts']['@count']): # \"if we're out of images to process\"\n t = False # \"end the loop\"\n else:\n for data in XML['posts']['post']:\n imgList.append(str(data['@file_url']))\n if singlePage:\n return imgList\n PID += 1\n return imgList\n else:\n return None", "def main(url):\n print(f\"Running main with URL = {url}...\")\n imagehits(downloaddata(url))", "def main(argv=sys.argv[1:]): # pylint: disable=dangerous-default-value\n parser = argparse.ArgumentParser()\n image = argparse.ArgumentParser()\n parser.add_argument(\"action\", choices=[\"image\", \"sources\"])\n\n image.add_argument('-d', '--download',\n help='Download the result to a file.',\n default=False, action=\"store_true\")\n image.add_argument('-f', '--file',\n help=\"Filename to download to.\",\n default=lambda x: x.split(\"/\")[-1])\n image.add_argument('source', help=\"Image source to use.\")\n image.add_argument('query', help=\"Tags to use during search.\",\n default='', nargs=\"*\")\n\n args = parser.parse_args(argv)\n\n if args.action == \"sources\":\n sources = \"\\n\".join(\"\\n\".join(v for v in source) for source in\n nsfw_dl.SOURCES.values())\n print(sources)\n\n else:\n args = image.parse_args(argv[1:])\n download(args.source, args.query, args.file, args.download)", "def parse_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--csv\", type=str, help=\"Input CSV file with Archive urls\")\n parser.add_argument(\"--db\", type=str, help=\"Input DB file with urls\")\n parser.add_argument(\"--picsout\", type=str, help=\"Path of directory to output the screenshots\")\n parser.add_argument(\"--indexcsv\", type=str, help=\"The CSV file to write the index\")\n parser.add_argument(\"--method\", type=int, help=\"Which method to take the screenshots, \"\n \"0 for chrome, 1 for puppeteer, 2 for cutycapt\")\n parser.add_argument(\"--timeout\", type=str, help=\"(optional) Specify duration before timeout, \"\n \"in seconds, default 30 seconds\")\n parser.add_argument(\"--lazy\", type=int, help=\"(optional) Continues to the next archive after taking n pictures\")\n\n args = parser.parse_args()\n\n # some error checking\n if args.csv is not None and args.indexcsv is None:\n print(\"invalid output index file\\n\")\n exit()\n if args.csv is None and args.db is None:\n print(\"Must provide input file\\n\")\n exit()\n if args.csv is not None and args.db is not None:\n print(\"must only use only one type of input file\\n\")\n exit()\n if args.picsout is None:\n print(\"Must specify output path for pictures\")\n exit()\n if args.method is None:\n print(\"Must specify screenshot method\\n\")\n exit()\n\n pics_out_path = args.picsout + '/'\n screenshot_method = 
int(args.method)\n\n if args.csv is not None:\n csv_in_name = args.csv\n use_csv = True\n else:\n use_csv = False\n\n if args.db is not None:\n connect_sql(args.db)\n use_db = True\n else:\n use_db = False\n\n if args.indexcsv is not None:\n csv_out_name = args.indexcsv\n make_csv = True\n else:\n make_csv = False\n\n if args.timeout is None:\n timeout_duration = \"30\"\n else:\n timeout_duration = args.timeout\n\n if args.lazy is not None:\n be_lazy = True\n lazy = int(args.lazy)\n else:\n be_lazy = False\n lazy = None\n\n return csv_in_name, csv_out_name, pics_out_path, screenshot_method, use_csv, use_db, make_csv, \\\n timeout_duration, lazy, be_lazy", "def download_images(main_keyword, supplemented_keywords, download_dir): \n image_links = set()\n print('Process {0} Main keyword: {1}'.format(os.getpid(), main_keyword))\n\n # create a directory for a main keyword\n img_dir = download_dir + main_keyword + '/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n for j in range(len(supplemented_keywords)):\n print('Process {0} supplemented keyword: {1}'.format(os.getpid(), supplemented_keywords[j]))\n search_query = quote(main_keyword + ' ' + supplemented_keywords[j])\n # url = 'https://www.google.com/search?q=' + search_query + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'\n url = 'https://www.google.com/search?q=' + search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n print('Process {0} get {1} links so far'.format(os.getpid(), len(image_links)))\n time.sleep(2)\n print (\"Process {0} get totally {1} links\".format(os.getpid(), len(image_links)))\n\n print (\"Start downloading...\")\n count = 1\n for link in image_links:\n try:\n req = urllib.request.Request(link, headers = {\"User-Agent\": generate_user_agent()})\n response = urllib.request.urlopen(req)\n data = response.read()\n file_path = img_dir + '{0}.jpg'.format(count)\n with open(file_path,'wb') as wf:\n wf.write(data)\n print('Process {0} fininsh image {1}/{2}.jpg'.format(os.getpid(), main_keyword, count))\n count += 1\n except urllib.error.URLError as e:\n logging.error('URLError while downloading image {0}\\nreason:{1}'.format(link, e.reason))\n continue\n except urllib.error.HTTPError as e:\n logging.error('HTTPError while downloading image {0}\\nhttp code {1}, reason:{2}'.format(link, e.code, e.reason))\n continue\n except Exception as e:\n logging.error('Unexpeted error while downloading image {0}\\nerror type:{1}, args:{2}'.format(link, type(e), e.args))\n continue\n\n print(\"Finish downloading, total {0} errors\".format(len(image_links) - count))", "def get_images_urls(self, grab, parse_first_image=True):\n images = []\n if parse_first_image:\n first_image = grab.doc.select(\n '//figure[@class=\"item\"]' +\n '/img[@class=\"img-fluid\"]'\n )\n if first_image.exists() and 'http' in first_image.attr('src'):\n images.append(first_image.attr('src'))\n\n for image in grab.doc.select(\n '//figure[@class=\"item\"]' +\n '/img[@class=\"lazyOwl img-fluid\"]'):\n images.append(image.attr('data-src'))\n if len(images) < 1:\n logging.debug(\"Images not found in: %s\" % grab.doc.url)\n\n return images", "async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in 
image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images", "def find_img_urls(mainURL):\n \n imglist = []\n \n class IMGParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n if tag == 'img':\n imglist.append(dict(attrs)[\"src\"])\n \n URL = urlopen(mainURL)\n html = URL.read()\n \n parser = IMGParser()\n parser.feed(html)\n parser.close()\n \n return imglist", "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n scrape_url(parsed_args.url)", "def parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-t', '--types', action='append',\n help=\"RegEx to select image sets from the manifest file.\")\n parser.add_argument('-i', '--install-location',\n default=None,\n help=\"Set custom install location for images\")\n parser.add_argument('-m', '--manifest-location', type=str, default=\"\",\n help=\"Set custom location for the manifest file\")\n parser.add_argument('-I', '--inventory-location', type=str, default=\"\",\n help=\"Set custom location for the inventory file\")\n parser.add_argument('-l', '--list-targets', action=\"store_true\", default=False,\n help=\"Print targets in the manifest file to stdout, and exit.\\n\"\n \"To get relative paths only, specify an empty base URL (-b '').\")\n parser.add_argument('--url-only', action=\"store_true\", default=False,\n help=\"With -l, only print the URLs, nothing else.\")\n parser.add_argument(\"--buffer-size\", type=int, default=_DEFAULT_BUFFER_SIZE,\n help=\"Set download buffer size\")\n parser.add_argument(\"--download-limit\", type=int, default=_DEFAULT_DOWNLOAD_LIMIT,\n help=\"Set threshold for download limits. 
Any download \"\n \"larger than this will require approval, either \"\n \"interactively, or by providing --yes.\")\n parser.add_argument(\"--http-proxy\", type=str,\n help=\"Specify HTTP proxy in the format \"\n \"http://user:[email protected]:port\\n\"\n \"If this this option is not given, the environment \"\n \"variable HTTP_PROXY can also be used to specify a proxy.\")\n parser.add_argument(\"-b\", \"--base-url\", type=str, default=_DEFAULT_BASE_URL,\n help=\"Set base URL for images download location\")\n parser.add_argument(\"-k\", \"--keep\", action=\"store_true\", default=False,\n help=\"Keep the downloaded images archives in the image directory\")\n parser.add_argument(\"-T\", \"--test\", action=\"store_true\", default=False,\n help=\"Verify the downloaded archives before extracting them\")\n parser.add_argument(\"-y\", \"--yes\", action=\"store_true\", default=False,\n help=\"Answer all questions with 'yes' (for scripting purposes).\")\n parser.add_argument(\"-n\", \"--dry-run\", action=\"store_true\", default=False,\n help=\"Print selected target without actually downloading them.\")\n parser.add_argument(\"--refetch\", action=\"store_true\", default=False,\n help=\"Ignore the inventory file and download all images.\")\n parser.add_argument('-V', '--version', action='version', version=_UHD_VERSION)\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help=\"Decrease verbosity level\")\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help=\"Increase verbosity level\")\n # Some sanitation that's easier to handle outside of the argparse framework:\n args = parser.parse_args()\n if not args.base_url.endswith('/') and args.base_url != \"\":\n args.base_url += '/'\n if args.yes:\n global _YES\n _YES = True\n if args.http_proxy:\n global _PROXIES\n _PROXIES['http'] = args.http_proxy\n # Set the verbosity\n global _LOG_LEVEL\n log(\"TRACE\", \"Default log level: {}\".format(_LOG_LEVEL))\n _LOG_LEVEL = _LOG_LEVEL - args.verbose + args.quiet\n return args", "def getimgs():", "def _parse_args():\n parser = optparse.OptionParser(usage=\"%prog imagefile+number.suffix\", description=\"Opens up a sequence of pictures in a web browser.\")\n\n return parser.parse_args()", "def __init__(self,args):\n storage_client = storage.Client()\n self.parsed = urlparse(args.input_dir)\n \n #parse gcp path\n self.bucket = storage_client.get_bucket(self.parsed.hostname) \n images=self.bucket.list_blobs(prefix=self.parsed.path[1:])\n \n #image list\n self.image_list=[]\n for image in images:\n self.image_list.append(\"gs://\" + self.bucket.name +\"/\"+ str(image.name))\n \n #if no ceiling, process all arguments\n if not args.limit:\n limit=images.num_results\n else:\n limit=args.limit", "def extract_images_url(url, source):\n if source == \"mangaseeonline\":\n r = s.post(\n \"http://playwright:5000/scrape\",\n json={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@id=\"TopPage\"]/descendant::img/@src')\n if source == \"nettruyen\":\n r = s.get(\n settings.SPLASH_URL, params={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@class=\"reading-detail box_doc\"]/div/img/@src')\n if source == \"doctruyen3q\":\n r = s.get(\n settings.SPLASH_URL, params={\"url\": url, \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[contains(@id, \"page_\")]/img/@src')\n if source == \"truyenkinhdien\":\n r = s.get(\n 
settings.SPLASH_URL.replace(\"render.html\", \"execute\"),\n params={\"url\": url, \"lua_source\": lua_script, \"wait\": 1},\n )\n tree = html.fromstring(r.json()[\"html\"])\n return tree.xpath(\n '//*[@class=\"sgdg-gallery\"]/a[not(contains(@style,\"display:none\"))]/img/@src'\n )", "def main():\n try:\n pixid = sys.argv[1]\n except IndexError:\n print('Usage: python pixget.py [pixid] (save_path)')\n exit(1)\n\n # get the path\n if len(sys.argv) > 2:\n path = sys.argv[2]\n else:\n path = '.'\n\n imgInfoPool = []\n if get_image_url(pixid, imgInfoPool):\n exit(1)\n download_image(path, imgInfoPool)", "def maincall(self, usernamelist, output_choice, tag_bool, com_bool):\n \n dict1 = self.userpage_scraper(usernamelist)\n dict2 = self.user_images_url(dict1)\n\n self.crawling_images_url(dict2, output_choice, com_bool, tag_bool)", "def get_images(url):\n soup = make_soup(url)\n # this makes a list of bs4 element tags\n images = [img for img in soup.findAll('img')]\n print(str(len(images)) + \" images found.\")\n # compile our unicode list of image links\n image_links = [each.get('src') for each in images]\n # clean list\n image_links = [each for each in image_links if each is not None]\n # specific for test site\n if len(image_links) > 0 and image_links[0][:4] != 'http':\n links = [url + link for link in image_links]\n else:\n links = image_links\n return links", "def scrape(self):\n reg = re.compile(self.regex)\n images = self.soup.findAll('img')\n results = []\n for img in images:\n try:\n url = dict(img.attrs)['src']\n url = self._make_url_path(url)\n if reg.match(url):\n results.append(url)\n\n except:\n pass\n\n print 'Img tag scraping OK'\n return results", "def _read_image_urls(self):\n if not os.path.isfile(self._image_urls_file_name):\n raise IOError, \"'%s' is not found\" % self._image_urls_file_name\n if os.path.getsize(self._image_urls_file_name) == 0:\n raise IOError, \"'%s' is empty\" % self._image_urls_file_name\n for line in open(self._image_urls_file_name, 'r'):\n self._image_urls.append(line.strip())", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n\n download_images_from_grid_vector(grid_vector=args.grid_vector,\n output_dir=args.output_dir,\n type_id=args.type,\n product_type_id=args.product_type,\n num_jobs=args.num_jobs)", "def main():\n args = parse_args()\n images_dir = get_images_dir(args)\n log(\"INFO\", \"Images destination: {}\".format(os.path.abspath(images_dir)))\n try:\n manifest = parse_manifest(get_manifest_raw(args))\n if args.list_targets:\n print_target_list(\n manifest,\n args\n )\n return True\n log(\"TRACE\", \"Manifest:\\n{}\".format(\n \"\\n\".join(\"{}\".format(item) for item in manifest.items())\n ))\n\n # Read the inventory into a dictionary we can perform lookups on\n if os.path.isfile(args.inventory_location):\n inventory_fn = args.inventory_location\n else:\n inventory_fn = os.path.join(images_dir, _INVENTORY_FILENAME)\n inventory = parse_inventory(inventory_fn=inventory_fn)\n log(\"TRACE\", \"Inventory: {}\\n{}\".format(\n os.path.abspath(inventory_fn),\n \"\\n\".join(\"{}\".format(item) for item in inventory.items())\n ))\n\n # Determine the URLs to download based on the input regular expressions\n if not args.types:\n types_regex_l = [_DEFAULT_TARGET_REGEX]\n else:\n types_regex_l = args.types\n\n log(\"TRACE\", \"RegExs for target selection: {}\".format(types_regex_l))\n targets_info = lookup_urls(types_regex_l, manifest, inventory, args.refetch)\n # Exit early if we don't have anything to download\n if 
targets_info:\n target_urls = [info.get(\"url\") for info in targets_info]\n log(\"DEBUG\", \"URLs to download:\\n{}\".format(\n \"\\n\".join(\"{}\".format(item) for item in target_urls)\n ))\n else:\n return True\n\n ## Now download all the images archives into a temp directory\n if args.dry_run:\n for target_info in targets_info:\n log(\"INFO\", \"[Dry Run] Fetch target: {}\".format(\n target_info.get(\"filename\")))\n return True\n with TemporaryDirectory() as temp_dir:\n for target_info in targets_info:\n update_target(\n target_info,\n temp_dir,\n images_dir,\n inventory,\n args\n )\n ## Update inventory with all the new content\n write_inventory(inventory, inventory_fn)\n\n except Exception as ex:\n log(\"ERROR\", \"Downloader raised an unhandled exception: {ex}\\n\"\n \"You can run this again with the '--verbose' flag to see more information\\n\"\n \"If the problem persists, please email the output to: {contact}\"\n .format(contact=_CONTACT, ex=ex))\n # Again, we wait on Windows systems because if this is executed in a\n # window, and immediately fails, the user doesn't have a way to see the\n # error message, and if they're not very savvy, they won't know how to\n # execute this in a shell.\n if not _YES and platform.system() == 'Windows':\n input('Hit Enter to continue.')\n return False\n log(\"INFO\", \"Images download complete.\")\n return True", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def getURLs():", "def get_images(self, start=0, limit=100):\n if not start:\n start = 0\n if not limit:\n limit = 100\n start = int(start)\n limit = int(limit)\n urls = self._image_urls[start:start + limit]\n message = \"%i Successful URLs found.\" % len(urls)\n return (urls, message)", "def download_images(urls: List[str] = None):\n are_images = [is_url_image(url) for url in urls]\n if not are_images[: sum(are_images)]:\n raise NotImplementedError('Only images are supported')\n downloads = [requests.get(url) for url in urls]\n images = [load_image(io.BytesIO(download.content)) for download in downloads]\n return images", "def read_from_server(url_base=\"http://10.200.102.18/\", url_dir=\"G179-dataset/\"):\n\n all_images = urllib2.urlopen(url_base + url_dir).read()\n\n parser = ImagesHTMLParser()\n parser.feed(all_images)\n data = parser.data\n imgs = []\n\n print(\"Found %d images!\" % len(data))\n print(\"Started Download!\")\n i = 1\n\n for d in data:\n print(\"\\rProgress: %d/%d \" % (i, len(data)), end='')\n dl_img = urllib2.urlopen(url_base + url_dir + d).read()\n asd = cStringIO.StringIO(dl_img)\n img = Image.open(asd)\n imgs.append(np.array(img))\n i = i + 1\n\n return imgs", "def main():\n\n options = parse_arguments()\n\n directories = find_directories(options.folder)\n process_pool = Pool(len(directories))\n\n function_call = partial(find_URLs, options=options)\n\n process_pool.map(function_call, directories)", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# 
{0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched", "def read_urls(filename):\n with open(filename, 'r') as f:\n line = f.readline()\n pattern = \"GET\" + \"(.+?)\"+ \"jpg\"\n result = []\n\n while len(line) > 0:\n end_point = re.search(pattern, line)\n if end_point != None and end_point.group(0)[4:] not in result:\n if \"no_picture\" not in end_point.group(0)[4:]:\n result.append(end_point.group(0)[4:])\n line = f.readline()\n return sorted(result, key = lambda x: x.split(\"/\")[-1].split(\"-\")[-1])" ]
[ "0.70296556", "0.65749073", "0.65041465", "0.64431983", "0.63387156", "0.6273555", "0.61993766", "0.618466", "0.61794204", "0.61319304", "0.60635006", "0.60485786", "0.6020802", "0.59960306", "0.59640354", "0.5945829", "0.5926145", "0.5853876", "0.58419275", "0.58381164", "0.58253115", "0.58079356", "0.58076054", "0.5798884", "0.57880616", "0.57876974", "0.5783897", "0.5781596", "0.5777931", "0.5751969" ]
0.7047148
1
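The hard negatives closing the entry above repeatedly implement the same scrape-and-collect pattern: fetch a page, walk its `<img>` tags, and clean up the `src` values. A minimal, self-contained sketch of that pattern follows; the function name, parameters, and the use of `requests`/BeautifulSoup are illustrative assumptions, not code taken from the dataset rows.

```python
# Illustrative sketch (not dataset content): fetch a page, collect <img> src
# attributes, and resolve them against the page URL. Assumes requests and bs4.
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


def collect_image_urls(page_url):
    """Return absolute URLs for every <img> tag found on page_url."""
    html = requests.get(page_url, timeout=10).text
    soup = BeautifulSoup(html, "html.parser")
    urls = []
    for img in soup.find_all("img"):
        src = img.get("src")
        if src:  # skip <img> tags that have no src attribute
            urls.append(urljoin(page_url, src))  # make relative paths absolute
    return urls
```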
Loads a yaml file and adds line numbers to the objects
def load_yaml_with_lines(config): try: # Source: https://stackoverflow.com/a/13319530 loader = yaml.Loader(config) def compose_node(parent, index): # the line number where the previous token has ended (plus empty lines) line = loader.line node = Composer.compose_node(loader, parent, index) node.__line__ = line + 1 return node def construct_mapping(node, deep=False): mapping = Constructor.construct_mapping(loader, node, deep=deep) mapping['__line__'] = node.__line__ return mapping loader.compose_node = compose_node loader.construct_mapping = construct_mapping return loader.get_single_data() except yaml.YAMLError as e: raise Exception('Error occurred while parsing YAML: {}'.format(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_yaml_file(self, path):\n with path.open('r') as handle:\n data = load_yaml(handle)\n\n self.set_all(**self.SCHEMA.load(data).data)", "def __init__(self, yaml_file_path: Path) -> None:\n with yaml_file_path.open(\"r\") as yaml_file:\n self._yaml = YAML().load(yaml_file.read())", "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n fname = os.path.join(os.path.dirname(loader.name), node.value)\n try:\n return _add_reference(load_yaml(fname), loader, node)\n except FileNotFoundError as exc:\n raise XKNXException(f\"{node.start_mark}: Unable to read file {fname}.\") from exc", "def read_yaml(self):\n with open(self.yaml_fn) as file:\n info = yaml.load(file, Loader=yaml.FullLoader)\n nudging_info = info['nudging']\n self.info = nudging_info\n self.start_date = nudging_info['start_date']\n self.rnday = nudging_info['rnday']\n self.end_date = self.start_date + datetime.timedelta(self.rnday)\n self.nudge_step = nudging_info['step_nu_tr']\n self.datetime = pd.date_range(self.start_date, self.end_date,\n freq=self.nudge_step)\n self.time = pd.to_datetime(self.datetime.values)- \\\n pd.to_datetime(self.start_date)\n self.time_seconds = self.time.total_seconds().astype(int)\n self.hgrid_fn = nudging_info['hgrid_input_file']\n self.vgrid_fn = nudging_info['vgrid_input_file']\n self.default_value = nudging_info['default']\n self.mesh = read_mesh(self.hgrid_fn,self.vgrid_fn)\n self.node_x = self.mesh.nodes[:,0]\n self.node_y = self.mesh.nodes[:,1]\n self.node_z = self.mesh.nodes[:,2] \n self.nnode = self.mesh.n_nodes()\n self.nvrt = self.mesh.n_vert_levels\n self._mesh_gpd = None\n self._z = None", "def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, Loader=yaml.FullLoader)\n \n return traj_data", "def get_yaml(path):\n end = False\n yaml = \"\"\n num = 0\n\n with open(path, 'r') as f:\n\n for line in f.readlines():\n if line.strip() == '---':\n if end:\n break\n else:\n end = True\n continue\n else:\n num += 1\n\n yaml += line\n\n return yaml, num", "def loadData(self):\n machineToNode = {}\n self.listOfMachines = []\n nextID = 0\n self.processingSteps = []\n with open(self.filename) as f:\n lines = f.read().splitlines()\n for line in lines:\n formatted = line.split(\"\\t\")\n order = int(formatted[0])\n machine = int(formatted[1])\n timestamp = float(formatted[2])\n if machine not in machineToNode: # normalizing machines according to the nodes (1,2,3... 
instead of 1,34,2...)\n machineToNode[machine] = nextID\n nextID +=1\n self.listOfMachines.append(machineToNode[machine]) # normalized list of all machines\n\n pstep = ProcessingStep(machineToNode[machine], timestamp, order)\n self.processingSteps.append(pstep)", "def from_yaml(cls, yaml_file):\n return cls(OrderedDict(yaml.load(open(yaml_file, \"r\"), \n Loader=yaml.FullLoader)))", "def loadfrom_yaml(key, path):\n\twith open(path, 'r') as f:\n\t\td = yaml.load(f)\n\t\tnew_namespace(key)\n\t\t\n\t\t# ns = get_namespace(key)\n\n\t\t# for key, value in d.items():\n\t\t# \t_recurse(0, key, value, ns)", "def test_012_yaml_load(self):\n HEADING()\n db = self.db\n db.connect()\n\n # Clear all jobs currently in the database to ensure a correct final assertion\n db.clear()\n\n # Add the jobs outlined in the YAML file\n db.add_from_yaml(\"etc/jobs.yaml\")\n\n count_fgrep = len(Shell.fgrep(\"input:\", \"etc/jobs.yaml\").split(\"\\n\"))\n\n # Assert that the correct number jobs have been added\n assert(db.count() == count_fgrep)", "def test_02_ReadFile(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_config = l_node.Yaml\n # print(PrettyFormatAny.form(l_node, 'C1-02-A'))\n # print(PrettyFormatAny.form(l_config, 'C1-02-B'))\n self.assertEqual(l_config['Location']['Street'], '1600 Pennsylvania Ave NW')\n self.assertEqual(len(l_config['Location']), 10)", "def load_yaml_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n d = yaml.load(s, Loader=yaml.FullLoader)\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def _load_yaml_file(yaml_file):\n with io.open(yaml_file, 'r', encoding='utf-8') as stream:\n yaml_content = yaml.load(stream)\n FileUtils._check_format(yaml_file, yaml_content)", "def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })", "def load(self, yaml_file):\n try:\n with open(yaml_file, 'r') as fp:\n data = yaml.load(fp)\n \n for key in data:\n if hasattr(self, key):\n setattr(self, key, data[key])\n return True # Return true if we succeeded\n \n except IOError: \n return False # Return false if we didn't succeed", "def load(yml_files, debug = False):\n\n dc = {}\n\n if type(yml_files) == dict:\n dc = yml_files\n elif type(yml_files) == str:\n with open(yml_files, \"r\") as f:\n dc = yaml.load(f)\n elif type(yml_files) == list or type(yml_files) == tuple:\n for yml_file in yml_files:\n with open(yml_file, \"r\") as f:\n dc_cur = yaml.load(f)\n # check that now key is overwritten\n for k in dc_cur.keys():\n if k in dc:\n raise Exception (\"Key %s is defined in at least to yml files (e.g. 
in %s)\" % (k, yml_file) )\n dc.update(dc_cur)\n\n return build_plasm_from_dictionary(dc, debug)", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def from_yaml(cls, model: nn.Module, yaml_path: str) -> pl.LightningModule:\n with open(yaml_path, \"r\") as stream:\n kwargs = yaml.full_load(stream)\n\n return cls(model, **kwargs)", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n st = self.__createStudentFromLine(line)\n # invoke the store method from the base class\n StudentsRepo.store_student(self, st)\n fh.close()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load_yaml(self):\n env = self.state.document.settings.env\n relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))\n\n env.note_dependency(relpath)\n\n encoding = self.options.get('encoding', env.config.source_encoding)\n with io.open(abspath, 'rt', encoding=encoding) as stream:\n spec = yaml.load(stream, _YamlOrderedLoader) # nosec\n self.spec = spec\n self.paths = spec[self.path_path]\n self.definitions = spec[self.models_path]\n self.openapi_version = spec.get('swagger', None) or spec['openapi']\n self.options.setdefault('uri', 'file://%s' % abspath)", "def load_file(self, file_path):\n with open(file_path, \"r\") as mappings_file:\n for raw_line in mappings_file:\n line = raw_line.split()\n # Add new record to the records dictionary.\n new_record = Record(line[0], line[1], line[2], line[3])\n self.add_record(new_record)", "def load(self, lines, start_address=0):\n addr = start_address\n lineno = 0\n try:\n for line in lines:\n lineno += 1\n parsed = self.parse(line)\n if parsed is None:\n continue\n if isinstance(parsed, tuple):\n if parsed[0] is CHANGE_ADDRESS:\n addr = parsed[1]\n continue\n if parsed[0] is DECLARE_LABEL:\n if parsed[1] in self.labels:\n raise ParseException('Redeclaration of ' + parsed[1])\n self.labels[parsed[1]] = addr\n continue\n if addr >= MEMSIZE:\n raise ParseException('Out of memory parsing program')\n if self.memory[addr] is not None:\n raise ParseException('Memory not None at ' + str(addr))\n self.memory[addr] = parsed\n addr += 1\n except ParseException as e:\n sys.stderr.write('%s: ERROR: %s\\n' % (lineno, line))\n sys.stderr.write('%s: ERROR: %s\\n' % (lineno, e))\n raise e\n self.program_validate()", "def load_file(self, filepath):\n filepath = self._yaml_extension(filepath)\n data = self._load_data_yaml(filepath)\n return data", "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def load(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'r') as rank_file:\n for record in rank_file:\n team, score = common.parse(record)\n self.score[team] = score" ]
[ "0.6319262", "0.6127903", "0.61255294", "0.60530853", "0.5874739", "0.5830585", "0.58296055", "0.5783401", "0.57766116", "0.57485676", "0.5716792", "0.5712814", "0.5704829", "0.5702566", "0.5691395", "0.56750816", "0.56460726", "0.5640595", "0.56267923", "0.5590943", "0.55880564", "0.55768245", "0.55697745", "0.55697745", "0.55653024", "0.55309045", "0.5483788", "0.5475276", "0.5472164", "0.5453565" ]
0.6834053
0
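The positive document in the entry just above attaches source line numbers to parsed YAML mappings by overriding the loader's composer and constructor hooks. A compact variant of that technique, written against PyYAML's `SafeLoader`, is sketched below; the class name and the sample document are illustrative.

```python
# Illustrative sketch (not dataset content): record the 1-based start line of
# every YAML mapping under a '__line__' key, using a PyYAML SafeLoader subclass.
import yaml


class LineLoader(yaml.SafeLoader):
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        # node.start_mark.line is 0-based; add 1 for editor-style numbering
        mapping["__line__"] = node.start_mark.line + 1
        return mapping


doc = """\
location:
  street: Main St 1
  city: Springfield
"""

data = yaml.load(doc, Loader=LineLoader)
print(data["__line__"])              # 1 -> the top-level mapping starts on line 1
print(data["location"]["__line__"])  # 2 -> the nested mapping starts on line 2
```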
Prepare the sequences used by the Neural Network
def prepare_sequences(notes, n_vocab): sequence_length = 50 # get all pitch names pitchnames = sorted(set(item for item in notes)) # create a dictionary to map pitches to integers note_to_int = dict((note, number) for number, note in enumerate(pitchnames)) network_input = [] network_output = [] # create input sequences and the corresponding outputs for i in range(0, len(notes) - sequence_length, 1): sequence_in = notes[i:i + sequence_length] sequence_out = notes[i + sequence_length] network_input.append([note_to_int[char] for char in sequence_in]) network_output.append(note_to_int[sequence_out]) n_patterns = len(network_input) # reshape the input into a format compatible with LSTM layers network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1)) # normalize input network_input = network_input / float(n_vocab) network_output = np_utils.to_categorical(network_output) return (network_input, network_output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_sequences(notes, n_vocab):\n sequence_length = GLOBAL_SEQUENCE_LENGTH\n\n # Get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # Create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n network_input = []\n \n\n # create input sequences and the corresponding outputs\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n \n n_patterns = len(network_input)\n\n # Reshape the input into a format compatible with LSTM layers\n network_input = np.array(network_input)\n # Normalize input between -1 and 1\n network_input = (network_input - float(n_vocab)/2) / (float(n_vocab)/2)\n\n return network_input", "def prepare_sequences(notes, n_vocab):\n sequence_length = 100\n\n pitchnames = sorted(set(item for item in notes))\n\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n network_input = []\n network_output = []\n\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n\n network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n\n network_input = (network_input - float(n_vocab)/2) / (float(n_vocab)/2)\n network_output = np_utils.to_categorical(network_output)\n\n return (network_input, network_output)", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def sequencePreparation(self):\n #Calculation of the number of frames in function of the duration + LED list for the acquisition\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n else:\n print('Please select a valid mode of led sequence initialization')\n #Sending nb of frames to initialize the progress bar\n if type(self.nbFrames) == int:\n self.nbFramesSig.emit(self.nbFrames)\n\n print('acquisition Side : ', self.expRatio)\n #Saving the configuration of the experiment file (.json)\n self.savePath = cfgFileSaving(self.experimentName,\n self.nbFrames,\n self.duration,\n self.expRatio,\n self.acquMode,\n self.seqMode,\n self.rgbLedRatio,\n self.greenFrameInterval,\n round(1/self.cycleTime,2), #framerate\n self.folderPath,\n self.colorMode,\n self.mmc,\n 'Zyla') #WARNING > modulabilty (there is a way to get device label but it's not so easy)\n\n #initialization of the acquisition saving files : .tif 
(frames) and .txt (metadata)\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.experimentName,\n self.nbFrames,\n self.maxFrames)\n #send all informations to each LED driver\n self.arduinoSync()", "def prepare_data():\n gennet.prepare_data('Resnet50')", "def prepare_sequences(notes, pitch_names, n_vocab):\n \n # Length of note sequences to be created for model prediction seed\n sequence_length = 25\n \n # Create a dictionary to map note pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitch_names))\n\n # Create empty lists for note sequence inputs (many notes)\n network_input = []\n \n # Create input sequences (of length 'sequence_length')\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i:i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n\n # Number of different input sequence patterns\n n_patterns = len(network_input)\n\n # Reshape the input into a format compatible with LSTM layers\n normalized_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n \n # Normalize the network input by dividing by n_vocab (number of unique notes, rests, and chords)\n normalized_input = normalized_input / float(n_vocab)\n\n return (network_input, normalized_input)", "def prepare_data(seqs, addIdxNum=0, maxlen=None, win_size=1):\n # x: a list of sentences\n lengths = [len(s) for s in seqs]\n\n '''if maxlen is not None:\n new_seqs = []\n new_labels = []\n new_lengths = []\n for l, s, y in zip(lengths, seqs, labels):\n if l < maxlen:\n new_seqs.append(s)\n new_labels.append(y)\n new_lengths.append(l)\n lengths = new_lengths\n labels = new_labels\n seqs = new_seqs\n\n if len(lengths) < 1:\n return None, None, None'''\n\n n_samples = len(seqs)\n maxlen = numpy.max(lengths)\n\n '''\n n_samples : numbers of sentences\n '''\n\n x = numpy.zeros((maxlen, n_samples)).astype('int32')\n x_mask = numpy.zeros(((maxlen - addIdxNum) / win_size, n_samples)).astype(theano.config.floatX)\n\n for idx, s in enumerate(seqs):\n x[:lengths[idx], idx] = s\n x_mask[:((lengths[idx] - addIdxNum) / win_size), idx] = 1.\n\n #labels = numpy.asarray(labels).astype('int32')\n\n return x, x_mask, maxlen - addIdxNum", "def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence", "def _generateSequence(self, classifications, detections):\n det_len = len(detections)\n\n # Convert classifications and detections to input required for network\n seq_len = int(self.input_tensor.shape[1])\n fea_len = int(self.input_tensor.shape[2])\n input_data = np.zeros((seq_len,fea_len))\n\n # Add padding before and after sequence based on KEYFRAME_OFFSET\n input_data[:KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n input_data[det_len:det_len+KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n # Iterate through each frame of the data\n for idx, frame_detections in enumerate(detections):\n # We have already padded before and after\n seq_idx = idx + KEYFRAME_OFFSET\n\n # Skip through frames with no detections\n if len(frame_detections) == 0:\n input_data[seq_idx][0] = 1.0\n continue\n\n detection = frame_detections[0]\n classification = classifications[idx][0]\n\n # Do a size check on input\n # We expect either 1 or 2 models per sequence\n num_species = len(classification.species)\n num_cover = len(classification.cover)\n num_loc = len(detection.location)\n num_fea = num_species + num_cover + num_loc + 2\n num_of_models = 
int(fea_len / num_fea)\n\n if num_of_models != 2 and num_of_models != 1:\n raise Exception('Bad Feature Length')\n\n # Layout of the feature is:\n # Species, Cover, Normalized Location, Confidence, SSD Species\n # Optional duplicate\n\n for model_idx in range(num_of_models):\n # Calculate indices of vector based on model_idx\n fea_idx = model_idx * num_fea\n species_stop = fea_idx + num_species\n cover_stop = species_stop + num_cover\n loc_stop = cover_stop + num_loc\n ssd_conf = loc_stop\n ssd_species = ssd_conf + 1\n\n input_data[seq_idx,fea_idx:species_stop] = \\\n classification.species\n input_data[seq_idx,species_stop:cover_stop] = \\\n classification.cover\n input_data[seq_idx,cover_stop:loc_stop] = \\\n self._normalizeDetection(detection.location)\n input_data[seq_idx, ssd_conf] = detection.confidence\n input_data[seq_idx, ssd_species] = detection.species\n return input_data", "def preprocess_sequential():\n dataset = generate_sequential(\n num_users=100,\n num_items=1000,\n num_interactions=10000,\n concentration_parameter=0.01,\n order=3,\n )\n logger.info(\"Generated sequential dataset: %s\", dataset)\n\n test_percentage = 0.2\n logger.info(\n \"Generating a %d/%d train/test split\",\n 100 * (1 - test_percentage),\n 100 * test_percentage,\n )\n train, test = user_based_train_test_split(\n dataset, test_percentage=test_percentage, random_state=np.random.RandomState(87)\n )\n\n train = train.to_sequence()\n logger.info(\"Generated train sequence dataset: %s\", train)\n test = test.to_sequence()\n logger.info(\"Generated test sequence dataset: %s\", test)\n\n train_config_path = write_sequence_interactions(\n train,\n identifier=\"train\",\n )\n logger.info(\"Wrote train config to %s\", train_config_path)\n test_config_path = write_sequence_interactions(\n test,\n identifier=\"test\",\n )\n logger.info(\"Wrote test config to %s\", test_config_path)", "def prepare_learning(self):\n print 'Separating inputs and outputs...'\n self.inputs, self.outputs = extract_samples(self.matches,\n self.input_features,\n self.output_feature)\n\n print 'Normalizing data...'\n self.normalizer, self.inputs = normalize(self.inputs)\n\n print 'Separating train and test sets...'\n self.train_inputs, self.train_outputs, self.test_inputs, self.test_outputs = split_samples(self.inputs, self.outputs)\n\n print 'Building neural network...'\n self.network = buildNetwork(len(self.input_features),\n 2 * len(self.input_features),\n 1,\n outclass=SigmoidLayer,\n bias=True)\n\n print 'Building and filling pybrain train set object...'\n self.train_set = ClassificationDataSet(len(self.input_features))\n\n for i, input_line in enumerate(self.train_inputs):\n self.train_set.addSample(self.train_inputs[i],\n [self.train_outputs[i] - 1])\n\n self.trainer = BackpropTrainer(self.network, dataset=self.train_set,\n momentum=0.5, weightdecay=0.0)\n\n self.train_set.assignClasses()", "def prepare_gen(self, targets):\r\n pass", "def sequence_params(self):", "def prepare_data(seqs, labels, sentences, titles, imgs, maxlen=None) :\n # x: a list of sentences\n lengths = [len(s) for s in seqs]\n\n if maxlen is not None:\n new_seqs = []\n new_labels = []\n new_sentences = []\n new_imgs = []\n new_lengths = []\n for l, s, y, sents, img in zip(lengths, seqs, labels, sentences, imgs):\n if l < maxlen:\n new_seqs.append(s)\n new_labels.append(y)\n new_lengths.append(l)\n new_sentences.append(sents)\n new_imgs.append(img)\n lengths = new_lengths\n labels = new_labels\n seqs = new_seqs\n sentences = new_sentences\n imgs = new_imgs\n\n if 
len(lengths) < 1:\n return None, None, None, None, None\n\n n_samples = len(seqs)\n maxlen = numpy.max(lengths)\n\n x = numpy.zeros((maxlen, n_samples)).astype('int64')\n x_mask = numpy.zeros((maxlen, n_samples)).astype(theano.config.floatX)\n sentence = []\n title = []\n img = []\n for idx, [s, sent, t, i] in enumerate(zip(seqs, sentences, titles, imgs)):\n sentence.append(sent)\n title.append(t)\n img.append(i)\n x[:lengths[idx], idx] = s\n x_mask[:lengths[idx], idx] = 1.\n\n return x, x_mask, labels, sentence, title, img", "def prepare(self):\n # get data from file\n train_data, test_data = return_speechacts()\n # y are the speechacts or 'labels'\n y_train = [t.split(' ')[0] for t in train_data]\n y_test = [t.split(' ')[0] for t in test_data]\n # x are the sentences\n x_train = [\" \".join(t.split(' ')[1:]) for t in train_data]\n x_test = [\" \".join(t.split(' ')[1:]) for t in test_data]\n # use the tokenizer and padding from keras to assign arrays of integers\n # to sentences, out of vocabulary token is 1\n self.tokenizer_x = Tokenizer(oov_token=1)\n self.tokenizer_x.fit_on_texts(x_train + x_test)\n xt_train = self.tokenizer_x.texts_to_sequences(x_train)\n xt_train = pad_sequences(xt_train, maxlen=self.sentence_size,\n dtype='int32')\n xt_test = self.tokenizer_x.texts_to_sequences(x_test)\n xt_test = pad_sequences(xt_test, maxlen=self.sentence_size,\n dtype='int32')\n # vocab is the number of words in our vocabulary\n self.vocab = len(self.tokenizer_x.word_index) + 1\n # do the same for labels\n self.tokenizer_y = Tokenizer()\n self.tokenizer_y.fit_on_texts(y_train + y_test)\n yt_train = self.tokenizer_y.texts_to_sequences(y_train)\n yt_train = [t[0] for t in yt_train]\n yt_train = to_categorical(yt_train)\n yt_test = self.tokenizer_y.texts_to_sequences(y_test)\n yt_test = [t[0] for t in yt_test]\n yt_test = to_categorical(yt_test)\n self.x_train = x_train\n self.y_train = y_train\n self.x_test = x_test\n self.y_test = y_test\n self.xt_train = xt_train\n self.yt_train = yt_train\n self.xt_test = xt_test\n self.yt_test = yt_test", "def set_target_sequence(self):\n self.target_sequence = ''\n target_residues = []\n if len(self.target_residues) < 1:\n for i in range(0, self.target.size()):\n target_residues.append(i+1)\n self.target_residues = target_residues\n for resnum in self.target_residues:\n self.target_sequence += self.target.sequence(resnum, resnum)", "def prepare(params, samples):\r\n return", "def prepareData(self):\n\t\tprint ('')\n\t\tfrom keras.preprocessing.sequence import pad_sequences\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom keras.utils import to_categorical\n\t\timport numpy as np\n\n\t\tfrom sklearn.preprocessing import LabelBinarizer, LabelEncoder\n\n\t\tX_snt = [[self.word2idx[w] if w in self.word2idx else self.word2idx[self.word_unk_token] for w in s] for s in self.x_document]\n\t\ty_tag = [[self.tag2idx[t]] for t in self.y_document]\n\n\t\tX_snt = pad_sequences(maxlen=self.parameters['max_doc_len'], sequences=X_snt, padding='post', value=self.word2idx[self.word_pad_token])\n\t\ty_tag = to_categorical(y_tag, self.tags_len)\n\n\t\tprint (\"\\tRandom:\\t\", self.random)\n\t\tprint (\"\\tTest size:\\t\", self.split_train_test)\n\n\t\tself.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X_snt, y_tag, test_size=self.split_train_test, random_state=self.random)\n\n\t\tself.X_train = np.array(self.X_train)\n\t\tself.X_test = np.array(self.X_test)\n\t\tself.y_train = np.array(self.y_train)\n\t\tself.y_test = 
np.array(self.y_test)\n\n\t\tprint ('\\n\\tWords: {}\\t{}'.format(self.X_train.shape, self.X_test.shape) )\n\t\tprint ('\\tTags: {}\\t{}\\n'.format(self.y_train.shape, self.y_test.shape))", "def get_train_data(sequence_length=100):\n\n network_input = list()\n network_output = list()\n notes = read_binary_file(str(data_dir / \"notes.pkl\"))\n\n # get all pitch names\n pitch_names = sorted(set(item for item in notes))\n # Embedding #TODO use keras Embedding layer instead\n note_to_int = read_binary_file(metadata_dir / \"note_to_int.pkl\")\n vocab_size = len(set(note_to_int))\n\n # create input sequences and the corresponding outputs\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i : i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n # reshape the input into a format compatible with LSTM layers\n network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n # normalize input\n network_input = network_input / float(vocab_size)\n network_output = np_utils.to_categorical(network_output)\n\n with open(metadata_dir / \"sequence_in.pkl\", \"wb\") as f:\n pickle.dump(network_input, f)\n with open(metadata_dir / \"sequence_out.pkl\", \"wb\") as f:\n pickle.dump(network_output, f)\n return network_input, network_output, vocab_size", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def setup_training(self):\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.total_rewards = []\n self.rewards = []\n self.steps = []\n self.average_rewards = []\n self.average_steps = []\n self.model = initialize_model()\n self.invalid_actions = 0\n self.average_invalid_actions = []\n self.total_invalid_actions = []", "def __init__(self, sequences, config=Config()):\n self.sequences = sequences\n self.config = config\n self.matrix = self.initializeMatrix(sequences)\n self.enterelement([0]*len(self.sequences), Score([0]*len(self.sequences), 0)) # Set the origin of the matrix to 0\n self.counter = [1] + ([0] * (len(sequences)-1)) # Creates a counter which is used to transverse a matrix of arbitrary size", "def setup_training(self):\n # Example: Setup an array that will note transition tuples\n # (s, a, r, s')\n self.transitions = deque(maxlen=TRANSITION_HISTORY_SIZE)\n self.repition = deque(maxlen=3)\n #self.samefeature = deque(maxlen=REPETION_SEARCH)\n self.states = np.array([])\n self.coins_collected = REPETION_SEARCH\n max_distance = 3 # PUES CAMBIAR ESTO PARA RESULTADOS DIFERENTES, TAMBIEN EN CALLBACKS\n possible_outcomes = max_distance * 2 + 1\n q_values = np.ones((16*possible_outcomes**2 * max_distance*2 + 16*4*30 + 1, 6))\n #q_values = np.ones((201411,6))\n q_values[:, 5] = -40000\n q_values[:, 4] = -40000\n if self.train and os.path.isfile(\"q_values.npy\"):\n #np_load_old = np.load\n # modify the default parameters of np.load\n #np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)\n # call load_data with allow_pickle implicitly set to true\n q_values = np.load(\"q_values.npy\")\n print(\"File was loaded\")\n # restore np.load for future normal 
usage\n #np.load = np_load_old\n self.q_values = q_values", "def train(self, uTrainingSequences):\n layers_type = [INTERMEDIATE for s in self.layers]\n layers_type[0] = ENTRY\n layers_type[-1] = OUTPUT\n \n ## for each layer\n for i in range(len(self.layers)): \n\n ## for each training sequence\n for pattern in uTrainingSequences[layers_type[i]]:\n (input_raw, input_info) = pattern\n \n if i == ENTRY: self.expose(input_raw, uJustUpperLeftCorner=True)\n else: self.expose(input_raw)\n \n for m in range(i):\n self.layers[m].inference()\n self.propagate(m, m + 1)\n\n self.layers[i].train(input_info)\n\n self.layers[i].finalize()", "def __init__(self,\n data,\n num_steps,\n num_test_steps=None,\n seed=None):\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever graph level seed is returned\n np.random.seed(seed1 if seed is None else seed2)\n \n #inps, outs = slide_window(data, num_steps)\n #inps = data[:,:num_steps,:]\n #outs = data[:,1:num_steps+1,:]\n \n time_len = data.shape[1]\n if num_test_steps is None:\n num_test_steps= time_len-num_steps \n enc_inps = data[:,:num_steps, :]\n dec_inps = np.insert(data[:,num_steps:num_steps+num_test_steps-1,:], 0, SOS, axis=1)\n #dec_outs = np.insert(data[:,num_steps:num_steps+num_test_steps,:], num_test_steps, EOS, axis=1)\n dec_outs = data[:,num_steps:num_steps+num_test_steps,:]\n\n assert enc_inps.shape[0] == dec_outs.shape[0], (\n 'inps.shape: %s outs.shape: %s' % (inps.shape, outs.shape))\n\n\n self._num_examples = enc_inps.shape[0]\n self._enc_inps = enc_inps\n self._dec_inps = dec_inps\n self._dec_outs = dec_outs\n self._epochs_completed = 0\n self._index_in_epoch = 0", "def _reconstruct(self, num_samples=None):", "def create_sequences(dataset, SEQ_LEN, feature_to_predict, scaler):\n\n feature_to_predict_scaler = scaler\n\n # We need to iterate over the values, store all the features, NOT THE LABEL, then append it to list when we\n # reach the SEQ_LEN\n for col_name in dataset.columns:\n dataset.dropna(inplace=True)\n if col_name == feature_to_predict:\n dataset[col_name] = feature_to_predict_scaler.fit_transform(np.array(dataset[col_name]).reshape((len(dataset[col_name]), 1)))\n else:\n dataset[col_name] = scaler.fit_transform(np.array(dataset[col_name]).reshape((len(dataset[col_name]), 1)))\n\n dataset.dropna(inplace=True)\n\n sequential_data = []\n prev_days = deque(maxlen=SEQ_LEN)\n\n for val in dataset.values:\n prev_days.append([n for n in val[:-1]])\n if len(prev_days) == SEQ_LEN:\n sequential_data.append([np.array(prev_days), val[-1]])\n\n # If user picks a test size which we cannot create a sequence for, we will append whatever days we have,\n # then we will find the mean of the each feature column and label column we have then fill the rest of the\n # columns with the mean.\n if len(sequential_data) < 1:\n missing_days = SEQ_LEN - len(prev_days)\n mean_values = dataset.mean(axis=0)\n\n features_mean_value = mean_values[:-1]\n target_mean_value = mean_values[-1]\n\n for i in range(missing_days):\n # Fill the rest with mean value\n temp = []\n for mean in features_mean_value:\n temp.append(mean)\n prev_days.append(temp)\n\n sequential_data.append([np.array(prev_days), target_mean_value])\n\n random.shuffle(sequential_data)\n\n X = []\n y = []\n\n for seq, target in sequential_data: # going over our new sequential data\n X.append(seq) # X is the features\n y.append(target) # y is the label\n\n return np.array(X), y, feature_to_predict_scaler", "def train_model_encdec(all_train_data: List[Example], 
test_data: List[Example], input_indexer, output_indexer, args) -> Seq2SeqSemanticParser:\n print(\"==========START DATA PROCESSING=========\")\n input_max_len = np.max(np.asarray([len(ex.x_indexed) for ex in all_train_data]))\n output_max_len = np.max(np.asarray([len(ex.y_indexed) for ex in all_train_data]))\n print(\"Train length: %i\" % input_max_len)\n print(\"Train output length: %i\" % output_max_len)\n\n if args.debug:\n output_indexer = input_indexer\n\n # Configuration\n attn_model = args.attn_model\n hidden_size = 100\n n_layers = 2\n dropout = 0.1\n teacher_forcing_ratio = 0.5\n learning_rate = 0.0001\n decoder_learning_ratio = 5.0\n\n # Initialize models\n encoder = EncoderRNN(len(input_indexer), hidden_size, n_layers, dropout=dropout)\n decoder = LuongAttnDecoderRNN(attn_model, hidden_size, len(output_indexer), n_layers, dropout=dropout)\n\n # Initialize optimizers and criterion\n encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)\n criterion = nn.CrossEntropyLoss()\n\n print(\"==========START TRAINING=========\")\n start = time.time()\n print_loss_total = 0\n for epoch in range(1, args.epochs+1):\n for iter in range(int(len(all_train_data)/args.batch_size)):\n\n input_batches, input_lengths, target_batches, target_lengths = random_batch(all_train_data, args.batch_size,\n input_max_len, output_max_len, input_indexer, output_indexer)\n\n if args.debug:\n target_batches = input_batches\n target_lengths = input_lengths\n\n # Run the train function\n loss, ec, dc = train_batch(\n input_batches, input_lengths, target_batches, target_lengths,\n encoder, decoder, encoder_optimizer, decoder_optimizer, output_max_len,\n criterion, args\n )\n print_loss_total += loss\n print('%s (%d %d%%) %.4f' % (time_since(start, epoch / args.epochs),\n epoch, epoch / args.epochs * 100, print_loss_total))\n print_loss_total = 0\n\n return Seq2SeqSemanticParser(encoder, decoder, input_indexer, output_indexer, input_max_len, output_max_len)", "def _loopPreparation(self, stimNumber):\n self.nbFrames=10000 #TO DO --> better place for this line of code\n\n self.stimName= self.experimentName+'_S%(number)03d' % {\"number\": stimNumber} #%02d return a 2 char string : 1-->01\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.stimName,\n self.nbFrames,\n self.maxFrames)\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n self.arduinoSync()", "def sequence(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['sequence']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n label = \"SEQ\"\n for t in ['C','L']:\n run_label = label+'_'+t\n t1Mag_label = '{0}1MAG'.format(t)\n t2Mag_label = '{0}2MAG'.format(t)\n t3Mag_label = '{0}3MAG'.format(t)\n t1Ang_label = '{0}1ANG'.format(t)\n t2Ang_label = '{0}2ANG'.format(t)\n t3Ang_label = '{0}3ANG'.format(t)\n distillate_label = \"{0}-ALL\".format(t)\n\n # header\n inigen.emit_run_header(run_label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_1Mag_label = t1Mag_label\n dep_1Mag_name = fields['deps'][0]\n dep_1Mag_uuid = self.uuid_map[t1Mag_label]\n\n dep_2Mag_label = t2Mag_label\n dep_2Mag_name = fields['deps'][1]\n dep_2Mag_uuid = self.uuid_map[t2Mag_label]\n\n dep_3Mag_label = t3Mag_label\n dep_3Mag_name = fields['deps'][2]\n dep_3Mag_uuid = 
self.uuid_map[t3Mag_label]\n\n dep_1Ang_label = t1Ang_label\n dep_1Ang_name = fields['deps'][3]\n dep_1Ang_uuid = self.uuid_map[t1Ang_label]\n\n dep_2Ang_label = t2Ang_label\n dep_2Ang_name = fields['deps'][4]\n dep_2Ang_uuid = self.uuid_map[t2Ang_label]\n\n dep_3Ang_label = t3Ang_label\n dep_3Ang_name = fields['deps'][5]\n dep_3Ang_uuid = self.uuid_map[t3Ang_label]\n \n deps = [[dep_1Mag_label, dep_1Mag_name, dep_1Mag_uuid],\n [dep_2Mag_label, dep_2Mag_name, dep_2Mag_uuid],\n [dep_3Mag_label, dep_3Mag_name, dep_3Mag_uuid],\n [dep_1Ang_label, dep_1Ang_name, dep_1Ang_uuid],\n [dep_2Ang_label, dep_2Ang_name, dep_2Ang_uuid],\n [dep_3Ang_label, dep_3Ang_name, dep_3Ang_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"SEQ\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[\"ZER_{0}ANG\".format(t)] = emitted[-9][-36:]\n output_uuid_map[\"ZER_{0}MAG\".format(t)] = emitted[-8][-36:]\n output_uuid_map[\"POS_{0}ANG\".format(t)] = emitted[-7][-36:]\n output_uuid_map[\"POS_{0}MAG\".format(t)] = emitted[-6][-36:]\n output_uuid_map[\"NEG_{0}ANG\".format(t)] = emitted[-5][-36:]\n output_uuid_map[\"NEG_{0}MAG\".format(t)] = emitted[-4][-36:]\n output_uuid_map[\"UNB_{0}NEG\".format(t)] = emitted[-3][-36:]\n output_uuid_map[\"UNB_{0}ZER\".format(t)] = emitted[-2][-36:]\n\n filename = \"{0}/SEQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map" ]
[ "0.6553083", "0.6475834", "0.6432852", "0.6399662", "0.6352282", "0.6343611", "0.6190251", "0.6177846", "0.61391133", "0.6133018", "0.611801", "0.6101248", "0.60848916", "0.6037434", "0.60365427", "0.60153496", "0.60053694", "0.5964059", "0.59410936", "0.5940806", "0.59196734", "0.59047794", "0.58991665", "0.5893309", "0.5865546", "0.5844906", "0.58216524", "0.5816076", "0.57788056", "0.5761567" ]
0.6686778
0
Add last layer to the convnet
def add_new_last_layer(base_model, nb_classes): x = base_model.output x = GlobalAveragePooling2D()(x) x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer model = Model(inputs=base_model.input, outputs=predictions) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_new_last_layer(base_model, nb_classes):\r\n x = base_model.output\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dense(fc_size, activation='relu')(x) #new FC layer, random init\r\n predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer\r\n model = Model(inputs=base_model.input, outputs=predictions)\r\n return model", "def add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(FC_SIZE, activation='relu')(x) # new FC layer, random init\n predictions = Dense(nb_classes, activation='softmax')(x) # new softmax layer\n model = Model(input=base_model.input, output=predictions)\n return model", "def add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(num_fc_neurons, activation='relu')(x)\n x = Dropout(dropout)(x)\n x = Dense(num_fc_neurons, activation='relu')(x)\n x = Dropout(dropout)(x)\n predictions = Dense(nb_classes, activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n return model", "def add_new_last_layer(base_model, nb_classes):\r\n x = base_model.output\r\n x = AveragePooling2D((8, 8), border_mode='valid', name='avg_pool')(x)\r\n x = Dropout(0.25)(x)\r\n x = Flatten()(x)\r\n predictions = Dense(7, activation='softmax')(x)\r\n model = Model(input=base_model.input, output=predictions)\r\n return model", "def add_layer(self, freeze = True, add = True):\n if add:\n self.num_layers += 1\n if self.conv_dim == 1:\n new_cnn = layers.Conv1D(self.n_filters,\n (self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0], self.n_filters),\n padding=\"same\",\n name='cnn_1d_{}'.format(self.num_layers-1),\n kernel_initializer = initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n elif self.conv_dim == 2:\n new_cnn = layers.Conv2D(self.n_filters,\n (self.n_kernels, self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n padding=\"same\",\n name='cnn_2d_{}'.format(self.num_layers-1),\n kernel_initializer=initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n self.list_cnn.append(new_cnn)\n\n if freeze:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = False\n else:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = True", "def append_or_update_layer(self, layer: 'Layer'):\n if layer.has_pos():\n self.layers[layer.get_pos()] = layer\n else:\n self.layers.append(layer)\n layer.set_pos(self.get_num_layers() - 1)", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def _append_conv_layer(self, param):\n self._parameterized_conv_layers.append(\n tf.keras.layers.Conv3D(\n padding='same',\n use_bias=False,\n kernel_regularizer=self._kernel_regularizer,\n **param,\n ))\n norm_layer_params = self._build_norm_layer_params(param)\n self._parameterized_conv_layers.append(self._norm(**norm_layer_params))\n\n relu_layer_params = self._build_activation_layer_params(param)\n self._parameterized_conv_layers.append(\n tf.keras.layers.Activation('relu', **relu_layer_params))", "def add_layer(self, func, *args, **kwargs):\n scope_name = self.name + '_layer' + str(self.layer_count)\n with tf.variable_scope(scope_name, reuse=self.reuse):\n 
self.last_layer = func(self.last_layer, *args, **kwargs)\n self.layer_seq += [self.last_layer]\n pass\n self.layer_count += 1\n return self.last_layer", "def plus(self, layer):\n\n input1 = self.node(layer)\n if not input1:\n return\n LOGGER.debug('Plus layer to last:%s', layer)\n if not self.last_node:\n self.last_node = nuke.nodes.Constant()\n\n if layer not in self.layers():\n input1 = nuke.nodes.Shuffle(inputs=[input1], out=layer)\n self.last_node = nuke.nodes.Merge2(\n inputs=[self.last_node, input1], operation='plus',\n also_merge=layer if layer not in self.layers() else 'none',\n label=utf8(self.l10n(layer)),\n output='rgb')", "def append(self, layer):\n self.layers.append(layer)", "def test_get_last_layer(self):\n\t\t\n\t\tprint(\"test_get_last_layer\")\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\t\tprint(num_layers, ww_layer.name, ww_layer.layer_id)\n\t\t\t\n\t\tself.assertEqual('layer4.1.conv2', ww_layer.name)\n\t\t# layer id is 40 because we skup batch normlayers\n\t\tself.assertEqual(40, ww_layer.layer_id)\n\n\t\treturn", "def LayerAddflatten(bottom_model, num_classes):\n top_model = bottom_model.output\n top_model = Flatten(name = \"flatten\")(top_model)\n top_model = Dense(526, activation = \"relu\")(top_model)\n top_model = Dense(263, activation = \"relu\")(top_model)\n top_model = Dense(num_classes, activation = \"sigmoid\")(top_model)\n return top_model", "def add_layer(self, layer):\r\n\r\n if not isinstance(layer, Layer):\r\n raise ValueError('layer must be a Layer object')\r\n\r\n # the first layer added define the input dimension of the neural network\r\n if len(self.layers) == 0:\r\n self.input_dimension = layer.get_num_input()\r\n self.topology.append(layer.get_num_input())\r\n # the new layer must have an input dimension equal\r\n # to the number of units in the last layer added\r\n elif layer.get_num_input() != self.output_dimension:\r\n raise ValueError(\r\n \"The number of input for this new layer must be equal to previous layer\")\r\n\r\n self.topology.append(layer.get_num_unit())\r\n\r\n # the last layer inserted define the output dimension\r\n self.output_dimension = layer.get_num_unit()\r\n\r\n self.layers.append(layer)", "def add_conv_layer(self, input_layer, hyperparams, func='relu', bn=True):\n W = self._weight_variable(shape=hyperparams[0])\n b = self._bias_variable(shape=hyperparams[1])\n if bn:\n return self._batch_normalize(\n self._nonlinearity(func)(self._conv2d(input_layer, W) + b))\n elif not bn:\n return self._nonlinearity(func)(self._conv2d(input_layer, W) + b)", "def add(self, layer):\n layer.set_dtype(self.dtype)\n self.layers = np.append(self.layers, layer)", "def get_last_conv_name(net):\n layer_name = None\n for name, m in net.named_modules():\n if isinstance(m, nn.Conv2d):\n layer_name = name\n return layer_name", "def layer(self, x, conv, batch_norm, padding=(0, 0, 31, 32)):\r\n x = F.pad(x, padding)\r\n x = conv(x)\r\n x = F.relu(x)\r\n x = batch_norm(x)\r\n return F.max_pool2d(x, (2, 1), (2, 1))", "def add_layer(self, layer):\n self.__layers.append(layer)", "def add(self, layer):\n if len(self.layers) == 0:\n if not layer.n_inputs:\n raise Exception('Need to have n_inputs for layer.')\n else:\n layer.n_inputs = self.layers[-1].units\n self.layers.append(layer)", "def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer", "def add_pooling_layer(self, input_layer):\n return 
self._max_pool(input_layer)", "def add_layer(self, layer):\n assert isinstance(layer, torch.nn.Module)\n setattr(self, 'layer'+str(self._layer_counter), layer)\n self._layer_counter += 1\n # layer indexing : layer 0 is closest to input", "def add_layer(self, layer: layers.Layer) -> layers.Layer:\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n return layer", "def _layer_forward(self, z_prev, layer, use_relu=True):\n\n self.__dict__['z_prev_'+layer] = z_prev\n b = self.__getattribute__('b_'+layer)\n w = self.__getattribute__('w_'+layer)\n\n dim_out = w.shape[0]\n\n # simplification due to np broadcasting\n a = [email protected] + b\n\n z = relu(a) if use_relu else a\n\n return (a, z)", "def putLayer(self, layer):\t\n\t\t# force use different address id ( prevent use same defined layer more than once, eg: bottleneck in torchvision)\n\t\t# tmp_layer = copy.deepcopy(layer)\n\t\tlayer_id = id(layer)\n\t\tself.tmp_list.append(layer)\n\t\tlayer_id = id(self.tmp_list[-1])\n\t\tif layer_id in self.graph:\n\t\t\ttmp_layer = copy.deepcopy(layer)\n\t\t\tself.tmp_list.append(tmp_layer)\n\t\t\t# layer_id = id(self.tmp_list[-1])\n\t\t\tlayer_id = id(tmp_layer)\n\n\t\tself.graph[layer_id] = layer\n\t\tself.bottoms[layer_id] = [self.cur_id]\n\t\tself.cur_id = layer_id\n\t\t# del layer, tmp_layer, layer_id", "def addLayer(self, layer):\n self.layers.append(layer)", "def test_get_last_layer(self):\n\t\t\n\t\tprint(\"test_get_last_layer\")\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\t\tprint(num_layers, ww_layer.name, ww_layer.layer_id)\n\t\t\t\n\t\tself.assertEqual('fc', ww_layer.name)\n\t\t# layer id is 40 because we skup batch normlayers\n\t\tself.assertEqual(40, ww_layer.layer_id)\n\n\t\treturn", "def __init__(self):\n super(GatherLastLayer, self).__init__()", "def addConv2dLayer(mainGraph,\n inputOperation=None,\n nFilters=1,\n filterHeigth=2,\n filterWidth=2,\n padding=\"SAME\",\n convStride=1,\n activation=ReLUActivation,\n batchNormalisation=False,\n pooling=MaxPoolOperation,\n poolHeight=2,\n poolWidth=2,\n poolStride=2):\n\n N, C, H, W = inputOperation.shape\n\n w = generateRandomVariable(shape=(nFilters, C, filterHeigth, filterWidth),\n transpose=False, nInputs=(filterHeigth * filterWidth * C))\n b = generateRandomVariable(shape=(1, nFilters, 1, 1), transpose=False, nInputs=1)\n\n filterWop = mainGraph.addOperation(w, doGradient=True, feederOperation=False)\n opConv2d = mainGraph.addOperation(Conv2dOperation(\n inputOperation, filterWop, stride=convStride, paddingMethod=padding))\n\n filterBop = mainGraph.addOperation(b, doGradient=True, feederOperation=False)\n addConv2d = mainGraph.addOperation(AddOperation(opConv2d, filterBop))\n\n if (batchNormalisation):\n beta = 
mainGraph.addOperation(generateRandomVariable((1, *addConv2d.shape[1:])), doGradient=True)\n gamma = mainGraph.addOperation(generateRandomVariable((1, *addConv2d.shape[1:])), doGradient=True)\n bnorm = mainGraph.addOperation(BatchNormalisationOperation(addConv2d, beta, gamma))\n else:\n bnorm = addConv2d\n\n actop = mainGraph.addOperation(activation(bnorm),\n doGradient=False,\n finalOperation=False)\n\n poolOP = mainGraph.addOperation(pooling(inputA=actop,\n poolHeight=poolHeight,\n poolWidth=poolWidth,\n stride=poolStride))\n\n return poolOP" ]
[ "0.7642628", "0.75002295", "0.7415982", "0.73166895", "0.66593826", "0.6591134", "0.6555395", "0.6465286", "0.6461949", "0.6428054", "0.6412877", "0.6385468", "0.6374509", "0.63645524", "0.62897575", "0.6263358", "0.62580836", "0.6250857", "0.6193484", "0.6168419", "0.61653715", "0.61430115", "0.61354536", "0.6124465", "0.6083746", "0.60824794", "0.60235244", "0.60210055", "0.59961325", "0.59949386" ]
0.7567512
1
Removes an item from a multidict key.
def remove_from_multidict(d: MultiDict, key: str, item: typing.Any):
    # works by popping all, removing, then re-adding into
    i = d.popall(key, [])
    if item in i:
        i.remove(item)
    for n in i:
        d.add(key, n)
    return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def remove_item(self, item):\r\n\r\n for key in self._inner_dict:\r\n if item in self._inner_dict[key]:\r\n idx = self._inner_dict[key].index(item)\r\n del self._inner_dict[key][idx]", "def __delitem__(self, key: tuple):\n s, a = key\n del self.store[s][a]", "def __delitem__(self, key):\r\n key = self.key(key)\r\n if key in self.data_with_same_key:\r\n if len(self.data_with_same_key[key]) == 1:\r\n self.data[key] = self.data_with_same_key.pop(key)[0]\r\n else:\r\n self.data[key] = self.data_with_same_key[key].pop(-1)\r\n else:\r\n del self.data[key]", "def __delitem__(self, key):\n try:\n kvp = self.keyvaluepair_set.get(key=key)\n except KeyValuePair.DoesNotExist:\n raise KeyError\n else:\n kvp.delete()", "def __delitem__(self, key):\n self.f_remove(key)", "def remove_item(self, key, item):\n self[key].remove(item)\n self._remove_reverse_mapping(item, key)", "def __delitem__(self, key):\n if isinstance(key, types.SliceType):\n # FIXME: efficiency?\n keys = self._sequence[key]\n for entry in keys:\n dict.__delitem__(self, entry)\n del self._sequence[key]\n else:\n # do the dict.__delitem__ *first* as it raises\n # the more appropriate error\n dict.__delitem__(self, key)\n self._sequence.remove(key)", "def _map___delitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n self.erase(self.find(key))\n return", "def remove(self, key):", "def remove_from_dictionary(self,dictionary,*keys):\r\n for key in keys:\r\n if key in dictionary:\r\n value = dictionary.pop(key)\r\n logger.info(\"removed item with key '%s' and value '%s'\" %(key,value))\r\n else:\r\n logger.info(\"Key '%s' not found\" %(key))", "def remove(self, key: int | str):\n self.__delitem__(key)", "def __delitem__(self, key: tuple):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n del self.store[s][a]", "def __delitem__(self, key: T) -> None:\n self.delete(key)", "def remove_item_from_all_keys(self, item):\n for key in self._reverse_store[item]:\n self[key].remove(item)\n del self._reverse_store[item]", "def remove(self, item):\n del self._dict[item]", "def __delitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n for k in sorted(key, reverse=True):\n operator.__delitem__(self, k)\n else:\n # Handles slices and ints. 
Other key types will fail.\n list.__delitem__(self, key)\n except Exception as first_exception:\n try:\n if isinstance(key, list):\n for i, k in enumerate(key):\n operator.__delitem__(self[i], k)\n elif isinstance(key, tuple):\n try:\n for x in self:\n operator.__delitem__(x, key)\n except Exception:\n for x in self:\n for k in key:\n operator.__delitem__(x, k)\n else:\n for x in self:\n operator.__delitem__(x, key)\n except Exception as second_exception:\n raise TypeError('Failed to apply index to self or elements.\\nself exception: %s\\nelements exception: %s' % (str(first_exception), str(second_exception)))\n\n # Allow chaining of set ops when using apply('__delitem__', k) and apply(operators.__delitem__, k)\n return self", "def remove(self, key):\n ha = self.myhash(key)\n if key in self.hashmap[ha][0]:\n i = self.hashmap[ha][0].index(key)\n self.hashmap[ha][0].pop(i)\n self.hashmap[ha][1].pop(i)", "def __delitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> None:\n self.contents = {i: self.contents[i] for i in self.contents \n if i not in more_itertools.always_iterable(key)}\n return", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n\n # If key is in hash map\n if self.__contains__(key):\n\n # Get hashed key\n i = self.hash(key)\n\n # Get chain index of key value pair\n chain_idx = self.keys_ref[i].index(key)\n\n # Delete value associated with key in hash map\n del self.table[i][chain_idx]\n\n # Delete key from hash table\n del self.keys_ref[i][chain_idx]\n\n # Remove key from set of keys\n self.keys_set.remove(key)\n\n # Decrement size\n self.size -= 1\n\n # If key not in hash map\n else:\n\n # Raise error\n raise KeyError(key)", "def remove(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h] = None", "def removeDic(dic, key):\n pass", "def __delitem__(self, key):\n self.deleteAttributes([key])", "def __delitem__(self, key):\n try:\n del self._maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))", "def delete_key_HELPER(data_dict, key_list, key_to_delete):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict.pop(key_to_delete)\n return data_dict", "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n return\n dic.pop(dkey, None)", "def delete(self, key):\n try: \n self.pop(key)\n \n except KeyError: \n raise KeyError", "def __delitem__(self, key, *args, **kwargs):\n self._del(key, *args, **kwargs)" ]
[ "0.7515226", "0.75018364", "0.73690116", "0.7158925", "0.70968735", "0.70163935", "0.699003", "0.69833624", "0.69397354", "0.6935612", "0.69065166", "0.68776804", "0.6868091", "0.68556577", "0.6816006", "0.68149227", "0.68076974", "0.6798965", "0.6796473", "0.6791444", "0.6791444", "0.67830443", "0.6767725", "0.67243993", "0.67186314", "0.67152953", "0.66868013", "0.6682816", "0.66673446", "0.66640985" ]
0.85784006
0
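For illustration only, a minimal usage sketch of the `remove_from_multidict` document above, assuming the `MultiDict` type comes from the third-party `multidict` package (the keys and values are invented):

    from multidict import MultiDict

    d = MultiDict([("tags", "red"), ("tags", "blue"), ("name", "box")])
    remove_from_multidict(d, "tags", "blue")
    print(d.getall("tags"))   # ['red']
    print(d.getall("name"))   # ['box']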
Base64ifies an image to send to Discord.
def base64ify(image_data: bytes):
    # Convert the avatar to base64.
    mimetype = imghdr.what(None, image_data)
    if not mimetype:
        raise ValueError("Invalid image type")
    b64_data = base64.b64encode(image_data).decode()
    return "data:{};base64,{}".format(mimetype, b64_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def b64_image(self) -> bytes:\n buffer = BytesIO()\n self.image.save(buffer, \"PNG\") \n im_b64 = base64.b64encode(buffer.getvalue())\n im_b64 = b\"data:image/png;base64,\" + im_b64\n return im_b64", "def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')", "def img_to_base64(img):\n with io.BytesIO() as output:\n img.save(output, format=\"PNG\")\n img_string = base64.b64encode(output.getvalue())\n return img_string.decode(\"utf-8\")", "def get_image_base64_str(self, message: ImageMessage) -> str:\n return ImageContentProcessor.binary_img_to_base64_str(self._core.get_message_content(str(message.id)).content)", "def process_image(self, base64_string: str) -> str:\n self.convert_base64_to_image(base64_string)\n self.corp_image()\n self.change_image_pixels()\n return self.image_to_string()", "def image_to_base64str(image):\n file_bytes = image.file.read()\n base64_img_str = 'data:image;base64, '\n base64_img_str += str(base64.b64encode(file_bytes), 'utf-8')\n return base64_img_str", "def encode_image(image):\n return base64.b64encode(image).decode('ascii')", "def save_img_base64(_preds):\n img = Image.fromarray(_preds)\n buff = BytesIO()\n img.save(buff, format=\"JPEG\")\n return base64.b64encode(buff.getvalue())", "def convert_base64_to_image(self, image_in_base64):\n image_in_base64 = str(image_in_base64).replace('data:image/jpeg;base64,', '')\n image_data = base64.b64decode(image_in_base64)\n\n # Save image as image file\n with open(self.captcha_image_filename, 'wb') as file:\n file.write(image_data)", "def image_to_base64(pixbuf, activity):\n _file_name = os.path.join(get_path(activity, 'instance'), 'imagetmp.png')\n if pixbuf != None:\n pixbuf.save(_file_name, \"png\")\n _base64 = os.path.join(get_path(activity, 'instance'), 'base64tmp')\n _cmd = \"base64 <\" + _file_name + \" >\" + _base64\n subprocess.check_call(_cmd, shell=True)\n _file_handle = open(_base64, 'r')\n _data = _file_handle.read()\n _file_handle.close()\n return _data", "def encode_image(self, image):\n\t\t# Encode in Base64 and print encoded string for copying\n\t\twith open(image, 'rb') as image:\n\t\t\tprint(\"[+] Image has been encoded. 
Copy this string:\\n\")\n\t\t\timg_64 = '<img src=\"data:image/png;base64,{}\">'.format(base64.b64encode(image.read()).decode('ascii'))\n\t\t\tprint(img_64 + \"\\n\")\n\t\t\tprint(\"[+] End of encoded string.\")", "def get_image_base64(path):\n with open(path, 'r') as img:\n return base64.b64encode(img.read())", "def formatImage(imgData):\n imgstr = re.search(b'base64,(.*)', imgData).group(1)\n with open('output.png','wb') as output:\n output.write(base64.decodebytes(imgstr))", "def picture_image_base64(self, picture_image_base64):\n\n self._picture_image_base64 = picture_image_base64", "def image_to_base64(image, format='JPEG'):\n in_mem_file = io.BytesIO()\n image.save(in_mem_file, format=format)\n # reset file pointer to start\n in_mem_file.seek(0)\n img_bytes = in_mem_file.read()\n base64_bstr = base64.b64encode(img_bytes)\n return base64_bstr.decode('ascii')", "def base64_to_image(base64_image):\n return Image.open(io.BytesIO(base64.b64decode(base64_image)))", "def write_image(image_base64: str, filepath: pathlib.Path):\n with open(filepath, \"wb\") as f:\n f.write(base64.b64decode(image_base64))", "def send_image(image: PIL.Image.Image):\n import base64\n import io\n\n image = image.convert(\"RGB\")\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n image_b64 = base64.b64encode(buffer.getvalue())\n send(\"image\", image_b64.decode(\"utf-8\"))", "def storeImageFromBase64(self, data64: str):\n self.data64 = data64\n self._processImageStore()", "def getbase64(nparr,):\n if type(nparr) == type({}):\n nparr = nparr['img']\n im = Image.fromarray(nparr)\n buf = BytesIO()\n im.save(buf,format=\"JPEG\")\n return base64.b64encode(buf.getvalue()).decode('ascii')", "def _get_image(x):\n return b64encode(x).decode('ascii')", "def picture_base64(self) -> str:\n return self.properties.get(MessageField.PICTURE.value)", "def base64_string(self) -> global___Expression:", "def encode(output_image_path):\n with open(output_image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode('utf-8')\n return encoded_string", "def img2base64str(image, ext=\".png\"):\n assert image.shape == (256, 256, 3)\n assert ext in {\".png\", \".jpg\"}\n\n buffer = cv2.imencode('.png', image)[1]\n return b64encode(buffer)", "def convertImage(img):\n return '\\\\includegraphicsdata{%s}' % \":\".join([\n 'data',\n img.contentType,\n \"base64,%s\" % img.data.encode(\"base64\").replace(\"\\n\", \"\"),\n ])", "def convert_to_base64(image_file):\n with open(image_file, 'rb') as f:\n jpeg_bytes = base64.b64encode(f.read()).decode('utf-8')\n predict_request = '{\"instances\" : [{\"b64\": \"%s\"}]}' % jpeg_bytes\n # Write JSON to file\n with open(OUTPUT_FILE, 'w') as f:\n f.write(predict_request)\n return predict_request", "def encodedImage(imageFile):\n imageFile = \"\".join([METRICS_PATH, imageFile])\n encoded = base64.b64encode(open(imageFile, 'rb').read())\n return 'data:image/jpg;base64,{}'.format(encoded.decode())", "def base64_encode_image(inArray):\n imgDat = [base64_encode_array(inArray).decode(\"utf-8\")]\n imgType = str(inArray.dtype)\n imgShape = inArray.shape\n return json.dumps([ imgDat, imgType, imgShape ])", "def send_image(self, device_id, image):\n self.logger.debug(f\"{device_id}: sending processed image!\")\n base64_img = base64.b64encode(\n cv2.imencode('.jpg', image)[1].tostring())\n self.socketio.emit(\n \"image\", {\"message\": base64_img}, room=f\"device-{device_id}\")" ]
[ "0.7444348", "0.7358379", "0.7203927", "0.7129906", "0.7109309", "0.7104519", "0.7082573", "0.6939985", "0.6869495", "0.6822275", "0.6782609", "0.6778094", "0.677527", "0.6764435", "0.6751479", "0.67436206", "0.6699742", "0.66778314", "0.660669", "0.6593207", "0.65859663", "0.6575637", "0.6520984", "0.65121025", "0.64702857", "0.6433467", "0.6427487", "0.6426946", "0.638398", "0.6293807" ]
0.7620646
0
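For illustration only, a hedged sketch of calling the `base64ify` document above, assuming its `base64` and `imghdr` imports are in scope; the byte string below is just a hand-rolled PNG signature so that `imghdr` (deprecated in newer Python versions) reports a type, not a real image:

    fake_png = b"\x89PNG\r\n\x1a\n" + b"\x00" * 16  # minimal bytes imghdr identifies as 'png'
    data_uri = base64ify(fake_png)
    print(data_uri[:22])  # data:png;base64,iVBORw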
Converts a Discord-formatted timestamp to a datetime object.
def to_datetime(timestamp: str) -> datetime.datetime:
    if timestamp is None:
        return
    if timestamp.endswith("+00:00"):
        timestamp = timestamp[:-6]
    try:
        return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        # wonky datetimes
        return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_timestamp(ts):\n format = '%Y-%m-%d %H:%M:%S'\n return datetime.strptime(ts, format)", "def _timestamp_to_datetime(timestamp):\n return datetime.fromtimestamp(timestamp * 0.001, tz=timezone.utc)", "def get_datetime_from_timestamp(timestamp: str) -> datetime.datetime:\n return datetime.datetime.strptime(timestamp, DateFormat)", "def timestamp_to_datetime(timestamp: int) -> datetime:\n return datetime.fromtimestamp(timestamp, tz=timezone.utc)", "def parse_timestamp(ts):\n return DateTimeField()._to_python(ts)", "def pb_timestamp_to_datetime(timestamp_pb):\n return (\n _EPOCH +\n datetime.timedelta(\n seconds=timestamp_pb.seconds,\n microseconds=(timestamp_pb.nanos / 1000.0),\n )\n )", "def fromtimestamp(cls, timestamp):\n return datetime.datetime.utcfromtimestamp(timestamp)", "def _convert_timestamp(timestamp):\n extract_time = re.match('(.*?\\+\\d{2}):(.*)', timestamp)\n formated = datetime.strptime('{}{}'.format(extract_time.group(1), extract_time.group(2)),\n '%Y-%m-%dT%H:%M:%S%z').strftime('%Y-%m-%dT%H:%M:%S%z')\n return formated", "def convert_timestamp(data):\n try:\n return datetime.datetime.fromtimestamp(float(data))\n except ValueError:\n return datetime.datetime.fromisoformat(data.decode(\"utf-8\"))", "def conv_time(stamp):\n value = datetime.fromtimestamp(stamp)\n return value.strftime('%Y-%m-%d %H:%M:%S')", "def timestamp(self) -> dt.datetime:\n ts = self.json_data['timestamp']\n # Extract the datetime object from the `ts` string\n ts = dt.datetime.strptime(ts, '%Y-%m-%d %H:%M:%S')\n # Localise to Eastern time (Formstack returns Eastern times)\n ts = pytz.timezone('US/Eastern').localize(ts)\n # Convert to UTC time\n return ts.astimezone(pytz.timezone('UTC'))", "def timestamp2dt(timestamp):\n \n dt = datetime.utcfromtimestamp(timestamp)\n \n date = dt.strftime(\"%Y-%m-%d\")\n # TODO: Check this is correct\n time = dt.hour * 3600 + dt.minute * 60 + dt.second + dt.microsecond * 1e-6\n return (date, time)", "def fromtimestamp(cls, timestamp):\n return date()", "def to_local_time(self, tweet_timestamp):\n timestamp = mktime_tz(parsedate_tz(tweet_timestamp))\n return datetime.fromtimestamp(timestamp)", "def datetime_to_timestamp(datetime_str):\n return datetime.datetime.strptime(datetime_str, \"%Y-%m-%d %H:%M:%S\").timestamp()", "def convert_to_datetime(logs):\n extract_datetime = logs.split()[1]\n match_dt_string = '%Y-%m-%dT%H:%M:%S'\n final_datetime = datetime.strptime(extract_datetime, match_dt_string)\n return final_datetime", "def fromtimestamp(cls, timestamp, tz=None):\n return datetime()", "def from_timestamp(timestamp: float, unit: TimeUnit = TimeUnit.SECONDS) -> datetime:\n timestamp_in_sec: float = timestamp / (1000 ** int(unit))\n return datetime.fromtimestamp(timestamp_in_sec, tz=timezone.utc)", "def timestamp_to_toDate(timestamp):\n\n frm = \"%a %b %d %H:%M:%S %z %Y\"\n to = \"%Y%m%d%H%M\"\n toDate_dt = datetime.strptime(timestamp, frm)\n toDate_str = toDate_dt.strftime(to)\n return toDate_dt, toDate_str", "def format_datetime(timestamp):\n return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')", "def format_datetime(timestamp):\n return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')", "def format_datetime(timestamp):\n return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')", "def date_from(timestamp): \n return datetime.fromtimestamp(timestamp)", "def _date_from_timestamp(timestamp, fmt='%Y-%m-%d'):\n return datetime.fromtimestamp(timestamp).strftime(fmt)", "def test_05_timestamp_to_dt(self):\n\n ts = 
int(datetime.datetime.utcnow().strftime(\"%s\"))\n ts_object = utils.timestamp_to_dt(ts)\n self.assertIsInstance(ts_object, datetime.datetime)", "def parse_timestamp(str_timestamp):\n try:\n dt = parse(str_timestamp)\n except Exception as e:\n api.abort(422, \"date from the request cannot be parsed: {}\".format(e))\n return dt", "def timestamp_to_date(timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n\n date = datetime.date.fromtimestamp(timestamp)\n\n return format_date(date)", "def to_python_datetime(unix_timestamp):\n return datetime.datetime.fromtimestamp(int(unix_timestamp),\n pytz.timezone(settings.TIME_ZONE))", "def interpret_datetime(timestamp):\n formats = (\n \"%Y-%m-%d_%H:%M:%S.%f\",\n \"%Y-%m-%d_%H-%M-%S-%f\",\n DATE_FORMAT,\n )\n\n for i, fmt in enumerate(formats):\n try:\n return datetime.strptime(timestamp, fmt)\n except ValueError:\n if i == len(formats) - 1:\n raise\n continue", "def timestamp_to_time(timestamp):\n n_day = timestamp // (60*24)\n n_time = timestamp - n_day * (60*24)\n n_hour = n_time // 60\n n_minutes = n_time - n_hour * 60\n return Time(n_day, n_hour, n_minutes)" ]
[ "0.745324", "0.73894274", "0.7363517", "0.7086619", "0.70164067", "0.6976756", "0.6949481", "0.6927075", "0.69260484", "0.66861904", "0.6679699", "0.66663504", "0.66598463", "0.664685", "0.6620737", "0.65995663", "0.6588165", "0.65610904", "0.6533557", "0.6528402", "0.6528402", "0.6528402", "0.65016526", "0.64904493", "0.6452619", "0.64395475", "0.63877887", "0.63754374", "0.6368871", "0.63238215" ]
0.7474306
0
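For illustration only, a quick sketch of the `to_datetime` document above (assuming its `datetime` import is in scope); the timestamps are made-up Discord-style values:

    print(to_datetime("2016-03-31T19:15:39.954000+00:00"))  # 2016-03-31 19:15:39.954000
    print(to_datetime("2016-03-31T19:15:39+00:00"))         # 2016-03-31 19:15:39 (fallback path)
    print(to_datetime(None))                                 # None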
Traverses the stack for an object of type ``t``.
def _traverse_stack_for(t: type):
    for fr in inspect.stack():
        frame = fr.frame
        try:
            locals = frame.locals
        except AttributeError:
            # idk
            continue
        else:
            for object in locals.values():
                if type(object) is t:
                    return object
        finally:
            # prevent reference cycles
            del fr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)", "def do_display_stack_objects(self, args):\n start = self.reader.StackTop()\n end = self.reader.StackBottom()\n if len(args) != 0:\n args = args.split(' ')\n start = self.ParseAddressExpr(args[0])\n end = self.ParseAddressExpr(args[1]) if len(args) > 1 else end\n objects = self.heap.FindObjectPointers(start, end)\n for address in objects:\n heap_object = self.padawan.SenseObject(address)\n info = \"\"\n if heap_object:\n info = str(heap_object)\n print(\"%s %s\" % (self.padawan.FormatIntPtr(address), info))", "def __getitem__(self, backtrack: int) -> '_Runtime':\n if backtrack == -1:\n return self\n stack = _Runtime(self.__body)\n stack.__cursor.top = self.__cursor.top[0:backtrack + 1]\n return stack", "def nested_object_traversal(obj: any, leaf_function: Callable, leaf_type: type):\n if isinstance(obj, (list, tuple)):\n result = [Role.nested_object_traversal(elem, leaf_function, leaf_type) for elem in obj]\n return type(obj)(result)\n elif isinstance(obj, dict):\n return {\n k: Role.nested_object_traversal(v, leaf_function, leaf_type)\n for k, v in sorted(obj.items())\n }\n elif isinstance(obj, leaf_type):\n return leaf_function(obj)\n else:\n return obj", "def _get_object_subtree(self):\n raise NotImplementedError", "def test_peek_on_small_stack(small_stack):\n assert small_stack.peek().val == 3", "def peek(self):\n traverse = self.head\n\n if self.head == None:\n return \"empty stack\"\n self.top = self.size() - 1\n for i in range(0, self.top):\n traverse = traverse.next\n\n return traverse.data", "def visit(self, obj):\n pass", "def inspect_object(obj):\n raise TryNext", "def push(self, obj):\n rv = getattr(self._local, 'stack', None)\n if rv is None:\n self._local.stack = rv = [] # pylint: disable=W0201\n rv.append(obj)\n return rv", "def pop(stack):\n item = top(stack)\n stack.top = stack.top.next\n stack.size = stack.size - 1\n return item", "def peek(self):\r\n self.stack.peek(0)", "def visit(self, node):\n cls = node.__class__\n try:\n visitmethod = self.cache[cls]\n except KeyError:\n for subclass in cls.__mro__:\n visitmethod = getattr(self, subclass.__name__, None)\n if visitmethod is not None:\n break\n else:\n visitmethod = self.__object\n self.cache[cls] = visitmethod\n visitmethod(node)", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def walk_stack(tw, blk):\n top = find_top_block(blk)\n if blk == top:\n code = tw.lc.run_blocks(top, tw.block_list.list, False)\n return code\n else:\n return []", "def make_stack(tb, stack=None):\n if stack is None:\n stack = []\n if tb is not None:\n make_stack(tb.tb_next, stack)\n stack.append(tb)\n return stack", "def peek(self) -> T:\n if self.is_empty():\n raise EmptyStackError\n else:\n return self._items[len(self) - 1]", "def __get__(self, stack: \"stack.Stack\", stack_class: Type[\"stack.Stack\"]) -> Any:\n with self._lock, self._no_recursive_get(stack):\n if hasattr(stack, self.name):\n return self.get_resolved_value(stack, stack_class)", "def traverse(tree, path):\n for node in path:\n tree = tree[node]\n return tree", "def in_order_traverse(root):\n stack = deque([root])\n visited = set()\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.index in visited:\n print(node.index, end=' ')\n continue\n visited.add(node.index)\n stack.append(node.right)\n stack.append(node)\n stack.append(node.left)", "def traverse_gc(task, addr_space, obj_type_string, start, stop, class_names, export_path, 
alpha):\n tmp = start\n\n global recovered_python_objects\n\n while True:\n found_head = obj.Object(\"_PyGC_Head\", offset=tmp, vm=addr_space)\n found_object = obj.Object(\"_PyInstanceObject1\",\n offset=tmp + 32,\n vm=addr_space)\n \n if not found_head.is_valid():\n print \"_PyGC_Head invalid\"\n sys.exit(0)\n \n recovered_python_objects += 2\n\n print \"curr:\", hex(tmp), \"next:\", hex(found_head.next_val), \"prev:\", hex(found_head.prev_val)\n print found_object.ob_type.dereference().name\n \n if is_model(found_object, class_names):\n print \"Found\", found_object.ob_type.dereference().name, \"at\", hex(found_object.obj_offset)\n process_parameters(task, addr_space, found_object, export_path, alpha)\n return True\n \n if (tmp == stop):\n break\n tmp = found_head.next_val\n return False", "def __init__(self):\n        self.stack=[]\n        self.top1=-1\n        ", "def walk_tb(tb):\n track = False\n result = []\n while tb is not None:\n if track:\n result.append((tb.tb_frame, tb.tb_lineno))\n if '__log_tb_start__' in tb.tb_frame.f_locals:\n result = []\n track = True\n tb = tb.tb_next\n return result", "def traverse(tree):\n nonlocal result\n\n symbol, children, *_ = tree\n\n if children:\n for c in children:\n if c[0].startswith(\"<\"):\n if not c[0].startswith(symbol_name[:-1]):\n if next_leaf(c):\n result += c[0].replace(\"<\", \"\").replace(\">\", \": \") + next_leaf_content(c) + \"\\n\"\n else:\n result += c[0].replace(\"<\", \"\").replace(\">\", \"\") + \" {\" + \"\\n\"\n traverse(c)\n result += \"}\" + \"\\n\"\n else:\n traverse(c) # do not update anything, just traverse", "def show(self):\n traverse = self.head\n\n if self.top <= -1:\n print(\" Stack Underflow\")\n return\n if traverse == None:\n print(\"Stack is empty\")\n return\n\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n print(traverse.data)", "def cleartomark():\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n obj = object()\n while obj is not MARK:\n try:\n obj = stack.pop()\n except IndexError:\n pass\n if stack:\n return stack[-1]\n return MARK", "def HierarchyIterator(obj):\n while obj:\n yield obj\n for opChild in SplineInputGeneratorHelper.HierarchyIterator(obj.GetDown()):\n yield opChild\n obj = obj.GetNext()", "def show_tree(obj,d=0):\n print \"%s%s\" % (\"-\"*d,obj.__class__.__name__)\n if 'get_children' in dir(obj):\n for a in obj.get_children(): show_tree(a,d+1)", "def get_stack(self):\n return self.__stack", "def get_objects_rednode(obj):\n from redbaron import RedBaron\n # walk til the first 'locals'\n # Example __qualname__: 'TestClassNodeConv.test_get_datamodel.<locals>.T'\n parent = inspect.getmodule(obj)\n for name in obj.__class__.__qualname__.split('.'):\n if name == '<locals>':\n break\n parent = getattr(parent, name)\n\n try:\n # try to find the source code with traditional means by using inspect, this may faile as it requires class to be defined in a file (not true fro REPL or Notebook)\n # if fails use IPYTHON history\n try:\n parent_code = inspect.getsourcelines(parent)[0]\n\n # monkeypatch the inspect module to use 'parent code' as input for searching the class code (else it searches full file)\n with patch('inspect.linecache.getlines', MagicMock(return_value=parent_code)):\n source = textwrap.dedent(inspect.getsource(obj.__class__))\n\n red_list = RedBaron(source)\n return red_list[0]\n\n except TypeError:\n # try finding the class from local IPYTHON input history\n from IPython import get_ipython\n ipython = get_ipython()\n 
ipython.run_cell_magic(\"capture\", \"out_var\", \"%history\")\n out_var = str(ipython.ev('out_var'))\n\n # filter up to the last occurance of class def\n import re\n lines = str(out_var).splitlines()\n pat = re.compile(r'^(\\s*)class\\s*' + obj.__class__.__name__ + r'\\b')\n\n last_match = -1\n for i in range(len(lines)):\n match = pat.match(lines[i])\n if match:\n last_match = i\n\n if last_match == -1:\n raise Exception('Class was not found at all...')\n out_var = '\\n'.join(lines[last_match:])\n\n with tempfile.NamedTemporaryFile(mode='w+') as temp:\n temp.write(out_var)\n temp.flush()\n with patch('inspect.getfile', MagicMock(return_value=temp.name)):\n source = textwrap.dedent(inspect.getsource(obj.__class__))\n red_list = RedBaron(source)\n logger.warning(f'Found \"{obj.__class__.__name__}\" source from IPython history!')\n return red_list[0]\n except:\n # This is due to the Inspect needing to open a file...\n # could be a bit relaxed with https://github.com/uqfoundation/dill/issues?utf8=%E2%9C%93&q=getsource, but this only works in regular REPL, not Ipython nor Notebook...\n raise Exception(f'Could not fetch \"{obj.__class__}\" source code (also tried loading from IPython history).')" ]
[ "0.5210974", "0.5194495", "0.5193176", "0.5170919", "0.51102453", "0.5105788", "0.5105216", "0.5067196", "0.5058047", "0.50093937", "0.50008374", "0.4970677", "0.48915708", "0.48852113", "0.48597592", "0.48541418", "0.48447248", "0.48348817", "0.48050487", "0.480471", "0.47981143", "0.47778776", "0.47689334", "0.47519892", "0.4745025", "0.4738777", "0.47278172", "0.4714167", "0.4704454", "0.46965307" ]
0.7825187
0
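For illustration only, a sketch of how the `_traverse_stack_for` document above might be called; the `RequestContext` class is invented, and because the helper silently skips any frame whose locals it cannot read, the lookup is best-effort:

    class RequestContext:
        pass

    def handler():
        # search the calling frames for a visible RequestContext instance
        return _traverse_stack_for(RequestContext)

    def dispatch():
        ctx = RequestContext()  # lives in dispatch()'s locals, which is what the helper searches
        return handler()

    result = dispatch()
    print(type(result).__name__)  # RequestContext if a frame exposed one, otherwise NoneType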
Subclasses an immutable builtin, providing method wrappers that return the subclass instead of the original.
def subclass_builtin(original: type):
    def get_wrapper(subclass, func):
        @functools.wraps(func)
        def __inner_wrapper(self, *args, **kwargs):
            result = func(self, *args, **kwargs)
            new = subclass(result)
            # copy the parent dataclass if we need to
            if 'parent' in self.__dict__:
                new.__dict__['parent'] = self.__dict__['parent']
            return new
        return __inner_wrapper

    def wrapper(subclass):
        if hasattr(subclass, "__slots__"):
            raise RuntimeError("Cannot fix a slotted class")

        # get all of the methods on the original and parse the docstring; these are in a
        # well-defined format for builtins
        for meth_name in dir(original):
            func = getattr(subclass, meth_name)
            # only overwrite doc'd functions
            if not func.__doc__:
                continue

            sig = func.__doc__.split("\n")[0]
            try:
                rtype: str = sig.split("->")[1]
            except IndexError:
                continue

            # check if it matches our real method
            rtype = rtype.lstrip().rstrip()
            if rtype == original.__name__:
                # make a new function wrapper, which returns the appropriate type
                # then add it to our subclass
                wrapper = get_wrapper(subclass, func)
                setattr(subclass, meth_name, wrapper)

        return subclass

    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wrap(cls, orig):\n # hack to give the timestamp this class' specialized methods\n orig.__class__ = cls\n return orig", "def __copy__(self):\n return type(self)(self.value)", "def make_immutable(self):\n # just set the flag to make object immutable and hashable\n self.immutable = True", "def clone(self) -> \"set_default\":\n return type(self)(self.value)", "def __sub__(self, other: '__class__') -> '__class__':", "def clone(self) -> Mutator:\n raise NotImplementedError", "def __reduce_ex__(self, protocol=None):\n return (\n sm.copyreg._reconstructor,\n (type(self), set, list(self)),\n self.__getstate__(),\n )", "def __copy__(self):\n return type(self)(self.number)", "def copy(self: _R) -> _R:\n return self.__class__(self.dumps())", "def primitiveImmutableFromArgs(interp, s_frame, argcount):\n if argcount == 0:\n raise PrimitiveFailedError\n w_args = s_frame.pop_and_return_n(argcount)[:]\n w_cls = s_frame.pop()\n space = interp.space\n instance_kind = w_cls.as_class_get_shadow(space).get_instance_kind()\n\n if instance_kind == POINTERS:\n cls = select_immutable_pointers_class(w_args)\n return cls(space, w_cls, w_args)\n elif instance_kind == BYTES:\n try:\n bytes = [chr(interp.space.unwrap_uint(b)) for b in w_args]\n except (ValueError, TypeError, UnwrappingError):\n raise PrimitiveFailedError\n return W_Immutable_BytesObject(space, w_cls, bytes)\n elif instance_kind == WORDS:\n try:\n words = [interp.space.unwrap_uint(b) for b in w_args]\n except UnwrappingError:\n raise PrimitiveFailedError\n return W_Immutable_WordsObject(space, w_cls, words)\n\n raise PrimitiveFailedError", "def _patch_implementation(self, original, *args, **kwargs):\n pass", "def __reduce_ex__(self, proto):\n args = (type(self),) + self.__getnewargs__()\n return (__newobj__, args, self.__getstate__())", "def _make_immutable(value):\n if isinstance(value, dict):\n return Object(value)\n elif isinstance(value, (list, tuple)):\n return Array(value)\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool, Document, Object, Array, Link))\n ):\n return value\n\n raise TypeError(\"Invalid type in document. Got '%s'.\" % type(value))", "def __wrapper__(self, x):\r\n return wrap(x)", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def copy(self: _R) -> _R:\n return self.__class__(\n self.name,\n list(self.children),\n docstring=self.docstring,\n stringify=self.is_stringified(),\n replace_with_dict=self.replace_with_dict,\n )", "def clone(self) -> \"Mutator\":\n return _ffi_api.MutatorClone(self) # type: ignore # pylint: disable=no-member", "def original(self) -> Any:\n raise NotImplementedError", "def __copy__(self):\n raise NotImplementedError", "def __copy__(self):\n return type(self)(self.sig, *self[1:])", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(**vars(self))", "def clone(self, **kwargs):\n return super(AttentionWrapperState, self)._replace(**kwargs)", "def __copy__(self):\n return self.copy()", "def __call__(self, *args: Any, **kwds: Any) -> Self:\n return self.__class__(*args, **kwds) # type: ignore", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied", "def copy(self):\n return type(self)(self.parent(), self._express)", "def __rsub__(self, other):\r\n other = self._coerce(other)\r\n if other is NotImplemented:\r\n return NotImplemented\r\n\r\n return runtime.sub(other, self)", "def copy(self):\n return object.__new__(type(self))" ]
[ "0.60920924", "0.59345937", "0.5797206", "0.5673648", "0.56651944", "0.5658072", "0.5650036", "0.5575062", "0.55314976", "0.5498412", "0.54812056", "0.5425845", "0.5407955", "0.5403788", "0.53938985", "0.53634125", "0.53393614", "0.5313916", "0.53056425", "0.52979714", "0.52978486", "0.52978486", "0.5278728", "0.5267773", "0.522032", "0.5192285", "0.51817477", "0.5170929", "0.5157258", "0.51535565" ]
0.6362824
0
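For illustration only, a sketch of applying the `subclass_builtin` document above as a decorator (assuming the function and its `functools` import are in scope); note it depends on the builtin's method docstrings starting with a `... -> str` style signature line, which older CPython builds provided, so on newer interpreters the loop may leave the methods untouched:

    @subclass_builtin(str)
    class TaggedStr(str):
        pass

    upper = TaggedStr("hello").upper()
    # TaggedStr when str docstrings expose '-> str' signatures; plain str otherwise
    print(type(upper).__name__)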
Extract FASTQ filename and read header from Wadden Sea FASTA headers.
def transform_fasta_header(fastaheader):
    fastq_source, read_header = fastaheader.split(" ", 1)[0].rsplit("_", 1)
    fastq_base = fastq_source.rsplit("_", 1)[0]
    return fastq_base, read_header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extractHeader(file) :\n\n headerDispatching = {\n \t'AL': extractALFOSCHeader,\n \t'NC': extractNOTCAMHeader,\n \t'MO': extractMOSCAHeader,\n \t'ST': extractSTANCAMHeader,\t\n\t'FI': extractFIESHeader\n }\n\n fn = headerDispatching.get(os.path.basename(file)[:2], extractOldALFOSCHeader)\n return fn(file)", "def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)", "def readFasta(self, fp):\n\t\t\n\t\tfor head, seq in self.parseFasta(fp):\n\t\t\t#analyzing the sequence\n\t\t\tself.analyzeSequence(seq)\n\t\t\t#saving the header\n\t\t\tif head == '':\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tself.header.append(head)", "def fasta_headers(file_name):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n text = infile.read()\n seqs = text.split('>')\n for seq in seqs:\n try:\n x = seq.split('\\n', 1)\n if x[0] != '':\n #x[0] contains only headers\n list.append(x[0])\n except:\n pass\n return list", "def test_fasta_get_headers(self):\r\n\r\n header_records = mfau.get_record_headers(full_file_name)\r\n\r\n if debug:\r\n for header_record in header_records:\r\n print header_record.strip()\r\n\r\n self.assertGreaterEqual(len(header_records), 0)", "def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)", "def readFastaFile(filename):", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def read_fasta(fasta_name):\n \n \"\"\"first open the file outside \"\"\"\n file_handler = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n fasta_iter = (x[1] for x in groupby(file_handler, lambda line: line[0] == \">\"))\n\n for header 
in fasta_iter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in fasta_iter.__next__())\n\n # yield (headerStr, seq)\n result_record = {'header':headerStr,'seqRecord':seq}\n return result_record", "def readFastaFile(filename):\n info={}\n fhr=open(filename,\"r\")\n while(True):\n line=fhr.readline()\n if not line: break\n if(\">\" in line):\n try:\n info[line.strip()[1:].split()[0]]=fhr.readline().strip()\n except ValueError:\n pass\n return info", "def parse_fasta_header(header):\n\n # Prodigal header example: >NODE-3-length-2984-cov-4.247866_3 # 1439 # 1894 # 1 # ID=3_3;partial=00;start_type=TTG;rbs_motif=TAA;rbs_spacer=8bp;gc_cont=0.340\n prodigal_match = re.match(\n \"^>(?P<contig>.+?)\\s#\\s(?P<start>\\d+)\\s#\\s(?P<end>\\d+)\\s#\\s(?P<strand>.+?)\\s#\",\n header,\n )\n if prodigal_match:\n groups = prodigal_match.groupdict()\n return groups[\"contig\"], groups[\"start\"], groups[\"end\"], groups[\"strand\"]\n\n # FGS header example: >ERZ1759872.3-contig-100_3188_4599_-\n fgs_match = re.match(\"^>.+?_(?P<start>\\d+)_(?P<end>\\d+)_(?P<strand>.)\", header)\n if fgs_match:\n groups = fgs_match.groupdict()\n strand = \"1\"\n if groups[\"strand\"] == \"-\":\n strand = \"-1\"\n return header.rstrip().replace(\">\", \"\"), groups[\"start\"], groups[\"end\"], strand\n\n # unable to parse fasta header\n raise Exception(\"Unable to parse fasta header \" + header)", "def read_header(options, infile):\n\n contigs = dict()\n line = ''\n if options.is_bam:\n #chrm = infile.getrname(line.tid).replace('chr', '')\n for i in range(len(infile.references)):\n if infile.references[i] == 'chrM_rCRS':\n chr_key = 'chrM'\n else:\n chr_key = infile.references[i]\n\n if contigs.has_key(chr_key):\n if not contigs[chr_key] == infile.lengths[i]:\n print >> sys.stderr, \"Headers in BAM files have inconsistent contig lengths. Stopping ...\"\n sys.exit(1)\n else:\n contigs[chr_key] = infile.lengths[i]\n else:\n for line in infile:\n if not line[0] == '@':\n if len(contigs) == 0:\n print >> sys.stderr, \"No header found in %s. Stopping.\" % file\n sys.exit(1)\n else:\n break\n\n sl = line.strip().split('\\t')\n\n if not sl[0] == '@SQ':\n continue\n\n if sl[1][3:] == 'chrM_rCRS':\n chr_key = 'chrM'\n else:\n chr_key = sl[1][3:]\n if contigs.has_key(chr_key):\n if not contigs[chr_key] == int(sl[2][3:]):\n print >> sys.stderr, \"Headers in BAM files have inconsistent contig lengths. Stopping ...\"\n sys.exit(1)\n else:\n contigs[chr_key] = int(sl[2][3:])\n \n return (contigs, line)", "def read_fastq(filename, strip_second_header=True):\n\n with open(filename) as fastq:\n line = fastq.readline()\n if not line.startswith(\"@\"):\n raise IOError(\"Not FASTQ format? 
First line didn't start with @\")\n while fastq:\n if line.startswith(\"@\"):\n header = line.rstrip()\n seq = fastq.readline().rstrip()\n second_header = fastq.readline()\n if strip_second_header:\n second_header = \"+\"\n scores = fastq.readline().rstrip()\n yield header, seq, second_header, scores\n elif line == \"\": # EOF\n yield header, seq, second_header, scores\n break\n line = fastq.readline()", "def getSequence(ref, fasta):\n\n fasta_header = \"\"\n\n fh_fasta = open(fasta, \"r\")\n entry = (x[1] for x in groupby(fh_fasta, lambda line: line[0] == \">\"))\n\n for header in entry:\n headerStr = header.__next__()[1:].strip()\n\n seq = \"\".join(s.strip() for s in entry.__next__())\n\n if ref == headerStr.replace('>',''):\n filename = os.path.join(os.getcwd(), ref.replace('/','_').split('|')[0])\n fasta_header = replace_char(headerStr)\n\n with open(filename + '.fa', \"w\") as output_file:\n output_file.write(\">\" + fasta_header + \"\\\\n\" + seq.upper() + \"\\\\n\")\n\n fh_fasta.close()\n return fasta_header", "def _parse_header(self):\n # read the first bytes from the file\n header = self._stream_handle.read(HEADER_BYTES)\n match = HEADER_MATCHER.match(header)\n if not match:\n raise SampleException(\"File header does not match the header regex\")\n\n # update the state to show we have read the header\n self._increment_state(HEADER_BYTES)", "def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence", "def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences", "def protein_fref(self):\n # if no (main) protein file is set -> return None\n if not self.protein_file: return None\n header, descr = parseSingleFastaHeaderFromFile(self.protein_file)\n return header", "def extract_filename(header_string):\n\n # Get the last word in the string\n file_name_regex = re.compile(r'\\w+$')\n\n # Use only the first one\n first_header_string = next(header_string)\n header = re.findall(file_name_regex, first_header_string.strip())[0]\n return header", "def read_header(fname):\n with gzip.open(fname, 'rt') as f:\n content = f.readline().split()\n return content[:-1], int(content[-1])", "def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data", "def _parse_fastq(f):\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()", "def test_full_fasta_headers(self):\r\n convert_fastq(self.fasta_file_path, self.qual_file_path,\r\n full_fasta_headers=True, output_directory=self.output_dir)\r\n\r\n actual_output_file_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '.fastq',\r\n self.output_dir)\r\n\r\n actual_output_file = open(actual_output_file_path)\r\n actual_output = actual_output_file.read()\r\n actual_output_file.close()\r\n 
self._files_to_remove.append(actual_output_file_path)\r\n\r\n self.assertEquals(actual_output, expected_fastq_full_fasta_headers)", "def read_headers(filelike):\n return reader.Reader.read_headers(filelike).datafile", "def readFastq(filename):\n sequences = []\n qualities = []\n \n with open(filename) as fh:\n while True:\n fh.readline() # skip name line\n seq = fh.readline().rstrip() #read base sequence\n fh.readline() # skip placeholder line\n qual = fh.readline().rstrip() # base quality line\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n \n return sequences, qualities", "def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}", "def splitFastaHeader(name):\n nameParts = re.split('\\s', name, maxsplit=1)\n id_ = nameParts[0]\n if len(nameParts) > 1:\n metadata = nameParts[1].strip()\n else:\n metadata = None\n return (id_, metadata)", "def readFrom(self,fn):\n hdrs = {}\n try:\n f = open(fn+\".headers\",\"tr\")\n for l in f:\n if l[-1:]==\"\\n\":\n l = l[:-1]\n i = l.find(\": \")\n if -1!=i:\n hdrs[l[:i]] = l[i+2:]\n f.close()\n except (Exception,Error) as err:\n log(\"readFrom: header: error: \"+str(err))\n try:\n f2 = open(fn,\"br\")\n data = f2.read()\n f2.close()\n except (Exception,Error) as err:\n log(\"readFrom: body: error: \"+str(err))\n return (hdrs,data)", "def read_header(self, fcs, data_offset=0):\r\n # Ignore first 10 bytes of HEADER contain FCS file format followed by 4 spaces\r\n fcs.read(10)\r\n\r\n for text in (\r\n \"$BEGINSTEXT\",\r\n \"$ENDSTEXT\",\r\n \"$BEGINDATA\",\r\n \"$ENDDATA\",\r\n ):\r\n text_offset = int(fcs.read(8))\r\n self.text_keywords[text] = text_offset + data_offset\r\n\r\n self.data_start = self.text_keywords[\"$BEGINDATA\"]\r\n self.data_end = self.text_keywords[\"$BEGINDATA\"]", "def read_scamp_head(fname, header=None):\n\n with open(fname) as fobj:\n lines = fobj.readlines()\n\n lines = [l.strip() for l in lines if l[0:3] != 'END']\n\n # if header is None an empty FITSHDR is created\n hdr = FITSHDR(header)\n\n for l in lines:\n hdr.add_record(l)\n\n return hdr" ]
[ "0.6401179", "0.62904555", "0.62217855", "0.6192803", "0.60394955", "0.5990652", "0.5989857", "0.59722316", "0.5967548", "0.59647876", "0.59257495", "0.58719146", "0.58522266", "0.584659", "0.5842128", "0.5835512", "0.58078194", "0.57997817", "0.5782842", "0.5766097", "0.57577527", "0.5733544", "0.5720507", "0.567171", "0.5669142", "0.56609136", "0.56528187", "0.56490546", "0.5644635", "0.56444573" ]
0.64500093
0
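For illustration only, a sketch of the `transform_fasta_header` document above on an invented header string; the companion `fastq_filename` helper in the next record would then turn the base into a read-pair of file names:

    fastq_base, read_header = transform_fasta_header("SAMPLE01_S1_L001_12345 length=250")
    print(fastq_base)    # SAMPLE01_S1
    print(read_header)   # 12345
    print(fastq_filename(fastq_base))  # ('SAMPLE01_S1_1.fastq', 'SAMPLE01_S1_2.fastq')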
Return a pair of complete fastq filenames for fastq_base.
def fastq_filename(fastq_base):
    return fastq_base+"_1.fastq", fastq_base+"_2.fastq"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_hq_fq(self):\n return op.join(self.combined_dir, 'all.polished_hq.fastq')", "def all_lq_fq(self):\n return op.join(self.combined_dir, 'all.polished_lq.fastq')", "def fq_of_arrowed_bin(self, first, last):\n return self._arrowed_bin_prefix(first, last) + \".arrowed.fastq\"", "def get_result_files_fastqc(config):\n res_zip = []\n res_html = []\n for path in get_result_files_demux(config):\n ext = \".fastq.gz\"\n if path.endswith(ext):\n folder = os.path.dirname(path)\n base = os.path.basename(path)[: -len(ext)]\n res_zip.append(os.path.join(folder, \"qc\", \"fastqc\", base + \"_fastqc.zip\"))\n res_html.append(os.path.join(folder, \"qc\", \"fastqc\", base + \"_fastqc.html\"))\n return {\"zip\": res_zip, \"html\": res_html}", "def prepare_fastq(Fastq_Root=\"2.Fastq/\", ):\n fastqs = glob.glob(Fastq_Root + \"*.fastq\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq\", \"\")\n if s.endswith(\"_1\"):\n sample = s.replace(\"_1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_2\"):\n sample = s.replace(\"_2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n if not s.endswith(\"_1\") and not s.endswith(\"_2\"):\n data[s] = [fq]\n return data", "def get_fastq_files(self) -> List[Path]:\n return list(self.sequence_data_paths.fastq_path.glob(\"*.fastq.gz\")) # type: ignore", "def get_fastq_files(wildcards):\n return expand(os.path.join(fastq_dir, \"{sample}_{readpair}.fastq\"), readpair=[1, 2], **wildcards)", "def get_fastqc_files(sample, unit, pairs, config, pre):\n if config[\"preprocessing\"][\"fastqc\"]:\n files = expand(config[\"paths\"][\"results\"]+\"/intermediate/fastqc/{sample}_{unit}_{pair}{PREPROCESS}_fastqc.zip\",\n sample=sample, unit=unit, pair=pairs, PREPROCESS=pre)\n return files\n return []", "def get_fastq(wildcards):\n if sample_is_single_end(wildcards.sample):\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\"]].dropna()\n else:\n return \"16S/\" + samples.loc[(wildcards.sample), [\"fq1\", \"fq2\"]].dropna()", "def get_fastq_files(directory, work_dir, item, fc_name, bc_name=None, glob_ext=\"_fastq.txt\",\n config=None, unpack=True):\n if \"files\" in item and bc_name is None:\n names = item[\"files\"]\n if isinstance(names, basestring):\n names = [names]\n files = [x if os.path.isabs(x) else os.path.join(directory, x) for x in names]\n\n else:\n assert fc_name is not None\n lane = item[\"lane\"]\n if bc_name:\n glob_str = \"%s_*%s_%s_*%s\" % (lane, fc_name, bc_name, glob_ext)\n else:\n glob_str = \"%s_*%s*%s\" % (lane, fc_name, glob_ext)\n files = glob.glob(os.path.join(directory, glob_str))\n \n # Include gzipped files\n glob_str = \"%s.gz\" % glob_str\n files.extend(glob.glob(os.path.join(directory, glob_str)))\n \n files.sort()\n if len(files) > 2 or len(files) == 0:\n raise ValueError(\"Did not find correct files for %s %s %s %s\" %\n (directory, lane, fc_name, files))\n ready_files = []\n for fname in files:\n if fname.endswith(\".gz\") and unpack:\n # TODO: Parallelize using pgzip\n ready_name = os.path.splitext(fname)[0]\n ready_files.append(ready_name)\n if not os.path.exists(ready_name):\n cl = [\"gunzip\", fname]\n subprocess.check_call(cl)\n elif fname.endswith(\".bam\"):\n ready_files = convert_bam_to_fastq(fname, work_dir, config)\n else:\n assert os.path.exists(fname), fname\n ready_files.append(fname)\n ready_files = [x for x in ready_files if x is not None]\n return ready_files[0], (ready_files[1] if len(ready_files) > 1 else 
None)", "def _extract_fastqs_from_fast5(self):\n\t\tfor id, h5path in fastq_paths[self.version].iteritems():\n\t\t\ttry:\n\t\t\t\ttable = self.hdf5file[h5path % self.group]\n\t\t\t\tfq = formats.Fastq(table['Fastq'][()])\n\t\t\t\tfq.name += \" \" + self.filename\n\t\t\t\tself.fastqs[id] = fq\n\t\t\texcept Exception, e:\n\t\t\t\tpass", "def preFqs(fastqRoot):\n fastqs = glob(fastqRoot + \"/*.fastq.gz\")\n data = {}\n for fq in fastqs:\n s = os.path.split(fq)[1]\n s = s.replace(\".fastq.gz\", \"\")\n if s.endswith(\"_R1\"):\n sample = s.replace(\"_R1\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][0] = fq\n if s.endswith(\"_R2\"):\n sample = s.replace(\"_R2\", \"\")\n if sample not in data:\n data[sample] = [0, 0]\n data[sample][1] = fq\n for key, fqs in data.items():\n if len(fqs) != 2:\n logger.error(\n \"for %s there is not paired fastq files, only %s found\" %\n (key, \",\".join(fqs)))\n del data[key]\n return data", "def get_output_filepaths(output_dir,\r\n fasta_fp,\r\n qual_fp):\r\n\r\n if not output_dir.endswith('/'):\r\n output_dir += '/'\r\n\r\n fasta_out_fp = output_dir + basename(fasta_fp).split('.')[0] +\\\r\n \"_filtered.fasta\"\r\n\r\n qual_out_fp = output_dir + basename(qual_fp).split('.')[0] +\\\r\n \"_filtered.qual\"\r\n\r\n return fasta_out_fp, qual_out_fp", "def hq_isoforms_fq(self):\n return op.join(self.fasta_dir, \"hq_isoforms.fastq\")", "def get_latest_files(base_path):\n try:\n adni = os.listdir('{}/qc/phantom/adni'.format(base_path))\n adni = filter(lambda x: '_adni_' in x and 'csv' in x, adni)\n adni.sort()\n adni = adni[-9:]\n except:\n adni = None\n\n try:\n fmri = os.listdir('{}/qc/phantom/fmri'.format(base_path))\n fmri = filter(lambda x: '_fmri_' in x and 'csv' in x, fmri)\n fmri.sort()\n fmri = fmri[-7:]\n except:\n fmri = None\n\n try:\n dti = os.listdir('{}/qc/phantom/dti'.format(base_path))\n dti = filter(lambda x: '_dti_' in x and 'csv' in x, dti)\n dti.sort()\n dti = dti[-1:]\n except:\n dti = None\n\n return adni, fmri, dti", "def list_of_expected_arrow_fq_files(self):\n def iter_script_to_get_fq(script_filename):\n for line in open(script_filename):\n # line might be like:\n # bash <arrow_dir>/c0to9.sh\n sh_file = line.strip().split()[-1]\n assert sh_file.endswith('.sh')\n yield sh_file[:-3] + '.arrowed.fastq'\n\n\n sge_ids = []\n submitted = {} # expected fq --> (\"local\" or SGE jobid, script used to get this)\n for line in open(self.arrow_submission_run_file):\n jobid, script = line.strip().split('\\t')\n # read the script to see which c<i>to<j>.sh files are associated with this\n for fq in iter_script_to_get_fq(script):\n submitted[fq] = (jobid, script)\n if jobid!='local':\n sge_ids.append(jobid)\n\n return sge_ids, submitted", "def gen_result_fname(fs, q_idx=0, p_idx=0, v_idx=None):\n\n appendix = gen_appendix(q_idx, p_idx, v_idx)\n fname = fs['identifier']+'_' + appendix +'.csv'\n\n full_fname = os.path.join(fs['dir']['res'], fname)\n\n return full_fname", "def _getfilenames(self):\n\n # Set up the path and file prefix depending on the filetype.\n if self._filetype == 'nightwatch':\n fileprefix = 'qcframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/nightwatch/kpno'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n elif self._filetype == 'redux':\n fileprefix = 'sframe'\n\n if self._location == 'nersc':\n prefix = 
'/global/project/projectdirs/desi/spectro/redux/daily/exposures'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n else:\n raise ValueError('Unknown file type {}'.format(self._filetype))\n\n # Find the exposures files.\n exfiles = {}\n for ex in self._exposures:\n folder = '{}/{}/{:08d}'.format(prefix, self._date, ex)\n files = sorted(glob('{}/{}*.fits'.format(folder, fileprefix)))\n exfiles[ex] = files\n\n return exfiles", "def download_fastq_files(fastq1_s3_path, fastq2_s3_path, working_dir):\n fastq_folder = os.path.join(working_dir, 'fastq')\n\n try:\n os.mkdir(fastq_folder)\n except Exception as e:\n pass\n\n local_fastq1_path = download_file(fastq1_s3_path, fastq_folder)\n local_fastq2_path = download_file(fastq2_s3_path, fastq_folder)\n\n # Isaac requires the fastqs to be symlinked as lane1_read1.fastq.gz and lane1_read2.fastq.gz\n os.symlink(local_fastq1_path, os.path.join(fastq_folder, 'lane1_read1.fastq.gz'))\n os.symlink(local_fastq2_path, os.path.join(fastq_folder, 'lane1_read2.fastq.gz'))\n\n return fastq_folder", "def lq_isoforms_fq(self):\n return op.join(self.fasta_dir, \"lq_isoforms.fastq\")", "def get_filenames():\r\n datadir = \"./phase3_data/\"\r\n samples = os.listdir(datadir)\r\n all_files = []\r\n for i in range(len(samples)):\r\n sampfiles = []\r\n datadir = \"./phase3_data/\" + samples[i]\r\n files = os.listdir(datadir)\r\n for file in files:\r\n if file.endswith(\".bin\"):\r\n sampfiles += [file]\r\n all_files += [sampfiles]\r\n return samples, all_files", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def all_lq_fa(self):\n return op.join(self.combined_dir, 'all.polished_lq.fasta')", "def all_hq_fa(self):\n return op.join(self.combined_dir, 'all.polished_hq.fasta')", "def merge_paired_fastqs(target, outdir):\n left, right = target.get_fastq()\n left_fq = join(outdir, target.system_id + \"_1.fq.gz\")\n right_fq = join(outdir, target.system_id + \"_2.fq.gz\")\n commands = []\n\n if is_gz_file(left[0]):\n commands += [f\"cat {' '.join(left)} > {left_fq}\"]\n commands += [f\"cat {' '.join(right)} > {right_fq}\"]\n else:\n commands += [f\"cat {' '.join(left)} | \" f\"gzip -c > {left_fq}\"]\n commands += [f\"cat {' '.join(right)} | \" f\"gzip -c > {right_fq}\"]\n\n remove = [f\"rm {left_fq} {right_fq}\"]\n\n return (left_fq, right_fq), commands, remove", "def resolveNames(qconfname, tails):\n if not tails:\n return (qconfname,)\n # remove extension if exist\n qconfname = os.path.splitext(qconfname)[0]\n # Get core name\n # iterate over tails\n basename = None\n for tail in tails:\n # deconstruct for name.ext\n tailext = os.path.splitext(tail)\n # check if there is common name with qconfname\n if qconfname.find(tailext[0]) != -1:\n basename = qconfname[0:qconfname.find(tailext[0])]\n break\n # check if one tail can be found in base name\n if not basename:\n raise ValueError(\"One of tails should be the same as given base name.\")\n # form output\n ret = []\n for tail in tails:\n if not os.path.splitext(tail)[1]:\n ext = imageExt\n else:\n ext = ''\n ret.append(basename + tail + ext)\n return tuple(ret)", "def concatenate_fastq(path, isfastq, sample_name):\n \n r1 = []\n r2 = []\n filenames = get_filesnames_in_dir(path)\n \n for i in filenames:\n if \"fake_genome\" in i:\n continue\n elif \"R1\" in i:\n r1.append(i)\n elif \"R2\" in i:\n r2.append(i)\n if isfastq:\n 
nameR1 = sample_name + \"-R1.fastq\"\n nameR2 = sample_name + \"-R2.fastq\"\n else:\n nameR1 = sample_name + \"-R1.fasta\"\n nameR2 = sample_name + \"-R2.fasta\"\n\n #concatinate R1\n with open(path + nameR1, 'w') as outfile:\n for fname in sorted(r1):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n #concatinate R2\n with open(path + nameR2, 'w') as outfile:\n for fname in sorted(r2):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n \n for i in r1 + r2:\n os.remove(path + i)", "def get_fnams(start = '', dir_base = './', end = ''):\r\n fnams = os.listdir(dir_base)\r\n fnams_out = []\r\n for i, fnam in enumerate(fnams):\r\n if fnam[:len(start)] == start :\r\n if fnam[-len(end):] == end or len(end) == 0 :\r\n temp = os.path.join( dir_base, fnam)\r\n if os.path.isfile( temp ) :\r\n fnams_out.append(temp)\r\n return fnams_out", "def test_get_output_filepaths(self):\r\n\r\n output_dir = \".\"\r\n\r\n fasta_fp = \"seqs.fna\"\r\n\r\n qual_fp = \"seqs.qual\"\r\n\r\n expected_fasta_fp = \"./seqs_filtered.fasta\"\r\n expected_qual_fp = \"./seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)\r\n\r\n # Test for relative paths\r\n output_dir = \"test/\"\r\n\r\n fasta_fp = \"../seqs.fna\"\r\n\r\n qual_fp = \"quality_scores/seqs.qual\"\r\n\r\n expected_fasta_fp = \"test/seqs_filtered.fasta\"\r\n expected_qual_fp = \"test/seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)", "def test_is_fastq(self):\n \n test_fnames = [(\"foo.fastq\",True),\n (\"foo.fastq.gz\",True),\n (\"foo_fastq.txt\",True),\n (\"foo_fastq.txt.gz\",True),\n (\"foo.fastq.bar\",False),\n (\"foo.txt\",False),]\n \n for test_fname, exp_result in test_fnames:\n obs_result = is_fastq(test_fname)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected result ({:s}) for file name {:s}\".format(str(exp_result),test_fname))" ]
[ "0.6794959", "0.6590789", "0.655755", "0.63155407", "0.62391496", "0.6222754", "0.6189609", "0.61213464", "0.60903966", "0.59978986", "0.5983365", "0.5931376", "0.5890936", "0.5836683", "0.5803387", "0.57962996", "0.57486457", "0.5661792", "0.5644376", "0.56189114", "0.5614374", "0.5596162", "0.5566603", "0.55544853", "0.55341303", "0.55190927", "0.5515468", "0.54871273", "0.54506755", "0.5446419" ]
0.82848316
0
Build a query to find all summary entries for the given PDB.
def query(self, session, pdb):
    return session.query(mod.UnitInteractionSummary).\
        filter_by(pdb_id=pdb)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_summary(self):\n return self.details[KEY_QUERY_SUMMARY]", "def search_summaries(self, **query):\n for _, results in self._do_search_by_product(query, return_fields=True):\n for columns in results:\n yield dict(columns)", "def get_summary(self, query):\n\n method = \"POST\"\n sub_url = \"/ent_search/summary\"\n payload = query\n\n return self._make_request(sub_url, payload=payload, method=method)", "def db_stats(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\n \"$group\": {\n \"_id\": \"null\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalQuotes\": {\"$sum\": \"$quoteCount\"},\n \"peopleFemaleCount\": {\"$sum\": \"$peopleFemaleCount\"},\n \"peopleMaleCount\": {\"$sum\": \"$peopleMaleCount\"},\n \"peopleUnknownCount\": {\"$sum\": \"$peopleUnknownCount\"},\n \"sourcesFemaleCount\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"sourcesMaleCount\": {\"$sum\": \"$sourcesMaleCount\"},\n \"sourcesUnknownCount\": {\"$sum\": \"$sourcesUnknownCount\"},\n \"authorsFemaleCount\": {\"$sum\": \"$authorsFemaleCount\"},\n \"authorsMaleCount\": {\"$sum\": \"$authorsMaleCount\"},\n \"authorsUnknownCount\": {\"$sum\": \"$authorsUnknownCount\"},\n }\n },\n ]\n return query", "def compute_query_summary_statistics(data):\n return {\n nameserver: _compute_summary_stats(entries)\n for nameserver, entries in data.items()\n }", "def outlet_stats(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalQuotes\": {\"$sum\": \"$quoteCount\"},\n \"peopleFemaleCount\": {\"$sum\": \"$peopleFemaleCount\"},\n \"peopleMaleCount\": {\"$sum\": \"$peopleMaleCount\"},\n \"peopleUnknownCount\": {\"$sum\": \"$peopleUnknownCount\"},\n \"sourcesFemaleCount\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"sourcesMaleCount\": {\"$sum\": \"$sourcesMaleCount\"},\n \"sourcesUnknownCount\": {\"$sum\": \"$sourcesUnknownCount\"},\n \"authorsFemaleCount\": {\"$sum\": \"$authorsFemaleCount\"},\n \"authorsMaleCount\": {\"$sum\": \"$authorsMaleCount\"},\n \"authorsUnknownCount\": {\"$sum\": \"$authorsUnknownCount\"},\n }\n },\n ]\n return query", "def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results", "def __printSummary(self, queryTargetId, rD, atomMap):\n logger.info(\"\\n---------------------------- %s -----------------------\", queryTargetId)\n outN = [\"bond_outliers\", \"angle_outliers\", \"torsion_outliers\", \"ring_outliers\"]\n for ind in outN:\n logger.info(\"Type: %-20s Outlier count: %4d\", ind, rD[ind])\n #\n outL = [\"bond_list\", \"angle_list\", \"torsion_list\", \"ring_list\"]\n for ind in outL:\n ll = rD[ind]\n logger.info(\"Feature: %-20s total count: %4d\", ind, len(ll))\n for dD in ll:\n if dD[\"unusual\"]:\n mappedAtomL = self.__mapAtomNames(dD[\"atom_labels\"], atomMap) if atomMap else dD[\"atom_labels\"]\n if dD[\"type\"] in [\"bond\", \"angle\"]:\n logger.info(\"%20s %20s %.4f %.4f %.4f %.4f\", dD[\"atom_labels\"], mappedAtomL, dD[\"value\"], dD[\"mean\"], 
dD[\"standard_deviation\"], dD[\"z_score\"])\n else:\n logger.info(\"%20s %20s %.4f %.4f %.4f %.4f\", dD[\"atom_labels\"], mappedAtomL, dD[\"value\"], dD[\"mean\"], dD[\"standard_deviation\"], dD[\"local_density\"])", "def build_dmp_query(patient_id, bait_set):\n value = \"\"\n if \"impact341\" in bait_set.lower():\n value = \"IMPACT341\"\n if \"impact410\" in bait_set.lower():\n value = \"IMPACT410\"\n if \"impact468\" in bait_set.lower():\n value = \"IMPACT468\"\n if \"hemepact_v4\" in bait_set.lower():\n value = \"HEMEPACT\"\n if \"impact505\" in bait_set.lower():\n value = \"IMPACT505\"\n assay = Q(metadata__cmo_assay=value)\n # formatting to look like CMO patient IDs in dmp2cmo\n if \"C-\" in patient_id[:2]:\n patient_id = patient_id[2:]\n patient = Q(metadata__patient__cmo=patient_id)\n normal = Q(metadata__type=\"N\")\n query = assay & patient & normal\n return query", "def top_sources_all(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\n \"$project\": {\n \"outlet\": 1,\n \"sourcesMale\": 1,\n \"sourcesFemale\": 1,\n \"allSources\": {\n \"$concatArrays\": [\n {\"$ifNull\": [\"$sourcesFemale\", []]},\n {\"$ifNull\": [\"$sourcesMale\", []]},\n ]\n },\n }\n },\n {\"$unwind\": {\"path\": \"$allSources\", \"preserveNullAndEmptyArrays\": False}},\n {\"$group\": {\"_id\": \"$allSources\", \"count\": {\"$sum\": 1.0}}},\n {\"$sort\": {\"count\": args[\"sort\"]}},\n {\"$limit\": args[\"limit\"]},\n ]\n return query", "def get_all_summaries() -> Dict[str, CBSummary]:\n return _SUMMARIES", "def get_assembly_summary(id):\n from Bio import Entrez\n esummary_handle = Entrez.esummary(db=\"assembly\", id=id, report=\"full\")\n esummary_record = Entrez.read(esummary_handle)\n return esummary_record", "def summary():\n\n summary_result = session.query(Summary.Count, Summary.Total).all()\n session.close()\n\n # Return a List of Column Names (Sample Names)\n return jsonify(summary_result)", "def summarize_moduledb(moduledb):\n name2module = {} # module_name -> ModuleNode\n for module in moduledb:\n assert module.name not in name2module\n name2module[module.name] = module\n module_names = sorted(name2module)\n\n # module_name -> (list of in Datatypes, out Datatype)\n name2datatypes = {}\n for name, module in name2module.iteritems():\n name2datatypes[name] = module.in_datatypes, module.out_datatype\n\n # All DataType objects found in moduledb.\n datatypes = {} # name -> DataType object\n for (in_datatypes, out_datatype) in name2datatypes.itervalues():\n dts = in_datatypes + [out_datatype]\n for dt in dts:\n if dt.name in datatypes:\n continue\n datatypes[dt.name] = dt\n datatypes = datatypes.values()\n\n x = ModuleDbSummary(module_names, name2module, name2datatypes, datatypes)\n return x", "def get_authors_query():\n\n query = '''select authors.name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and substr(log.path,10)=articles.slug\n group by authors.name order by views desc;'''\n\n return query", "def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):\r\n index_analysis = None\r\n recommendation = None\r\n namespace = parsed_query['ns']\r\n indexStatus = \"unknown\"\r\n\r\n index_cache_entry = self._ensure_index_cache(db_uri,\r\n db_name,\r\n collection_name)\r\n\r\n\r\n 
query_analysis = self._generate_query_analysis(parsed_query,\r\n db_name,\r\n collection_name)\r\n if ((query_analysis['analyzedFields'] != []) and\r\n query_analysis['supported']):\r\n index_analysis = self._generate_index_analysis(query_analysis,\r\n index_cache_entry['indexes'])\r\n indexStatus = index_analysis['indexStatus']\r\n if index_analysis['indexStatus'] != 'full':\r\n recommendation = self._generate_recommendation(query_analysis,\r\n db_name,\r\n collection_name)\r\n # a temporary fix to suppress faulty parsing of $regexes.\r\n # if the recommendation cannot be re-parsed into yaml, we assume\r\n # it is invalid.\r\n if not validate_yaml(recommendation['index']):\r\n recommendation = None\r\n query_analysis['supported'] = False\r\n\r\n\r\n # QUERY REPORT\r\n return OrderedDict({\r\n 'queryMask': parsed_query['queryMask'],\r\n 'indexStatus': indexStatus,\r\n 'parsed': parsed_query,\r\n 'namespace': namespace,\r\n 'queryAnalysis': query_analysis,\r\n 'indexAnalysis': index_analysis,\r\n 'recommendation': recommendation\r\n })", "def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):\n index_analysis = None\n recommendation = None\n namespace = parsed_query['ns']\n indexStatus = \"unknown\"\n\n index_cache_entry = self._ensure_index_cache(db_uri,\n db_name,\n collection_name)\n\n\n query_analysis = self._generate_query_analysis(parsed_query,\n db_name,\n collection_name)\n if ((query_analysis['analyzedFields'] != []) and\n query_analysis['supported']):\n index_analysis = self._generate_index_analysis(query_analysis,\n index_cache_entry['indexes'])\n indexStatus = index_analysis['indexStatus']\n if index_analysis['indexStatus'] != 'full':\n recommendation = self._generate_recommendation(query_analysis,\n db_name,\n collection_name)\n # a temporary fix to suppress faulty parsing of $regexes.\n # if the recommendation cannot be re-parsed into yaml, we assume\n # it is invalid.\n if not validate_yaml(recommendation['index']):\n recommendation = None\n query_analysis['supported'] = False\n\n\n # QUERY REPORT\n return OrderedDict({\n 'queryMask': parsed_query['queryMask'],\n 'indexStatus': indexStatus,\n 'parsed': parsed_query,\n 'namespace': namespace,\n 'queryAnalysis': query_analysis,\n 'indexAnalysis': index_analysis,\n 'recommendation': recommendation\n })", "def get_query():\n query = \"\"\"{\n repository(name: \"flux\", owner: \"fluxcd\") {\n forkCount\n issues {\n totalCount\n }\n pullRequests {\n totalCount\n }\n releases {\n totalCount\n }\n stargazers {\n totalCount\n }\n watchers {\n totalCount\n }\n }\n}\n \"\"\"\n return query", "def build_query(db, request, tags):\n inner_query, clauses = build_inner_query(request, tags)\n if len(tags) and tags[-1][0] == 'uuid':\n # if we select uuid as the trailing tag we have to be special\n query = \"\"\"\nSELECT DISTINCT s.uuid \nFROM stream AS s\nWHERE s.id IN \"\"\" + inner_query\n elif len(tags) and (tags[-1][1] == None or tags[-1][1] == ''):\n # odd-numbered clasues, so we print matching values of tags\n t = escape_string(tags[-1][0])\n query = \"\"\"\nSELECT DISTINCT metadata -> %s AS svals FROM stream\nWHERE id IN %s AND metadata ? 
%s\nORDER BY svals ASC\"\"\" % (t, inner_query, t)\n else:\n # otherwise we print all tags matching the restriction\n query = \"\"\"\nSELECT DISTINCT skeys\nFROM (\n SELECT skeys(metadata) FROM stream\n WHERE id IN %s\n) AS skeys ORDER BY skeys ASC\"\"\" % inner_query\n\n log.msg(query)\n d = db.runQuery(query)\n d.addCallback(log_time, time.time())\n return d", "def _generate_query_analysis(self, parsed_query, db_name, collection_name):\r\n\r\n analyzed_fields = []\r\n field_count = 0\r\n supported = True\r\n sort_fields = []\r\n query_mask = None\r\n\r\n if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:\r\n supported = False\r\n else:\r\n #if 'orderby' in parsed_query:\r\n sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []\r\n sort_seq = 0\r\n for key in sort_component:\r\n sort_field = {'fieldName': key,\r\n 'fieldType': SORT_TYPE,\r\n 'seq': sort_seq}\r\n sort_fields.append(key)\r\n analyzed_fields.append(sort_field)\r\n field_count += 1\r\n sort_seq += 1\r\n\r\n query_component = parsed_query['query'] if 'query' in parsed_query else {}\r\n for key in query_component:\r\n if key not in sort_fields:\r\n field_type = UNSUPPORTED_TYPE\r\n if ((key not in UNSUPPORTED_QUERY_OPERATORS) and\r\n (key not in COMPOSITE_QUERY_OPERATORS)):\r\n try:\r\n if query_component[key] == {}:\r\n raise\r\n nested_field_list = query_component[key].keys()\r\n except:\r\n field_type = EQUIV_TYPE\r\n else:\r\n for nested_field in nested_field_list:\r\n if ((nested_field in RANGE_QUERY_OPERATORS) and\r\n (nested_field not in UNSUPPORTED_QUERY_OPERATORS)):\r\n field_type = RANGE_TYPE\r\n else:\r\n supported = False\r\n field_type = UNSUPPORTED_TYPE\r\n break\r\n\r\n if field_type is UNSUPPORTED_TYPE:\r\n supported = False\r\n\r\n analyzed_field = {'fieldName': key,\r\n 'fieldType': field_type}\r\n analyzed_fields.append(analyzed_field)\r\n field_count += 1\r\n\r\n query_mask = parsed_query['queryMask']\r\n\r\n # QUERY ANALYSIS\r\n return OrderedDict({\r\n 'analyzedFields': analyzed_fields,\r\n 'fieldCount': field_count,\r\n 'supported': supported,\r\n 'queryMask': query_mask\r\n })", "async def summary_census(myquery: UserRequestModel):\n age = myquery.age\n class_of_worker = myquery.class_of_worker\n det_ind_code = myquery.industry_code\n det_occ_code = myquery.occupation_code\n marital_stat = myquery.marital_status\n major_ind_code = myquery.major_industry_code\n major_occ_code = myquery.major_occupation_code\n hisp_origin = myquery.hispanic_origin\n sex = myquery.sex\n age = str(age)\n det_ind_code = str(det_ind_code)\n det_occ_code = str(det_occ_code)\n filter_query = \"\"\"\n WITH data AS (\n WITH data_occ AS (\n WITH data_class AS(\n WITH person_total AS (\n WITH person_edu AS (\n WITH person_sex AS (\n WITH person_race AS (\n WITH person_hisp AS (\n SELECT p1.id_person, p1.age, p1.year, p1.marital_stat, p1.race, \n p1.education, p1.sex, hsp.hisp_origin FROM person_tbl as p1\n INNER JOIN hisp_origin_tbl as hsp ON hsp.id = p1.hisp_origin\n )\n SELECT r.race, p2.id_person, p2.age, p2.year, p2.marital_stat,\n p2.education, p2.hisp_origin, p2.sex FROM race_tbl as r \n INNER JOIN person_hisp as p2 ON p2.race = r.id\n )\n SELECT p3.id_person, p3.race, p3.age, p3.year, p3.education, p3.hisp_origin,\n p3.sex, ms.marital_stat FROM person_race AS p3\n INNER JOIN martial_status_tbl as ms ON ms.id = p3.marital_stat\n )\n SELECT p4.id_person, p4.race, p4.age, p4.year, p4.marital_stat, p4.education, \n p4.hisp_origin, sex_tbl.sex FROM person_sex AS p4\n INNER 
JOIN sex_tbl ON sex_tbl.id = p4.sex\n )\n SELECT p5.id_person, p5.race, p5.age, p5.year, p5.marital_stat, edu.education,\n p5.hisp_origin, p5.sex FROM person_edu as p5\n INNER JOIN education_tbl as edu ON edu.id = p5.education\n )\n SELECT p.id_person, p.race, p.age, p.year, p.marital_stat, p.education, p.hisp_origin, \n p.sex, e.det_occ_code, e.wage_per_hour, e.union_member, e.unemp_reason,\n e.own_or_self, e.weeks_worked, e.income_50k, e.class_worker FROM person_total AS p\n INNER JOIN employee_tbl as e ON e.id_person=p.id_person\n )\n SELECT dcl.id_person, dcl.race, dcl.age, dcl.year, dcl.marital_stat, dcl.education, dcl.hisp_origin,\n dcl.sex, dcl.wage_per_hour, dcl.union_member, dcl.unemp_reason, dcl.own_or_self,\n dcl.weeks_worked, dcl.income_50k, dcl.det_occ_code, cw.class_worker FROM data_class as dcl\n INNER JOIN class_worker_tbl as cw ON cw.id = dcl.class_worker\n )\n SELECT docc.id_person, docc.race, docc.age, docc.year, docc.marital_stat, docc.education, docc.hisp_origin,\n docc.sex, docc.wage_per_hour, docc.union_member, docc.unemp_reason, docc.own_or_self,\n docc.weeks_worked, docc.income_50k, mo.major_occ_code, mo.det_ind_code, docc.class_worker,\n docc.det_occ_code FROM data_occ as docc\n INNER JOIN det_occ_code_tbl as mo ON mo.det_occ_code = docc.det_occ_code\n )\n SELECT data.id_person, data.race, data.age, data.year, data.marital_stat, data.education, data.hisp_origin,\n data.sex, data.wage_per_hour, data.union_member, data.unemp_reason, data.own_or_self, data.class_worker,\n data.weeks_worked, data.income_50k, data.major_occ_code, mi.major_ind_code, \n data.det_ind_code, data.det_occ_code FROM data\n INNER JOIN det_ind_code_tbl as mi ON mi.det_ind_code = data.det_ind_code\n WHERE age = '{}'\"\"\".format(age)\n\n filter_query = filter_query + \" AND class_worker = '{}'\".format(class_of_worker)\n filter_query = filter_query + \" AND data.det_ind_code = '{}'\".format(det_ind_code)\n filter_query = filter_query + \" AND data.det_occ_code = '{}'\".format(det_occ_code) \n\n if None in [marital_stat, major_ind_code, major_occ_code, hisp_origin, sex]:\n if marital_stat is not None:\n filter_query = filter_query + \" AND marital_stat = '{}'\".format(marital_stat)\n if major_ind_code is not None:\n filter_query = filter_query + \" AND major_ind_code = '{}'\".format(major_ind_code)\n if major_occ_code is not None:\n filter_query = filter_query + \" AND major_occ_code = '{}'\".format(major_occ_code)\n if hisp_origin is not None:\n filter_query = filter_query + \" AND hisp_origin = '{}'\".format(hisp_origin)\n if sex is not None:\n filter_query = filter_query + \" AND sex = '{}'\".format(sex) \n\n table_query = filter_query + ';'\n query_to_csv = await database.fetch_all(query=table_query)\n\n # data_file = open('files/filtered_table.csv', 'w', newline='')\n data_file = io.StringIO()\n csv_writer = csv.writer(data_file)\n count = True\n for emp in query_to_csv:\n if count:\n header = emp.keys()\n csv_writer.writerow(header)\n count = False\n csv_writer.writerow(emp.values())\n # data_file.close()\n\n final_block = \"\"\")\n SELECT avg(wage_per_hour) as mean_wage, avg(weeks_worked) as mean_weeks_worked,\n min(wage_per_hour) as min_wage, min(weeks_worked) as min_weeks_worked,\n max(wage_per_hour) as max_wage, max(weeks_worked) as max_weeks_worked,\n sum(income_50k) as person_50k_plus, count(id_person) as num_person\n FROM filter;\"\"\"\n \n filter_query = 'WITH filter AS ( ' + filter_query\n filter_query = filter_query + final_block\n results = await 
database.fetch_all(query=filter_query)\n\n answer = {}\n for row in results:\n answer.update(dict(row))\n # with open('files/query.json', 'w') as outfile:\n # json.dump(answer, outfile)\n json_writer = json.dumps(answer)#, default=jsonDefault)\n \n # files = ['files/query.json', 'files/filtered_table.csv']\n file_names = ['query.json', 'filtered_table.csv']\n file_objects = [json_writer, data_file.getvalue().encode()]\n files = []\n i = 0\n for f in file_names:\n files.append((f, file_objects[i]))\n i += 1\n\n return zipfiles(files)", "def _generate_query_analysis(self, parsed_query, db_name, collection_name):\n\n analyzed_fields = []\n field_count = 0\n supported = True\n sort_fields = []\n query_mask = None\n\n if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:\n supported = False\n else:\n #if 'orderby' in parsed_query:\n sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []\n sort_seq = 0\n for key in sort_component:\n sort_field = {'fieldName': key,\n 'fieldType': SORT_TYPE,\n 'seq': sort_seq}\n sort_fields.append(key)\n analyzed_fields.append(sort_field)\n field_count += 1\n sort_seq += 1\n\n query_component = parsed_query['query'] if 'query' in parsed_query else {}\n for key in query_component:\n if key not in sort_fields:\n field_type = UNSUPPORTED_TYPE\n if ((key not in UNSUPPORTED_QUERY_OPERATORS) and\n (key not in COMPOSITE_QUERY_OPERATORS)):\n try:\n if query_component[key] == {}:\n raise\n nested_field_list = query_component[key].keys()\n except:\n field_type = EQUIV_TYPE\n else:\n for nested_field in nested_field_list:\n if ((nested_field in RANGE_QUERY_OPERATORS) and\n (nested_field not in UNSUPPORTED_QUERY_OPERATORS)):\n field_type = RANGE_TYPE\n else:\n supported = False\n field_type = UNSUPPORTED_TYPE\n break\n\n if field_type is UNSUPPORTED_TYPE:\n supported = False\n\n analyzed_field = {'fieldName': key,\n 'fieldType': field_type}\n analyzed_fields.append(analyzed_field)\n field_count += 1\n\n query_mask = parsed_query['queryMask']\n\n # QUERY ANALYSIS\n return OrderedDict({\n 'analyzedFields': analyzed_fields,\n 'fieldCount': field_count,\n 'supported': supported,\n 'queryMask': query_mask\n })", "def getDsetSummary(dbsApi, dset):\n\n\n summary = dbsApi.listBlockSummaries(dataset = dset)\n return summary", "def general_stats():\n\n setproctitle(f\"RNANet statistics.py general_stats()\")\n\n reqs = [\n # unique unmapped chains with no issues\n \"\"\" SELECT distinct pdb_id, chain_name, exp_method, resolution\n FROM chain JOIN structure ON chain.structure_id = structure.pdb_id\n WHERE rfam_acc = 'unmappd' AND ISSUE=0;\"\"\",\n\n # unique mapped chains with no issues\n \"\"\" SELECT distinct pdb_id, chain_name, exp_method, resolution\n FROM chain JOIN structure ON chain.structure_id = structure.pdb_id\n WHERE rfam_acc != 'unmappd' AND ISSUE=0;\"\"\",\n\n # mapped chains with no issues\n \"\"\" SELECT pdb_id, chain_name, inferred, rfam_acc, pdb_start, pdb_end, exp_method, resolution\n FROM chain JOIN structure ON chain.structure_id = structure.pdb_id\n WHERE rfam_acc != 'unmappd' AND ISSUE=0;\"\"\",\n\n # mapped chains with no issues that are all inferred\n \"\"\" SELECT DISTINCT pdb_id, c.chain_name, exp_method, resolution\n FROM (\n SELECT inferred, rfam_acc, pdb_start, pdb_end, chain.structure_id, chain.chain_name, r.redundancy, r.inf_redundancy\n FROM chain \n JOIN (SELECT structure_id, chain_name, COUNT(distinct rfam_acc) AS redundancy, SUM(inferred) AS inf_redundancy \n FROM chain \n WHERE rfam_acc != 'unmappd' 
AND issue=0 \n GROUP BY structure_id, chain_name\n ) AS r ON chain.structure_id=r.structure_id AND chain.chain_name = r.chain_name \n WHERE r.redundancy=r.inf_redundancy AND rfam_acc != 'unmappd' and issue=0\n ) AS c\n JOIN structure ON c.structure_id=structure.pdb_id;\"\"\",\n\n # Number of mapped chains (not inferred)\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 0);\"\"\",\n\n # Number of unique mapped chains (not inferred)\n \"\"\"SELECT count(*) FROM (SELECT DISTINCT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 0);\"\"\",\n\n # Number of mapped chains (inferred)\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 1);\"\"\",\n\n # Number of unique mapped chains (inferred)\n \"\"\"SELECT count(*) FROM (SELECT DISTINCT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 1);\"\"\",\n\n # Number of mapped chains inferred once\n \"\"\"SELECT count(*) FROM (\n SELECT structure_id, chain_name, COUNT(DISTINCT rfam_acc) as c \n FROM chain where rfam_acc!='unmappd' and inferred=1 \n GROUP BY structure_id, chain_name\n ) WHERE c=1;\"\"\",\n\n # Number of mapped chains inferred twice\n \"\"\"select count(*) from (\n select structure_id, chain_name, count(distinct rfam_acc) as c \n from chain where rfam_acc!='unmappd' and inferred=1 \n group by structure_id, chain_name\n ) where c=2;\"\"\",\n\n # Number of mapped chains inferred 3 times or more\n \"\"\"select count(*) from (\n select structure_id, chain_name, count(distinct rfam_acc) as c \n from chain where rfam_acc!='unmappd' and inferred=1 \n group by structure_id, chain_name\n ) where c>2;\"\"\",\n\n # Number of chains both mapped with and without inferrence\n \"\"\" SELECT COUNT(*) FROM (\n SELECT structure_id, chain_name, sum(inferred) AS s, COUNT(rfam_acc) AS c \n FROM chain \n WHERE rfam_acc!='unmappd' \n GROUP BY structure_id, chain_name\n ) \n WHERE s < c AND s > 0;\"\"\",\n \n # Number of mapped chains (total)\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd');\"\"\",\n\n # Number of unique mapped chains\n \"\"\"SELECT count(*) FROM (SELECT DISTINCT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd');\"\"\",\n\n # Number of unmapped chains\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc = 'unmappd');\"\"\",\n \n # Number of mapped chains without issues (not inferred)\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 0 AND issue = 0);\"\"\",\n\n # Number of unique mapped chains without issues (not inferred)\n \"\"\"SELECT count(*) FROM (SELECT DISTINCT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 0 AND issue = 0);\"\"\",\n\n # Number of mapped chains without issues (inferred)\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 1 AND issue=0);\"\"\",\n\n # Number of unique mapped chains without issues (inferred)\n \"\"\"SELECT count(*) FROM (SELECT DISTINCT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND inferred = 1 AND issue=0);\"\"\",\n\n # Number of mapped chains without issues (total)\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND issue=0);\"\"\",\n\n # Number of unique mapped chains without issues\n 
\"\"\"SELECT count(*) FROM (SELECT DISTINCT structure_id, chain_name FROM chain WHERE rfam_acc != 'unmappd' AND issue=0);\"\"\",\n\n # Number of unmapped chains without issues\n \"\"\"SELECT count(*) FROM (SELECT structure_id, chain_name FROM chain WHERE rfam_acc = 'unmappd' AND issue=0);\"\"\"\n ]\n\n answers = []\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n for r in reqs:\n answers.append(pd.read_sql(r, conn))\n df_unique = answers[0]\n df_mapped_unique = answers[1]\n df_mapped_copies = answers[2]\n df_inferred_only_unique = answers[3]\n print()\n print(\"> found\", answers[4].iloc[0][0], f\"chains ({answers[5].iloc[0][0]} unique chains) that are mapped thanks to Rfam. Removing chains with issues, only {answers[15].iloc[0][0]} ({answers[16].iloc[0][0]} unique)\")\n if answers[4].iloc[0][0] != answers[5].iloc[0][0]:\n print(\"\\t> This happens because different parts of the same chain can be mapped to different families.\")\n print(\"> found\", answers[6].iloc[0][0], f\"chains ({answers[7].iloc[0][0]} unique chains) that are mapped by inferrence. Removing chains with issues, only {answers[17].iloc[0][0]} ({answers[18].iloc[0][0]} unique).\")\n print(\"\\t> \", answers[8].iloc[0][0], \"chains are mapped only once,\")\n print(\"\\t> \", answers[9].iloc[0][0], \"are mapped to 2 families,\")\n print(\"\\t> \", answers[10].iloc[0][0], \"are mapped to 3 or more.\")\n print(\"> Among them,\", answers[11].iloc[0][0], \"chains are mapped both with families found on Rfam and by inferrence.\")\n if answers[11].iloc[0][0]:\n print(\"\\t> this is normal if you used option -f (--full-inference). Otherwise, there might be a problem.\")\n print(\"> TOTAL:\", answers[12].iloc[0][0], f\"chains ({answers[13].iloc[0][0]} unique chains) mapped to a family. Removing chains with issues, only {answers[19].iloc[0][0]} ({answers[20].iloc[0][0]} unique).\")\n print(\"> TOTAL:\", answers[14].iloc[0][0], f\"unmapped chains. Removing chains with issues, {answers[21].iloc[0][0]}.\")\n if answers[14].iloc[0][0]:\n print(\"\\t> this is normal if you used option --no-homology. 
Otherwise, there might be a problem.\")\n print()\n\n ##########################################\n # plot N = f(resolution, exp_method)\n ##########################################\n\n methods = df_unique.exp_method.unique()\n\n fig, axs = plt.subplots(1+len(methods), 3, figsize=(15,5*(1+len(methods))), sharex=True)\n df_unique.sort_values('resolution', inplace=True, ignore_index=True)\n df_mapped_unique.sort_values('resolution', inplace=True, ignore_index=True)\n df_inferred_only_unique.sort_values('resolution', inplace=True, ignore_index=True)\n df_mapped_copies.sort_values('resolution', inplace=True, ignore_index=True)\n max_res = max(df_unique.resolution)\n max_structs = max(len(df_mapped_copies.index), len(df_unique.index))\n colors = np.linspace(0,1,1+len(methods))\n plt.xticks( np.arange(0, max_res+2, 2.0).tolist(), np.arange(0, max_res+2, 2.0).tolist() )\n\n axs[0][0].grid(axis='y', ls='dotted', lw=1)\n axs[0][0].hist(df_unique.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 1, colors[0], 1), label='distribution')\n axs[0][0].hist(df_unique.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 0, colors[0], 0.5), cumulative=True, label='cumulative')\n axs[0][0].text(0.95*max_res, 0.95*len(df_unique.resolution), \"%d \" % len(df_unique.resolution), \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[0][0].set_ylabel(\"ALL\", fontsize=14)\n axs[0][0].set_title(\"Number of unique RNA chains\", fontsize=14)\n axs[0][0].set_ylim((0, max_structs * 1.05))\n axs[0][0].legend(loc=\"lower right\", fontsize=14)\n\n axs[0][1].grid(axis='y', ls='dotted', lw=1)\n axs[0][1].set_yticklabels([])\n axs[0][1].hist(df_mapped_unique.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 1, colors[0], 1), label='distribution')\n axs[0][1].hist(df_mapped_unique.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 0, colors[0], 0.5), cumulative=True, label='cumulative')\n axs[0][1].hist(df_inferred_only_unique.resolution, bins=np.arange(0, max_res, 0.5), fc=(0.2, 0, colors[0], 0.5), cumulative=True, label='only by inference')\n axs[0][1].text(0.95*max_res, 0.95*len(df_mapped_unique.resolution), \"%d \" % len(df_mapped_unique.resolution), \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[0][1].set_title(r\"Number of unique RNA chains\\nmapped to $\\geq 1$ family\", fontsize=14)\n axs[0][1].set_ylim((0, max_structs * 1.05))\n axs[0][1].legend(loc=\"upper left\", fontsize=14)\n\n axs[0][2].grid(axis='y', ls='dotted', lw=1)\n axs[0][2].set_yticklabels([])\n axs[0][2].hist(df_mapped_copies.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 1, colors[0], 1), label='distribution')\n axs[0][2].hist(df_mapped_copies.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 0, colors[0], 0.5), cumulative=True, label='cumulative')\n axs[0][2].hist(df_mapped_copies[df_mapped_copies.inferred == 1].resolution, bins=np.arange(0, max_res, 0.5), fc=(0.2, 0, colors[0], 0.5), cumulative=True, label='inferred')\n axs[0][2].text(0.95*max_res, 0.95*len(df_mapped_copies.resolution), \"%d \" % len(df_mapped_copies.resolution), \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[0][2].set_title(\"Number of RNA chains mapped to a\\nfamily (with copies)\", fontsize=14)\n axs[0][2].legend(loc=\"upper left\", fontsize=14)\n axs[0][2].set_ylim((0, max_structs * 1.05))\n\n for i,m in enumerate(methods):\n df_unique_m = df_unique[df_unique.exp_method == m]\n df_mapped_unique_m = df_mapped_unique[df_mapped_unique.exp_method == m]\n df_inferred_only_unique_m = 
df_inferred_only_unique[df_inferred_only_unique.exp_method == m]\n df_mapped_copies_m = df_mapped_copies[ df_mapped_copies.exp_method == m]\n max_structs = max(len(df_mapped_copies_m.index), len(df_unique_m.index))\n print(\"> found\", max_structs, \"structures with method\", m, flush=True)\n\n axs[1+i][0].grid(axis='y', ls='dotted', lw=1)\n axs[1+i][0].hist(df_unique_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 1, colors[1+i], 1), label='distribution')\n axs[1+i][0].hist(df_unique_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 0, colors[1+i], 0.5), cumulative=True, label='cumulative')\n axs[1+i][0].text(0.95*max_res, 0.95*len(df_unique_m.resolution), \"%d \" % len(df_unique_m.resolution), \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[1+i][0].set_ylim((0, max_structs * 1.05))\n axs[1+i][0].set_ylabel(m, fontsize=14)\n axs[1+i][0].legend(loc=\"lower right\", fontsize=14)\n\n axs[1+i][1].grid(axis='y', ls='dotted', lw=1)\n axs[1+i][1].set_yticklabels([])\n axs[1+i][1].hist(df_mapped_unique_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 1, colors[1+i], 1), label='distribution')\n axs[1+i][1].hist(df_mapped_unique_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 0, colors[1+i], 0.5), cumulative=True, label='cumulative')\n axs[1+i][1].hist(df_inferred_only_unique_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0.2, 0, colors[1+i], 0.5), cumulative=True, label='only by inference')\n axs[1+i][1].text(0.95*max_res, 0.95*len(df_mapped_unique_m.resolution), \"%d \" % len(df_mapped_unique_m.resolution), \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[1+i][1].set_ylim((0, max_structs * 1.05))\n axs[1+i][1].legend(loc=\"upper left\", fontsize=14)\n \n axs[1+i][2].grid(axis='y', ls='dotted', lw=1)\n axs[1+i][2].set_yticklabels([])\n axs[1+i][2].hist(df_mapped_copies_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 1, colors[1+i], 1), label='distribution')\n axs[1+i][2].hist(df_mapped_copies_m.resolution, bins=np.arange(0, max_res, 0.5), fc=(0, 0, colors[1+i], 0.5), cumulative=True, label='cumulative')\n axs[1+i][2].hist(df_mapped_copies_m[df_mapped_copies_m.inferred == 1].resolution, bins=np.arange(0, max_res, 0.5), fc=(0.2, 0, colors[1+i], 0.5), cumulative=True, label='inferred')\n axs[1+i][2].text(0.95*max_res, 0.95*len(df_mapped_copies_m.resolution), \"%d \" % len(df_mapped_copies_m.resolution), \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[1+i][2].set_ylim((0, max_structs * 1.05))\n axs[1+i][2].legend(loc=\"upper left\", fontsize=14)\n \n axs[-1][0].set_xlabel(\"Structure resolution\\n(Angströms, lower is better)\", fontsize=14)\n axs[-1][1].set_xlabel(\"Structure resolution\\n(Angströms, lower is better)\", fontsize=14)\n axs[-1][2].set_xlabel(\"Structure resolution\\n(Angströms, lower is better)\", fontsize=14)\n\n fig.suptitle(\"Number of RNA chains by experimental method and resolution\", fontsize=16)\n fig.subplots_adjust(left=0.07, right=0.98, wspace=0.05, \n hspace=0.05, bottom=0.05, top=0.92)\n fig.savefig(runDir + \"/results/figures/resolutions.png\")\n plt.close()\n\n ##########################################\n # plot Nfam = f(resolution, exp_method)\n ##########################################\n\n df_mapped_copies['n_fam'] = [ len(df_mapped_copies.rfam_acc[:i+1].unique()) for i in range(len(df_mapped_copies.index)) ]\n\n fig, axs = plt.subplots(1, 1+len(methods), figsize=(5*(1+len(methods)), 5))\n max_res = max(df_mapped_copies.resolution)\n max_fams = 
max(df_mapped_copies.n_fam)\n colors = np.linspace(0,1,1+len(methods))\n plt.xticks( np.arange(0, max_res+2, 2.0).tolist(), np.arange(0, max_res+2, 2.0).tolist() )\n\n axs[0].grid(axis='y', ls='dotted', lw=1)\n axs[0].plot(df_mapped_copies.resolution, df_mapped_copies.n_fam)\n axs[0].text(0.95*max_res, 0.95*df_mapped_copies.n_fam.iloc[-1], \"%d \" % df_mapped_copies.n_fam.iloc[-1], \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[0].set_title(\"ALL\", fontsize=14)\n axs[0].set_xlabel(\"Structure resolution (Angströms)\", fontsize=14)\n axs[0].set_ylabel(\"Number of Rfam families\", fontsize=14)\n axs[0].set_ylim((0, max_res * 1.05))\n axs[0].set_ylim((0, max_fams * 1.05))\n \n for i,m in enumerate(methods):\n df_mapped_copies_m = df_mapped_copies[ df_mapped_copies.exp_method == m].drop(\"n_fam\", axis=1).copy()\n df_mapped_copies_m['n_fam'] = [ len(df_mapped_copies_m.rfam_acc[:i+1].unique()) for i in range(len(df_mapped_copies_m.index)) ]\n print(\">\", df_mapped_copies_m.n_fam.iloc[-1], \"different RNA families have a 3D structure solved by\", m)\n\n axs[1+i].grid(axis='y', ls='dotted', lw=1)\n axs[1+i].plot(df_mapped_copies_m.resolution, df_mapped_copies_m.n_fam, )\n axs[1+i].text(0.95*max(df_mapped_copies_m.resolution), 0.95*df_mapped_copies_m.n_fam.iloc[-1], \"%d \" % df_mapped_copies_m.n_fam.iloc[-1], \n horizontalalignment='right', verticalalignment='top', fontsize=14)\n axs[1+i].set_xlim((0, max_res * 1.05))\n axs[1+i].set_ylim((0, max_fams * 1.05))\n axs[1+i].set_xlabel(\"Structure resolution (Angströms)\", fontsize=14)\n axs[1+i].set_title(m, fontsize=14)\n axs[1+i].set_yticklabels([])\n \n fig.suptitle(\"Number of RNA families used by experimental method and resolution\", fontsize=16)\n fig.subplots_adjust(left=0.05, right=0.98, wspace=0.05, \n hspace=0.05, bottom=0.12, top=0.84)\n fig.savefig(runDir + \"/results/figures/Nfamilies.png\")\n plt.close()", "def info(\n db_query: DbQuery, ids: Optional[List[str]] = None, date_range: Optional[str] = None\n) -> None:\n results = make_queries(db_query, ids, date_range)\n rows = get_rows_from_runs(results)\n rows = add_header_to_rows(rows)\n print_rows(rows)", "def generate_query_report(self, db_uri, query, db_name, collection_name):\r\n return self._query_analyzer.generate_query_report(db_uri,\r\n query,\r\n db_name,\r\n collection_name)", "def metadata_reporter(self):\n logging.info('Creating summary report')\n header = '{}\\n'.format(','.join(self.headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # 
CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # 
SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # PipelineVersion\n data += self.commit + ','\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)", "def export_getDBSummary(self):\n gLogger.info(\"RequestManagerHandler.getDBSummary: Attempting to obtain database summary.\")\n try:\n res = requestDB.getDBSummary()\n return res\n except Exception,x:\n errStr = \"RequestManagerHandler.getDBSummary: Exception while getting database summary.\"\n gLogger.exception(errStr,lException=x)\n return S_ERROR(errStr)", "def getQiimeSffDbSummary(self,study_id):\n try:\n con = self.getSFFDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_qiime_sff_db_summary', \\\n [study_id,results])\n return results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def build_query_dict(self, term_list, issn_list, year_list, jlist):\n journal_frame = self.make_jlist(jlist)\n\n search_terms = self.build_search_terms(term_list)\n dict1 = {}\n #This loop goes through and sets up a dictionary key with an ISSN number\n\n for issn in issn_list:\n\n issn_terms = ' AND ISSN(' + issn + ')'\n dict2 = {}\n #This loop goes and attaches all the years to the outer loop's key.\n for year in year_list:\n\n year_terms = \"AND PUBYEAR IS \" + str(year)\n querystring = search_terms + year_terms + issn_terms\n\n dict2[year] = querystring\n\n dict1[issn] = dict2\n\n return dict1" ]
[ "0.5428255", "0.53583986", "0.52812225", "0.5227681", "0.5199599", "0.5171266", "0.5095973", "0.5043583", "0.49843055", "0.4918133", "0.48670068", "0.48459524", "0.4845896", "0.48094174", "0.4802881", "0.48007256", "0.47844446", "0.477813", "0.47563112", "0.47450638", "0.474027", "0.47188413", "0.47114763", "0.469775", "0.46796316", "0.46687672", "0.4664282", "0.46551293", "0.46484038", "0.46398592" ]
0.632136
0
Increment the count of the current bp. If the base pair is in `Loader.ignore_bp` we return the current counts.
def increment_bp(self, current, bp, crossing):
    if bp in self.ignore_bp:
        return current
    return self.increment(current, 'bps', bp, crossing)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pycount(self):\n\n self.count += 1\n return self.count", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def increment_counter(self) -> None:", "def counter(self) -> int:", "def counter(self) -> int:", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def inc( self ):\n self.count += 1", "def inc(self):\n \n self.count += 1", "def break_count(self):\n return len(self.link_ids) + len(self.crossring_cleavages)", "def add_count(self):\n self.count += 1", "def count_current():\n return current.count()", "def increase_counter(self):\n self.values = self.values + 1", "def split_counts(self) -> Dict[int, int]:\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts", "def increment_pc(self):\n self.program_counter[-1] += 1", "def counter(self, value: int, /) -> None:", "def __incKeyCount(self,\n key):\n if (self.__keyCount.has_key(key) == 0): self.__keyCount[key] = 0\n self.__keyCount[key] = self.__keyCount[key] + 1\n return self.__keyCount[key]", "def inc_counter(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def increment():\n global total\n total += 1\n return total", "def postfix_increment(self) -> int:\n result = self._counter\n if self._counter < self._max_value:\n self._counter += 1\n return result", "def total_buffers_count(self) -> int:\n return self._counter", "def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count", "def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count", "def inc(self):\n return self._inc", "def split_count(self) -> int:\n return int(self.graph_tuple_stats.split_count or 0)", "def count(self):\r\n return self.count_helper(self.top_node)", "def processor_count(self):\n return self._processor_count", "def incremented_count(self):\n from datetime import datetime\n\n self.__last_count += 1\n\n # get the local time, with timezone\n #\n now = datetime.now(ClientData.tz())\n self.set_last_count_update_time(now)\n return self.last_count()", "def increase_count(self, number=1):\n self.count += number" ]
[ "0.6269762", "0.60979223", "0.60404366", "0.59808373", "0.59808373", "0.5931304", "0.5916859", "0.5893586", "0.5866122", "0.5754383", "0.5724916", "0.5691284", "0.5618478", "0.56114733", "0.5611444", "0.5597437", "0.55804634", "0.55729675", "0.5571835", "0.5542841", "0.5537197", "0.55363435", "0.5535421", "0.55080426", "0.5490957", "0.54586464", "0.54515505", "0.54277027", "0.54242784", "0.5411987" ]
0.6975057
0
Insert the node n at index position pos.
def insert(self, n, pos):
    if pos == 0:
        self.cons(n)
    else:
        prev = self.index(pos-1)
        next = prev.next
        prev.next = n
        n.next = next
        self.len += 1
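A minimal, runnable sketch of the same insert-at-position logic follows; the Node and LinkedList helpers (cons, index) are assumptions standing in for the class this method belongs to:

# Assumed helper classes for illustration only.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None
        self.len = 0

    def cons(self, n):
        # Push n onto the front of the list.
        n.next = self.head
        self.head = n
        self.len += 1

    def index(self, pos):
        # Walk pos links from the head and return that node.
        node = self.head
        for _ in range(pos):
            node = node.next
        return node

    def insert(self, n, pos):
        if pos == 0:
            self.cons(n)
        else:
            prev = self.index(pos - 1)
            n.next = prev.next
            prev.next = n
            self.len += 1

lst = LinkedList()
for v in (3, 2, 1):
    lst.cons(Node(v))      # list is now 1 -> 2 -> 3
lst.insert(Node(9), 1)     # list is now 1 -> 9 -> 2 -> 3, lst.len == 4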
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, value, pos):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n\r\n if pos == 0:\r\n self.prepend(value)\r\n return\r\n\r\n index = 0\r\n node = self.head\r\n while node.next and index <= pos:\r\n if (pos - 1) == index:\r\n new_node = Node(value)\r\n new_node.next = node.next\r\n node.next = new_node\r\n return\r\n\r\n index += 1\r\n node = node.next\r\n else:\r\n self.append(value)", "def insert(self, pos, item):\n \n if pos == 0:\n self.add(item)\n \n elif pos >= self.length():\n self.append(item)\n \n else:\n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n n = Node(item)\n previous.set_next(n)\n n.set_next(current)", "def insert(self, n, bin_):\n pred_pos = self.predecessor(n, bin_[0], bin_[1])\n insert_pos = pred_pos + 1 if pred_pos is not None else bin_[0]\n self.nums.insert(insert_pos, n)", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def insert(self, data, index):\n if index == 0:\n self.prepend(data)\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current or previous:\n if current_index == index:\n new_node = Node(data)\n new_node.next = current\n previous.next = new_node\n break\n\n previous = current\n current = current.next\n current_index += 1", "def insert(self, pos, data):\n assert pos >= 0\n if pos >= self.size(): # todo: support to insert node in end of the list\n raise Exception(\"pos:%d is out of index:%d\" % (pos, self.size()-1))\n\n last = None\n current = self.head\n count = -1\n while current is not None:\n count += 1\n if count == pos:\n node = Node(data)\n\n if last is None:\n node.next = self.head\n self.head = node\n else:\n node.next = current\n last.next = node\n\n return\n\n last = current\n current = current.next", "def insert_at_position(self, position, data):\n node = Node(data)\n if not self.head:\n self.head = node\n return\n if position == 1:\n node.next = self.head\n self.head = node\n return\n temp = self.head\n for _ in range(1, position - 1):\n if not temp:\n print('Index out of bound')\n return\n temp = temp.next\n node.next = temp.next\n temp.next = node", "def insert(self,x,pos):\n new = ListNode()\n new.value = x\n new.next = pos.next\n pos.next = new", "def insert(self, index, value):\n # check the validity of index\n if index < 0 or index > self.n: # larger than no. 
of items\n print(\"Index Error; please input valid index\")\n return\n # if index==0, same as push_front\n if index==0:\n self.push_front(value)\n return\n # else,\n new_node = Node(value)\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n new_node.next = temp_node.next # temp_node is index-1 node\n temp_node.next = new_node\n self.n += 1", "def insert(self, data, index):\n if index == 0:\n self.add(data)\n\n if index > 0:\n new = Node(data)\n position = index # Cada que se llama a current = current.next_node, se decrementa el valor de position en 1, cuando el valor sea cero, se ha llegado al nodo que está actualmente en la posición que queremos insertar el nuevo valor\n current = self.head\n\n while position > 1:\n current = current.next_node\n position -= 1\n \n prev_node = current\n next_node = current.next_node\n\n prev_node.next_node = new\n new.next_node = next_node", "def insert(self, position, data):\n\n node = Node(data)\n traverse = self.head\n\n for i in range(0, position - 1):\n traverse = traverse.next\n temp = traverse.next\n traverse.next = node\n node.next = temp", "def insert(self, index, item):\n \n # Create a new node\n new_code = Node(item)\n \n # Go to node (index - 1)\n curr = self.first\n for i in range(index - 1):\n curr = curr.next\n \n old_next_node = curr.next \n # Update curr's next attribute\n curr.next = new_node\n \n # Update new node's next attribute\n new_node.next = old_next_node", "def insert(self, index, value):\n if self.head is None:\n self.append(value)\n return\n \n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n \n if node is None:\n if from_head: \n self.append(value)\n return\n else:\n self.push_front(value)\n return\n if node is self.head:\n self.push_front(value)\n return\n else:\n new_node = DLLNode(value)\n new_node.next_node = node\n new_node.prev_node = node.prev_node\n node.prev_node.next_node = new_node\n node.prev_node = new_node \n return", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def insert(self, i, node_value):\n node = Node(node_value, None)\n if i < 0 or i > self.num_elements:\n raise IndexError(\"Insert index is out of range.\")\n if i == 0:\n node.next = self.head\n self.head = node\n else:\n current_node = self.head\n for j in xrange(i - 1):\n current_node = current_node.next\n node.next = current_node.next\n current_node.next = node\n self.num_elements += 1", "def insert_at_index(self, data, index):\n new_node = SingleNode(data)\n if index < 1:\n raise IndexError(\"Index out of bonds\")\n elif index == 1:\n new_node.next = self.head\n self.head = new_node\n else:\n temp = self.head\n for i in range(1, index-1):\n if temp is not None:\n temp = temp.next\n if temp is not None:\n new_node.next = temp.next\n temp.next = new_node\n else:\n print(\"The previous node is None\")", "def insert(self, n, new_node):\n curr_node = self.head\n \n i = 0\n while i < n:\n if curr_node.next is None:\n raise IndexError(\"list is shorter than n\")\n curr_node = curr_node.next\n i += 1\n \n new_node.next = curr_node.next\n curr_node.next = new_node\n \n return None", "def insert(self, pos, length):\n if pos in self.insertions:\n self.insertions[pos] += length\n else:\n self.insertions[pos] = length", "def insert(self, position: Node, val: Generic[T], num: int = 1) -> 
Node:\n def insert_node(position, num, val): #recursive function to insert nodes of value val\n if num == 0:\n return\n node1 = Node(val)\n temp = position.prev\n node1.next = position\n position.prev = node1\n\n temp.next = node1\n node1.prev = temp\n insert_node(node1, num-1, val)\n\n\n return insert_node(position, num, val)", "def insert(self, value, index=0):\n assert index < self._size, \"Limit Execeeded.\"\n\n node = Node(value)\n if index == 0:\n node.next = self._head\n self._head = node\n else:\n count = 0\n current = self._head\n while count < index - 1 and current:\n current = current.next\n count += 1\n current.next = node\n self._size += 1", "def insert_at_beginning(self, data: int) -> None:\n current = self.head\n new_node = Node(data)\n if current is not None:\n new_node.set_next_node(self.head)\n self.head = new_node\n self._increase_length()", "def insert(self, value, index=0):\n # Error case: Index out of acceptable range\n if index < 0 or index > self._size:\n raise RangeError(\"index out of range.\")\n\n # Edge case 1: index == 0\n # Behave like push_front()\n if index == 0:\n self.push_front(value)\n return\n\n # Edge case 2: index == size\n # Behave like push_back()\n if index == self._size:\n self.push_back(value)\n return\n\n new_node = self.Node(value)\n i = 1\n current_node = self._head.next\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n new_node.next = current_node\n new_node.prev = current_node.prev\n current_node.prev.next = new_node\n current_node.prev = new_node\n self._size += 1", "def insert(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n self.data.append(None)\n for i in range(len(self.data)-1,idx,-1):\n self.data[i] = self.data[i-1]\n self.data[idx] = value", "def addNodeBefore(self, new_value, before_node): # Class O(n)\r\n if not isinstance(new_value, Node):\r\n if new_value % 1 != 0: raise ValueError(\"Please, insert an integer\")\r\n if before_node > self.length(): raise ValueError(\"Invalid position\")\r\n if before_node == 1:\r\n self.head = Node(new_value, self.head)\r\n else:\r\n self.addNodeAfter(new_value, before_node - 1)", "def insert(self, index, item):\n if index > len(self):\n raise IndexError\n elif index == 0:\n self.insert_first(item)\n else:\n self._rest.insert(index-1, item)", "def insert(self, indexes: Tuple[int, ...], tree: 'Tree') -> None:\n ...", "def insert(self, index: int, item: Any) -> None:\n if self.is_empty() and index != 0:\n raise IndexError\n # Insert at the beginning.\n elif index == 0:\n to_push = self._first\n # modify self._first\n self._first = item\n # Call insert on to_push onto _rest\n if not self._rest and to_push:\n self._rest = RecursiveList([to_push])\n else:\n self._rest.insert(0, to_push)\n # Append case, add at the end when _rest is None\n elif index == 1 and not self._rest:\n self._rest = RecursiveList([item])\n # Recurse on the rest of the list.\n else:\n if not self._rest:\n raise IndexError\n else:\n self._rest.insert(index - 1, item)", "def insert(self, index, data):\n\n n = Node(data)\n\n if self.empty() and index != 0:\n print(\"Linked List is Empty hence value cannot be added to index: \", index)\n return\n\n size = self.size()\n\n if index > size:\n print(\"Size of the Linked List is less than the index\")\n return\n\n if index is size:\n return self.push(data)\n\n idx = 0\n h = self.head\n previous = self.head\n while h.next is not None:\n if idx is index:\n if previous is not h:\n previous.next = n\n n.next = h\n else:\n self.head = n\n 
self.head.next = h\n h = n\n return\n idx += 1\n previous = h\n h = h.next", "def insert(self, i: int, item: Any) -> None:\n if i < 0:\n i = self._length + i\n\n if i == 0:\n next_node = self._first\n self._first = _Node(item, next_node)\n self._length += 1\n else:\n curr = self._first\n curr_index = 0\n\n while curr is not None and curr_index != i - 1:\n curr_index += 1\n curr = curr.next\n\n if curr is None:\n raise IndexError\n else:\n new_node = _Node(item, curr.next)\n curr.next = new_node\n self._length += 1", "def insert(self, pos, value):\n\t\titems = self.__dict__.values()\n\t\tif not isinstance(pos, int) or pos < 0:\n\t\t\traise ValueError(\"'pos' value is not positive integer.\")\n\t\telif pos > len(items):\n\t\t\traise ValueError(\"'pos' value is not a position in self.__dict__\")\n\t\titems.insert(pos, value)\n\t\tnew_dict = {}\n\t\tfor x, y in enumerate(items):\n\t\t\tnew_dict.update({x: y})\n\t\tself.__dict__ = new_dict" ]
[ "0.72740185", "0.7103719", "0.69398314", "0.6928973", "0.6863136", "0.68557435", "0.6832058", "0.6755653", "0.67459166", "0.667409", "0.666163", "0.6628564", "0.6612427", "0.65446794", "0.6527768", "0.65218896", "0.6517661", "0.65158284", "0.6425271", "0.64027345", "0.6361295", "0.6355887", "0.63331854", "0.63314736", "0.62910914", "0.62780464", "0.62382305", "0.62351036", "0.62268376", "0.6209702" ]
0.82437086
0
Append the node n to the end of the list.
def append(self, n):
    last = self.last()
    if last:
        last.next = n
        self.len += 1
    else:
        self.cons(n)
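For comparison, a short self-contained sketch of the append path; the last() and cons() helpers below are assumed for illustration:

# Assumed helpers; only the append logic mirrors the method above.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None
        self.len = 0

    def cons(self, n):
        n.next = self.head
        self.head = n
        self.len += 1

    def last(self):
        node = self.head
        while node is not None and node.next is not None:
            node = node.next
        return node

    def append(self, n):
        last = self.last()
        if last:
            last.next = n
            self.len += 1
        else:
            self.cons(n)

lst = LinkedList()
lst.append(Node("a"))   # empty list: falls back to cons()
lst.append(Node("b"))   # linked after "a"; lst.len == 2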
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, n, new_node):\n curr_node = self.head\n \n i = 0\n while i < n:\n if curr_node.next is None:\n raise IndexError(\"list is shorter than n\")\n curr_node = curr_node.next\n i += 1\n \n new_node.next = curr_node.next\n curr_node.next = new_node\n \n return None", "def append(self, value):\n if len(self.data) >= n:\n self.data.pop(0)\n self.data.append(value)", "def append(self, node):\n if not isinstance(node, Node):\n # If the node parameter is not a Node then update it\n # to refer to one.\n node = Node(node)\n\n if self.first_node is None:\n # The first_node is None therefore we just set it.\n self.first_node = node\n else:\n # Find the last_node in the list and update it's next attribute.\n self.last_node.next = node", "def append_node(self, node):\n self.nodes.append(node)\n node.slot = len(self.nodes)", "def append(self, item):\n \n n = Node(item)\n current = self.head\n \n # Special case - empty list\n if current is None:\n self.head = n\n else:\n # Find the last node\n while current.get_next() is not None:\n current = current.get_next()\n current.set_next(n)", "def append(self, item):\n tmpNode = Node(item)\n self.tail.next = tmpNode\n self.tail = tmpNode\n self._size += 1", "def add_node(self, n):\n self.node_dict.setdefault(n, OrderedDict())", "def append(self, value):\n old_tail = self.tail\n self.tail = Node(value, None, old_tail)\n if self.count > 0: # if any Nodes: set tail previous to current Node\n old_tail.previous = self.tail\n else: # adding to an empty, than define front\n self.head = self.tail\n self.count += 1", "def append(self, item: Any) -> None:\n new_node = _Node(item)\n\n if self._first is None:\n self._first = new_node\n else:\n curr = self._first\n while curr.next is not None:\n curr = curr.next\n\n curr.next = new_node\n self._length += 1", "def nth_node_from_end(self, n):\n\n length = 0\n\n if self.head:\n current = self.head\n while current:\n length += 1\n current = current.next\n\n count = 0\n current = self.head\n while count < (length - n): \n count += 1\n current = current.next\n return current.data", "def append(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1", "def append(self, value):\n node = Node(value)\n if self._head is None:\n self._head = node\n else:\n current = self._head\n while current.next:\n current = current.next\n current.next = node\n self._size += 1", "def addAtIndex(self, index: int, val: int) -> None:\n if self.size < index:\n return\n if index < 0:\n return\n\n new_node = Node(val)\n curr = self.head\n for _ in range(index):\n curr = curr.next\n new_node.next = curr.next\n curr.next = new_node\n self.size += 1", "def append(self, data):\n\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1", "def addAtTail(self, val):\n self.nums.append(val)", "def addAtIndex(self, index, val):\n if index > self.len:\n return\n p = self.head\n while index > 0:\n index -= 1\n p = p.next\n\n node = ListNode(val)\n node.next = p.next\n p.next = node\n\n if p is self.tail:\n self.tail = node\n self.len += 1", "def addAtTail(self, val: int) -> None:\n '''node = self.head\n\n if node:\n while True:\n if node.next:\n node = node.next\n else:\n node.next = MyListNode(val,next_node=None,prev_node=node)\n break\n else:\n self.head = MyListNode(val)'''\n\n if self.node_count > 0:\n node = 
self.get_node(self.node_count - 1)\n #print(\"Last \")\n temp_node = MyListNode(val, next_node=None, prev_node=node)\n node.next = temp_node\n self.node_count += 1\n else:\n self.head = MyListNode(val)\n self.node_count += 1", "def addNode(self):\n\t\tself.head.insert(self.size, len(self.succ))\n\t\tself.size += 1", "def append(self, data):\n # If list is empty, create a new node\n if self.length == 0:\n self.head = self.Node([data], None)\n self.tail = self.head\n # Otherwise add to the end of the tail. If the tail\n # becomes unbalanced (grows beyond max_node_capacity)\n # then split it, creating a new tail.\n else:\n self.tail.append(data)\n if len(self.tail.data_list) > self.max_node_capacity:\n self.__split_node(self.tail)\n\n self.length += 1", "def addAtTail(self, val):\n tmp = Node(val)\n if self.tail:\n self.tail.nxt = tmp\n k = self.tail\n self.tail = tmp\n else:\n self.head = tmp\n self.tail = tmp\n k = tmp", "def value_n_from_end(self, n):\n # check the validity of the input\n if n > self.n-1:\n print(f\"Error; n is greater than the length of the list = {self.n-1}\") \n return\n \n temp_node = self.head # store head\n for _ in range((self.n-1) - n):\n temp_node = temp_node.next # traverse the list\n return temp_node.val", "def addAtTail(self, val: int) -> None:\n new_node = Node(val)\n temp = self.head\n if self.head is None:\n self.head = new_node\n while temp.next:\n temp = temp.next\n temp.next = new_node", "def addAtTail(self, val: int) -> None:\n self.addAtIndex(self.size, val)", "def append(self, data):\n if self.head is None:\n self.head = ListNode(data, None)\n else:\n itr = self.head\n while itr:\n if itr.next is None:\n itr.next = ListNode(data, None)\n return\n itr = itr.next", "def addAtIndex(self, index, val):\n if index < 0 or index > self._size:\n return\n elif index == 0:\n self.addAtHead(val)\n return\n elif index == self._size:\n self.addAtTail(val)\n return\n\n current = self._head\n for _ in range(index - 1):\n current = current.next\n new_node = Node(val)\n new_node.next = current.next\n current.next = new_node\n self._size += 1", "def push_node(self, node):\n n = node\n if self.empty():\n self.head = n\n return\n\n l = self.head\n while l.next is not None:\n l = l.next\n l.next = n\n return", "def addAtTail(self, val):\n curr = self.head\n while curr.next:\n curr = curr.next\n new_node = ListNode(val)\n curr.next = new_node\n self.length += 1", "def append(self, data):\n new_node = Node(data)\n\n if not self.head:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n\n last_node.next = new_node", "def append(self, data):\r\n new_node = Node(data)\r\n current_node = self.head\r\n while current_node.next!=None:\r\n current_node = current_node.next\r\n current_node.next = new_node #when we are at the last node, set it's pointer to point at the new Node\r", "def addAtTail(self, val):\n curr = self.head\n if curr is None:\n self.head = Node(val)\n else:\n while curr.next is not None:\n curr = curr.next\n curr.next = Node(val)\n\n self.size += 1" ]
[ "0.69705254", "0.687432", "0.6709675", "0.6645751", "0.6608237", "0.6569515", "0.64727336", "0.64642847", "0.64602023", "0.6447882", "0.64307994", "0.6425153", "0.637969", "0.6370489", "0.6370111", "0.6368923", "0.63669413", "0.6334651", "0.63345844", "0.63304853", "0.6326456", "0.6325326", "0.6316204", "0.63130957", "0.63106203", "0.63040906", "0.63024485", "0.6295301", "0.62899894", "0.62844056" ]
0.8417494
0
Start a cluster that is currently in the stopped state.
def cluster_start(r):
    cluster_id = request_get(r, "cluster_id")
    if not cluster_id:
        logger.warning("No cluster_id is given")
        return make_fail_response("No cluster_id is given")

    if cluster_handler.start(cluster_id):
        return jsonify(response_ok), CODE_OK

    return make_fail_response("cluster start failed")
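A stubbed sketch can make the handler's three outcomes explicit; every name below (start_cluster, StubHandler, the response tuples) is hypothetical and only mirrors the branching above:

# Hypothetical, self-contained sketch of the start flow.
def start_cluster(form, handler):
    cluster_id = form.get("cluster_id")
    if not cluster_id:
        return {"status": "FAIL", "error": "No cluster_id is given"}, 400
    if handler.start(cluster_id):
        return {"status": "OK"}, 200
    return {"status": "FAIL", "error": "cluster start failed"}, 400

class StubHandler:
    def start(self, cluster_id):
        # Pretend only one known, currently stopped cluster can be started.
        return cluster_id == "known-cluster"

print(start_cluster({"cluster_id": "known-cluster"}, StubHandler()))  # ({'status': 'OK'}, 200)
print(start_cluster({}, StubHandler()))                               # missing id -> 400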
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name", "def cluster_start(args: Namespace, configuration: BareConfig):\n logging.basicConfig(level=logging.DEBUG,\n datefmt='%m-%d %H:%M')\n launch_orchestrator(args=args, conf=configuration)", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def test_starts_cluster_state_service(self):\n options = ControlOptions()\n options.parseOptions(\n [b\"--port\", b\"tcp:8001\", b\"--data-path\", self.mktemp()])\n reactor = MemoryCoreReactor()\n ControlScript().main(reactor, options)\n server = reactor.tcpServers[0]\n service = server[1].resource._v1_user.cluster_state_service\n self.assertEqual((service.__class__, service.running),\n (ClusterStateService, True))", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def test_glusterd_restart_stop_start(self):\n # restart glusterd on all servers\n g.log.info(\"Restart glusterd on all servers\")\n ret = restart_glusterd(self.servers)\n self.assertTrue(ret, \"Failed to restart glusterd on all servers\")\n g.log.info(\"Successfully restarted glusterd on all servers\")\n\n # Check if glusterd is running on all servers(expected: active)\n g.log.info(\"Check if glusterd is running on all servers\"\n \"(expected: active)\")\n ret = is_glusterd_running(self.servers)\n self.assertEqual(ret, 0, \"Glusterd is not running on all servers\")\n g.log.info(\"Glusterd is running on all the servers\")\n\n # Stop glusterd on all servers\n 
g.log.info(\"Stop glusterd on all servers\")\n ret = stop_glusterd(self.servers)\n self.assertTrue(ret, \"Failed to stop glusterd on all servers\")\n g.log.info(\"Successfully stopped glusterd on all servers\")\n\n # Check if glusterd is running on all servers(expected: not running)\n g.log.info(\"Check if glusterd is running on all servers\"\n \"(expected: not running)\")\n ret = is_glusterd_running(self.servers)\n self.assertNotEqual(ret, 0, \"Glusterd is still running on some \"\n \"servers\")\n g.log.info(\"Glusterd not running on any servers as expected.\")\n\n # Start glusterd on all servers\n g.log.info(\"Start glusterd on all servers\")\n ret = start_glusterd(self.servers)\n self.assertTrue(ret, \"Failed to start glusterd on all servers\")\n g.log.info(\"Successfully started glusterd on all servers\")\n\n # Check if glusterd is running on all servers(expected: active)\n g.log.info(\"Check if glusterd is running on all servers\"\n \"(expected: active)\")\n ret = is_glusterd_running(self.servers)\n self.assertEqual(ret, 0, \"Glusterd is not running on all servers\")\n g.log.info(\"Glusterd is running on all the servers\")\n\n # Wait for all the glusterd's to establish communication.\n time.sleep(30)\n\n # Validate all the peers are in connected state\n g.log.info(\"Validating all the peers are in Cluster and Connected\")\n ret = self.are_peers_in_connected_state()\n self.assertTrue(ret, \"Validating Peers to be in Cluster Failed\")\n g.log.info(\"All peers are in connected state\")\n\n self.test_method_complete = True", "def start_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/start').format(self.api_version,\n cluster_name,\n service_name)).json()", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return c_info['cluster_id']", "def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)", "def start(self):\n self._run_start()\n self._stored.is_started = True\n if self._is_single_node and not self._stored.is_initialized:\n self._stored.is_initialized = self._stored.is_initialized = True\n self.on.cluster_initialized.emit(self._get_cluster_id())\n self.on.daemon_started.emit()", "def create(self):\n print(\"+ Creating cluster: {}. 
This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. 
Cluster: {} created.\".format(self.name_hyphenated))", "def createcluster(self):\n for hostitem in OTHER_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n alive = str(REMAINING_NODES)[1:-1]\n print \"{}\\nThe following nodes are alive in cluster:{}\\n {}\".format(\n RED, WHITE, alive)\n print \"\\n\\nTo boostrap a new cluster you need to switch them off\\n\"\n os.sys.exit(1)\n else:\n if self.mode == \"new\" and not self.force:\n ask('\\nThis operation will destroy the local data')\n clean_dir(self.datadir)\n initialize_mysql(self.datadir)\n bootstrap_mysql(self.mode)\n if self.mode == \"new\":\n create_monitor_table()\n ALL_NODES.append(\"localhost\")\n for creditem in CREDENTIALS:\n create_users(creditem)\n print \"\"\n drop_anonymous()", "def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()", "def cluster(self):\n assert False", "def start_server(cluster_spec, job_name: str, task_index: int, cpu_device_num: int):\n s = gen_server(cluster_spec, job_name, task_index, cpu_device_num)\n s.join()", "def main():\n if sys.argv[1] == \"start\":\n start_cluster(sys.argv[2], sys.argv[3], int(sys.argv[4]),\n int(sys.argv[5]), sys.argv[6], sys.argv[7],\n int(sys.argv[8]))\n elif sys.argv[1] == \"stop\":\n stop_cluster()\n else:\n print 'Unknown Option'", "def start(self):\n\n # check if docker is up.\n\n if \"OZONE_RUNNER_VERSION\" not in os.environ:\n self.__logger__.error(\"OZONE_RUNNER_VERSION is not set.\")\n sys.exit(1)\n\n if \"HDDS_VERSION\" not in os.environ:\n self.__logger__.error(\"HDDS_VERSION is not set.\")\n sys.exit(1)\n\n self.__logger__.info(\"Starting Ozone Cluster\")\n if Blockade.blockade_status() == 0:\n Blockade.blockade_destroy()\n\n Blockade.blockade_up()\n\n call([Command.docker_compose, \"-f\", self.docker_compose_file,\n \"up\", \"-d\", \"--scale\",\n \"datanode=\" + str(self.conf.datanode_count)])\n self.__logger__.info(\"Waiting 10s for cluster start up...\")\n # Remove the sleep and wait only till the cluster is out of safemode\n time.sleep(10)\n output = subprocess.check_output([Command.docker_compose, \"-f\",\n self.docker_compose_file, \"ps\"])\n node_list = []\n for out in output.split(\"\\n\")[2:-1]:\n node = out.split(\" \")[0]\n node_list.append(node)\n Blockade.blockade_add(node)\n\n self.om = filter(lambda x: 'om' in x, node_list)[0]\n self.scm = filter(lambda x: 'scm' in x, node_list)[0]\n self.datanodes = sorted(list(filter(lambda x: 'datanode' in x, node_list)))\n self.client = filter(lambda x: 'ozone_client' in x, node_list)[0]\n self.scm_uuid = self.__get_scm_uuid__()\n self.datanode_dir = self.get_conf_value(\"hdds.datanode.dir\")\n\n assert node_list, \"no node found in the cluster!\"\n self.__logger__.info(\"blockade created with nodes %s\", ' '.join(node_list))", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. 
Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def bounce_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\")\n else:\n cmd = _traffic_line(\"-B\")\n\n return _subprocess(cmd)", "def restart_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Restarting cluster\"\n return ret\n\n __salt__[\"trafficserver.restart_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Restarted cluster\"\n return ret", "def set_cluster(self, cluster_id=None):\n cluster = objects.Cluster.get_by_uid(\n cluster_id, fail_if_not_found=False\n )\n if cluster:\n self._cluster = cluster\n self._set_task(self.EXPECTED, None)\n self._set_task(\n self.CURRENT,\n objects.TransactionCollection.get_last_succeed_run(cluster)\n )\n return True\n return False", "def test_cluster_create(self, mock_is_service_available):\n\n mock_is_service_available.return_value = True\n fake_cluster = FakeCluster(**RETURN_CLUSTER_1)\n cluster = self._create_test_cluster(\n fake_cluster, 'stack_delete', CREATE_CLUSTER_ARG_1)\n scheduler.TaskRunner(cluster.create)()\n self.assertEqual((cluster.CREATE, cluster.COMPLETE), cluster.state)\n self.m.VerifyAll()", "def launch():\n\n os.mkdir(CLUSTER_FOLDER)\n os.system(f\"ssh-keygen -f {PRIVATE_KEY_FILE} -q -N '' -C ''\")\n with open (PUBLIC_KEY_FILE, \"r\") as f:\n public_key = f.read().strip()\n\n with open (\"/cluster.yaml\", \"r\") as f:\n cluster_definition_string = f.read()\n cluster_definition_yaml = yaml.load(cluster_definition_string, Loader=yaml.FullLoader)\n\n if(not \"publicKeys\" in cluster_definition_yaml or not isinstance(cluster_definition_yaml[\"publicKeys\"], list)):\n log(\"Error: The cluster definition file does not contain a valid 'publicKeys' section.\")\n exit(1)\n cluster_definition_yaml[\"publicKeys\"].append(public_key) \n with open (f\"{CLUSTER_FOLDER}/cluster.yaml\", \"w\") as f:\n f.write(yaml.dump(cluster_definition_yaml, default_flow_style=False))\n f.close()\n\n start_time = time.time() \n cluster = create_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], yaml.dump(cluster_definition_yaml, default_flow_style=False)) \n if(not cluster):\n log(\"Error: Failed to create cluster via API.\")\n exit(1)\n\n log(f\"Created cluster '{cluster['id']}'. 
Waiting for cluster to be up and running...\")\n\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'RUNNING' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster launch failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is up and running.\")\n\n with open(f\"{CLUSTER_FOLDER}/uuid\", \"w\") as uuid_text_file:\n print(cluster['id'], file=uuid_text_file)\n\n log(\"Downloading Stackable client script for cluster\")\n\n with open (\"/stackable.sh\", \"w\") as f:\n f.write(get_client_script(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id']))\n f.close()\n os.chmod(\"/stackable.sh\", 0o755)\n\n log(\"Downloading Stackable kubeconfig\")\n\n with open (\"/kubeconfig\", \"w\") as f:\n f.write(get_kubeconfig(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id']))\n f.close()\n\n log(\"Downloading Stackable version information sheet for cluster\")\n\n stackable_versions = get_version_information_sheet(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n with open (\"/target/stackable-versions.txt\", \"w\") as f:\n f.write(stackable_versions)\n f.close()\n os.system(f\"chown {uid_gid_output} /target/stackable-versions.txt\")\n os.system('chmod 664 /target/stackable-versions.txt')", "def run_with_exceptions(self: RemoteCluster) -> None:\n self.server.start()\n time.sleep(2) # NOTE: give the server a chance to start\n log.debug(f'Launching clients: {self.client_argv}')\n self.clients = Popen(self.client_argv, shell=True, stdout=sys.stdout, stderr=sys.stderr,\n env={**os.environ, **load_task_env()})\n self.clients.wait()\n self.server.join()", "def run(self, eatery_id):\n self.start = time.time()\n do_cluster_ins = DoClusters(eatery_id=eatery_id)\n do_cluster_ins.run()\n return", "def _init_cluster(self):\n self._Init_Cluster()", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))" ]
[ "0.71251774", "0.68893415", "0.68576354", "0.6779097", "0.6720754", "0.67115116", "0.6646528", "0.6419284", "0.63756603", "0.632783", "0.6314102", "0.6290016", "0.6287659", "0.62627524", "0.6175558", "0.6173656", "0.61347663", "0.6123808", "0.6108847", "0.61067957", "0.60864526", "0.60357976", "0.6023933", "0.5984605", "0.59144413", "0.59134597", "0.58991075", "0.58877903", "0.5871611", "0.58702856" ]
0.7536392
0
Stop a cluster that is currently in the running state.
def cluster_stop(r):
    cluster_id = request_get(r, "cluster_id")
    if not cluster_id:
        logger.warning("No cluster_id is given")
        return make_fail_response("No cluster_id is given")

    if cluster_handler.stop(cluster_id):
        return jsonify(response_ok), CODE_OK

    return make_fail_response("cluster stop failed")
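The stop handler mirrors the start flow; a stubbed sketch with assumed names, not the project's real API:

def stop_cluster(form, handler):
    cluster_id = form.get("cluster_id")
    if not cluster_id:
        return {"status": "FAIL", "error": "No cluster_id is given"}, 400
    if handler.stop(cluster_id):
        return {"status": "OK"}, 200
    return {"status": "FAIL", "error": "cluster stop failed"}, 400

class StubHandler:
    def stop(self, cluster_id):
        return True  # pretend the running cluster stopped cleanly

print(stop_cluster({"cluster_id": "c42"}, StubHandler()))  # ({'status': 'OK'}, 200)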
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self, data):\n return ClusterStopper(my_cluster=self).detach_cluster(data)", "def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))", "def shutdown_cluster(self):\n self.cluster.shutdown()", "def stop(self):\n self.__logger__.info(\"Stopping Ozone Cluster\")\n call([Command.docker_compose, \"-f\", self.docker_compose_file, \"down\"])\n Blockade.blockade_destroy()", "def stop(self: RemoteCluster, wait: bool = False, timeout: int = None) -> None:\n self.server.stop(wait=wait, timeout=timeout)\n self.clients.terminate()\n super().stop(wait=wait, timeout=timeout)", "def stop_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/stop').format(self.api_version,\n cluster_name,\n service_name)).json()", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'", "def stop(self, session, params=None):\n session.set_status('stopping')\n self._run = False", "def stop(self):\n\n # immediate is necessary if it's in recovery (for now).\n # we don't care the result.\n master = gp.MasterStop(\"Stopping Master Standby\",\n self.datadir, mode='immediate')\n master.run()", "def stop(self, **kwargs):\n return self.client.api.stop(self.id, **kwargs)", "def stop(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.stop_server(server)\n return r", "def stop(self: AutoScalingCluster, wait: bool = False, timeout: int = None) -> None:\n self.server.stop(wait=wait, timeout=timeout)\n self.autoscaler.stop(wait=wait, timeout=timeout)\n super().stop(wait=wait, timeout=timeout)", "def stop_all_cluster_services(self, cluster_name):\n return self._post(endpoint='{}/clusters/{}/commands/stop'.format(self.api_version,\n cluster_name)).json()", "def stop(self):\n self.scion_sh('stop')", "async def stop(self):\n await self.node._send(op='stop', guildId=self.guild_id)\n self.current = None", "def remove(self):\n method = \"remove_cluster\"\n params = {\n \"cluster_id\": self.id\n }\n make_request = self._client.connection.make_request\n return make_request(method, params)", "def 
terminate_cluster(cluster_name: str, max_retry: int = 3) -> None:\n retry_cnt = 0\n while True:\n try:\n usage_lib.messages.usage.set_internal()\n sky.down(cluster_name)\n return\n except ValueError:\n # The cluster is already down.\n return\n except Exception as e: # pylint: disable=broad-except\n retry_cnt += 1\n if retry_cnt >= max_retry:\n raise RuntimeError('Failed to terminate the spot cluster '\n f'{cluster_name}.') from e\n logger.error('Failed to terminate the spot cluster '\n f'{cluster_name}. Retrying.'\n f'Details: {common_utils.format_exception(e)}')\n logger.error(f' Traceback: {traceback.format_exc()}')", "def cleanup(self):\n cluster = self.client and self.client.cluster\n\n if self.client:\n self.client.close()\n self.client = None\n\n if cluster:\n try:\n cluster.close(timeout=60.0)\n except RuntimeError as ex:\n ## For some reason, sometimes the cluster can't be closed due to some\n ## problem with 'bkill', which fails with an error that looks like the following.\n ## If that happens, try to re-run bkill one more time in the hopes of really\n ## killing the cluster and not leaving lingering workers running.\n ## (This issue has been observed on the Janelia cluster for both dask and spark clusters.)\n ##\n # RuntimeError: Command exited with non-zero exit code.\n # Exit code: 255\n # Command:\n # bkill 54421878 54421872 54421877\n # stdout:\n #\n # stderr:\n # Job <54421878>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421872>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421877>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n m = re.search(r'bkill( \\d+)+', str(ex))\n if not m:\n raise\n\n logger.warning(\"Failed to kill cluster with bkill, trying one more time...\")\n time.sleep(2.0)\n result = subprocess.run(m.group(), shell=True)\n if result.returncode != 0:\n logger.error(\"Second attempt to kill the cluster failed!\")\n raise", "def stop(self):\n if self.session.status != Session.SessionStatus.RUNNING:\n return\n self.container.stop()\n self.session.status = Session.SessionStatus.NOTRUNNING\n self.session.save()", "def stop(self):\n return self._send_command(\"stop\")", "def cluster_shutdown():\n map(shutdown, cluster)", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop_instance(InstanceId=None, Force=None):\n pass", "def stop(node_index):\n node = Node.from_index(node_index)\n run_lncli(node, 'stop')", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "async def stop(self):\n await self._bot.lavalink.ws.send(op='stop', guildId=self.guild_id)\n self.current = None", "def stop(self):\n if self._server_thread is None:\n return\n self._stopping.set()\n self._server_thread = None\n self._stopped.wait()", "def stop_run(arn=None):\n pass" ]
[ "0.73711646", "0.7304418", "0.7216645", "0.70051634", "0.6953816", "0.6932318", "0.6856211", "0.67420995", "0.67134136", "0.66507083", "0.6637439", "0.65622604", "0.64982796", "0.6497791", "0.6390566", "0.63871026", "0.63806355", "0.6330831", "0.62956965", "0.6294592", "0.62857723", "0.6274418", "0.6245891", "0.6245891", "0.62421566", "0.624043", "0.6225804", "0.62212723", "0.62137324", "0.62126184" ]
0.8240757
0
Apply for a cluster. Return a Cluster JSON body.
def cluster_apply(r):
    request_debug(r, logger)

    user_id = request_get(r, "user_id")
    if not user_id:
        logger.warning("cluster_apply without user_id")
        return make_fail_response("cluster_apply without user_id")

    allow_multiple, condition = request_get(r, "allow_multiple"), {}

    consensus_plugin = request_get(r, "consensus_plugin")
    consensus_mode = request_get(r, "consensus_mode")
    cluster_size = int(request_get(r, "size") or -1)
    if consensus_plugin:
        if consensus_plugin not in CONSENSUS_PLUGINS:
            logger.warning("Invalid consensus_plugin")
            return make_fail_response("Invalid consensus_plugin")
        else:
            condition["consensus_plugin"] = consensus_plugin

    if consensus_mode:
        if consensus_mode not in CONSENSUS_MODES:
            logger.warning("Invalid consensus_mode")
            return make_fail_response("Invalid consensus_mode")
        else:
            condition["consensus_mode"] = consensus_mode

    if cluster_size >= 0:
        if cluster_size not in CLUSTER_SIZES:
            logger.warning("Invalid cluster_size")
            return make_fail_response("Invalid cluster_size")
        else:
            condition["size"] = cluster_size

    logger.debug("condition={}".format(condition))
    c = cluster_handler.apply_cluster(user_id=user_id, condition=condition,
                                      allow_multiple=allow_multiple)
    if not c:
        logger.warning("cluster_apply failed")
        return make_fail_response("No available res for {}".format(user_id))
    else:
        response_ok["data"] = c
        return jsonify(response_ok), CODE_OK
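The bulk of this handler is parameter validation before the actual apply call. A sketch of just that step, with the allowed-value constants invented for illustration rather than taken from the project:

CONSENSUS_PLUGINS = ["noops", "pbft"]   # assumed placeholder values
CONSENSUS_MODES = ["batch"]
CLUSTER_SIZES = [1, 4]

def build_condition(consensus_plugin, consensus_mode, cluster_size):
    condition = {}
    if consensus_plugin:
        if consensus_plugin not in CONSENSUS_PLUGINS:
            raise ValueError("Invalid consensus_plugin")
        condition["consensus_plugin"] = consensus_plugin
    if consensus_mode:
        if consensus_mode not in CONSENSUS_MODES:
            raise ValueError("Invalid consensus_mode")
        condition["consensus_mode"] = consensus_mode
    if cluster_size >= 0:
        if cluster_size not in CLUSTER_SIZES:
            raise ValueError("Invalid cluster_size")
        condition["size"] = cluster_size
    return condition

print(build_condition("pbft", "batch", 4))
# {'consensus_plugin': 'pbft', 'consensus_mode': 'batch', 'size': 4}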
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_apply_dep():\n request_debug(r, logger)\n\n user_id = request_get(r, \"user_id\")\n if not user_id:\n logger.warning(\"cluster_apply without user_id\")\n return make_fail_response(\"cluster_apply without user_id\")\n\n allow_multiple, condition = request_get(r, \"allow_multiple\"), {}\n\n consensus_plugin = request_get(r, \"consensus_plugin\")\n consensus_mode = request_get(r, \"consensus_mode\")\n cluster_size = int(request_get(r, \"size\") or -1)\n if consensus_plugin:\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.warning(\"Invalid consensus_plugin\")\n return make_fail_response(\"Invalid consensus_plugin\")\n else:\n condition[\"consensus_plugin\"] = consensus_plugin\n\n if consensus_mode:\n if consensus_mode not in CONSENSUS_MODES:\n logger.warning(\"Invalid consensus_mode\")\n return make_fail_response(\"Invalid consensus_mode\")\n else:\n condition[\"consensus_mode\"] = consensus_mode\n\n if cluster_size >= 0:\n if cluster_size not in CLUSTER_SIZES:\n logger.warning(\"Invalid cluster_size\")\n return make_fail_response(\"Invalid cluster_size\")\n else:\n condition[\"size\"] = cluster_size\n\n logger.debug(\"condition={}\".format(condition))\n c = cluster_handler.apply_cluster(user_id=user_id, condition=condition,\n allow_multiple=allow_multiple)\n if not c:\n logger.warning(\"cluster_apply failed\")\n return make_fail_response(\"No available res for {}\".format(user_id))\n else:\n response_ok[\"data\"] = c\n return jsonify(response_ok), CODE_OK", "def cluster(self, text):\n body = {'text': text}\n body = json.dumps(body)\n url = self.base_url + '/ml-service/phoenix-ml/cluster'\n headers = {\"ApiKey\": self.api_key, \"Content-type\": \"application/json\"}\n response = requests.post(url=url, data=body, headers=headers)\n response = response.json()\n return response", "def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)", "def cluster_create():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"name\"] or not r.form[\"host_id\"] or not \\\n r.form[\"consensus_plugin\"] or not r.form[\"size\"]:\n logger.warning(\"cluster post without enough data\")\n response_fail[\"error\"] = \"cluster POST without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n name, host_id, consensus_plugin, consensus_mode, size = \\\n r.form['name'], r.form['host_id'], r.form['consensus_plugin'],\\\n r.form['consensus_mode'] or CONSENSUS_MODES[0], int(r.form[\n \"size\"])\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.debug(\"Unknown consensus_plugin={}\".format(\n consensus_plugin))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \\\n not in CONSENSUS_MODES:\n logger.debug(\"Invalid consensus, plugin={}, mode={}\".format(\n consensus_plugin, consensus_mode))\n return jsonify(response_fail), CODE_BAD_REQUEST\n\n if size not in CLUSTER_SIZES:\n logger.debug(\"Unknown cluster size={}\".format(size))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if cluster_handler.create(name=name, host_id=host_id,\n consensus_plugin=consensus_plugin,\n consensus_mode=consensus_mode,\n size=size):\n logger.debug(\"cluster POST successfully\")\n return jsonify(response_ok), CODE_CREATED\n else:\n logger.debug(\"cluster creation failed\")\n response_fail[\"error\"] = \"Failed to 
create cluster {}\".format(\n name)\n return jsonify(response_fail), CODE_BAD_REQUEST", "def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError", "def update(id, body: Body):\n\n cluster = clusters.get_by_id(id)\n\n if cluster is None:\n raise HTTPException(status_code=404, detail=\"Cluster not found for ID: {0}\".format(id))\n\n cluster.update(body.dict())\n cluster = clusters.update(cluster)\n\n return cluster.export()", "def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def patch_cluster(self, cluster: Union[dto.Cluster, str]) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def patch_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError", "def post(self, request, cluster_id):\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n serializer = self.serializer_class(data=request.data, context={\n 'request': request, 'cluster': cluster,\n })\n return create(serializer, id=cluster_id)", "def patch_namespaced_cluster_network(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_cluster_network\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_cluster_network`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_cluster_network`\")\n\n resource_path = '/oapi/v1/clusternetworks/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ClusterNetwork',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def replace_namespaced_cluster_network(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument 
'%s'\"\n \" to method replace_namespaced_cluster_network\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_cluster_network`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_cluster_network`\")\n\n resource_path = '/oapi/v1/clusternetworks/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ClusterNetwork',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")", "def cluster_delete():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"id\"] or not r.form[\"col_name\"]:\n logger.warning(\"cluster operation post without enough data\")\n response_fail[\"error\"] = \"cluster delete without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n logger.debug(\"cluster delete with id={0}, col_name={1}\".format(\n r.form[\"id\"], r.form[\"col_name\"]))\n if r.form[\"col_name\"] == \"active\":\n result = cluster_handler.delete(id=r.form[\"id\"])\n else:\n result = cluster_handler.delete_released(id=r.form[\"id\"])\n if result:\n return jsonify(response_ok), CODE_OK\n else:\n logger.debug(\"cluster deletion failed\")\n response_fail[\"error\"] = \"Failed to delete cluster {}\".format(\n r.form[\"id\"])\n return jsonify(response_fail), CODE_BAD_REQUEST", "def update_vsan_cluster(self, cluster_id, **kwargs):\n put_body = json.dumps({'cluster': kwargs})\n resp, body = self.put('clusters/%s' % cluster_id, put_body)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])", "def patch_namespaced_cluster_role(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_cluster_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_cluster_role`\")\n # verify the required 
parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_cluster_role`\")\n\n resource_path = '/oapi/v1/clusterroles/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ClusterRole',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def join_cluster(self, *args, **kwargs):\r\n return execute(self._join_cluster, *args, **kwargs)", "def replace_namespaced_cluster_role(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_cluster_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_cluster_role`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_cluster_role`\")\n\n resource_path = '/oapi/v1/clusterroles/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ClusterRole',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n 
config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def predict_cluster(self, inputs):\n if not self.trained:\n if self.verbose:\n print(\"KMeans Model Class - Predict Cluster Function: No trained model\")\n return -1\n\n\n return self.model.predict(inputs)", "def update_coe_cluster(self, name_or_id, **kwargs):\n self.list_coe_clusters.invalidate(self)\n cluster = self.get_coe_cluster(name_or_id)\n if not cluster:\n raise exc.OpenStackCloudException(\n \"COE cluster %s not found.\" % name_or_id\n )\n\n cluster = self.container_infrastructure_management.update_cluster(\n cluster, **kwargs\n )\n\n return cluster", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def remove(self):\n method = \"remove_cluster\"\n params = {\n \"cluster_id\": self.id\n }\n make_request = self._client.connection.make_request\n return make_request(method, params)", "def update_cluster_results(session: Session, project: MasteringProject) -> Operation:\n op = _update_cluster_results_async(session, project)\n return operation.wait(session, op)", "def create_cluster(self, cluster: dict) -> None:\n if self.master_url:\n return\n try:\n self._cluster_client.create_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster': cluster\n })\n _LOGGER.info(\n 'Cluster created successfully: %s',\n self.cluster_metadata.cluster_name)\n self.master_url = self.get_master_url(self.cluster_metadata)\n except Exception as e:\n if e.code == 409:\n _LOGGER.info(\n 'Cluster %s already exists. 
Continuing...',\n ie.current_env().clusters.default_cluster_name)\n elif e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to create cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot create a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 501:\n _LOGGER.error(\n 'Invalid region provided: %s', self.cluster_metadata.region)\n raise ValueError(\n 'Region {} does not exist!'.format(self.cluster_metadata.region))\n else:\n _LOGGER.error(\n 'Unable to create cluster: %s', self.cluster_metadata.cluster_name)\n raise e", "def add_new_cluster(self):\n self.result.append(Cluster.Cluster())\n return len(self.result) - 1", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return c_info['cluster_id']", "def cluster(self, cluster):\n\n self._cluster = cluster" ]
[ "0.7015488", "0.66317344", "0.64640945", "0.6455852", "0.643635", "0.63785547", "0.62163526", "0.60002464", "0.5994259", "0.59464735", "0.5941902", "0.59388435", "0.58669645", "0.5845137", "0.58328575", "0.5817479", "0.5765258", "0.570312", "0.57025146", "0.56841063", "0.5670027", "0.56311244", "0.562201", "0.5602619", "0.55944794", "0.5594072", "0.55871415", "0.55871415", "0.55768883", "0.5551248" ]
0.7457299
0
Release a cluster, which should currently be in used status.
def cluster_release(r): cluster_id = request_get(r, "cluster_id") if not cluster_id: logger.warning("No cluster_id is given") return make_fail_response("No cluster_id is given") if cluster_handler.release_cluster(cluster_id): return jsonify(response_ok), CODE_OK return make_fail_response("cluster release failed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown_cluster(self):\n self.cluster.shutdown()", "def delete_cluster(self):", "def delete_cluster(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n ctx.obj.groups[project.id].clusters[cluster_name].delete().data\n click.echo(\"DONE!\")", "def delete_cluster(self):\n cf_namespace_id = self.create_or_fetch_namespace()\n self.delete_action(cf_namespace_id)\n self.create_action(cf_namespace_id)\n self.invoke_action(cf_namespace_id)", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def delete(self):\n # delete the named cluster\n # don't wait for operation to finish\n print(\"+ Deleting cluster {} (async).\".format(self.name_hyphenated))\n util.syscall(\"gcloud container clusters delete {} --quiet --async\".\n format(self.name))\n self.started = False\n self.deleted = True", "def cluster_shutdown():\n map(shutdown, cluster)", "def teardown(self, cluster):\n raise NotImplementedError()", "def close(self):\n self.cluster.shutdown()", "def delete_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def __init_cluster(self, cluster):\n self.___init_nodes(cluster)\n self.__clusterop.async_rebalance(\n cluster.get_nodes(),\n cluster.get_nodes()[1:],\n []).result()", "async def release(self) -> None:\n ...", "async def release(self) -> None:\n ...", "async def release(self) -> None:\n ...", "def release_lock(self):\n senlin_lock.node_lock_release(self.entity.id, self.id)\n\n # only release cluster lock if it was locked as part of this\n # action (i.e. 
it's a user intiated action aka CAUSE_RPC from\n # senlin API and a not a CAUSED_DERIVED)\n if self.cause == consts.CAUSE_RPC:\n senlin_lock.cluster_lock_release(self.entity.cluster_id, self.id,\n senlin_lock.NODE_SCOPE)\n return self.RES_OK", "def cleanup(self) -> None:\n try:\n self._cluster_client.delete_cluster(\n request={\n 'project_id': self.cluster_metadata.project_id,\n 'region': self.cluster_metadata.region,\n 'cluster_name': self.cluster_metadata.cluster_name,\n })\n except Exception as e:\n if e.code == 403:\n _LOGGER.error(\n 'Due to insufficient project permissions, '\n 'unable to clean up the default cluster: %s',\n self.cluster_metadata.cluster_name)\n raise ValueError(\n 'You cannot delete a cluster in project: {}'.format(\n self.cluster_metadata.project_id))\n elif e.code == 404:\n _LOGGER.error(\n 'Cluster does not exist: %s', self.cluster_metadata.cluster_name)\n raise ValueError(\n 'Cluster was not found: {}'.format(\n self.cluster_metadata.cluster_name))\n else:\n _LOGGER.error(\n 'Failed to delete cluster: %s', self.cluster_metadata.cluster_name)\n raise e", "def release_node(self, node):\n # use the lua script to release the lock in a safe way\n try:\n node._release_script(keys=[self.resource], args=[self.lock_key])\n except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):\n pass", "def deregister_ecs_cluster(EcsClusterArn=None):\n pass", "def test_cluster_delete(self, mock_is_service_available):\n\n mock_is_service_available.return_value = True\n fake_cluster = FakeCluster(**RETURN_CLUSTER_1)\n cluster = self._create_test_cluster(\n fake_cluster, 'stack_delete', CREATE_CLUSTER_ARG_1)\n scheduler.TaskRunner(cluster.create)()\n self.m.UnsetStubs()\n self.setup_cluster_delete(cluster)\n scheduler.TaskRunner(cluster.delete)()\n self.assertEqual((cluster.DELETE, cluster.COMPLETE), cluster.state)\n self.m.VerifyAll()\n self.m.UnsetStubs()", "def release_set(self):\n self._release_locks()\n if self._locks: # pragma: nocover\n # This shouldn't happen, it means we couldn't release our\n # locks, abort\n self._fail_out()\n return\n else:\n with self._state_change:\n if self.failed:\n return\n self.state = PartitionState.ALLOCATING\n self._child_watching(self._allocate_transition, async=True)", "def cleanup(self):\n cluster = self.client and self.client.cluster\n\n if self.client:\n self.client.close()\n self.client = None\n\n if cluster:\n try:\n cluster.close(timeout=60.0)\n except RuntimeError as ex:\n ## For some reason, sometimes the cluster can't be closed due to some\n ## problem with 'bkill', which fails with an error that looks like the following.\n ## If that happens, try to re-run bkill one more time in the hopes of really\n ## killing the cluster and not leaving lingering workers running.\n ## (This issue has been observed on the Janelia cluster for both dask and spark clusters.)\n ##\n # RuntimeError: Command exited with non-zero exit code.\n # Exit code: 255\n # Command:\n # bkill 54421878 54421872 54421877\n # stdout:\n #\n # stderr:\n # Job <54421878>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421872>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421877>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n m = re.search(r'bkill( \\d+)+', str(ex))\n if not m:\n raise\n\n logger.warning(\"Failed to kill cluster with bkill, trying one more time...\")\n time.sleep(2.0)\n result = subprocess.run(m.group(), shell=True)\n if result.returncode != 0:\n 
logger.error(\"Second attempt to kill the cluster failed!\")\n raise", "def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError", "def cleanup_cluster(self, cluster):\n self.log.info(\"removing xdcr/nodes settings\")\n rest = RestConnection(cluster.get_master_node())\n rest.remove_all_replications()\n rest.remove_all_remote_clusters()\n rest.remove_all_recoveries()\n cluster.cleanup_cluster(\"upgradeXDCR\")", "def release(self):\n self._needs_release = False\n send_message(self, \"release\", restype=objc_id, argtypes=[])" ]
[ "0.6199519", "0.608621", "0.59004456", "0.5865586", "0.5860563", "0.5860563", "0.5860563", "0.5860563", "0.5860563", "0.5860563", "0.5804406", "0.5800434", "0.5766697", "0.5753451", "0.5695906", "0.5686576", "0.5686143", "0.5662215", "0.5662215", "0.5662215", "0.56477755", "0.5631381", "0.56063324", "0.55853474", "0.5546735", "0.55403", "0.5508678", "0.54791546", "0.5441245", "0.5436077" ]
0.7784186
0
Issue operations on the cluster, e.g., /cluster_op?action=apply&user_id=xxx will apply a cluster for the user
def cluster_actions(): request_debug(r, logger) action = request_get(r, "action") logger.info("cluster_op with action={}".format(action)) if action == "apply": return cluster_apply(r) elif action == "release": return cluster_release(r) elif action == "start": return cluster_start(r) elif action == "stop": return cluster_stop(r) elif action == "restart": return cluster_restart(r) else: return make_fail_response("Unknown action type")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_apply(r):\n request_debug(r, logger)\n\n user_id = request_get(r, \"user_id\")\n if not user_id:\n logger.warning(\"cluster_apply without user_id\")\n return make_fail_response(\"cluster_apply without user_id\")\n\n allow_multiple, condition = request_get(r, \"allow_multiple\"), {}\n\n consensus_plugin = request_get(r, \"consensus_plugin\")\n consensus_mode = request_get(r, \"consensus_mode\")\n cluster_size = int(request_get(r, \"size\") or -1)\n if consensus_plugin:\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.warning(\"Invalid consensus_plugin\")\n return make_fail_response(\"Invalid consensus_plugin\")\n else:\n condition[\"consensus_plugin\"] = consensus_plugin\n\n if consensus_mode:\n if consensus_mode not in CONSENSUS_MODES:\n logger.warning(\"Invalid consensus_mode\")\n return make_fail_response(\"Invalid consensus_mode\")\n else:\n condition[\"consensus_mode\"] = consensus_mode\n\n if cluster_size >= 0:\n if cluster_size not in CLUSTER_SIZES:\n logger.warning(\"Invalid cluster_size\")\n return make_fail_response(\"Invalid cluster_size\")\n else:\n condition[\"size\"] = cluster_size\n\n logger.debug(\"condition={}\".format(condition))\n c = cluster_handler.apply_cluster(user_id=user_id, condition=condition,\n allow_multiple=allow_multiple)\n if not c:\n logger.warning(\"cluster_apply failed\")\n return make_fail_response(\"No available res for {}\".format(user_id))\n else:\n response_ok[\"data\"] = c\n return jsonify(response_ok), CODE_OK", "def cluster_apply_dep():\n request_debug(r, logger)\n\n user_id = request_get(r, \"user_id\")\n if not user_id:\n logger.warning(\"cluster_apply without user_id\")\n return make_fail_response(\"cluster_apply without user_id\")\n\n allow_multiple, condition = request_get(r, \"allow_multiple\"), {}\n\n consensus_plugin = request_get(r, \"consensus_plugin\")\n consensus_mode = request_get(r, \"consensus_mode\")\n cluster_size = int(request_get(r, \"size\") or -1)\n if consensus_plugin:\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.warning(\"Invalid consensus_plugin\")\n return make_fail_response(\"Invalid consensus_plugin\")\n else:\n condition[\"consensus_plugin\"] = consensus_plugin\n\n if consensus_mode:\n if consensus_mode not in CONSENSUS_MODES:\n logger.warning(\"Invalid consensus_mode\")\n return make_fail_response(\"Invalid consensus_mode\")\n else:\n condition[\"consensus_mode\"] = consensus_mode\n\n if cluster_size >= 0:\n if cluster_size not in CLUSTER_SIZES:\n logger.warning(\"Invalid cluster_size\")\n return make_fail_response(\"Invalid cluster_size\")\n else:\n condition[\"size\"] = cluster_size\n\n logger.debug(\"condition={}\".format(condition))\n c = cluster_handler.apply_cluster(user_id=user_id, condition=condition,\n allow_multiple=allow_multiple)\n if not c:\n logger.warning(\"cluster_apply failed\")\n return make_fail_response(\"No available res for {}\".format(user_id))\n else:\n response_ok[\"data\"] = c\n return jsonify(response_ok), CODE_OK", "def execute(self, **kwargs):\n # Since node.cluster_id could be reset to '' during action execution,\n # we record it here for policy check and cluster lock release.\n forced = (self.action in [consts.NODE_DELETE, consts.NODE_OPERATION])\n saved_cluster_id = self.entity.cluster_id\n if saved_cluster_id:\n if self.cause == consts.CAUSE_RPC:\n res = senlin_lock.cluster_lock_acquire(\n self.context, self.entity.cluster_id, self.id, self.owner,\n senlin_lock.NODE_SCOPE, False)\n\n if not res:\n return self.RES_RETRY, 'Failed in locking cluster'\n\n 
try:\n self.policy_check(self.entity.cluster_id, 'BEFORE')\n finally:\n if self.data['status'] != pb.CHECK_OK:\n # Don't emit message since policy_check should have\n # done it\n senlin_lock.cluster_lock_release(\n saved_cluster_id, self.id, senlin_lock.NODE_SCOPE)\n return self.RES_ERROR, ('Policy check: ' +\n self.data['reason'])\n elif self.cause == consts.CAUSE_DERIVED_LCH:\n self.policy_check(saved_cluster_id, 'BEFORE')\n\n try:\n res = senlin_lock.node_lock_acquire(self.context, self.entity.id,\n self.id, self.owner, forced)\n if not res:\n res = self.RES_RETRY\n reason = 'Failed in locking node'\n else:\n res, reason = self._execute()\n if saved_cluster_id and self.cause == consts.CAUSE_RPC:\n self.policy_check(saved_cluster_id, 'AFTER')\n if self.data['status'] != pb.CHECK_OK:\n res = self.RES_ERROR\n reason = 'Policy check: ' + self.data['reason']\n finally:\n senlin_lock.node_lock_release(self.entity.id, self.id)\n if saved_cluster_id and self.cause == consts.CAUSE_RPC:\n senlin_lock.cluster_lock_release(saved_cluster_id, self.id,\n senlin_lock.NODE_SCOPE)\n return res, reason", "def update_cluster(self, cluster, params, *args, **kwargs):\n raise NotImplementedError", "def delete_cluster(self):\n cf_namespace_id = self.create_or_fetch_namespace()\n self.delete_action(cf_namespace_id)\n self.create_action(cf_namespace_id)\n self.invoke_action(cf_namespace_id)", "def update_cluster_results(session: Session, project: MasteringProject) -> Operation:\n op = _update_cluster_results_async(session, project)\n return operation.wait(session, op)", "def process(self):\n assert self.valid, 'cannot apply invalid op'\n from hive.indexer.cached_post import CachedPost\n\n action = self.action\n params = dict(\n date=self.date,\n community=self.community,\n community_id=self.community_id,\n actor=self.actor,\n actor_id=self.actor_id,\n account=self.account,\n account_id=self.account_id,\n post_id=self.post_id,\n role_id=self.role_id,\n notes=self.notes,\n title=self.title,\n )\n\n # Community-level commands\n if action == 'updateProps':\n bind = ', '.join([k+\" = :\"+k for k in list(self.props.keys())])\n DB.query(\"UPDATE hive_communities SET %s WHERE id = :id\" % bind,\n id=self.community_id, **self.props)\n self._notify('set_props', payload=json.dumps(read_key_dict(self.op, 'props')))\n\n elif action == 'subscribe':\n DB.query(\"\"\"INSERT INTO hive_subscriptions\n (account_id, community_id, created_at)\n VALUES (:actor_id, :community_id, :date)\"\"\", **params)\n DB.query(\"\"\"UPDATE hive_communities\n SET subscribers = subscribers + 1\n WHERE id = :community_id\"\"\", **params)\n self._notify('subscribe')\n elif action == 'unsubscribe':\n DB.query(\"\"\"DELETE FROM hive_subscriptions\n WHERE account_id = :actor_id\n AND community_id = :community_id\"\"\", **params)\n DB.query(\"\"\"UPDATE hive_communities\n SET subscribers = subscribers - 1\n WHERE id = :community_id\"\"\", **params)\n\n # Account-level actions\n elif action == 'setRole':\n DB.query(\"\"\"INSERT INTO hive_roles\n (account_id, community_id, role_id, created_at)\n VALUES (:account_id, :community_id, :role_id, :date)\n ON CONFLICT (account_id, community_id)\n DO UPDATE SET role_id = :role_id\"\"\", **params)\n self._notify('set_role', payload=Role(self.role_id).name)\n elif action == 'setUserTitle':\n DB.query(\"\"\"INSERT INTO hive_roles\n (account_id, community_id, title, created_at)\n VALUES (:account_id, :community_id, :title, :date)\n ON CONFLICT (account_id, community_id)\n DO UPDATE SET title = :title\"\"\", **params)\n 
self._notify('set_label', payload=self.title)\n\n # Post-level actions\n elif action == 'mutePost':\n DB.query(\"\"\"UPDATE hive_posts SET is_muted = '1'\n WHERE id = :post_id\"\"\", **params)\n self._notify('mute_post', payload=self.notes)\n if not DbState.is_initial_sync():\n CachedPost.update(self.account, self.permlink, self.post_id)\n\n elif action == 'unmutePost':\n DB.query(\"\"\"UPDATE hive_posts SET is_muted = '0'\n WHERE id = :post_id\"\"\", **params)\n self._notify('unmute_post', payload=self.notes)\n if not DbState.is_initial_sync():\n CachedPost.update(self.account, self.permlink, self.post_id)\n\n elif action == 'pinPost':\n DB.query(\"\"\"UPDATE hive_posts SET is_pinned = '1'\n WHERE id = :post_id\"\"\", **params)\n self._notify('pin_post', payload=self.notes)\n elif action == 'unpinPost':\n DB.query(\"\"\"UPDATE hive_posts SET is_pinned = '0'\n WHERE id = :post_id\"\"\", **params)\n self._notify('unpin_post', payload=self.notes)\n elif action == 'flagPost':\n self._notify('flag_post', payload=self.notes)\n\n return True", "def cluster(self, text):\n body = {'text': text}\n body = json.dumps(body)\n url = self.base_url + '/ml-service/phoenix-ml/cluster'\n headers = {\"ApiKey\": self.api_key, \"Content-type\": \"application/json\"}\n response = requests.post(url=url, data=body, headers=headers)\n response = response.json()\n return response", "def cluster_delete():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"id\"] or not r.form[\"col_name\"]:\n logger.warning(\"cluster operation post without enough data\")\n response_fail[\"error\"] = \"cluster delete without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n logger.debug(\"cluster delete with id={0}, col_name={1}\".format(\n r.form[\"id\"], r.form[\"col_name\"]))\n if r.form[\"col_name\"] == \"active\":\n result = cluster_handler.delete(id=r.form[\"id\"])\n else:\n result = cluster_handler.delete_released(id=r.form[\"id\"])\n if result:\n return jsonify(response_ok), CODE_OK\n else:\n logger.debug(\"cluster deletion failed\")\n response_fail[\"error\"] = \"Failed to delete cluster {}\".format(\n r.form[\"id\"])\n return jsonify(response_fail), CODE_BAD_REQUEST", "def publish_clusters(session: Session, project: MasteringProject) -> Operation:\n op = _publish_clusters_async(session, project)\n return operation.wait(session, op)", "def run(ceph_cluster, **kw):\n config = kw[\"config\"]\n\n build = config.get(\"build\", config.get(\"rhbuild\"))\n ceph_cluster.rhcs_version = build\n\n # Manage Ceph using ceph-admin orchestration\n command = config.pop(\"command\")\n log.info(\"Executing client %s\" % command)\n orch = Orch(cluster=ceph_cluster, **config)\n method = MAP_[command]\n method(orch, config)\n return 0", "def ModifyCluster(self, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n body = kwargs\n\n return self._SendRequest(HTTP_PUT,\n \"/%s/modify\" % GANETI_RAPI_VERSION, query, body)", "def cluster_create():\n logger.info(\"/cluster action=\" + r.method)\n request_debug(r, logger)\n if not r.form[\"name\"] or not r.form[\"host_id\"] or not \\\n r.form[\"consensus_plugin\"] or not r.form[\"size\"]:\n logger.warning(\"cluster post without enough data\")\n response_fail[\"error\"] = \"cluster POST without enough data\"\n response_fail[\"data\"] = r.form\n return jsonify(response_fail), CODE_BAD_REQUEST\n else:\n name, host_id, consensus_plugin, consensus_mode, size = \\\n r.form['name'], 
r.form['host_id'], r.form['consensus_plugin'],\\\n r.form['consensus_mode'] or CONSENSUS_MODES[0], int(r.form[\n \"size\"])\n if consensus_plugin not in CONSENSUS_PLUGINS:\n logger.debug(\"Unknown consensus_plugin={}\".format(\n consensus_plugin))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \\\n not in CONSENSUS_MODES:\n logger.debug(\"Invalid consensus, plugin={}, mode={}\".format(\n consensus_plugin, consensus_mode))\n return jsonify(response_fail), CODE_BAD_REQUEST\n\n if size not in CLUSTER_SIZES:\n logger.debug(\"Unknown cluster size={}\".format(size))\n return jsonify(response_fail), CODE_BAD_REQUEST\n if cluster_handler.create(name=name, host_id=host_id,\n consensus_plugin=consensus_plugin,\n consensus_mode=consensus_mode,\n size=size):\n logger.debug(\"cluster POST successfully\")\n return jsonify(response_ok), CODE_CREATED\n else:\n logger.debug(\"cluster creation failed\")\n response_fail[\"error\"] = \"Failed to create cluster {}\".format(\n name)\n return jsonify(response_fail), CODE_BAD_REQUEST", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def run(job=None, logger=None, **kwargs):\n environment = Environment.objects.get(id=ENV_ID)\n\n # Save cluster data on the resource so teardown works later\n create_required_parameters()\n resource = kwargs['resource']\n resource.create_gke_k8s_cluster_env = environment.id\n resource.create_gke_k8s_cluster_name = CLUSTER_NAME\n resource.name = CLUSTER_NAME\n resource.save()\n\n job.set_progress('Connecting to GKE...')\n builder = GKEClusterBuilder(environment, CLUSTER_NAME)\n\n job.set_progress('Sending request for new cluster {}...'.format(CLUSTER_NAME))\n builder.create_cluster(NODE_COUNT)\n\n job.set_progress('Waiting up to {} seconds for provisioning to complete.'\n .format(TIMEOUT))\n start = time.time()\n job.set_progress('Waiting for cluster IP address...')\n endpoint = builder.wait_for_endpoint(timeout=TIMEOUT)\n if not endpoint:\n return (\"FAILURE\",\n \"No IP address returned after {} seconds\".format(TIMEOUT),\n \"\")\n\n remaining_time = TIMEOUT - (time.time() - start)\n job.set_progress('Waiting for nodes to report hostnames...')\n nodes = builder.wait_for_nodes(NODE_COUNT, timeout=remaining_time)\n if len(nodes) < NODE_COUNT:\n return (\"FAILURE\",\n \"Nodes are not ready after {} seconds\".format(TIMEOUT),\n \"\")\n\n 
job.set_progress('Importing cluster...')\n cluster = builder.get_cluster()\n tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')\n kubernetes = Kubernetes.objects.create(\n name=CLUSTER_NAME,\n ip=cluster['endpoint'],\n port=443,\n protocol='https',\n serviceaccount=cluster['masterAuth']['username'],\n servicepasswd=cluster['masterAuth']['password'],\n container_technology=tech,\n )\n resource.create_gke_k8s_cluster_id = kubernetes.id\n resource.save()\n url = 'https://{}{}'.format(\n PortalConfig.get_current_portal().domain,\n reverse('container_orchestrator_detail', args=[kubernetes.id])\n )\n job.set_progress(\"Cluster URL: {}\".format(url))\n\n job.set_progress('Importing nodes...')\n for node in nodes:\n # Generate libcloud UUID from GCE ID\n id_unicode = '{}:{}'.format(node['id'], 'gce')\n uuid = hashlib.sha1(id_unicode.encode('utf-8')).hexdigest()\n # Create a bbones server record. Other details like CPU and Mem Size\n # will be populated the next time the GCE handler is synced.\n Server.objects.create(\n hostname=node['name'],\n resource_handler_svr_id=uuid,\n environment=environment,\n resource_handler=environment.resource_handler,\n group=resource.group,\n owner=resource.owner,\n )\n\n job.set_progress('Waiting for cluster to report as running...')\n remaining_time = TIMEOUT - (time.time() - start)\n status = builder.wait_for_running_status(timeout=remaining_time)\n if status != 'RUNNING':\n return (\"FAILURE\",\n \"Status is {} after {} seconds (expected RUNNING)\".format(\n status, TIMEOUT),\n \"\")\n\n return (\"SUCCESS\",\n \"Cluster is ready and can be accessed at {}\".format(url),\n \"\")", "def execute_req3(catalog, req_category):\n return controller.execute_req3(catalog, req_category)", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def perform_action(self, action_data):\n pass", "def test_update_hyperflex_cluster(self):\n pass", "def cluster(args):\n\n # if not (args.coverage or args.index):\n # logging.error('Must specify a coverage file or contigs + reference index.')\n\n logging.info('Starting clustering process')\n perform_clustering(args)", "def cluster(approach, datapath):\n report_path = test(datapath, approach, params[approach])\n c.echo('Report compiled at {0}.'.format(report_path))", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def execute(self, context):\n aws_connection = BaseHook.get_connection(self.aws_credentials)\n redshift = PostgresHook(self.conn_id)\n redshift_connection = redshift.get_connection(self.conn_id)\n\n config = configparser.ConfigParser()\n config.read_file(open('/usr/local/airflow/plugins/operators/aws.cfg'))\n KEY = aws_connection.login\n SECRET = aws_connection.password\n REGION = config.get('AWS', 'REGION')\n VPC_SECUTIRY_GROUPS = config.get('AWS', 'VPC_SECUTIRY_GROUPS')\n REDSHIFT_CLUSTER_TYPE = config.get(\"REDSHIFT\", \"CLUSTER_TYPE\")\n REDSHIFT_NODE_TYPE = config.get(\"REDSHIFT\", \"NODE_TYPE\")\n REDSHIFT_CLUSTER_IDENTIFIER = config.get(\"REDSHIFT\", \"CLUSTER_ID\")\n REDSHIFT_DB = redshift_connection.login\n REDSHIFT_DB_USER = redshift_connection.login\n REDSHIFT_DB_PASSWORD = redshift_connection.password\n REDSHIFT_ROLE_NAME = config.get(\"REDSHIFT\", \"S3_ROLE\")\n\n redshift = boto3.client('redshift',\n region_name=REGION,\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n response = redshift.create_cluster(\n\n ClusterType=REDSHIFT_CLUSTER_TYPE,\n NodeType=REDSHIFT_NODE_TYPE,\n DBName=REDSHIFT_DB,\n ClusterIdentifier=REDSHIFT_CLUSTER_IDENTIFIER,\n MasterUsername=REDSHIFT_DB_USER,\n MasterUserPassword=REDSHIFT_DB_PASSWORD,\n IamRoles=[REDSHIFT_ROLE_NAME],\n VpcSecurityGroupIds = [VPC_SECUTIRY_GROUPS]\n )\n self.log.info(response)", "def cluster_restart(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.restart(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster restart failed\")", "def update_cluster(\n self,\n cluster: Union[dto.Cluster, str],\n params: Mapping[str, Any]\n ) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def _cluster_status_action(self):\n yaml_load_err = \"Status of '{}' could not be loaded as yaml:\\n{}\"\n status_raw = zaza.model.run_action_on_leader(\"ovn-central\",\n \"cluster-status\")\n status_data = status_raw.data[\"results\"]\n # Verify expected items in the action result\n self.assertIn(\"ovnnb\", status_data)\n self.assertIn(\"ovnsb\", status_data)\n\n try:\n nb_status = yaml.safe_load(status_data[\"ovnnb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"northbound-cluster\",\n status_data[\"ovnnb\"]))\n try:\n sb_status = yaml.safe_load(status_data[\"ovnsb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"southbound-cluster\",\n status_data[\"ovnsb\"]))\n\n return sb_status, nb_status", "def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)", "def do_operation(self):\n operation = self.inputs['operation']\n res = self.entity.do_operation(self.context, **self.inputs)\n if res:\n return self.RES_OK, \"Node operation '%s' succeeded.\" % operation\n else:\n return self.RES_ERROR, \"Node operation '%s' failed.\" % 
operation" ]
[ "0.6829137", "0.66824836", "0.58821654", "0.5845386", "0.57353073", "0.57178664", "0.5618976", "0.56063074", "0.55692333", "0.54906636", "0.5473863", "0.546417", "0.5444256", "0.5412049", "0.53761446", "0.5353043", "0.53529376", "0.5323731", "0.5293265", "0.52870196", "0.52766496", "0.5274535", "0.52721524", "0.52622044", "0.52563196", "0.52463895", "0.52251446", "0.52223444", "0.5214766", "0.52100843" ]
0.7598276
0
Parse and return the number of incidents by day of the week
def fetch_incident_by_days(parsed_data): incident_counter = dict() for incident in parsed_data: day_of_week = incident['DayOfWeek'] if day_of_week in incident_counter: incident_counter[day_of_week] += 1 else: incident_counter[day_of_week] = 1 return incident_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hindu_day_count(cls, date):\n return date - cls.EPOCH", "def compute_heatsum_per_week(heatsum_day, day=5):\n heatsum_week = {}\n for k in heatsum_day:\n year, week, weekday = map(int, datetime.datetime.strftime(datetime.datetime.strptime(k, '%Y-%m-%d'), '%Y %W %w').split())\n if weekday == day:\n heatsum_week[(year, week)] = heatsum_day[k]\n return heatsum_week", "def weekday_activity(frame):\n\n data = DataFrame()\n data['weekday'] = DatetimeIndex(frame.inserted).weekday\n counts = DataFrame(arange(7)*0)\n return (counts[0]+data.weekday.value_counts()).fillna(0)", "def day_num(x):\r\n if x==\"Sunday\":\r\n return 0\r\n elif x==\"Monday\":\r\n return 1\r\n elif x==\"Tuesday\":\r\n return 2\r\n elif x==\"Wednesday\":\r\n return 3\r\n elif x==\"Thursday\":\r\n return 4\r\n elif x==\"Friday\":\r\n return 5\r\n elif x==\"Saturday\":\r\n return 6", "def get_summary(infile):\n sched = scheduler.parse_sched_file(infile)\n tracker = dict()\n for curr in sched:\n parts = curr.split('-')\n d = date(int(parts[0]), int(parts[1]), int(parts[2]))\n name = sched[curr].strip()\n if name not in tracker:\n tracker[name] = [0, 0]\n if d.weekday() == 4 or d.weekday() == 5:\n tracker[name][1] += 1\n else:\n tracker[name][0] += 1\n for name in tracker:\n print '%s weekdays=%d, weekends=%d' % (name, tracker[name][0], tracker[name][1])", "def cnts_by_airline_dow(flights):\n\n return ...", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def make_weeklycount(records):\n # convert the 'date' field to a datetime.date and add theisoweek\n for record in records:\n if 'start_date_local' in record:\n record['start_date_local'] = (\n datetime.strptime(record['start_date_local'], '%Y-%m-%d').date())\n record['week'] = (record['start_date_local'].isocalendar()[0] * 100\n + record['start_date_local'].isocalendar()[1])\n # then, make a dataset filled with the unique weeks and sports,\n # but no counts yet.\n # This functions is possibly much nicer with a defaultdict\n unique_weeks = SortedSet(record['week'] for record in records)\n unique_sports = SortedSet(record['type'] for record in records)\n data = {'weeks': unique_weeks, 'counts': {}}\n for sport in unique_sports:\n data['counts'].update({sport: []})\n # finally for each sport and week count the occurence of that sport\n for sport in unique_sports:\n for week in unique_weeks:\n count = sum(1 if (record['week'] == week and\n record['type'] == sport)\n else 0 for record in records)\n data['counts'][sport].append(count)\n return data", "def countAbudanceFromNames_byWeek(assignment_d, clade_s, startDate, endDate, delta, tipLog_name):\n\tpsuodocount = 0.1\n\tabundances_d = {} # key: week; value: dict of key:clade; value: count\n\tweekToDate_d = {}\n\t#assignment_d: key: node name; value: clade\n\n\tweek_l = []\n\n\ttipLog_open = open(tipLog_name, \"w\")\n\toutLine = \"\t\".join([\"Week\", \"Clade\", \"sample_withAnnot\", \"sample_inputID\"]) + \"\\n\"\n\ttipLog_open.write(outLine)\n\n\tcurrentStart = startDate\n\tcurrentEnd = currentStart + delta\n\tweek = 0\n\tweekName = str(week)\n\tweek_l = week_l + [weekName]\n\tabundances_d[weekName] = {}\n\n\twhile endDate >= currentEnd:\n\n\t\tlastWeekName = weekName\n\t\tweek += 1\n\t\tweekName = str(week)\n\t\tweek_l = week_l + [weekName]\n\n\t\tweekToDate_d[weekName] = currentStart\n\n\t\tabundances_d[weekName] = {} \n\n\t\tfor clade in abundances_d[lastWeekName]:\n\t\t\tabundances_d[weekName][clade] = psuodocount #change to add 
psuodocount\n\t\t\t# if clade == 'anc':\n\t\t\t# \tabundances_d[weekName][clade] += 1\n\n\n\t\tfor tip in assignment_d.keys():\n\t\t\tif \"NODE_\" not in tip and \"Wuhan\" not in tip:\n\t\t\t\ttry: \n\t\t\t\t\ttip_date = date.fromisoformat(tip.split(\"_\")[-2])\n\n\t\t\t\t\tif tip_date < currentEnd and tip_date >= currentStart:\n\n\t\t\t\t\t\tclade = assignment_d[tip]\n\t\t\t\t\t\tif clade not in abundances_d[weekName]:\n\t\t\t\t\t\t\tabundances_d[weekName][clade] = psuodocount #change to add psuodocount\n\t\t\t\t\t\tabundances_d[weekName][clade] += 1\n\t\t\t\t\t\tif clade != 'anc':\n\t\t\t\t\t\t\ttipNoAnnot = \"\"\n\t\t\t\t\t\t\tfor name in tip.split(\"_\")[:-3]: \n\t\t\t\t\t\t\t\ttipNoAnnot = tipNoAnnot + \"_\" + name\n\t\t\t\t\t\t\toutLine = \"\t\".join([ weekName, clade, tip, tipNoAnnot[1:]]) + \"\\n\"\n\t\t\t\t\t\t\ttipLog_open.write(outLine)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tpass \n\n\n\t\tcurrentEnd += delta\n\t\tcurrentStart += delta\n\ttipLog_open.close()\n\n\tnoFurtherAbudance = list(clade_s.copy())\n\tfor weekName in (reversed(week_l)):\n\n\t\tnoFurtherAbudance_last = noFurtherAbudance.copy()\n\t\tfor clade in noFurtherAbudance_last:\n\t\t\tif clade in abundances_d[weekName]:\n\n\t\t\t\tif abundances_d[weekName][clade] == psuodocount: #and clade != 'anc':\n\t\t\t\t\tabundances_d[weekName][clade] = 0\n\t\t\t\telse:\n\t\t\t\t\tnoFurtherAbudance.remove(clade)\n\n\treturn(abundances_d, weekToDate_d)", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def get_no_of_days(self, slug_ls):\n date_ls = []\n #for each country get first case confirmed date\n for i in slug_ls:\n url = self.base_url+\"dayone/country/\"+i+\"/status/confirmed\"\n response = requests.get(url)\n date_ls.append(response.json()[0]['Date'])\n \n t1 = date.today()\n days = []\n #Calculate 'days since first case' for each country\n for i in range(len(date_ls)):\n t2 = datetime.datetime.strptime(date_ls[i],\"%Y-%m-%dT%H:%M:%SZ\")\n days.append(str(t1-t2.date())[0:4])\n return days", "def visualize_days():\n\t\n\t#grab our parsed data that we parsed earlier\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in the parsed data, and count how many incidents happen on each\n\t#day of the week\n\tcounter = Counter(item[\"DayOfWeek\"] for item in data_file)\n\t\n\t#separate the x-axis data (days of the week) from the counter variable\n\t#from the y-axis (number of incidents each day)\n\tdata_list = [\n\t\t\t\tcounter[\"Monday\"],\n\t\t\t\tcounter[\"Tuesday\"],\n\t\t\t\tcounter[\"Wednesday\"],\n\t\t\t\tcounter[\"Thursday\"],\n\t\t\t\tcounter[\"Friday\"],\n\t\t\t\tcounter[\"Saturday\"],\n\t\t\t\tcounter[\"Sunday\"]\n\t\t\t\t]\n\tday_tuple = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\t\n\t#with y-axis data, assign it to a matplotlib plot instance\n\tplt.plot(data_list)\n\t\n\t#create amount of ticks need for x and y axes and assign labels\n\tplt.xticks(range(len(day_tuple)), day_tuple)\n\t\n\t#save the plot\n\tplt.savefig(\"Days.png\")\n\t\n\t#close plot file\n\tplt.clf()", "def get_weekday_number(date):\n return date.strftime('%w')", "def daily_table(self):\n htable = [0 for i in range(7)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[6]\n htable[evtime] += 1\n return htable", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def weekly():\n\n response = {}\n\n # 0..6 => Sunday..Saturday\n for i in 
range(7):\n hours = []\n interactions = 0\n\n for j in range(25):\n try:\n wfile = open(common.stats_path + '/weekly-average/' + str(i) + '/' + str(j))\n data = wfile.read()\n\n if j == 24:\n interactions = int(data)\n else:\n hours.append(int(data))\n\n wfile.close()\n except IOError:\n if i < 24:\n hours.append(0)\n\n response[DAYS[i]] = {'hours': hours, 'interactions': interactions}\n\n return response", "def weekly(evictiondata):\r\n evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in evictions_per_week.keys():\r\n evictions_per_week[row['week_date']] = row['filings_2020']\r\n else:\r\n evictions_per_week[row['week_date']] += row['filings_2020']\r\n return evictions_per_week", "def WeekdayNum(name):\n return _WEEKDAY_DICT.get(name.capitalize(), 0)", "def get_number_days(self):\r\n raise NotImplementedError", "def getMessageCountPerDay(self):\n\n # NOTE: We first filter distinct record ids for this filter set\n # and then use those record ids as additional filter parameter when we\n # perform the actual query for message count by date. This workaround\n # is (?) required to not get duplicate record rows that we can't\n # `distinct` away when using `annotate`, due to some crazy db joins.\n # TODO: Revise the workaround\n\n # Query distinct record ids for this filter set\n distinct_records = Record.objects.filter(\n self.getQuery()).distinct().values(\"id\")\n\n\n # Query the sum of message counts per day for above filtered\n # records, ordered by date in ascending order\n return Record.objects.filter(id__in=distinct_records).values(\n \"report__date_range_begin\").annotate(\n date=TruncDay(\"report__date_range_begin\"),\n cnt=Sum(\"count\")).values(\"date\", \"cnt\").order_by(\"date\")", "def days_in_data(len_dict):\n count_day = 0\n days_list = []\n for i in range(len_dict):\n days_list.append(count_day)\n count_day += 1\n return (days_list)", "def gen_weeklyFrequency(self):\n\n if len(self.fields) == 0:\n return None\n\n if self.validator.validate(self.fields) == False:\n return None\n\n weeklyFrequency = 0\n dayFields = ['day1','day2','day3','day4','day5','day6','day7']\n for dayField in dayFields:\n if dayField in self.fields:\n if self.fields[dayField] == True:\n weeklyFrequency += 1\n\n return weeklyFrequency", "def get_data_by_time(filename):\n with open(filename, 'r') as f_in:\n # set up csv reader object\n reader = csv.DictReader(f_in)\n result = {}\n result['n_week'] = [0] * 7\n result['d_week'] = [0] * 7\n result['cus_hour'] = [0] * 24\n result['sub_hour'] = [0] * 24\n for data in reader:\n duration = float(data['duration'])\n if data['day_of_week'] == 'Sunday':\n result['n_week'][0] += 1\n result['d_week'][0] += duration\n elif data['day_of_week'] == 'Monday':\n result['n_week'][1] += 1\n result['d_week'][1] += duration\n elif data['day_of_week'] == 'Tuesday':\n result['n_week'][2] += 1\n result['d_week'][2] += duration\n elif data['day_of_week'] == 'Wednesday':\n result['n_week'][3] += 1\n result['d_week'][3] += duration\n elif data['day_of_week'] == 'Thursday':\n result['n_week'][4] += 1\n result['d_week'][4] += duration\n elif data['day_of_week'] == 'Friday':\n result['n_week'][5] += 1\n result['d_week'][5] += duration\n else:\n result['n_week'][6] += 1\n result['d_week'][6] += duration\n\n hour = int(data['hour'])\n if data['user_type'] == 'Customer':\n result['cus_hour'][hour] += 1\n else:\n result['sub_hour'][hour] += 1\n return result", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", 
"def day_06_a() -> int:\n return 0", "def count_occurrences_per_day(measurehours=[8,15,23], untiltoday=False, savedatafile=True, savetype='excel', overwrite=False, verbose=True,\n filename='lungemedLPR3dataframe', datafileext=None):\n savepath = 'O:\\Administration\\\\02 - Økonomi og PDK\\Medarbejdermapper\\Kasper\\Focus1 - Ad hoc opgaver\\Lungemed sengedage og visitationer\\plots\\\\'\n if os.path.isfile(savepath+filename) and savedatafile and not overwrite:\n sys.exit(' Was asked to store data but overwrite=False and file already exists... hence exiting')\n\n if verbose: print(' - Getting the data to look at ')\n dataframe_days, dataframe_vis = lbv.getdata(verbose=verbose, filenameext=datafileext)\n outdic = {}\n\n for measurehour in measurehours:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if ('SLA' in datafileext) or ('SUH' in datafileext):\n start_day = datetime.datetime.strptime(\"09-03-2022 \" + str(measurehour) + \":00:00\", \"%d-%m-%Y %H:%M:%S\")\n else:\n start_day = datetime.datetime.strptime(\"02-02-2019 \"+str(measurehour)+\":00:00\", \"%d-%m-%Y %H:%M:%S\")\n\n if untiltoday:\n end_day = datetime.datetime.strptime(str(datetime.datetime.today()).split(' ')[0]+' '+str(measurehour)+\":00:00\", \"%Y-%m-%d %H:%M:%S\")\n elif ('SLA' in datafileext) or ('SUH' in datafileext):\n end_day = datetime.datetime.strptime(np.str(dataframe_days['INDTIDSPUNKT_DRGKONTAKT'].max()+datetime.timedelta(days=2)).split(' ')[0]+' '+str(measurehour)+\":00:00\", \"%Y-%m-%d %H:%M:%S\")\n else:\n end_day = datetime.datetime.strptime(\"02-05-2019 \" + str(measurehour) + \":00:00\", \"%d-%m-%Y %H:%M:%S\")\n date_list = [start_day + datetime.timedelta(days=x) for x in range(0, (end_day - start_day).days)]\n\n if verbose: print(' - Will count how many patients are in beds at any given day between '+\n start_day.strftime(\"%d-%m-%Y\")+' and '+end_day.strftime(\"%d-%m-%Y\")+' at '+str(measurehour)+\" o'clock\")\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print('---- \"Counting parameters from \"bed days data frame\" ----')\n count_cpr = [0] * len(date_list)\n occupancy_available = [0] * len(date_list)\n occupancy_actual = [0] * len(date_list)\n\n for pp, patient in enumerate(dataframe_days['CPR']):\n intime = dataframe_days['INDTIDSPUNKT_DRGKONTAKT'][pp]\n outtime = dataframe_days['UDTIDSPUNKT_DRGKONTAKT'][pp]\n\n for dd, datecheck in enumerate(np.asarray(date_list)):\n if verbose:\n infostr = ' Checking the date '+datecheck.strftime(\"%d-%m-%Y\")+' for patient number '+str(pp+1)\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n\n if (intime <= datecheck) and (datecheck <= outtime):\n count_cpr[dd] = count_cpr[dd] + 1\n\n if verbose: print('\\n - Estimating the occupancy in the available and actual beds ')\n for dd, datecheck in enumerate(np.asarray(date_list)):\n if 'SUH' in datafileext:\n if (datecheck > datetime.datetime.strptime(\"10-03-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) & \\\n (datecheck < datetime.datetime.strptime(\"27-06-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")):\n NbedsSUH = 18\n elif (datecheck > datetime.datetime.strptime(\"27-06-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) &\\\n (datecheck < datetime.datetime.strptime(\"08-08-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")): # sommer lavaktivitet\n NbedsSUH = 14\n elif (datecheck > datetime.datetime.strptime(\"24-12-2022 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) &\\\n (datecheck < datetime.datetime.strptime(\"02-01-2023 
00:00:00\", \"%d-%m-%Y %H:%M:%S\")): # jul lavaktivitet\n NbedsSUH = 14\n elif (datecheck > datetime.datetime.strptime(\"02-01-2023 00:00:00\", \"%d-%m-%Y %H:%M:%S\")) &\\\n (datecheck < datetime.datetime.strptime(\"01-01-2024 00:00:00\", \"%d-%m-%Y %H:%M:%S\")):\n NbedsSUH = 20\n else:\n NbedsSUH = 18\n\n occupancy_available[dd] = count_cpr[dd] / NbedsSUH * 100\n elif 'SLA' in datafileext:\n occupancy_available[dd] = count_cpr[dd] / 24. * 100\n else:\n if datecheck < datetime.datetime.strptime(\"10-06-2021 00:00:00\", \"%d-%m-%Y %H:%M:%S\"):\n occupancy_available[dd] = count_cpr[dd] / 24. * 100\n else:\n occupancy_available[dd] = count_cpr[dd] / 16. * 100\n\n if datecheck < datetime.datetime.strptime(\"01-03-2021 00:00:00\", \"%d-%m-%Y %H:%M:%S\"):\n occupancy_actual[dd] = count_cpr[dd] / 24. * 100\n else:\n occupancy_actual[dd] = count_cpr[dd] / 16. * 100\n\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print('---- \"Counting parameters from \"visitations data frame\" ----')\n count_vis_aka = [0] * len(date_list)\n count_vis_lungNAE = [0] * len(date_list)\n count_vis_lungSLA = [0] * len(date_list)\n count_vis_other = [0] * len(date_list)\n\n for pp, patient in enumerate(dataframe_vis['CPR']):\n intime = dataframe_vis['INDTIDSPUNKT_DRGKONTAKT'][pp]\n outtime = dataframe_vis['UDTIDSPUNKT_DRGKONTAKT'][pp]\n\n for dd, datecheck in enumerate(np.asarray(date_list)):\n if verbose:\n infostr = ' Checking the date ' + datecheck.strftime(\n \"%d-%m-%Y\") + ' for patient number ' + str(pp + 1)\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n\n if intime.strftime(\"%d-%m-%Y\") == datecheck.strftime(\"%d-%m-%Y\"):\n if ('Akut Afd. 1.sal, Sengeafs., SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()) or \\\n ('Akut Afd., Skadestue, SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()) or \\\n ('Akut Afd.stuen, Sengeafs., SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()):\n count_vis_aka[dd] = count_vis_aka[dd] + 1\n elif ('Lungemed. Sengeafs., NAE'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()) or \\\n ('Med. Lunge Sengeafs., NAE'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()):\n count_vis_lungNAE[dd] = count_vis_lungNAE[dd] + 1\n elif ('Med. 
Lunge Sengeafs., SLA'.lower() in dataframe_vis['SOR_KONTAKT_SP_Afsnit'][pp].lower()):\n count_vis_lungSLA[dd] = count_vis_lungSLA[dd] + 1\n else:\n count_vis_other[dd] = count_vis_other[dd] + 1\n\n if verbose: print(' - Adding results to output dictionary')\n outdic['dates_'+str(measurehour)] = date_list\n outdic['count_cpr_'+str(measurehour)] = count_cpr\n outdic['occupancy_available_'+str(measurehour)] = occupancy_available\n outdic['occupancy_actual_'+str(measurehour)] = occupancy_actual\n outdic['count_vis_aka_'+str(measurehour)] = count_vis_aka\n outdic['count_vis_lungNAE_'+str(measurehour)] = count_vis_lungNAE\n outdic['count_vis_lungSLA_'+str(measurehour)] = count_vis_lungSLA\n outdic['count_vis_other_'+str(measurehour)] = count_vis_other\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print('\\n - Building data frame and returning count of patients and stats')\n df_results = pd.DataFrame(outdic)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - calculating moving average (30 days window)')\n Ndaysavg = 30\n for measurehour in measurehours:\n df_results['occupancy_available_movingavg_' + str(measurehour)] = \\\n df_results['occupancy_available_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n df_results['occupancy_actual_movingavg_' + str(measurehour)] = \\\n df_results['occupancy_actual_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n\n if verbose: print(' - calculating moving average (5 days window)')\n Ndaysavg = 5\n for measurehour in measurehours:\n df_results['occupancy_available_movingavg5days_' + str(measurehour)] = \\\n df_results['occupancy_available_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n df_results['occupancy_actual_movingavg5days_' + str(measurehour)] = \\\n df_results['occupancy_actual_' + str(measurehour)].rolling(window=Ndaysavg).mean()\n\n if savedatafile:\n if savetype == 'excel':\n gdf.savefile(df_results, savepath + filename, format='excel', overwrite=overwrite, verbose=verbose)\n else:\n gdf.savefile(df_results, savepath + filename, format='csv', overwrite=overwrite, verbose=verbose)\n\n return df_results", "def visualize_days(parsed_data, output_dir):\n\n # Returning no. of incidents by each day of the week\n counter = fetch_incident_by_days(parsed_data)\n\n # data_list = fetch_incident_by_days.keys()\n\n # Separating the counter to have an ordered list\n y_values = [\n counter[\"Monday\"],\n counter[\"Tuesday\"],\n counter[\"Wednesday\"],\n counter[\"Thursday\"],\n counter[\"Friday\"],\n counter[\"Saturday\"],\n counter[\"Sunday\"]\n ]\n\n # Creating labels for x-axis\n x_labels = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\n # Assigning the data to plot\n plt.plot(y_values)\n\n # Assigning xticks on x-axis\n plt.xticks(range(len(x_labels)), x_labels)\n\n # Save the graph and show the figure\n file_name = os.path.join(output_dir, DAYS_PLOT_FILENAME)\n plt.savefig(file_name)\n plt.show()", "def get_day_number(day):\n day_dict = {\n 'Sunday': 0,\n 'Monday': 1,\n 'Tuesday': 2,\n 'Wednesday': 3,\n 'Thursday': 4,\n 'Friday': 5,\n 'Saturday': 6\n }\n return day_dict[day]", "def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]", "def day_of_the_week(arg):" ]
[ "0.6143644", "0.60441977", "0.59760845", "0.596039", "0.5862452", "0.58511275", "0.57420796", "0.57377696", "0.5718578", "0.57159406", "0.57054317", "0.56977046", "0.56142634", "0.5614007", "0.5611032", "0.55930954", "0.55916303", "0.55655766", "0.5558739", "0.5548449", "0.5544683", "0.5526198", "0.5515177", "0.5498033", "0.5468787", "0.5456712", "0.54458404", "0.5434064", "0.54268533", "0.5421673" ]
0.79031706
0
Parse and return the count of total incidents and unresolved incidents by category
def fetch_incident_by_category_and_resolution(parsed_data): incident_counter = dict() for incident in parsed_data: category = incident['Category'] resolution = incident['Resolution'] if category in incident_counter: incident_counter[category][0] += 1 if resolution == "NONE": incident_counter[category][1] += 1 else: if resolution == "NONE": incident_counter[category] = [1, 1] else: incident_counter[category] = [1, 0] return incident_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_risk_categories(data):\n results = Counter([row['risk_category'] for row in data])\n if '' in results:\n results['No Violations'] = results['']\n del results['']\n return results", "def fetch_incident_by_days(parsed_data):\n incident_counter = dict()\n\n for incident in parsed_data:\n day_of_week = incident['DayOfWeek']\n if day_of_week in incident_counter:\n incident_counter[day_of_week] += 1\n else:\n incident_counter[day_of_week] = 1\n\n return incident_counter", "def analyzeCategories(args, cnx):\n rows = dwDbGetAllCategories(cnx)\n\n print('{} distinct categories across all locations.'.format(len(rows)))\n\n cat_tokens_stats = {}\n\n # Create a dict counting the number of categories by tokens count\n for row in rows:\n category = row.BUS_HEADING\n category_tokens = category.split()\n tokens_count = len(category_tokens)\n # print('Category \"{}\": {} tokens'.format(category, tokens_count))\n if not tokens_count in cat_tokens_stats:\n cat_tokens_stats[tokens_count] = 0\n cat_tokens_stats[tokens_count] += 1\n\n # Sort the dict\n sorted_keys = sorted(cat_tokens_stats.keys())\n print(\"Counts: {}\".format(sorted_keys))\n for key in sorted_keys:\n print(\"{}: {}\".format(key, cat_tokens_stats[key]))\n\n return 0, 0, 0", "def getCategoryCounts(self, query):\n facade = self._getFacade()\n results = facade.getCategoryCounts(query)\n total = sum(result['count'] for result in results)\n return {'results': results,\n 'total': total}", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def defect_counts(source):\n rdr = csv.DictReader(source)\n assert rdr.fieldnames == [\"shift\", \"defect_code\", \"count\"]\n convert = map(\n lambda d: ((d['shift'], d['defect_code']), int(d['count'])),\n rdr)\n return collections.Counter(dict(convert))", "def get_crimes_by_category(self):\n\n result = {}\n for crime in self.crimes:\n cat_name = crime.category.category_name\n if cat_name in result:\n result[cat_name] += 1\n else:\n result[cat_name] = 1\n \n return result", "def n_count(category):\r\n sql = text('''\r\n WITH uniq AS (\r\n SELECT COUNT(app.id) FROM task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id)\r\n SELECT COUNT(*) FROM uniq\r\n ''')\r\n\r\n results = db.engine.execute(sql, category=category)\r\n count = 0\r\n for row in results:\r\n count = row[0]\r\n return count", "def get_category_count(self, category):\r\n if category in self.category_count:\r\n return float(self.category_count[category])\r\n else:\r\n return 0.0", "def category_count(self, cat):\n res = self.con.execute('select count from cc where category=\"%s\"'\n %(cat)).fetchone()\n if res == None:\n return 0\n else:\n return float(res[0])", "def parse_incidents(incidents):\n print 'Parsing year: %s' % year\n \n # Set up our data structure.\n incident_list = []\n \n # Loop through the list of Element() objects that were <entry> tags.\n for incident in incidents:\n print 'Reading incident: %s' % int(incident.xpath('title')[0].text_content().replace('ReportedCrime ID:', ''))\n # Set up a data structure for this <entry>.\n incident_dict = {}\n\n # Grab some attributes from this crime incident, like id, date, lat/lon, offense and address.\n incident_dict['id'] = 
int(incident.xpath('title')[0].text_content().replace('ReportedCrime ID:', ''))\n incident_dict['date'] = incident.xpath('content')[0].getchildren()[0].getchildren()[3].text_content().split('T')[0]\n incident_dict['lat'] = float(incident.xpath('content')[0].getchildren()[0].getchildren()[9].text_content())\n incident_dict['lon'] = float(incident.xpath('content')[0].getchildren()[0].getchildren()[10].text_content())\n incident_dict['offense'] = incident.xpath('content')[0].getchildren()[0].getchildren()[5].text_content()\n incident_dict['address'] = incident.xpath('content')[0].getchildren()[0].getchildren()[8].text_content().strip('B/O ')\n incident_dict['anc'] = incident.xpath('content')[0].getchildren()[0].getchildren()[14].text_content()\n \n # Some of these don't have neighborhood clusters. Let's wrap a simple try/except to capture when they don't.\n try:\n incident_dict['neighborhood_cluster'] = int(incident.xpath('content')[0].getchildren()[0].getchildren()[18].text_content())\n except ValueError:\n incident_dict['neighborhood_cluster'] = None\n \n # Write the entries to our data structure above.\n incident_list.append(incident_dict)\n \n # Returns the list.\n return incident_list", "def parse_incidents(incidents):\n print 'Parsing year: %s' % year\n \n # Set up our data structure.\n incident_list = []\n \n # Loop through the list of Element() objects that were <entry> tags.\n for incident in incidents:\n print 'Reading incident: %s' % int(incident.xpath('title')[0].text_content().replace('ReportedCrime ID:', ''))\n # Set up a data structure for this <entry>.\n incident_dict = {}\n\n # Grab some attributes from this crime incident, like id, date, lat/lon, offense and address.\n incident_dict['id'] = int(incident.xpath('title')[0].text_content().replace('ReportedCrime ID:', ''))\n incident_dict['date'] = incident.xpath('content')[0].getchildren()[0].getchildren()[3].text_content().split('T')[0]\n incident_dict['lat'] = float(incident.xpath('content')[0].getchildren()[0].getchildren()[9].text_content())\n incident_dict['lon'] = float(incident.xpath('content')[0].getchildren()[0].getchildren()[10].text_content())\n incident_dict['offense'] = incident.xpath('content')[0].getchildren()[0].getchildren()[5].text_content()\n incident_dict['address'] = incident.xpath('content')[0].getchildren()[0].getchildren()[8].text_content().strip('B/O ')\n incident_dict['anc'] = incident.xpath('content')[0].getchildren()[0].getchildren()[14].text_content()\n \n # Some of these don't have neighborhood clusters. 
Let's wrap a simple try/except to capture when they don't.\n try:\n incident_dict['neighborhood_cluster'] = int(incident.xpath('content')[0].getchildren()[0].getchildren()[18].text_content())\n except ValueError:\n incident_dict['neighborhood_cluster'] = None\n \n # Write the entries to our data structure above.\n incident_list.append(incident_dict)\n \n # Returns the list.\n return incident_list", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def count_occupied_seats_by_category(\n party_id: PartyID,\n) -> List[Tuple[TicketCategory, int]]:\n subquery = db.session \\\n .query(\n DbSeat.id,\n DbSeat.category_id\n ) \\\n .join(DbTicket) \\\n .filter_by(revoked=False) \\\n .subquery()\n\n rows = db.session \\\n .query(\n DbTicketCategory.id,\n DbTicketCategory.party_id,\n DbTicketCategory.title,\n db.func.count(subquery.c.id)\n ) \\\n .outerjoin(subquery, db.and_(DbTicketCategory.id == subquery.c.category_id)) \\\n .filter(DbTicketCategory.party_id == party_id) \\\n .group_by(DbTicketCategory.id) \\\n .order_by(DbTicketCategory.id) \\\n .all()\n\n return [(TicketCategory(row[0], row[1], row[2]), row[3]) for row in rows]", "def get_class_count(Y_category):\n # Assertions\n assert isinstance(Y_category, np.ndarray), \\\n 'Input must be a numpy ndarray.'\n cls, counts = np.unique(Y_category, return_counts = True)\n cls_counts = dict(zip(cls, counts))\n\n return cls_counts", "def _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types):\n params = _get_secure_message_threads_params(\n survey_id, business_id, conversation_tab, category, all_conversation_types\n )\n url = f'{current_app.config[\"SECURE_MESSAGE_URL\"]}/messages/count'\n response = requests.get(url, headers={\"Authorization\": _get_jwt()}, params=params)\n return response", "def update_fatalities_counts(persons):\n fatalities_dict = defaultdict(int)\n for person in persons:\n if person['injury_level'] == 'FATAL':\n acc_id = person['acc_id']\n fatalities_dict[acc_id] += 1\n\n for acc_id, num in fatalities_dict.iteritems():\n db_api.accident.set_field(acc_id, 'fatalities_count', num)", "def _convert_to_counts(self, indiv_data):\n count_data = {}\n for indiv in indiv_data:\n for allele in indiv:\n if allele is not None:\n allele_count = count_data.get(str(allele), 0)\n count_data[str(allele)] = allele_count + 1\n return count_data", "def contentcheck_categorical():\n filename = \"Analysis.txt\"\n temp_line = \"\"\n count = 0\n for line in open(filename, 'r'):\n temp_line = temp_line + line\n if \"VALUE COUNTS\" in temp_line:\n count = count + 1\n if \"DATA INFORMATION\" in temp_line:\n count = count + 1\n if \"MEAN, MEDIAN AND MODE:\" in temp_line:\n count = count + 1\n if \"Correlation\" in temp_line:\n count = count + 1\n if \"Normality Tests\" in temp_line:\n count = count + 1\n return count", "def get_categories_group_count():\n categoriesByArticle = RedisHelper.get_cache(KEY_CATEGORIES_COUNT_BY_ARTICLE)\n if RedisHelper.is_cache_exist(KEY_CATEGORIES_COUNT_BY_ARTICLE) is False:\n categoriesByArticle = list(Comment.objects.raw(SQL_GET_CATEGORY_COUNTS_BY_BLOG))\n RedisHelper.create_cache(KEY_CATEGORIES_COUNT_BY_ARTICLE, categoriesByArticle, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n return categoriesByArticle", "def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = 
Counter(tagged_lines)\n return tag_counts", "def summarizeNuclideCategories(self):\n runLog.info(\n \"Nuclide categorization for cross section temperature assignments:\\n\"\n + tabulate.tabulate(\n [\n (\n \"Fuel\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"fuel\"]\n ),\n ),\n (\n \"Coolant\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"coolant\"]\n ),\n ),\n (\n \"Structure\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"structure\"]\n ),\n ),\n ],\n headers=[\"Nuclide Category\", \"Nuclides\"],\n tablefmt=\"armi\",\n )\n )", "def fetch_counts(datestruct):\n response = call_responder('elasticsearch', 'query/daily_proofreader_hits')\n for rec in response['result']['hits']['hits']:\n data = rec['_source']\n if data['user'] not in datestruct:\n datestruct[data['user']] = {\"cleave\": 0, \"merge\": 0,\n \"split-supervoxel\": 0}\n if '/cleave/' in data['uri']:\n datestruct[data['user']]['cleave'] += 1\n elif '/merge' in data['uri']:\n datestruct[data['user']]['merge'] += 1\n elif '/split-supervoxel' in data['uri']:\n datestruct[data['user']]['split-supervoxel'] += 1", "def analyse(source):\n data = defaultdict(int)\n\n with open(source, 'r') as fp:\n soup = BeautifulSoup(fp.read(), 'html.parser')\n v = Validator()\n v.schema = {\n 'payee': {'type': 'string'},\n }\n for transaction in soup.find_all('tr'):\n transaction_info = transaction.find_all('td')\n v.validate({\n 'payee': _parse_payee(transaction_info[0].contents[0].strip()), # noqa\n })\n if v.errors:\n raise Exception(v.errors)\n data[v.document['payee']] += 1\n with open('category_mapping.yaml', 'r') as stream:\n existing_data = yaml.load(stream)\n with open('category_mapping.yaml', 'w') as stream:\n for payee, times in data.items():\n if times >= 2:\n # category = click.prompt(\n # '[*] Enter category for {}: '.format(payee),\n # type=str,\n # default=existing_data.get(payee),\n # )\n stream.write(\n yaml.dump(\n {payee: existing_data.get(payee, '<ENTER CATEGORY>')},\n default_flow_style=False),\n )", "def countTagsInFile(fname):\n with open(fname, 'r', encoding='utf-8') as f:\n for line in f:\n words = line.split(' ')\n for w in words:\n tag = w.split('_')[1].rstrip()\n cat = tag[0].upper()\n if tag not in dictionaries[cat]:\n dictionaries[cat][tag] = 1\n else:\n dictionaries[cat][tag] += 1", "def get_categories_with_ticket_counts_for_party(\n party_id: PartyID,\n) -> Dict[TicketCategory, int]:\n category = db.aliased(DbCategory)\n\n subquery = db.session \\\n .query(\n db.func.count(DbTicket.id)\n ) \\\n .join(DbCategory) \\\n .filter(DbCategory.id == category.id) \\\n .filter(DbTicket.revoked == False) \\\n .subquery() \\\n .as_scalar()\n\n rows = db.session \\\n .query(\n category,\n subquery\n ) \\\n .filter(category.party_id == party_id) \\\n .group_by(category.id) \\\n .all()\n\n return {\n _db_entity_to_category(category): ticket_count\n for category, ticket_count in rows\n }", "def get_resource_count(har_json):\n entries = har_json['log']['entries']\n\n resource_type_counts = Counter()\n\n for entry in entries:\n resource = entry['request']['url']\n dirty_resource_type = resource.split('.')[-1]\n resource_type = dirty_resource_type.split('?')[0] # Remove url params\n if len(resource_type) > 4:\n resource_type_counts['other'] += 1\n # print 'Found other resource type: {0}'.format(resource_type)\n else:\n resource_type_counts[resource_type] += 1\n\n return resource_type_counts", "def count_freq(self, types=1):\n count_dict = {}\n if types == 1:\n for cat in 
self.categories:\n num_images = sum(\n [1 for i in self.data['annotations'] if i['category_id'] == self.cats_idx[cat]])\n count_dict[cat] = num_images\n elif types == 2:\n pass\n\n return count_dict", "def load_fact_traffic_violations_count_agg(cur,code):\n cur.execute(code)", "def parse_counts(data):\r\n \r\n results = {}\r\n region_year = np.stack([data[0], data[4].astype('datetime64[Y]').astype(int) + 1970], axis=0)\r\n region_years, counts = np.unique(region_year, return_counts=True, axis=1)\r\n region_years_counts = list(zip(region_years[1], region_years[0], counts))\r\n results['years'] = np.unique(region_year[1])\r\n results['regions'] = np.unique(region_year[0])\r\n for year in results['years']:\r\n results[year] = np.array(get_counts_for_year(region_years_counts, year))\r\n return results" ]
[ "0.63893837", "0.6150627", "0.59409535", "0.579892", "0.57733595", "0.5721786", "0.5692909", "0.5685599", "0.5677747", "0.55068487", "0.544998", "0.544998", "0.53930646", "0.5385647", "0.5385461", "0.5301101", "0.52837664", "0.5264914", "0.5201706", "0.5188228", "0.5154808", "0.5118436", "0.51184124", "0.5108079", "0.50602186", "0.5046567", "0.50394565", "0.50315773", "0.5025588", "0.5012569" ]
0.7612092
0
Data visualization of total incidents and fraction of unresolved incidents per category via bar graph
def visualize_type(parsed_data, output_dir):
    # Fetching incident data by category
    counter = fetch_incident_by_category_and_resolution(parsed_data)
    # List of total incidents by Category
    # list of unsolved incidents by Category
    y1_values = [item[0] for item in counter.values()]
    y2_values = [item[1] for item in counter.values()]
    # Category labels
    x_labels = tuple(counter.keys())
    # Width of each bar
    bar_width = 0.4
    # bar locations on x-axis
    x1_locations = np.arange(len(x_labels))
    x2_locations = x1_locations + bar_width
    # assigning data to a bar plot
    plt.bar(x1_locations, y1_values, width=bar_width, label = "Total")
    plt.bar(x2_locations, y2_values, width=bar_width, label = "Unresolved")
    # Assigning labels and tick location to x-axis
    plt.xlabel('Incident Category', fontweight='bold')
    plt.ylabel('Incident Count', fontweight='bold')
    plt.xticks(x1_locations + bar_width/2, x_labels, rotation=90)
    # Giving some more room below x-axis
    plt.subplots_adjust(bottom=0.4)
    # Making the overall graph/figure larger
    plt.rcParams['figure.figsize'] = 12, 8
    plt.legend()
    file_name = os.path.join(output_dir, TYPE_PLOT_FILENAME)
    plt.savefig(file_name)
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def visualize_type():\n \n data_file = parse(MY_FILE, ',')\n\n # num of incidents per category\n counter = Counter(item['Category'] for item in data_file)\n\n # Set the labels\n labels = tuple(counter.keys())\n\n # Set exactly where the labels hit the x-axis\n xlocations = na.array(range(len(labels))) + 0.5\n\n # Width of each bar\n width = 0.5\n\n # Assign data to a bar plot\n plt.bar(xlocations, counter.values(), width=width)\n\n # Assign labels and tick location to x-axis\n plt.xticks(xlocations + width / 2, labels, rotation=90)\n \n # Give some more room so the x-axis labels aren't cut off\n plt.subplots_adjust(bottom=0.4)\n\n # Make the overall graph/figure larger\n plt.rcParams['figure.figsize'] = 12, 8\n\n # save\n plt.savefig('Type.png')\n\n # close\n plt.clf()", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def visualize_type():\n\t\n\t#grab our parsed data\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in parsed data, and count how many incidents happen by category\n\tcounter = Counter(item[\"Category\"] for item in data_file)\n\t\n\t#set the labels which are based on the keys of our counter\n\t#since order doesn't matter, we can just use counter.keys()\n\tlabels = tuple(counter.keys())\n\t\n\t#set exactly where the labels should hit the x-axis\n\txlocations = np.arange(len(labels)) + 0.5\n\t\n\t#width of each bar that will be plotted\n\twidth = 0.5\n\t\n\t#assign data to a bar plot\n\tplt.bar(xlocations, counter.values(), width=width)\n\t\n\t#assign labels and tick location to x-axis\n\tplt.xticks(xlocations + width /2, labels, rotation=90)\n\t\n\t#give more room to the x-axis so the labels aren't cut off\n\tplt.subplots_adjust(bottom=0.4)\n\t\n\t#make the overall graph/figure larger\n\tplt.rcParams['figure.figsize'] = 12, 8\n\t\n\t#save the graph\n\tplt.savefig(\"type.png\")\n\t\n\t#close the plot figure\n\tplt.clf()", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def graph_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"Yearly Vehicle Accident Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\", ci=None)\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"2_graph_cause_count.png\")", "def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count 
= df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')", "def missing_analysis(df):\n df_isnull = (df.isnull().sum() / len(df))*100\n df_isnull = df_isnull.drop(df_isnull[df_isnull ==0].index).sort_values(ascending = False)\n missing_data = pd.DataFrame({'Percentual Missing': df_isnull})\n missing_data.plot.bar()", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def bar_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if distinct_enum_X == 1:\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 20:\n score = 3\n elif distinct_enum_X > 20:\n score = 40 / distinct_enum_X\n return score", "def plot_bar_chart_quantum_vs_classical(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n mapping_dict: Dict[str, str],\n categories_to_exclude: List[str] = [],\n categories_keep_only: List[str] = None,\n out_file_name: str = None,\n out_folder_path: str = None,\n horizontal: bool = False,\n map_value_since_beginning: bool = False,\n figsize: Tuple[int, int] = (10, 5),\n legend_placement: str = 'upper center'\n ):\n\n fig, ax = plt.subplots(figsize=figsize)\n\n df = expand_columns(df_bugs, column_to_inspect)\n df = df[~(df[column_to_inspect].isin(categories_to_exclude))]\n\n if categories_keep_only is not None:\n df = df[df[column_to_inspect].isin(categories_keep_only)]\n\n if map_value_since_beginning:\n df[column_to_inspect] = df[column_to_inspect].map(mapping_dict)\n\n categories_q_bugs = list(df[\n df['type'] == 'Quantum'].groupby(\n column_to_inspect).count().sort_values(\n by='type', ascending=False).index)\n\n for component in df[column_to_inspect].unique():\n if component not in categories_q_bugs:\n categories_q_bugs.append(component)\n\n args = {\n \"hue\": \"type\",\n \"data\": df,\n \"palette\": PALETTE,\n \"ax\": ax,\n \"order\": categories_q_bugs\n }\n\n if horizontal:\n sns.countplot(y=column_to_inspect, **args)\n ax.grid(axis='x')\n else:\n sns.countplot(x=column_to_inspect, **args)\n ax.grid(axis='y')\n\n if not map_value_since_beginning:\n # map the value at the latest stage, thus 
in the labels\n obj_labels = ax.get_xticklabels()\n for i, l in enumerate(obj_labels):\n obj_labels[i] = mapping_dict[l.get_text()]\n ax.set_xticklabels(obj_labels, rotation=60, ha='right')\n\n ax.set_xlabel(capitalize(column_to_inspect), fontsize=15)\n ax.set_ylabel(\"Count\", fontsize=15)\n plt.legend(title=\"Type of Bug\", loc=legend_placement)\n plt.tight_layout()\n\n if out_file_name is not None and out_folder_path is not None:\n fig.savefig(os.path.join(out_folder_path, out_file_name), format=\"pdf\")", "def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def num_of_cat2_per_cat1(df, cat1, cat2, figsize=(12,5), normalize = False, num_label = 1, save_plot = False, path_dir = None ):\n # Group by category #1 and counts the unique values of category #2 for each group\n comp_count = df.groupby(cat1)[cat2].nunique().sort_values(ascending=False)\n if (normalize == True):\n comp_count = comp_count*100.0/(comp_count.sum())\n # Bar plot\n plt.figure(figsize=figsize)\n \n plot = sns.barplot(comp_count.index, comp_count.values, alpha=0.8)\n if (normalize == True):\n plt.ylabel(str('Number of ' + cat2 + ' [%]'), fontsize=12)\n plt.title(str('Percentage of '+ cat2+ ' per '+ cat1))\n else:\n plt.ylabel(str('Number of ' + cat2), fontsize=12)\n plt.title(str('Number of '+ cat2+ ' per '+ cat1))\n plt.xlabel(cat1, fontsize=12)\n plt.xticks(rotation=90)\n for ind, label in enumerate(plot.get_xticklabels()):\n if ind % num_label == 0: # every 15th label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n plt.show()\n if save_plot == True:\n plt.savefig((plot_dir + \"count_of\"+str(cat1)+\"per _\"+str(cat2)+\".png\"))\n plt.clf()", "def graph_cause_count_each(df, label):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(label.capitalize() + \" Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image with the correct cause naming\r\n name = \"2_graph_cause_count_\" + label + \".png\"\r\n fig.savefig(name)", "def get_fracs(counts, num_categories, total, chart_type, sort_data=True):\r\n fracs_labels_other = []\r\n fracs_labels = []\r\n all_counts = []\r\n other_cat = 0\r\n other_frac = 0\r\n red = 0\r\n\r\n # added in the ability to turn off sorting, since we want the data to be\r\n # unsorted for the area charts\r\n if sort_data:\r\n counts.sort()\r\n counts.reverse()\r\n\r\n area_table_out = []\r\n\r\n # this loop iterates over the OTU table and generates html code for the\r\n # data table\r\n for j, (n, t, s) in enumerate(counts):\r\n frac = float(n) / total\r\n if chart_type == 'pie':\r\n if j < 
num_categories - 1:\r\n red += n\r\n fracs_labels_other.append((t, frac))\r\n elif chart_type == 'area' or chart_type == 'bar':\r\n if j < num_categories:\r\n red += n\r\n fracs_labels_other.append((t, frac))\r\n\r\n tax = s.strip().split(\"<br>\")[-1]\r\n tax = tax.replace('\"', '')\r\n for_overlib = s.strip().rpartition(\"<br>\")[0]\r\n for_overlib = for_overlib.replace('\"', '')\r\n\r\n # Added this code because the data table is being presented\r\n # differently for the area charts\r\n if chart_type == 'pie':\r\n all_counts.append(\r\n DATA_HTML % (\r\n n,\r\n frac * 100,\r\n for_overlib,\r\n tax,\r\n tax,\r\n t))\r\n elif chart_type == 'area' or chart_type == 'bar':\r\n area_table_out.append(str(n))\r\n\r\n # returning a dictionary for the case of area charts, which is different\r\n # than the array passed by the pie charts\r\n if chart_type == 'area' or chart_type == 'bar':\r\n all_counts = area_table_out\r\n\r\n if len(counts) > num_categories:\r\n other_cat = len(counts) - (num_categories - 1)\r\n new_counts = counts[0:num_categories - 1]\r\n other = sum([c_over[0] for c_over in counts[num_categories - 1:]])\r\n other_frac = float(other) / total\r\n fracs_labels = [(t, float(n) / red) for n, t, s in new_counts]\r\n\r\n # added in the ability to turn off sorting, since we want the data to be\r\n # unsorted for the area charts\r\n if sort_data:\r\n fracs_labels_other.sort()\r\n fracs_labels.sort()\r\n\r\n return (\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac\r\n )", "def oneNumBar(df, colName):\n bins = pd.qcut(x=df[colName[0]], q=15, duplicates='drop')\n ax = bins.value_counts()\n bins = bins.cat.as_ordered()\n bins = bins.cat.categories\n bounds = bins.left \n bounds = list(bounds)\n bounds.append(bins[len(bounds)-1].right)\n texts = []\n for x,y in zip(bounds[0::],bounds[1::]):\n texts.append(\"(\" + str(x) + \", \" + str(y) + \"]\") \n barData = [go.Bar(x=texts, \n y=ax,\n marker=dict(\n color = '#92c5de',\n opacity=0.8)\n )] \n layout = go.Layout(\n title=\"Bar Plot Showing Count of Values for \" + str(colName[0]),\n xaxis=dict(\n title= colName[0]\n ),\n yaxis=dict(\n title= \"NUMBER OF RECORDS\", \n )\n )\n fig = go.Figure(data=barData, layout=layout)\n return {\"label\":\"Frequency\", \"plot\":fig}", "def value_counts_plot(df):\n \n plt.figure(figsize=(15,10))\n \n #get rid of sort_index() to change the graph\n return df.value_counts().sort_index().plot(kind='bar')", "def createChart(cladeGroup, data, taxonomyDict, outputFile):\n dfData = []\n for clade in cladeGroup: \n temp, other, totalTemp = valueCountsSpecies(data, cladeGroup[clade], taxonomyDict)\n relativeTemp = {}\n for val in temp:\n relativeTemp[val] = (temp[val] / sum(list(temp.values())))*100\n dfData.append(relativeTemp)\n\n tempDF = pd.DataFrame(dfData, index=list(cladeGroup.keys()))\n tempDF = tempDF.fillna(0)\n\n # Plotting\n sns.set(rc={'figure.figsize':(20,15)}, font_scale=2)\n ax = tempDF.plot(kind=\"bar\", stacked=True, colormap=ListedColormap(sns.color_palette(\"twilight\", 12)), rot=0)\n for rect in ax.patches:\n # Find where everything is located\n height = rect.get_height()\n width = rect.get_width()\n x = rect.get_x()\n y = rect.get_y()\n \n # The height of the bar is the data value and can be used as the label\n label_text = f'{height:.2f}%' # f'{width:.2f}' to format decimal values\n \n # ax.text(x, y, text)\n label_x = x + width / 2\n label_y = y + height / 2\n \n # only plot labels greater than given width\n if height > 0.00:\n ax.text(label_x, label_y, label_text, 
ha='center', va='center', fontsize=20, color=\"w\")\n\n plt.legend(loc=\"center right\", bbox_to_anchor=(1.25, 0.5), ncol=1)\n plt.savefig(outputFile, bbox_inches=\"tight\")\n plt.show()\n return", "def plot_clusters_cuisines(i,cuisine_countries_clusters):\n df=pd.DataFrame(group_by_cluster.iloc[i,:])\n df.reset_index(level=0, inplace=True)\n df.columns=['cuisine','count']\n df=df.sort_values(by='count',ascending=False)\n sns.set(rc={'figure.figsize':(11.7,5.27)})\n sns.barplot(x=\"cuisine\", y='count', data=df)\n plt.xticks(rotation=90)\n plt.title('cluster '+str(i)+ ' count: '+str(Counter(cuisine_countries_clusters)[i]))\n plt.tight_layout()\n plt.show()", "def normalized_hist_by_stability(metdat, catinfo, vertloc=80):\n\n stabconds = utils.get_stabconds()\n stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)\n colors = utils.get_colors(len(stabconds), basecolor='span')\n\n temp = metdat[stabcol].dropna()\n garb = temp.groupby(temp.index.hour).value_counts(normalize=True)\n garb.index.names = ['hour','stabclass']\n garb = garb.reorder_levels(['stabclass','hour'])\n\n hours = np.arange(24)\n newbottom = np.zeros(24)\n\n fig,ax = plt.subplots()\n for jj,cond in enumerate(stabconds):\n # Use this for missing data, also works for full data\n a = garb.loc[cond]\n b = a.index.tolist()\n c = a.values.tolist()\n for i in range(len(hours)):\n if (hours[i]) in b:\n pass\n else:\n b.insert(i,hours[i])\n c.insert(i,0)\n\n d = pd.Series(data = c, index = b)\n ax.bar(hours, d, color=colors[jj], bottom=newbottom)\n newbottom += c #<-- for if missing data, also works for full data \n\n #ax.bar(hours, garb.loc[cond], color=colors[jj], bottom=newbottom)\n #newbottom += garb.loc[cond]\n\n ax.set_ylabel('Probability [%]')\n ax.set_xlabel('Time of Day [Hour]')\n fig.legend(stabconds) \n #fig.legend(stabconds, loc=6, bbox_to_anchor=(1,0.5),framealpha=0)\n fig.tight_layout()\n\n return fig, ax", "def age_12_count() :\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import itertools\n\n train = pd.read_csv('./data/train.csv')\n \n # Split data to contain ages only up to 12 months\n train_12_months = train.loc[train['Age'] < 13, ['State','Type', 'Age', 'AdoptionSpeed']]\n\n # Divide by dog (Type = 1) and cat (Type = 2)\n dog_df = train_12_months.loc[train_12_months['Type'] == 1, :]\n cat_df = train_12_months.loc[train_12_months['Type'] == 2, :]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_count = []\n \n cat_age_labels = []\n cat_count = []\n \n # Find dog count for each age\n for i in range(dog_min_age, dog_max_age + 1) :\n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_count.append(count)\n dog_age_labels.append(i)\n\n # Find cat count for each age\n for i in range(cat_min_age, cat_max_age + 1) :\n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_count.append(count)\n cat_age_labels.append(i)\n \n # Plot bar graphs\n plt.figure()\n index = np.arange(len(dog_age_labels))\n plt.bar(index, dog_count)\n plt.xlabel('Age in Months')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Dogs Up to 12 Months of Age')\n plt.savefig('dog12.png', bbox_inches='tight')\n \n \n plt.figure()\n index = np.arange(len(cat_age_labels))\n plt.bar(index, cat_count)\n plt.xlabel('Age in Months')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Count')\n 
plt.title('Count of Cats Up to 12 Months of Age')\n plt.savefig('cat12.png', bbox_inches='tight')", "def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()", "def create_spend_chart(categories: list):\n\n BAR = \"o\"\n TITLE = \"Percentage spent by category\"\n \n # Sum up the total withdrawn amount\n withdrawals = {}\n total_amount_withdrawn = 0\n for category in categories:\n amount = category.get_withdrawals()\n withdrawals[category.name] = {\"amount\" : amount, \"percentage\" : 0}\n total_amount_withdrawn += amount\n \n # Calculate the percentages\n for category_name in withdrawals:\n percentage = withdrawals[category_name][\"amount\"]/total_amount_withdrawn*100\n # Why use floor() instead of int():\n # https://stackoverflow.com/a/31195540\n percentage = int(floor(percentage/10.)*10)\n withdrawals[category_name][\"percentage\"] = percentage\n\n # Make the bars\n percentages_lines = []\n for percentage in range(100, -10, -10):\n percentages_line = \"{:3}|\".format(percentage)\n for category_name in withdrawals:\n if withdrawals[category_name][\"percentage\"] >= percentage:\n percentages_line += \" \" + BAR + \" \"\n else:\n percentages_line += \" \"\n percentages_lines.append(percentages_line + \" \")\n\n # Make the horizontal line\n horizontal_line = \" {}\".format(\"---\"*len(categories) + \"-\")\n \n # Make the names\n bar_names_lines = []\n # find the length of the longest name\n max_name_len = max([len(name) for name in withdrawals])\n for line_num in range(max_name_len):\n bar_names_line = \" \"\n for category_name in withdrawals:\n if line_num < len(category_name):\n bar_names_line += \" \" + category_name[line_num] + \" \"\n else:\n bar_names_line += \" \"\n bar_names_lines.append(bar_names_line + \" \")\n\n chart_lines = [TITLE] + percentages_lines + [horizontal_line] + bar_names_lines\n\n chart_lines = \"\\n\".join(chart_lines)\n\n return chart_lines", "def create_spend_chart(categories):\n graph = \"Percentage spent by category\\n\"\n total_spendings = 0\n spendings = {}\n for category in categories:\n spendings[category.name] = 0\n for x in category.ledger:\n if x['amount'] < 0: #the withdraws are the ones with negative values\n spendings[category.name] += x['amount']\n spendings[category.name] = abs(spendings[category.name])\n for amount in spendings:\n total_spendings += spendings[amount]\n for amount in spendings:\n spendings[amount] = round_down(spendings[amount] / total_spendings * 100) #getting the percentage rounded down\n\n for i in range(100, -10, -10):\n \"\"\"getting the main part of the graph\"\"\"\n graph += str(i).rjust(3) + '| '\n for category in categories:\n if spendings[category.name] >= i:\n graph += 'o '\n else:\n graph += ' '\n graph += '\\n'\n graph += ' ' + '-' * (1 + len(categories) * 3) + '\\n'\n\n maxlen = 0\n for category in categories:\n if len(category.name) > maxlen:\n maxlen = len(category.name) # max string length between category names\n for i in range(maxlen):\n \"\"\"getting the labels for the x-axis\"\"\"\n graph += ' '\n for category in categories:\n if len(category.name) > i:\n graph += category.name[i] + ' '\n else:\n graph += ' '\n 
graph += '\\n '\n return graph[0:-1]", "def display_stacked_cat_bar(df, groupby, on, order=None, unit=None, palette=None, horizontal=True, figsize=(11, 11)):\n\n # Create a binary dataframe\n stacked_bar_df = pd.concat([df[groupby], pd.get_dummies(df[on])], axis=1)\n bins = list(stacked_bar_df.columns[1:])\n stacked_bar_df = stacked_bar_df.groupby(groupby)[bins].sum().reset_index()\n\n if order:\n if not isinstance(order, list):\n raise ValueError('\"order\" must be a list')\n if set(order) != set(bins):\n raise ValueError('\"order\" iterable must contain all possible values: {}'.format(str(bins)))\n\n stacked_bar_df = stacked_bar_df[[groupby] + order]\n bins = order\n\n # Scale if given unit\n if unit:\n # Calculate total\n stacked_bar_df['total'] = stacked_bar_df[bins].sum(axis=1)\n\n # Scale\n for bin_label in bins:\n stacked_bar_df[bin_label] /= stacked_bar_df['total']\n stacked_bar_df[bin_label] *= unit\n\n # Drop irrelevant 'total' column\n stacked_bar_df = stacked_bar_df.iloc[:, :-1]\n\n # Cumsum row wise\n for idx in range(1, len(bins)):\n stacked_bar_df[bins[idx]] = stacked_bar_df[bins[idx]] + stacked_bar_df[bins[idx - 1]]\n\n # Get relevant palette\n if palette:\n palette = palette[:len(bins)]\n else:\n palette = sns.color_palette()[:len(bins)]\n\n # Plot\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n\n if horizontal:\n for color, bin_label in reversed(list(zip(palette, bins))):\n sns.barplot(y=groupby, x=bin_label, data=stacked_bar_df, color=color, label=bin_label, ax=ax)\n else:\n for color, bin_label in reversed(list(zip(palette, bins))):\n sns.barplot(x=groupby, y=bin_label, data=stacked_bar_df, color=color, label=bin_label, ax=ax)\n\n ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')\n\n if unit:\n if horizontal:\n ax.set(xlim=(0, unit))\n else:\n ax.set(ylim=(0, unit))\n\n if horizontal:\n ax.set(xlabel='')\n else:\n ax.set(ylabel='')\n\n return ax", "def create_grouped_bar_chart(stats: dict[str, list[int]]):\n\n figure, axes = plot.subplots()\n\n labels = [str(e) for e in CauseOfDeath]\n x = numpy.arange(len(labels))\n\n bar_width = 0.15\n max_value = 0\n\n rects = []\n i = 0\n for label, values in stats.items():\n max_value = max(max_value, max(values))\n rects.append(axes.bar(x + (i * bar_width), values, bar_width, label=label))\n i = i + 1\n\n axes.set_title(\"Deaths arranged by cause and animal type\")\n axes.set_ylabel(\"Amount\")\n axes.set_xticks(x)\n axes.set_xticklabels(labels)\n axes.legend()\n\n for rect in rects:\n attach_text_labels(rect, axes)\n\n figure.tight_layout()\n return figure", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def create_course_bars(hist_df, fig, labels):\n colors = [\n \"#60a7ba\",\n \"#f0912d\",\n \"#357025\",\n \"#ba3622\",\n \"#8f33d6\",\n \"#6a4c4d\",\n \"#cf8af3\",\n ]\n all_numbers = []\n\n for index, _ in enumerate(fig[\"layout\"][\"annotations\"]):\n all_numbers.append(float(fig[\"layout\"][\"annotations\"][index][\"text\"]))\n\n for _, idx in enumerate(hist_df.index.unique()):\n row = all_numbers.index(idx)\n show_legend = row == 0\n traces = []\n\n # Calculate subfigure position in figure\n row = (row + 1) / 2\n col = 1 if row.is_integer() else 0\n row = math.ceil(row) - 1\n\n # Calculate dataframe for plot\n task_subset_df = hist_df.loc[idx]\n task_subset_df = task_subset_df.apply(pd.value_counts).T\n task_subset_df = task_subset_df.div(task_subset_df.sum(axis=1), axis=0)\n\n # Handle case if there are only correct 
answers\n if task_subset_df.shape != (\n 7,\n 2,\n ): # sometimes task_subset_df is in the wrong shape\n if task_subset_df.shape != (\n 7,\n 1,\n ):\n task_subset_df = task_subset_df.T\n\n if \"correct\" in task_subset_df.columns.values:\n task_subset_df[\"incorrect\"] = 0\n\n # Each bar needs a color and a legend entry and will therefore\n # be plotted individually\n for i, color in enumerate(colors):\n trace = go.Bar(\n x=[task_subset_df.index.values[i]],\n y=[task_subset_df.incorrect[i] * 100],\n name=labels[i],\n marker={\"color\": color},\n showlegend=show_legend,\n )\n traces.append(trace)\n\n # All traces build one subfigure\n for trace in traces:\n fig.append_trace(trace, row=row + 1, col=col + 1)\n\n # Figure styling\n fig.update_layout(\n height=400 * (row + 1),\n legend={\n \"orientation\": \"h\",\n \"xanchor\": \"left\",\n \"yanchor\": \"bottom\",\n \"x\": 0.15,\n \"y\": 1.05,\n },\n )\n fig.update_xaxes(showticklabels=False)\n\n # for i in range(0, row + 1):\n fig.update_yaxes(title_text=\"Students\", row=i + 1, col=1)\n return fig", "def _bar_example_3(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"categorical\")\n ch.set_title(\"Horizontal bar plot\")\n ch.set_subtitle(\"Horizontal with color grouping\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)" ]
[ "0.636478", "0.6169683", "0.61512375", "0.61238253", "0.6020945", "0.59974974", "0.59757274", "0.59205467", "0.5895362", "0.5847873", "0.5773675", "0.57487893", "0.5739332", "0.5734073", "0.57228506", "0.5708981", "0.5675812", "0.5629417", "0.5622804", "0.5611301", "0.560582", "0.55986303", "0.55792964", "0.5574032", "0.5573467", "0.55435157", "0.5541952", "0.55155015", "0.55100596", "0.5507361" ]
0.6276634
1
Check that the given metadata has correct types for all its members.
def check_metadata(metadata):
    message = 'The given metadata contains unsupported types.'
    assert all([item['type'] in ['category', 'value'] for item in metadata['details']]), message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _metadata_is_consistent(metadata):\n checks = []\n required = ('version', 'fields', 'size', 'width', 'height', 'points',\n 'viewpoint', 'data')\n for f in required:\n if f not in metadata:\n print('%s required' % f)\n checks.append((lambda m: all([k in m for k in required]),\n 'missing field'))\n checks.append((lambda m: len(m['type']) == len(m['count']) ==\n len(m['fields']),\n 'length of type, count and fields must be equal'))\n checks.append((lambda m: m['height'] > 0,\n 'height must be greater than 0'))\n checks.append((lambda m: m['width'] > 0,\n 'width must be greater than 0'))\n checks.append((lambda m: m['points'] > 0,\n 'points must be greater than 0'))\n checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',\n 'binary_compressed'),\n 'unknown data type:'\n 'should be ascii/binary/binary_compressed'))\n ok = True\n for check, msg in checks:\n if not check(metadata):\n print('error:', msg)\n ok = False\n return ok", "def test_check_metadata_fields(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n keys = [(\"name\", str), (\"postScriptName\", str),\n (\"fullName\", str), (\"style\", str),\n (\"weight\", int), (\"filename\", str),\n (\"copyright\", str)]\n\n missing = set([])\n unknown = set([])\n\n for j, itemtype in keys:\n\n for font_metadata in family.fonts:\n if j not in font_metadata:\n missing.add(j)\n\n for k in font_metadata:\n if k not in map(lambda x: x[0], keys):\n unknown.add(k)\n\n if unknown:\n msg = 'METADATA.json \"fonts\" property has unknown items [%s]'\n self.fail(msg % ', '.join(unknown))\n\n if missing:\n msg = 'METADATA.json \"fonts\" property items missed [%s] items'\n self.fail(msg % ', '.join(missing))", "def test_model_metadata_type(self):\n self.assertTrue(type(self.meta) is dict)", "def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")", "def test_types(self):\n field_types = (\n ('clip_id', int), ('created_at', datetime.datetime),\n ('description', str), ('filename', str),\n ('format', smscsv.MediaFormat), ('media_id', int), ('title', str)\n )\n for item in self.items:\n for name, type_ in field_types:\n self.assertIsInstance(getattr(item, name), type_)", "def CheckType(self, *args, **kwargs):\n pass", "def check_metadata(self):\n sql = f\"SELECT COUNT(*) FROM {constants.TABLES}\"\n if self.dbcnx.execute(sql).fetchone()[0] == 0:\n return False # No metadata; skip.\n sql = f\"SELECT name FROM {constants.TABLES}\"\n tables1 = [r[0] for r in self.dbcnx.execute(sql)]\n sql = \"SELECT name FROM sqlite_master WHERE type=?\"\n tables2 = [r[0] for r in self.dbcnx.execute(sql, (\"table\",))]\n # Do not consider metadata tables and sqlite statistics tables, if any.\n tables2 = [n for n in tables2 if not n.startswith(\"_\")]\n tables2 = [n for n in tables2 if not n.startswith(\"sqlite_\")]\n if set(tables1) != set(tables2):\n raise ValueError(\"corrupt metadata in DbShare Sqlite3 file\")\n # Does the index metatable exist?\n sql = f\"SELECT name, schema FROM {constants.INDEXES}\"\n self.dbcnx.execute(sql)\n # Does the views metatable exist?\n sql = f\"SELECT name, schema FROM {constants.VIEWS}\"\n self.dbcnx.execute(sql)\n return True", "def 
_verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))", "def test_metadata_top_keys_types(self):\n self.assertEqual(type(self.metadata.get(\"name\", None)),\n type(u\"\"), msg=\"'name' is {0}, but must be {1}\".format(type(self.metadata.get(\"name\", None)), type(u\"\")))\n\n self.assertEqual(type(self.metadata.get(\"designer\", None)),\n type(u\"\"), msg=\"'designer' is {0}, but must be {1}\".format(type(self.metadata.get(\"designer\", None)), type(u\"\")))\n\n self.assertEqual(type(self.metadata.get(\"license\", None)),\n type(u\"\"), msg=\"'license' is {0}, but must be {1}\".format(type(self.metadata.get(\"license\", None)), type(u\"\")))\n\n self.assertEqual(type(self.metadata.get(\"visibility\", None)),\n type(u\"\"), msg=\"'visibility' is {0}, but must be {1}\".format(type(self.metadata.get(\"visibility\", None)), type(u\"\")))\n\n self.assertEqual(type(self.metadata.get(\"category\", None)),\n type(u\"\"), msg=\"'category' is {0}, but must be {1}\".format(type(self.metadata.get(\"category\", None)), type(u\"\")))\n\n self.assertEqual(type(self.metadata.get(\"size\", None)),\n type(0), msg=\"'size' is {0}, but must be {1}\".format(type(self.metadata.get(\"size\", None)), type(u\"\")))\n\n self.assertEqual(type(self.metadata.get(\"dateAdded\", None)),\n type(u\"\"), msg=\"'dateAdded' is {0}, but must be {1}\".format(type(self.metadata.get(\"dateAdded\", None)), type(u\"\")))", "def test_as_python_types(self):\n obs = _as_python_types(self.metadata_map, self.headers)\n exp = [[2.1, 3.1, 3],\n ['str1', '200', 'string30'],\n [1, 2, 3]]\n self.assertEqual(obs, exp)", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def test_there_are_fields(self):\n filds = ['name', 'cost', 'description', 'duration', 'reach', 'school']\n for fild in filds:\n self.assertTrue(fild in dir(Magias),\n 'Class Magias does not have the field {}'.format(fild))", "def test__ScheduledEventEntityType__metadata_type():\n for instance in ScheduledEventEntityType.INSTANCES.values():\n vampytest.assert_subtype(instance.metadata_type, ScheduledEventEntityMetadataBase)", "def check_type(self):\n return True", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def test_types(self):\n \n self.assertIsInstance(self.detector_type, str)\n self.assertIsInstance(self.psd, dict)\n self.assertIsInstance(self.intensity, dict)\n self.assertIsInstance(self.database, str)\n self.assertIsInstance(self.position, list)\n self.assertIsInstance(self.angle, list)\n self.assertIsInstance(self.linearity_curve, dict)\n self.assertIsInstance(self.FOV, float)\n \n pass", "def test_model_field_types(self):\n self.assertTrue(isinstance(self.UserInfo.have_siblings, str))\n self.assertTrue(isinstance(self.UserInfo.known_env_exposures, str))\n self.assertTrue(isinstance(self.UserInfo.known_genetic_mutations, str))\n 
self.assertTrue(isinstance(self.UserInfo.age, int))", "def validate_type_annotations(self):\n valid = False\n invalid_types = []\n # skipping the TYPE keyword, iterate through the types\n # collecting invalid type annotations in list annots\n for t in self.annot_types[1:]:\n if t.lower() not in (\"group\", \"numeric\"):\n # if the value is a blank space, store a higher visibility\n # string for error reporting\n if \"Unnamed\" in t:\n invalid_types.append(\"<empty value>\")\n # Duplicated metadata header name causes type annotation issue.\n # Side effect of Pandas adding a suffix to uniquefy the header.\n # These invalid annotations should not be included in invalid\n # type annotation count. This exception may cause miscount of\n # type annot errors if user-supplied annotation has period.\n elif \".\" in t:\n pass\n else:\n invalid_types.append(t)\n if invalid_types:\n msg = 'TYPE row annotations should be \"group\" or \"numeric\"'\n self.store_validation_issue(\n \"error\",\n msg,\n \"format:cap:group-or-numeric\",\n associated_info=invalid_types,\n )\n else:\n valid = True\n return valid", "def _check_structure(input_dict, mandatory, model):\n\n # Check to see if the input dictionary has the keys for the mandatory metadata structure.\n for key, value in mandatory.items():\n if 'custom_fields' in input_dict:\n if key not in input_dict and key not in input_dict['custom_fields']:\n raise ValueError('input dictionary does not have mandatory key: {key}'.format(key=key))\n else:\n if key not in input_dict:\n raise ValueError('input dictionary does not have mandatory key: {key}'.format(key=key))\n # Check to see if the input dictionary has keys that are wrong.\n for key, value in input_dict.items():\n # Checks to see if keys of input dictionary are in the model dictionary.\n if key != 'custom_fields':\n if key not in model:\n raise ValueError('Unknown input dictionary key: {key}.'.format(key=key))\n\n # If the model dictionary key value is a list check to see if value in list are correct type.\n if type(value) is list:\n if type(value[0]) is not model[key][0]:\n err_message = 'input dictionary key: {ky} list type: {ty} is not {ref}'\n err_message = err_message.format(ky=key, ty=value[0], ref=model[key][0])\n raise ValueError(err_message)\n\n else:\n # Checks to see if the type of the value for key is correct, in comparison to the model dictionary.\n if type(value) is not model[key]:\n err_message = 'input dictionary key: {ky} type: {ty} is not {ref}'\n err_message = err_message.format(ky=key, ty=type(value), ref=model[key])\n raise ValueError(err_message)\n return True", "def _check_type(self):\n assert self.mapping == self.mapping_type, \\\n \"Expected header mapping='{}' but got mapping='{}' in '{}'\".format(\n self.mapping_type, self.mapping.upper(), self.filename)", "def test_get_datatypes(self):\n obs = _get_datatypes(self.metadata_map.ix[:, self.headers])\n exp = ['float8', 'varchar', 'integer']\n self.assertEqual(obs, exp)", "def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')", "def validate(self, types):\n known = set(self.fields.keys())\n for name, definition in types.items():\n for field, schema in definition.items():\n field_kind = schema.get('kind')\n if field_kind == 'list':\n items = schema.get('items')\n if not items:\n raise(ValidationError(\n 'items is not found in 
{name}.{field}'.format(\n name=name, field=field\n )\n ))\n if items not in known:\n fail_field(items, field, name)\n continue\n if field_kind not in known:\n fail_field(field_kind, field, name)\n known.add(name)\n return types", "def member_types(self):\n raise exceptions.NotImplementedError()", "def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def _ValidateType(self, local_field_names, require_guid):\n # Make sure the typename is non-empty.\n if not self.typename:\n self.AddFinding(findings_lib.MissingTypenameError(self))\n elif not isinstance(self.typename, str):\n self.AddFinding(\n findings_lib.IllegalKeyTypeError(self.typename, self.file_context))\n elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):\n self.AddFinding(\n findings_lib.InvalidTypenameError(self.typename, self.file_context))\n\n # Check for correct GUID format.\n if self.guid is not None and not ENTITY_TYPE_GUID_PATTERN.match(self.guid):\n self.AddFinding(findings_lib.InvalidTypeGuidError(self))\n if require_guid and self.guid is None:\n self.AddFinding(findings_lib.MissingTypeGuidError(self))\n\n # Passthrough types cannot be inherited, so make sure they are not defined\n # as abstract.\n if self.allow_undefined_fields and self.is_abstract:\n self.AddFinding(findings_lib.AbstractPassthroughTypeError(self))\n # Make sure the type description is non-empty.\n if not self.description:\n self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))\n\n # Check for duplicate local fields.\n # this check is case insensitive to catch dupes earlier in the event that\n # we stop explicitly rejecting upper case characters\n check_fields = set()\n for field in local_field_names:\n field_lower = field.lower()\n if field_lower in check_fields:\n self.AddFinding(findings_lib.DuplicateFieldError(self, field))\n continue\n check_fields.add(field_lower)\n\n # TODO(berkoben): Add more checks to validate fields in isolation\n # (in case we don't have a field set to check against)\n # (i.e. check for chirality, formatting. 
Could use actual Field objects)\n\n # Check formatting of field name\n if len(field.split('/')) > 2:\n self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))\n\n # Check for duplicate parent names.\n parent_names_check = set()\n for parent_name in self.unqualified_parent_names:\n if parent_name in parent_names_check:\n self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))\n continue\n parent_names_check.add(parent_name)\n\n # Check formatting of parent name\n if len(parent_name.split('/')) > 2:\n self.AddFinding(\n findings_lib.UnrecognizedParentFormatError(self, parent_name))\n\n # Enforce that the inherited_fields_expanded field is not set\n if self.inherited_fields_expanded:\n self.AddFinding(findings_lib.InheritedFieldsSetError(self))", "def _check_fields(self, content: JsonDict) -> None:\n self.assertIn(\"id\", content)\n self.assertIn(\"received_ts\", content)\n self.assertIn(\"room_id\", content)\n self.assertIn(\"event_id\", content)\n self.assertIn(\"user_id\", content)\n self.assertIn(\"sender\", content)\n self.assertIn(\"canonical_alias\", content)\n self.assertIn(\"name\", content)\n self.assertIn(\"event_json\", content)\n self.assertIn(\"score\", content)\n self.assertIn(\"reason\", content)\n self.assertIn(\"auth_events\", content[\"event_json\"])\n self.assertIn(\"type\", content[\"event_json\"])\n self.assertIn(\"room_id\", content[\"event_json\"])\n self.assertIn(\"sender\", content[\"event_json\"])\n self.assertIn(\"content\", content[\"event_json\"])", "def _check_dtype(self):\n\n # assert valid dtype\n if self.dtype not in PRIMITIVE_TYPES:\n raise ValueError(\"Type '{}' is invalid. Following types are \"\n \"allowed: {}\"\n .format(self.dtype, PRIMITIVE_TYPES.keys()))\n\n # assert valid dtypes for values\n allowed_types = PRIMITIVE_TYPES[self.dtype]\n\n for value in self.values:\n if not isinstance(value, allowed_types):\n raise TypeError(\"Column '{}' has invalud value '{}' with \"\n \"invalid type '{}'. Allowed types are: {}.\"\n .format(self.name,\n value,\n type(value),\n allowed_types))" ]
[ "0.71089", "0.67898035", "0.65201443", "0.6332316", "0.62834567", "0.6140083", "0.612568", "0.6124426", "0.610049", "0.6036116", "0.6000703", "0.59797204", "0.593941", "0.59330225", "0.592734", "0.5916956", "0.5866322", "0.5858302", "0.5841307", "0.58241904", "0.5821699", "0.5814619", "0.58116496", "0.58111787", "0.5792399", "0.5790605", "0.5790605", "0.57820284", "0.57768637", "0.57593346" ]
0.7577669
0
Validate inputs for functions whose first argument is a numpy.ndarray with shape (n,1).
def check_inputs(function):
    def decorated(self, data, *args, **kwargs):
        if not (isinstance(data, np.ndarray) and len(data.shape) == 2 and data.shape[1] == 1):
            raise ValueError('The argument `data` must be a numpy.ndarray with shape (n, 1).')
        return function(self, data, *args, **kwargs)
    decorated.__doc__ = function.__doc__
    return decorated
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_input_shapes(*args):\n\n # Collect the shapes of the inputs\n shapes = set()\n\n # DESIGN NOTES - currently allow:\n # - scalars,\n # - 0 dim ndarrays (also scalars but packaged differently)\n # - 1 dim ndarrays with only a single value\n\n for val in args:\n if isinstance(val, np.ndarray):\n # Note that 0-dim ndarrays (which are scalars) pass through as do\n # one dimensional arrays with a single value (also a scalar)\n if not(val.ndim == 0 or val.shape == (1,)):\n shapes.add(val.shape)\n # elif isinstance(val, Series):\n # # Note that 0-dim ndarrays (which are scalars) pass through\n # if val.ndim > 0:\n # shapes.add(val.shape)\n elif val is None or isinstance(val, (float, int, np.generic)):\n pass # No need to track scalars and optional values pass None\n else:\n raise ValueError(f'Unexpected input to check_input_shapes: {type(val)}')\n\n # shapes can be an empty set (all scalars) or contain one common shape\n # otherwise raise an error\n if len(shapes) > 1:\n raise ValueError('Inputs contain arrays of different shapes.')\n\n if len(shapes) == 1:\n return shapes.pop()\n\n return 1", "def array_input(f):\n @wraps(f)\n def wrapped(self, t):\n t = np.atleast_1d(t)\n r = f(self, t)\n return r\n return wrapped", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def _validate_function(t):\n t1, t2 = t.fromType, t.toType\n if is_array(t2):\n raise ArrayReturnError(t)\n validate(t1)\n validate(t2)", "def validate_common(ndarray, name):\n\tvalidate_ndarray(ndarray,(np.float, np.int), (2,) , name)", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def check_transformations(*args):\n assert args[0].shape == (21,21)\n assert args[0].dtype == np.float64\n if len(args) == 2:\n assert args[1].shape == (2,2)\n assert args[1].dtype == np.float64", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def _validate_input(self, data: Union[np.ndarray, pd.DataFrame, pd.Series],\n expected_dim: int, inference: bool = False) -> np.ndarray:\n allowed_types = (\n np.ndarray,\n pd.core.frame.DataFrame,\n pd.core.frame.Series\n )\n\n if type(data) not in allowed_types:\n raise TypeError('Supported input types: np.ndarray, '\n 'pd.core.frame.DataFrame, pd.core.frame.Series got'\n ' {}'.format(type(data)))\n\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n data = data.values\n\n if data.size == 0:\n raise ValueError('Empty array passed to fit() or predict()')\n\n if data.ndim > expected_dim:\n raise ValueError('Data with incorrect number of dimensions '\n 'passed to fit() or predict(). 
Max dim is '\n '{}, got {}'.format(expected_dim, data.ndim))\n\n if not np.issubdtype(data.dtype, np.number):\n raise ValueError('Non numeric value found in data')\n\n if not np.isfinite(data).all():\n raise ValueError('Data contains nan or inf')\n\n if inference:\n # additional checks on prediction time\n if not self._fitted:\n raise ValueError('Fit the model first.')\n\n if self._ndim == 2 and data.shape[-1] != self._shape[-1]:\n raise ValueError('Number of features does not match'\n ' data model was trained on. Expected'\n ' {}, got {}'\n .format(self._shape[-1], data.shape[-1]))\n\n return data", "def check_array_1D(X):\n X = check_is_numpy(X)\n if X.ndim != 1:\n raise ValueError(\n \"If passed as a np.array, X must be a 1-dimensional \"\n \"array, but found shape: {}\".format(X.shape)\n )\n if X.size == 0:\n raise ValueError(\n \"Input is empty or have a dimension of size 0\"\n \", found shape: {}\".format(X.shape)\n )\n \n return X", "def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. 
But got {}.'.format(\n len(inputs), len(targets)))", "def really1d(arr):\n if np.ndim(arr) != 1:\n return False\n # Empty list or array\n if len(arr) == 0:\n return True\n if np.any(np.vectorize(np.ndim)(arr)):\n return False\n return True", "def _is_1d_varray(arr):\r\n return len(arr.shape) < 2 or arr.shape[1] == 1", "def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)", "def __DimSiz_restriction_incorrect_ndarray_number2(self):\n\n strTestName = 'The number of rows of a Numpy array equals to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimEq('parameter1', 1, 'columns')\n\n RxCSObject.parameter1 = np.random.randn(3)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def check_input(times,signal,**kwargs):\n #-- check if the input are arrays and have the same 1D shape\n is_array0 = isinstance(times,np.ndarray)\n is_array1 = isinstance(signal,np.ndarray)\n if not is_array0: print(termtools.red('ERROR: time input is not an array'))\n if not is_array1: print(termtools.red('ERROR: signal input is not an array'))\n if not is_array0 or not is_array1:\n times = np.asarray(times)\n signal = np.asarray(signal)\n print(termtools.green(\"---> FIXED: inputs are arrays\"))\n print(termtools.green(\"OK: inputs are arrays\"))\n onedim = (len(times.shape)==1) & (len(signal.shape)==1)\n same_shape = times.shape==signal.shape\n if not onedim or not same_shape:\n print(termtools.red('ERROR: input is not 1D or not of same length'))\n return False\n print(termtools.green(\"OK: inputs are 1D and have same length\"))\n #-- check if the signal constains nans or infs:\n isnan0 = np.sum(np.isnan(times))\n isnan1 = np.sum(np.isnan(signal))\n isinf0 = np.sum(np.isinf(times))\n isinf1 = np.sum(np.isinf(signal))\n if isnan0: print(termtools.red('ERROR: time array contains nans'))\n if isnan1: print(termtools.red('ERROR: signal array contains nans'))\n if isinf0: print(termtools.red('ERROR: time array contains infs'))\n if isinf1: print(termtools.red('ERROR: signal array contains infs'))\n if not isnan0 and not isnan1 and not isinf0 and not isinf1:\n print(termtools.green('OK: no infs or nans'))\n else:\n keep = -np.isnan(times) & -np.isnan(signal) & -np.isinf(times) & -np.isinf(signal)\n times,signal = times[keep],signal[keep]\n print(termtools.green('---> FIXED: infs and nans removed'))\n #-- check if the timeseries is sorted\n is_sorted = np.all(np.diff(times)>0)\n if not is_sorted:\n print(termtools.red('ERROR: time array is not sorted'))\n sa = np.argsort(times)\n times,signal = times[sa],signal[sa]\n print(termtools.green('---> FIXED: time array is sorted'))\n else:\n print(termtools.green(\"OK: time array is sorted\"))\n print(termtools.green(\"No inconsistencies found or inconsistencies are fixed\"))\n \n #-- check keyword arguments:\n fnyq = getNyquist(times,nyq_stat=np.min)\n print(\"Default Nyquist frequency: {}\".format(fnyq))\n if 'nyq_stat' in kwargs:\n fnyq = getNyquist(times,nyq_stat=kwargs['nyq_stat'])\n print(\"Nyquist value manually set to {}\".format(fnyq))\n if 'fn' in kwargs and kwargs['fn']>fnyq:\n 
print(termtools.red(\"Final frequency 'fn' is larger than the Nyquist frequency\"))\n return times,signal", "def check_np_array_nan(func):\r\n\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n result = func(*args, **kwargs)\r\n if type(result) in [tuple, list]:\r\n count = 0\r\n for an_array in result:\r\n if type(an_array) is dict:\r\n for key in an_array:\r\n if np.isnan(an_array[key]).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th dict are:\\n\"\r\n )\r\n hydro_logger.warning(\"value of \" + key + \":\\n\")\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array[key])))\r\n else:\r\n if np.isnan(an_array).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th array are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array)))\r\n count = count + 1\r\n elif type(result) is np.array:\r\n if np.isnan(result).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(result)))\r\n return result\r\n\r\n return wrapper", "def soft_check_array(array, accept_sparse=True, dtype=None,\n ensure_2d=True, force_all_finite=True, allow_nd=True,\n ensure_min_samples=1, ensure_min_features=1,\n estimator=None):\n # Set initial change flag to False. Will be set to True if any test fails.\n CHANGE = False\n\n context = _get_context(estimator)\n\n # ---- Check dtype -----\n\n # store whether originally we wanted numeric dtype\n dtype_numeric = dtype == \"numeric\"\n\n # Get input array's dtype\n dtype_orig = getattr(array, \"dtype\", None)\n\n if not hasattr(dtype_orig, 'kind'):\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n\n if dtype_numeric:\n # We want to check that the dtype is numeric.\n if dtype_orig is not None and dtype_orig.kind == \"O\":\n dtype = np.float64\n else:\n dtype = None\n\n wrong_dtype = False\n if dtype is not None:\n if isinstance(dtype, (list, tuple)):\n wrong_dtype = dtype_orig is not None and dtype_orig not in dtype\n else:\n wrong_dtype = dtype_orig is not None and dtype_orig != dtype\n\n if wrong_dtype:\n CHANGE = True\n msg = (\"%sDtype of input array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, dtype_orig, dtype), InputDataWarning)\n\n # ----- check array shape ------\n if isinstance(accept_sparse, str):\n accept_sparse = [accept_sparse]\n\n if sp.issparse(array):\n CHANGE = _check_sparse_format(array, accept_sparse, dtype,\n force_all_finite, context)\n else:\n # Check if X is 2d\n if ensure_2d:\n if array.ndim == 1:\n if (ensure_min_samples >= 2) and (len(array) == 1):\n # Raise error if we want X to be 2d, but only have one obs\n raise ValueError(\"%sexpected at least 2 samples provided \"\n \"in a 2 dimensional array-like input\"\n % context)\n # Else,flag for bad formatting\n CHANGE = True\n msg = (\"%sX is one-dimensional. 
Reshape your data either \"\n \"using X.reshape(-1, 1) if your data has a single\"\n \"feature or X.reshape(1, -1) if it contains a single \"\n \"sample.\")\n warnings.warn(msg % context, InputDataWarning)\n\n # Check for number of dimensions\n if not allow_nd and array.ndim >= 3:\n warnings.warn(\"%sFound array with dim %d. %s expected <= 2.\" % (\n context, array.ndim, context), InputDataWarning)\n\n # Check for finite inputs\n if force_all_finite:\n ALL_FINITE = _check_all_finite(array)\n\n if not ALL_FINITE:\n CHANGE = True\n msg = (\"%sNot all elements in array are finite. This may \"\n \"cause estimation problems. Consider nan conversion \"\n \"and replacing infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n # Check shape\n try:\n shape_repr = _shape_repr(array.shape)\n except Exception as e:\n CHANGE = True\n warnings.warn(\"%sCannot infer shape of input data: may not be \"\n \"a suitable data type for estimation. Will proceed \"\n \"without checking dimensionality. \"\n \"Details:\\n%r\" % (context, e), InputDataWarning)\n shape_repr = 'NaN'\n\n if ensure_min_samples > 0:\n try:\n n_samples = _num_samples(array)\n except Exception as e:\n CHANGE = True\n warnings.warn(\"%sCannot infer samples size of input data: may not \"\n \"be a suitable data type for estimation.\"\n \"Will proceed without checking sample size. \"\n \"Details:\\n%r\" % (context, e), InputDataWarning)\n n_samples = np.inf\n\n if n_samples < ensure_min_samples:\n CHANGE = True\n msg = (\"%sFound array with %d sample(s) (shape=%s) \"\n \"while a minimum of %d is required.\")\n warnings.warn(msg % (context, n_samples, shape_repr,\n ensure_min_samples), InputDataWarning)\n\n if ensure_min_features > 0 and array.ndim == 2:\n try:\n n_features = array.shape[1]\n except Exception as e:\n CHANGE = True\n warnings.warn(\"%sCannot infer feature size of input data: may not \"\n \"be a suitable data type for estimation.\"\n \"Will proceed without checking feature size. \"\n \"Details:\\n%r\" % (context, e), InputDataWarning)\n n_features = np.inf\n\n if n_features < ensure_min_features:\n CHANGE = True\n msg = (\"%sFound array with %d feature(s) (shape=%s) while \"\n \" a minimum of %d is required.\")\n warnings.warn(msg % (context, n_features, shape_repr,\n ensure_min_features), InputDataWarning)\n\n if CHANGE:\n warnings.warn(\"%sInput data failed initial test. Estimation may fail. \"\n \"Consider converting input data to a numpy array with \"\n \"finite elements and no missing values.\" % context,\n InputDataWarning)\n\n return CHANGE", "def _isscalar(x):\n return np.isscalar(x) or hasattr(x, \"shape\") and x.shape == ()", "def _validate_array(t):\n basetype = t.type\n if is_array(basetype):\n raise ArrayOfArrayError(t)\n validate(basetype)", "def validate_event(event):\n # Check that we were passed the required arguments\n if 'method' in event and 'arguments' in event:\n numpy_method_name = event.get('method')\n\n # Check that the NumPy method is valid\n if hasattr(numpy, numpy_method_name) and callable(getattr(numpy, numpy_method_name)):\n return\n else:\n error_message = \"Invalid NumPy method: {}\".format(numpy_method_name)\n logger.error(error_message)\n raise AttributeError(error_message)\n else:\n error_message = \"Missing required parameter(s). 
Event must contain fields for \\'method\\' and \\'arguments\\'\"\n logger.error(error_message)\n raise TypeError(error_message)", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def test_input_shape_error(self):\n\n def net_func():\n input_value = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])\n paddle.bincount(input_value)\n\n with self.assertRaises(ValueError):\n self.run_network(net_func)", "def _check_input_timeseries(x: np.ndarray) -> np.ndarray:\n if not isinstance(x, np.ndarray):\n raise ValueError(\"The input time series must be a numpy array.\")\n if x.ndim <= 0 or x.ndim >= 4:\n raise ValueError(\n \"The input time series must have more than 0 dimensions and\"\n \"less than 4 dimensions.\"\n )\n if x.ndim == 3:\n return x[0]\n return x", "def __NDim_restriction_incorrect_ndarray_parameter(self):\n\n strTestName = 'The number of dimensions in a Numpy array equals a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = np.random.rand(3, 4)\n\n self.__parametersCheck_error(RxCSObject, NDimError, strTestName)", "def _validate(matrix:[[int]], vector: [int]):\n if not vector:\n raise InvalidArgumentError(\"vector must me not empty list\")\n if not matrix:\n raise InvalidArgumentError(\"matrix must me not empty list\")\n if not all(isinstance(row, list) for row in matrix):\n raise InvalidArgumentError(f\"not all matrix rows are lists\")\n if not all(len(row)==len(matrix[0]) for row in matrix):\n raise InvalidArgumentError(f\"not all matrix rows are equal length\")", "def _input_checks(\n true_values: Union[np.ndarray, dask.array.core.Array], pred_values: Union[np.ndarray, dask.array.core.Array]\n):\n\n def _cast(data: Union[np.ndarray, dask.array.core.Array]) -> Tuple[np.ndarray, np.ndarray]:\n if isinstance(data, dask.array.core.Array):\n to_return = data.compute()\n elif isinstance(data, np.ndarray):\n to_return = data\n else:\n raise TypeError(f\"Type {type(data)} is not recognized for true/pred values.\")\n return to_return\n\n true_vals = _cast(true_values)\n pred_vals = _cast(pred_values)\n\n assert len(true_values.shape) == len(pred_values.shape), \"true_values must have same dimensions as pred_values\"\n assert np.all(true_values.shape == pred_values.shape), \"true_values must have same dimensions as pred_values\"\n\n return true_vals, pred_vals", "def inputs(self) -> Sequence[jnp.ndarray]:\n pass", "def __DimSiz_restriction_correct_ndarray_ndarray_pedantic3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to the size of a dimension of another Numpy array [pedantic] (3) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 'rows', 'columns', pedantic=1, add=1)\n\n RxCSObject.parameter1 = 
np.random.randn(4, 3, 4)\n RxCSObject.aParameter1 = np.random.randn(3, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)" ]
[ "0.696551", "0.68897444", "0.68860936", "0.6620291", "0.6588289", "0.6457762", "0.64314246", "0.6405052", "0.64038384", "0.6370529", "0.6309466", "0.6070468", "0.6018504", "0.5987222", "0.5961908", "0.5931549", "0.59065855", "0.5864621", "0.5858168", "0.5847836", "0.5837565", "0.5816567", "0.5813501", "0.5790427", "0.577214", "0.57618874", "0.5760614", "0.5757183", "0.57507956", "0.5747331" ]
0.7947115
0
Listener will be called every second with the number of seconds passed since cog load
async def on_timer_update(self, secs: int):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_tick(self, time):\n pass", "def realtime(self):", "def timer_callback(self):\n self.get_logger().debug(f\"Timer heartbeat {self.timer_count}\")\n self.timer_count += 1", "def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1", "def time_automation_listener(now):\n action()", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "def on_epoch_start(self):", "def on_update(self, delta_time):\n pass", "def on_update(self, delta_time):\n pass", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def _latency(self):\n\n return\n time.sleep(0.005 + random.random() / 30.)", "def on_timeout(self):\n pass", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "async def _init_timed_events(self, client: Bot):\n\n await self.bot.wait_until_ready() # Wait for the bot to launch first\n client.secs = 0\n\n secs = 0 # Count number secs\n while True:\n client.dispatch(\"timer_update\", secs)\n secs += 1\n client.secs = secs\n await sleep(1)", "def on_timer(context, data_type, data):\n pass", "def start_timer(self):\n print \"Timer Object Started. Will update ADC Information every %s seconds\" % self.refreshTime\n self.timer=Timer(float(self.refreshTime)*1000, self._refresh_Visible_channels)", "def timer_callback(self):\n # There're 5 logger-level in ROS 2 get_logger() System.\n # Try out and watch whats difference.\n self.get_logger().debug(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().info(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().warn(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().error(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().fatal(f'==== Hello ROS 2 : {self.count}====')\n\n self.count += 1", "def LingerTime(self) -> int:", "def __init__(self):\n sleep(10)", "def before_tick(self, time):\n pass", "def on_start(self):", "def on_start(self):", "def on_start(self):\n self.write_log(\"策略启动\")\n self.cta_engine.event_engine.register(EVENT_TIMER, self.process_timer_event)", "def onCheckTimeOut(self):\r\n\r\n self.pros += 1\r\n self.pb_load.setValue(self.pros * 5)\r\n \r\n # timeout error\r\n if(self.pros == 20):\r\n self.check_timer.stop()\r\n self.onCheckConnectionError()\r\n # connected to server\r\n if(self.pros > 5 and self.check_result == 0): \r\n self.check_timer.stop()\r\n self.checkSession()", "def cooldown():\n print camera.CoolerON()\n camera.status.update()", "def timer_setup(self):\n pass", "def run(self):\n last_time = time.time()\n while self.running:\n now_time = time.time()\n interval = now_time - last_time\n last_time = now_time\n self.update(interval)\n time.sleep(Options['update interval'])", "def after_tick(self, time):\n pass", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def keepAliveReceived(self):" ]
[ "0.6764939", "0.6374665", "0.63279897", "0.6325875", "0.62773454", "0.6251775", "0.6120748", "0.60588634", "0.60588634", "0.6047806", "0.5971216", "0.59522736", "0.5940933", "0.59408", "0.5927215", "0.58737797", "0.5858658", "0.5828485", "0.58142847", "0.58096266", "0.5803449", "0.5803449", "0.57987696", "0.5739499", "0.57244575", "0.57223046", "0.57054764", "0.570231", "0.5692624", "0.5665442" ]
0.68236625
0
Inject the edition tools into the html content and return a BeautifulSoup object of the resulting content + tools.
def inject_edition_tools(response, request=None, context=None,
                         body_top_template_name="pages/_body_top.html",
                         body_bottom_template_name="pages/_body_bottom.html",
                         edit_frame_template_name=None):
    #pylint:disable=too-many-arguments
    content_type = response.get('content-type', '')
    if not content_type.startswith('text/html'):
        return None
    if context is None:
        context = {}
    if 'urls' not in context:
        context.update({'urls': {
            'edit': {
                'api_less_overrides': reverse('pages_api_less_overrides'),
                'api_sitecss': reverse('edit_sitecss'),
                'api_sources': reverse('pages_api_sources'),
                'api_page_element_base': reverse('pages_api_edit_element',
                    kwargs={'path': ''}),
                'api_medias': reverse('uploaded_media_elements',
                    kwargs={'path': ''})}}})
    context.update(csrf(request))
    soup = None
    if body_top_template_name:
        template = loader.get_template(body_top_template_name)
        body_top = render_template(template, context, request).strip()
        if body_top:
            if not soup:
                soup = BeautifulSoup(response.content, 'html5lib')
            if soup and soup.body:
                # Implementation Note: we have to use ``.body.next`` here
                # because html5lib "fixes" our HTML by adding missing
                # html/body tags. Furthermore if we use
                # ``soup.body.insert(1, BeautifulSoup(body_top, 'html.parser'))``
                # instead, later on ``soup.find_all(class_=...)`` returns
                # an empty set though ``soup.prettify()`` outputs the full
                # expected HTML text.
                soup.body.insert(1, BeautifulSoup(
                    body_top, 'html5lib').body.next)
    if body_bottom_template_name:
        template = loader.get_template(body_bottom_template_name)
        body_bottom = render_template(template, context, request).strip()
        if body_bottom:
            if not soup:
                soup = BeautifulSoup(response.content, 'html5lib')
            if soup and soup.body:
                soup.body.append(BeautifulSoup(body_bottom, 'html.parser'))
    if edit_frame_template_name:
        template = loader.get_template(edit_frame_template_name)
        edit_frame = render_template(template, context, request).strip()
        if edit_frame:
            if not soup:
                soup = BeautifulSoup(response.content, 'html5lib')
            edit_soup = BeautifulSoup(edit_frame, 'html5lib')
            soup = edit_soup
    return soup
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_html(self):\n content = self.content\n excerpt = self.excerpt\n\n content_html = publish_parts(content,\n writer_name='html',\n settings_overrides=DOCUTILS_OVERRIDES)['fragment']\n excerpt_html = publish_parts(excerpt,\n writer_name='html',\n settings_overrides=DOCUTILS_OVERRIDES)['fragment']\n\n return (content_html, excerpt_html)", "def update_html(self):\n self.html = self.driver.page_source\n self.soup = BeautifulSoup(self.html, features=\"lxml\")", "def getHTML(self):\n html = requests.get(self.URL).text\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def export_html(self, build=False):\n if build:\n html = export_html_code(self)\n return (html['script_tags'] +\n (html['html_state']).format(manager_state=json.dumps(html['manager_state'])) +\n html['grid_div'])\n return export_html_code(self)", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def test_gettesttools_html(self):\n pass", "def make_file_soup(self):\n soup = BeautifulSoup(self.html, 'html.parser')\n return soup", "def get_html(self):\r\n pass", "def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))", "def get_html(self):\r\n self.do_targeted_feedback(self.tree)\r\n html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)\r\n return html", "def html_content(self):\n hilite = CodeHiliteExtension(linenums=False, css_class='highlight')\n extras = ExtraExtension()\n markdown_content = markdown(self.content, extensions=[hilite, extras])\n oembed_content = parse_html(\n markdown_content,\n oembed_providers,\n urlize_all=True,\n maxwidth=app.config['SITE_WIDTH'])\n return Markup(oembed_content)", "def html_content(self):\n\t\thilite = CodeHiliteExtension(linenums=False, css_class='highlight')\n\t\textras = ExtraExtension()\n\t\tmarkdown_content = markdown(self.content, extensions=[hilite, extras])\n\t\toembed_content = parse_html(\n\t\t\tmarkdown_content,\n\t\t\toembed_providers,\n\t\t\turlize_all=True,\n\t\t\tmaxwidth=app.config['SITE_WIDTH'])\n\t\treturn Markup(oembed_content)", "def __html__(self) -> str:\n components = [\n self.attributee_html,\n self.linked_title if self.title else 'untitled document',\n self.date.string if self.date else '',\n self.descriptive_phrase,\n f'archived in {self.collection}' if self.collection else '',\n ]\n return self.components_to_html(components)", "def __html__(self):\n return self.html", "def soup(self) -> Soup:\n return Soup(self.html)", "def get_html(self):\r\n if self.debug == 'True':\r\n # Reset the user vote, for debugging only!\r\n self.user_voted = False\r\n if self.hints == {}:\r\n # Force self.hints to be written into the database. (When an xmodule is initialized,\r\n # fields are not added to the db until explicitly changed at least once.)\r\n self.hints = {}\r\n\r\n try:\r\n child = self.get_display_items()[0]\r\n out = child.render('student_view').content\r\n # The event listener uses the ajax url to find the child.\r\n child_id = child.id\r\n except IndexError:\r\n out = u\"Error in loading crowdsourced hinter - can't find child problem.\"\r\n child_id = ''\r\n\r\n # Wrap the module in a <section>. 
This lets us pass data attributes to the javascript.\r\n out += u'<section class=\"crowdsource-wrapper\" data-url=\"{ajax_url}\" data-child-id=\"{child_id}\"> </section>'.format(\r\n ajax_url=self.runtime.ajax_url,\r\n child_id=child_id\r\n )\r\n\r\n return out", "def get_html(self) -> List[ComponentMeta]:\n return [Div(id=\"additions\")]", "def _repr_html_(self) -> str:\n html_template = \"\"\"\n <script src=\"{webcomponents_js}\"></script>\n <link rel=\"import\" href=\"{facets_html}\">\n <facets-dive id=\"dive_elem\" height=\"{height}\"></facets-dive>\n <script>\n document.querySelector(\"#dive_elem\").data = {data};\n </script>\"\"\"\n html = html_template.format(\n facets_html=FACETS_DEPENDENCIES['facets_html'],\n webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],\n data=self._data.to_json(orient='records'),\n height=self.height,\n )\n return html", "def get_html(self):\r\n raise NotImplementedError(\r\n \"get_html() must be provided by specific modules - not present in {0}\"\r\n .format(self.__class__))", "def getHtml(self):\n return self.html", "def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content", "def _format_html(self, file_content):\n old_string = r\"<!-- INSERT JUMP BOX HERE -->\"\n new_string = self._getJumpBoxHtml()\n file_content = string.replace(file_content, old_string, new_string) \n\n additional_head_string = ''' \n<link media=\"screen\" href=\"dataTableMedia/css/demo_table.css\" type=\"text/css\" rel=\"stylesheet\"/>\n<link media=\"screen\" href=\"dataTableMedia/css/TableTools.css\" type=\"text/css\" rel=\"stylesheet\"/>\n<script src=\"util.js\" type=\"text/javascript\"></script>\n<script src=\"jquery.js\" type=\"text/javascript\"></script>\n<script src=\"customTables.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/jquery.dataTables.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/TableTools.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/jquery.dataTables.select.filtering.js\" type=\"text/javascript\" ></script>\n '''\n old_string = r\"<!-- INSERT ADDITIONAL HEAD STRING HERE -->\" \n file_content = string.replace(file_content, old_string, additional_head_string) \n new_string = '''\n <table id=\"dataTables-summaryArchive\" class=\"display\" cellspacing=\"0\" cellpadding=\"0\" border=\"0\"> \n <thead>\n <tr> \n '''\n #Write headers: 'name', 'rog', 'distance_count', 'cs_count', 'chothia_class', 'chain_count', 'res_count'\n for i,_header in enumerate(summaryHeaderList):\n new_string += '\\t<th title=\"{help}\">{header}</th>\\n'.format(header = summaryHeader2List[i],\n help = summaryHeaderTitleList[i])\n # end for \n new_string += '''\n </tr> \n </thead>\n </table>\n '''\n old_string = r\"<!-- INSERT NEW RESULT STRING HERE -->\" \n file_content = string.replace(file_content, old_string, new_string)\n return file_content", "def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 
'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)", "def to_html(self, content, request, **parameters):\n raise NotImplementedError", "def get_html_content(self, url):\n\n req = urllib2.Request(url, headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def get_context(self):\r\n _context = EditingDescriptor.get_context(self)\r\n # Add some specific HTML rendering context when editing HTML modules where we pass\r\n # the root /c4x/ url for assets. This allows client-side substitutions to occur.\r\n _context.update({\r\n 'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location.course_key),\r\n 'enable_latex_compiler': self.use_latex_compiler,\r\n 'editor': self.editor\r\n })\r\n return _context", "def soup(self):\n if not self._soup:\n resp = requests.get(self.url)\n if not resp.ok:\n logging.warning('Status of request is not ok.')\n self._soup = BeautifulSoup(resp.content, 'html.parser')\n\n return self._soup", "def mine(self):\n collections = []\n # Getting HTML snapshot with selenium, storing a soup object in .data\n self.scrape()\n # Returns only the parts of the soup that surround each collection\n collection_elements = self.get_collection_elements()\n # Turns each soup element into a CollectionElement object\n collections = self.get_info_from_collections(collection_elements)\n # NOTE THE RETURN VALUE IS MERELY TO PASS TESTING< MUST BE CHANGED\n return self.data", "def testHTML(self):\n\n html = self.E.html()" ]
[ "0.5796182", "0.57176113", "0.55976796", "0.5577817", "0.55086136", "0.5481117", "0.5449412", "0.537632", "0.5369063", "0.52791226", "0.5236761", "0.5205818", "0.51522255", "0.51490635", "0.51384366", "0.5132662", "0.5127385", "0.5110196", "0.5093892", "0.5060148", "0.50544304", "0.5053689", "0.50346285", "0.50047237", "0.497315", "0.49663144", "0.4960637", "0.49499547", "0.4944421", "0.49415502" ]
0.6480746
0
Ensure that model= is optional for abstract=True.
def test_factory_for_optional(self):
    class TestObjectFactory(base.Factory):
        class Meta:
            abstract = True

    self.assertTrue(TestObjectFactory._meta.abstract)
    self.assertIsNone(TestObjectFactory._meta.model)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_abstract_attribute_is_not_inherited(self):\r\n assert not ConcreteModel.__abstract__\r\n assert not ConcreteModelWithCol.__abstract__", "def test_get_base_polymorphic_model_skip_abstract(self):\n class A(PolymorphicModel):\n class Meta:\n abstract = True\n\n class B(A):\n pass\n\n class C(B):\n pass\n\n self.assertIs(get_base_polymorphic_model(A), None)\n self.assertIs(get_base_polymorphic_model(B), B)\n self.assertIs(get_base_polymorphic_model(C), B)\n\n self.assertIs(get_base_polymorphic_model(C, allow_abstract=True), A)", "def test_factory_for_and_abstract_factory_optional(self):\n class TestObjectFactory(base.Factory):\n pass\n\n self.assertTrue(TestObjectFactory._meta.abstract)\n self.assertIsNone(TestObjectFactory._meta.model)", "def check_class_definition(cls):\n super().check_class_definition()\n\n if not cls.model:\n cls.definition_error('Must provide \"model\" attribute.')", "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)", "def is_abstract(cls):\n return cls.__dict__.get('abstract', False)", "def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model", "def _test_abstract(some_type: Type[object]) -> None:\n if hasattr(some_type, '__abstractmethods__') and some_type.__abstractmethods__: # type: ignore[attr-defined]\n raise TypeError(f'{some_type!r} is abstract')", "def test_basemodel_none_kwargs(self):\n with self.assertRaises(TypeError):\n BaseModel(id=None, created_at=None, updated_at=None)", "def test_abstract_model(self):\n self.assertRaises(ImproperlyConfigured, self.site.register, Location)", "def is_abstract(cls) -> bool:\n return cls.get_ql_class() is None", "def is_optional(self):\n raise exceptions.NotImplementedError()", "def test_basemodel_basic_instance_none(self):\n with self.assertRaises(TypeError):\n BaseModel(None)", "def test_noarguments(self):\n self.assertEqual(BaseModel, type(BaseModel()))", "def _raise_none_model(self):\n raise ValueError(\"Model is of type None! 
Was it not initialized?\")", "def prepare_model(self, **kwargs):\n pass", "def _model_definition_validate(self):\n try:\n assert isinstance(self.__class__.MODEL_TYPE, str)\n assert (isinstance(self.__class__.PRIMARY_KEY, str) or\n self.__class__.PRIMARY_KEY is None)\n assert isinstance(self.__class__.PRIORITY, int)\n for key in self.__class__.MODEL:\n assert re.match(\"^\" + KEY_RE_CONSTRAINT + \"$\", key)\n assert 'name' in self.__class__.MODEL\n except:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable\" % (\n self.__class__.MODEL_TYPE))\n\n if self.__class__.PRIMARY_KEY and self.__class__.PRIMARY_KEY != 'name':\n if self.__class__.PRIMARY_KEY not in self.__class__.MODEL:\n raise ModelInvalidException(\n \"Model %s primary key %s does not exists\" % (\n self.__class__.MODEL_TYPE,\n self.__class__.PRIMARY_KEY))\n\n if not self.__class__.MODEL[self.__class__.PRIMARY_KEY][2]:\n raise ModelInvalidException(\n \"Model %s primary key %s should be mandatory\" % (\n self.__class__.MODEL_TYPE,\n self.__class__.PRIMARY_KEY))\n\n for constraints in self.__class__.MODEL.values():\n if len(constraints) != 6:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable \"\n \"(missing field)\" % (\n self.__class__.MODEL_TYPE))\n\n try:\n # Be sure default values are of the declared type\n # make some others validation on default value\n for key, constraints in self.__class__.MODEL.items():\n # Only act on non-mandatory keys as default\n # is provided. Skip 'name' checking.\n if not constraints[2] and key != 'name':\n # Validate default value type\n assert isinstance(constraints[3],\n constraints[0])\n # Validate default value match the regexp\n # if str type\n if constraints[0] is str:\n assert re.match(constraints[1],\n constraints[3])\n # Validate list default values match the regexp\n # if list type\n if isinstance(constraints[0], list):\n assert all([re.match(constraints[1], c) for\n c in constraints[3]]) is True\n except:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable \"\n \"(Wrong default value according to the type \"\n \"or regex)\" % (\n self.__class__.MODEL_TYPE))\n\n # Validate the callbacks of the inherited model\n try:\n # Be sure we have only the authorized callbacks\n assert len(set(AUTHORIZED_CALLBACKS).symmetric_difference(\n set(self.__class__.CALLBACKS))) is 0\n # Be sure the callbacks are callable or NotImplemented\n for key, callback in self.__class__.CALLBACKS.items():\n if (not callable(callback)\n and callback is not NotImplementedError):\n raise Exception\n except:\n raise ModelInvalidException(\n \"Model %s callbacks are invalid, model is not usable\" % (\n self.__class__.MODEL_TYPE))", "def test_basemodel_save_none(self):\n with self.assertRaises(TypeError):\n BaseModel.save()", "def test_attempting_to_save_abstract_model_fails(self):\r\n with self.assertRaises(CQLEngineException):\r\n AbstractModelWithFullCols.create(pkey=1, data=2)", "def test_alright_when_required_field_is_missing_but_default_is_given():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True,\n 'default': 'portuguese'},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True}}\n product1 = {'source': ['Whatever']}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. 
No exceptions were raised.", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def __init__(self):\n self.clean_optional()", "def validate_model(self) -> Tuple[T_co, T_co]:\n raise NotImplementedError", "def test_alright_when_non_required_field_is_missing():\n\n model_definition = {'language': {'type': 'fixed',\n 'required': True,\n 'persisted': True},\n 'source': {'type': 'list',\n 'required': False,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': False,\n 'persisted': True}}\n product1 = {'language': 'english'}\n factory = ProductModelFactory(model_definition)\n factory.build('product1', product1)\n # Ok. No exceptions were raised.", "def abstract(self):\n\n return self._abstract", "def optional(self) -> bool:\n return False", "def isabstract(object):\r\n return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)", "def test_abstract_name(self):\n\n app = Zask(__name__)\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'\n db = sqlalchemy.SQLAlchemy(app)\n\n class Base(db.Model):\n __abstract__ = True\n id = db.Column(db.Integer, primary_key=True)\n\n class Duck(Base):\n pass\n\n self.assertFalse(hasattr(Base, '__tablename__'))\n self.assertEqual(Duck.__tablename__, 'duck')", "def set_abstract(self, abstract):\n\n self._abstract = abstract\n # reset lazy loading\n self._abstract_words = []\n self._full_text = []\n self._full_text_words = []", "def initialize_main_model(self, model, **kwargs):\n return NotImplementedError(\n \"Initializer has not implemented an initialize_main_model method. Derived classes \"\n \"are required to overload this.\"\n )" ]
[ "0.66813874", "0.6643063", "0.64441586", "0.62198526", "0.60281485", "0.5868093", "0.57758063", "0.5757518", "0.57101095", "0.5701127", "0.56429243", "0.5633787", "0.56319803", "0.5624593", "0.5621721", "0.55961514", "0.5595935", "0.5579504", "0.55437565", "0.55372465", "0.5503186", "0.54791296", "0.5459351", "0.5446245", "0.5427288", "0.54057825", "0.5372045", "0.53711206", "0.5366726", "0.5363222" ]
0.69028527
0
Tests that the sequence of a 'slave' factory cannot be reset.
def test_reset_sequence_subclass_fails(self):
    class SubTestObjectFactory(self.TestObjectFactory):
        pass

    with self.assertRaises(ValueError):
        SubTestObjectFactory.reset_sequence()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))", "def test_01_factory_reset(self):\n time.sleep(_LOG_CATCH_UP_DELAY)\n start_time = datetime.datetime.now()\n\n self.device.factory_reset()\n self.assertTrue(\n self.device.connected,\n f\"{self.device.name} is offline after factory_reset() execution \"\n \"finished. factory_reset should block until the device comes back \"\n \"online and becomes responsive.\")\n self._verify_no_unexpected_reboots(start_time)", "def slaveDisconnected(slaveName):", "def test_stopped_first_master(\n event_listener,\n salt_mm_master_1,\n salt_mm_master_2,\n salt_mm_minion_1,\n salt_mm_minion_2,\n mm_master_2_salt_cli,\n):\n with salt_mm_master_1.stopped():\n start_time = time.time()\n\n _run_salt_cmds([mm_master_2_salt_cli], [salt_mm_minion_1, salt_mm_minion_2])\n\n # pylint: disable=unbalanced-tuple-unpacking\n minion_1_ret_events, minion_2_ret_events = _get_all_ret_events_after_time(\n [salt_mm_master_1, salt_mm_master_2],\n [salt_mm_minion_1, salt_mm_minion_2],\n event_listener,\n start_time,\n )\n\n # Each minion should only return to the second master\n assert len(minion_1_ret_events) == 1\n assert len(minion_2_ret_events) == 1\n assert minion_1_ret_events.pop().daemon_id == salt_mm_master_2.id\n assert minion_2_ret_events.pop().daemon_id == salt_mm_master_2.id", "def test_multihop_receiver_on_failure(vo, did_factory, replica_client, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. 
It will trigger a failure error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']\n\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()", "def test_reset_sequence_subclass_force(self):\n class SubTestObjectFactory(self.TestObjectFactory):\n pass\n\n o1 = SubTestObjectFactory()\n self.assertEqual(0, o1.one)\n\n o2 = SubTestObjectFactory()\n self.assertEqual(1, o2.one)\n\n SubTestObjectFactory.reset_sequence(force=True)\n o3 = SubTestObjectFactory()\n self.assertEqual(0, o3.one)\n\n # The master sequence counter has been reset\n o4 = self.TestObjectFactory()\n self.assertEqual(1, o4.one)", "def test_failedResetsInteraction(self):\n self.session.start_interaction()\n node = self.session.resolve(\"service1\", \"1.0\")\n self.session.fail_interaction(\"OHNO\")\n another_node = self.session.resolve(\"service2\", \"1.0\")\n self.session.finish_interaction()\n expected_failed = [self.disco.failurePolicy(node)]\n expected_succeeded = [self.disco.failurePolicy(another_node)]\n expected_nothing = list(self.disco.failurePolicy(n) for n in\n self.all_nodes if\n n.address not in [node.address, another_node.address])\n self.assertPolicyState(expected_failed, 0, 1)\n self.assertPolicyState(expected_succeeded, 1, 0)\n self.assertPolicyState(expected_nothing, 0, 0)", "def test_minion_reconnection_attempts(\n event_listener,\n salt_mm_master_1,\n salt_mm_master_2,\n salt_mm_minion_1,\n salt_mm_minion_2,\n mm_master_1_salt_cli,\n mm_master_2_salt_cli,\n caplog,\n):\n with salt_mm_master_2.stopped():\n with salt_mm_master_1.stopped():\n # Force the minion to restart\n salt_mm_minion_1.terminate()\n with caplog.at_level(logging.DEBUG):\n with pytest.raises(FactoryNotStarted):\n with salt_mm_minion_1.started(start_timeout=30):\n pass\n assert (\n \"Trying to connect to: tcp://{}:{}\".format(\n salt_mm_master_1.config[\"interface\"],\n 
salt_mm_master_1.config[\"ret_port\"],\n )\n in caplog.text\n )\n assert (\n \"Trying to connect to: tcp://{}:{}\".format(\n salt_mm_master_2.config[\"interface\"],\n salt_mm_master_2.config[\"ret_port\"],\n )\n in caplog.text\n )\n\n start_time = time.time()\n assert not salt_mm_minion_1.is_running()\n\n salt_mm_minion_1.start()\n\n assert salt_mm_minion_1.is_running()\n assert salt_mm_minion_2.is_running()\n\n start_events = event_listener.wait_for_events(\n [(salt_mm_master_1.id, \"salt/minion/{}/start\".format(salt_mm_minion_1.id))],\n timeout=30,\n after_time=start_time,\n )\n assert not start_events.missed\n assert len(start_events.matches) == 1\n\n start_time = time.time()\n _run_salt_cmds([mm_master_1_salt_cli], [salt_mm_minion_1, salt_mm_minion_2])\n\n # pylint: disable=unbalanced-tuple-unpacking\n minion_1_ret_events, minion_2_ret_events = _get_all_ret_events_after_time(\n [salt_mm_master_1, salt_mm_master_2],\n [salt_mm_minion_1, salt_mm_minion_2],\n event_listener,\n start_time,\n )\n\n # Each minion should only return to the first master\n assert len(minion_1_ret_events) == 1\n assert len(minion_2_ret_events) == 1\n assert minion_1_ret_events.pop().daemon_id == salt_mm_master_1.id\n assert minion_2_ret_events.pop().daemon_id == salt_mm_master_1.id\n\n start_events = event_listener.wait_for_events(\n [(salt_mm_master_2.id, \"salt/minion/{}/start\".format(salt_mm_minion_1.id))],\n timeout=30,\n after_time=start_time,\n )\n assert not start_events.missed\n assert len(start_events.matches) == 1\n\n with salt_mm_master_1.stopped():\n start_time = time.time()\n _run_salt_cmds([mm_master_2_salt_cli], [salt_mm_minion_1, salt_mm_minion_2])\n\n # pylint: disable=unbalanced-tuple-unpacking\n minion_1_ret_events, minion_2_ret_events = _get_all_ret_events_after_time(\n [salt_mm_master_1, salt_mm_master_2],\n [salt_mm_minion_1, salt_mm_minion_2],\n event_listener,\n start_time,\n )\n\n # Each minion should only return to the second master\n assert len(minion_1_ret_events) == 1\n assert len(minion_2_ret_events) == 1\n assert minion_1_ret_events.pop().daemon_id == salt_mm_master_2.id\n assert minion_2_ret_events.pop().daemon_id == salt_mm_master_2.id\n\n # Make sure minions work normally\n start_time = time.time()\n\n _run_salt_cmds(\n [mm_master_1_salt_cli, mm_master_2_salt_cli],\n [salt_mm_minion_1, salt_mm_minion_2],\n )\n\n # pylint: disable=unbalanced-tuple-unpacking\n minion_1_ret_events, minion_2_ret_events = _get_all_ret_events_after_time(\n [mm_master_1_salt_cli, mm_master_2_salt_cli],\n [salt_mm_minion_1, salt_mm_minion_2],\n event_listener,\n start_time,\n )\n\n assert len(minion_1_ret_events) == 2\n assert len(minion_2_ret_events) == 2", "def onSlaveLost(self):", "def test_minions_alive_with_no_master(\n grains,\n event_listener,\n salt_mm_failover_master_1,\n salt_mm_failover_master_2,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n):\n if grains[\"os_family\"] == \"Debian\" and grains[\"osmajorrelease\"] == 9:\n pytest.skip(\n \"Skipping on Debian 9 until flaky issues resolved. 
See issue #61749\"\n )\n start_time = time.time()\n with salt_mm_failover_master_1.stopped():\n with salt_mm_failover_master_2.stopped():\n # Make sure they had at least one chance to re-auth\n events = event_listener.wait_for_events(\n [\n (salt_mm_failover_minion_1.id, \"__master_disconnected\"),\n (salt_mm_failover_minion_2.id, \"__master_disconnected\"),\n ],\n timeout=salt_mm_failover_minion_1.config[\"master_alive_interval\"] * 4,\n after_time=start_time,\n )\n assert not events.missed\n assert salt_mm_failover_minion_1.is_running()\n assert salt_mm_failover_minion_2.is_running()\n\n start_time = time.time()\n\n event_patterns = [\n (\n salt_mm_failover_master_1.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_1.id),\n ),\n (\n salt_mm_failover_master_1.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_2.id),\n ),\n (\n salt_mm_failover_master_2.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_1.id),\n ),\n (\n salt_mm_failover_master_2.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_2.id),\n ),\n ]\n events = event_listener.wait_for_events(\n event_patterns,\n timeout=salt_mm_failover_minion_1.config[\"master_alive_interval\"] * 8,\n after_time=start_time,\n )\n\n assert len(events.matches) >= 2\n\n expected_tags = {\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_1.id),\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_2.id),\n }\n assert {event.tag for event in events} == expected_tags", "def test_minion_hangs_on_master_failure_50814(\n event_listener,\n salt_mm_master_1,\n salt_mm_master_2,\n salt_mm_minion_1,\n mm_master_2_salt_cli,\n):\n # Let's make sure everything works with both masters online\n event_count = 3\n while True:\n check_event_start_time = time.time()\n event_tag = \"myco/foo/bar/{}\".format(event_count)\n ret = mm_master_2_salt_cli.run(\n \"event.send\", event_tag, minion_tgt=salt_mm_minion_1.id\n )\n assert ret.returncode == 0\n assert ret.data is True\n # Let's make sure we get the event back\n expected_patterns = [\n (salt_mm_master_1.id, event_tag),\n (salt_mm_master_2.id, event_tag),\n ]\n matched_events = event_listener.wait_for_events(\n expected_patterns, after_time=check_event_start_time, timeout=30\n )\n assert matched_events.found_all_events, (\n \"Minion is not responding to the second master after the first one has\"\n \" gone. Check #50814 for details.\"\n )\n event_count -= 1\n if event_count <= 0:\n break\n time.sleep(0.5)\n\n def wait_for_minion(salt_cli, tgt, timeout=30):\n start = time.time()\n while True:\n ret = salt_cli.run(\n \"test.ping\", \"--timeout=5\", minion_tgt=tgt, _timeout=timeout\n )\n if ret.returncode == 0 and ret.data is True:\n break\n if time.time() - start > timeout:\n raise TimeoutError(\"Minion failed to respond top ping after timeout\")\n\n # Wait for the minion to re-connect so this test will not affect any\n # others.\n salt_mm_master_1.after_start(\n wait_for_minion, salt_mm_master_1.salt_cli(), salt_mm_minion_1.id\n )\n\n # Now, let's try this one of the masters offline\n with salt_mm_master_1.stopped():\n assert salt_mm_master_1.is_running() is False\n # Sending one event would be okay. 
It would hang after the second with one of the masters offline\n event_count = 1\n while True:\n check_event_start_time = time.time()\n event_tag = \"myco/foo/bar/{}\".format(event_count)\n ret = mm_master_2_salt_cli.run(\n \"event.send\", event_tag, minion_tgt=salt_mm_minion_1.id\n )\n assert ret.returncode == 0\n assert ret.data is True\n\n # Let's make sure we get the event back\n expected_patterns = [\n (salt_mm_master_2.id, event_tag),\n ]\n matched_events = event_listener.wait_for_events(\n expected_patterns, after_time=check_event_start_time, timeout=30\n )\n assert matched_events.found_all_events, (\n \"Minion is not responding to the second master(events sent: {}) after\"\n \" the first has gone offline. Check #50814 for details.\".format(\n event_count\n )\n )\n event_count += 1\n if event_count > 3:\n break\n time.sleep(0.5)", "def test_restore_peer_with_unknown_semaphore():\r\n # Restart Server without \"A\"\r\n with throttle_client(b\"[semaphores]\") as client:\r\n with pytest.raises(Exception, match=\"Unknown semaphore\"):\r\n # Bogus peer id, presumably from a previous run, before lock losts its state\r\n client.restore(\r\n peer_id=5, acquired={\"A\": 1}, expires_in=timedelta(minutes=1)\r\n )", "def test_stopped_second_master(\n event_listener,\n salt_mm_master_1,\n salt_mm_master_2,\n salt_mm_minion_1,\n salt_mm_minion_2,\n mm_master_1_salt_cli,\n):\n with salt_mm_master_2.stopped():\n start_time = time.time()\n\n _run_salt_cmds([mm_master_1_salt_cli], [salt_mm_minion_1, salt_mm_minion_2])\n\n # pylint: disable=unbalanced-tuple-unpacking\n minion_1_ret_events, minion_2_ret_events = _get_all_ret_events_after_time(\n [salt_mm_master_1, salt_mm_master_2],\n [salt_mm_minion_1, salt_mm_minion_2],\n event_listener,\n start_time,\n )\n\n # Each minion should only return to the first master\n assert len(minion_1_ret_events) == 1\n assert len(minion_2_ret_events) == 1\n assert minion_1_ret_events.pop().daemon_id == salt_mm_master_1.id\n assert minion_2_ret_events.pop().daemon_id == salt_mm_master_1.id", "def test_multihop_intermediate_replica_lifecycle(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock):\n src_rse1_name = 'XRD1'\n src_rse1_id = rse_core.get_rse_id(rse=src_rse1_name, vo=vo)\n src_rse2_name = 'XRD2'\n src_rse2_id = rse_core.get_rse_id(rse=src_rse2_name, vo=vo)\n jump_rse_name = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse_name, vo=vo)\n dst_rse_name = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse_name, vo=vo)\n\n all_rses = [src_rse1_id, src_rse2_id, jump_rse_id, dst_rse_id]\n did = did_factory.upload_test_file(src_rse1_name)\n\n # Copy replica to a second source. 
To avoid the special case of having a unique last replica, which could be handled in a special (more careful) way\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=src_rse2_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=src_rse2_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rse_core.set_rse_limits(rse_id=jump_rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=jump_rse_id, source='storage', used=1, free=0)\n try:\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n\n # Submit transfers to FTS\n # Ensure a replica was created on the intermediary host with epoch tombstone\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.SUBMITTED\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['tombstone'] == datetime(year=1970, month=1, day=1)\n assert replica['state'] == ReplicaState.COPYING\n\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # Fake an existing unused source with raking of 0 for the second source.\n # The ranking of this source should remain at 0 till the end.\n\n @transactional_session\n def __fake_source_ranking(*, session=None):\n models.Source(request_id=request['id'],\n scope=request['scope'],\n name=request['name'],\n rse_id=src_rse2_id,\n dest_rse_id=request['dest_rse_id'],\n ranking=0,\n bytes=request['bytes'],\n url=None,\n is_using=False). 
\\\n save(session=session, flush=False)\n\n __fake_source_ranking()\n\n # The intermediate replica is protected by its state (Copying)\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.COPYING\n\n # Wait for the intermediate replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # ensure tha the ranking was correct for all sources and intermediate rses\n assert __get_source(request_id=request['id'], src_rse_id=src_rse1_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=src_rse2_id, **did).ranking == 0\n # Only group_bulk=1 part of the path was submitted.\n # run submitter again to copy from jump rse to destination rse\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Wait for the destination replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses='test_container_xrd=True', exclude_rses=None)\n\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=jump_rse_id, **did)\n\n # 3 request: copy to second source + 2 hops (each separately)\n # Use inequalities, because there can be left-overs from other tests\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 3\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_common_submit_transfer_total') >= 3\n # at least the failed hop\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') > 0\n finally:\n\n @transactional_session\n def _cleanup_all_usage_and_limits(rse_id, *, session=None):\n session.query(models.RSELimit).filter_by(rse_id=rse_id).delete()\n session.query(models.RSEUsage).filter_by(rse_id=rse_id, source='storage').delete()\n\n _cleanup_all_usage_and_limits(rse_id=jump_rse_id)", "def test_rng_invalid_value(self):\n with pytest.raises(ValueError) as exc:\n check_random_state(\"oh_no_oh_no\")\n\n assert \"'oh_no_oh_no' cannot be used to seed\" in str(exc.value)", "def test_master(busname):\n check_master_not_running()\n\n process = subprocess.Popen(['rebus_master', busname],\n stderr=subprocess.PIPE, bufsize=0)\n # wait for master bus to be ready\n # TODO look into race condition. 
Another SIGINT handler?\n time.sleep(2)\n output = process.stderr.read(1)\n process.send_signal(signal.SIGINT)\n process.wait()\n assert process.returncode == 0, output + process.stderr.read()", "def test_reset_confirmation_failure(self):\n self._create_program_and_course_enrollment(self.program_uuid, self.user)\n\n with pytest.raises(CommandError):\n with self._replace_stdin('no'):\n call_command(self.command, self.program_uuid)\n\n self._validate_enrollments_count(1)", "def test_wrong_mode(self):\n self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def test_reparent_doesnt_hang_if_master_fails(self):\n utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])\n\n # create the database so vttablets start, as they are serving\n tablet_62344.create_db('vt_test_keyspace')\n tablet_62044.create_db('vt_test_keyspace')\n tablet_41983.create_db('vt_test_keyspace')\n tablet_31981.create_db('vt_test_keyspace')\n\n # Start up vttablet\n for t in [tablet_62344, tablet_62044, tablet_31981, tablet_41983]:\n t.init_tablet('replica', 'test_keyspace', '0', start=True,\n wait_for_start=False)\n\n # wait for all tablets to start\n for t in [tablet_62344, tablet_62044, tablet_31981, tablet_41983]:\n t.wait_for_vttablet_state('NOT_SERVING')\n\n # Force the slaves to reparent. Will create the _vt.reparent_journal table.\n utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',\n tablet_62344.tablet_alias])\n utils.validate_topology(ping_tablets=True)\n\n # Change the schema of the _vt.reparent_journal table, so that\n # inserts into it will fail. That will make the master fail.\n tablet_62344.mquery('_vt', 'ALTER TABLE reparent_journal'\n ' DROP COLUMN replication_position')\n\n # Perform a planned reparent operation, the master will fail the\n # insert. The slaves should then abort right away. 
If this fails,\n # the test will timeout.\n _, stderr = utils.run_vtctl(['-wait-time', '3600s',\n 'PlannedReparentShard',\n '-keyspace_shard', 'test_keyspace/0',\n '-new_master', tablet_62044.tablet_alias],\n expect_fail=True)\n self.assertIn('master failed to PopulateReparentJournal, canceling slaves',\n stderr)\n\n # Clean up the tablets.\n tablet.kill_tablets([tablet_62344, tablet_62044, tablet_41983,\n tablet_31981])", "def test_failover_to_second_master(\n event_listener,\n salt_mm_failover_master_1,\n salt_mm_failover_master_2,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n run_salt_cmds,\n):\n event_patterns = [\n (\n salt_mm_failover_master_2.id,\n \"salt/minion/{}/start\".format(salt_mm_failover_minion_1.id),\n )\n ]\n\n start_time = time.time()\n with salt_mm_failover_master_1.stopped():\n assert salt_mm_failover_master_2.is_running()\n # We need to wait for them to realize that the master is not alive\n # At this point, only the first minion will need to change masters\n events = event_listener.wait_for_events(\n event_patterns,\n timeout=salt_mm_failover_minion_1.config[\"master_alive_interval\"] * 4,\n after_time=start_time,\n )\n\n assert salt_mm_failover_minion_1.is_running()\n assert not events.missed\n\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns", "def test_reset_unmask(self):\r\n module = CapaFactory.create(xml=self.common_shuffle_xml)\r\n get_request_dict = {CapaFactory.input_key(): 'mask_0'}\r\n module.check_problem(get_request_dict)\r\n # On reset, 'old_state' should use unmasked names\r\n with patch.object(module.runtime, 'track_function') as mock_track_function:\r\n module.reset_problem(None)\r\n mock_call = mock_track_function.mock_calls[0]\r\n event_info = mock_call[1][1]\r\n self.assertEquals(mock_call[1][0], 'reset_problem')\r\n self.assertEquals(event_info['old_state']['student_answers'][CapaFactory.answer_key()], 'choice_2')\r\n self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])", "def test_rejoin_after_error(self):\n client = self.mock_client([])\n coord = self.make_coordinator(client)\n coord.on_group_leave = Mock()\n\n def check(rejoin_needed, exc):\n coord._rejoin_needed = False\n coord._rejoin_wait_dc = None\n for call in client.reactor.getDelayedCalls():\n call.cancel()\n client.reset_consumer_group_metadata.reset_mock()\n coord.on_group_leave.reset_mock()\n\n coord.rejoin_after_error(Failure(exc))\n if rejoin_needed:\n self.assertEqual(coord._rejoin_needed, True)\n assert_delayed_calls(1, client)\n else:\n self.assertEqual(coord._rejoin_needed, False)\n assert_delayed_calls(0, client)\n self.assertEqual(coord._rejoin_wait_dc, None)\n\n check(True, RebalanceInProgress())\n check(True, CoordinatorNotAvailable())\n client.reset_consumer_group_metadata.assert_any_call(coord.group_id)\n check(True, IllegalGeneration())\n coord.on_group_leave.assert_any_call()\n check(True, InvalidGroupId())\n coord.on_group_leave.assert_any_call()\n check(True, InconsistentGroupProtocol())\n check(True, RequestTimedOutError())\n coord.on_group_leave.assert_any_call()\n check(True, UnknownError())\n\n coord._stopping = True\n check(False, defer.CancelledError())\n coord._stopping = 
False\n\n start_d = coord.start()\n start_d.addErrback(lambda f: None)\n check(False, ValueError())\n coord.on_group_leave.assert_any_call()\n self.successResultOf(start_d)", "def slaveof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SLAVEOF is not supported in cluster mode\")", "def test_fts_non_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, caches_mock, metrics_mock):\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. It will trigger an non-recoverable error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, **did)\n assert 'Unused hop in multi-hop' in request['err_msg']\n assert request['state'] == RequestState.FAILED\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.FAILED\n assert request['attributes']['source_replica_expression'] == src_rse\n\n # Each hop is a separate transfer, which will be handled by the poller and marked as failed\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED", "def test_ESME_RREPLACEFAIL(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n yield self.add(self.defaultConfig)\n yield self.start(self.defaultConfig.id)\n\n # Wait for 'BOUND_TRX' state\n yield waitFor(2)\n\n # Send submit_sm\n SentSubmitSmPDU = copy.copy(self.SubmitSmPDU)\n SentSubmitSmPDU.params['short_message'] = 'test_error: ESME_RREPLACEFAIL'\n msgid = yield self.submit_sm(self.defaultConfig.id, self.SubmitSmPDU, self.SubmitSmBill.user.uid)\n\n # Wait\n yield waitFor(70)\n\n yield self.stop(self.defaultConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n # By default, ESME_RREPLACEFAIL is not retried !\n self.assertEqual(len(receivedSubmits), 1)", "def test_finishedResetsInteraction(self):\n self.session.start_interaction()\n node = self.session.resolve(\"service1\", \"1.0\")\n 
self.session.fail_interaction(\"OHNO\")\n self.session.finish_interaction()\n\n self.session.start_interaction()\n # Resolve same node again:\n while True:\n another_node = self.session.resolve(\"service1\", \"1.0\")\n if node.address == another_node.address:\n break\n self.session.finish_interaction()\n\n self.assertPolicyState([self.disco.failurePolicy(node)], 1, 1)", "def test_shutdown(self):\n server = self._server(None)\n server.bio_shutdown()\n with pytest.raises(Error) as err:\n server.recv(1024)\n # We don't want WantReadError or ZeroReturnError or anything - it's a\n # handshake failure.\n assert type(err.value) in [Error, SysCallError]", "def master(timeout=5): # will only wait 5 seconds for slave to respond\n # set address of RX node into a TX pipe\n nrf.open_tx_pipe(address)\n # ensures the nRF24L01 is in TX mode\n nrf.listen = 0\n\n # on data sent test\n print(\"Pinging: enslaved nRF24L01 without auto_ack\")\n nrf.write(b'ping')\n time.sleep(0.00001) # mandatory 10 microsecond pulse starts transmission\n nrf.ce_pin.value = 0 # end 10 us pulse; now in active TX\n while not nrf.irq_ds and not nrf.irq_df:\n nrf.update() # updates the current status on IRQ flags\n if nrf.irq_ds and not irq.value:\n print('interrupt on data sent successful')\n else:\n print(\n 'IRQ on data sent is not active, check your wiring and call interrupt_config()')\n nrf.clear_status_flags() # clear all flags for next test\n\n # on data ready test\n nrf.listen = 1\n nrf.open_rx_pipe(0, address)\n start = time.monotonic()\n while not nrf.any() and time.monotonic() - start < timeout: # wait for slave to send\n pass\n if nrf.any():\n print('Pong received')\n if nrf.irq_dr and not irq.value:\n print('interrupt on data ready successful')\n else:\n print(\n 'IRQ on data ready is not active, check your wiring and call interrupt_config()')\n nrf.flush_rx()\n else:\n print('pong reception timed out!. make sure to run slave() on the other nRF24L01')\n nrf.clear_status_flags() # clear all flags for next test\n\n # on data fail test\n nrf.listen = False # put the nRF24L01 is in TX mode\n # the writing pipe should still be open since we didn't call close_tx_pipe()\n nrf.flush_tx() # just in case the previous \"on data sent\" test failed\n nrf.write(b'dummy') # slave isn't listening anymore\n time.sleep(0.00001) # mandatory 10 microsecond pulse starts transmission\n nrf.ce_pin.value = 0 # end 10 us pulse; now in active TX\n while not nrf.irq_ds and not nrf.irq_df: # these attributes don't update themselves\n nrf.update() # updates the current status on all IRQ flags (irq_dr, irq_df, irq_ds)\n if nrf.irq_df and not irq.value:\n print('interrupt on data fail successful')\n else:\n print(\n 'IRQ on data fail is not active, check your wiring and call interrupt_config()')\n nrf.clear_status_flags() # clear all flags for next test", "def test_recheck_fails(self):\n raise NotImplementedError" ]
[ "0.6471116", "0.61706674", "0.60744417", "0.58116317", "0.5781919", "0.57753354", "0.57563055", "0.5733071", "0.57308346", "0.56739676", "0.56533957", "0.565217", "0.5629395", "0.560299", "0.55999875", "0.5592068", "0.5589435", "0.55867213", "0.5539783", "0.5514928", "0.5500118", "0.5495716", "0.5477969", "0.5465307", "0.54495925", "0.5427659", "0.5425135", "0.5424967", "0.5422586", "0.5402643" ]
0.6292787
1
Tests that reset_sequence(force=True) works.
def test_reset_sequence_subclass_force(self): class SubTestObjectFactory(self.TestObjectFactory): pass o1 = SubTestObjectFactory() self.assertEqual(0, o1.one) o2 = SubTestObjectFactory() self.assertEqual(1, o2.one) SubTestObjectFactory.reset_sequence(force=True) o3 = SubTestObjectFactory() self.assertEqual(0, o3.one) # The master sequence counter has been reset o4 = self.TestObjectFactory() self.assertEqual(1, o4.one)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reset(self):\n ran = []\n def foo():\n ran.append(None)\n\n c = task.Clock()\n lc = TestableLoopingCall(c, foo)\n lc.start(2, now=False)\n c.advance(1)\n lc.reset()\n c.advance(1)\n self.assertEqual(ran, [])\n c.advance(1)\n self.assertEqual(ran, [None])", "def test_sequence(self):\n seq_name = 'test_seq'\n\n with self.dbh.sequence_recreate(seq_name):\n try:\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 1)\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 2)\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 3)\n except Exception:\n self.dbh.rollback()\n raise", "def testReset(self):\n \n clk = Signal(0)\n rst = Signal(1)\n clock_gen = ClkDriver(clk, period=4)\n \n out = Signal(intbv(0)[4:])\n counter = Counter(out, clk, rst)\n \n def test():\n for i in range(200):\n # count up to 9 then reset\n if int(out) == 9:\n rst.next = 0\n yield delay(1)\n self.assertEqual(int(out), 0)\n # turn off reset next time\n else:\n rst.next = 1\n yield delay(1)\n \n check = test()\n sim = Simulation(counter, clock_gen, check)\n sim.run(400, quiet=1)", "def hard_reset() -> NoReturn:", "def soft_reset() -> None:\n ...", "def test_reset(sim):\n repeats = 3\n dt = 1\n sim.setup(timestep=dt, min_delay=dt)\n p = sim.Population(1, sim.IF_curr_exp(i_offset=0.1))\n p.record('v')\n\n for i in range(repeats):\n sim.run(10.0)\n sim.reset()\n data = p.get_data(clear=False)\n sim.end()\n\n assert len(data.segments) == repeats\n for segment in data.segments[1:]:\n assert_array_almost_equal(segment.analogsignals[0],\n data.segments[0].analogsignals[0], 10)", "def soft_reset():", "def reset(self):\n raise AssertionError(\"Reset function not implemented\")", "def test_reset_deterministic(self):\n # For statevector output we can combine deterministic and non-deterministic\n # count output circuits\n shots = 100\n circuits = ref_reset.reset_circuits_deterministic(final_measure=True)\n targets = ref_reset.reset_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_reset(self, scml_system):\n scml_system._t = 12\n scml_system._k = 33\n state_space = scml_system.state_space\n state_positions = scml_system.state_positions\n initial_state = scml_system.reset()\n target = np.array([0, 0, 0, 0, 0, 0, 560]) / scml_system.limits\n assert np.all(initial_state == target), 'Initial states of the system are incorrect'\n assert scml_system._t == 0, 'Time of the system was not set to zero after reset'\n assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'\n assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \\\n == scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\\\n 'The reset was not passed to all components of the SCMLSystem'\n assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'\n assert all(scml_system._ode_solver.y == np.zeros_like(\n scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float\n )), ' The ode solver was not reset correctly'", "def reset(self):\n print('call reset()')\n self.cur = 0\n if self.shuffle:\n random.shuffle(self.seq)", "def reset() -> None:\n ...", "def reset():\n return True", "def ClearSequence(self):\r\n r = CALL('ClearSequence',self)\r\n return self.CheckForSuccessError(r)", "def testSeq(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n 
self.assertEqual(\n 'apple',\n self.mr.seq\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )", "def test_reset_nondeterministic(self):\n # For statevector output we can combine deterministic and non-deterministic\n # count output circuits\n shots = 2000\n circuits = ref_reset.reset_circuits_nondeterministic(final_measure=True)\n targets = ref_reset.reset_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def reset():\n pass", "def reset():\n pass", "def test_reset(self):\n\t\tfor AI in self.list_of_AIs:\n\t\t\tAI.reset()", "def sequence_reset(model):\n pk_field, table = pk_sequence_get(model)\n if not pk_field:\n return\n\n if connection.vendor == 'postgresql':\n reset = \"\"\"\n SELECT\n setval(\n pg_get_serial_sequence('{table}', '{column}'),\n coalesce(max({column}),0) + 1,\n false\n )\n FROM {table}\n \"\"\"\n elif connection.vendor == 'sqlite':\n reset = \"\"\"\n UPDATE sqlite_sequence\n SET seq=(SELECT max({column}) from {table})\n WHERE name='{table}'\n \"\"\"\n elif connection.vendor == 'mysql':\n cursor = connection.cursor()\n cursor.execute(\n 'SELECT MAX({column}) + 1 FROM {table}'.format(\n column=pk_field, table=table\n )\n )\n result = cursor.fetchone()[0] or 0\n reset = 'ALTER TABLE {table} AUTO_INCREMENT = %s' % result\n\n connection.cursor().execute(\n reset.format(column=pk_field, table=table)\n )", "def test_reset_reset(self):\n check_attr(self.o, 'reset')\n self.o.reset()\n self.subtest_someAgents(self.o, 2, 10)\n _0 = self.patch_agent_reset(self.o)\n # Démarrage des patches et stockage des mocks\n _1 = [_.start() for _ in _0 ]\n self.assertEqual(sum([_.call_count for _ in _1]), 0)\n self.o.reset()\n self.assertEqual(sum([_.call_count for _ in _1]), len(_0), \"individual calls expected\")\n # Fin du patching\n # for _ in _0 : _.stop()\n hum.patch.stopall()", "def resetSequence(self, **kwargs):\n self.actionSequence = []\n self.baxter.mm.changeMenuTitle(\"Current sequence: %s \" % str(self.actionSequence))", "def _reset(self) -> ts.TimeStep:", "def reset():", "def reset():", "def reset():", "def test_reset(self):\n p1 = self.player()\n p1.reset()\n self.assertEqual(p1.history, [])\n self.assertEqual(p1.genome[0], C)", "def reset(self):\n raise NotImplementedError", "def test_reset_with_clear(sim):\n repeats = 3\n dt = 1\n sim.setup(timestep=dt, min_delay=dt)\n p = sim.Population(1, sim.IF_curr_exp(i_offset=0.1))\n p.record('v')\n\n data = []\n for i in range(repeats):\n sim.run(10.0)\n data.append(p.get_data(clear=True))\n sim.reset()\n\n sim.end()\n\n for rec in data:\n assert len(rec.segments) == 1\n assert_arrays_almost_equal(rec.segments[0].analogsignals[0],\n data[0].segments[0].analogsignals[0], 1e-11)", "def testSeqs(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n self.assertEqual(\n ['apple', 'banana'],\n self.mr.seqs\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n [],\n self.mr.seqs\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )" ]
[ "0.6554954", "0.6521152", "0.64447737", "0.6432272", "0.6377116", "0.63428116", "0.6265255", "0.62649095", "0.62627584", "0.62083524", "0.6208312", "0.6206979", "0.6185866", "0.6146366", "0.6129778", "0.6064196", "0.58949924", "0.58949924", "0.5858686", "0.58486927", "0.58481926", "0.57995427", "0.5786953", "0.57422495", "0.57422495", "0.57422495", "0.5738277", "0.57372963", "0.57364774", "0.5729599" ]
0.7042529
0
Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.
def queryset(self, request, queryset): # Compare the requested value (either '80s' or 'other') # to decide how to filter the queryset. if self.value() is None: return queryset.all() return queryset.filter(firm__pk=self.value())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value():\n return queryset.filter(state_pol=self.value())", "def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n return queryset.filter(data__qg_location__0__country__icontains=self.value())", "def queryset(self, request, queryset):\r\n # Compare the requested value to decide how to filter the queryset.\r\n if self.value():\r\n return queryset.filter(parent_id=self.value())\r\n return queryset", "def get_queryset(self):\n qs = super().get_queryset()\n search_value = self.request.GET.get('search_box')\n\n if search_value is not None:\n qs = qs.search_by(search_value)\n\n return qs", "def queryset(self, request, queryset):\n\n return (\n queryset if self.value() is None\n else queryset.filter(instrument__id=self.value())\n )", "def queryset(self, request, queryset):\n\n print(\"queryset: self.value() = {} with type = {}\".format(self.value(), type(self.value())))\n\n filtered_pub_ids = []\n\n for pub in Publication.objects.all():\n print(\"queryset: pub.book_title_short.lower() = {} self.value() = {}\".format(pub.book_title_short.lower(), self.value()))\n if self.value() is None:\n filtered_pub_ids.append(pub.id)\n elif pub.book_title_short != None and self.value().lower() in pub.book_title_short.lower():\n filtered_pub_ids.append(pub.id)\n\n return queryset.filter(id__in=filtered_pub_ids)", "def queryset(self, request, queryset):\n\n print(\"queryset: self.value() = {} with type = {}\".format(self.value(), type(self.value())))\n\n filtered_person_ids = []\n for person in Person.objects.all():\n cur_position = person.get_latest_position()\n if self.value() is None:\n filtered_person_ids.append(person.id)\n elif cur_position is not None and cur_position.title == self.value() or \\\n cur_position is None and self.value() == Position.UNKNOWN:\n filtered_person_ids.append(person.id)\n\n return queryset.filter(id__in=filtered_person_ids)", "def queryset(self, request, queryset):\n if self.value() == \"animals\":\n return queryset.animals()\n if self.value() == \"webelos\":\n return queryset.webelos()", "def query(self, queryString, value):\n return", "def queryset(self, request, queryset):\n\n print(\"queryset: self.value() = {} with type = {}\".format(self.value(), type(self.value())))\n\n filtered_pub_ids = []\n for pub in Publication.objects.all():\n if self.value() is None:\n filtered_pub_ids.append(pub.id)\n elif pub.pub_venue_type == self.value():\n filtered_pub_ids.append(pub.id)\n\n return queryset.filter(id__in=filtered_pub_ids)", "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "def queryset(self, request, queryset):\n\n print(\"queryset: self.value() = {} with type = {}\".format(self.value(), type(self.value())))\n\n # Compare the requested value (either True or False)\n # to decide how to filter the queryset.\n\n filtered_person_ids = []\n for person in Person.objects.all():\n if person.is_current_member() is True:\n print(\"{} is_current_member(): {} | self.value(): {} | equals? 
{} | type(member): {} | type(self.value): {}\".format(person.get_full_name(),\n person.is_current_member(), self.value(),\n person.is_current_member() is self.value(),\n type(person.is_current_member()),\n type(self.value())))\n if person.is_current_member() is True and self.value() is None:\n filtered_person_ids.append(person.id)\n elif person.is_alumni_member() is True and person.is_current_member() is False and self.value() == \"past_member\":\n filtered_person_ids.append(person.id)\n elif person.is_current_collaborator() is True and self.value() == \"current_collaborator\":\n filtered_person_ids.append(person.id)\n elif person.is_past_collaborator() is True and self.value() == \"past_collaborator\":\n filtered_person_ids.append(person.id)\n elif person.is_current_member() is False and person.is_alumni_member() is False and\\\n person.is_current_collaborator() is False and person.is_past_collaborator() is False and\\\n self.value() == \"other\":\n filtered_person_ids.append(person.id)\n elif self.value() == \"all\":\n filtered_person_ids.append(person.id)\n\n return queryset.filter(id__in = filtered_person_ids)", "def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.exclude(moyen_id=24)\n if self.value() == '0':\n return queryset.filter(moyen_id=24)", "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n\n if self.value() == 'passed':\n return queryset.filter(renewal_date__lt=datetime.datetime.now())\n if self.value() == 'today':\n return queryset.filter(renewal_date__gte=datetime.datetime.now(),\n renewal_date__lte=datetime.datetime.now() + datetime.timedelta(days=1))\n if self.value() == 'week':\n return queryset.filter(renewal_date__gte=datetime.datetime.now() + datetime.timedelta(days=1),\n renewal_date__lte=datetime.datetime.now() + datetime.timedelta(days=7))\n if self.value() == 'month':\n return queryset.filter(renewal_date__gte=datetime.datetime.now() + datetime.timedelta(days=7),\n renewal_date__lte=datetime.datetime.now() + datetime.timedelta(days=31))", "def queryset(self, request, queryset):\n\n # TODO: our current credentials logic in merchants/models is confusing,\n # this may change when that improves\n if self.value() == 'none':\n return queryset.filter(basecredential=None)\n if self.value() == 'some':\n return queryset.filter(basecredential__isnull=False).distinct()\n if self.value() == 'some_valid':\n return queryset.filter(basecredential__isnull=False, basecredential__last_failed_at=None).distinct()\n if self.value() == 'some_invalid':\n return queryset.filter(basecredential__isnull=False, basecredential__last_failed_at__isnull=False).distinct()", "def query(self, value):\n \n self._query = str(value) if value else None", "def filter_query(self, request, query, view):\n\n if not request.params:\n return query\n\n querystring_params = self.parse_query_string(request.params)\n query, filter_list = self.build_filter_list(querystring_params, query, view)\n\n return self.apply_filter(query, filter_list)", "def get_query(self, q, request):\r\n \r\n return self.model.objects.filter(filter__icontains=q).order_by('filter')[:50]", "def get_queryset(self):\n\n # Get the keyword URL parameter value. 
Return empty string if the keyword is empty.\n # Filter the queryset based on the value of keyword and the queryset object's title.\n keyword = self.request.query_params.get('keyword', '')\n queryset = self.queryset.filter(title__icontains=keyword)\n\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n\n #return queryset\n return queryset.order_by('-first_published_at')", "def filter_status(self, queryset, name, value):\n\n return queryset.filter(status=value)", "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value() == 'yes':\n return queryset.exclude(image__exact='')\n if self.value() == 'no':\n return queryset.filter(image__exact='')", "def filter(self, request, queryset): # NOQA: A003\n pro = request.GET.get(\"production\")\n\n if pro:\n queryset = queryset.filter(production=pro)\n\n queryset = queryset.prefetch_related(\"production\")\n\n return queryset", "def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)", "def get_queryset(self):\n queryset = Food.objects.all()\n name = self.request.query_params.get('name', None)\n ndb_no = self.request.query_params.get('ndb_no', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n elif ndb_no is not None:\n queryset = queryset.filter(ndb_no=ndb_no)\n return queryset", "def _custom_filter(self, query):\r\n return query", "def __get__(self, model_instance, model_class):\r\n if model_instance is not None:\r\n query = Query(self.__model)\r\n if type(self.__property) == list:\r\n props = []\r\n for prop in self.__property:\r\n props.append(\"%s =\" % prop)\r\n return query.filter(props, model_instance)\r\n else:\r\n return query.filter(self.__property + ' =', model_instance)\r\n else:\r\n return self", "def queryset(self, request, queryset):\n v = self.value()\n if v == \"sent\":\n return queryset.filter(sent__isnull=False)\n elif v == \"unsent\":\n return queryset.filter(sent__isnull=True)\n return queryset", "def filter_by_query_params(self, request):\n items = self\n project = request.GET.get('project', None)\n customer = request.GET.get('customer', None)\n company = request.GET.get('company', None)\n engineer = request.GET.get('engineer', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n if project:\n items = items.filter(project=project).distinct()\n if engineer:\n items = items.filter(lead__sales_engineer=engineer).distinct()\n if customer:\n items = items.filter(lead__customer=customer).distinct()\n if company:\n items = items.filter(company=company).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(erp_reference__icontains=str)).distinct()\n return items", "def apply_search(self, queryset):\n self.form = self.form_class(self.request.GET)\n\n if not self.form.is_valid():\n return queryset\n\n data = self.form.cleaned_data\n\n if data.get('upc'):\n # If there's an exact UPC match, it returns just the matched\n # product. 
Otherwise does a broader icontains search.\n qs_match = queryset.filter(upc=data['upc'])\n if qs_match.exists():\n queryset = qs_match\n else:\n queryset = queryset.filter(upc__icontains=data['upc'])\n\n if data.get('title'):\n queryset = queryset.filter(title__icontains=data['title'])\n\n if data.get('product_class'):\n queryset = queryset.filter(product_class=data['product_class'])\n\n return queryset", "def queryset(self, request: HttpRequest, queryset: QuerySet) -> QuerySet:\n return {\n 'superuser': queryset.filter(is_superuser=True),\n 'staff': queryset.filter(is_staff=True),\n 'scanlator': queryset.filter(groups__name='Scanlator'),\n 'regular': queryset.exclude(is_staff=True)\n }.get(self.value() or '', queryset)" ]
[ "0.7419924", "0.7245252", "0.72038484", "0.70902455", "0.70488775", "0.6975942", "0.67827517", "0.6762181", "0.67344034", "0.6681628", "0.66560334", "0.6588891", "0.6470637", "0.64161074", "0.64019424", "0.6383527", "0.6369606", "0.63506347", "0.63492984", "0.62763286", "0.62677187", "0.6225198", "0.62184376", "0.6207633", "0.61965823", "0.61914515", "0.61890084", "0.61832815", "0.61784565", "0.6177561" ]
0.7539923
0
inserts player name and score to top5 db
def insert_player(self, name, score): command = "UPDATE %s " % self.table_name_players command += "SET name_player = '%s', score = %d " % (name, score) command += "WHERE name_player = ( " command += "SELECT name_player " command += "FROM %s " % self.table_name_players command += "WHERE score < %d " % score command += "ORDER BY score ASC " command += "LIMIT 1 );" self.cursor.execute(command) self.conn.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_score(self, player, level, score):\n cursor = self._connection.cursor()\n command = 'INSERT INTO scores (player, level, score) VALUES (?, ?, ?)'\n cursor.execute(command, [player, level, score])\n self._connection.commit()", "def registerPlayer(name):\n db, cursor = connect()\n cursor.execute(\"INSERT INTO players (name, wins, matches) VALUES (%s, 0, 0)\" , (name, ) ) \n db.commit() \n db.close()", "def table(score):\r\n\t\r\n\tprint(\"Congratulation!!! You have enough points to get into Best Results Board\")\r\n\tplayer_score = (input(\"\\nPlease enter your name: \")) + \":\" + str(score)\r\n\t\r\n\tf = open(\"pickle_board.txt\", 'a')\r\n\tf.write(player_score)\r\n\tf.close()\r\n\tprint(\"Your score was added!\")", "def add_score(self, data):\n # sql_score_add = \"\"\"update $s SET\n for team_id, score in data.items():\n if int(score) in range(0, 14):\n column = \"score_\" + (score)\n sql_cmd = (\"UPDATE %s SET %s=1 WHERE team_id=%s\" % (self.dbtable, column, team_id))\n print sql_cmd\n try:\n self._db_cur.execute(sql_cmd)\n self._db_conn.commit()\n except sqlite3.Error as er:\n print er", "def registerPlayer(name):\n # cn=name\n # title='playerName'\n # data=[title,cn]\n DB = connect()\n c = DB.cursor()\n #cur.execute(\"INSERT INTO test (num, data) VALUES (%s, %s)\",*/\n #c.execute(\"INSERT INTO tournament (playerName) values ('al pachino2') \")\n #c.execute(\"INSERT INTO tournament name values (%s)\", name)\n #cur.execute('INSERT INTO %s (day, elapsed_time, net_time, length, average_speed, geometry) VALUES (%s, %s, %s, %s, %s, %s)', (escaped_name, day, ))\n c.execute(\"INSERT INTO tournament VALUES (%s)\", (name,))\n DB.commit()\n DB.close()", "def addTeam(teaminfo):\r\n team, auto, rc_comp, spirit_comp, video_comp = teaminfo\r\n if team_exists(team): # Team already exists\r\n print(\"Team\", team, \"already exists.\")\r\n else:\r\n with sqlite3.connect(database_file) as conn:\r\n #(teamname TEXT, autonomous TEXT, rc TEXT, spirit INT, video INT)\r\n conn.execute(\"INSERT INTO scores(teamname, autonomous, rc, spirit, video)\\\r\n VALUES('{0}', '{1}', '{2}', '{3}', '{4}');\".format(team, auto, rc_comp, spirit_comp, video_comp))", "def write_to_db(name: str, score: float):\n score = float(score)\n\n with open('db.json') as fo:\n data = loads(fo.read())\n\n data[name] = score\n\n with open('db.json', 'w') as fo:\n fo.write(dumps(data))", "def add_score(self, difficulty, time, name):\n self.database[difficulty].insert_one({'time': time, 'name': name})", "def registerPlayer(name):\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes insert query which takes the name variable passed in arguments\n # of this method and adds a new player record to PLAYER table where the\n # ID is generated automatically for new created record\n c.execute(\"INSERT INTO PLAYER VALUES (DEFAULT, %s)\", (name,))\n # commits the changes performed on PLAYER table\n # after insert statement executes\n conn.commit()\n # closes the connection to tournament database\n conn.close()", "def update_highscoretable(self, points : int):\r\n\r\n score_is_higher = False\r\n\r\n # Check if the Player's Name already is in the file\r\n if self.name in self.names: \r\n\r\n # When the Score of the current Player is less then the Score he already reached, set Flag\r\n if points <= self.NAME_HIGHSCORE_TABLE[self.name]:\r\n score_is_higher = True\r\n\r\n # Else put the curent Players Highscore out of the table\r\n else:\r\n 
self.NAME_HIGHSCORE_TABLE.pop(self.name)\r\n\r\n # When the current Player wasn't in the table already\r\n else:\r\n\r\n # When highscore_counter < 5 remove the lowest score from the highscore table\r\n if self.highscore_counter < 5:\r\n self.NAME_HIGHSCORE_TABLE.pop(self.names[-1])\r\n\r\n # When the current Player has beaten his own highscore, or he got a highscore better than one of the top 5, open and write name and reached score in the file\r\n if not score_is_higher:\r\n f = open('.\\\\etc\\\\Name_Highscore.txt', \"w\")\r\n counter = 0\r\n\r\n # Name and Score will be written in the .txt file in decreasing order (by points)\r\n for name, score in self.NAME_HIGHSCORE_TABLE.items():\r\n \r\n if self.highscore_counter == counter:\r\n if points > score:\r\n f.write(self.name + ' ' + str(points) + '\\n')\r\n else:\r\n f.write(self.name + ' ' + str(score) + '\\n')\r\n f.write(name + ' ' + str(score) + '\\n')\r\n counter += 1\r\n if self.highscore_counter == 4:\r\n f.write(self.name + ' ' + str(points) + '\\n')\r\n f.close()", "def reportMatch(winner, loser):\n db = connect()\n db_cursor = db.cursor()\n db_cursor.execute(\"INSERT INTO matches(winner, loser) VALUES(%s,%s)\", (winner,loser))\n db.commit()\n db.close()", "def reportMatch(winner, loser):\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"INSERT INTO match (winner, loser) VALUES (%s,%s)\", (winner, loser))\n dbConn.commit()\n dbConn.close()", "def add_to_rating_db(table, user_list):\n client, db = open_db_connection()\n db[table].remove()\n for user in user_list:\n net_id = user.replace(\"\\r\\n\", \"\").encode(\"utf-8\")\n db[table].insert({\"ta\": net_id, \"_id\": net_id, \"score\":random.random()*5})\n close_db_connection(client)", "def insert_player(document):\n players_col.insert_one(document)", "def reportMatch(winner, loser):\n executeNonQuery(\"INSERT INTO matches (winner, loser) VALUES (%s, %s);\", (winner,loser))", "def updateScore(team, competition, newscore):\r\n if team_exists(team):\r\n with sqlite3.connect(database_file) as conn:\r\n conn.execute(\"UPDATE scores SET {0} = '{1}'\\\r\n WHERE teamname = '{2}';\".format(competition, newscore, team))\r\n else:\r\n print(\"Invalid team name\")", "def add_stats(self, game):\n with self.con:\n cursor = self.con.cursor()\n queryString = \"INSERT INTO stats(game_id, team_id\"\n homeValueString = \" VALUES(?, ?\"\n awayValueString = homeValueString\n \n homeValues = [game.game_id, self.teams[game.home_team]]\n awayValues = [game.game_id, self.teams[game.away_team]]\n \n for key in game.home_stats:\n\n queryString += ', '\n awayValueString += ', '\n homeValueString += ', '\n \n homeValueString += '?'\n awayValueString += '?'\n \n homeValues.append(game.home_stats[key])\n awayValues.append(game.away_stats[key])\n \n queryString += statsToSql[key]\n \n queryString += ')'\n homeValueString += ')' \n awayValueString += ')'\n \n \n cursor.execute(queryString + homeValueString, homeValues)\n cursor.execute(queryString + awayValueString, awayValues)", "def report_match(winner, loser):\n\n DB = connect()\n c = DB.cursor()\n c.execute(\"INSERT INTO matches (winner, loser) VALUES (%s,%s);\", (winner, loser))\n DB.commit()\n DB.close()", "def reportMatch(winner, loser):\n conn, c = connect()\n\n q = \"INSERT INTO MATCHES VALUES (default, %s, %s);\"\n data = ((winner,), (loser,))\n c.execute(q, data)\n conn.commit()\n conn.close()", "def insert_post_into_db(\n cursor,\n url=\"\",\n author=\"\",\n score=0,\n created_time=0,\n subreddit_url=\"\"):\n sql = '''\n INSERT INTO 
reddit_hot_posts(\n url,\n author, \n subreddit_url,\n created_time,\n score\n ) VALUES (\n %s,\n %s,\n %s,\n %s,\n %s\n ) ON DUPLICATE KEY UPDATE\n score = VALUES(score)\n '''\n top = cursor.execute(\n sql,\n (url, author, subreddit_url, created_time, score, )\n )", "def registerPlayer(name):\n conn = psycopg2.connect(\"dbname=tournament\")\n c = conn.cursor()\n\n query = \"INSERT INTO player (name) VALUES (%s)\"\n param = (name,)\n c.execute(query, param)\n\n query = \"SELECT id FROM player WHERE name = (%s)\"\n param = (name,)\n c.execute(query, param)\n player_id = c.fetchall()[0][0]\n\n query = \"INSERT INTO match (id, num_of_matches, num_of_wins) VALUES (\" + str(player_id) + \", 0, 0)\"\n c.execute(query) # Insert the player into the match table with same id.\n conn.commit()\n conn.close()", "def reportMatch(winner, loser):\n db, cursor = connect()\n cursor.execute(\"UPDATE players SET matches = matches + 1 WHERE id = %d or id = %d\" % (winner, loser) ); \n db.commit() \n cursor.execute(\"UPDATE players SET wins = wins + 1 WHERE id = %d\" % (winner, ) ) \n db.commit() \n cursor.execute(\"INSERT INTO matches (winner_id, loser_id ) VALUES ( %s, %s) \", (winner, loser))\n db.commit()\n db.close()", "def reportMatch(winner, loser):\n # Keeping things orderly by always inserting player IDs lowest to highest\n player1ID = min(winner, loser)\n player2ID = max(winner, loser)\n\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n\n # Use string insertion method with tuple to prevent SQL injection attacks\n dbcursor.execute(\"\"\"INSERT INTO matches (player_1_id, player_2_id,\n winner_id) VALUES (%s, %s, %s)\"\"\",\n (str(player1ID), str(player2ID), str(winner),))\n\n dbconnection.commit()\n dbconnection.close()", "def registerPlayer(name):\n conn, cur = connect()\n query = \"INSERT INTO players (player_name) VALUES (%s);\"\n param = (name,)\n try:\n cur.execute(query, param)\n except:\n print(\"Error encountered when inserting player \" + name + \" into the database\")\n conn.commit()\n conn.close()", "def registerPlayer(name):\n DB = connect()\n c = DB.cursor()\n #inserts a new player into the players table, bleach cleans the input to avoid attack \n c.execute(\"INSERT INTO players (player) VALUES (%s)\", (bleach.clean(name), ))\n DB.commit()\n DB.close()", "def registerPlayer(name):\n db_conn = connect()\n db_cursor = db_conn.cursor()\n player_insert_stmt = \"insert into players(player_name) values (%s)\"\n db_cursor.execute(player_insert_stmt, (name,))\n db_conn.commit()\n db_conn.close()", "def reportMatch(winner, loser):\n \n cursor.execute(\"insert into matches (winnerid, loserid) values (%s, %s)\" % (winner, loser))\n gc.commit()", "def registerPlayer(name):\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"INSERT INTO players (name) VALUES (%s);\"\"\", (name,))\n conn.commit()", "def registerPlayer(name):\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"INSERT INTO player (name) VALUES (%s)\", (name,))\n dbConn.commit()\n dbConn.close()", "def registerPlayer(name):\n\n if len(name) < 1:\n print \"Player not registered. Invalid name or no name given.\"\n else:\n query = \"INSERT INTO players (name) VALUES (%s)\"\n values = (name,)\n results = executeQuery({\n 'dbname': 'tournament', \n 'query' : query, \n 'type' : 'insert', \n 'values' : values\n })" ]
[ "0.66316354", "0.65528435", "0.6539488", "0.6218796", "0.6213069", "0.61682665", "0.61086845", "0.6015163", "0.59967804", "0.59904796", "0.5963679", "0.59542024", "0.5944807", "0.59214735", "0.59171426", "0.58731747", "0.5869828", "0.58598584", "0.58454907", "0.58205545", "0.58050084", "0.5788786", "0.5788196", "0.5780455", "0.57788175", "0.577165", "0.57612646", "0.5744565", "0.57317793", "0.57313174" ]
0.7346812
0
Creates a delta between the initial statistics and the final one
def _deltas(self): istat = self.init lstat = self.stats uptime = self._uptime() delta = float(uptime) - float(self.uptime) self.uptime = uptime for dev in lstat.keys(): if not istat.has_key(dev): del lstat[dev] continue idev = istat[dev] ldev = lstat[dev] for key,value in ldev.items(): if re.search(r'(^major\Z|^minor\Z)',key): continue if not idev.has_key(key): print "Different keys in statistics" sys.exit(1) if not str(value).isdigit and \ not str(ldev[key]).isdigit(): print "value of key is not a number" sys.exit(1) if ldev[key] == idev[key]: ldev[key] = self._sprintf('%.2f', 0) elif int(delta) > 0: ldev[key] = self._sprintf('%.2f',float((ldev[key] - idev[key]) / delta)) else: ldev[key] = self._sprintf('%.2f', float(ldev[key] - idev[key])) idev[key] = value return idev
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delta(self) -> None:", "def calculate_delta(self):\n rho_des_index, distance, data_size = self.rho_des_index, self.distance, self.data_size\n self.result[rho_des_index[0]][1] = -1\n for i in range(1, data_size):\n for j in range(0, i):\n old_i, old_j = rho_des_index[i], rho_des_index[j]\n min_pos, max_pos = min(old_j, old_i), max(old_j, old_i)\n if distance[(min_pos, max_pos)] < self.result[old_i][1]:\n self.result[old_i][1] = distance[(min_pos, max_pos)]\n self.master[old_i] = old_j\n self.result[rho_des_index[0]][1] = max(self.result[:, 1])", "def make_delta(m1, m2):\n return graph_objs.Scatter(\n x=[0],\n y=[(m1 + m2) / 2.0],\n text=['delta: ' + '{:0.2f}'.format(abs(m1 - m2))],\n mode='markers',\n marker=dict(symbol='square',\n color='rgb(255,255,255)'),\n hoverinfo='text'\n )", "def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)", "def make_delta(self, delta):\n\n return delta", "def calculate_delta(self, name, previous, count):\n if count < previous:\n logger.error(\n \"Saw a non-monotonically increasing value for \"\n \"metric {name}\".format(name=name))\n return 0\n return count - previous", "def __init__(self, initial_std=0.1, delta=0.1):\n self.initial_std = initial_std\n self.delta = delta\n # Initialize the current standard deviation\n self.cur_std = initial_std", "def diff_stat_data(stat_data):\n diff_stat_data = dict.fromkeys(stat_data.keys())\n diff_stat_data[\"name\"] = stat_data[\"name\"]\n diff_stat_data[\"t\"] = stat_data[\"t\"]\n\n for key in [\"000\", \"090\", \"ver\"]:\n dt = stat_data[\"t\"][1] - stat_data[\"t\"][0]\n\n # axis keyword added to numpy version 1.11\n # diff_stat_data[key] = np.gradient(stat_data[key],\n # dt, edge_order=2, axis=None)\n diff_stat_data[key] = np.gradient(stat_data[key], dt, edge_order=2)\n return diff_stat_data", "def delta(self):\r\n return self.nd1()", "def gen_delta(self):\n delta = self.delta.gen_delta(self.mask.good_pix, self.mask.bad_pix,\n self.params.nside, self.params.npix)\n return delta", "def update(self, final_delta = None):\n l = len(self.derivatives)\n\n if final_delta:\n #self.derivatives[ l - 1 ] += final_delta NOTE: not supported in CodeSkulptor\n self.derivatives[ l - 1 ] = self.derivatives[ l - 1 ] + final_delta\n\n for i in range(l - 2, -1, -1):\n #self.derivatives[ i ] += self.derivatives[ i + 1 ] NOTE: not supported in CodeSkulptor\n self.derivatives[ i ] = self.derivatives[ i + 1 ] + self.derivatives[ i ]", "def diffStats(name1, vals1, name2, vals2):\n from Stats import Stats\n label = name2 + ' - ' + name1\n diff = vals2 - vals1\n return Stats().label(label).addm(diff)", "def delta(self):\n return (self._stages[EStage.CURRENT] - self._stages[EStage.START]) \\\n / (self._stages[EStage.END] - self._stages[EStage.START])", "def _compute_diff(self, begin, end):\n d = self.diff\n x = self.x\n for i in range(begin, end):\n for j in range(i):\n d[i].append((d[i][j] - d[i-1][j]) / (x[i] - x[i-j-1]))", "def deltaCnt(self,new_cnt, past_cnt):\r\n delta = new_cnt - past_cnt\r\n if delta < -1 * (\r\n 2 ** 15): # Checks if the encoder values have rolled over, and if so, subtracts/adds accordingly to assure normal delta values\r\n delta += (2 ** 16)\r\n elif delta > (2 ** 
15):\r\n delta -= (2 ** 16)\r\n old_cnt = new_cnt\r\n return delta, old_cnt", "def apply_delta_float(self):\n\n if not self._randomValFlag:\n newData = self.data + self.deltaFloat\n self._deltaPrevAbs.insert(0, self.deltaFloat)\n self._deltaPrevPer.insert(0, (((newData / self.data) - 1) * 100))\n self.data += self.deltaFloat\n self.deltaFloat = 0\n else:\n a = self._randomInfo[0]\n b = self._randomInfo[1]\n newData = random.uniform(a, b)\n delta = self.data - newData\n self._deltaPrevAbs.insert(0, delta)\n self._deltaPrevPer.insert(0, (((delta / self.data) - 1) * 100))\n self.deltaFloat = 0\n self.data = newData", "def get_delta(name):\n\n # get metrics\n [curr_metrics, last_metrics] = get_metrics()\n\n # get delta\n name = name[len(NAME_PREFIX):] # remove prefix from name\n try:\n delta = (curr_metrics['data'][name] - last_metrics['data'][name])/(curr_metrics['time'] - last_metrics['time'])\n if delta < 0:\n delta = 0\n except StandardError:\n delta = 0\n\n return delta", "def delta(self):\n \n print(\"Cannot calculate delta for base class BSOption.\" )\n return 0", "def calculate_delta(log, state, config, policy):\n current = len(state.active) + len(state.pending)\n return apply_delta(log, current, state, config, policy)", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def bm_change(self,dt,delta):\n change = norm.rvs(loc=0,size=1,scale=delta**2*dt)\n return change", "def delta(self):\n return self._delta", "def delta(self):\n return self._delta", "def delta(self):\n return self._delta", "def delta(self):\n return self._delta", "def delta(self):\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def _get_sum_delta(self, instance, mode, previous):\n new_value = self._get_value_from_instance(instance)\n if mode == CHANGING:\n old_value = self._get_value_from_instance(previous)\n if new_value - old_value == 0:\n # updates not needed\n return None\n return F(self.field) + new_value - old_value\n # mode is ENTERING or LEAVING, only new_value matters.\n return F(self.field) + new_value * mode", "def setup_tdelta(self, dir1: str, num1: int, pos1: str, dir2: str, num2: int, pos2: str) -> None:\n cmd = ':measure:define deltatime,{0},{1},{2},{3},{4},{5}'.format(dir1, num1, pos1, dir2, num2, pos2)\n self.write(cmd)", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in 
new_stats if key in _)", "def delta(self) -> timedelta:\n delta = self.data.get(\"delta\", 0)\n return timedelta(seconds=delta)" ]
[ "0.66328543", "0.6213218", "0.61514914", "0.61068285", "0.6014113", "0.59775084", "0.5938798", "0.59145635", "0.58798945", "0.5820956", "0.5816481", "0.58054644", "0.5763823", "0.571858", "0.57119256", "0.56900823", "0.5680007", "0.5678773", "0.5669453", "0.56573594", "0.5641122", "0.5638563", "0.5638563", "0.5638563", "0.5638563", "0.5636887", "0.5600171", "0.55720127", "0.55644244", "0.5563409" ]
0.63489467
1
Search text for [[link_me]], replace with link_me
def wiki_link(text): return wiki_link_pattern.sub(get_link, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _replace_links(input_string,link_dict={}):\n\n # Dictionary to hold link string/target pairs\n target_dict = {}\n\n # Strip white space from string\n this_string = input_string.strip()\n\n # If we have a string stub, return it\n if len(input_string) < 2:\n return input_string, {}\n\n # Look for patterns: \";_\" OR \",_\" OR \" _\" OR \":_\"\n search_pattern = re.compile(\"[\\,\\s\\;\\:]\\_|\\A\\_\")\n\n # Look for place to break link: \";\" OR \",\" OR \":\" OR \" \" OR \".\"\n end_pattern = re.compile(\"[\\,\\s\\;\\:\\.]\")\n\n # Look for a match\n match = search_pattern.search(this_string)\n while match:\n\n # If started with \"_\", link_alias starts at match.span()[0]\n if this_string[match.span()[0]] == \"_\":\n start = match.span()[0]\n\n # If started with \",_\" or the like, link_alias starts at match.span()[0] + 1\n else:\n start = match.span()[0] + 1\n\n # Chop string into before and after _ in link (front and back)\n front = this_string[:start]\n back = this_string[start:]\n\n # Look for end of the link\n link_end = end_pattern.search(back)\n\n # If we find the end, split back into link_alias and trailing\n if link_end:\n link_alias = back[:link_end.span()[0]]\n trailing = back[link_end.span()[0]:]\n\n # If we do not find the end, the whole back is link_alias ... no trailing\n else:\n link_alias = back[:]\n trailing = \"\"\n\n # Extract url and text for constructing the link text\n try:\n url, text = link_dict[link_alias]\n\n if url == \"\":\n if text == \"\":\n raise KeyError\n else:\n # Replace the link_alias with the text, no url\n link_string = text\n else:\n if text == \"\":\n # Replace the link_alias with link_alias[1:] -> url\n label = link_alias[1:]\n else:\n # Replace the link_alias with the text -> url\n label = text\n\n link_string = \"`{}`_\".format(label)\n url_string = \".. 
_`{}`: {}\".format(label,url)\n try:\n already_seen = target_dict[label]\n if already_seen != url_string:\n err = \"The same link_text '{}' corresponds to more than one url\\n\".format(text)\n raise ValueError(err)\n except KeyError:\n target_dict[label] = url_string\n\n except KeyError:\n\n # Replace the link_alias with link_alias[1:]\n link_string = link_alias[1:]\n\n # Rebuild this_string with front + new link + trailing\n this_string = \"{}{}{}\".format(front,link_string,trailing)\n\n # Look for another link\n match = search_pattern.search(this_string)\n\n return this_string, target_dict", "def ref_to_link(txt):\n text = txt.group(1) # because it was a match in a regular expression\n\n thecite, everythingelse = first_bracketed_string(text)\n thecite = thecite[1:-1] # strip curly brackets\n thecite = thecite.replace(\"\\\\\",\"\") # \\href --> href\n\n refs = thecite.split(\",\")\n ans = \"\"\n\n # print \"refs\",refs\n\n for ref in refs:\n ref = ref.strip() # because \\cite{A, B, C,D} can have spaces\n this_link = \"\"\n if ref.startswith(\"href\"):\n the_link = re.sub(r\".*{([^}]+)}{.*\", r\"\\1\", ref)\n click_on = re.sub(r\".*}{([^}]+)}\\s*\", r\"\\1\", ref)\n this_link = '{{ LINK_EXT(\"' + click_on + '\",\"' + the_link + '\") | safe}}'\n elif ref.startswith(\"doi\"):\n ref = ref.replace(\":\",\"\") # could be doi:: or doi: or doi\n the_doi = ref[3:] # remove the \"doi\"\n this_link = '{{ LINK_EXT(\"' + the_doi + '\",\"https://doi.org/' + the_doi + '\")| safe }}'\n elif ref.lower().startswith(\"mr\"):\n ref = ref.replace(\":\",\"\")\n the_mr = ref[2:] # remove the \"MR\"\n this_link = '{{ LINK_EXT(\"' + 'MR:' + the_mr + '\", '\n this_link += '\"http://www.ams.org/mathscinet/search/publdoc.html?pg1=MR&s1='\n this_link += the_mr + '\") | safe}}'\n elif ref.lower().startswith(\"arxiv\"):\n ref = ref.replace(\":\",\"\")\n the_arx = ref[5:] # remove the \"arXiv\"\n this_link = '{{ LINK_EXT(\"' + 'arXiv:' + the_arx + '\", '\n this_link += '\"http://arxiv.org/abs/'\n this_link += the_arx + '\")| safe}}'\n\n\n if this_link:\n if ans:\n ans += \", \"\n ans += this_link\n\n return '[' + ans + ']' + everythingelse", "def replace_urls(text, replace_with=\"_URL_\"):\n return RE_SHORT_URL.sub(replace_with, RE_URL.sub(replace_with, text))", "def md_link(link_text, link_target):\n return '[%s](%s)' % (md_escape(link_text, characters=']'),\n md_escape(link_target, characters=')'))", "def substitute(sentence):\n result = []\n for link in sentence[\"links\"]:\n first = sentence[\"words\"][link[0]]\n second = sentence[\"words\"][link[1]]\n result.append([first, second, link[2]])\n return result", "def _make_links(tweet):\n for pattern, repl in (USER_SUB, KEYWORD_SUB):\n tweet = re.sub(pattern, repl, tweet)\n return tweet", "def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):\n text = textlib.removeDisabledParts(text)\n\n # Ignore links in fullurl template\n text = re.sub(r'{{\\s?fullurl:.[^}]*}}', '', text)\n # TODO search for links within cite with filled-in archiwum parameter\n \n\n # MediaWiki parses templates before parsing external links. 
Thus, there\n # might be a | or a } directly after a URL which does not belong to\n # the URL itself.\n\n # First, remove the curly braces of inner templates:\n nestedTemplateR = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')\n while nestedTemplateR.search(text):\n text = nestedTemplateR.sub(r'{{\\1 \\2 \\3}}', text)\n\n # Then blow up the templates with spaces so that the | and }} will not\n # be regarded as part of the link:.\n templateWithParamsR = re.compile(r'{{([^}]*?[^ ])\\|([^ ][^}]*?)}}',\n re.DOTALL)\n while templateWithParamsR.search(text):\n text = templateWithParamsR.sub(r'{{ \\1 | \\2 }}', text)\n\n # Add <blank> at the end of a template\n # URL as last param of multiline template would not be correct\n text = text.replace('}}', ' }}')\n\n # Remove HTML comments in URLs as well as URLs in HTML comments.\n # Also remove text inside nowiki links etc.\n text = textlib.removeDisabledParts(text)\n #linkR = textlib.compileLinkR(withoutBracketed, onlyBracketed)\n #linkR = re.compile(r'(?P<url>http[s]?:(\\/\\/[^:\\s\\?]+?)(\\??[^\\s;<>\\\"\\|\\)]*))(?:[\\]\\s\\.:;,<>\\\"\\|\\)])')\n linkR = re.compile(r'(?m)(?P<url>http[s]?:(\\/\\/[^\\s\\?]+?)(\\??[^\\s<\\|\\}\\]]*))(?:[\\]\\s\\.<\\|\\}])')\n for m in linkR.finditer(text):\n if m.group('url'):\n #pywikibot.output('URL to YIELD:%s' % m.group('url'))\n if not citeArchivedLink(m.group('url'),text):\n yield m.group('url')\n else:\n #test output\n pywikibot.output('[%s] WebLinksIn: link skipped:%s' % (datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),m.group('url')))\n #else:\n # yield m.group('urlb')", "def _fix_links(self, text, page_names):\n for n in page_names:\n text = text.replace(f\"]({n})\", f\"]({n}.html)\")\n text = text.replace(f\"]({n}.md)\", f\"]({n}.html)\")\n return text", "def replace_local_hyperlinks(\n text,\n base_url=\"https://github.com/project-rig/nengo_spinnaker/blob/master/\"\n ):\n def get_new_url(url):\n return base_url + url[2:]\n\n # Deal with anonymous URLS\n for match in re.finditer(r\"^__ (?P<url>\\./.*)\", text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\"^__ {}\".format(orig_url),\n \"__ {}\".format(url), text, flags=re.MULTILINE)\n\n # Deal with named URLS\n for match in re.finditer(r\"^\\.\\. _(?P<identifier>[^:]*): (?P<url>\\./.*)\",\n text, re.MULTILINE):\n identifier = match.groupdict()[\"identifier\"]\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\n \"^\\.\\. _{}: {}\".format(identifier, orig_url),\n \".. _{}: {}\".format(identifier, url),\n text, flags=re.MULTILINE)\n\n # Deal with image URLS\n for match in re.finditer(r\"^\\.\\. image:: (?P<url>\\./.*)\",\n text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = text.replace(\".. image:: {}\".format(orig_url),\n \".. image:: {}\".format(url))\n\n return text", "def noteRef(self, text):\n text_re = re.compile(r\"\"\"\n \\[ # start\n (%s) # !atts\n \\#\n ([^\\]!]+) # !label\n ([!]?) 
# !nolink\n \\]\"\"\" % self.c, re.X)\n text = text_re.sub(self.fParseNoteRefs, text)\n return text", "def slack_link(url, text=\"\"):\n if text:\n return \"<%s|%s>\" % (url, text)\n\n else:\n return \"<%s>\" % url", "def make_inter_wiki_links(string):\n\n _inter_wiki = _inter_wiki_re.get(config.get_option('language'))\n\n if _inter_wiki is None:\n return string\n\n mapping = _inter_wiki_map.get(config.get_option('language'))\n\n for i, regex in enumerate(_inter_wiki):\n out = []\n last_index = 0\n for match in regex.finditer(string):\n text = match.group('text')\n # Offset by 1 to account for text group\n index = match.groups().index(text, 1)-1\n data = mapping[i*_MAX_RE+index][1]\n\n out.append(string[last_index:match.start('text')])\n if text == data['link']:\n out.append('[[%s]]' % data['link'])\n else:\n out.append('[[%s|%s]]' % (data['link'], text))\n\n last_index = match.end('text')\n\n out.append(string[last_index:])\n string = ''.join(out)\n\n return string", "def _do_links(self, text):\r\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\r\n\r\n # `anchor_allowed_pos` is used to support img links inside\r\n # anchors, but not anchors inside anchors. An anchor's start\r\n # pos must be `>= anchor_allowed_pos`.\r\n anchor_allowed_pos = 0\r\n\r\n curr_pos = 0\r\n while True: # Handle the next link.\r\n # The next '[' is the start of:\r\n # - an inline anchor: [text](url \"title\")\r\n # - a reference anchor: [text][id]\r\n # - an inline img: ![text](url \"title\")\r\n # - a reference img: ![text][id]\r\n # - a footnote ref: [^id]\r\n # (Only if 'footnotes' extra enabled)\r\n # - a footnote defn: [^id]: ...\r\n # (Only if 'footnotes' extra enabled) These have already\r\n # been stripped in _strip_footnote_definitions() so no\r\n # need to watch for them.\r\n # - a link definition: [id]: url \"title\"\r\n # These have already been stripped in\r\n # _strip_link_definitions() so no need to watch for them.\r\n # - not markup: [...anything else...\r\n try:\r\n start_idx = text.index('[', curr_pos)\r\n except ValueError:\r\n break\r\n text_length = len(text)\r\n\r\n # Find the matching closing ']'.\r\n # Markdown.pl allows *matching* brackets in link text so we\r\n # will here too. 
Markdown.pl *doesn't* currently allow\r\n # matching brackets in img alt text -- we'll differ in that\r\n # regard.\r\n bracket_depth = 0\r\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\r\n text_length)):\r\n ch = text[p]\r\n if ch == ']':\r\n bracket_depth -= 1\r\n if bracket_depth < 0:\r\n break\r\n elif ch == '[':\r\n bracket_depth += 1\r\n else:\r\n # Closing bracket not found within sentinel length.\r\n # This isn't markup.\r\n curr_pos = start_idx + 1\r\n continue\r\n link_text = text[start_idx+1:p]\r\n\r\n # Possibly a footnote ref?\r\n if \"footnotes\" in self.extras and link_text.startswith(\"^\"):\r\n normed_id = re.sub(r'\\W', '-', link_text[1:])\r\n if normed_id in self.footnotes:\r\n self.footnote_ids.append(normed_id)\r\n result = '<sup class=\"footnote-ref\" id=\"fnref-%s\">' \\\r\n '<a href=\"#fn-%s\">%s</a></sup>' \\\r\n % (normed_id, normed_id, len(self.footnote_ids))\r\n text = text[:start_idx] + result + text[p+1:]\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = p+1\r\n continue\r\n\r\n # Now determine what this is by the remainder.\r\n p += 1\r\n if p == text_length:\r\n return text\r\n\r\n # Inline anchor or img?\r\n if text[p] == '(': # attempt at perf improvement\r\n match = self._tail_of_inline_link_re.match(text, p)\r\n if match:\r\n # Handle an inline anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n\r\n url, title = match.group(\"url\"), match.group(\"title\")\r\n if url and url[0] == '<':\r\n url = url[1:-1] # '<url>' -> 'url'\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n if title:\r\n title_str = ' title=\"%s\"' % (\r\n _xml_escape_attr(title)\r\n .replace('*', self._escape_table['*'])\r\n .replace('_', self._escape_table['_']))\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n _xml_escape_attr(link_text),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n continue\r\n\r\n # Reference anchor or img?\r\n else:\r\n match = self._tail_of_reference_link_re.match(text, p)\r\n if match:\r\n # Handle a reference-style anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n link_id = match.group(\"id\").lower()\r\n if not link_id:\r\n link_id = link_text.lower() # for links like [this][]\r\n if link_id in self.urls:\r\n url = self.urls[link_id]\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title = self.titles.get(link_id)\r\n if 
title:\r\n before = title\r\n title = _xml_escape_attr(title) \\\r\n .replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title_str = ' title=\"%s\"' % title\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '&quot;'),\r\n link_text.replace('\"', '&quot;'),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result = '<a href=\"%s\"%s>%s</a>' \\\r\n % (url, title_str, link_text)\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = match.end()\r\n continue\r\n\r\n # Otherwise, it isn't markup.\r\n curr_pos = start_idx + 1\r\n\r\n return text", "def replace_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', 'URL', text)\n return text", "def _remove_invalid_links(text):\n\n for reply_number in re.finditer(REGEX_REPLY, text):\n post_id = reply_number.group(1)\n post = Post.objects.filter(id=post_id)\n if not post.exists():\n text = string.replace(text, REFLINK_PREFIX + post_id, post_id)\n\n return text", "def autoLink(self, text):\n\n pattern = re.compile(r\"\"\"\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))\"\"\",\n re.U | re.I)\n return pattern.sub(r'\"$\":\\1', text)", "def replace_urls_token(text):\n\n text = re.sub(r\"^https?://.*[\\r\\n]*\", \"<url/>\", text, re.M | re.I)\n return re.sub(r\"http\\S+(\\s)*(\\w+\\.\\w+)*\", \"<url/>\", text, re.M | re.I)", "def replace_hashtags(text, replace_with=\"_TAG_\"):\n return RE_HASHTAG.sub(replace_with, text)", "def linkify_tags_and_mentions(value):\n value = find_hashtags_re.sub(tag_match_to_url, sanitize(value))\n value = find_mentions_re.sub(mention_match_to_url, value)\n # value = link_tags_parse(value)\n return mark_safe(value)", "def link_latex(line: str) -> str:\n\n # Look for alias-links\n while \"[\" in line and \"(\" in line:\n url = line[line.find(\"(\") + 1: line.find(\")\")]\n alias = line[line.find(\"[\") + 1: line.find(\"]\")]\n line = line[:line.find(\"[\")] + \"\\\\href{\" + url + \"}{\" + alias + \"}\" + line[line.find(\")\") + 1:]\n\n # Look for non-alias links\n line = line.replace(\"[\", \"\\\\href{\")\n line = line.replace(\"]\", \"}\")\n\n return line", "def links(self, text):\n\n # For some reason, the part of the regex below that matches the url\n # does not match a trailing parenthesis. It gets caught by tail, and\n # we check later to see if it should be included as part of the url.\n pattern = r'''\n (?P<pre>^|(?<=[\\s>.\\(\\|])|[{[])? # leading text\n \" # opening quote\n (?P<atts>%s) # block attributes\n (?P<text>[^\"]+?) # link text\n \\s? # optional space\n (?:\\((?P<title>[^)]+?)\\)(?=\"))? 
# optional title\n \": # closing quote, colon\n (?P<url>%s+?) # URL\n (?P<slash>\\/)? # slash\n (?P<post>[^\\w\\/]*?) # trailing text\n (?P<tail>[\\]})]|(?=\\s|$|\\|)) # tail\n ''' % (self.c, self.urlch)\n\n text = re.compile(pattern, re.X | re.U).sub(self.fLink, text)\n\n return text", "def relink(self, link_id):", "def getRefs(self, text):\n pattern = re.compile(r'(?:(?<=^)|(?<=\\s))\\[(.+)\\]((?:http(?:s?):\\/\\/|\\/)\\S+)(?=\\s|$)',\n re.U)\n text = pattern.sub(self.refs, text)\n return text", "def format_link(self, link):\n new_link = \"/\".join(link.split(\"/\")[0:3])\n return \"http://www.imdb.com\" + new_link", "def fix_links():\n pass", "def test_link_in_list(self):\n self.assertEquals(\"* [name](name)\\n* name2\\n* name3\",\n trans(\" * [name]\\n * name2\\n * name3\"))", "def _build_links(links):\n for link in links:\n link['href'] = link['href'].replace('servers', 'instances')\n return links", "def _generate_links(self):\n index = 0\n links = \"\"\n for ch in self.text:\n if ch == '[':\n links += \"(^\"\n elif ch == ']':\n links += \")$|\"\n index += 1\n elif links[-1:] != '|' and links != \"\":\n links += ch\n self.links = compile(links[:-1].lower())", "def feed_link_decorator(context, feed):\n for item in feed.items:\n current_link = item['link']\n # print(current_link)\n new_link = current_link + FUD_DEFAULT['parameters']\n item['link'] = new_link\n # print(item)\n return feed", "def __replace_href(self, tag):\n if \"<a href=\" in str(tag.contents):\n if len(tag.contents) > 1:\n tag = concatenate(list(tag.contents))\n else:\n tag = str(tag.contents)[1:len(str(tag.contents)) - 1]\n for k, v in self.rules.items():\n tag = tag.replace(k, v)\n else:\n tag = tag.get_text()\n\n return tag" ]
[ "0.6165548", "0.60130036", "0.5864938", "0.5808209", "0.5724455", "0.5653492", "0.5621826", "0.55912733", "0.5550039", "0.5528044", "0.5508838", "0.54424155", "0.542252", "0.53638536", "0.5346632", "0.5269909", "0.5243337", "0.52135646", "0.51990354", "0.5163372", "0.5137252", "0.51008075", "0.50981957", "0.50961703", "0.50723004", "0.50576234", "0.5055523", "0.5048631", "0.5035438", "0.5020608" ]
0.62884384
0
Handle updating an object by its ID
def update(self, request, pk=None): #update a specific object
    return Response({'http_method': 'PUT'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def update(self, id, obj):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('put', url, data={self.singular: obj})", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def update(self, request, pk):\n if pk is None:\n for item in request.data:\n # get object by its primary key\n obj = self._object_get(item[self.model._meta.pk.attname])\n self._object_update(obj, item)\n else:\n obj = self._object_get(pk)\n self._object_update(obj, request.data)\n return obj", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def update_object(self, name: str) -> None:", "def put(self, request, pk):\n return self.update(request, pk)", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def put(self, id):\n return update_msg(request.json, id)", "def update(self, id, id_col='name'):\n instance = self.get_one_instance(id_col, id)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n errors, data = self.format_and_control(request.form, obj=instance)\n\n if len(errors) > 0:\n set_session_var('errors', dict(errors))\n return None\n\n data = get_only_updated_values(instance, data)\n\n if len(data) == 0:\n return None\n\n res = update_in_db(instance, data)\n\n if res != 'updated':\n set_session_var('errors', str(res))\n return None\n else:\n set_session_var('success', res)\n\n if self.module_fn is not None:\n self.module_fn(instance, data)\n\n return instance", "def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def update_object(self, oid, name, url):\n r = self.request(\n 'put',\n safeformat('registry/objects/{:int}/', oid),\n json.dumps({\n 'description': {\n 'name': name,\n 'url': url\n }\n })\n )\n return self._extract_id_from_batch_response(r, 'oid')", "def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204", "def updateItem(self, object):\n pass", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def salesforce_update(self, obj_name, obj_id, **kwargs):\n self.builtin.log(\n \"Updating {} {} with values {}\".format(obj_name, obj_id, kwargs)\n )\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.update(obj_id, kwargs)", "def update_user(id):\n pass", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n #print(\"the type of updated object is\", type(obj))\n return commit(obj)", "def put(self, request, 
pk):\n return self.post(request, pk)", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def put(self, _id, _value):\n self.objects[_id] = _value", "def put(self, id):\r\n try:\r\n self.valid_args()\r\n existing = db.session.query(self.__class__).get(id)\r\n if existing is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).update(existing)\r\n data = json.loads(request.data)\r\n # may be missing the id as we allow partial updates\r\n data['id'] = id\r\n # Clean HATEOAS args\r\n data = self.hateoas.remove_links(data)\r\n inst = self.__class__(**data)\r\n db.session.merge(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return Response(json.dumps(inst.dictize()), 200,\r\n mimetype='application/json')\r\n except IntegrityError:\r\n db.session.rollback()\r\n raise\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='PUT')", "def setObjectID(self, id):\n\n self.objectID = id[0]", "def update(self, identifier, data):\n self.client.request_with_method(Methods.UPDATE % (self.name, identifier,),\n data=data)", "def do_update(self, arg):\n args = arg.split()\n object_dict = storage.all()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if args[0] in self.class_dict:\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n return\n elif len(args) == 3:\n print(\"** value missing **\")\n return\n else:\n print(\"** class doesn't exist **\")\n return\n\n for i in range(len(args)):\n if args[i].startswith('\"') and args[i].endswith('\"'):\n args[i] = args[i][1:-1]\n\n for full_key in object_dict.keys():\n key = full_key.split('.')\n key_id = key[1]\n if args[0] in self.class_dict:\n if args[1] == object_dict[full_key].id:\n setattr(object_dict[full_key], args[2], args[3])\n setattr(object_dict[full_key], \"updated_at\",\n datetime.now())\n storage.save()\n return\n else:\n print(\"** class doesn't exist **\")\n return\n print(\"** no instance found **\")" ]
[ "0.760733", "0.7322684", "0.7160693", "0.7101339", "0.697243", "0.6948714", "0.6921794", "0.6902203", "0.689081", "0.6834843", "0.6813453", "0.6782466", "0.67707884", "0.67625", "0.67575747", "0.6728648", "0.67064226", "0.66966736", "0.6653617", "0.6611896", "0.65948665", "0.65887684", "0.6544265", "0.65333104", "0.6520084", "0.6506676", "0.64985645", "0.64871466", "0.645112", "0.64238113" ]
0.7415862
1
Handle updating part of an object by its ID
def partial_update(self, request, pk=None): #partial update a specific object
    return Response({'http_method': 'PATCH'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def update(self, request, pk):\n if pk is None:\n for item in request.data:\n # get object by its primary key\n obj = self._object_get(item[self.model._meta.pk.attname])\n self._object_update(obj, item)\n else:\n obj = self._object_get(pk)\n self._object_update(obj, request.data)\n return obj", "def update(self, id, obj):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('put', url, data={self.singular: obj})", "def updateItem(self, object):\n pass", "def update_object(self, name: str) -> None:", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def put(self, request, pk):\n return self.update(request, pk)", "def update(self, id, id_col='name'):\n instance = self.get_one_instance(id_col, id)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n errors, data = self.format_and_control(request.form, obj=instance)\n\n if len(errors) > 0:\n set_session_var('errors', dict(errors))\n return None\n\n data = get_only_updated_values(instance, data)\n\n if len(data) == 0:\n return None\n\n res = update_in_db(instance, data)\n\n if res != 'updated':\n set_session_var('errors', str(res))\n return None\n else:\n set_session_var('success', res)\n\n if self.module_fn is not None:\n self.module_fn(instance, data)\n\n return instance", "def update_object(self, oid, name, url):\n r = self.request(\n 'put',\n safeformat('registry/objects/{:int}/', oid),\n json.dumps({\n 'description': {\n 'name': name,\n 'url': url\n }\n })\n )\n return self._extract_id_from_batch_response(r, 'oid')", "def setObjectID(self, id):\n\n self.objectID = id[0]", "def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})", "def _update_object(self, data_dict):\r\n pass", "def test_partially_update_device_by_id(self):\n pass", "def update(self, data, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.update({'_id': id_obj}, {\"$set\": data})\n return self.collection.update(query_data, {\"$set\": data})", "def put(self, id):\n return update_msg(request.json, id)", "async def update_one(self, where, data):\n\n pass", "def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})", "def update(self, obj, data):\n self.get(obj[self.model.pk_field.name])\n self.validate_fields(data)\n\n fields = []\n values = []\n\n for k, v in data.iteritems():\n if k in self.model.get_fields_name():\n fields.append(k)\n values.append(v)\n\n conn = self.get_connector()\n cursor = conn.cursor()\n update = \" ,\".join([\"{0}='{1}'\".format(f, v) for f, v in zip(fields,\n values)])\n query = \"update {0} set {1} WHERE {2}={3}\".format(\n 
self.ressource_config[\"table\"],\n update,\n self.model.pk_field.name,\n obj[self.model.pk_field.name]\n )\n\n cursor.execute(query)\n conn.commit()\n conn.close()\n\n return self.get(obj[self.model.pk_field.name])", "def partial_update(self,request,pk= None):\n return Response({'http_method':'PATCH'})", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def patch(self, request, pk):\n return self.partial_update(request, pk)", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def partial_update(self, request, pk=None):\n\n return Response({'http_method':'PATCH'})", "def edit_record(self, record):\r\n self.record.editObject(record, id=record['id'])", "def put(self, _id, _value):\n self.objects[_id] = _value" ]
[ "0.71984583", "0.6707685", "0.66999716", "0.66406435", "0.66380024", "0.6630572", "0.6587033", "0.6563462", "0.64720654", "0.6385685", "0.6380481", "0.63732487", "0.6364642", "0.6296849", "0.62867194", "0.6279398", "0.6251531", "0.6212349", "0.61951846", "0.61918664", "0.61887556", "0.61866164", "0.617279", "0.61721045", "0.61549336", "0.6151158", "0.60880804", "0.60844606", "0.6071005", "0.6060062" ]
0.67500085
1
Purge all completed exports from the backing IHS remote account
def exports():
    from celery_queue.tasks import cleanup_remote_exports
    cleanup_remote_exports.run()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def purge(self):\n pass", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "async def afterHoursAutoPurge(self, ctx: Context):", "def clean_up_old_exports(self):\n threshold = datetime.datetime.utcnow() - datetime.timedelta(days=30)\n self.session.query(Export).filter(Export.started_at < threshold).delete()", "def sipserver_purge(self) -> None:", "def purge_all(self, ctx, limit: int = 100):\r\n if ctx.invoked_subcommand is None:\r\n date_limit = datetime.today() - timedelta(days=12)\r\n yield from ctx.message.channel.purge(after=date_limit, bulk=True)\r\n yield from ctx.author.send('Purge Complete')", "async def clear_all(self) -> None:", "def test_backup_purge(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n old_backup_name = \"\"\n new_backup_name = \"\"\n backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n resume=self.backupset.resume, purge=self.backupset.purge,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.cluster_host)\n conn.kill_erlang()\n output = backup_result.result(timeout=200)\n self.log.info(str(output))\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n old_backup_name = bk_info[\"backups\"][i][\"date\"]\n self.log.info(\"Backup name before purge: \" + old_backup_name)\n conn.start_couchbase()\n conn.disconnect()\n self.sleep(30)\n output, error = self.backup_cluster()\n if error or not self._check_output(\"Backup completed successfully\", output):\n self.fail(output)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n new_backup_name = bk_info[\"backups\"][i][\"date\"]\n self.log.info(\"Backup name after purge: \" + new_backup_name)\n\n # Once the purge (and backup) have completed we shouldn't see any orphaned multipart uploads\n if self.objstore_provider:\n self.assertEqual(\n self.objstore_provider.num_multipart_uploads(), 0,\n \"Expected all multipart uploads to have been purged (all newly created ones should have also been completed)\"\n )\n\n self.assertNotEqual(old_backup_name, new_backup_name,\n \"Old backup name and new backup name are same when purge is used\")\n self.log.info(\"Old backup name and new backup name are not same when purge is used\")", "def acknowledge_downloaded_files():\n requests_to_delete = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='finished'\")\n if len(requests_to_delete) > 0:\n\n queries = []\n for request_to_delete in requests_to_delete:\n\n DownloaderSPAN512.delete_stagged_file(request_to_delete)\n\n dlm_cout.outs(\"Report download (%s) succeeded.\" % request_to_delete['guid'])\n queries.append(\"UPDATE requests \" 
\\\n \"SET status='cleaned_up', \" \\\n \"details='download complete', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), request_to_delete['id']))\n\n jobtracker.query(queries)\n else: pass", "def _garbage_collect_exports(self, export_dir_base: Text):\n if self._exports_to_keep is None:\n return\n\n def _export_version_parser(path):\n # create a simple parser that pulls the export_version from the directory.\n filename = os.path.basename(path.path)\n if not (len(filename) == 10 and filename.isdigit()):\n return None\n return path._replace(export_version=int(filename))\n\n # pylint: disable=protected-access\n keep_filter = gc._largest_export_versions(self._exports_to_keep)\n delete_filter = gc._negation(keep_filter)\n for p in delete_filter(\n gc._get_paths(export_dir_base, parser=_export_version_parser)):\n try:\n gfile.DeleteRecursively(p.path)\n except errors_impl.NotFoundError as e:\n tf_logging.warn('Can not delete %s recursively: %s', p.path, e)\n # pylint: enable=protected-access", "def _purge():\r\n _cache.clear()", "def purge(self, **options):\n pass", "def purge() -> None:\r\n _purge_func(False)", "def PurgeAll(self):\n\t\tself.acad.ActiveDocument.PurgeAll()", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)", "def clean_expired_task():\n day_ago = datetime.datetime.now(pytz.timezone(\"UTC\")) - datetime.timedelta(days=ASYNC_EXPORT_FILE_EXPIRED_DAYS)\n # 获取过期的内网下载文件\n expired_task_list = AsyncTask.objects.filter(created_at__lt=day_ago, is_clean=False)\n # nfs文件需要进行定期清理操作\n storage_type = FeatureToggleObject.toggle(FEATURE_ASYNC_EXPORT_COMMON).feature_config.get(\n FEATURE_ASYNC_EXPORT_STORAGE_TYPE\n )\n\n if storage_type or storage_type == RemoteStorageType.NFS.value:\n # 删除NFS文件\n for expired_task in expired_task_list:\n target_file_dir = os.path.join(settings.EXTRACT_SAAS_STORE_DIR, expired_task.file_name)\n if os.path.isfile(target_file_dir):\n os.remove(os.path.abspath(target_file_dir))\n expired_task.is_clean = True\n expired_task.save()", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def purging() -> bool:\r\n return _purge", "def cleanup_backups():\n try:\n yield\n finally:\n shutil.rmtree(\"tmp/backups\")", "def cleanup_on_disconnect(self, datapath):\n self.delete_all_flows(datapath)", "def cleanup():\n redis_client.flushall()", "def api_asset_cleanup():\n app.bank.clear()\n return \"\", 200", "def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)", "def __del__(self):\n for req in self._outbox:\n req.Wait()", "def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")", "def purge(self):\n self._rpc(specification.Queue.Purge())", "def purge(self):\n self.remaining = 0", "def clear_client_outputs():\n 
directory = client_variables.output_zip_folder\n for name in glob.glob(directory + '\\\\*'):\n os.remove(name)", "def purge_entries_command():\n incident = demisto.args().get('id', get_investigation_id())\n cursor = COLLECTION.find({})\n deleted = 0\n # Iterate, collecting any name/value pairs associated with the incident\n for i in cursor:\n if incident in i:\n object_id = i.get('_id')\n COLLECTION.delete_one({'_id': object_id})\n deleted += 1\n if deleted == 1:\n return f'Incident \"{incident}\" key/value pairs purged - {str(deleted)} document/record deleted', {}, {}\n return f'Incident \"{incident}\" key/value pairs purged - {str(deleted)} documents/records deleted', {}, {}" ]
[ "0.6358549", "0.6349242", "0.6194633", "0.6167525", "0.61525655", "0.61206007", "0.6027554", "0.59524834", "0.59467685", "0.59411263", "0.5885341", "0.5773222", "0.5749375", "0.571782", "0.56598467", "0.5652387", "0.5631703", "0.56277174", "0.56239337", "0.5614715", "0.56123257", "0.5607701", "0.56016153", "0.55809295", "0.55805653", "0.5570102", "0.5569543", "0.55664146", "0.5561051", "0.55057096" ]
0.6384582
0
Moves the faller to the left
Won't move left if there are jewels in the way
check piece to left if it is zero, if it is then you can move left
def move_left(self):
    counter = 0
    for y in range(1, self._col):
        for x in reversed(range(self._row)):
            if '[' in self._board[x][y] and self._board[x][y-1] == ' ':
                counter += 1
            elif '|' in self._board[x][y] and self._board[x][y-1] == ' ':
                counter += 1
    if counter == 3:
        for y in range(1, self._col):
            for x in reversed(range(self._row)):
                if '[' in self._board[x][y] and self._board[x][y - 1] == ' ':
                    self._board[x][y-1] = self._board[x][y]
                    self._board[x][y] = ' '
                elif '|' in self._board[x][y] and self._board[x][y-1] == ' ':
                    self._board[x][y-1] = self._board[x][y]
                    self._board[x][y] = ' '
    return self._board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveLeft(board):\n\t# initial shift\n\t#shiftLeft(board)\n\trykTilVenstre(board)\n\t# merge cells\n\tfor i in range(4):\n\t\tfor j in range(3):\n\n\t\t\tif board[i][j] == board[i][j + 1] and board[i][j] != 0:\n\t\t\t\tboard[i][j] *= 2\n\t\t\t\tboard[i][j + 1] = 0\n\t\t\t\tj = 0\n\n\t# final shift\n\t#shiftLeft(board)\n\trykTilVenstre(board)\n\treturn board", "def move_left():\n return __maze.move_left()", "def move_left(self):\r\n if self.rect.left > 0:\r\n self.rect.left -= self.speed", "def move_left(self):\n\n if self.xcor() < -230:\n self.setx(-255)\n else:\n new_x = self.xcor() - 40\n self.setx(new_x)", "def move_left(self):\n if self.change_valid(dx=-1):\n self.x -= 1", "def moveLeft(self):\n if self._position.x != 0:\n self._position.x -=1\n return True\n return False", "def move_left(self):\n self.rect.x -= 5 # Moves to the left by 5\n\n # If the player reaches the edge of the screen, they can't go further\n if self.rect.x <= -50:\n self.rect.x = -50", "def move_left(self):\n kya = self.board.board[self.player.y][self.player.x-1]\n if self.player.x > 0 and kya != 'X' and kya != 'G':\n self.board.board[self.player.y][self.player.x] = '.'\n self.coin_taken(-1, 0)\n self.board.board[self.player.y][self.player.x-1] = 'P'\n self.player.x -= 1\n else:\n print \"Can't move left\"\n self.dont_move_ghosts = 1", "def push_left (grid):\r\n \r\n for i in range(4):\r\n row = grid[i]\r\n \r\n if row == [0, 0 ,0 ,0]:\r\n continue\r\n for k in range(4):\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0\r\n for l in range(1, 4):\r\n if row[l-1] == row[l]:\r\n row[l-1] = row[l]*2\r\n row[l] = 0\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0 \r\n grid[i] = row\r\n return grid", "def onMoveLeft(self):\n self.mainGrid.moveLeft()", "def shift_board_left(self) -> bool:\n for r in range(self.board_size):\n if self.board[r][0] is not None:\n return False\n\n for c in range(0, self.board_size - 1):\n for r in range(self.board_size):\n self.board[r][c] = self.board[r][c + 1]\n\n for r in range(self.board_size):\n self.board[r][self.board_size - 1] = None\n\n return True", "def swipeLeft (self) :\n rotated = Grid(np.rot90(np.rot90(np.rot90(self.grid))))\n self.grid = np.rot90(rotated.swipeBase())", "def move_left (f):\r\n tmp_moved = f[:]\r\n for k in range(len(f)):\r\n for l in range(len(f)):\r\n if f[k][l] == 'x':\r\n tmp_moved[k][l] = '_'\r\n if l == 0:\r\n tmp_moved[k][len(f)-1] = 'x'\r\n else:\r\n tmp_moved[k][l-1] = 'x'\r\n break\r\n return tmp_moved", "def move_left(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'move left' action, then exit the function\n if not self.can_move_left(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile left by one and swap it with the tile that was there\n else:\n # To find the index of the tile 'directly to the left' of the '0', we can simply subtract the '0' index by 1\n index_to_swap = index - 1\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_move_left, tile_that_was_swapped", "def moveLeft(self,board_object):\n\n\t\tif checkClash(board_object,self,self.x,self.y-1) == 
0:\n\t\t\tif(self.y-1>board_object.start):\n\t\t\t\toverlayMatrix(board_object,self,self.x,self.y-1)\n\t\t\t\tself.setPos(self.x,self.y - 1)\n\t\telse:\n\t\t\treturn 1", "def push_left (grid):\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0\r\n \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j]==grid[i][j+1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j+1]=0 \r\n \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0", "def move_left(board):\n row_size = get_row_size(board)\n index_of_emtpy_tile = board.index(0)\n emtpy_tile_is_on_rightmost_side = index_of_emtpy_tile % row_size == row_size - 1\n result = copy.deepcopy(board)\n\n if not emtpy_tile_is_on_rightmost_side:\n right_tile_pos = index_of_emtpy_tile + 1\n right_tile = board[right_tile_pos]\n result[index_of_emtpy_tile] = right_tile\n result[right_tile_pos] = 0\n possible = True\n else:\n possible = False\n\n return possible, result", "def move_left(self, num):\n self.left_postion = num", "def move_shape_left(self):\n if self.falling_shape:\n self.falling_shape.shift_shape_left_by_one_column()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.shift_shape_right_by_one_column()\n return False\n return True", "def MoveLeftStep(self):\n if self.facing == 0:\n self.facing = 3\n self.x -= self.stepLeft\n elif self.facing == 1:\n self.facing = 0\n self.y -= self.stepUp\n elif self.facing == 2:\n self.facing = 1\n self.x += self.stepRight\n elif self.facing == 3:\n self.facing = 2\n self.y += self.stepDown", "def one_step_left(self):\n if (self.column-1 <0):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row][self.column-1] == False):\n return False\n else:\n self.column-=1\n self.battery -= 1\n return True", "def _move_left(self):\n self.x -= self.settings.mario_speed\n if self.settings.direction == 1:\n self.image = pygame.transform.flip(self.image, True, False)\n self.settings.direction = -1", "def move_left(self):\n\t\tself.set_x_vector(-1 * constants.DONKEY_SPEED)", "def left(self):\n self.move(-1, 0)", "def move_left(self,distance):\n self.turn_left()\n self.move_forward(distance)\n # self.log_arr.append(\"left\")", "def repair_column():\n turn_left()\n while front_is_clear():\n if no_beepers_present():\n put_beeper()\n move()\n if no_beepers_present():\n put_beeper()\n turn_around()\n while front_is_clear():\n move()\n turn_left()", "def turn_left(self):\n self.facing_direction -= self.config\n if self.facing_direction < 0:\n self.facing_direction += 8\n self.x, self.y = self.compute_positions()", "def moveLeft(board, cavityPos):\n board[cavityPos] = 0\n board[cavityPos-1] = 0\n board[cavityPos-2] = 1", "def move_left(self):\n self._time += 1\n if self._position > 0:\n self._position -= 1\n return True\n else:\n return False", "def left(self):\n if self.pos > 0:\n self.pos -= 1" ]
[ "0.72968435", "0.7108253", "0.7007417", "0.697426", "0.6911207", "0.69036156", "0.68996483", "0.6873529", "0.6853759", "0.68486154", "0.6829842", "0.67925954", "0.6785727", "0.6766036", "0.6765819", "0.6754254", "0.675414", "0.67041194", "0.66865104", "0.66781795", "0.6668006", "0.6662444", "0.66573507", "0.6645321", "0.66397685", "0.66226524", "0.65970826", "0.65950835", "0.65901375", "0.6585389" ]
0.7336359
0
Loading directory with no courses gives only an empty "lessons" course
def test_no_courses():
    model = models.Root()
    model.load_local_courses(fixture_path / 'empty-lessons-dir')
    assert sorted(model.courses) == ['lessons']
    assert not model.courses['lessons'].sessions
    assert not model.courses['lessons'].lessons
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_course_index_view_with_no_courses(self):\r\n # Create a course so there is something to view\r\n resp = self.client.get_html('/course/')\r\n self.assertContains(\r\n resp,\r\n '<h1 class=\"page-header\">My Courses</h1>',\r\n status_code=200,\r\n html=True\r\n )\r\n _test_no_locations(self, resp)", "def test_xml_get_courses(self):\r\n self.initdb('direct')\r\n courses = self.store.modulestores['xml'].get_courses()\r\n self.assertEqual(len(courses), 2)\r\n course_ids = [course.id for course in courses]\r\n self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, course_ids)\r\n self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, course_ids)\r\n # this course is in the directory from which we loaded courses but not in the map\r\n self.assertNotIn(\"edX/toy/TT_2012_Fall\", course_ids)", "def remove_empty_courses(self):\n pass", "def load_courses(self):\r\n store = modulestore()\r\n\r\n # Add a course with a unicode name, if the modulestore\r\n # supports adding modules.\r\n if hasattr(store, 'create_xmodule'):\r\n CourseFactory.create(org=u'ëḋẌ',\r\n course=u'śíḿṕĺé',\r\n display_name=u'2012_Fáĺĺ',\r\n modulestore=store)\r\n\r\n courses = store.get_courses()\r\n # NOTE: if xml store owns these, it won't import them into mongo\r\n if SlashSeparatedCourseKey.from_deprecated_string(TEST_COURSE_ID) not in [c.id for c in courses]:\r\n import_from_xml(store, DATA_DIR, ['toy', 'simple'])\r\n\r\n return [course.id for course in store.get_courses()]", "def load_student_full_courseload():\n return None", "def get_courses(self) -> List[Course]:\n courses: List[Course] = []\n\n for root, dirs, filenames in os.walk(self.folder, followlinks=True):\n # https://stackoverflow.com/questions/13454164/os-walk-without-hidden-folders\n # ---\n # CAREFUL: dirs[:] is necessary, since it overwrites the contents, not just\n # the reference\n filenames = [f for f in filenames if not f[0] == \".\"]\n dirs[:] = [d for d in dirs if not d[0] == \".\"]\n\n for filename in filter(lambda f: f == \"info.yaml\", filenames):\n courses.append(Course.from_file(os.path.join(root, filename)))\n\n return courses", "def test_get_courses_for_wiki(self):\r\n store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\r\n for course in store.get_courses():\r\n course_locations = store.get_courses_for_wiki(course.wiki_slug)\r\n self.assertEqual(len(course_locations), 1)\r\n self.assertIn(course.location, course_locations)\r\n\r\n course_locations = store.get_courses_for_wiki('no_such_wiki')\r\n self.assertEqual(len(course_locations), 0)\r\n\r\n # now set toy course to share the wiki with simple course\r\n toy_course = store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))\r\n toy_course.wiki_slug = 'simple'\r\n\r\n course_locations = store.get_courses_for_wiki('toy')\r\n self.assertEqual(len(course_locations), 0)\r\n\r\n course_locations = store.get_courses_for_wiki('simple')\r\n self.assertEqual(len(course_locations), 2)\r\n for course_number in ['toy', 'simple']:\r\n self.assertIn(Location('edX', course_number, '2012_Fall', 'course', '2012_Fall'), course_locations)", "def get_course(self, name):\r\n print \"Importing {0}\".format(name)\r\n\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])\r\n courses = modulestore.get_courses()\r\n self.modulestore = modulestore\r\n self.assertEquals(len(courses), 1)\r\n return courses[0]", "def test_has_course(self):\r\n check_has_course_method(\r\n XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple']),\r\n 
SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),\r\n locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS\r\n )", "def get_course(self, name):\r\n print(\"Importing {0}\".format(name))\r\n\r\n modulestore = XMLModuleStore(\r\n DATA_DIR,\r\n course_dirs=[name],\r\n xblock_mixins=(InheritanceMixin,),\r\n xblock_select=only_xmodules,\r\n )\r\n courses = modulestore.get_courses()\r\n self.assertEquals(len(courses), 1)\r\n return courses[0]", "def get_courses_for_wiki(self, wiki_slug):\r\n courses = []\r\n return courses", "def load_course(self, course_dir, course_ids, tracker):\r\n log.debug('========> Starting course import from {0}'.format(course_dir))\r\n\r\n with open(self.data_dir / course_dir / \"course.xml\") as course_file:\r\n\r\n # VS[compat]\r\n # TODO (cpennington): Remove this once all fall 2012 courses have\r\n # been imported into the cms from xml\r\n course_file = StringIO(clean_out_mako_templating(course_file.read()))\r\n\r\n course_data = etree.parse(course_file, parser=edx_xml_parser).getroot()\r\n\r\n org = course_data.get('org')\r\n\r\n if org is None:\r\n msg = (\"No 'org' attribute set for course in {dir}. \"\r\n \"Using default 'edx'\".format(dir=course_dir))\r\n log.warning(msg)\r\n tracker(msg)\r\n org = 'edx'\r\n\r\n course = course_data.get('course')\r\n\r\n if course is None:\r\n msg = (\"No 'course' attribute set for course in {dir}.\"\r\n \" Using default '{default}'\".format(dir=course_dir,\r\n default=course_dir\r\n )\r\n )\r\n log.warning(msg)\r\n tracker(msg)\r\n course = course_dir\r\n\r\n url_name = course_data.get('url_name', course_data.get('slug'))\r\n policy_dir = None\r\n if url_name:\r\n policy_dir = self.data_dir / course_dir / 'policies' / url_name\r\n policy_path = policy_dir / 'policy.json'\r\n\r\n policy = self.load_policy(policy_path, tracker)\r\n\r\n # VS[compat]: remove once courses use the policy dirs.\r\n if policy == {}:\r\n old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(url_name)\r\n policy = self.load_policy(old_policy_path, tracker)\r\n else:\r\n policy = {}\r\n # VS[compat] : 'name' is deprecated, but support it for now...\r\n if course_data.get('name'):\r\n url_name = Location.clean(course_data.get('name'))\r\n tracker(\"'name' is deprecated for module xml. Please use \"\r\n \"display_name and url_name.\")\r\n else:\r\n raise ValueError(\"Can't load a course without a 'url_name' \"\r\n \"(or 'name') set. 
Set url_name.\")\r\n\r\n course_id = SlashSeparatedCourseKey(org, course, url_name)\r\n if course_ids is not None and course_id not in course_ids:\r\n return None\r\n\r\n def get_policy(usage_id):\r\n \"\"\"\r\n Return the policy dictionary to be applied to the specified XBlock usage\r\n \"\"\"\r\n return policy.get(policy_key(usage_id), {})\r\n\r\n services = {}\r\n if self.i18n_service:\r\n services['i18n'] = self.i18n_service\r\n\r\n system = ImportSystem(\r\n xmlstore=self,\r\n course_id=course_id,\r\n course_dir=course_dir,\r\n error_tracker=tracker,\r\n parent_tracker=self.parent_trackers[course_id],\r\n load_error_modules=self.load_error_modules,\r\n get_policy=get_policy,\r\n mixins=self.xblock_mixins,\r\n default_class=self.default_class,\r\n select=self.xblock_select,\r\n field_data=self.field_data,\r\n services=services,\r\n )\r\n\r\n course_descriptor = system.process_xml(etree.tostring(course_data, encoding='unicode'))\r\n\r\n # If we fail to load the course, then skip the rest of the loading steps\r\n if isinstance(course_descriptor, ErrorDescriptor):\r\n return course_descriptor\r\n\r\n # NOTE: The descriptors end up loading somewhat bottom up, which\r\n # breaks metadata inheritance via get_children(). Instead\r\n # (actually, in addition to, for now), we do a final inheritance pass\r\n # after we have the course descriptor.\r\n compute_inherited_metadata(course_descriptor)\r\n\r\n # now import all pieces of course_info which is expected to be stored\r\n # in <content_dir>/info or <content_dir>/info/<url_name>\r\n self.load_extra_content(system, course_descriptor, 'course_info', self.data_dir / course_dir / 'info', course_dir, url_name)\r\n\r\n # now import all static tabs which are expected to be stored in\r\n # in <content_dir>/tabs or <content_dir>/tabs/<url_name>\r\n self.load_extra_content(system, course_descriptor, 'static_tab', self.data_dir / course_dir / 'tabs', course_dir, url_name)\r\n\r\n self.load_extra_content(system, course_descriptor, 'custom_tag_template', self.data_dir / course_dir / 'custom_tags', course_dir, url_name)\r\n\r\n self.load_extra_content(system, course_descriptor, 'about', self.data_dir / course_dir / 'about', course_dir, url_name)\r\n\r\n log.debug('========> Done with course import from {0}'.format(course_dir))\r\n return course_descriptor", "def test_get_courses_for_wiki(self, default_ms):\r\n self.initdb(default_ms)\r\n course_locations = self.store.get_courses_for_wiki('toy')\r\n self.assertEqual(len(course_locations), 1)\r\n self.assertIn(self.course_locations[self.XML_COURSEID1], course_locations)\r\n\r\n course_locations = self.store.get_courses_for_wiki('simple')\r\n self.assertEqual(len(course_locations), 1)\r\n self.assertIn(self.course_locations[self.XML_COURSEID2], course_locations)\r\n\r\n self.assertEqual(len(self.store.get_courses_for_wiki('edX.simple.2012_Fall')), 0)\r\n self.assertEqual(len(self.store.get_courses_for_wiki('no_such_wiki')), 0)", "def load_courses(cls, config: DefaultConfig):\n lecture_halls = cls.load_course_resource(config.CoursePaths.LECTURER_HALLS.value, LectureHall.create)\n course_units = cls.load_course_resource(config.CoursePaths.COURSE_UNITS.value, CourseUnit.create(lecture_halls))\n lecturers = cls.load_course_resource(config.CoursePaths.LECTURER.value, Lecturer.create(course_units))\n teaching_assistants = cls.load_course_resource(\n config.CoursePaths.TEACHING_ASSISTANTS.value, Lecturer.create(course_units))\n\n return cls(\n course_units=course_units,\n lecturers=lecturers,\n 
teaching_assistants=teaching_assistants\n )", "def test_no_such_course(self):\r\n for course_key in [\r\n\r\n SlashSeparatedCourseKey(*fields)\r\n for fields in [\r\n ['edX', 'simple', 'no_such_course'], ['edX', 'no_such_course', '2012_Fall'],\r\n ['NO_SUCH_COURSE', 'Test_iMport_courSe', '2012_Fall'],\r\n ]\r\n ]:\r\n course = self.store.get_course(course_key)\r\n assert_is_none(course)\r\n assert_false(self.store.has_course(course_key))\r\n mix_cased = SlashSeparatedCourseKey(\r\n course_key.org.lower(), course_key.course.upper(), course_key.run.upper()\r\n )\r\n assert_false(self.store.has_course(mix_cased))\r\n assert_false(self.store.has_course(mix_cased, ignore_case=True))", "def test_get_course_list_with_invalid_course_location(self):\r\n request = self.factory.get('/course')\r\n request.user = self.user\r\n\r\n course_key = SlashSeparatedCourseKey('Org', 'Course', 'Run')\r\n self._create_course_with_access_groups(course_key, self.user)\r\n\r\n # get courses through iterating all courses\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 1)\r\n\r\n # get courses by reversing group name formats\r\n courses_list_by_groups = _accessible_courses_list_from_groups(request)\r\n self.assertEqual(len(courses_list_by_groups), 1)\r\n # check both course lists have same courses\r\n self.assertEqual(courses_list, courses_list_by_groups)\r\n\r\n # now delete this course and re-add user to instructor group of this course\r\n delete_course_and_groups(course_key, commit=True)\r\n\r\n CourseInstructorRole(course_key).add_users(self.user)\r\n\r\n # test that get courses through iterating all courses now returns no course\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 0)\r\n\r\n # now test that get courses by reversing group name formats gives 'ItemNotFoundError'\r\n with self.assertRaises(ItemNotFoundError):\r\n _accessible_courses_list_from_groups(request)", "def test_dashboard_no_courses(self):\r\n self.auth_page.visit()\r\n self.dashboard_page.visit()", "def test_course_index_view_with_course(self):\r\n CourseFactory.create(display_name='Robot Super Educational Course')\r\n resp = self.client.get_html('/course/')\r\n self.assertContains(\r\n resp,\r\n '<h3 class=\"course-title\">Robot Super Educational Course</h3>',\r\n status_code=200,\r\n html=True\r\n )\r\n _test_no_locations(self, resp)", "def _accessible_courses_list(request):\r\n courses = modulestore('direct').get_courses()\r\n\r\n # filter out courses that we don't have access to\r\n def course_filter(course):\r\n \"\"\"\r\n Get courses to which this user has access\r\n \"\"\"\r\n if GlobalStaff().has_user(request.user):\r\n return course.location.course != 'templates'\r\n\r\n return (has_course_access(request.user, course.id)\r\n # pylint: disable=fixme\r\n # TODO remove this condition when templates purged from db\r\n and course.location.course != 'templates'\r\n )\r\n courses = filter(course_filter, courses)\r\n return courses", "def test_get_courses_for_wiki(self):\r\n for course_number in self.courses:\r\n course_locations = self.store.get_courses_for_wiki(course_number)\r\n assert_equals(len(course_locations), 1)\r\n assert_equals(Location('edX', course_number, '2012_Fall', 'course', '2012_Fall'), course_locations[0])\r\n\r\n course_locations = self.store.get_courses_for_wiki('no_such_wiki')\r\n assert_equals(len(course_locations), 0)\r\n\r\n # set toy course to share the wiki with simple course\r\n toy_course = 
self.store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))\r\n toy_course.wiki_slug = 'simple'\r\n self.store.update_item(toy_course)\r\n\r\n # now toy_course should not be retrievable with old wiki_slug\r\n course_locations = self.store.get_courses_for_wiki('toy')\r\n assert_equals(len(course_locations), 0)\r\n\r\n # but there should be two courses with wiki_slug 'simple'\r\n course_locations = self.store.get_courses_for_wiki('simple')\r\n assert_equals(len(course_locations), 2)\r\n for course_number in ['toy', 'simple']:\r\n assert_in(Location('edX', course_number, '2012_Fall', 'course', '2012_Fall'), course_locations)\r\n\r\n # configure simple course to use unique wiki_slug.\r\n simple_course = self.store.get_course(SlashSeparatedCourseKey('edX', 'simple', '2012_Fall'))\r\n simple_course.wiki_slug = 'edX.simple.2012_Fall'\r\n self.store.update_item(simple_course)\r\n # it should be retrievable with its new wiki_slug\r\n course_locations = self.store.get_courses_for_wiki('edX.simple.2012_Fall')\r\n assert_equals(len(course_locations), 1)\r\n assert_in(Location('edX', 'simple', '2012_Fall', 'course', '2012_Fall'), course_locations)", "def test_no_course_id(self):\n run_nbgrader([\"quickstart\"], retcode=1)", "def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []", "def course_pages(self):\n return None", "def course_pages(self):\n return None", "def test_modes_for_course_empty(self):\r\n # shouldn't be able to find a corresponding course\r\n modes = CourseMode.modes_for_course(self.course_key)\r\n self.assertEqual([CourseMode.DEFAULT_MODE], modes)", "def courses(request):\r\n courses = get_courses(request.user, request.META.get('HTTP_HOST'))\r\n courses = sort_by_announcement(courses)\r\n\r\n return render_to_response(\"courseware/courses.html\", {'courses': courses})", "def available_courses(self):\r\n def _get_course_name(el):\r\n # The first component in the link text is the course number\r\n _, course_name = el.text.split(' ', 1)\r\n return course_name\r\n\r\n return self.q(css='section.info > hgroup > h3 > a').map(_get_course_name).results", "def test_quickstart_overwrite_course_folder_if_structure_not_present(self):\n\n run_nbgrader([\"quickstart\", \"example_without_folder_and_config_file\"])\n\n # it should fail if it already exists\n run_nbgrader([\"quickstart\", \"example_without_folder_and_config_file\"], retcode=1)\n\n # should succeed if both source folder and config file are not present.\n shutil.rmtree(os.path.join(\"example_without_folder_and_config_file\", \"source\"))\n os.remove(os.path.join(\"example_without_folder_and_config_file\", \"nbgrader_config.py\"))\n\n run_nbgrader([\"quickstart\", \"example_without_folder_and_config_file\"])\n assert os.path.exists(os.path.join(\"example_without_folder_and_config_file\", \"nbgrader_config.py\"))\n assert os.path.exists(os.path.join(\"example_without_folder_and_config_file\", \"source\"))\n\n # nbgrader validate should work\n os.chdir(\"example_without_folder_and_config_file\")\n for nb in os.listdir(os.path.join(\"source\", \"ps1\")):\n if not nb.endswith(\".ipynb\"):\n continue\n output = run_nbgrader([\"validate\", os.path.join(\"source\", \"ps1\", nb)], stdout=True)\n assert output.strip() == \"Success! 
Your notebook passes all the tests.\"\n\n # nbgrader generate_assignment should work\n run_nbgrader([\"generate_assignment\", \"ps1\"])", "def test_get_course_list(self):\r\n request = self.factory.get('/course/')\r\n request.user = self.user\r\n\r\n course_location = SlashSeparatedCourseKey('Org1', 'Course1', 'Run1')\r\n self._create_course_with_access_groups(course_location, self.user)\r\n\r\n # get courses through iterating all courses\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 1)\r\n\r\n # get courses by reversing group name formats\r\n courses_list_by_groups = _accessible_courses_list_from_groups(request)\r\n self.assertEqual(len(courses_list_by_groups), 1)\r\n # check both course lists have same courses\r\n self.assertEqual(courses_list, courses_list_by_groups)", "def get_courses_for_wiki(self, wiki_slug):\r\n courses = []\r\n for modulestore in self.modulestores.values():\r\n courses.extend(modulestore.get_courses_for_wiki(wiki_slug))\r\n return courses" ]
[ "0.6494664", "0.64453155", "0.6420257", "0.64079463", "0.63854814", "0.6376241", "0.6313256", "0.6268793", "0.6231201", "0.6211336", "0.6141695", "0.61415595", "0.60863847", "0.6081872", "0.60563177", "0.5995458", "0.5963661", "0.5940306", "0.5938993", "0.5885596", "0.5870694", "0.58323413", "0.5794421", "0.5794421", "0.57803804", "0.5778725", "0.577287", "0.5762272", "0.5738951", "0.5737463" ]
0.7734044
0
Test all methods inside the Pizza class.
def test_pizza_class():
    # __init__()
    small = MenuItem("Small", "Pizza size", False, 0.0, 1)
    medium = MenuItem("Medium", "Pizza size", False, 4.0, 1)
    topping1 = MenuItem("Extra cheese", "Topping", False, 2.0, 1)
    topping2 = MenuItem("Special sauce", "Topping", False, 3.0, 1)
    pizza_menu_item1 = MenuItem("Pepperoni", "Pizzas", True, 10.0, 1)
    pizza_menu_item2 = MenuItem("Meatball", "Pizzas", True, 12.0, 1)
    pizza1 = Pizza(pizza_menu_item1, small)
    pizza2 = Pizza(pizza_menu_item2, small)
    pizza3 = Pizza(pizza_menu_item1, medium)

    # __eq__()
    assert pizza1 != pizza3
    assert pizza1 != pizza2
    pizza3.set_size(small)
    assert pizza1 == pizza3

    # Getter methods except get_price()
    assert pizza1.get_toppings() == []
    assert pizza1.get_size() == small
    assert pizza1.get_attributes() == [AMOUNT, TOPPINGS, SIZE_UPGRADE]

    # Setter methods
    pizza1.set_attribute(TOPPINGS, {topping1: 2})
    assert pizza1.get_toppings()[0].get_amount() == 2
    pizza1.set_attribute(AMOUNT, 2)
    assert pizza1.get_amount() == 2
    pizza1.set_attribute(SIZE_UPGRADE, small)
    assert pizza1.get_size() == small
    pizza1.set_size(medium)
    assert pizza1.get_size() == medium
    pizza1.set_topping(topping1, 3)
    assert pizza1.get_toppings()[0].get_amount() == 3
    pizza1.set_topping(topping2, 1)
    assert pizza1.get_toppings()[1].get_amount() == 1
    pizza1.set_amount(1)

    # get_price()
    assert pizza1.get_price() == 23

    # __str__()
    topping1_str = "\t\t3 Extra cheese\n"
    topping2_str = "\t\t1 Special sauce\n"
    expected_str = "($23.00) 1 Medium Pepperoni pizza(s) with the " \
                   "following toppings:\n" + topping1_str + topping2_str
    assert pizza1.__str__() == expected_str

    # _check_toppings()
    pizza3.set_topping(topping1, 3)
    pizza3.set_topping(topping2, 1)
    assert pizza1._check_toppings(pizza3)
    pizza3.set_topping(topping1, 0)
    assert not pizza1._check_toppings(pizza3)
    pizza3.set_topping(topping1, 1)
    assert not pizza1._check_toppings(pizza3)
    pizza3.set_topping(topping1, 0)
    pizza1.set_topping(topping2, 0)
    assert not pizza1._check_toppings(pizza3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_food(self):\n pass", "def test_required_methods(self):", "def test_poets_get(self):\n pass", "def test_post_foods(self):\n pass", "def test_get_foods(self):\n pass", "def test_method(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def tests():", "def test(self):", "def test(self):", "def test(self):\n pass", "def test_theft_and_stealing(self):", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_create_pizza(self):\n url = reverse('pizzas-list')\n data = {'name': 'Quattro Formaggio'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Pizza.objects.count(), 1)\n self.assertEqual(Pizza.objects.get().name, 'Quattro Formaggio')", "def test_basic_execution(self):", "def test_something():", "def test_generate_all_testing(self):\n pass", "def test_post_foods_list(self):\n pass", "def test_setters(self, name, num_petals, price):\n flower = chap2.Flower('Iris', 8, 3.27)\n with pytest.raises(AssertionError):\n flower.set_name(name)\n with pytest.raises(AssertionError):\n flower.set_num_petals(num_petals)\n with pytest.raises(AssertionError):\n flower.set_price(price)", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def test_order_and_delivery_class():\n # Test Order methods first\n # __init__()\n order = Order(1)\n cheese = MenuItem(\"cheese\", \"Pizzas\", True, 10.00, 1)\n fanta = MenuItem(\"fanta\", \"Drinks\", True, 1.00, 1)\n fries = MenuItem(\"fries\", \"Sides\", True, 5.00, 1)\n extra_mushroom = MenuItem(\"extra mushrooms\", \"toppings\", False, 1.00, 1)\n medium = MenuItem(\"medium\", \"size\", False, 5.00, 1)\n\n pizza = Pizza(cheese, medium)\n pizza.set_topping(extra_mushroom, 2)\n drink = Drink(fanta, medium)\n side = Side(fries)\n\n # Getter methods except price\n assert order.get_cart() == []\n assert order.get_order_number() == 1\n\n # add_to_cart(), remove_from_cart(), and set_item()\n order.add_to_cart(pizza)\n order.add_to_cart(drink)\n order.add_to_cart(side)\n assert len(order.get_cart()) == 3\n assert order.remove_from_cart(1)\n assert not order.remove_from_cart(3242)\n assert len(order.get_cart()) == 2\n order.add_to_cart(pizza)\n order.add_to_cart(pizza)\n assert order.get_cart()[2].get_amount() == 2\n\n wrong_input = {\"position\": 432}\n input_dict = {\"position\": 1, \"attributes\": {ICE: False}}\n assert not order.set_item(wrong_input)\n assert order.set_item(input_dict)\n assert not drink.get_ice()\n\n # get_total_price()\n assert order.get_total_price() == pizza.get_price() + drink.get_price() + \\\n side.get_price()\n\n # _get_equivalent_item()\n none_item = MenuItem(\"none\", \"none\", True, 234.00, 32)\n assert not order._get_equivalent_item(none_item)\n\n # is_valid_position\n assert order.is_valid_position(1)\n assert not order.is_valid_position(34523)\n\n # __str__()\n expected_str = \"Your order number is 1.\\n\" + ORDER_INTRODUCTION_MSG + \\\n \"\\t\" + drink.__str__() + \"\\t\" + side.__str__() + \"\\t\" + \\\n pizza.__str__() + \\\n \"\\nThe total price is ${:.2f}.\".format(\n order.get_total_price())\n assert expected_str == order.__str__()\n\n # Test Delivery methods\n # __init__()\n delivery = Delivery(order, \"pizza place\")\n\n # deliver() and set_address()\n assert not delivery.deliver()\n delivery.set_address(\"42 Corniche St.\")\n assert delivery.deliver()\n\n # Getter methods\n assert 
delivery.get_order().get_cart() == order.get_cart()\n assert delivery.get_address() == \"42 Corniche St.\"\n assert delivery.get_deliverer() == \"pizza place\"\n\n # make_dict()\n expected_dict = {\"Order Number\": 1,\n \"Address\": \"42 Corniche St.\",\n \"Order Details\": order.__str__()}\n assert expected_dict == delivery.make_dict()", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def runTests(self):\n \n pass", "def test_all_no_class(self):" ]
[ "0.69464415", "0.6795874", "0.6673222", "0.6660429", "0.65980023", "0.65783155", "0.64330643", "0.64330643", "0.64330643", "0.64330643", "0.64330643", "0.6398508", "0.62931794", "0.62931794", "0.6187191", "0.61704147", "0.61076164", "0.61076164", "0.61076164", "0.6073124", "0.60705006", "0.60599947", "0.6057021", "0.6047439", "0.6046957", "0.6041233", "0.6037814", "0.60249025", "0.6009865", "0.598806" ]
0.69786686
0
Test all methods inside the Drink class.
def test_drink_class():
    # __init__()
    default_size = MenuItem("Small size", "Drink size", False, 0.0, 1)
    size_upgrade = MenuItem("Medium size", "Drink size", False, 1.0, 1)
    drink_menu_item1 = MenuItem("Coca Cola", "Drinks", True, 1.0, 1)
    drink_menu_item2 = MenuItem("Fanta", "Drinks", True, 1.5, 1)
    drink1 = Drink(drink_menu_item1, default_size)
    drink2 = Drink(drink_menu_item2, default_size)
    drink3 = Drink(drink_menu_item1, size_upgrade)
    drink3.set_ice(False)

    # Getter methods except get_price
    assert drink1.get_ice()
    assert drink1.get_size() == default_size
    assert drink1.get_attributes() == [AMOUNT, ICE, SIZE_UPGRADE]

    # Setter methods and __eq__()
    assert drink1 != drink2
    drink1.set_size(size_upgrade)
    assert drink1.get_size() == size_upgrade
    drink1.set_ice(False)
    assert not drink1.get_ice()
    drink1.set_amount(2)
    assert drink1.get_amount() == 2
    assert drink1 == drink3

    # get_price()
    assert drink1.get_price() == 4.0

    expected_str = "($4.00) 2 Medium size Coca Cola drink(s) with no ice.\n"
    assert drink1.__str__() == expected_str
    drink1.set_ice(True)
    expected_str = "($4.00) 2 Medium size Coca Cola drink(s) with ice.\n"
    assert drink1.__str__() == expected_str

    # set_attributes()
    drink1.set_attribute(AMOUNT, 312)
    assert drink1.get_amount() == 312
    drink1.set_attribute(ICE, True)
    assert drink1.get_ice()
    drink1.set_attribute(SIZE_UPGRADE, default_size)
    assert drink1.get_size() == default_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_theft_and_stealing(self):", "def test_get_food(self):\n pass", "def test_required_methods(self):", "def test_post_foods(self):\n pass", "def setUp(self) -> None:\n print(\"testing Deaths Class...\")\n self.data_handler_1 = self._init_mocked_data_handler(json_file_path=\"json_files/deaths_mocked_data.json\",\n resource_id_enum=ResourceId.DEATHS_DATA_RESOURCE_ID)\n self._check_base_step_of_all_methods(data_handler=self.data_handler_1, class_type=Deaths)", "def tests():", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_get_dealer_ratings(self):\n pass", "def test_get_foods(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_method(self):", "def test_arc_smear(self):", "def test(self):", "def test(self):", "def main():\n\n tea_bag = Flavour('Tea')\n hot_water = Water('Hot Water')\n semi_skimmed = Milk('Semi-Skimmed Milk')\n no_sugar = Sugar('No Sugar')\n\n print make_drink(tea_bag, hot_water, semi_skimmed, no_sugar)\n\n sour_milk = Milk.BAD_MILK\n print make_drink(tea_bag, hot_water, sour_milk, no_sugar)\n\n salt = Sugar.INVALID_SUGAR\n print make_drink(tea_bag, hot_water, semi_skimmed, salt)", "def runTests(self):\n \n pass", "def test_turtle(self):\n assert not inspection.is_fixture_method(DummyTestCase.turtle_method)", "def test(self):\n raise NotImplementedError", "def test_get_boat(self):\n pass", "def run_tests():\n good_car = UnreliableCar(\"Good Car\", 100, 90)\n bad_car = UnreliableCar(\"Bad Car\", 100, 10)\n\n for i in range(1, 15):\n print(\"Attempting to drive {}km:\".format(i))\n print(\"{:12} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:12} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n\n \"\"\"final states of the cars\"\"\"\n print(good_car)\n print(bad_car)", "def run_tests(self):\n raise NotImplementedError", "def runtest(self):" ]
[ "0.6496863", "0.64672583", "0.63941574", "0.6384117", "0.6376888", "0.62080526", "0.6195716", "0.6195716", "0.6195716", "0.6195716", "0.6195716", "0.61877775", "0.6143858", "0.6119874", "0.60925025", "0.60861033", "0.60861033", "0.60861033", "0.6076404", "0.60213035", "0.6008965", "0.6008965", "0.6005336", "0.5952512", "0.59171396", "0.5913876", "0.58811843", "0.5855958", "0.5849815", "0.5848176" ]
0.6493183
1
Test all methods inside the Side class.
def test_side_class():
    # __init__()
    side_menu_item1 = MenuItem("Fries", "Sides", True, 4.0, 1)
    side_menu_item2 = MenuItem("Salad", "Sides", True, 3.0, 1)
    side1 = Side(side_menu_item1)
    side2 = Side(side_menu_item2)
    side3 = Side(side_menu_item1)

    # Getter methods
    assert side1.get_sauces() == 1
    assert side1.get_attributes() == [AMOUNT, SAUCES]

    # Setter methods and __eq__()
    side1.set_sauces(3)
    side2.set_sauces(3)
    side3.set_sauces(2)
    assert side1 != side2
    assert side1 != side3
    side3.set_sauces(3)
    assert side3.get_sauces() == 3
    assert side1 == side3
    side3.set_attribute(SAUCES, 1)
    assert side3.get_sauces() == 1
    side3.set_attribute(AMOUNT, 2)
    assert side3.get_amount() == 2

    expected_price = 4.0
    assert side1.get_price() == expected_price

    expected_str = "($4.00) 1 Fries with 3 sauces each.\n"
    assert side1.__str__() == expected_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_required_methods(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_method(self):", "def test(self):\n pass", "def test_method(self):\n self.assertEqual(self.method, 'modified strong collision')", "def runTests(self):\n \n pass", "def test(self):", "def test(self):", "def tests():", "def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #stylesheet_test\n #self.stylesheet_test(self.wdgt_explanation)\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test(self):\n raise NotImplementedError", "def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def test_subsystems(self):\n pass", "def test_class_methods(self):\n\n x = BaseTransformer()\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")\n\n h.test_object_method(\n obj=x, expected_method=\"columns_set_or_check\", msg=\"columns_set_or_check\"\n )\n\n h.test_object_method(\n obj=x, expected_method=\"columns_check\", msg=\"columns_check\"\n )", "def run_tests(self):\n raise NotImplementedError", "async def run(self):\n print(\"\".join((\"-\" * 8, type(self).__name__, \"-\" * 8)))\n for method_name in dir(self):\n if not method_name.startswith(\"test\"):\n continue\n print(method_name, end=\"... \")\n try:\n await getattr(self, method_name)()\n except AssertionError:\n print(\"FAIL\")\n traceback.print_exception(*sys.exc_info())\n except Exception: # pylint: disable=broad-except\n print(\"ERROR\")\n traceback.print_exception(*sys.exc_info())\n else:\n print(\"PASS\")\n print()", "def inner_test():\n pass", "def inner_test():\n pass", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def runtest(self):", "def test_class_method(self):\n self.assertEqual(self.Test.scoped.im_self.__name__, 'Test')", "def test_bed(self):\n #TODO write bed tests", "def test_let(self):" ]
[ "0.6895903", "0.6433451", "0.6433451", "0.6433451", "0.6433451", "0.6433451", "0.6388006", "0.6334689", "0.6332678", "0.6311027", "0.6272724", "0.6272724", "0.624409", "0.62368387", "0.6206552", "0.6206552", "0.6206552", "0.619093", "0.6120962", "0.6083485", "0.60834163", "0.60540587", "0.6026337", "0.60062337", "0.60062337", "0.60049707", "0.5993319", "0.5979584", "0.59545624", "0.5937027" ]
0.6743495
1
Test all methods inside the ItemFactory class.
def test_item_factory_class():
    # __init__()
    factory = ItemFactory()
    pizza_menuitem = MenuItem("cheese", "Pizzas", True, 10.0, 1)
    drink_menuitem = MenuItem("fanta", "Drinks", True, 10.0, 1)
    side_menuitem = MenuItem("fries", "Sides", True, 10.0, 1)
    none_menuitem = MenuItem("oreo", "oreo", True, 10.0, 1)
    medium = MenuItem("medium", "size", False, 4.0, 1)

    # create_item()
    expected_pizza = Pizza(pizza_menuitem, medium)
    expected_drink = Drink(drink_menuitem, medium)
    expected_side = Side(side_menuitem)
    pizza = factory.create_item(pizza_menuitem, medium)
    assert pizza == expected_pizza
    assert factory.create_item(drink_menuitem, medium) == expected_drink
    assert factory.create_item(side_menuitem) == expected_side
    assert not factory.create_item(none_menuitem, medium)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def test_item_factory(self):\r\n course = CourseFactory.create()\r\n item = ItemFactory.create(parent_location=course.location)\r\n self.assertIsInstance(item, SequenceDescriptor)", "def setUp(self):\n self.new_inv_item = ['1', 'Knife Set', 10, 'n', 'n']\n self.new_furn_item = ['2', 'Couch', 25, 'y', 'Cloth', 'L']\n self.new_elec_item = ['3', 'Dryer', 100, 'n', 'y', 'Samsung', 12]", "def test_new_item(self):\n\n\t\titem_id = mock_item()[0]\n\t\tself.assertEqual(item_id, 1)", "def setUp(self):\n self.item = ElectricAppliances('10', 'test', '4', '5', 'GE', '110')", "def setUp(self):\n self.item = Furniture('11', 'sofa', '4', '5', 'suede', 'xl')", "def mock_item(title='Item One', author='Author One', location='Location One'):\n\n\titem_data = {'title': title, 'author': author, 'location': location}\n\n\treturn models.new_item(item_data), title, author, location", "def test_get_order_items(self):\n pass", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', '[email protected]')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_find_stock_items(self):\n pass", "def setUp(self):\n self.item_id = \"mss37820001\"", "def test_gethardwares_item(self):\n pass", "def test_retrieve_item(self):\n\n\t\titem_id, title, author, location = mock_item()\n\t\titem = models.item(item_id)\n\n\t\tself.assertIsNotNone(item)\n\t\tself.assertEqual(item['id'], item_id)\n\t\tself.assertEqual(item['title'], title)\n\t\tself.assertEqual(item['author'], author)\n\t\tself.assertEqual(item['location'], location)", "def setUp(self):\n self.business_item_class = BusinessesClass()", "def setUp(self):\n super(ItemTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n \n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, '[email protected]', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n \n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='[email protected]', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n \n \n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.po.order_date = datetime.datetime(2014, 3, 2)\n 
self.po.save()\n \n self.item = Item(unit_cost=Decimal('13.55'), quantity=Decimal('10'), supply=self.supply)\n self.item.description = self.supply.description\n self.item.purchase_order = self.po\n self.item.save()", "def setUp(self):\r\n self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)\r\n self.chapter1 = ItemFactory.create(\r\n parent_location=self.course.location,\r\n display_name=\"chapter1\",\r\n category='chapter')\r\n self.section1 = ItemFactory.create(\r\n parent_location=self.chapter1.location,\r\n display_name=\"section1\",\r\n category='sequential')\r\n self.chapter2 = ItemFactory.create(\r\n parent_location=self.course.location,\r\n display_name=\"chapter2\",\r\n category='chapter')\r\n self.section2 = ItemFactory.create(\r\n parent_location=self.chapter2.location,\r\n display_name=\"section2\",\r\n category='sequential')\r\n\r\n # creates one draft and one published lti module, in different sections\r\n self.lti_published = ItemFactory.create(\r\n parent_location=self.section1.location,\r\n display_name=\"lti published\",\r\n category=\"lti\",\r\n location=self.course.id.make_usage_key('lti', 'lti_published'),\r\n )\r\n self.lti_draft = ItemFactory.create(\r\n parent_location=self.section2.location,\r\n display_name=\"lti draft\",\r\n category=\"lti\",\r\n location=self.course.id.make_usage_key('lti', 'lti_published').replace(revision='draft'),\r\n )", "def test_transition(self):\n # Make sure we push the upgraded items out of cache\n gc.collect()\n\n self.assertEqual(self.store.getItemByID(1).attribute, 'one')\n self.assertEqual(\n self.store.findUnique(Dummy, Dummy.attribute == 'two').storeID,\n 2)\n self.assertRaises(ItemNotFound, self.store.getItemByID, 3)\n i2 = self.store.getItemByID(4)\n self.assertEqual(i2.attribute, 'four')\n self.assertIsInstance(i2, Dummy2)", "def setUp(self):\n super(BaseSearchPageViewTestCase, self).setUp()\n SearchPriceFactory(\n type=SearchPrice.SEARCH_PRICE_LETTING,\n label='100',\n price=100\n )\n SearchPriceFactory(\n type=SearchPrice.SEARCH_PRICE_LETTING,\n label='200',\n price=200\n )\n SearchPriceFactory(\n type=SearchPrice.SEARCH_PRICE_SALE,\n label='40000',\n price=40000\n )\n SearchPriceFactory(\n type=SearchPrice.SEARCH_PRICE_SALE,\n label='50000',\n price=50000\n )", "def test_get_item_details(self, mock_requests_get):\n details = resources.get_item_details(21787)\n\n item = details.item\n assert item.id == 21787\n assert item.name == \"Steadfast boots\"\n assert item.type == \"Miscellaneous\"\n assert item.current.price == 5900000\n assert item.today.price == -138200\n assert item.members is True", "def test_feed_item_creation(self):\n title = 'title'\n link = 'link'\n description = 'description'\n item = FeedItem(title, link, description)\n assert isinstance(item, FeedItem)\n assert item.title == title\n assert item.link == link\n assert item.description == description", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_item_count(self):\n self.assertEqual(len(self.items), 2)", "def test_item_id(item):\n assert item.item_id == 'exopy_pulses.Item'", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n 
self.assertEqual(\n item.from_date,\n datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2),\n )\n self.assertEqual(\n item.to_date,\n datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59),\n )", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(item.from_date, datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2))\n self.assertEqual(item.to_date, datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59))", "def test_add_stock_item(self):\n pass" ]
[ "0.72097385", "0.6974505", "0.69647765", "0.68578804", "0.68017167", "0.67930925", "0.6737591", "0.666116", "0.6648063", "0.66393995", "0.6633347", "0.6621216", "0.65884066", "0.64894754", "0.6486753", "0.6479011", "0.63990486", "0.6379815", "0.63780177", "0.63681114", "0.63583773", "0.63583773", "0.63583773", "0.63583773", "0.63583773", "0.6342611", "0.6341547", "0.62775403", "0.6274045", "0.6271731" ]
0.77116483
0
Tests methods of all Delivery subclasses.
def test_delivery_subclasses():
    # Start with UberEatsDelivery class
    # __init__()
    order = Order(1)
    fries = MenuItem("fries", "Sides", True, 5.00, 1)
    order.add_to_cart(fries)
    uber_eats = UberEatsDelivery(order, "Test1.json")
    deliver_error = UberEatsDelivery(order, "FileDoesNotExist.jsdson")
    uber_eats.set_address("some address")

    # Getter methods
    assert uber_eats.get_file() == "Test1.json"
    assert uber_eats.get_deliverer() == UBER_EATS

    # deliver()
    assert uber_eats.deliver()
    assert not deliver_error.deliver()

    # FoodoraDelivery class
    # __init__()
    foodora = FoodoraDelivery(order, "Test2.csv")
    deliver_error = FoodoraDelivery(order, "FileDoesNotExist.cdwasv")
    foodora.set_address("some address")

    # Getter methods
    assert foodora.get_file() == "Test2.csv"
    assert foodora.get_deliverer() == FOODORA

    # deliver()
    assert foodora.deliver()
    assert not deliver_error.deliver()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delivery_factory_class():\n # __init__()\n factory = DeliveryFactory()\n order = Order(1)\n file = \"This is a file.\"\n\n expected_uber = UberEatsDelivery(order, file)\n expected_foodora = FoodoraDelivery(order, file)\n expected_delivery = Delivery(order, \"not uber or foodora\")\n\n assert factory.create_delivery(order, UBER_EATS, file).get_deliverer() == \\\n expected_uber.get_deliverer()\n assert factory.create_delivery(order, FOODORA, file).get_deliverer() == \\\n expected_foodora.get_deliverer()\n assert factory.create_delivery(order, \"not uber or foodora\", file).\\\n get_deliverer() == expected_delivery.get_deliverer()", "def test_create_confirm_delivery_details(self):\n pass", "def test_list_delivery_usage(self):\n pass", "def test_order_and_delivery_class():\n # Test Order methods first\n # __init__()\n order = Order(1)\n cheese = MenuItem(\"cheese\", \"Pizzas\", True, 10.00, 1)\n fanta = MenuItem(\"fanta\", \"Drinks\", True, 1.00, 1)\n fries = MenuItem(\"fries\", \"Sides\", True, 5.00, 1)\n extra_mushroom = MenuItem(\"extra mushrooms\", \"toppings\", False, 1.00, 1)\n medium = MenuItem(\"medium\", \"size\", False, 5.00, 1)\n\n pizza = Pizza(cheese, medium)\n pizza.set_topping(extra_mushroom, 2)\n drink = Drink(fanta, medium)\n side = Side(fries)\n\n # Getter methods except price\n assert order.get_cart() == []\n assert order.get_order_number() == 1\n\n # add_to_cart(), remove_from_cart(), and set_item()\n order.add_to_cart(pizza)\n order.add_to_cart(drink)\n order.add_to_cart(side)\n assert len(order.get_cart()) == 3\n assert order.remove_from_cart(1)\n assert not order.remove_from_cart(3242)\n assert len(order.get_cart()) == 2\n order.add_to_cart(pizza)\n order.add_to_cart(pizza)\n assert order.get_cart()[2].get_amount() == 2\n\n wrong_input = {\"position\": 432}\n input_dict = {\"position\": 1, \"attributes\": {ICE: False}}\n assert not order.set_item(wrong_input)\n assert order.set_item(input_dict)\n assert not drink.get_ice()\n\n # get_total_price()\n assert order.get_total_price() == pizza.get_price() + drink.get_price() + \\\n side.get_price()\n\n # _get_equivalent_item()\n none_item = MenuItem(\"none\", \"none\", True, 234.00, 32)\n assert not order._get_equivalent_item(none_item)\n\n # is_valid_position\n assert order.is_valid_position(1)\n assert not order.is_valid_position(34523)\n\n # __str__()\n expected_str = \"Your order number is 1.\\n\" + ORDER_INTRODUCTION_MSG + \\\n \"\\t\" + drink.__str__() + \"\\t\" + side.__str__() + \"\\t\" + \\\n pizza.__str__() + \\\n \"\\nThe total price is ${:.2f}.\".format(\n order.get_total_price())\n assert expected_str == order.__str__()\n\n # Test Delivery methods\n # __init__()\n delivery = Delivery(order, \"pizza place\")\n\n # deliver() and set_address()\n assert not delivery.deliver()\n delivery.set_address(\"42 Corniche St.\")\n assert delivery.deliver()\n\n # Getter methods\n assert delivery.get_order().get_cart() == order.get_cart()\n assert delivery.get_address() == \"42 Corniche St.\"\n assert delivery.get_deliverer() == \"pizza place\"\n\n # make_dict()\n expected_dict = {\"Order Number\": 1,\n \"Address\": \"42 Corniche St.\",\n \"Order Details\": order.__str__()}\n assert expected_dict == delivery.make_dict()", "def test_ship_orders(self):\n pass", "def test_case_1(self):\n\n orders_data = generate_orders_data([30, 10, 20])\n assigned_delivery_vehicles = SlotDelivery.assign_new_batch_order_delivery(\n slot_number=1, orders=orders_data)\n\n bike = VehicleType.objects.get(name='bike')\n 
self.assertEqual(len(assigned_delivery_vehicles), 2) # 2 vehicles\n self.assertEqual(\n sum(dv.vehicle_type == bike for dv in assigned_delivery_vehicles), 2\n ) # 2 bike", "def test_case_2(self):\n\n orders_data = generate_orders_data([50, 50])\n assigned_delivery_vehicles = SlotDelivery.assign_new_batch_order_delivery(\n slot_number=1, orders=orders_data)\n\n scooter = VehicleType.objects.get(name='scooter')\n self.assertEqual(len(assigned_delivery_vehicles), 2) # 2 vehicles\n self.assertEqual(\n sum(dv.vehicle_type == scooter for dv in assigned_delivery_vehicles), 2\n ) # 2 scooters", "def test_get_shipment(self):\n pass", "def test_01_base(self):\n # Create/validate PO\n order = self.create_and_validate_po()\n\n # Validate picking\n picking = order.picking_ids[0]\n picking.do_transfer()\n self.assertEqual(picking.state, 'done')", "def test_subsystems(self):\n pass", "def test_subclasses(self):\n subclasses = Route.get_subclasses()\n self.assertIn(RouteSubclass, subclasses)", "def test_required_methods(self):", "def setUp(self):\n super(PurchaseOrderTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, '[email protected]', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n self.client.force_authenticate(self.user)\n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='[email protected]', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n\n # Create Custom Supply\n # not implemented\n\n # Create Fabric\n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n self.supply1 = self.supply\n \n self.product = Product(supply=self.supply, supplier=self.supplier, cost=base_fabric['unit_cost'],\n purchasing_units='m')\n self.product.save()\n self.supply2 = Fabric.create(**base_fabric2)\n self.supply2.discount = 5\n self.supply2.save()\n self.product2 = Product(supply=self.supply2, supplier=self.supplier, cost=base_fabric['unit_cost'])\n self.product2.save()\n self.supply1.supplier = self.supplier\n self.supply2.supplier = self.supplier\n \n\n #Create supply with no target item\n self.supply3 = Supply.objects.create(description='test supply')\n self.supply3.id = 203\n self.supply3.save()\n\n #Create a project\n self.project = Project()\n self.project.codename = 'MC House'\n self.project.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.order_date = datetime.datetime(2017, 1, 15, 15, 30, 0, 0, tzinfo=timezone('Asia/Bangkok'))\n self.po.order_date = self.order_date\n self.po.receive_date = datetime.datetime.now()\n self.po.save()\n #self.po.create_and_upload_pdf()\n \n self.item = Item.create(supplier=self.supplier, id=1, **base_purchase_order['items'][0])\n self.item.purchase_order = self.po\n self.item.save()\n \n self.po.calculate_total()\n self.po.save()", 
"def test_create_shipment(self):\n pass", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def test_should_implement(self):\n pass", "def test_all_no_class(self):", "def test_all_no_class(self):", "def test_make_delivery_report_with_delivery_statuss(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_delivery_report(delivery_status='pending')\n self.assert_message_fields(event, {\n 'event_type': 'delivery_report',\n 'delivery_status': 'pending',\n })", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_make_delivery_report_default(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_delivery_report()\n self.assert_message_fields(event, {\n 'event_type': 'delivery_report',\n 'delivery_status': 'delivered',\n })", "def test_subscribe_offer(self):\n pass", "def test_all(self):\n self.assertEqual(type(self.route_list), type([]))\n [self.assertEqual(type(i), BusRoute) for i in self.route_list]", "def setUp(self):\n super(Orders, self).setUp()", "def testBaseClass(self):\n object = SubClassTest()\n self.assertEqual(object.foo(), \"foo\")\n self.assertEqual(TestFunctions.test_foo(object), \"foo\")\n self.assertEqual(object.bar(\"bar\", 2), \"bar\")\n self.assertEqual(TestFunctions.test_bar(object, \"bar\", 2), \"bar\")\n self.assertEqual(object.not_overriden(), \"not_overriden\")\n self.assertEqual(list(object.return_list()), [\"a\", \"b\", \"c\"])\n self.assertEqual(list(SubClassTest.test_list(object)), [\"a\", \"b\", \"c\"])", "def test_process_subscriptions(self):\n pass", "def _test(self):", "def _test(self):" ]
[ "0.6932525", "0.67744064", "0.6736943", "0.6681401", "0.6332729", "0.6103171", "0.60782915", "0.59496444", "0.5907", "0.59045416", "0.5860093", "0.5786136", "0.5719699", "0.5714881", "0.5705465", "0.5688725", "0.5654305", "0.5654305", "0.5642396", "0.5632793", "0.5632793", "0.5632793", "0.5615049", "0.5613199", "0.5601592", "0.559317", "0.55902183", "0.55901396", "0.55580866", "0.55580866" ]
0.8624718
0
Test all methods in DeliveryFactory class.
def test_delivery_factory_class():
    # __init__()
    factory = DeliveryFactory()
    order = Order(1)
    file = "This is a file."

    expected_uber = UberEatsDelivery(order, file)
    expected_foodora = FoodoraDelivery(order, file)
    expected_delivery = Delivery(order, "not uber or foodora")

    assert factory.create_delivery(order, UBER_EATS, file).get_deliverer() == \
        expected_uber.get_deliverer()
    assert factory.create_delivery(order, FOODORA, file).get_deliverer() == \
        expected_foodora.get_deliverer()
    assert factory.create_delivery(order, "not uber or foodora", file).\
        get_deliverer() == expected_delivery.get_deliverer()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delivery_subclasses():\n # Start with UberEatsDelivery class\n # __init__()\n order = Order(1)\n fries = MenuItem(\"fries\", \"Sides\", True, 5.00, 1)\n order.add_to_cart(fries)\n uber_eats = UberEatsDelivery(order, \"Test1.json\")\n deliver_error = UberEatsDelivery(order, \"FileDoesNotExist.jsdson\")\n uber_eats.set_address(\"some address\")\n\n # Getter methods\n assert uber_eats.get_file() == \"Test1.json\"\n assert uber_eats.get_deliverer() == UBER_EATS\n\n # deliver()\n assert uber_eats.deliver()\n assert not deliver_error.deliver()\n\n # FoodoraDelivery class\n # __init__()\n foodora = FoodoraDelivery(order, \"Test2.csv\")\n deliver_error = FoodoraDelivery(order, \"FileDoesNotExist.cdwasv\")\n foodora.set_address(\"some address\")\n\n # Getter methods\n assert foodora.get_file() == \"Test2.csv\"\n assert foodora.get_deliverer() == FOODORA\n\n # deliver()\n assert foodora.deliver()\n assert not deliver_error.deliver()", "def test_create_confirm_delivery_details(self):\n pass", "def test_list_delivery_usage(self):\n pass", "def test_order_and_delivery_class():\n # Test Order methods first\n # __init__()\n order = Order(1)\n cheese = MenuItem(\"cheese\", \"Pizzas\", True, 10.00, 1)\n fanta = MenuItem(\"fanta\", \"Drinks\", True, 1.00, 1)\n fries = MenuItem(\"fries\", \"Sides\", True, 5.00, 1)\n extra_mushroom = MenuItem(\"extra mushrooms\", \"toppings\", False, 1.00, 1)\n medium = MenuItem(\"medium\", \"size\", False, 5.00, 1)\n\n pizza = Pizza(cheese, medium)\n pizza.set_topping(extra_mushroom, 2)\n drink = Drink(fanta, medium)\n side = Side(fries)\n\n # Getter methods except price\n assert order.get_cart() == []\n assert order.get_order_number() == 1\n\n # add_to_cart(), remove_from_cart(), and set_item()\n order.add_to_cart(pizza)\n order.add_to_cart(drink)\n order.add_to_cart(side)\n assert len(order.get_cart()) == 3\n assert order.remove_from_cart(1)\n assert not order.remove_from_cart(3242)\n assert len(order.get_cart()) == 2\n order.add_to_cart(pizza)\n order.add_to_cart(pizza)\n assert order.get_cart()[2].get_amount() == 2\n\n wrong_input = {\"position\": 432}\n input_dict = {\"position\": 1, \"attributes\": {ICE: False}}\n assert not order.set_item(wrong_input)\n assert order.set_item(input_dict)\n assert not drink.get_ice()\n\n # get_total_price()\n assert order.get_total_price() == pizza.get_price() + drink.get_price() + \\\n side.get_price()\n\n # _get_equivalent_item()\n none_item = MenuItem(\"none\", \"none\", True, 234.00, 32)\n assert not order._get_equivalent_item(none_item)\n\n # is_valid_position\n assert order.is_valid_position(1)\n assert not order.is_valid_position(34523)\n\n # __str__()\n expected_str = \"Your order number is 1.\\n\" + ORDER_INTRODUCTION_MSG + \\\n \"\\t\" + drink.__str__() + \"\\t\" + side.__str__() + \"\\t\" + \\\n pizza.__str__() + \\\n \"\\nThe total price is ${:.2f}.\".format(\n order.get_total_price())\n assert expected_str == order.__str__()\n\n # Test Delivery methods\n # __init__()\n delivery = Delivery(order, \"pizza place\")\n\n # deliver() and set_address()\n assert not delivery.deliver()\n delivery.set_address(\"42 Corniche St.\")\n assert delivery.deliver()\n\n # Getter methods\n assert delivery.get_order().get_cart() == order.get_cart()\n assert delivery.get_address() == \"42 Corniche St.\"\n assert delivery.get_deliverer() == \"pizza place\"\n\n # make_dict()\n expected_dict = {\"Order Number\": 1,\n \"Address\": \"42 Corniche St.\",\n \"Order Details\": order.__str__()}\n assert expected_dict == delivery.make_dict()", 
"def test_create_shipment(self):\n pass", "def test_ship_orders(self):\n pass", "def test_case_1(self):\n\n orders_data = generate_orders_data([30, 10, 20])\n assigned_delivery_vehicles = SlotDelivery.assign_new_batch_order_delivery(\n slot_number=1, orders=orders_data)\n\n bike = VehicleType.objects.get(name='bike')\n self.assertEqual(len(assigned_delivery_vehicles), 2) # 2 vehicles\n self.assertEqual(\n sum(dv.vehicle_type == bike for dv in assigned_delivery_vehicles), 2\n ) # 2 bike", "def test_get_PineappleRepublic_factory(self):\n order_processor = OrderProcessor()\n factory = order_processor.get_factory('PineappleRepublic')\n self.assertTrue(self, isinstance(factory, PineappleRepublicFactory))", "def test_get_shipment(self):\n pass", "def test_make_delivery_report_with_delivery_statuss(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_delivery_report(delivery_status='pending')\n self.assert_message_fields(event, {\n 'event_type': 'delivery_report',\n 'delivery_status': 'pending',\n })", "def test_make_delivery_report_default(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_delivery_report()\n self.assert_message_fields(event, {\n 'event_type': 'delivery_report',\n 'delivery_status': 'delivered',\n })", "def setUp(self):\n super(PurchaseOrderTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, '[email protected]', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n self.client.force_authenticate(self.user)\n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='[email protected]', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n\n # Create Custom Supply\n # not implemented\n\n # Create Fabric\n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n self.supply1 = self.supply\n \n self.product = Product(supply=self.supply, supplier=self.supplier, cost=base_fabric['unit_cost'],\n purchasing_units='m')\n self.product.save()\n self.supply2 = Fabric.create(**base_fabric2)\n self.supply2.discount = 5\n self.supply2.save()\n self.product2 = Product(supply=self.supply2, supplier=self.supplier, cost=base_fabric['unit_cost'])\n self.product2.save()\n self.supply1.supplier = self.supplier\n self.supply2.supplier = self.supplier\n \n\n #Create supply with no target item\n self.supply3 = Supply.objects.create(description='test supply')\n self.supply3.id = 203\n self.supply3.save()\n\n #Create a project\n self.project = Project()\n self.project.codename = 'MC House'\n self.project.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.order_date = datetime.datetime(2017, 1, 15, 15, 30, 0, 0, tzinfo=timezone('Asia/Bangkok'))\n self.po.order_date = self.order_date\n self.po.receive_date = datetime.datetime.now()\n self.po.save()\n 
#self.po.create_and_upload_pdf()\n \n self.item = Item.create(supplier=self.supplier, id=1, **base_purchase_order['items'][0])\n self.item.purchase_order = self.po\n self.item.save()\n \n self.po.calculate_total()\n self.po.save()", "def test_shipment_creation(self):\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n Shipment = self.new_state.apps.get_model('order', 'salesordershipment')\n\n # Check that the correct number of Shipments have been created\n self.assertEqual(SalesOrder.objects.count(), 5)\n self.assertEqual(Shipment.objects.count(), 5)", "def setUp(self):\n self.factory = PhoneFactory()", "def test_case_2(self):\n\n orders_data = generate_orders_data([50, 50])\n assigned_delivery_vehicles = SlotDelivery.assign_new_batch_order_delivery(\n slot_number=1, orders=orders_data)\n\n scooter = VehicleType.objects.get(name='scooter')\n self.assertEqual(len(assigned_delivery_vehicles), 2) # 2 vehicles\n self.assertEqual(\n sum(dv.vehicle_type == scooter for dv in assigned_delivery_vehicles), 2\n ) # 2 scooters", "def test_consume_tickets(self):\n st1 = ServiceTicketFactory()\n st2 = ServiceTicketFactory()\n ServiceTicket.objects.consume_tickets(self.user)\n self.assertTrue(ServiceTicket.objects.get(ticket=st1).is_consumed())\n self.assertTrue(ServiceTicket.objects.get(ticket=st2).is_consumed())", "def testFactoryMethod(self):\n factory = service_handlers.ServiceHandlerFactory(Service)\n handler = factory()\n\n self.assertTrue(isinstance(handler, service_handlers.ServiceHandler))\n self.assertTrue(isinstance(handler.service, Service))", "def test_get_Nika_factory(self):\n order_processor = OrderProcessor()\n factory = order_processor.get_factory('Nika')\n self.assertTrue(self, isinstance(factory, NikaFactory))", "def test_required_methods(self):", "def test_transportzone_create(self):\n self.assertTrue(True)", "def test_item_factory_class():\n # __init__()\n factory = ItemFactory()\n pizza_menuitem = MenuItem(\"cheese\", \"Pizzas\", True, 10.0, 1)\n drink_menuitem = MenuItem(\"fanta\", \"Drinks\", True, 10.0, 1)\n side_menuitem = MenuItem(\"fries\", \"Sides\", True, 10.0, 1)\n none_menuitem = MenuItem(\"oreo\", \"oreo\", True, 10.0, 1)\n medium = MenuItem(\"medium\", \"size\", False, 4.0, 1)\n\n # create_item()\n expected_pizza = Pizza(pizza_menuitem, medium)\n expected_drink = Drink(drink_menuitem, medium)\n expected_side = Side(side_menuitem)\n pizza = factory.create_item(pizza_menuitem, medium)\n assert pizza == expected_pizza\n assert factory.create_item(drink_menuitem, medium) == expected_drink\n assert factory.create_item(side_menuitem) == expected_side\n assert not factory.create_item(none_menuitem, medium)", "def test_process_subscriptions(self):\n pass", "def test_subscribe_offer(self):\n pass", "def test_factoryAttribute(self):\n port = self.port(store=self.store, factory=self.factory)\n self.assertIdentical(port.factory, self.factory)", "def setUp(self):\n self.foreignAmount=1000.0\n self.fxFixingDate=Date(1,October,2018)\n self.familyName=\"ECB\"\n self.fixingDays=2\n self.sourceCurrency=USDCurrency()\n self.targetCurrency=EURCurrency()\n self.fixingCalendar=UnitedStates()\n self.todayDate=Date(11, November, 2018)\n self.tsDayCounter=Actual360()\n self.flatForwardUSD=FlatForward(self.todayDate, 0.005, self.tsDayCounter)\n self.sourceYts=RelinkableYieldTermStructureHandle(self.flatForwardUSD)\n self.flatForwardEUR=FlatForward(self.todayDate, 0.03, self.tsDayCounter);\n self.targetYts=RelinkableYieldTermStructureHandle(self.flatForwardEUR)\n 
self.fxindex=FxIndex(self.familyName,self.fixingDays,self.sourceCurrency,self.targetCurrency,self.fixingCalendar,self.sourceYts,self.targetYts)\n self.paymentDate=Date(1,November,2018)\n self.startDate=Date(1,October,2018)\n self.endDate=Date(1,November,2018)\n self.fixingDays=2\n self.gearing=1.0\n self.spread=0.0\n self.refPeriodStart=Date(1,October,2018)\n self.refPeriodEnd=Date(1,November,2018)\n self.dayCounter=Actual360()\n self.isInArrears=False\n self.tenor=Period(3,Months)\n self.settlementDays=2\n self.currency=GBPCurrency()\n self.floatIndex=USDLibor(self.tenor,self.sourceYts)\n self.undCpn = IborCoupon(self.paymentDate,self.foreignAmount, self.startDate,self.endDate,self.fixingDays,self.floatIndex,self.gearing,self.spread,self.refPeriodStart,self.refPeriodEnd,self.dayCounter)\n self.floatingratefxlinkednotionalcoupon=FloatingRateFXLinkedNotionalCoupon(self.fxFixingDate,self.foreignAmount,self.fxindex,self.undCpn)", "def test_create_subscription(self):\n pass", "def test_factory(self):\n port = self.port(description=u'foo')\n port.startService()\n self.assertIdentical(self._service.factory, port.factory.realFactory)", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n self.work = models.Work.objects.create(title=\"Test Work\")\n self.book = models.Edition.objects.create(\n title=\"Example Edition\",\n remote_id=\"https://example.com/book/1\",\n parent_work=self.work,\n )\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.delay\"):\n self.shelf = models.Shelf.objects.create(\n name=\"Test Shelf\", identifier=\"test-shelf\", user=self.local_user\n )\n models.SiteSettings.objects.create()", "def test_get_orders(self):\n pass", "def setUp(self):\n super(Orders, self).setUp()" ]
[ "0.77591133", "0.74950004", "0.6969184", "0.6616062", "0.65615195", "0.6530319", "0.6247274", "0.62400633", "0.6212892", "0.6183295", "0.61669254", "0.61602634", "0.6080189", "0.6077063", "0.60623956", "0.60152614", "0.6003989", "0.5962305", "0.5956253", "0.595623", "0.59478486", "0.59407145", "0.5928491", "0.5906659", "0.58969915", "0.5882575", "0.5856857", "0.58540934", "0.5851708", "0.58454806" ]
0.8259066
0
Test pizza parlour's read from file.
def test_parlour_one():
    menu = 'Here is our menu:\nPizzas:\n\tMargherita: $10.99\n\t' \
           'Pepperoni: $12.99\nDrinks:\n\tSprite: $2.99\n\tPepsi: $3.99\n' \
           'PizzaSizes:\n\tSmall: $0.00\n\tMedium: $3.99\n\tLarge: $6.99\n' \
           'DrinkSizes:\n\tSmall: $0.00\n\tMedium: $2.99\n\tLarge: $4.99\n' \
           'Toppings:\n\tExtra Cheese: $1.99\nSides:\n\tFries: $6.99\n'
    assert PizzaParlour._read_from_file().__str__() == menu
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadInputFile(file:int)->EvenMorePizza:\n self.files = ('a_example.in','b_little_bit_of_everything.in', 'c_many_ingredients.in','d_many_pizzas.in','e_many_teams.in')\n \n EvenMorePizza temporalValue\n \n file = open(files[file])#open the file\n data = doc.Read().strip(' ,\\n').split('\\n')\n file.close()#close the file\n #set the title of the command\n data_head = data[0].strip(' ,\\n').split(' ')\n temporalValue.AvailablePizza = int(data_head[0])\n temporalValue.Team2ppl = int(data_head[1])\n temporalValue.Team3ppl = int(data_head[2])\n temporalValue.Team4ppl = int(data_head[3])\n #set the other ingredients\n count = 0\n for a in data[1:]:\n x = a.strip(' ,\\n').split(' ')\n temporalValue.ingredients[count] = x[1:]\n count += 1", "def read_setup(fname):\n with codecs.open(fname, 'r') as fin:\n n_row, n_col, L, H = list(map(int, fin.readline().split()))\n\n pizza = []\n for _ in range(n_row):\n line = fin.readline().strip()\n pizza.append(line)\n\n return pizza, n_row, n_col, L, H", "def test_read_from_file():\n reading_file = d.read_code_from_file()\n assert type(reading_file) == list\n assert len(reading_file) == 7\n assert \"\\n\" not in d.read_code_from_file()", "def test_onePerLine(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])", "def main():\n splitted_file = convert_input_to_list()\n encyclopedia_of_pizza = parse_pizza_info(splitted_file)\n pizza_winner = choose_pizza(encyclopedia_of_pizza)\n print_winner(pizza_winner)", "def read_puzzles(name):\n puzzles = []\n puzzle = []\n with open(name) as f:\n for line in f:\n if line and (line[0] == '#' or line[0].isalpha()):\n if puzzle:\n assert len(puzzle) == 81\n puzzles.append(puzzle)\n puzzle = []\n continue\n for c in line:\n if c.isdigit():\n puzzle.append(int(c))\n elif c == '.':\n puzzle.append(0)\n if puzzle:\n assert len(puzzle) == 81\n puzzles.append(puzzle)\n return puzzles", "def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r", "def test_read_file(self):\n restart_path = os.path.join(arc_path, 'arc', 'testing', 'restart(H,H2O2,N2H3,CH3CO2).yml')\n input_dict = read_file(restart_path)\n self.assertIsInstance(input_dict, dict)\n self.assertTrue('reactions' in input_dict)\n self.assertTrue('freq_level' in input_dict)\n self.assertTrue('use_bac' in input_dict)\n self.assertTrue('ts_guess_level' in input_dict)\n self.assertTrue('running_jobs' in input_dict)\n\n with self.assertRaises(InputError):\n read_file('nopath')", "def _test(self, file_name):\n data = bob.io.base.load(file_name)\n assert (_data == data).all()", "def test_artemis_reader():\n _test_raw_reader(\n read_raw_artemis123,\n input_fname=short_hpi_1kz_fname,\n pos_fname=dig_fname,\n verbose=\"error\",\n )", "def test_read():\n f = open('test', mode='r')\n line = f.read()\n f.close()", "def read_prn(file_name):\n\n with open(file_name) as f:\n f = MyIter(f)\n try:\n assert next(f).strip() == 'prn:13|'\n assert next(f).strip() == 'subtype = MT2500'\n assert next(f).strip() == 'Doc={MT2500:14|'\n\n assert next(f).strip() == 'Film={12.1|'\n film_data = {}\n for line in f:\n if '}' in line:\n break\n key, value = read_key_value(line)\n film_data[key] = value\n\n assert next(f).strip() == 'Test_Info={2|'\n test_info = {}\n for line in f:\n if '}' in line:\n break\n key, value = read_key_value(line)\n test_info[key] = value\n\n assert next(f).strip() == 'Test_Data=('\n test_data = []\n for i, line in enumerate(f):\n if line.strip() != 
'{6|':\n break\n\n test_data.append({})\n for line in f:\n if '[' in line:\n break\n key, value = read_key_value(line)\n test_data[i][key] = try_to_num(value)\n\n xs, ys = [], []\n for line in f:\n if ']' in line:\n break\n x, y = line.split(',')\n xs.append(x)\n ys.append(y)\n\n test_data[i]['xs'] = np.array(xs, dtype='float')\n test_data[i]['ys'] = np.array(ys, dtype='float')\n assert int(test_data[i]['Number_Of_Points']) == len(xs)\n assert next(f).strip()[0] == '}' # may have a comma\n\n assert 'Test_Results=(' == next(f).strip()\n test_results = []\n for i, line in enumerate(f):\n if line.strip() != '{6|':\n break\n test_results.append({})\n for line in f:\n if '}' in line:\n break\n key, value = read_key_value(line)\n test_results[i][key] = try_to_num(value)\n assert next(f).strip()[0] == '}' # may include comma\n\n except AssertionError as e:\n print(f._index, f._line)\n raise\n\n data_remove = ['Number_Of_Points']\n results_swaps = [\n ('TestDate', 'date'),\n ('Length_Cnvrsn', 'length_conversion'),\n ('Force_Cnvrsn', 'force_conversion'),\n ('LoadCell_Capacity', 'loadcell_capacity'),\n ('LoadCell_CpctyUnit', 'loadcell_capacity_unit'),\n ('LoadCell_BitsOfReso', 'loadcell_bits_of_resolution'),\n ('Slack_time', 'slack_time'),\n ('BreakStrength', 'break_strength'),\n ('BreakElongation', 'break_elongation'),\n ('BreakPctElongation', 'break_percent_elongation'),\n ('YieldStrength1', 'yield_strength'),\n ('YieldLoad1', 'yield_load'),\n ('SampleThickness', 'thickness'),\n ('BreakLoad', 'break_load'),\n ]\n results_remove = ['Analysis']\n data_swaps = [\n ('X_unit', 'x_units'),\n ('Y_unit', 'y_units'),\n ('Crosshead_speed', 'crosshead_speed'),\n ('Sample_Thkness', 'sample_thickness'),\n ('Sample_Width', 'sample_width'),\n ('Grip_Separation', 'gauge_length'),\n ('Start_Threshhold', 'start_threshhold'),\n ('Stop_Threshhold', 'stop_threshhold'),\n ]\n\n elongations = []\n assert len(test_data) == len(test_results)\n for data, results in zip(test_data, test_results):\n for original, to in data_swaps:\n data[to] = data.pop(original)\n for original, to in results_swaps:\n results[to] = results.pop(original)\n for key in data_remove:\n data.pop(key)\n for key in results_remove:\n results.pop(key)\n\n if data['x_units'] == 'Secs.':\n data['x_units'] = 's'\n if data['y_units'] == 'Newtons':\n data['y_units'] = 'N'\n if results['date']:\n results['date'] = datetime.strptime(results['date'], '%d %b, %Y')\n\n xs = data['xs']*float(data['crosshead_speed'])\n elongations.append(\n Elongation(\n xs, data['ys'],\n float(data['gauge_length']) / 1e3, # mm → m\n float(data['sample_width']) / 1e3, # mm → m\n float(data['sample_thickness']) / 1e3, # mm → m\n None\n )\n )\n\n return elongations", "def test_read_file():\n filename = 'sample'\n assert read_file(filename) == 'hello!\\n'", "def test_real_file(self):\n log.info('===== START TEST BYTE LOSS =====')\n\n # Recovered\n file_path = os.path.join(RESOURCE_PATH, '11079364_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n particles = self.parser.get_records(182)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079364_SNA_SNA_recov.yml', RESOURCE_PATH)\n self.assertEquals(self.exception_callback_value, [])\n stream_handle.close()\n\n # Telemetered\n file_path = os.path.join(RESOURCE_PATH, '11079419_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n 
self.create_parser(stream_handle)\n\n particles = self.parser.get_records(172)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079419_SNA_SNA_telem.yml', RESOURCE_PATH)\n stream_handle.close()\n\n log.info('===== END TEST REAL FILE =====')", "def test_parse_CPS_files(self):\r\n\r\n lines = [\r\n \"\\t\".join([\"ChimeraSlayer\", \"chimera_X92624\", \"7000004131495956\",\r\n \"S000469847\", \"1.0360\", \"99.35\", \"100\", \"0.9354\", \"89.70\",\r\n \"0\", \"YES\", \"NAST:4595-4596\", \"ECO:941-942\"]),\r\n \"\\t\".join([\"ChimeraSlayer\", \"Test\", \"7000004131495956\",\r\n \"S000469847\", \"1.0360\", \"99.35\", \"100\", \"0.9354\", \"89.70\",\r\n \"0\", \"NO\", \"NAST:4595-4596\", \"ECO:941-942\"])\r\n ]\r\n\r\n expected = [('chimera_X92624', ['7000004131495956', 'S000469847'])]\r\n self.assertEqual(parse_CPS_file(lines), expected)\r\n\r\n # Bad file raises error\r\n self.assertRaises(ValueError, parse_CPS_file, [\"\"])\r\n self.assertRaises(ValueError, parse_CPS_file, [\"Broken\"])", "def read_input_pizza(filename):\n lines = open(filename).readlines()\n M, N = [int(val) for val in lines[0].split()]\n available = np.array([int(n) for n in lines[1].split()])\n return M, N, available", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def testOneShow(self):\n\t\t# for line in self.file:\n\t\t# \tprint line\n\t\tline = self.file.readline()\n\t\tinfo = scrapeFilename( line )\n\t\tassert info['show'] == \"Chuck\"", "def test_probabilistic_parsers():", "def test_file_read():\n expected = [\"scorevideo LOG\\n\", \"File: log.mat\"]\n with open(TEST_RES + \"/file_read.txt\", 'r') as file:\n actual = file.readlines()\n assert expected == actual", "def test_read_from_file():\n from scraper import read_from_file\n assert read_from_file(TEST_FILE) == (TEST_CONTENT, 'utf-8')", "def testFileInRead(self, mockPath):\n mockPath.return_value = 'bananaphone.ccc'\n self.node = cdl_convert.ColorCollection(input_file='mybestfile.ccc')\n\n mockPath.assert_called_once_with('mybestfile.ccc')\n\n self.assertEqual(\n 'bananaphone.ccc',\n self.node.file_in\n )", "def check_one_file(logger, evolve_options, filename, population):\n\n # Number of lines in the file should match the population size\n lines = sum(1 for line in open(filename))\n assert lines == len(population)\n\n # Loading the contents in the generation file should be equal to each random strategy\n file_strategies = evolve.load_generation(logger, filename)\n # Write the output of the loaded generation file for debugging purposes in case the test fails\n output_file = filename + \".output\"\n evolve.write_generation(output_file, file_strategies)\n\n for index, strategy in enumerate(file_strategies):\n assert str(strategy) == str(population[index])\n\n # Initializing the population without the \"load_from\" option should not be equal in total\n random_strategies = evolve.initialize_population(logger, evolve_options, None)\n file_strategies_str, population_str = '', ''\n\n for strategy in random_strategies:\n file_strategies_str += str(strategy)\n\n for strategy in population:\n population_str += str(strategy)\n\n assert file_strategies_str != population_str\n\n # Initializing the population with the \"load_from\" option should be equal for each strategy\n evolve_options[\"load_from\"] = filename\n file_strategies = evolve.initialize_population(logger, evolve_options, None)\n for 
index, individual in enumerate(file_strategies):\n assert str(individual) == str(population[index])", "def test_nonfile(self):\n self.assertEqual(None,readfiles.read_file(\"tests.txt))", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n self.assertEquals(a,True)", "def fixture_parsed_clippings(clippings_filename):\n\n clippings_file_path = os.path.join(TEST_RESOURCES_DIR, clippings_filename)\n\n with open(clippings_file_path) as clippings_file:\n clippings = parse_clippings(clippings_file)\n\n return clippings", "def read_from_file(self, filename: str) -> None:", "def test_the_main_with_simple_yet_proper_file(self):\r\n assert the_main_function('test_proper_file.csv') == (['\\ufeffid', 'link'], [['1', 'abc.com'], ['2', 'gef.com']])", "def test_get_query_list_from_file(): # ***Incomplete test\n ##########################\n # Arrange.\n infp = \"infp\"\n\n ##########################\n # Act.\n #x = get_query_list_from_file(infp)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary." ]
[ "0.62816143", "0.6195963", "0.6089519", "0.60648394", "0.59500283", "0.5931898", "0.59020066", "0.58859295", "0.582211", "0.5805901", "0.57465994", "0.5741897", "0.56686026", "0.5658264", "0.5647219", "0.56221044", "0.5591386", "0.5587265", "0.55658156", "0.5563152", "0.5537137", "0.5525886", "0.5524323", "0.5513892", "0.5506442", "0.54819167", "0.5478876", "0.5464446", "0.545015", "0.5431343" ]
0.79743844
0
Given t, returns BWT(t) by way of the BWM
def bwtViaBwm(t):
    BWT = ''.join(map(lambda x: x[-1], bwm(t)))
    return BWT
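The document above leans on a bwm(t) helper that is not part of the record; a minimal sketch, assuming bwm builds the Burrows-Wheeler Matrix as the lexicographically sorted rotations of t and that t already ends with a unique terminator such as '$':

def bwm(t):
    # All rotations of t, sorted lexicographically (the Burrows-Wheeler Matrix).
    rotations = [t[i:] + t[:i] for i in range(len(t))]
    return sorted(rotations)

# Usage: bwtViaBwm('abaaba$') returns 'abba$aa' (the last column of the sorted rotations).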
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def m2_weight(t):\n return integrate.quad(lambda x: x*x * bethe_dos(x, t), -2*t, 2*t)[0]", "def calcBW(t,g,bw):\n if type(g) is not np.array:\n g = np.array(g)\n if bw>0.0:\n bw*=-1.0\n maxg = np.max(g)\n g = g-maxg\n #plt.figure('testbw')\n \n imax = np.argmax(g)\n mb = np.where(g>3.0*bw)\n # ...left\n gg = g[mb[0][0]:imax]\n tt = t[mb[0][0]:imax]\n #plt.plot(tt,gg)\n ha1 = np.interp(bw,gg,tt)\n # ...right\n gg = np.flipud(g[imax:mb[0][-1]])\n tt = np.flipud(t[imax:mb[0][-1]])\n ha2 = np.interp(bw,gg,tt)\n #plt.plot(tt,gg)\n # ...full\n FWHM = abs(ha1) + abs(ha2)\n #print 'bw: ',ha1,ha2\n return FWHM", "def B(self, t):\n return np.sqrt((3 * self.eta_B * self.snr.L0 *\n YEAR_TO_SEC * self.pulsar.tau_0) /\n (self.r(t) * PC_TO_CM) ** 3 *\n (1 - (1 + (t / self.pulsar.tau_0)) ** (-1)))", "def bwt(T, SA: array, BWT: bytearray, n: int):\n for i in range(n):\n BWT[i] = T[SA[i] - 1]", "def Bernstein(i, n, t):\n return special.binom(n, i) * t ** i * (1 - t) ** (n - i)", "def pwcmtm(t, w, t_binning):\n\n\n # computing the optimal solution of the least squares approximation \\hat\\beta\n hat_beta= np.array([np.mean(w[t_binning == i]) for i in range(max(t_binning) + 1)])\n \n # reconstructing the (S . \\hat\\beta) vector (the vector approximating w)\n hat_w= hat_beta[t_binning]\n \n # computing the measure\n return np.sum((w - hat_w)**2)/(np.var(w)*len(w))", "def mb(y, t):\n dfxr=k*y[0]-mu*y[4]\n dfxi=k*y[1]-mu*y[5]\n dfyr=k*y[2]-mu*y[6]+y[3]*(Dphi0+m*np.cos(wf*t))\n dfyi=k*y[3]-mu*y[7]-y[2]*(Dphi0+m*np.cos(wf*t))\n drxr=(1*y[4]-d*y[5])-y[0]*y[8]\n drxi=(1*y[5]+d*y[4])-y[1]*y[8]\n dryr=(1*y[6]-d*y[7])-y[2]*y[8]\n dryi=(1*y[7]+d*y[6])-y[3]*y[8]\n ddelta=g*(y[8]-D0+(y[0]*y[4]+y[1]*y[5]+y[2]*y[6]+y[3]*y[7]))\n return [dfxr,dfxi,dfyr,dfyi,drxr,drxi,dryr,dryi,ddelta]", "def _dBmTomW(dBm):\n return math.pow(10.0, dBm / 10.0)", "def gibbs_(dict_, T):\n\tdST = dict_['S298'] + CpT(dict_, T)[0]\n\t\t\n\tdHT = dict_['dH298'] + CpT(dict_, T)[1]/1000\n\t\t\n\treturn (dHT - T*dST/1000)", "def basis(T, dimensions, t):\n # Undo change of basis from realizer, and then transform into window\n B = canonical_basis(dimensions, t)\n return B.dot(T)", "def planck_w(lam, T):\n return ((2*h*c**2)/(lam**5))*(1./(np.exp((h*c)/(lam*k*T))-1))", "def half_space_cooling_waermefluss(k, T0, T1, kappa, t):\n return k * (T1 - T0) / (numpy.sqrt(math.pi * kappa * t))", "def thetacm(t):\n return np.array([\n 0,\n 0,\n self.wz * t\n ])", "def weighting(wb, m, a):\n s = control.tf([1, 0], [1])\n return (s/m + wb) / (s + wb*a)", "def gen_tb_tb_weights(weight=1.):\r\n W = np.zeros([8, 8])\r\n sinusoid = -(np.cos(np.linspace(0, 2 * np.pi, 8, endpoint=False)) - 1) / 2\r\n for i in range(8):\r\n values = np.roll(sinusoid, i)\r\n W[i, :] = values\r\n return weight * W", "def td_to_modb(td, error=None):\n if error is not None:\n return (2.0 * np.pi / (1.758820150E11 * td), 2.0 * np.pi / (1.758820150E11 * td**2.) 
* error)\n return 2.0 * np.pi / (1.758820150E11 * td)", "def boltzmann(deltaE, T):\n\tassert T != 0\n\tk_b = 1.3806488e-23\n\treturn math.exp(-abs(deltaE) / k_b * T)", "def get_wb(self, x, weight_blend):\r\n batch_nums = weight_blend.size()[0]\r\n c = weight_blend.unsqueeze(-1).unsqueeze(-1)\r\n x_size = x.size()\r\n x = x.unsqueeze(0).expand(batch_nums, x_size[0], x_size[1], x_size[2])\r\n x = c * x\r\n return x.sum(dim=1)", "def B(self, s, t, n):\n\n if n == 0:\n return 1\n elif min(len(s), len(t)) < n:\n return 0\n if (s[:-1], t, n) in self.B_st.keys():\n B1 = self.B_st[(s[:-1], t, n)]\n else:\n B1 = self.B(s[:-1], t, n)\n #self.B_st[(s[:-1], t, n)] = B1\n if (s, t[:-1], n) in self.B_st.keys():\n B2 = self.B_st[(s, t[:-1], n)]\n else:\n B2 = self.B(s, t[:-1], n)\n #self.B_st[(s, t[:-1], n)] = B2 \n \n if (s[:-1], t[:-1], n) in self.B_st.keys():\n B3 = self.B_st[(s[:-1], t[:-1], n)]\n else:\n B3 = self.B(s[:-1], t[:-1], n)\n #self.B_st[(s[:-1], t[:-1], n)] = B3\n result = self.decay_param * (B1 + B2 - self.decay_param * B3)\n if s[-1] == t[-1]:\n if (s[:-1], t[:-1], n-1) in self.B_st.keys():\n B4 = self.B_st[(s[:-1], t[:-1], n-1)]\n else:\n B4 = self.B(s[:-1], t[:-1], n-1)\n #self.B_st[(s[:-1], t[:-1], n-1)] = B4\n result += (self.decay_param ** 2) * B4\n return result", "def bkgwt(self):\n return self._return_if('_bkgwt')", "def modb_to_td(b):\n return 2.0 * np.pi / (1.758820150E11 * b)", "def binned_ts(t, x, w, fun=np.mean):\n\n nbins = int(round((t.max()-t.min())/w))\n bins = np.linspace(t.min(), t.max(), nbins)\n idx = np.digitize(t, bins)\n tb = [fun(t[idx==bn]) for bn in range(1,nbins)]\n xb = [fun(x[idx==bn]) for bn in range(1,nbins)]\n\n return np.array(tb), np.array(xb)", "def getTbin(self,numwrap=float):\n return numwrap(self.getPeriod()) / numwrap(self.getNbin())", "def build_beck(cur, s, t):\n if np.mod(t, LPRINT) == 0:\n print('+> Calculating synthetic riverbed topography...', end='')\n beck_bed = compute_beck(cur, s)\n beck_bed[np.abs(beck_bed)<ZERO] = 0\n if np.mod(t, LPRINT) == 0:\n print(' [done]')\n return beck_bed", "def beta_squared(T, m):\n numerator = T * (T + 2 * m)\n denominator = (T + m) ** 2\n return numerator / denominator", "def weights_midas_beta(\n th:np.array, bt:np.array,\n Spc:Dict) -> np.array:\n\n dict_list = create_time_dicts(Spc)\n\n if Spc['TwoParam']:\n th1=th[0:Spc['nbvar']]\n th2=th[Spc['nbvar']:2*Spc['nbvar']]\n else:\n th2=th\n\n for time_period in dict_list:\n for i in time_period['range']:\n if Spc['TwoParam']:\n if Spc['almon']:\n W0=np.exp(th1[i]*time_period['k'] + th2[i]*np.square(time_period['k'])) \\\n / np.sum(np.exp(th1[i]*time_period['k'] + th2[i]*np.square(time_period['k'])))\n elif Spc['betaFc']:\n W0=np.exp(th1[i]*time_period['k'] + th2[i]*np.square(time_period['k'])) \\\n / np.sum(np.exp(th1[i]*time_period['k'] + th2[i]*np.square(time_period['k'])))\n elif Spc['Averaging']:\n W0=time_period['one']/time_period['kk']\n elif Spc['betaFc']:\n W0=np.power(th2[i]*(1-time_period['w']),(th2[i]-1)) \\\n / sum(np.power(th2[i]*(1-time_period['w']),(th2[i]-1)))\n elif Spc['betaFc_special']:\n W0=th2[i]*time_period['w']*np.power((1-time_period['w']),(th2[i]-1))\\\n / sum(th2[i]*time_period['w']*np.power((1-time_period['w']),(th2[i]-1)))\n if i==0:\n W = W0*bt[i]\n ww = W0\n else:\n W = np.r_[W,W0*bt[i]]\n ww = np.r_[ww,W0]\n\n return W.T", "def taumBday(n_b, n_w, cost_b, cost_w, z):\n # 3 cases for how much it would cost to buy black or white present\n case1 = n_w * cost_w + n_b * cost_b\n case2 = n_w * (cost_b + z) + n_b * cost_b\n case3 = n_w 
* cost_w + n_b * (cost_w + z)\n return min(case1, case2, case3)", "def Boltzmann(En,T):\n ev = 1.60218e-19\n kb = 1.380e-23\n return np.exp(-En/(kb*T/ev))", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def betaT(self):\n if self.maTail > 1:\n return 0\n else:\n return sqrt(1 - self.maTail**2)" ]
[ "0.69747543", "0.6678443", "0.6650404", "0.6596994", "0.6503366", "0.6358359", "0.62313914", "0.60944253", "0.6083277", "0.60792756", "0.6062386", "0.6059564", "0.5959053", "0.58937097", "0.5877149", "0.58583546", "0.5853875", "0.58464986", "0.57807404", "0.57335067", "0.57277733", "0.57136095", "0.5693588", "0.56704026", "0.5651813", "0.5621788", "0.5619007", "0.56027466", "0.55965376", "0.55738646" ]
0.842045
0
Cleans up the current state of the GUI by closing any open models and motion files.
def cleanUp(self):
    # Close any open models
    openModels = getAllModels()
    if len(openModels):
        for model in openModels:
            setCurrentModel(model)
            performAction("FileClose")
            # Wait
            time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_gui():\n pass", "def destroy(self):\n tk.Frame.destroy(self)", "def close(self):\n self.window.destroy()\n self.buttons_window.destroy()", "def quit_fun(self):\n try:\n try:\n self.h5saver.close_file()\n except:\n pass\n\n for module in self.move_modules:\n try:\n module.quit_fun()\n QtWidgets.QApplication.processEvents()\n QThread.msleep(1000)\n QtWidgets.QApplication.processEvents()\n except:\n pass\n\n for module in self.detector_modules:\n try:\n module.quit_fun()\n QtWidgets.QApplication.processEvents()\n QThread.msleep(1000)\n QtWidgets.QApplication.processEvents()\n except:\n pass\n areas=self.dockarea.tempAreas[:]\n for area in areas:\n area.win.close()\n QtWidgets.QApplication.processEvents()\n QThread.msleep(1000)\n QtWidgets.QApplication.processEvents()\n\n\n #save scan settings related to the current preset\n if self.preset_file is not None:\n file = os.path.split(self.preset_file)[1]\n # path = os.path.join(scan_conf_path, file)\n # custom_tree.parameter_to_xml_file(self.settings, path)\n\n if hasattr(self,'mainwindow'):\n self.mainwindow.close()\n\n\n\n\n except Exception as e:\n pass", "def quit(self):\n\n self.main_window.destroy()", "def close(self):\n self.props_action.setVisible(False)\n self.cache.last_format = None\n self.cache.last_serial = None\n self.setWindowTitle(\"TCam Capture\")\n self.pixel_coords_label.setText(\"\")\n self.pixel_label.setText(\"\")\n self.current_fps_label.setText(\"\")\n\n if self.props:\n self.props.setParent(None)\n self.props = None\n self.removeDockWidget(self.props)\n\n self.set_device_menus_enabled(False)\n self.setCentralWidget(None)\n self.serial = None\n\n if self.props_widget:\n self.props_widget.stop()\n self.props_widget = None\n\n if self.view is not None:\n self.stop()\n self.view.setParent(None)\n self.view = None\n # update menu to remove mark on open camera\n self.update_device_list(self.device_list)", "def done(self):\n self.root.destroy()", "def destroy(self):\n for window in self.windows:\n try:\n destroy_window(window)\n except:\n pass", "def delwin(self):\n\t\tfor c in self.components:\n\t\t\tc.delwin()\n\t\tself.win = None", "def destroy_on_close(self):\n self.deleteLater()", "def cleanup(self):\n pygame.quit()", "def finalizeExit(self) -> None:\n base.graphicsEngine.removeAllWindows()\n if self.win is not None:\n print(\"Exiting KarelCraft app, bye!\")\n self.closeWindow(self.win)\n self.win = None\n self.destroy()\n sys.exit()", "def clean_up(self):\n cv2.destroyAllWindows()\n # self.vs.release()", "def close_app(self):\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n db_path = os.path.join(BASE_DIR, \"..\", \"DATA\", \"AIRCRAFT_COLLISION_FORECAST_SYSTEM.db\")\n clean_table(db_path, 'AIRPLANES')\n\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n img_path = os.path.join(BASE_DIR, \"..\", \"GUI\", \"IMAGE\")\n\n # img_path = 'GUI\\\\IMAGE\\\\'\n img_file_names = [file_name for file_name in listdir(img_path) if isfile(join(img_path, file_name))]\n for file_name in img_file_names:\n if file_name not in ('map_marker.png', 'airplane_marker.png', 'collision_marker.png'):\n os.remove(os.path.join(img_path, file_name))\n print('Closing app')\n self.app.root_window.close()", "def quit(self) -> None:\n global tcl_interp\n\n for child in tuple(self._children.values()):\n child.destroy()\n\n self._tcl_call(None, \"destroy\", self.tcl_path)\n self._tcl_call(None, \"destroy\", self.wm_path)\n\n tcl_interp = None\n\n self.app.quit()", "def bye(self):\n self._frame._destroy()\n self._turtles = []\n 
self._gpens = []\n del self._frame", "def __onclosing(self):\n self.window.destroy()", "def close(self):\n self.RemoveAllObservers()\n if hasattr(self, 'axes_widget'):\n self.hide_axes() # Necessary to avoid segfault\n self.axes_actor = None\n del self.axes_widget\n\n if self._empty_str is not None:\n self._empty_str.SetReferenceCount(0)\n self._empty_str = None", "def exitProgram():\n canvas.destroy()\n tool.destroy()\n code_editor.destroy()\n sys.exit()", "def clean(self):\n for i in self.winfo_children():\n i.destroy()", "def close(self):\n \n self.renderer.RemoveActor(self._crosshair.actor)\n self.renderer.RemoveActor(self._scalar_bar_actor)\n self.renderer.RemoveActor(self._orientation_annotation)\n self.renderer.RemoveActor(self._corner_annotation)\n \n for layer in self._layers :\n self.renderer.RemoveActor(layer.actor)\n \n for gui_annotation in self._gui_annotations.values() :\n self.renderer.RemoveActor(gui_annotation.shape_actor)\n self.renderer.RemoveActor(gui_annotation.text_actor)", "def close(self):\n self.destroy()", "def quit(self, *args, **kwargs):\n self.shutdown_cleanup()\n\n self.view.master.destroy()\n self.view.master.quit()", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Convert to 3D'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def close(self):\n self.master.destroy()\n root.quit()", "def close_all_editors():\n hide_editor_ui()\n destroy_all_pulse_windows()\n destroy_ui_model_instances()", "def unload(self):\n main.msgQ.removeEvent(Constants.CMSG_CHANGE_AVATAR_TYPE)\n main.msgQ.removeEvent(Constants.CMSG_CHANGE_TEAM_PVP)\n main.msgQ.removeEvent(Constants.CMSG_START_TO_READY_GAME)\n main.msgQ.removeEvent(Constants.CMSG_CANCEL_TO_JOIN_GAME)\n main.msgQ.removeEvent(Constants.CMSG_START_SIXTY_SECONDS_COUNTER)\n self.mainFrame.destroy()", "def destructor(self):\n cv2.destroyAllWindows()", "def on_cleanup(self):\n self.close()", "def on_cleanup(self):\n self.close()" ]
[ "0.72540486", "0.72328305", "0.70189834", "0.6971871", "0.69068426", "0.6906358", "0.69044363", "0.68878174", "0.68419445", "0.6836851", "0.6834143", "0.67956424", "0.6766781", "0.6766633", "0.6763605", "0.67270285", "0.67138654", "0.6711285", "0.67105806", "0.67079717", "0.67066514", "0.67037106", "0.669257", "0.66629034", "0.6661921", "0.6650237", "0.6643475", "0.66302264", "0.66269785", "0.66269785" ]
0.8206654
0
Loads the adjusted COM model for the chosen trial into the GUI.
def loadAdjustedModel(self):
    # Load model in GUI
    addModel(self.trcFilePath.replace('.trc','.osim'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LoadModel(self):\n\t\tself.form = loader.loadModel(\"models/lampExport\")\n\t\t#self.form.setScale(.007)\n\t\tself.form.reparentTo(render)\n\t\tself.form.setPos(self.xpos, self.ypos, -30)", "def load(self, model):\r\n\r\n self._software_model = model\r\n\r\n self.chkDevEnvQ1.set_active(model.lst_development[0])\r\n self.chkDevEnvQ2.set_active(model.lst_development[1])\r\n self.chkDevEnvQ3.set_active(model.lst_development[2])\r\n self.chkDevEnvQ4.set_active(model.lst_development[3])\r\n self.chkDevEnvQ5.set_active(model.lst_development[4])\r\n self.chkDevEnvQ6.set_active(model.lst_development[5])\r\n self.chkDevEnvQ7.set_active(model.lst_development[6])\r\n self.chkDevEnvQ8.set_active(model.lst_development[7])\r\n self.chkDevEnvQ9.set_active(model.lst_development[8])\r\n self.chkDevEnvQ10.set_active(model.lst_development[9])\r\n self.chkDevEnvQ11.set_active(model.lst_development[10])\r\n self.chkDevEnvQ12.set_active(model.lst_development[11])\r\n self.chkDevEnvQ13.set_active(model.lst_development[12])\r\n self.chkDevEnvQ14.set_active(model.lst_development[13])\r\n self.chkDevEnvQ15.set_active(model.lst_development[14])\r\n self.chkDevEnvQ16.set_active(model.lst_development[15])\r\n self.chkDevEnvQ17.set_active(model.lst_development[16])\r\n self.chkDevEnvQ18.set_active(model.lst_development[17])\r\n self.chkDevEnvQ19.set_active(model.lst_development[18])\r\n self.chkDevEnvQ20.set_active(model.lst_development[19])\r\n self.chkDevEnvQ21.set_active(model.lst_development[20])\r\n self.chkDevEnvQ22.set_active(model.lst_development[21])\r\n self.chkDevEnvQ23.set_active(model.lst_development[22])\r\n self.chkDevEnvQ24.set_active(model.lst_development[23])\r\n self.chkDevEnvQ25.set_active(model.lst_development[24])\r\n self.chkDevEnvQ26.set_active(model.lst_development[25])\r\n self.chkDevEnvQ27.set_active(model.lst_development[26])\r\n self.chkDevEnvQ28.set_active(model.lst_development[27])\r\n self.chkDevEnvQ29.set_active(model.lst_development[28])\r\n self.chkDevEnvQ30.set_active(model.lst_development[29])\r\n self.chkDevEnvQ31.set_active(model.lst_development[30])\r\n self.chkDevEnvQ32.set_active(model.lst_development[31])\r\n self.chkDevEnvQ33.set_active(model.lst_development[32])\r\n self.chkDevEnvQ34.set_active(model.lst_development[33])\r\n self.chkDevEnvQ35.set_active(model.lst_development[34])\r\n self.chkDevEnvQ36.set_active(model.lst_development[35])\r\n self.chkDevEnvQ37.set_active(model.lst_development[36])\r\n self.chkDevEnvQ38.set_active(model.lst_development[37])\r\n self.chkDevEnvQ39.set_active(model.lst_development[38])\r\n self.chkDevEnvQ40.set_active(model.lst_development[39])\r\n self.chkDevEnvQ41.set_active(model.lst_development[40])\r\n self.chkDevEnvQ42.set_active(model.lst_development[41])\r\n self.chkDevEnvQ43.set_active(model.lst_development[42])\r\n\r\n return False", "def loadModel(self):\n#FLAG: still waiting on boost image\n\t\tself.form = loader.loadModel(\"models/panda-model\")\n\t\tself.form.setScape(.005)\n\t\tself.form.reparentTo(render)", "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()", "def load_model(self):\n pass", "def load_model():\n global obj\n obj = NutritionTableDetector()\n print(\"Weights Loaded!\")", "def load(self, model):\r\n\r\n self._model = model\r\n\r\n 
self._load_analysis_inputs_page()\r\n\r\n # Remove existing results and plots pages.\r\n if self._obj_results is not None:\r\n self._notebook.remove_page(1)\r\n if self._obj_plots is not None:\r\n self._notebook.remove_page(1)\r\n\r\n # Get the correct results and plots object for the selected s-model.\r\n self._obj_results = self._lst_results[self._model.distribution_id - 1]\r\n self._obj_plots = self._lst_plots[self._model.distribution_id - 1]\r\n\r\n # Insert the s-model results and plots pages.\r\n self._notebook.insert_page(self._obj_results,\r\n tab_label=self._obj_results.lblPage,\r\n position=1)\r\n self._notebook.insert_page(self._obj_plots,\r\n tab_label=self._obj_plots.lblPage,\r\n position=2)\r\n\r\n # Load the s-model results and plots pages.\r\n self._obj_results.load_results_page(self._model)\r\n self._obj_plots.load_plots(self._model)\r\n\r\n self._notebook.show_all()\r\n self._notebook.set_current_page(0)\r\n\r\n return False", "def load_model(self) -> Any:", "def train_model(self, initial=False):\n if len(self.loading.intersection({'model_train', 'base_model'})) > 0:\n return\n\n train_files = []\n if not self.model_trained and not initial:\n train_files = list(askopenfilenames())\n if len(train_files) is 0:\n return\n\n self.queue_gui_update('model_train_status', {'visible': False})\n self.queue_gui_update('btn_train_model', {'text': 'Training...'})\n self.queue_gui_update('model_train_loading', {'visible': True})\n self.loading.add('model_train')\n else:\n self.queue_gui_update('base_model_status', {'visible': False})\n self.queue_gui_update('btn_train_model', {'text': 'Loading base model...'})\n self.queue_gui_update('base_model_loading', {'visible': True})\n self.loading.add('base_model')\n\n\n self.model, is_base = get_model(train_files)\n if is_base or is_base is None:\n self.base_model_loaded = True\n self.model_trained = False\n self.queue_gui_update('base_model_status', {'value': u'\\u2713', 'text_color': 'green', 'visible': True})\n self.queue_gui_update('base_model_loading', {'visible': False})\n\n self.queue_gui_update('model_train_status', {'value': u'\\u2717', 'text_color': 'red', 'visible': True})\n self.queue_gui_update('btn_train_model', {'text': 'Train transfer model'})\n self.queue_gui_update('model_train_loading', {'visible': False})\n\n if is_base:\n self.loading.remove('base_model')\n else:\n self.model_trained = True\n self.queue_gui_update('model_train_status', {'value': u'\\u2713', 'text_color': 'green', 'visible': True})\n self.queue_gui_update('btn_train_model', {'text': 'Reset model'})\n self.queue_gui_update('model_train_loading', {'visible': False})\n self.loading.remove('model_train')\n\n self.model._make_predict_function()", "def load(self, model):\r\n\r\n self._model = model\r\n\r\n _model = self.tvwRiskMap.get_model()\r\n _model.clear()\r\n\r\n _software = self._dtcBoM.dicSoftware.values()\r\n _top_module = [_s for _s in _software if _s.software_id == 0]\r\n\r\n self._load_risk_map(_top_module, _software, _model)\r\n self._load_testing_matrix_page()\r\n\r\n return False", "def load_modelresult(self, inpfile):\n result = load_modelresult(inpfile)\n\n for prefix in list(self.fit_components.keys()):\n self.onDeleteComponent(self, prefix=prefix)\n\n for comp in result.model.components:\n isbkg = comp.prefix in result.user_options['bkg_components']\n self.addModel(model=comp.func.__name__,\n prefix=comp.prefix, isbkg=isbkg)\n\n for comp in result.model.components:\n parwids = self.fit_components[comp.prefix].parwids\n for pname, par in 
result.params.items():\n if pname in parwids:\n wids = parwids[pname]\n if wids.minval is not None:\n wids.minval.SetValue(par.min)\n if wids.maxval is not None:\n wids.maxval.SetValue(par.max)\n val = result.init_values.get(pname, par.value)\n wids.value.SetValue(val)\n\n self.fill_form(result.user_options)\n return result", "def load_model(self):\n Thread(target=self.__load_model).start()", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def _get_model():\n freezing = os.environ.get('NAUCSE_FREEZE', not app.config['DEBUG'])\n initialize = True\n\n try:\n g.model = app.config['NAUCSE_MODEL']\n except KeyError:\n g.model = init_model()\n app.config['NAUCSE_MODEL'] = g.model\n else:\n if freezing:\n # Model already initialized; don't look for changes\n return\n\n # (Re-)initialize model\n\n g.model.load_licenses(Path(app.root_path).parent / 'licenses')\n g.model.load_local_courses(Path(app.root_path).parent)\n\n if freezing:\n g.model.freeze()", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def SetModel( self, loader ):\n self.loader = loader\n self.adapter,tree,rows = self.RootNode( )\n self.listControl.integrateRecords( rows.values())\n self.activated_node = tree\n self.squareMap.SetModel( tree, self.adapter )\n self.RecordHistory()", "def load_model(self, filename):\r\n pass", "def load(path_to_model):\n pass", "def set_load_model_parameters(self):\n\n self.controller.set_new_model_test_input_path(self.test_input.get())\n self.controller.set_new_model_results_input_path(self.results_input.get())\n self.controller.set_new_model_running(False)", "def loadTutorialSuit():\n loader.loadModelNode(\"phase_3.5/models/char/suitC-mod\")\n loadDialog(1)", "def load_model(self, model_path: str):", "def _prepare_model(model):\n\n # Ensure there is at least 1 load combination to solve if the user didn't define any\n if model.LoadCombos == {}:\n # Create and add a default load combination to the dictionary of load combinations\n model.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})\n \n # Generate all meshes\n for mesh in model.Meshes.values():\n if mesh.is_generated == False:\n mesh.generate()\n\n # Activate all springs and members for all load combinations\n for spring in model.Springs.values():\n for combo_name in model.LoadCombos.keys():\n spring.active[combo_name] = True\n \n # Activate all physical members for all load combinations\n for phys_member in model.Members.values():\n for combo_name in model.LoadCombos.keys():\n phys_member.active[combo_name] = True\n \n # Assign an internal ID to all nodes and elements in the model. 
This number is different from the name used by the user to identify nodes and elements.\n _renumber(model)", "def load_model(self):\r\n try:\r\n self.model = CRNN_STN(self.crnn_cfg())\r\n self.model.load_weights(config.CRNN_Model_Path)\r\n except:\r\n print('Error in method {0} in module {1}'.format('load_model', 'crnn_bridge.py'))", "def ImportModelPart(self):\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Importing model part.\")\n problem_path = os.getcwd()\n input_filename = self.settings[\"model_import_settings\"][\"input_filename\"].GetString()\n if self.is_restarted():\n self.get_restart_utility().LoadRestart()\n elif(self.settings[\"model_import_settings\"][\"input_type\"].GetString() == \"mdpa\"):\n # Import model part from mdpa file.\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Reading model part from file: \" + os.path.join(problem_path, input_filename) + \".mdpa\")\n KratosMultiphysics.ModelPartIO(input_filename).ReadModelPart(self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Finished reading model part from mdpa file.\")\n self.PrepareModelPartForSolver()\n else:\n raise Exception(\"Other model part input options are not yet implemented.\")\n KratosMultiphysics.Logger.PrintInfo(\"ModelPart\", self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]:: \", \"Finished importing model part.\")", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def _load_analysis_inputs_page(self):\r\n\r\n # Load the gtk.ComboBox() with system hardware names.\r\n self.cmbAssembly.handler_block(self._lst_handler_id[0])\r\n Widgets.load_combo(self.cmbAssembly, Configuration.RTK_HARDWARE_LIST,\r\n simple=False)\r\n self.cmbAssembly.handler_unblock(self._lst_handler_id[0])\r\n\r\n self.cmbAssembly.set_active(self._model.assembly_id)\r\n self.cmbDistribution.set_active(self._model.distribution_id)\r\n self.cmbConfType.set_active(self._model.confidence_type)\r\n self.cmbConfMethod.set_active(self._model.confidence_method)\r\n self.cmbFitMethod.set_active(self._model.fit_method)\r\n\r\n self.txtDescription.set_text(self._model.description)\r\n if self._model.confidence < 1.0:\r\n _confidence = self._model.confidence * 100.0\r\n else:\r\n _confidence = self._model.confidence\r\n self.txtConfidence.set_text(str(_confidence))\r\n self.txtStartTime.set_text(str(self._model.start_time))\r\n self.txtEndTime.set_text(str(self._model.rel_time))\r\n self.txtRelPoints.set_text(str(self._model.n_rel_points))\r\n\r\n _start_date = Utilities.ordinal_to_date(self._model.start_date)\r\n _end_date = Utilities.ordinal_to_date(self._model.end_date)\r\n self.txtStartDate.set_text(str(_start_date))\r\n self.txtEndDate.set_text(str(_end_date))\r\n\r\n return False", "def load_model():\n global columns\n global data\n \n model = pickle.load(open('MedCostModel.pkl', 'rb'))\n data = pd.read_csv('MedCosts.csv')\n data = data.drop(columns=['charges'])\n columns = data.columns\n return(model)", "def load_model(self, path):\n pass", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def load_NMF_model():\n model = pickle.load(open(\"models/nmf_model.sav\", 'rb'))\n Q = model.components_ \n return model, Q" ]
[ "0.6397701", "0.633631", "0.6274005", "0.5957209", "0.58768433", "0.5819468", "0.58116055", "0.57786995", "0.56621677", "0.56414884", "0.5607664", "0.55917513", "0.55909866", "0.55737233", "0.556685", "0.5564237", "0.5556083", "0.55559343", "0.55243313", "0.54895574", "0.54881465", "0.54787284", "0.54405344", "0.54383683", "0.5437622", "0.5423995", "0.5411144", "0.54108953", "0.53838885", "0.53795546" ]
0.7538407
0
Hide the markers in the current model.
def hideModelMarkers(self):
    # Handle to current model
    cmcModel = getCurrentModel()
    # Hide markers
    markerSet = cmcModel.getMarkerSet()
    for i in range(cmcModel.getNumMarkers()):
        marker = markerSet.get(i)
        toggleObjectDisplay(marker, False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hide(self):\n self.set_visible(False)", "def hide(self):\n self.visible = False", "def hidePlot(self, index):\n self.pathItem_list[index].hide()", "def hideAnnotations(self):\r\n widget = slicer.modules.NeedleFinderWidget\r\n nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationTextDisplayNode')\r\n for i in range(nodes.GetNumberOfItems()):\r\n node = nodes.GetItemAsObject(i)\r\n if widget.hideAnnotationTextButton.checked:\r\n node.SetTextScale(0)\r\n else:\r\n node.SetTextScale(3)", "def unHide(self):\n self.visible = True", "def ensure_hidden(self):\n self.set_visible(False)", "def hide(self):\n\n if not 'd-none' in str(self.class_):\n self.old_class = self.class_\n self.class_ = 'd-none'\n\n self.viz = False\n\n return self", "def hide(self):\n raise NotImplementedError", "def _hide_labels(self):\n pass", "def clear(self):\n self.ids.mapview.remove_marker(self.marker1)\n self.ids.mapview.remove_marker(self.marker2)", "def hide(self):\n self.row_box.grid_remove()\n self.field_name_box.grid_remove()\n self.field_name_label.grid_remove()\n self.value_box.grid_remove()\n self.active_value_widget.grid_remove()", "def hide(self):\n self.course.quick_action(self.id, 'hide')", "def HideMe(self, event):\n self.Hide()", "def unhide(self):\n self.course.quick_action(self.id, 'show')", "def hideInvisibles(self: Self, event: Event = None) -> None:\n c = self\n showInvisiblesHelper(c, False)", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def ClearMarkers(cls):\n cls._markers = []", "def hide(self, item_id):\n pass", "def hide(self):\n self.frame.grid_forget()\n self.visible = False", "def hide(self, indices):\n traj_ids = set(traj.id for traj in self._trajlist)\n\n for index in indices:\n comp_id = self._ngl_component_ids[index]\n if comp_id in traj_ids:\n traj = self._get_traj_by_id(comp_id)\n traj.shown = False\n self._remote_call(\n \"setVisibility\",\n target='compList',\n args=[\n False,\n ],\n kwargs={'component_index': index})", "def hide(self):\n self.geometry(\"%dx%d%+d%+d\" % (0, 0, 0, 0))", "def setNoHiddenLines():\n dislin.nohide()", "def hideLayer(self, id):\n\n #log.debug('hideLayer: hiding layer %s' % str(id))\n\n self.layer_mapping[id].visible = False\n self.Refresh()", "def Hide(self):\r\n \r\n return self.SetFlag(self.optionHidden, True)", "def hide(self):\n \n self.save_scores()\n super().hide()", "def hide(self):\r\n\t\tself.frame.Show(False)", "def __searchClearMarkers(self):\n self.activeWindow().clearSearchIndicators()", "def hide_all(self, immediate=True):\n raise NotImplementedError", "def hide(self):\r\n if self.visible:\r\n nid = (self.hwnd, 0)\r\n Shell_NotifyIcon(NIM_DELETE, nid)\r\n self.visible = 0", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()" ]
[ "0.6804635", "0.66961974", "0.6605869", "0.65740716", "0.65275544", "0.63676405", "0.6291115", "0.6265939", "0.6202667", "0.6144658", "0.61384076", "0.6124352", "0.60892564", "0.6060456", "0.6000356", "0.59868026", "0.59822285", "0.5929362", "0.5924352", "0.5917634", "0.5902383", "0.5886374", "0.5843379", "0.5831019", "0.58134824", "0.57828325", "0.57820797", "0.57649565", "0.57584274", "0.5733399" ]
0.88948095
0
Loads the CMC motion into the adjusted model.
def loadCMCMotion(self):
    # Load motion file to current model
    loadMotion(self.trcFilePath.replace('.trc','_CMC_states.sto'))
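The GUI-scripting methods in the records above (cleanUp, loadAdjustedModel, hideModelMarkers, loadCMCMotion) all read self.trcFilePath and are evidently meant to run in sequence; a minimal sketch of the wrapper class they assume — the class name and constructor are assumptions, while the call order mirrors the run() helper that appears among the negatives:

class CMCPreview(object):
    # Hypothetical container for the methods quoted in the records above.
    def __init__(self, trcFilePath):
        self.trcFilePath = trcFilePath  # path to the trial's .trc file

    def run(self):
        self.cleanUp()            # close any open models
        self.loadAdjustedModel()  # add the RRA/CMC-adjusted model
        self.hideModelMarkers()   # hide the model's markers from view
        self.loadCMCMotion()      # load the CMC states motion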
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def loadAdjustedModel(self):\r\n # Load model in GUI\r\n addModel(self.trcFilePath.replace('.trc','.osim'))", "def load_model():\n global columns\n global data\n \n model = pickle.load(open('MedCostModel.pkl', 'rb'))\n data = pd.read_csv('MedCosts.csv')\n data = data.drop(columns=['charges'])\n columns = data.columns\n return(model)", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load_model(self, path):\n self._saver.restore(self._sess, path + '/model.ckp')\n pkl_file = open(path + '/som.pkl', 'rb')\n restored = pickle.load(pkl_file)\n pkl_file.close()\n self._m = restored['_m']\n self._n = restored['_n']\n self._neighbourhood = restored['_neighbourhood']\n # self._topography = restored['_topography']\n self._num_iterations = restored['_num_iterations']\n self._Wts = restored['_Wts']\n self._locations = restored['_locations']\n self._learned = restored['_learned']\n self._centroid_grid = restored['_centroid_grid']\n self.abnormal_dist = restored['abnormal_dist']\n\n print(\"Model restored from path: \" + path)", "def import_cml(self, fname):\n self.ftype = 'cml'\n with open(fname) as f:\n lines = f.readlines()\n self.n_atom = 0\n self.n_connect = 0\n self.sym = []\n self.at_num = []\n self.xyz = []\n self.connect = []\n for i in range(len(lines)):\n if lines[i].split()[0] == '<atom':\n self.n_atom += 1\n tmp = lines[i].split()\n self.sym.append(tmp[2].split('\"')[1])\n self.at_num.append(self.sym2num(tmp[2].split('\"')[1]))\n x = float(tmp[3].split('\"')[1])\n y = float(tmp[4].split('\"')[1])\n z = float(tmp[5].split('\"')[1])\n self.xyz.append([x, y, z])\n elif lines[i].split()[0] == '<bond':\n self.n_connect += 1\n tmp = lines[i].split()\n a = int(tmp[1].split('\"')[1].split('a')[1])\n b = int(tmp[2].split('\"')[0].split('a')[1])\n self.connect.append([a, b])\n self.xyz = np.array(self.xyz)", "def load_cobra_model(self, model):\n self.model = ModelPro(model)\n for g in self.model.genes:\n if self.genes_dir:\n g.root_dir = self.genes_dir\n g.protein.pdb_file_type = self.pdb_file_type\n self.genes = self.model.genes\n\n log.info('{}: loaded model'.format(model.id))\n log.info('{}: number of reactions'.format(len(self.model.reactions)))\n log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))\n log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model,\n custom_spont_id=self.custom_spont_id)))\n log.info('{}: number of metabolites'.format(len(self.model.metabolites)))\n log.warning('IMPORTANT: All Gene objects have been transformed into GenePro '\n 'objects, and will be for any new ones')", "def load(self, motion_file):\n\n logging.info(\"Loading motion from: {:s}\".format(motion_file))\n with open(motion_file, \"r\") as f:\n motion_json = json.load(f)\n\n self._loop_mode = LoopMode[motion_json[self._LOOP_MODE_KEY]]\n self._frame_duration = float(motion_json[self._FRAME_DURATION_KEY])\n\n if self._ENABLE_CYCLE_OFFSET_POSITION_KEY in motion_json:\n self._enable_cycle_offset_pos = bool(\n motion_json[self._ENABLE_CYCLE_OFFSET_POSITION_KEY])\n else:\n self._enable_cycle_offset_pos = False\n\n if self._ENABLE_CYCLE_OFFSET_ROTATION_KEY in motion_json:\n self._enable_cycle_offset_rot = bool(\n motion_json[self._ENABLE_CYCLE_OFFSET_ROTATION_KEY])\n else:\n 
self._enable_cycle_offset_rot = False\n\n self._frames = np.array(motion_json[self._FRAMES_KEY])\n self._postprocess_frames(self._frames)\n\n self._frame_vels = self._calc_frame_vels()\n\n assert (self._frames.shape[0] > 0), \"Must have at least 1 frame.\"\n assert (self._frames.shape[1] > self.POS_SIZE +\n self.ROT_SIZE), \"Frames have too few degrees of freedom.\"\n assert (self._frame_duration > 0), \"Frame duration must be positive.\"\n\n logging.info(\"Loaded motion from {:s}.\".format(motion_file))\n\n return", "def _load_molecule(self):\n self.pymol = pybel.readstring(self.input_format, self.file_dic['input'])", "def model_load(self, filename_actor, filename_critic):\r\n self.actor = tf.keras.models.load_model(filename_actor)\r\n self.critic = tf.keras.models.load_model(filename_critic)", "def _load_model(self):\n self.model = tf.keras.experimental.load_from_saved_model(\n self.m_cfg['load_model'], custom_objects=self.custom_objects)\n\n ref = 1 if self.m_cfg['configs']['recursive'] else self.levels\n self.opt = [self._inst_optimizer() for _ in range(ref)]\n self.loss = Losses(self.m_cfg['configs']['loss']).value\n\n l_groups = np.split(np.array(self.model.layers), ref)\n self.vars = list(map(\n lambda g: list(chain(*map(lambda e: e.variables, g))), l_groups))", "def load_model(self):\n if torch.cuda.is_available():\n map_location=lambda storage, loc: storage.cuda()\n else:\n map_location='cpu'\n\n for index, agent in enumerate(self.agents):\n agent.actor_local.load_state_dict(torch.load('agent{}_checkpoint_actor.pth'.format(index + 1), map_location=map_location))\n agent.critic_local.load_state_dict(torch.load('agent{}_checkpoint_critic.pth'.format(index + 1), map_location=map_location))", "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()", "def PCTMCtoCTMC(h: PCTMC) -> CTMC:\n\tif not h.isInstantiated():\n\t\traise ValueError(\"Cannot convert non-instantiated PCTMC to CTMC.\")\n\tres = zeros(h.matrix.shape)\n\tfor s in range(h.nb_states):\n\t\tfor ss in range(h.nb_states):\n\t\t\tres[s,ss] = h.transitionValue(s,ss)\n\treturn CTMC(res, h.labelling, h.name)", "def load(path_to_model):\n pass", "def extract_model_ctc(args, hybrid_model):\n BPE = False\n ctc_class = EncDecCTCModel\n if 'tokenizer' in hybrid_model.cfg.keys():\n BPE = True\n ctc_class = EncDecCTCModelBPE\n\n hybrid_model_cfg = OmegaConf.to_container(hybrid_model.cfg)\n\n new_cfg = deepcopy(hybrid_model_cfg)\n new_cfg['ctc_reduction'] = hybrid_model_cfg['aux_ctc']['ctc_reduction']\n new_cfg['decoder'] = hybrid_model_cfg['aux_ctc']['decoder']\n del new_cfg['compute_eval_loss']\n del new_cfg['model_defaults']\n del new_cfg['joint']\n del new_cfg['decoding']\n del new_cfg['aux_ctc']\n del new_cfg['loss']\n if BPE and 'labels' in new_cfg:\n del new_cfg['labels']\n elif (not BPE) and 'tokenizer' in new_cfg:\n del new_cfg['tokenizer']\n del new_cfg['target']\n del new_cfg['nemo_version']\n\n new_cfg_oc = OmegaConf.create(new_cfg)\n\n # we call restore_from with strict=False because the .nemo file we're restoring from is a hybrid model, which will have named\n # tensors in the state_dict that do not exist in the pure CTC model class, which would result in an exception with strict=True\n ctc_model = ctc_class.restore_from(\n args.input, map_location=torch.device('cpu'), 
override_config_path=new_cfg_oc, strict=False\n )\n\n assert all(\n [\n torch.allclose(hybrid_model.state_dict()[x], ctc_model.state_dict()[x])\n for x in hybrid_model.state_dict().keys()\n if x.split('.')[0] in ['preprocessor', 'encoder']\n ]\n ), \"Encoder and preprocessor state dicts don't match!\"\n\n ctc_model.decoder.load_state_dict(hybrid_model.ctc_decoder.state_dict())\n\n assert all(\n [\n torch.allclose(hybrid_model.ctc_decoder.state_dict()[x], ctc_model.decoder.state_dict()[x])\n for x in hybrid_model.ctc_decoder.state_dict().keys()\n ]\n ), \"Decoder state_dict load failed!\"\n\n assert isinstance(ctc_model, ctc_class), \"Extracted CTC model is of the wrong expected class!\"\n\n return ctc_model", "def loadMonoMMF(self, path):\n \n currentHMM = None\n currentState = None\n currentStream = None\n currentMixture = None\n \n # Example:\n # ~h \"A\"\n # <BEGINHMM>\n # <NUMSTATES> 7\n # <STATE> 2\n # <SWEIGHTS> 5\n # 1.000000e+00 1.000000e+00 1.000000e+00 1.000000e+00 0.000000e+00\n # <STREAM> 1\n # <MEAN> 120\n # 4.696125e+00 2.949968e+00 3.134574e-01 8.816458e-01 1.970429e-02 6.499365e-01 3.236455e-01 -2.100632e-01 -2.807565e-01 2.731812e-02 1.980597e-01 -3.675799e-02 -8.129626e-02 1.889552e-01 1.646941e-02 -1.289776e-01 -7.191063e-02 -8.503922e-02 -5.142944e-02 4.708945e-03 -1.301508e-01 -1.205762e-01 -2.791793e-02 -4.471184e-02 -3.310435e-02 4.167116e-02 -5.886093e-02 -1.739067e-02 2.174975e-02 2.013168e-03 1.526068e-02 2.820022e-02 -4.045233e-03 8.139343e-03 1.044561e-02 2.516671e-02 1.215572e-02 -1.503560e-02 -2.112125e-02 1.579380e-02 9.378761e-02 9.153476e-02 -3.943259e-03 3.806450e-03 -2.646687e-02 2.374074e-02 2.898503e-02 -4.656117e-02 -3.545107e-02 -2.300411e-02 2.819717e-02 -1.862090e-02 -3.309735e-02 1.990083e-02 1.583429e-03 -6.634455e-03 -3.381855e-03 -9.518028e-03 -4.426301e-03 -2.549598e-03 -3.076506e-03 -2.884187e-03 2.186387e-03 -2.975489e-03 4.832148e-03 1.308339e-02 -1.743729e-03 6.280211e-03 6.954642e-03 -6.576275e-04 4.461045e-03 1.880297e-03 4.778963e-03 -1.871376e-03 -3.224137e-03 1.496911e-03 -1.267739e-03 -1.200278e-03 -4.305848e-03 3.576194e-03 -7.372506e-02 -6.160514e-02 -2.629448e-03 7.157943e-03 7.199069e-03 -1.128740e-02 -1.195622e-02 1.683325e-02 1.154647e-02 3.931310e-03 -8.084111e-03 1.316739e-03 1.064620e-02 -7.454145e-03 2.635498e-04 4.661378e-03 1.686717e-03 5.327193e-03 2.250276e-03 -1.258986e-03 3.072441e-03 1.209965e-03 -7.417311e-04 6.167710e-05 -1.865989e-03 -2.905391e-03 3.621586e-04 3.377025e-04 -2.963853e-03 8.844314e-05 -3.321448e-03 -1.449478e-03 -1.439827e-03 -2.003317e-03 -2.297701e-03 6.066221e-04 -3.146972e-03 1.087785e-03 1.640665e-03 -1.389944e-03\n # <VARIANCE> 120\n # 2.749784e-01 9.513675e-02 9.151283e-02 7.004740e-02 6.639282e-02 5.846786e-02 4.681997e-02 4.555215e-02 3.252877e-02 3.858987e-02 4.224407e-02 4.190500e-02 2.866594e-02 2.525655e-02 2.227394e-02 2.177498e-02 1.459964e-02 1.985120e-02 1.503495e-02 1.568949e-02 1.634841e-02 1.390152e-02 1.478345e-02 1.550525e-02 1.553188e-02 1.173604e-02 9.394297e-03 1.201788e-02 9.938436e-03 8.747019e-03 8.849040e-03 9.817274e-03 6.372289e-03 7.423026e-03 5.927648e-03 5.913395e-03 5.848510e-03 5.512487e-03 5.220711e-03 7.363599e-03 2.489263e-02 1.073082e-02 3.360401e-03 2.513706e-03 1.973711e-03 1.693189e-03 2.335216e-03 1.915346e-03 1.364503e-03 1.332114e-03 1.159645e-03 9.800000e-04 1.099333e-03 1.042568e-03 7.632344e-04 7.993022e-04 5.957563e-04 7.604795e-04 6.706708e-04 6.345969e-04 6.288295e-04 5.336152e-04 6.252768e-04 6.391230e-04 5.661934e-04 6.331608e-04 5.145242e-04 
4.738655e-04 5.501772e-04 4.354312e-04 4.913094e-04 4.626485e-04 3.851971e-04 4.831283e-04 3.829468e-04 3.732785e-04 3.603869e-04 3.458906e-04 3.119832e-04 5.431667e-04 2.544728e-02 5.996812e-03 1.494761e-03 1.115514e-03 1.235385e-03 1.107064e-03 1.210763e-03 8.309078e-04 7.964299e-04 6.786759e-04 6.709303e-04 5.907466e-04 6.343870e-04 6.149057e-04 4.585393e-04 4.753864e-04 4.183158e-04 4.501677e-04 3.928643e-04 4.064549e-04 4.214160e-04 4.000704e-04 3.696143e-04 4.195306e-04 3.726038e-04 3.557785e-04 3.535643e-04 3.656799e-04 3.461961e-04 3.616848e-04 3.172553e-04 2.983032e-04 2.908558e-04 3.325507e-04 2.619927e-04 2.673168e-04 2.908063e-04 2.554393e-04 2.491622e-04 4.217977e-04\n # <GCONST> -5.200827e+02\n \n logger.info('Loading monophon macro file %s', path)\n \n mmfFile = open(path)\n \n # read file line by line\n line = mmfFile.readline()\n while line != \"\":\n line = line.strip(\"\\n\").strip()\n \n # found a new ~h macro?\n ret = re.match(\"~h \\\"(\\S+)\\\"\", line)\n if ret is not None:\n currentHMM = HMacro(ret.group(1))\n self.macroList.append(currentHMM)\n self.macroDict[ currentHMM.name ] = currentHMM\n \n currentState = None\n currentStream = None\n currentMixture = None\n logger.info('Loading macro %s', currentHMM.name) \n \n # state given?\n ret = re.match(\"<STATE>\\s+([0-9]+)\", line)\n if ret is not None: \n currentState = StateInfo(currentHMM, int(ret.group(1)))\n currentHMM.addStateInfo(currentState)\n \n currentStream = None\n currentMixture = None \n\n # stream given? \n ret = re.match(\"<STREAM>\\s+([0-9]+)\", line)\n if ret is not None:\n currentStream = StreamInfo(currentState, int(ret.group(1)))\n currentState.addStreamInfo(currentStream)\n self.streamList.append(currentStream)\n \n currentMixture = None\n \n # mixture given? 
\n ret = re.match(\"<MIXTURE>\\s+([0-9]+)\\s+(.+)\", line) \n if ret is not None:\n #print 'Found mixture with ' + ret.group(1) + ' ' + ret.group(2)\n currentMixture = MixtureInfo(currentStream, int(ret.group(1)))\n currentMixture.setWeight(float(ret.group(2)))\n currentStream.addMixtureInfo(currentMixture)\n \n \n # means given?\n ret = re.match(\"<MEAN>\\s+([0-9]+)\", line)\n if currentStream is not None and ret is not None:\n numMeans = int(ret.group(1))\n \n if currentMixture is None:\n currentMixture = MixtureInfo(currentStream, 1)\n currentMixture.setWeight(1.0)\n currentStream.addMixtureInfo(currentMixture)\n \n # not a multi space distribution with a mixture for unvoiced\n if numMeans > 0: \n line = mmfFile.readline()\n means = map(float, line.split())\n \n pdf = currentMixture.pdf \n if pdf is None:\n pdf = PDFInfo()\n currentMixture.setPDF(pdf)\n \n pdf.setMeans(means)\n \n # variances given?\n ret = re.match(\"<VARIANCE>\\s+([0-9]+)\", line)\n if currentStream is not None and ret is not None:\n numVars = int(ret.group(1))\n \n if currentMixture is None:\n currentMixture = MixtureInfo(currentStream, 1)\n currentMixture.setWeight(1.0)\n currentStream.addMixtureInfo(currentMixture)\n \n # not a multi space distribution with a mixture for unvoiced\n if numVars > 0: \n line = mmfFile.readline()\n variances = map(float, line.split())\n \n pdf = currentMixture.pdf \n if pdf is None:\n pdf = PDFInfo()\n currentMixture.setPDF(pdf)\n \n pdf.setVariances(variances) \n \n \n # read next line and then finish loop\n line = mmfFile.readline()\n \n # close the file and leave method \n mmfFile.close()", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load_model(self):\n Thread(target=self.__load_model).start()", "def load_model(self):\n pass", "def update_model(path_to_data, path_to_model):\r\n # Open the annotation files.\r\n with open(path_to_data + r'\\coco_annotation.json') as f:\r\n coco_d = json.load(f)\r\n # Get the categories.\r\n categories = []\r\n for cat in coco_d['categories']:\r\n categories.append(cat['name'])\r\n\r\n # Register the new data.\r\n register_coco_instances(\"coco_update\", {}, path_to_data + r\"\\coco_annotation.json\", path_to_data)\r\n MetadataCatalog.get(\"meta_update\").set(thing_classes=categories)\r\n # MetadataCatalog.get(\"meta_update\").set(thing_classes=[\"Bad\", \"Good\"], thing_colors=[(172, 0, 0), (229, 0, 0)])\r\n\r\n # Set the parameters.\r\n cfg = get_cfg()\r\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\r\n cfg.DATASETS.TRAIN = (\"coco_update\",)\r\n cfg.OUTPUT_DIR = path_to_model\r\n cfg.DATASETS.TEST = ()\r\n cfg.DATALOADER.NUM_WORKERS = 2\r\n cfg.MODEL.DEVICE = 'cpu'\r\n cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\r\n cfg.SOLVER.IMS_PER_BATCH = 1\r\n cfg.SOLVER.BASE_LR = 0.00025\r\n cfg.SOLVER.MAX_ITER = 400\r\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 10\r\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(categories)\r\n\r\n # Update the model.\r\n trainer = DefaultTrainer(cfg)\r\n 
trainer.resume_or_load(resume=False)\r\n trainer.train()", "def loadKoopman(self):\n self.linearEvolving = np.diag(self.model.Koopman['eigenvalues'])\n self.KoopmanModes = self.model.Koopman['modes']\n # self.residual_matrix = self.model.residual_matrix\n self.linearEvolvingEigen = self.linearEvolving\n\n if self.model.type == 'd':\n self.dt = self.model.dt\n elif self.model.type == 'c':\n self.dt = None\n else:\n raise NotImplementedError", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, path):\n with open(path, 'rb') as fp:\n me = pickle.load(fp)\n self.exp = me[\"exp\"] # type: Experiment\n with suppress_errors(\"output root directory may have changed\"):\n self.mru_exp_root = self.exp.root\n self.chdir()\n self.imdb = self.exp.imdb\n self.exp_df = me[\"exp_df\"]\n with suppress_errors(\"caffenet may no longer exist\"):\n self.caffenet = self.exp.caffenet\n with suppress_errors(\"caffemodel may no longer exist\"):\n self.caffemodel = self.exp.caffemodel\n with suppress_errors(\"data directory may have changed\"):\n self.mru_exp_data = self.exp.data\n self.lbl_exp_data.setText(self.mru_exp_data)\n self.edt_caffemodel.setText(self.caffemodel)\n self.edt_caffenet.setText(self.caffenet)\n self.lbl_exp_data.setText(self.mru_exp_data)\n if self.exp_df is not None:\n model = PandasModel(self.exp_df)\n self.table_imdb.setModel(model)\n self.table_imdb.show()\n # Update the status label\n self.lbl_last_status.setText(str(self.exp))", "def load_models(self, episode):\r\n self.actor.load_state_dict(self.target_actor.state_dict())\r\n self.critic.load_state_dict(self.target_critic.state_dict())\r\n hard_update(self.target_actor, self.actor)\r\n hard_update(self.target_critic, self.critic)", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def _set_mc_orig(self):\n mc_jmp = self._get_mc_jmp(self.asm_current['addr'],\n self.addr_shellcode)\n mc_jmp_size = invade.Tool.get_mc_size(mc_jmp)\n mc_orig = self._mem_read(self.asm_current['addr'], mc_jmp_size)\n self.asm_current['mc'] = mc_orig\n return" ]
[ "0.6660261", "0.5961372", "0.58306295", "0.5790346", "0.56145704", "0.55496687", "0.5395073", "0.53921926", "0.5350237", "0.5312845", "0.53073144", "0.52975523", "0.5285391", "0.527037", "0.52650625", "0.5244477", "0.5244028", "0.5241696", "0.5236578", "0.5218793", "0.5214155", "0.52140397", "0.52114207", "0.5193855", "0.5193855", "0.5193855", "0.517761", "0.5171979", "0.5140773", "0.5137993" ]
0.81616914
0
Dynamic Programming Python implementation of Matrix Chain Multiplication that builds the worst sequence of brackets (i.e., the sequence with the biggest number of elementary operations).
def matrix_chain_dynamic(dimensions, n):
    m = [[-1 for _ in range(n)] for _ in range(n)]
    s = [[0 for _ in range(n)] for _ in range(n)]
    # multiplying matrix by itself
    for i in range(1, n):
        m[i][i] = 0
    for length in range(2, n):
        for i in range(1, n - length + 1):
            j = i + length - 1
            for k in range(i, j):
                cost = m[i][k] + m[k + 1][j] + dimensions[i - 1] * dimensions[k] * dimensions[j]
                if cost > m[i][j]:
                    m[i][j] = cost
                    # index of splitting
                    s[i][j] = k
    return m, s
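A usage sketch for the routine above: the split table s can be unwound recursively to print the worst-case bracketing (the print_brackets helper name is illustrative):

def print_brackets(s, i, j):
    # Rebuild the parenthesization recorded in the split table s.
    if i == j:
        return "A%d" % i
    k = s[i][j]
    return "(" + print_brackets(s, i, k) + print_brackets(s, k + 1, j) + ")"

dimensions = [10, 20, 30, 40]   # A1: 10x20, A2: 20x30, A3: 30x40
m, s = matrix_chain_dynamic(dimensions, len(dimensions))
print(m[1][len(dimensions) - 1])                  # 32000 scalar multiplications
print(print_brackets(s, 1, len(dimensions) - 1))  # (A1(A2A3))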
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _multi_matmul_chain_order(arrays):\n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n # Using -2 to generalize for shapes that are more than 2 dimmensions\n p = [a.shape[-2] for a in arrays] + [arrays[-1].shape[-1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = np.zeros((n, n), dtype=np.double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = np.empty((n, n), dtype=np.intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = np.inf\n for k in range(i, j):\n q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n return s", "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11", "def derMatrix(Bulk_Operators, Bdy_Operators, Bulk_second, s=1):\n\t\tMatrix = []\n\t\tfor i in range(1, M_max + 1):\n\t\t\t\trow = []\n\t\t\t\tfor Bulk_Op in Bulk_Operators:\n\t\t\t\t\t\ttable_call = N*table1.table[0, i].subs({\"Delta\":Bulk_Op, \"Delta_12\":delta_12, \"Xi\":1.00})\n\t\t\t\t\t\trow.append(table_call)\n\t\t\t\tfor index in range(len(Bulk_second)):\n\t\t\t\t\t\trow.append(0*table1.table[0,0])\n\t\t\t\tfor Bdy_Op in Bdy_Operators:\n\t\t\t\t\tif Bdy_Op == 2.0:\n\t\t\t\t\t\t\tprint(\"here\")\n\t\t\t\t\t\t\ttable_call = (N-1)*table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\telse:\n\t\t\t\t\t\ttable_call = table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\trow.append(table_call)\n\t\t\t\tXi = symbols('Xi')\n\t\t\t\tif s == 1:\n\t\t\t\t\t\tlast_expr = Xi**((delta_1 + delta_2)/2)\n\t\t\t\t\t\tlast_elem = diff(last_expr, Xi, 
i).subs({\"Xi\":1.00}).evalf()\n\t\t\t\t\t\trow.append(last_elem)\n\t\t\t\tMatrix.append(row)\n\t\t\t\trow2 = []\n\t\t\t\tfor index in range(len(Bulk_Operators)):\n\t\t\t\t\t\trow2.append(0*table1.table[0,0])\n\t\t\t\tfor Bulk_Op in Bulk_second:\n\t\t\t\t\t\ttable_call = table1.table[0, i].subs({\"Delta\":Bulk_Op, \"Delta_12\":delta_12, \"Xi\":1.00})\n\t\t\t\t\t\trow2.append(table_call)\n\t\t\t\tfor Bdy_Op in Bdy_Operators:\n\t\t\t\t\t\tif Bdy_Op == 2.0:\n\t\t\t\t\t\t\tprint(\"here\")\n\t\t\t\t\t\t\ttable_call = -1*table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttable_call = table1.table[1, i].subs({\"Delta\":Bdy_Op, \"Xi\":1.00})\n\t\t\t\t\t\trow2.append(table_call)\n\t\t\t\tXi = symbols('Xi')\n\t\t\t\tif s == 1:\n\t\t\t\t\t\tlast_expr = Xi**((delta_1 + delta_2)/2)\n\t\t\t\t\t\tlast_elem = diff(last_expr, Xi, i).subs({\"Xi\":1.00}).evalf()\n\t\t\t\t\t\trow2.append(last_elem)\n\t\t\t\tMatrix.append(row2)\t\t\n\t\treturn np.array(Matrix)", "def MatMulOrder(D):\r\n\tnum = len(D)-1 # number of matrix in the chain\r\n\tprint(f\"There are {num} matrix to multiply\")\r\n\tM = [[0 for _ in range(num)] for _ in range(num)]\r\n\tP = [[0 for _ in range(num)] for _ in range(num)]\r\n\r\n\t# i要从大到小\r\n\t# i == j时, M[i][j]=0,所以不用更新\r\n\t# i-th矩阵到j-th矩阵的乘的最优值初始化为inf\r\n\tfor i in range(num-2, -1, -1):\r\n\t\tfor j in range(i+1, num):\r\n\t\t\tM[i][j] = 100000000\r\n\t\t\tfor k in range(i, j):\r\n\t\t\t\tnew = M[i][k] + M[k+1][j] + D[i]*D[k+1]*D[j+1]\r\n\t\t\t\tif new < M[i][j]:\r\n\t\t\t\t\tM[i][j] = new \r\n\t\t\t\t\tP[i][j] = k\r\n\treturn M, P", "def J (self, n):", "def it_matrixpower(p,t,n,root_field=RR):\n assert n>=2, \"Carleman matrix must at least be of size 2 to retrieve the coefficients. But given was \" + repr(n)\n CM = p.carleman_matrix(n)\n ev = CM.charpoly().roots(root_field)\n assert len(ev) == n, \"Carleman matrix must have exactly \" + repr(n) + \"eigenvalues, but has \" + repr(len(ev))\n\n Char = [0]*n\n for k in range(n):\n #here is possibility for improvement of precision\n #to separate the fractional from the root parts\n #expanding the product\n Char[k] = CM - ev[k][0]*identity_matrix(n)\n\n #we want to have the first row of the product of the matrices\n #thatswhy we mulitply in front with:\n prod = vector(p.K,[0,1]+[0]*(n-2))\n prodwo = [0]*n\n for k in range(n):\n prodwo[k]=prod #these are the first terms until k-1\n\n #no need to continue\n if k == n-1:\n break\n\n #and we add the terms starting with k+1\n for i in range(k+1,n):\n prodwo[k] = prodwo[k] * Char[i]\n\n prod = prod * Char[k]\n\n sprodwo = [0]*n\n for k in range(n):\n if k==0:\n sprodwo[k] = ev[k][0] - ev[1][0]\n start = 2\n else:\n sprodwo[k] = ev[k][0] - ev[0][0]\n start = 1\n\n for i in range(start,n):\n if i != k:\n sprodwo[k] = sprodwo[k] * (ev[k][0] - ev[i][0])\n\n res = ev[0][0]**t/sprodwo[0] * prodwo[0]\n for k in range(1,n):\n res += ev[k][0]**t/sprodwo[k]*prodwo[k]\n\n return res.list()", "def generateOperator(onQubits: Union[int, List[int]], matrices: Union[numpy.ndarray, List[numpy.ndarray]],\n sysLevel: Union[int, List[int]], qubitNum: int) -> numpy.ndarray:\n # Each qubit of the system has the same energy level. 
\n if isinstance(sysLevel, int):\n # We first define the identity matrix to fill un-assigned qubits\n idMat = numpy.identity(sysLevel, dtype=complex)\n if isinstance(onQubits, int):\n assert numpy.size(matrices) == (sysLevel, sysLevel), \"Dimension of matrix does not match the system Level.\"\n # The operator is on only one qubit.\n if onQubits == 0:\n # This operator is on the first qubit.\n operator = matrices\n for i in range(1, qubitNum):\n operator = numpy.kron(operator, idMat)\n else:\n # This operator is not on the first qubit.\n operator = idMat\n for i in range(1, onQubits):\n operator = numpy.kron(operator, idMat)\n operator = numpy.kron(operator, matrices)\n for i in range(onQubits + 1, qubitNum):\n operator = numpy.kron(operator, idMat)\n return operator\n elif isinstance(onQubits, list):\n operator = []\n for i in range(qubitNum):\n if i == 0:\n # On the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operator = matrices[matrixIndex]\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel, sysLevel), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel}).\"\n else:\n operator = idMat\n else:\n # Not on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel, sysLevel), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel}).\"\n operator = numpy.kron(operator, matrices[matrixIndex])\n else:\n operator = numpy.kron(operator, idMat)\n return operator\n \n else:\n assert False, \"Variable onQubits should be a list or an int.\"\n # The sysLevel is a list of different energy levels for multiple qubits\n if isinstance(sysLevel, list):\n # Create a list of identities of different dimension for each qubit of different energy level\n idMat = [numpy.identity(i, dtype=complex) for i in sysLevel]\n # The operator is acting on only one qubit.\n if isinstance(onQubits, int):\n assert numpy.size(matrices) == (sysLevel[onQubits], sysLevel[onQubits]), \"Dimension of matrix does not match the system Level.\" \n # The operator is acting on the first qubit.\n if onQubits == 0:\n operator = matrices\n for i in range(1, qubitNum):\n operator = numpy.kron(operator, idMat[i])\n else:\n # This operator is not acting on the first qubit.\n operator = idMat[0]\n for i in range(1, onQubits):\n operator = numpy.kron(operator, idMat[i])\n operator = numpy.kron(operator, matrices)\n for i in range(onQubits + 1, qubitNum):\n operator = numpy.kron(operator, idMat[i])\n return operator\n # The operator is acting on multiple qubits.\n elif isinstance(onQubits, list):\n operator = []\n for i in range(qubitNum):\n if i == 0:\n # Acting on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operator = matrices[matrixIndex]\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel[i], sysLevel[i]), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel[i]}).\"\n else:\n operator = idMat[i]\n else:\n # Not acting on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel[i], sysLevel[i]), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel[i]}).\"\n operator = numpy.kron(operator, matrices[matrixIndex])\n else:\n operator = numpy.kron(operator, idMat[i])\n return operator\n 
\n else:\n assert False, \"Variable onQubits should be a list or an int.\"", "def boundary_op_n(v):\r\n h = list(v.dic.keys())[0]\r\n p = len(h) - 1\r\n s = P_chains([],[])\r\n if (p != 0) and (isinstance(h, str) != True) and (isinstance(h, frozenset) != True) and (isinstance(h, ImmutableMatrix) != True):\r\n if (is_int(list(v.dic.keys())) == True):\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n return s\r\n else:\r\n aux = P_chains([],[])\r\n D = {}\r\n ct = 0\r\n st = []\r\n for u in v.dic.keys():\r\n for x in u:\r\n if x not in st:\r\n st.append(x)\r\n for i in st:\r\n D[tuple([ct])] = i\r\n ct = ct + 1\r\n for u in v.dic.keys():\r\n w2 = []\r\n for x in u:\r\n for y in list(D.keys()):\r\n if (x == D[y]):\r\n w2.append(y)\r\n aux = aux + P_chains([tuple(w2)],[v.dic[u]]) \r\n v = aux\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n s2 = P_chains([],[])\r\n for u in s.dic.keys():\r\n w2=[]\r\n for i in u:\r\n w2.append(D[i])\r\n s2 = s2 + P_chains([tuple(w2)],[s.dic[u]])\r\n \r\n return s2\r\n else:\r\n return s", "def problem():\n\n print 'problem #27'\n\n l = 0\n m_a = 0\n m_b = 0\n for a in xrange(-1000, 1000):\n for b in xrange(-1000, 1000):\n p = len(check(a, b))\n if p > l:\n l = p\n m_a = a\n m_b = b\n\n print 'the product of coefficients is %s' % (m_a * m_b)", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! 
\n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n return u_next", "def metro_alg(N):\n\n chain = np.zeros(N) # start with x_0 = 0\n chain_removed = np.array([0])\n j = 0\n for i in range(N-1):\n\n y = (np.random.rand()-0.5)*10\n if next_chain_link(chain[i], y):\n chain[i + 1] = y\n else:\n chain[i + 1] = chain[i]\n\n if next_chain_link(chain_removed[j], y):\n chain_removed = np.append(chain_removed, y) # append creates new array, does not change array argument\n j += 1\n\n return chain, chain_removed", "def decode_MST(energies, lengths, leading_symbolic=0, labeled=True):\n\n def find_cycle(par):\n added = np.zeros([length], np.bool)\n added[0] = True\n cycle = set()\n findcycle = False\n for i in range(1, length):\n if findcycle:\n break\n\n if added[i] or not curr_nodes[i]:\n continue\n\n # init cycle\n tmp_cycle = set()\n tmp_cycle.add(i)\n added[i] = True\n findcycle = True\n l = i\n\n while par[l] not in tmp_cycle:\n l = par[l]\n if added[l]:\n findcycle = False\n break\n added[l] = True\n tmp_cycle.add(l)\n\n if findcycle:\n lorg = l\n cycle.add(lorg)\n l = par[lorg]\n while l != lorg:\n cycle.add(l)\n l = par[l]\n break\n\n return findcycle, cycle\n\n def chuLiuEdmonds():\n par = np.zeros([length], dtype=np.int32)\n # create best graph\n par[0] = -1\n for i in range(1, length):\n # only interested at current nodes\n if curr_nodes[i]:\n max_score = score_matrix[0, i]\n par[i] = 0\n for j in range(1, length):\n if j == i or not curr_nodes[j]:\n continue\n\n new_score = score_matrix[j, i]\n if new_score > max_score:\n max_score = new_score\n par[i] = j\n\n # find a cycle\n findcycle, cycle = find_cycle(par)\n # no 
cycles, get all edges and return them.\n if not findcycle:\n final_edges[0] = -1\n for i in range(1, length):\n if not curr_nodes[i]:\n continue\n\n pr = oldI[par[i], i]\n ch = oldO[par[i], i]\n final_edges[ch] = pr\n return\n\n cyc_len = len(cycle)\n cyc_weight = 0.0\n cyc_nodes = np.zeros([cyc_len], dtype=np.int32)\n id = 0\n for cyc_node in cycle:\n cyc_nodes[id] = cyc_node\n id += 1\n cyc_weight += score_matrix[par[cyc_node], cyc_node]\n\n rep = cyc_nodes[0]\n for i in range(length):\n if not curr_nodes[i] or i in cycle:\n continue\n\n max1 = float(\"-inf\")\n wh1 = -1\n max2 = float(\"-inf\")\n wh2 = -1\n\n for j in range(cyc_len):\n j1 = cyc_nodes[j]\n if score_matrix[j1, i] > max1:\n max1 = score_matrix[j1, i]\n wh1 = j1\n\n scr = cyc_weight + score_matrix[i, j1] - score_matrix[par[j1], j1]\n\n if scr > max2:\n max2 = scr\n wh2 = j1\n\n score_matrix[rep, i] = max1\n oldI[rep, i] = oldI[wh1, i]\n oldO[rep, i] = oldO[wh1, i]\n score_matrix[i, rep] = max2\n oldO[i, rep] = oldO[i, wh2]\n oldI[i, rep] = oldI[i, wh2]\n\n rep_cons = []\n for i in range(cyc_len):\n rep_cons.append(set())\n cyc_node = cyc_nodes[i]\n for cc in reps[cyc_node]:\n rep_cons[i].add(cc)\n\n for i in range(1, cyc_len):\n cyc_node = cyc_nodes[i]\n curr_nodes[cyc_node] = False\n for cc in reps[cyc_node]:\n reps[rep].add(cc)\n\n chuLiuEdmonds()\n\n # check each node in cycle, if one of its representatives is a key in the final_edges, it is the one.\n found = False\n wh = -1\n for i in range(cyc_len):\n for repc in rep_cons[i]:\n if repc in final_edges:\n wh = cyc_nodes[i]\n found = True\n break\n if found:\n break\n\n l = par[wh]\n while l != wh:\n ch = oldO[par[l], l]\n pr = oldI[par[l], l]\n final_edges[ch] = pr\n l = par[l]\n\n if labeled:\n assert energies.ndim == 4, 'dimension of energies is not equal to 4'\n else:\n assert energies.ndim == 3, 'dimension of energies is not equal to 3'\n input_shape = energies.shape\n batch_size = input_shape[0]\n max_length = input_shape[2]\n\n pars = np.zeros([batch_size, max_length], dtype=np.int32)\n arc_tags = np.zeros([batch_size, max_length], dtype=np.int32) if labeled else None\n for i in range(batch_size):\n energy = energies[i]\n\n # calc the real length of this instance\n length = lengths[i]\n\n # calc real energy matrix shape = [length, length, num_labels - #symbolic] (remove the label for symbolic arcs).\n if labeled:\n energy = energy[leading_symbolic:, :length, :length]\n # get best label for each edge.\n label_id_matrix = energy.argmax(axis=0) + leading_symbolic\n energy = energy.max(axis=0)\n else:\n energy = energy[:length, :length]\n label_id_matrix = None\n # get original score matrix\n orig_score_matrix = energy\n # initialize score matrix to original score matrix\n score_matrix = np.array(orig_score_matrix, copy=True)\n\n oldI = np.zeros([length, length], dtype=np.int32)\n oldO = np.zeros([length, length], dtype=np.int32)\n curr_nodes = np.zeros([length], dtype=np.bool)\n reps = []\n\n for s in range(length):\n orig_score_matrix[s, s] = 0.0\n score_matrix[s, s] = 0.0\n curr_nodes[s] = True\n reps.append(set())\n reps[s].add(s)\n for t in range(s + 1, length):\n oldI[s, t] = s\n oldO[s, t] = t\n\n oldI[t, s] = t\n oldO[t, s] = s\n\n final_edges = dict()\n chuLiuEdmonds()\n par = np.zeros([max_length], np.int32)\n if labeled:\n arc_tag = np.ones([max_length], np.int32)\n arc_tag[0] = 0\n else:\n arc_tag = None\n\n for ch, pr in final_edges.items():\n par[ch] = pr\n if labeled and ch != 0:\n arc_tag[ch] = label_id_matrix[pr, ch]\n\n par[0] = 0\n pars[i] = par\n 
if labeled:\n arc_tags[i] = arc_tag\n\n return pars, arc_tags", "def chain_matmul_square(As):\n\n As_matmul = As\n while As_matmul.shape[0] > 1:\n if As_matmul.shape[0] % 2:\n A_last = As_matmul[-1:]\n else:\n A_last = None\n \n As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2])\n if A_last is not None:\n As_matmul = torch.cat([As_matmul, A_last], dim=0)\n \n return As_matmul.squeeze(0)", "def associativity(ob):\n return 0", "def _simpl_varsh_872_4(expr: Sum):\n if len(expr.args) != 3:\n return None\n\n dummies = (expr.args[1][0], expr.args[2][0])\n j1, j2, cj1, cm1, cj2, cm2 = symbols('j1 j2 J1 M1 J2 M2', cls=Wild)\n\n for m1, m2 in [dummies, reversed(dummies)]:\n match = expr.args[0].match(\n CG(j1, m1, j2, m2, cj1, cm1) * CG(j1, m1, j2, m2, cj2, cm2)\n )\n if not match:\n continue\n return KroneckerDelta(\n match[cj1], match[cj2]\n ) * KroneckerDelta(match[cm1], match[cm2])\n\n return None", "def get_qubitops(H, verbose):\n num_nodes = H.shape[0]\n pauli_list = [] \n s = \"\"\n for i in range(num_nodes):\n xp = np.zeros(num_nodes, dtype=np.bool)\n zp = np.zeros(num_nodes, dtype=np.bool)\n zp[i] = True\n pauli_list.append([ H[i, i], Pauli(zp, xp)]) \n s += ' {}*Z[{}]'.format(H[i,i], i)\n for j in range(i):\n if H[i, j] != 0:\n xp = np.zeros(num_nodes, dtype=np.bool)\n zp = np.zeros(num_nodes, dtype=np.bool)\n zp[i] = True\n zp[j] = True\n pauli_list.append([ H[i, j], Pauli(zp, xp)]) \n s += ' + {}*Z[{}]*Z[{}]'.format(H[i,j], i, j) \n if verbose > 0:\n print(s)\n return Operator(paulis=pauli_list)", "def homotopy(chain):\n # We clone the input chain to prevent unexpected alterations\n work_chain = copy.deepcopy(chain)\n\n # We create a dictionary \"priority\" which gives the rank of queens.\n # Lower priority numbers will be processed first (i.e. priority ranking, not priority value)\n Queens = [Q for Q in cellcomplex if isQueen(Q)]\n def AdjacentQueens(Q): return [ q for q in bd(M(Q)) if isQueen(q) and q != Q ]\n priority = { Q : rank for (rank, Q) in enumerate(TopologicalSort(Queens, AdjacentQueens)) }\n\n # We arrange the priority queue for queens.\n # We use an auxiliary set \"enqueued\" to prevent the same queen from being\n # placed in the priority queue twice.\n work_queue = PriorityQueue()\n enqueued = set()\n def enqueue(list_of_queens):\n for Q in list_of_queens:\n if Q in enqueued: continue\n enqueued.add(Q)\n work_queue.put((-priority[Q], Q))\n\n # Initialize queue with the queens in the original chain\n enqueue([ Q for Q in work_chain if isQueen(Q) ])\n\n # Make a zero chain of correct dimension to store result in\n gamma_chain = Chain(dim(chain) + 1, cellcomplex.ring())\n\n # We iteratively process the maximal queen in \"work_chain\", each time\n # adding the appropriate multiple of the boundary of its mating king in \n # order to cancel it. 
Doing this can add new queens, which we enqueue.\n # A theorem prevents previously processed queens from being \"new_queens\" \n # We keep track of the king chain as we go.\n while not work_queue.empty():\n (rank, Q) = work_queue.get()\n a = work_chain[Q]\n if a == 0: continue\n K = M(Q)\n bd_K = bd(K)\n b = bd_K[Q]\n c = -a/b\n gamma_chain[K] += c\n work_chain += c * bd_K\n enqueue([ q for q in bd_K if isQueen(q) and q != Q ])\n return gamma_chain", "def _simpl_varsh_872_5(expr: Sum):\n if len(expr.args) != 3:\n return None\n\n dummies = (expr.args[1][0], expr.args[2][0])\n j1, j2, m2, j3, m3, cj = symbols('j1 j2 m2 j3 m3 J', cls=Wild)\n for m1, cm in [dummies, reversed(dummies)]:\n match = expr.args[0].match(\n CG(j1, m1, j2, m2, cj, cm) * CG(j1, m1, j3, m3, cj, cm)\n )\n\n if not match:\n continue\n\n cjhat = 2 * match[cj] + 1\n jhat2 = 2 * match[j2] + 1\n\n return (cjhat / jhat2) * KroneckerDelta(\n match[j2], match[j3]\n ) * KroneckerDelta(match[m2], match[m3])\n\n return None", "def C(relatorlist,quit_at=float('inf')):\n F,rels=fg.parseinputwords(relatorlist)\n if not all(r==F.cyclic_reduce(r) for r in rels):\n raise ValueError(\"Relators are not cyclically reduced.\")\n thepieces=pieces(rels)\n minnumberpieces=quit_at\n def min_string_piece_expression(whatsleft,thepieces,quit_at):\n # recursively determine the minimal expression of the string whatsleft as a concatenation of elements of thepieces, or stop once it is determined that any such expression requires at least quit_at many pieces\n # find a piece that agrees with a prefix of whatsleft and the recurse on the suffix\n if not whatsleft:\n return 0\n minexp=quit_at\n for p in thepieces:\n if p!=whatsleft[:len(p)]:\n continue\n else:\n minexp=min(minexp,1+min_string_piece_expression(whatsleft[len(p):],thepieces,minexp-1))\n return minexp\n def min_relator_piece_expression(relator,thepieces,quit_at):\n # This is first step in recursive search. Here we want to find a piece p such that for relator r we can write p=xy and r=yzx, with y nontrivial. That is, in this step only we think of r as cyclic word and allow first piece that wraps.\n r=relator()\n minexp=quit_at\n for p in thepieces:\n if len(p)>len(r):\n continue\n possiblestartingindices=[] # for given p there may be different possible choices of y\n for startingindex in range(len(r)-len(p)+1,len(r)+1):\n if p==(r+r)[startingindex:startingindex+len(p)]:\n possiblestartingindices.append(startingindex)\n if not possiblestartingindices:\n continue\n for startingindex in possiblestartingindices:\n # found a way to fit p into r spanning the beginning of r. This accounts for x and y part of r. Now recursively find shortest expression of z=whatsleft as a concatenation of pieces.\n whatsleft=(r+r)[startingindex+len(p):startingindex+len(r)]\n if not whatsleft:\n return 1\n else:\n minexp=min(minexp,1+min_string_piece_expression(whatsleft,thepieces,minexp-1))\n return minexp\n for thisrelator in rels:\n minnumberpieces=min(minnumberpieces,min_relator_piece_expression(thisrelator,thepieces,minnumberpieces))\n return minnumberpieces", "def auxmax_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. 
If k_ind = nomax-1, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1: #\n print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n return f_cc\n else:\n print \"hihu1\",line_start\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n \n # The last line.\n if k_ind==cfg.nomax-1:\n \n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n \n \n return f_cc", "def m(self):\n\t\tn = 0\n\t\ti = self.k0\n\t\twhile 1:\n\t\t\tif i > self.j:\n\t\t\t\treturn n\n\t\t\tif not self.cons(i):\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\ti = i + 1\n\t\twhile 1:\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1\n\t\t\tn = n + 1\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif not self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1", "def bc_matrix(params):\r\n w = params['w']\r\n kx = params['kx']\r\n d_list = params['d_list']\r\n ex_list = params['ex_list']\r\n ez_list = params['ez_list']\r\n kz_list = params['kz_list']\r\n N = len(d_list)\r\n assert N == len(d_list) == len(ex_list) == len(ez_list) == len(kz_list)\r\n assert N >= 2\r\n assert d_list[0] == d_list[-1] == inf\r\n \r\n # delta = e^{i * kz * d}, i.e. 
phase change across each layer\r\n # delta[0] and delta[-1] are undefined and are not used.\r\n delta_list = [cmath.exp(1j * kz_list[i] * d_list[i]) for i in range(N)]\r\n \r\n Ex_up_over_H_up_list = [kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ex_down_over_H_down_list = [-a for a in Ex_up_over_H_up_list]\r\n Ez_up_over_H_up_list = [-kx / (w * ez_list[i] * nu.eps0) for i in range(N)]\r\n Ez_down_over_H_down_list = Ez_up_over_H_up_list[:]\r\n \r\n mat = np.zeros((2*N-2, 2*N-2), dtype=complex)\r\n \r\n for row_now in range(N-1):\r\n # This row concerns continuity of Ex across the boundary between\r\n # layer_under and layer_over (under and over the boundary respectively)\r\n layer_under = row_now\r\n layer_over = layer_under + 1\r\n # up_under_index is the column index in mat that gets multiplied by\r\n # H_{up} in layer_under.\r\n up_under_index = 2 * layer_under - 1\r\n down_under_index = 2 * layer_under\r\n up_over_index = 2 * layer_over - 1\r\n down_over_index = 2 * layer_over\r\n \r\n if layer_under != 0:\r\n assert 0 <= up_under_index < 2*N-2\r\n mat[row_now, up_under_index] = (\r\n Ex_up_over_H_up_list[layer_under] * delta_list[layer_under])\r\n mat[row_now, down_under_index] = Ex_down_over_H_down_list[layer_under]\r\n mat[row_now, up_over_index] = -Ex_up_over_H_up_list[layer_over]\r\n if layer_over != N-1:\r\n assert 0 <= down_over_index < 2*N-2\r\n mat[row_now, down_over_index] = (\r\n -Ex_down_over_H_down_list[layer_over] * delta_list[layer_over])\r\n\r\n for row_now in range(N-1, 2*N-2):\r\n # This row concerns continuity of eps_z * Ez across the boundary between\r\n # layer_under and layer_over (under and over the boundary respectively)\r\n layer_under = row_now - (N-1)\r\n layer_over = layer_under + 1\r\n # up_under_index is the column index in mat that gets multiplied by\r\n # H_{up} in layer_under.\r\n up_under_index = 2 * layer_under - 1\r\n down_under_index = 2 * layer_under\r\n up_over_index = 2 * layer_over - 1\r\n down_over_index = 2 * layer_over\r\n \r\n if layer_under != 0:\r\n assert 0 <= up_under_index < 2*N-2\r\n mat[row_now, up_under_index] = (ez_list[layer_under] *\r\n Ez_up_over_H_up_list[layer_under] * delta_list[layer_under])\r\n mat[row_now, down_under_index] = (ez_list[layer_under] *\r\n Ez_down_over_H_down_list[layer_under])\r\n mat[row_now, up_over_index] = (-ez_list[layer_over] * \r\n Ez_up_over_H_up_list[layer_over])\r\n if layer_over != N-1:\r\n assert 0 <= down_over_index < 2*N-2\r\n mat[row_now, down_over_index] = (-ez_list[layer_over] *\r\n Ez_down_over_H_down_list[layer_over] * delta_list[layer_over])\r\n \r\n return mat", "def test_sqopprod():\n w.reset_space()\n w.add_space(\"o\", \"fermion\", \"occupied\", [\"i\", \"j\"])\n w.add_space(\"a\", \"fermion\", \"general\", [\"u\", \"v\"])\n w.add_space(\"v\", \"fermion\", \"occupied\", [\"a\", \"b\", \"c\"])\n\n opprod = w.sqopprod([], [])\n assert str(opprod) == \"\"\n\n opprod = w.sqopprod([\"v_0\"], [])\n assert str(opprod) == \"a+(v0)\"\n\n opprod = w.sqopprod([], [\"o_0\"])\n assert str(opprod) == \"a-(o0)\"\n\n opprod = w.sqopprod([\"v_0\"], [\"o_0\"])\n assert str(opprod) == \"a+(v0) a-(o0)\"\n assert opprod.latex() == r\"\\hat{a}^\\dagger_{a} \\hat{a}_{i}\"\n\n opprod = w.sqopprod([\"v_0\", \"v_1\"], [\"o_0\", \"o_1\"])\n assert str(opprod) == \"a+(v0) a+(v1) a-(o1) a-(o0)\"\n\n opprod1 = w.sqopprod([\"v_0\", \"v_1\"], [\"o_0\", \"o_1\"])\n opprod2 = w.sqopprod([\"v_0\", \"v_1\"], [\"o_0\", \"o_1\"])\n assert opprod1 == opprod2\n\n opprod1 = w.sqopprod([\"v_0\"], [])\n 
opprod2 = w.sqopprod([\"v_0\"], [])\n assert not (opprod1 < opprod2)\n\n opprod1 = w.sqopprod([\"v_0\"], [])\n opprod2 = w.sqopprod([\"v_1\"], [])\n assert opprod1 < opprod2\n\n # let's test a bunch of combinations\n\n opprod1 = w.sqopprod([\"v_0\"], [\"o_0\"])\n opprod2 = w.sqopprod([\"v_0\"], [\"o_0\"])\n assert opprod1 == opprod2\n assert not (opprod1 < opprod2)\n\n opprod1 = w.sqopprod([\"v_0\"], [\"o_0\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_0\"], [\"o_1\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_1\"], [\"o_0\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_1\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_1\"], [\"o_1\"])\n opprod2 = w.sqopprod([\"v_1\"], [\"o_0\"])\n assert not (opprod1 < opprod2)\n\n opprod1 = w.sqopprod([\"v_1\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"v_1\", \"v_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"v_2\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"v_1\", \"v_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"o_4\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"v_1\", \"v_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"o_4\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"a_1\", \"o_2\"], [\"o_0\"])\n assert opprod1 < opprod2\n\n opprod1 = w.sqopprod([\"a_4\"], [\"o_2\"])\n opprod2 = w.sqopprod([\"a_4\"], [\"a_2\"])\n assert opprod1 < opprod2", "def compose_children(self):\n for l_symbol, l_info in self.matrix[self.i][self.k].items():\n l_rhs = Nonterminal(l_symbol)\n for r_symbol, r_info in self.matrix[self.k][self.j].items():\n r_rhs = Nonterminal(r_symbol)\n\n # check the subtrees in [i][k] and [k][j] to see if you can make a valid rhs\n potential_rules = [p for p in self.grammar.productions(rhs=l_rhs) if p.rhs()[1] == r_rhs]\n for potential_rule in sorted(potential_rules, key=lambda x: x.prob()):\n new_lhs = potential_rule.lhs().symbol()\n new_tree = Tree(new_lhs, [l_info[1], r_info[1]])\n new_prob = log(potential_rule.prob()) + l_info[0] + r_info[0]\n if new_lhs not in self.matrix[self.i][self.j] or new_prob > self.matrix[self.i][self.j][new_lhs][0]:\n self.matrix[self.i][self.j][new_lhs] = (new_prob, new_tree)", "def metro_alg(N):\n\n chain = []\n chain_removed = []\n chain.append(0)\n chain_removed.append(0)\n\n for i in range(N):\n j = 0\n y = (np.random.rand()-0.5)*10\n if next_chain_link(chain[i], y):\n chain.append(y)\n else:\n chain.append(chain[i])\n\n if next_chain_link(chain_removed[j], y):\n chain_removed.append(y)\n j += 1\n\n return chain, chain_removed", "def find_max_independent_set(graph, params):\r\n\r\n max_ind_set = []\r\n\r\n # QHACK #\r\n\r\n # function that takes in a graph and outputs the hamiltonians\r\n cost_h, mixer_h = qaoa.max_independent_set(graph, constrained=True) # Assume the graph they give me is good\r\n\r\n def qaoa_layer(gamma, alpha):\r\n qaoa.cost_layer(gamma, cost_h)\r\n qaoa.mixer_layer(alpha, mixer_h)\r\n\r\n dev = qml.device(\"default.qubit\", wires=range(NODES))\r\n\r\n def circuit(params, **kwargs): \r\n qml.layer(qaoa_layer, N_LAYERS, params[0], params[1]) \r\n\r\n @qml.qnode(dev)\r\n def probability_circuit(gamma, alpha):\r\n circuit([gamma, alpha])\r\n return qml.probs(wires=range(NODES))\r\n\r\n answer = probability_circuit(params[0], params[1])\r\n\r\n maxn = 0\r\n maxn = max(answer)\r\n\r\n for i in range(len(answer)):\r\n if maxn == answer[i]:\r\n decimal = i\r\n \r\n binary_num = []\r\n def DecimalToBinary(decimal):\r\n 
if decimal >= 1:\r\n DecimalToBinary(decimal // 2)\r\n binary_num.append(decimal % 2)\r\n \r\n DecimalToBinary(decimal)\r\n\r\n if len(binary_num) < 6:\r\n if len(binary_num) < 5:\r\n if len(binary_num) < 4:\r\n if len(binary_num) < 3:\r\n if len(binary_num) < 2:\r\n binary_num.insert(0, 0) # At beginning append 0\r\n binary_num.insert(0, 0)\r\n binary_num.insert(0, 0)\r\n binary_num.insert(0, 0)\r\n binary_num.insert(0, 0)\r\n\r\n for i in range(6):\r\n if binary_num[i] == 1:\r\n max_ind_set.append(i)\r\n\r\n # QHACK #\r\n\r\n return max_ind_set", "def contraction_max_algos():\n return cutensor.contractionMaxAlgos()", "def __init__(self, nums):\r\n # Define the table for dynamic programming\r\n # that has all sums from the first element to the current\r\n self.dp = [sum(nums[0:i+1]) for i in range(len(nums))]", "def symbolic_max_plus_matrices(d, n, ch=None, typ='sym'):\n d = int(d)\n n = int(n)\n if d <= 0:\n raise ValueError(\"d (= {}) must be postive\".format(d))\n\n nvar = n * d * d\n\n V = FreeModule(ZZ, nvar)\n B = ((b,) for b in V.basis())\n\n matrices = []\n\n if d == 1:\n typ = 'full'\n\n if typ == 'sym' or typ == 'quick':\n z = [0]*nvar\n for i in range(n):\n z[i*d*d] = 1\n diag = (V(z),)\n z[i*d*d] = 0\n\n z[i*d*d+1] = 1\n nondiag = (V(z),)\n z[i*d*d+1] = 0\n\n if typ == 'sym':\n matrices.append(SymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch))\n else:\n matrices.append(QuickSymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch))\n elif typ == 'full':\n for i in range(n):\n mat = []\n for j in range(d):\n mat.append([next(B) for k in range(d)])\n matrices.append(SymbolicMaxPlusMatrix(d, nvar, mat, ch))\n else:\n raise ValueError\n\n return matrices", "def Multiply(M1,M2):\r\n M3=[]\r\n w=0\r\n while w<len(M2[0]):\r\n tap=[]\r\n t=0\r\n while t<len(M2):\r\n tap.append(M2[t][w])\r\n t=t+1\r\n M3.append(tap)\r\n w=w+1\r\n M=[]\r\n # Multiplying matrices\r\n k=0\r\n sums=0\r\n while k<len(M1):\r\n j=0\r\n mpy=[]\r\n while j<len(M3):\r\n p=0\r\n sums=0\r\n while p<len(M3[j]):\r\n temp = (M1[k][p])*(M3[j][p])\r\n sums=sums+temp\r\n p=p+1\r\n mpy.append(sums)\r\n j=j+1\r\n M.append(mpy)\r\n k=k+1\r\n return M" ]
[ "0.628266", "0.6160883", "0.6072152", "0.58409727", "0.58077633", "0.580263", "0.5799161", "0.57617015", "0.5760376", "0.5655284", "0.5590683", "0.5581058", "0.55787057", "0.5549699", "0.5547573", "0.5537065", "0.5532428", "0.5504157", "0.5502477", "0.5472439", "0.546243", "0.5443431", "0.53955513", "0.5392002", "0.5391405", "0.53836715", "0.53817654", "0.5381458", "0.53717256", "0.53670126" ]
0.62702864
1
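A minimal usage sketch for the matrix_chain_dynamic function from the document field of the row above, assuming that function is defined in scope; the brackets helper is an assumed illustrative addition (not part of the original row) used only to rebuild the worst parenthesization from the split table s.

# three matrices of shapes 10x100, 100x5 and 5x50
dimensions = [10, 100, 5, 50]
n = len(dimensions)                 # the implementation expects n == len(dimensions)
m, s = matrix_chain_dynamic(dimensions, n)

# m[1][n - 1] holds the largest possible number of scalar multiplications
print("worst-case cost:", m[1][n - 1])          # 75000 for this chain

def brackets(split, i, j):
    # assumed helper: rebuilds the worst bracketing from the split table
    if i == j:
        return "A%d" % i
    k = split[i][j]
    return "(%s x %s)" % (brackets(split, i, k), brackets(split, k + 1, j))

print("worst bracketing:", brackets(s, 1, n - 1))   # (A1 x (A2 x A3))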
Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are used to get the metadata for each model; only URLs ending with .bin are added.
def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: if cache_dir is None: cache_dir = TRANSFORMERS_CACHE elif isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_models = [] for file in os.listdir(cache_dir): if file.endswith(".json"): meta_path = os.path.join(cache_dir, file) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] if url.endswith(".bin"): size_MB = os.path.getsize(meta_path.strip(".json")) / 1e6 cached_models.append((url, etag, size_MB)) return cached_models
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_cached_files(cache_dir=None):\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n else:\n cache_dir = str(cache_dir)\n if not os.path.isdir(cache_dir):\n return []\n\n cached_files = []\n for file in os.listdir(cache_dir):\n meta_path = os.path.join(cache_dir, f\"{file}.json\")\n if not os.path.isfile(meta_path):\n continue\n\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"].replace('\"', \"\")\n cached_files.append({\"file\": file, \"url\": url, \"etag\": etag})\n\n return cached_files", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache", "def fetch(\n self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]\n ) -> List[str]:\n return [\n download_snapshot(source_config[\"url\"], output_folder, **source_config.get(\"opts\", {}))\n for source_config in fetch_opts\n ]", "def list_cached():\n for json_name in cached_files():\n source_name = get_source_file_name(json_name)\n yield (json_name, source_name)", "def cachepath(self):\n return [self.fs.cachepath(uri) for uri in self.uri]", "def find_cache_files():\n files = []\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"*.pyc\"):\n files.append(os.path.join(root, filename))\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"__pycache__\"):\n files.append(os.path.join(root, filename))\n\n return files", "async def get() -> list:\n if _cache is None:\n await _update()\n return _cache", "def _list_dir(self):\n return [os.path.join(self.cache_dir, fn)\n for fn in os.listdir(self.cache_dir)]", "def read_filter_cache_scratch(cache_dir):\n # Load up the cache file with the most keys (precomputed filter matrices).\n cache = {}\n cache_files = glob.glob(cache_dir + '/*.filter_cache')\n # loop through cache files, load them.\n # If there are new keys, add them to internal cache.\n # If not, delete the reference matrices from memory.\n for cache_file in cache_files:\n cfile = open(cache_file, 'rb')\n cache_t = pickle.load(cfile)\n for key in cache_t:\n if key not in cache:\n cache[key] = cache_t[key]\n return cache", "def cached_files():\n for (dir_path, _dir_names, file_names) in os.walk(CACHE):\n for file_name in file_names:\n if is_json_file(file_name):\n yield os.path.join(dir_path, file_name)", "def cache_all_kind_of_images_locally_for_all_voter():\n cache_images_locally_for_all_voters_results = []\n voter_list = Voter.objects.all()\n voter_list = voter_list[:200]\n\n for voter in voter_list:\n cache_images_for_a_voter_results = migrate_remote_voter_image_urls_to_local_cache(voter.id)\n cache_images_locally_for_all_voters_results.append(cache_images_for_a_voter_results)\n\n return cache_images_locally_for_all_voters_results", "def files():\n return get_cached(\"files.json\")", "def get_request_candidates(self):\n return os.listdir(self.cache_dir_)", "def _get_all_cache_files(self):\n files = set()\n dir_tree = os.walk(self.config.get('cachedir', self.CACHEDIR))\n for dirpath, _, filenames in dir_tree:\n for file_name in filenames:\n if 'cache' in file_name:\n files.add(os.path.join(dirpath, file_name))\n return 
files", "def get_cache_path(self):", "def get_cache_path(self):", "def cached(cache_path, generator):\n if path.exists(cache_path):\n with open(cache_path, 'rb') as f:\n return pickle.load(f)\n output = generator()\n with open(cache_path, 'wb+') as f:\n pickle.dump(output, f)\n return output", "def get_from_cache(url, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\tif sys.version_info[0] == 2 and not isinstance (cache_dir, str):\n\t\tcache_dir = str (cache_dir)\n\n\tif not os.path.exists (cache_dir):\n\t\tos.makedirs (cache_dir)\n\n\t# Get eTag to add to filename, if it exists.\n\tif url.startswith (\"s3://\"):\n\t\tetag = s3_etag (url, proxies=proxies)\n\telse:\n\t\ttry:\n\t\t\tresponse = requests.head (url, allow_redirects=True, proxies=proxies)\n\t\t\tif response.status_code != 200:\n\t\t\t\tetag = None\n\t\t\telse:\n\t\t\t\tetag = response.headers.get (\"ETag\")\n\t\texcept EnvironmentError:\n\t\t\tetag = None\n\n\tif sys.version_info[0] == 2 and etag is not None:\n\t\tetag = etag.decode ('utf-8')\n\tfilename = url_to_filename (url, etag)\n\n\t# get cache path to put the file\n\tcache_path = os.path.join (cache_dir, filename)\n\n\t# If we don't have a connection (etag is None) and can't identify the file\n\t# try to get the last downloaded one\n\tif not os.path.exists (cache_path) and etag is None:\n\t\tmatching_files = fnmatch.filter (os.listdir (cache_dir), filename + '.*')\n\t\tmatching_files = list (filter (lambda s: not s.endswith ('.json'), matching_files))\n\t\tif matching_files:\n\t\t\tcache_path = os.path.join (cache_dir, matching_files[-1])\n\n\tif not os.path.exists (cache_path) or force_download:\n\t\t# Download to temporary file, then copy to cache dir once finished.\n\t\t# Otherwise you get corrupt cache entries if the download gets interrupted.\n\t\twith tempfile.NamedTemporaryFile () as temp_file:\n\t\t\tlogger.info (\"%s not found in cache or force_download set to True, downloading to %s\", url, temp_file.name)\n\n\t\t\t# GET file object\n\t\t\tif url.startswith (\"s3://\"):\n\t\t\t\ts3_get (url, temp_file, proxies=proxies)\n\t\t\telse:\n\t\t\t\thttp_get (url, temp_file, proxies=proxies)\n\n\t\t\t# we are copying the file before closing it, so flush to avoid truncation\n\t\t\ttemp_file.flush ()\n\t\t\t# shutil.copyfileobj() starts at the current position, so go to the start\n\t\t\ttemp_file.seek (0)\n\n\t\t\tlogger.info (\"copying %s to cache at %s\", temp_file.name, cache_path)\n\t\t\twith open (cache_path, 'wb') as cache_file:\n\t\t\t\tshutil.copyfileobj (temp_file, cache_file)\n\n\t\t\tlogger.info (\"creating metadata file for %s\", cache_path)\n\t\t\tmeta = {'url': url, 'etag': etag}\n\t\t\tmeta_path = cache_path + '.json'\n\t\t\twith open (meta_path, 'w') as meta_file:\n\t\t\t\toutput_string = json.dumps (meta)\n\t\t\t\tif sys.version_info[0] == 2 and isinstance (output_string, str):\n\t\t\t\t\toutput_string = unicode (output_string, 'utf-8') # The beauty of python 2\n\t\t\t\tmeta_file.write (output_string)\n\n\t\t\tlogger.info (\"removing temp file %s\", temp_file.name)\n\n\treturn cache_path", "def get_from_cache(url, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n # Get eTag to add to filename, 
if it exists.\n if url.startswith(\"s3://\"):\n etag = s3_etag(url)\n else:\n response = requests.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise IOError(\"HEAD request failed for url {} with status code {}\"\n .format(url, response.status_code))\n etag = response.headers.get(\"ETag\")\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3://\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, 'wb') as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {'url': url, 'etag': etag}\n meta_path = cache_path + '.json'\n with open(meta_path, 'w', encoding=\"utf-8\") as meta_file:\n json.dump(meta, meta_file)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path", "def existing_caches(self):\n caches_map = {}\n repos_dir = pjoin(self.options.cache_dir, \"repos\")\n for cache in sorted(self.caches.values(), key=attrgetter(\"type\")):\n caches_map[cache.type] = tuple(sorted(pathlib.Path(repos_dir).rglob(cache.file)))\n return ImmutableDict(caches_map)", "def get_cache_file_list(self,\n file_list_obj=None,\n file_info_class=FileInfo,\n file_list_class=FileList):\n if not file_list_obj:\n file_list_obj = file_list_class()\n temp_ = self.read_pickle_object_in_file()\n if temp_:\n for tup_ in temp_:\n finf_ = file_info_class(in_tuple=tup_)\n fn_ = finf_.filename\n self.cache_file_list_dict[fn_] = finf_\n file_list_obj.append(finf_)\n return file_list_obj", "def load_internal_cache(cls, pex, pex_info):\r\n internal_cache = os.path.join(pex, pex_info.internal_cache)\r\n with TRACER.timed('Searching dependency cache: %s' % internal_cache):\r\n if os.path.isdir(pex):\r\n for dist in find_distributions(internal_cache):\r\n yield dist\r\n else:\r\n for dist in cls.write_zipped_internal_cache(pex, pex_info):\r\n yield dist", "def retrieve():\n # type: () -> list\n with Cache(CACHE_URI) as c:\n data = c.get(SAVED_SEARCH)\n return json.loads(data[\"blob\"]) if data else []", "def __fillCache(self):\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add 
them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))", "def cache_path(self):", "def cache_path(self):", "def getCacheContents(self):\n return self._cache", "def getJobCache(*jobSpecIds):\n \n jobData = WEJob.get(list(jobSpecIds))\n if type(jobData) != type([]):\n jobData = [jobData]\n result = {}\n # //\n # // make sure all job ids have an entry\n #//\n [ result.__setitem__(k, None) for k in jobSpecIds]\n # //\n # // update result with actual data\n #//\n [ result.__setitem__(k['id'], k.get('cache_dir', None)) for k in jobData ]\n \n return result", "def load_cache():\n return {}" ]
[ "0.64820683", "0.6196719", "0.60758835", "0.59966093", "0.5914366", "0.58652943", "0.5834361", "0.57033515", "0.56778526", "0.56652087", "0.55668455", "0.5528032", "0.55096465", "0.5494031", "0.54744536", "0.5452223", "0.5452223", "0.5440602", "0.53939825", "0.5392779", "0.5374808", "0.5358162", "0.5334572", "0.53182495", "0.5311237", "0.53040636", "0.53040636", "0.52969414", "0.5240333", "0.5232884" ]
0.7802284
0
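A minimal usage sketch for the get_cached_models helper shown in the document field of the row above, assuming the function (with its default TRANSFORMERS_CACHE directory) is already defined or importable in the current module; the printed layout is illustrative only.

cached = get_cached_models()        # scans the default cache directory

for model_url, etag, size_MB in cached:
    print("%s  etag=%s  %.1f MB" % (model_url, etag, size_MB))

# total disk footprint of the cached *.bin model files, in MB
total_MB = sum(size_MB for _, _, size_MB in cached)
print("cached model binaries: %d file(s), %.1f MB total" % (len(cached), total_MB))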
Formats a user-agent string with basic info about a request.
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_tf_available(): ua += f"; tensorflow/{_tf_version}" if DISABLE_TELEMETRY: return ua + "; telemetry/off" if is_training_run_on_sagemaker(): ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items()) # CI will set this value to True if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_user_agent(name=None):\n parts = ['TronAPI/%s' % tronapi.__version__,\n '%s/%s' % (platform.python_implementation(),\n platform.python_version())]\n if name:\n parts.insert(0, name)\n return ' '.join(parts)", "def build_user_agent(application_name, version, url):\n return '%s/%s %s/%s (+%s)' % (application_name, version,\n 'python-simplemediawiki', __version__, url)", "def view_user_agent():\n\n headers = get_headers()\n\n return jsonify({\"user-agent\": headers[\"user-agent\"]})", "def user_agent(name, version):\n\n def _interpreter():\n name = platform.python_implementation()\n version = platform.python_version()\n bitness = platform.architecture()[0]\n if name == 'PyPy':\n version = '.'.join(map(str, sys.pypy_version_info[:3]))\n full_version = [version]\n if bitness:\n full_version.append(bitness)\n return name, \"-\".join(full_version)\n\n tags = [\n (name, version),\n (\"python\", platform.python_version()),\n _interpreter(),\n (\"machine\", platform.machine() or 'unknown'),\n (\"system\", platform.system() or 'unknown'),\n (\"platform\", platform.platform() or 'unknown'),\n ]\n\n return ' '.join(\"{}/{}\".format(name, version) for name, version in tags)", "def build_user_agent():\n if any(key.startswith(prefix) for prefix in TESTING_ENV_PREFIXES for key in os.environ.keys()):\n testing = \" (testing) \"\n else:\n testing = \" \"\n os_platform = \"{0.system}/{0.release} ({0.machine})\".format(utils.get_os_platform())\n return \"charmcraft/{}{}{} python/{}\".format(\n __version__, testing, os_platform, platform.python_version()\n )", "def get_user_agent_from_request(request):\n from user_agents import parse\n return parse(request.META.get('HTTP_USER_AGENT', ''))", "def user_agent_f(x: Text) -> Tuple[Text, Text]:\n return \"userAgent\", x", "def user_agent(self):\n version = '{0}.{1}.{2}'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2])\n return \"PAYNL/SDK/{0} Python/{1} ({2})\".format(self.client_version, version, sys.hexversion)", "def get_user_agent(user_agent: str | None) -> str:\r\n from wikibaseintegrator import __version__\r\n wbi_user_agent = f\"WikibaseIntegrator/{__version__}\"\r\n\r\n if user_agent is None:\r\n return_user_agent = wbi_user_agent\r\n else:\r\n return_user_agent = user_agent + ' ' + wbi_user_agent\r\n\r\n return return_user_agent", "def user_agent_identifier():\n client_info = (get_version(), platform.system(), platform.machine())\n return \"txclient/%s (%s %s)\" % client_info", "def user_agent_info(self,\r\n user_agent):\r\n\r\n # Prepare query URL\r\n _url_path = '/user-agent-info'\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += _url_path\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare form parameters\r\n _form_parameters = {\r\n 'output-case': 'camel',\r\n 'user-agent': user_agent\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.post(_query_url, headers=_headers, parameters=_form_parameters)\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, UserAgentInfoResponse.from_dictionary)", "def default_user_agent(name=\"crawlit\"):\n #https://github.com/kennethreitz/requests/blob/master/requests/utils.py#L440\n _implementation = platform.python_implementation()\n\n if _implementation == 'CPython':\n 
_implementation_version = platform.python_version()\n elif _implementation == 'PyPy':\n _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,\n sys.pypy_version_info.minor,\n sys.pypy_version_info.micro)\n if sys.pypy_version_info.releaselevel != 'final':\n _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])\n elif _implementation == 'Jython':\n _implementation_version = platform.python_version() # Complete Guess\n elif _implementation == 'IronPython':\n _implementation_version = platform.python_version() # Complete Guess\n else:\n _implementation_version = 'Unknown'\n\n try:\n p_system = platform.system()\n p_release = platform.release()\n except IOError:\n p_system = 'Unknown'\n p_release = 'Unknown'\n\n return u\" \".join(['{0}/{1}'.format(name, get_version()),\n '%s/%s' % (_implementation, _implementation_version),\n '%s/%s' % (p_system, p_release)])", "def get_manubot_user_agent() -> str:\n try:\n from manubot import __version__ as manubot_version\n except ImportError:\n manubot_version = \"\"\n return (\n f\"manubot/{manubot_version} \"\n f\"({platform.system()}; Python/{sys.version_info.major}.{sys.version_info.minor}) \"\n f\"<{contact_email}>\"\n )", "def user_agent():\n ua_list = [\n\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\",\n\"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);\",\n\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)\",\n\"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n\"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n\"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 \",\n]\n return random.choice(ua_list)", "def user_agent(self):\n return self._session.headers[\"User-Agent\"]", "def test_user_agent(self):\n user_agent = b\"test-agent\"\n\n def update_expected_user_agent(expected):\n expected[3][\"attributes\"].update(\n {\"http.user_agent\": user_agent.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"user-agent\", user_agent])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_user_agent])", "def userAgentForUrl(self, url):\n return \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"", "def UserAgent(self):\n return self._userAgent", "def test_user_agent(self):\n user_agent = b\"test-agent\"\n\n def update_expected_user_agent(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_USER_AGENT: user_agent.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"user-agent\", user_agent])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_user_agent])", "def user_agent(self):\n 
ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)", "def user_agent(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n ]\n return random.choice(ua_list)", "def user_agent(self) -> str:\n return self.root_hartree.user_agent", "def user_agent(self):\n # type: () -> str\n return self.user_agent_policy.user_agent", "def get_user_agent(faked=False):\n if faked:\n agent = 'curl/7.21.4 (universal-apple-darwin11.0) libcurl/7.21.4 OpenSSL/0.9.8r zlib/1.2.5'\n\n else:\n from bowerer import VERSION\n from platform import platform\n agent = 'bowerer/%s (%s)' % ('.'.join(map(str, VERSION)), platform(terse=True))\n\n return agent", "def before_request():\n user_agent = request.headers.get(\"User-Agent\")\n\n if user_agent is None:\n return \"A user agent must be provided\", 401\n\n lowercase_user_agent = user_agent.lower()\n\n if \"rift\" in lowercase_user_agent:\n logger.debug(\"Detected Rift as user agent (%r)\", user_agent)\n return \"Rift not allowed\", 401\n if \"python\" in lowercase_user_agent:\n logger.debug(\"Detected Python as user agent (%r)\", user_agent)\n return \"Python requests not allowed\", 401\n if \"yandex\" in lowercase_user_agent:\n logger.debug(\"Detected Yandex as user agent (%r)\", user_agent)\n return \"Yandex bots are not allowed\", 401\n if \"smtbot\" in lowercase_user_agent:\n logger.debug(\"Detected SMT as user agent (%r)\", user_agent)\n return \"SMT Bots are not allowed\", 401\n if \"nimbostratus\" in lowercase_user_agent:\n logger.debug(\"Detected Nimbostratus as user agent (%r)\", user_agent)\n return \"Nimbostratus bots are not allowed\", 401\n if \"bot\" in lowercase_user_agent:\n logger.warning(\"Detected unkown bot as user agent (%r)\", user_agent)\n return \"Bots are not allowed\", 401\n if user_agent == \"-\":\n logger.debug(\"Not user agent provided (%r)\", user_agent)\n return \"A user agent must be provided\", 401\n\n return", "def set_option_user_agent(self, string, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionUserAgent/', 
{'String': string, 'apikey': apikey})))", "def set_user_agent(self, user_agent: str) -> None:\n self.headers['User-Agent'] = user_agent", "def option_user_agent(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionUserAgent/')))", "def user_agent():\n headers = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) 
like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 OPR/46.0.2597.57',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (iPad; CPU OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.0 Mobile/14G60 Safari/602.1',\n 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/56.0.2924.87 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:54.0) Gecko/20100101 Firefox/54.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.2.5 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.5',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',\n ]\n return {'User-Agent': headers[random.randrange(0, len(headers))]}", "def userAgent(self):\n raise NotImplementedError" ]
[ "0.7501937", "0.6610868", "0.6601205", "0.6560155", "0.65414107", "0.64870393", "0.64412105", "0.6412119", "0.62766165", "0.62002414", "0.61000466", "0.6004552", "0.59547067", "0.5931048", "0.5911232", "0.58893156", "0.5878208", "0.5870884", "0.5832963", "0.582548", "0.582548", "0.58003217", "0.5733318", "0.5708632", "0.5641419", "0.56155145", "0.55705494", "0.55636823", "0.554374", "0.55324334" ]
0.7017349
1
Extracts the commit hash from a resolved filename pointing toward a cache file.
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
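For illustration, a small usage sketch of the function above, assuming it is defined in the same scope as these imports. The cache path, the 40-character hash, and the `REGEX_COMMIT_HASH` pattern are made-up values for the example, not taken from the dataset row.

import re
from pathlib import Path
from typing import Optional

# Assumed pattern: a full 40-character lowercase hex commit hash.
REGEX_COMMIT_HASH = re.compile(r"^[0-9a-f]{40}$")

# Made-up cache entry laid out as .../snapshots/<commit>/<file>:
resolved = "/tmp/hub/models--demo/snapshots/0123456789abcdef0123456789abcdef01234567/config.json"

extract_commit_hash(resolved, None)    # -> "0123456789abcdef0123456789abcdef01234567"
extract_commit_hash(resolved, "beef")  # -> "beef" (an explicit commit_hash is returned unchanged)
extract_commit_hash(None, None)        # -> None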
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _git_intern_file(self, file_contents, cwd, commit_hash):\n cmd = 'hash-object -t blob -w --stdin'.split(' ')\n stdin = self.api.m.raw_io.input(file_contents)\n stdout = self.api.m.raw_io.output()\n step_name = 'Hashing modified DEPS file with revision ' + commit_hash\n step_result = self.api.m.git(*cmd, cwd=cwd, stdin=stdin, stdout=stdout,\n name=step_name)\n hash_string = step_result.stdout.splitlines()[0]\n try:\n if hash_string:\n int(hash_string, 16)\n return hash_string\n except ValueError: # pragma: no cover\n reason = 'Git did not output a valid hash for the interned file.'\n self.api.m.halt(reason)\n raise self.api.m.step.StepFailure(reason)", "def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'", "def get_hash(file_url):\n file_extension = os.path.splitext(file_url)[1]\n return str(HASHES.get(file_extension))", "def get_hash(file_buffer):\n data = file_buffer.read()\n hasher = sha1()\n hasher.update(data)\n return hasher.hexdigest()", "def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()", "def _get_sha_metadata(filename):\n with open(filename) as f:\n return hashlib.sha1(f.read()).hexdigest()", "def get_file_sha(repo_dir, filename):\n try:\n sha = subprocess.check_output(['git', 'ls-files', filename], cwd=repo_dir).strip()\n if not sha:\n return \"\"\n sha = subprocess.check_output(['git', 'hash-object', filename], cwd=repo_dir)\n return sha.decode('utf-8').strip()\n except Exception as e:\n print(\"Failed to get sha for '%s/%s': %s\" % (repo_dir, filename, e))\n return \"\"", "def fetch_local_hashcode(self, path):\n\t\treturn hashlib.sha256(open(self.config[\"daemon\"][\"rootdir\"] + path, \"rb\").read()).hexdigest()", "def get_commit_hash(self, directory):\n\n return (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=directory)\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )", "def fetch_remote_hashcode(self, path):\n\t\treturn self.fetch_repo_file(\"/hash/\" + path.replace(\"packages/\", \"\")).decode('utf-8').strip()", "def get_git_hash(revname):\n try:\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()\n except:\n revname = \"origin/\" + revname\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()", "def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()", "def compute_hash(fileName):\n m = hashlib.sha1()\n try:\n fd = open(fileName,\"rb\")\n except IOError:\n print (\"Unable to open the file in readmode:\", fileName)\n return\n content = fd.readlines()\n fd.close()\n for eachLine in content:\n m.update(eachLine)\n return m.hexdigest()", "def computeHash(filename):\n fileHash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n fileHash.update(chunk)\n return fileHash.hexdigest()", "def get_commit_hash(repo_location, commit='origin/HEAD'):\n if not os.path.exists(pjoin(repo_location, '.git')):\n raise ValueError\n ret, out = 
spawn_get_output(\n ['git', 'rev-parse', commit], cwd=repo_location)\n if ret != 0:\n raise ValueError(\n f'failed retrieving {commit} commit hash '\n f'for git repo: {repo_location}')\n return out[0].strip()", "def calculate_hash(filename, raise_on_not_found = False):\n if not is_file(filename) and not raise_on_not_found:\n return \"NOTFOUND\"\n\n with open(filename, \"rb\") as file:\n sha256 = hashlib.sha256()\n buf = file.read(128)\n while len(buf) > 0:\n sha256.update(buf)\n buf = file.read(128)\n return str(binascii.hexlify(sha256.digest()), \"utf8\")", "def _get_cache_filename(name, filename):\n filename = os.path.abspath(filename)[1:]\n home_folder = os.path.expanduser('~')\n base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache')\n\n return os.path.join(base_cache_dir, name, filename)", "def get_md5_lookup(filename):\n lookup = {}\n\n with open(filename) as f:\n for row in f:\n (md5, sha256) = row.strip().split(\",\")\n lookup[md5] = sha256\n\n return lookup", "def current_git_hash():\n git_file = \".git/refs/heads/master\"\n git_path = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),\n os.pardir, os.pardir, git_file))\n\n if not os.path.exists(git_path):\n git_path = os.getcwd() + \"/\" + git_file\n if not os.path.exists(git_path):\n git_path = os.getcwd() + \"/../\" + git_file\n if not os.path.exists(git_path):\n return None\n\n with open(git_path, \"r\") as git:\n git_hash = git.read()\n\n return git_hash[0:5]", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def get_cachebusting_name(file_str):\n\n return fu.lcompose([hash, abs, num_to_base36])(file_str)", "def get_commit_hash(revision: str) -> FullCommitHash:\n return FullCommitHash.of(popen(f\"git rev-parse {revision}\"))", "def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))", "def git_hash():\n if not exists('qmk_firmware'):\n checkout_qmk()\n\n return open('qmk_firmware/version.txt').read().strip()", "def get_file_hash(self, filepath):\n if filepath not in self._file_hash_cache:\n self._file_hash_cache[filepath] = self.static_file_hash(filepath)\n return self._file_hash_cache[filepath]", "def get_hash(self, filepath):\n if (os.path.isfile(filepath) and not (\n os.path.islink(filepath) and self.ignorelinks)):\n file_hash = self.hashfile(open(filepath, 'rb'))\n else:\n file_hash = self.hashstring(filepath)\n if not self._increment_hash:\n self._increment_hash = file_hash\n else:\n self._increment_hash = self.hashstring(\n self._increment_hash + file_hash)\n return file_hash", "def get_hash(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify', ref],\n cwd=repo).rstrip()", "def find_file_id_in_commit(file_name, revision_number):\n\n\n with open(\".pvcs/revisions/\" + str(revision_number) + \"/change_map\") as change_map:\n # Loop through every line, find the one containing the file, return the id\n for line in change_map.readlines():\n if line.find(file_name) != -1:\n return int(line.split(\",\")[0])", "def _hash_file(self, file_entry):\n if file_entry is None:\n return None\n\n if file_entry.IsDevice() or file_entry.IsPipe() or file_entry.IsSocket():\n # Ignore devices, FIFOs/pipes and sockets.\n return None\n\n hash_context = hashlib.sha256()\n\n try:\n file_object = file_entry.GetFileObject()\n except IOError as exception:\n logging.warning((\n 'Unable to open path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return 
None\n\n if not file_object:\n return None\n\n try:\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hash_context.update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n except IOError as exception:\n logging.warning((\n 'Unable to read from path specification:\\n{0:s}'\n 'with error: {1!s}').format(file_entry.path_spec.location, exception))\n return None\n\n return hash_context.hexdigest()", "def calc_file_hash(filepath):\n with open(filepath, 'rb') as f:\n return md5(f.read()).hexdigest()" ]
[ "0.71363044", "0.70138985", "0.682399", "0.67539394", "0.66169316", "0.6589089", "0.6511221", "0.6444103", "0.6440251", "0.6434866", "0.6424781", "0.6415578", "0.64100504", "0.6395506", "0.63427746", "0.6313534", "0.6308896", "0.6279337", "0.627533", "0.62388635", "0.62369126", "0.61677444", "0.6157004", "0.61348486", "0.6132014", "0.6120617", "0.61156124", "0.6110667", "0.6108193", "0.61042714" ]
0.7563469
0
Checks if a repo contains a given file without downloading it. Works for remote repos and local folders. This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors.
def has_file(
    path_or_repo: Union[str, os.PathLike],
    filename: str,
    revision: Optional[str] = None,
    proxies: Optional[Dict[str, str]] = None,
    token: Optional[Union[bool, str]] = None,
    **deprecated_kwargs,
):
    use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
        )
        if token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        token = use_auth_token

    if os.path.isdir(path_or_repo):
        return os.path.isfile(os.path.join(path_or_repo, filename))

    url = hf_hub_url(path_or_repo, filename=filename, revision=revision)
    headers = build_hf_headers(token=token, user_agent=http_user_agent())

    r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10)
    try:
        hf_raise_for_status(r)
        return True
    except GatedRepoError as e:
        logger.error(e)
        raise EnvironmentError(
            f"{path_or_repo} is a gated repository. Make sure to request access at "
            f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by "
            "logging in with `huggingface-cli login` or by passing `token=<your_token>`."
        ) from e
    except RepositoryNotFoundError as e:
        logger.error(e)
        raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.")
    except RevisionNotFoundError as e:
        logger.error(e)
        raise EnvironmentError(
            f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
            f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
        )
    except requests.HTTPError:
        # We return false for EntryNotFoundError (logical) as well as any connection error.
        return False
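A brief usage sketch of `has_file`; the repo names and filenames are examples only, and the remote calls assume network access plus the same imports the function above relies on (`os`, `warnings`, `requests`, and the `huggingface_hub` helpers).

# Remote repo: issues a HEAD request and returns True/False.
if has_file("bert-base-uncased", "pytorch_model.bin"):
    print("checkpoint file is present on the Hub")

# Local folder: reduces to a plain os.path.isfile check.
has_file("./my_local_checkpoint", "config.json")

# An invalid repo name or a missing revision raises EnvironmentError rather than returning False.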
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _git_exists_in_revision(path: Path, rev2: str) -> bool:\n # Surprise: On Windows, `git cat-file` doesn't work with backslash directory\n # separators in paths. We need to use Posix paths and forward slashes instead.\n cmd = [\"git\", \"cat-file\", \"-e\", f\"{rev2}:{path.as_posix()}\"]\n result = run(cmd, check=False, stderr=DEVNULL, env={\"LC_ALL\": \"C\"})\n return result.returncode == 0", "def test_get_file_exists_with_git_and_revision(self):\n self._test_get_file_exists(\n tool_name='Git',\n revision='123',\n base_commit_id=None,\n expected_revision='123',\n expected_found=True)", "def valid_revision(repo_url, revision):\n\n global VALID_CACHE\n if (repo_url, revision) in VALID_CACHE:\n return VALID_CACHE[(repo_url, revision)]\n\n LOG.debug(\"Determine if the revision is valid.\")\n url = \"%s?changeset=%s&tipsonly=1\" % (\n JSON_PUSHES % {\"repo_url\": repo_url},\n revision\n )\n data = retry(requests.get, args=(url,)).json()\n ret = True\n\n # A valid revision will return a dictionary with information about exactly one revision\n if len(data) != 1:\n LOG.warning(\"Revision %s not found on branch %s\" % (revision, repo_url))\n ret = False\n\n VALID_CACHE[(repo_url, revision)] = ret\n return ret", "def repo_exists_on_gf_server(p4, repo_name):\n return get_server_repo_config_rev(p4, repo_name) != '0'", "def test_get_file_exists_with_svn_and_revision(self):\n self._test_get_file_exists(\n tool_name='Subversion',\n revision='123',\n base_commit_id=None,\n expected_revision='123',\n expected_found=True)", "def check_svn_repo(case_dict, username, password):\n # ---------------------------------------------------------------------\n logger.debug(\"check_svn_repo\")\n\n repo_exists = False\n svn_repo = \"{0}/trunk\".format(case_dict[\"svn_repo_url\"])\n cmd = [\"svn\", \"list\", svn_repo, \"--username\", username, \"--password\", password]\n result = \"\"\n try:\n result = subprocess.check_output(cmd)\n except subprocess.CalledProcessError:\n msg = \"SVN repo does not exist for this case. A new one will be created.\"\n logger.warning(msg)\n\n if re.search(\"README.archive\", result):\n repo_exists = True\n\n return repo_exists", "def project_with_revision_exists(project_name, project_revision, working_dir):\n try:\n with open(working_dir + project_name + \".qpf\", \"r\") as project_file:\n for line in project_file:\n if f\"PROJECT_REVISION = \\\"{project_revision}\\\"\" in line:\n return True\n return False\n except FileNotFoundError:\n return False", "def revision_exists(git_dir, revision):\n\n if GIT.execute([git_dir, 'rev-parse', '--quiet', '--verify',\n 'refs/tags/' + revision], quiet=True):\n return GitExistsType.EXISTS_TAG\n\n output = []\n if not GIT.execute([git_dir, 'rev-parse', '--quiet', '--verify',\n revision], quiet=True, capture=output):\n if not GIT.execute([git_dir, 'rev-parse', '--quiet', '--verify',\n 'origin/' + revision], quiet=True, capture=output):\n return GitExistsType.MISSING\n\n # confirm a hash-provided revision exists\n #\n # A call to `rev-parse` with a full hash may succeed even through the\n # hash does not exist in a repository (short hashes are valid though).\n # To handle this case, check if the revision matches the returned hash\n # valid provided. 
If so, perform a `cat-file` request to ensure the long\n # hash entry is indeed a valid commit.\n if output and output[0] == revision:\n if GIT.execute([git_dir, 'cat-file', '-t', revision], quiet=True):\n return GitExistsType.EXISTS_HASH\n else:\n return GitExistsType.MISSING_HASH\n\n return GitExistsType.EXISTS_BRANCH", "def __has_repo(repo_name):\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n sql = ('SELECT id '\n 'FROM repos '\n \"WHERE repo_name='%s' \"\n 'LIMIT 1' % repo_name)\n\n cur.execute(sql)\n\n return bool(cur.fetchone() is not None)", "def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)", "def has_repo_file_privilege(login, repo_base, repo, privilege):\n repo = repo.lower()\n repo_base = repo_base.lower()\n\n # Users always have privileges over their own files.\n if login == repo_base:\n return\n\n # Check if the current user or the public user has the privilege on\n # this repo.\n # The anonymous user is never explicitly shared with, so we don't need\n # to check for that.\n permitted_collaborators = Collaborator.objects.filter(\n repo_base=repo_base,\n repo_name=repo,\n file_permission__contains=privilege,\n user__username__in=[settings.PUBLIC_ROLE, login])\n if not next((c for c in permitted_collaborators), None):\n raise PermissionDenied()", "def test_get_file_exists_with_svn_and_base_commit_id(self):\n self._test_get_file_exists(\n tool_name='Subversion',\n revision='123',\n base_commit_id='456',\n expected_revision='123',\n expected_found=True)", "def test_github_file_exists(self):\n for h in self.hyperlinks:\n if h['url'].startswith('https://github.com/cyberbotics/webots/tree/released'):\n path = h['url'].replace('https://github.com/cyberbotics/webots/tree/released',\n os.path.normpath(os.environ['WEBOTS_HOME']))\n self.assertTrue(\n os.path.isfile(path) or os.path.isdir(path),\n msg='Hyperlink \"%s\" is pointing to a non-existing file or directory \"%s\" (in file \"%s\").' 
%\n (h['md'], path, h['file'])\n )", "def is_firefox_repo(repo):\n try:\n if len(repo) and repo[0].hex() == FIREFOX_ROOT_NODE:\n return True\n except error.FilteredRepoLookupError:\n pass\n\n # Backdoor for testing.\n return repo.vfs.exists('IS_FIREFOX_REPO')", "def test_get_file_exists_with_git_and_base_commit_id(self):\n self._test_get_file_exists(\n tool_name='Git',\n revision='123',\n base_commit_id='456',\n expected_revision='456',\n expected_found=True)", "def exists(self, path):\n\n # First test for local path\n if os.path.exists(path):\n return True\n\n # We import this here because importing urllib is slow and\n # a significant fraction of numpy's total import time.\n from urllib.request import urlopen\n from urllib.error import URLError\n\n # Test cached url\n upath = self.abspath(path)\n if os.path.exists(upath):\n return True\n\n # Test remote url\n if self._isurl(path):\n try:\n netfile = urlopen(path)\n netfile.close()\n del(netfile)\n return True\n except URLError:\n return False\n return False", "def test_azurecli_repofile_exists(host):\n assert host.file(REPO_DEBIAN_FILE).exists or \\\n host.file(REPO_EL_FILE).exists", "def check_repo(self):\n if not os.path.exists(self.path):\n log.error(\"no dots repository found at '{}'\".format(self.path))\n if not os.path.exists(self.files_path):\n log.error(\"corrupted repository, the 'files' subfolder is missing\")\n if not os.path.exists(self.enc_files_path):\n log.error(\"corrupted repository, the 'encrypted' subfolder is missing\")\n if not os.path.exists(os.path.join(self.path, '.git')):\n log.error(\"corrupted repository, folder exists but is not versioned\")\n self.git_repo = Repo(self.path)", "def commit_exists(repo, commit):\n cmd = ['git', 'cat-file', '-t', commit]\n try:\n devnull = open(os.devnull, 'wb')\n output = subprocess.check_output(cmd, cwd=repo,\n stderr=devnull)\n return output.rstrip() == 'commit'\n except subprocess.CalledProcessError:\n return False", "def is_repo_root(path: str) -> bool:\n return os.path.isdir(os.path.join(path, \".repo\"))", "def test_azurecli_repofile_isfile(host):\n assert host.file(REPO_DEBIAN_FILE).is_file or \\\n host.file(REPO_EL_FILE).is_file", "def is_git_repo(directory):\n files = os.listdir(directory)\n if '.git' in files:\n return True\n return False", "def _validate_pants_repo(self, pants_repo: pathlib.PosixPath) -> bool:\n return (\n pants_repo and\n pants_repo.is_dir() and\n pants_repo.joinpath('pants').is_file()\n )", "def has_file(path):\n return os.path.exists(path)", "def check_repository(self, repo_type_key, value):\n def remove_tail(v, tail):\n if v.endswith(tail):\n v = v[:-len(tail)]\n return v\n\n for v in self.c.repositories.get(repo_type_key, ()):\n if remove_tail(v, '.git') == remove_tail(value, '.git'):\n return True\n return False", "def is_svn():\n return (exists('.svn') and isdir('.svn'))", "def check_url(url):\n return get_svninfo(url) != {}", "def is_remote_reserve_branch_present(repo):\n reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return reserve_name.fq in remote_ref_names", "def check_path(filename):\n return not bool(checkPath(filename))", "def check_file(self, path, approve_if_no_dbhash=False):\r\n if self.mod.filehash:\r\n h = create_filehash(path)\r\n return h == self.mod.filehash\r\n return approve_if_no_dbhash" ]
[ "0.6596947", "0.6389621", "0.6368034", "0.63567364", "0.6278122", "0.6180356", "0.6152615", "0.6005258", "0.5924015", "0.58965087", "0.5867283", "0.58649385", "0.58643174", "0.586419", "0.5849708", "0.5784172", "0.57095075", "0.56832314", "0.56828415", "0.563263", "0.5627547", "0.55985296", "0.5586624", "0.5566107", "0.55650127", "0.55507517", "0.551299", "0.55052143", "0.55024076", "0.54995173" ]
0.6584571
1
Creates the repo if needed, cleans up `repo_id` with the deprecated kwargs `repo_url` and `organization`, and retrieves the token.
def _create_repo(
    self,
    repo_id: str,
    private: Optional[bool] = None,
    token: Optional[Union[bool, str]] = None,
    repo_url: Optional[str] = None,
    organization: Optional[str] = None,
) -> str:
    if repo_url is not None:
        warnings.warn(
            "The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` "
            "instead."
        )
        if repo_id is not None:
            raise ValueError(
                "`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`."
            )
        repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "")
    if organization is not None:
        warnings.warn(
            "The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your "
            "organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)."
        )
        if not repo_id.startswith(organization):
            if "/" in repo_id:
                repo_id = repo_id.split("/")[-1]
            repo_id = f"{organization}/{repo_id}"

    url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True)
    return url.repo_id
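An illustrative sketch of how the method above behaves. `mixin` stands for any object exposing this method (e.g. a class using the push-to-hub mixin), and the repo names and token are placeholders; the call goes through `huggingface_hub.create_repo`, so it would reach the Hub if actually executed.

# Current-style call: the repo id is used as-is.
repo_id = mixin._create_repo("my-user/my-model", private=True, token="hf_xxx")

# Deprecated-kwarg path: warns and rewrites the id to "my-org/my-model"
# before creating (or reusing, thanks to exist_ok=True) the repo.
repo_id = mixin._create_repo("my-model", organization="my-org", token="hf_xxx")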
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_repo_create():\n form = NewRepoForm()\n if form.validate_on_submit():\n # On the miniscule chance we generate a non-unique access key, loop and try again.\n success = False\n while not success:\n new_repo = Repo.create(\n pass_phrase = form.pass_phrase.data,\n title = form.title.data,\n description = form.description.data,\n is_private = form.is_private.data\n )\n db.session.add(new_repo)\n try:\n db.session.commit()\n success = True\n except:\n db.session.rollback()\n success = False\n session['working_repo'] = new_repo.access_key\n return jsonify(message='success', created=new_repo.access_key)\n else:\n return jsonify(message=\"failed\", errors=form.errors_to_json()), 400", "def _make_github_repo(github_login, entity, reponame, existing,\n access_protocol, private, dryrun):\n repo = None\n access_url = None\n try:\n repo = entity.get_repo(reponame)\n access_url = get_repo_url(repo, access_protocol, github_login)\n except gh.GithubException as e:\n if e.status != 404:\n # this is not a not found message, raise\n raise e\n lgr.debug(\n 'To be created repository \"%s\" does not yet exist on Github',\n reponame)\n\n if repo is not None:\n res = dict(\n url=access_url,\n preexisted=True,\n )\n if existing in ('skip', 'reconfigure'):\n return dict(\n res,\n status='notneeded',\n preexisted=existing == 'skip',\n )\n elif existing == 'error':\n return dict(\n res,\n status='error',\n message=('repository \"%s\" already exists on Github', reponame),\n )\n elif existing == 'replace':\n _msg = ('repository \"%s\" already exists on GitHub.', reponame)\n # Since we are running in the loop trying different tokens,\n # this message might appear twice. TODO: avoid\n if ui.is_interactive:\n remove = ui.yesno(\n \"Do you really want to remove it?\",\n title=_msg[0] % _msg[1],\n default=False\n )\n else:\n return dict(\n res,\n status='impossible',\n message=(\n _msg[0] + \" Remove it manually first on GitHub or \"\n \"rerun datalad in an interactive shell to confirm \"\n \"this action.\",\n _msg[1]),\n )\n if not remove:\n return dict(\n res,\n status='impossible',\n message=_msg,\n )\n repo.delete()\n repo = None\n else:\n RuntimeError('must not happen')\n\n if repo is None and not dryrun:\n try:\n repo = entity.create_repo(\n reponame,\n # TODO description='',\n # TODO homepage='',\n private=private,\n has_issues=False,\n has_wiki=False,\n has_downloads=False,\n auto_init=False)\n except gh.GithubException as e:\n if e.status == 404:\n # can happen if credentials are not good enough!\n raise\n msg = \"Github {} ({})\".format(\n e.data.get('message', str(e) or 'unknown'),\n e.data.get('documentation_url', 'no url')\n )\n if e.data.get('errors'):\n msg += ': {}'.format(\n ', '.join(\n [\n err.get('message')\n for err in e.data.get('errors', [])\n if 'message' in err\n ]))\n return dict(\n res,\n status='error',\n message=msg,\n )\n\n if repo is None and not dryrun:\n raise RuntimeError(\n 'something went wrong, we got no Github repository')\n\n # get definitive URL:\n # - use previously determined one\n # - or query a newly created project\n # - or craft one in dryrun mode\n access_url = access_url or '{}github.com{}{}/{}.git'.format(\n 'https://' if access_protocol == 'https' else 'git@',\n '/' if access_protocol == 'https' else ':',\n # this will be the org, in case the repo will go under an org\n entity.login,\n reponame,\n ) if dryrun else get_repo_url(repo, access_protocol, github_login)\n\n return dict(\n status='ok',\n url=access_url,\n preexisted=False,\n )", "def repo():\n name = 
REPO_NAME_PREFIX + randstring()\n desc = randstring()\n repo = webapi.repos.create_repo(name)\n print('[create repo] repo_id: %s' % repo.id)\n with only_update_one_repo(repo.id):\n try:\n yield repo\n finally:\n try:\n repo.delete()\n except:\n print(\"repo is deleted\")", "def create_from_git(self, token: Any, repo: str):\n params = [token, repo, ]\n method = \"ProjectAPI.CreateFromGit\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))", "def create_repo(self, repo):\n return self.user_con.create_repo(repo=repo)", "async def create_from_git(self, token: Any, repo: str) -> Definition:\n response = await self._invoke({\n \"jsonrpc\": \"2.0\",\n \"method\": \"ProjectAPI.CreateFromGit\",\n \"id\": self.__next_id(),\n \"params\": [token, repo, ]\n })\n assert response.status // 100 == 2, str(response.status) + \" \" + str(response.reason)\n payload = await response.json()\n if 'error' in payload:\n raise ProjectAPIError.from_json('create_from_git', payload['error'])\n return Definition.from_json(payload['result'])", "def _make_github_repos_(\n github_login, github_organization, rinfo, existing,\n access_protocol, private, dryrun):\n if not rinfo:\n return # no need to even try!\n\n auth_success = False\n ncredattempts = 0\n # determine the entity under which to create the repos. It might be that\n # we would need to check a few credentials\n for entity, token_str in _gen_github_entity(\n github_login,\n github_organization):\n lgr.debug(\"Using entity %s with token %s\", entity, token_str)\n ncredattempts += 1\n for ds, reponame in rinfo:\n lgr.debug(\"Trying to create %s for %s\", reponame, ds)\n try:\n res_ = _make_github_repo(\n github_login,\n entity,\n reponame,\n existing,\n access_protocol,\n private,\n dryrun)\n # output will contain whatever is returned by _make_github_repo\n # but with a dataset prepended to the record\n res_['ds'] = ds\n yield res_\n # track (through the keyhole of the backdoor) if we had luck\n # with the github credential set\n # which worked, whenever we have a good result, or where able to\n # determined, if a project already exists\n auth_success = auth_success or \\\n res_['status'] in ('ok', 'notneeded') or \\\n res_['preexisted']\n except (gh.BadCredentialsException, gh.GithubException) as e:\n hint = None\n if (isinstance(e, gh.BadCredentialsException) and e.status != 403):\n # e.g. while deleting a repository, just a generic GithubException is\n # raised but code is 403. At least it is about permissions\n pass\n elif e.status == 404:\n # github hides away if repository might already be existing\n # if token does not have sufficient credentials\n hint = \"Likely the token lacks sufficient permissions to \"\\\n \"assess if repository already exists or not\"\n else:\n # Those above we process, the rest - re-raise\n raise\n lgr.warning(\"Failed to create repository while using token %s: %s%s\",\n token_str,\n exc_str(e),\n (\" Hint: %s\" % hint) if hint else \"\")\n\n if auth_success:\n # so we have succeeded with at least one repo already -\n # we should not try any other credential.\n # TODO: may be it would make sense to have/use different\n # credentials for different datasets e.g. if somehow spread\n # across different organizations? 
but it is not the case here\n # IMHO (-- yoh)\n raise e\n break # go to the next attempt to authenticate\n\n if auth_success:\n return\n\n # External loop should stop querying for the next possible way when it succeeds,\n # so we should never get here if everything worked out\n if ncredattempts:\n raise AccessDeniedError(\n \"Tried %d times to get authenticated access to GitHub but kept failing\"\n % ncredattempts\n )\n else:\n raise RuntimeError(\"Did not even try to create a repo on github\")", "def register_repo_create(self, body):\n httpretty.register_uri(\n httpretty.POST,\n '{url}orgs/{org}/repos'.format(\n url=self.URL,\n org=self.ORG,\n ),\n body=body\n )", "def callback_repo_create(self, request, uri, headers, status_code=201):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n repo_dict = json.loads(request.body)\n self.assertTrue(\n repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]\n )\n self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)\n self.assertEqual(repo_dict['private'], True)\n\n return (status_code, headers, json.dumps({'html_url': 'testing'}))", "def create_repository(cfg):\n if os.path.isdir(cfg[\"repo_dir\"]):\n shutil.rmtree(cfg[\"repo_dir\"], ignore_errors=True)\n return Repo.init(cfg[\"repo_dir\"])", "def gh_repo(token, repo_full_name):\n (owner, repo,) = repo_full_name.split('/')\n\n gh = github3.login(token=token)\n return gh.repository(owner, repo)", "def _create_github_repo(self):\n\n repo_dir = join(self.temp_dir, 'repo')\n subprocess.check_output(['git', 'init', repo_dir])\n\n subprocess.check_output(\n ['git', 'config', 'user.email', os.environ['GIT_EMAIL']],\n cwd=repo_dir\n )\n subprocess.check_output(\n ['git', 'config', 'user.name', os.environ['GIT_NAME']],\n cwd=repo_dir\n )\n\n content = statiki.get_travis_files_content(TEST_REPO, 'BOGUS', {})\n\n for info in content:\n path = join(repo_dir, info['name'])\n with open(path, 'w') as f:\n f.write(info['content'])\n\n subprocess.check_output(['git', 'add', path], cwd=repo_dir)\n subprocess.check_output(\n ['git', 'commit', '-m', '%s' % info['message']], cwd=repo_dir\n )\n\n subprocess.check_output(\n shlex.split('git remote add origin ..'), cwd=repo_dir\n )\n\n return repo_dir", "def another_repo():\n name = REPO_NAME_PREFIX + randstring()\n desc = randstring()\n repo = webapi_b.repos.create_repo(name)\n print('[create another repo] repo_id: %s' % repo.id)\n with only_update_one_repo(repo.id):\n try:\n yield repo\n finally:\n try:\n repo.delete()\n except:\n print(\"another repo is deleted\")", "def create_remote_repo(self, auth_token):\n github = Github(auth_token)\n user = github.get_user()\n try:\n return user.create_repo(self.repo)\n except GithubException as e:\n raise PermissionDenied(\n (e._GithubException__data['message'] +\n e._GithubException__data['errors'][0]['message']))", "def test_repo_config_token() -> None:\n token = \"clksd88sadh4HhJ\" # noqa: S105\n repo = RepositoryConfiguration(\n name=\"pypi\", base_url=\"https://private.repo.org/pypi\", token=token,\n )\n assert repo.get_access_url() == f\"https://{token}@private.repo.org/pypi\"", "def _create_repository(self, github=True, repository_plan='public-org'):\n if github:\n account = HostingServiceAccount(service_name='github',\n username='myuser')\n\n def _http_get_user(_self, url, *args, **kwargs):\n self.assertEqual(url, 
'https://api.github.com/user')\n\n payload = b'{}'\n headers = {\n str('X-OAuth-Scopes'): str('admin:repo_hook, repo, user'),\n }\n\n return HostingServiceHTTPResponse(\n request=HostingServiceHTTPRequest(url=url),\n url=url,\n data=payload,\n headers=headers,\n status_code=200)\n\n service = account.service\n self.spy_on(service.client.http_get,\n call_fake=_http_get_user)\n\n service.authorize('myuser', 'mypass', None)\n self.assertTrue(account.is_authorized)\n\n service.client.http_get.unspy()\n\n repository = self.create_repository()\n repository.hosting_account = account\n repository.extra_data['repository_plan'] = repository_plan\n\n if repository_plan == 'public':\n repository.extra_data['github_public_repo_name'] = \\\n 'mypublicrepo'\n elif repository_plan == 'public-org':\n repository.extra_data['github_public_org_name'] = 'mypublicorg'\n repository.extra_data['github_public_org_repo_name'] = \\\n 'mypublicorgrepo'\n elif repository_plan == 'private':\n repository.extra_data['github_private_repo_name'] = \\\n 'myprivaterepo'\n elif repository_plan == 'private-org':\n repository.extra_data['github_private_org_name'] = \\\n 'myprivateorg'\n repository.extra_data['github_private_org_repo_name'] = \\\n 'myprivateorgrepo'\n\n repository.save()\n return repository\n else:\n return self.create_repository()", "def repository_create_hosted():\n pass", "def create_or_update_repository():\n user = get_jwt_identity()\n data = request.get_json()\n if data is None:\n raise ApiException(422, \"No data.\")\n data, errors = __repositorySchema.load(data)\n if errors:\n return jsonify({'error': errors}), 422\n if 'id' not in data or data['id'] is None:\n data['owner_id'] = user['id']\n repository = Repository.create(data)\n app.db.session.add(repository)\n else:\n repository = Repository.query.get_by_id(data['id'], user)\n repository.update(data)\n app.db.session.commit()\n data = __repositorySchema.dump(repository).data\n return jsonify(data)", "def _get_github_cred(github_login=None):\n cred_identity = \"%s@github\" % github_login if github_login else \"github\"\n return Token(cred_identity, GITHUB_TOKENS_URL)", "def repo(self, user, repo):\r\n return repos.Repo(self, user, repo)", "def repo(self, user, repo):\r\n return repositories.Repo(self, user, repo)", "def new(cls, base_repo, semester, section, username):\n return cls.from_url(\"http://mockhub.com/\", \"token\")", "def setup_repo(repo_url, repo_path, repo_push_url=None):\n with setup_repo_context(repo_url, repo_path, repo_push_url):\n pass", "def create_repo_cli(api_client, url, provider, path):\n content = ReposApi(api_client).create(url, provider, path)\n click.echo(pretty_format(content))", "def get_github_credendial(cls) -> 'ApiCredential':\n return cls.select_token_for_api(GITHUB_API_NAME)", "def _get_tmp_repo(self):\n repo_path = os.path.join(TEMP_DIR_ROOT, 'repocopy_' + slugify(self.data['repository']['name']))\n if HARD_COPY and os.path.exists(repo_path):\n shutil.rmtree(repo_path)\n elif os.path.exists(repo_path):\n return git.Repo(repo_path)\n\n os.mkdir(repo_path)\n return git.Repo.init(repo_path)", "def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = 
unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))", "async def create_token(self, *args, **kwargs) -> OAuth2Token:\n token = await super().create_token(*args, **kwargs)\n # NOTE: Save data from token to db here.\n return token", "def get_github_auth_token():\n global _cached_github_token\n if _cached_github_token:\n near_expiry, _ = get_github_token_info(_cached_github_token)\n if not near_expiry:\n return _cached_github_token\n\n github_auth_token = os.getenv('GITHUB_AUTH_TOKEN')\n assert github_auth_token, 'GITHUB_AUTH_TOKEN needs to be set.'\n tokens = github_auth_token.split(',')\n wait_time = None\n g = None\n for i, token in enumerate(tokens):\n g = github.Github(token)\n near_expiry, wait_time = get_github_token_info(g)\n if not near_expiry:\n _cached_github_token = g\n return g\n print(f'Rate limit exceeded, sleeping till reset: {wait_time} seconds.',\n file=sys.stderr)\n time.sleep(wait_time)\n return g", "def create_master_token(user, repo, config, name):\n url = \"{}/repos/{}/{}/master_tokens\".format(config['url_base'], user, repo)\n postdata = (\"master_token[name]={}\".format(name))\n\n try:\n resp = (api_call(url, 'post', config['debug'], data=postdata))\n token = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n\n if config['debug']:\n print(\"DEBUG: Token {} created, with value {}\".\n format(token['name'], token['value']))\n\n return token" ]
[ "0.63847035", "0.6254067", "0.62396747", "0.6013436", "0.5991349", "0.5897575", "0.57223994", "0.57146037", "0.5666377", "0.5590212", "0.55577445", "0.554561", "0.55116093", "0.5508899", "0.54985505", "0.54891235", "0.54314077", "0.5375935", "0.53152966", "0.52762866", "0.52419144", "0.5227651", "0.5212132", "0.51991946", "0.51859754", "0.51793396", "0.51514983", "0.51492935", "0.51248455", "0.5124202" ]
0.743131
0
Returns the files in the working directory mapped to their last modification timestamp.
def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]):
    return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)}
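A small sketch of how such a timestamp snapshot is typically used: record the state of a directory, write into it, then diff by modification time. `mixin` is again a placeholder for an object carrying the method above.

import os
import tempfile

work_dir = tempfile.mkdtemp()
with open(os.path.join(work_dir, "config.json"), "w") as f:
    f.write("{}")

before = mixin._get_files_timestamps(work_dir)   # e.g. {"config.json": 1700000000.0}

# ... later, after new or updated files have been written into work_dir ...
changed = [
    f
    for f in os.listdir(work_dir)
    if f not in before or os.path.getmtime(os.path.join(work_dir, f)) > before[f]
]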
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_files_list(dirname, date_order, rdate_order):\n file_list = os.listdir(dirname)\n file_mtimes = dict.fromkeys(file_list)\n for f in file_list:\n if f[0] == '.':\n print \"Skipping file: \", f\n del file_mtimes[f]\n continue\n if date_order or rdate_order:\n file_mtimes[f] = os.stat(dirname + '/' + f).st_mtime\n if date_order or rdate_order:\n return sorted(file_mtimes.keys(), key=file_mtimes.get, reverse=rdate_order)\n else:\n return file_list", "def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]", "def retrieve_modified_files(self):\n result = [(diff_obj.a_path, diff_obj.b_path)\n for diff_obj in self.repo.index.diff(None)]\n\n return result", "def _sorted_ls(path):\n def _get_modified_time(f):\n return os.stat(os.path.join(path, f)).st_mtime\n return list(sorted(os.listdir(path), key=_get_modified_time))", "def ListSnapshots(self):\n file_names = sorted(\n [name[:-(len(Archive._SNAP_EXT))] for name in os.listdir(self._path)\n if name.endswith(Archive._SNAP_EXT)])\n timestamps = [datetime.datetime.strptime(x, Archive._TIME_FMT)\n for x in file_names]\n return timestamps", "def LastFiles(self, e=0):\n try:\n with open(r'LastFiles.txt', \"rb\") as f:\n l = f.readlines().strip().split('\\n')\n return l\n except:\n pass", "def last_modified_dts(self):\n return self._last_modified_dts", "def get_list_of_comitted_files():\n files = []\n output = []\n try:\n output = subprocess.check_output(['git','diff-index', '--name-status', '--cached','HEAD']\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError:\n print(\"Error diff files get: trace %s\" % subprocess.CalledProcessError)\n return files\n\n for result in output.split(\"\\n\"):\n logging.info(result)\n if result != '':\n match = modified.match(result)\n if match:\n files.append(match.group('name'))\n\n return files", "def mostRecentlyModified(self,files,n):\n mtime = lambda f: time.strptime(f['modified'], \"%a, %d %b %Y %H:%M:%S +0000\") \n filesSorted = sorted(files,key=mtime,reverse=True) #get descending order\n if len(filesSorted)<= n:\n return filesSorted\n else:\n return filesSorted[:n]", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def last_videos_recorded(self) -> list:\n return sorted(glob.glob(VIDEOS_DIR), key=os.path.getmtime)[-20:]", "def get_files_last_modified(self, files_list):\n filepath_hash_set = set()\n ret_dict = {}\n\n for e in files_list:\n repo_id, file_path, file_id = e\n path_hash = calc_file_path_hash(file_path)\n filepath_hash_set.add(path_hash)\n\n m_infos = super(FileLastModifiedInfoManager, self).filter(\n file_path_hash__in=list(filepath_hash_set))\n for f in files_list:\n repo_id, file_path, file_id = f\n for info in m_infos:\n if repo_id == info.repo_id and file_path == info.file_path:\n # Got the record in db\n ret_key = '|'.join(f)\n if file_id != info.file_id:\n # record is outdated, need re-calculate\n info.delete()\n email, last_modified = self._calc_file_last_modified(\n info.repo_id, info.file_path, info.file_path_hash,\n file_id)\n ret_dict[ret_key] = last_modified\n continue\n else:\n # record is valid\n ret_dict[ret_key] = info.last_modified\n continue\n \n # Process the remaining files.\n for f in files_list:\n ret_key = '|'.join(f)\n if ret_dict.has_key(ret_key):\n continue\n\n repo_id, file_path, file_id = f\n path_hash = 
calc_file_path_hash(file_path)\n email, last_modified = self._calc_file_last_modified(\n repo_id, file_path, path_hash, file_id)\n ret_dict[ret_key] = last_modified\n \n return ret_dict", "def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]", "def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"connect-log.log\")\n return full_list", "def get_recent_files():\n creds = authenticate()\n\n service = build('drive', 'v3', credentials=creds)\n\n # Call the Drive v3 API\n results = service.files().list(\n pageSize=10, fields=\"nextPageToken, files(id, name)\").execute()\n items = results.get('files', [])\n\n return items", "def _get_recent_file_names():\n file_name_tpl = '{}-data.csv'\n date_fmt = '%Y-%m-%d'\n now = datetime.now()\n one_day_ago = now - timedelta(days=1)\n file_names = [\n file_name_tpl.format(one_day_ago.strftime(date_fmt)),\n file_name_tpl.format(now.strftime(date_fmt)),\n ]\n return [os.path.join(DATA_DIR, x) for x in file_names]", "def getRecentFile(*p):\n\tfrom os import stat\n\tfrom os.path import join\n\tfrom glob import glob\n\tresult = \"\"\n\tfiles = glob(join(p[0],*p[1:]))\n\tfor file in files:\n\t\tif result == \"\":\n\t\t\tresult = file\n\t\telse:\n\t\t\tif stat(file).st_mtime > stat(result).st_mtime:\n\t\t\t\tresult = file\n\treturn result", "def _get_newest_filenames(self, image_files):\n if self._previewcache[\"modified\"] is None:\n retval = image_files\n else:\n retval = [fname for fname in image_files\n if os.path.getmtime(fname) > self._previewcache[\"modified\"]]\n if not retval:\n logger.debug(\"No new images in output folder\")\n else:\n self._previewcache[\"modified\"] = max([os.path.getmtime(img) for img in retval])\n logger.debug(\"Number new images: %s, Last Modified: %s\",\n len(retval), self._previewcache[\"modified\"])\n return retval", "def get_recently_modified_scratch_file(settings):\n dir_contents = os.listdir(settings.location)\n full_paths = map(lambda f: os.path.join(settings.location, f), dir_contents)\n files = filter(lambda f: os.path.isfile(str(f)), full_paths)\n if not files:\n return \"\"\n files = sorted(files, key=_get_mtime)\n return files[-1]", "def test_last_files(self):\n glob_manager = GlobManager(['*'])\n self.assertCountEqual(glob_manager.last_files, set())\n\n glob_manager.get_files()\n self.assertCountEqual(\n glob_manager.last_files,\n {\n 'bob.py', 'dave.txt', 'fred.txt.py', 'geoff.py', 'jim.py.txt',\n 'rob.txt'\n })", "def get_file_last_modification_date(filename=None):\n with open(filename, 'r') as fp:\n for line in fp:\n if line.startswith('Modify'):\n date_line = line.split()[1]\n file_date = datetime.strptime(date_line, \"%Y-%m-%d\")\n return filename, file_date", "def list_of(self):\r\n self.files = os.listdir(self.p)\r\n self.size = [0] * len(self.files)\r\n self.created = [0] * len(self.files)\r\n self.modified = [0] * len(self.files)\r\n total_size = 0\r\n iteration = 0\r\n for file in self.files:\r\n self.fol = os.path.join(self.p, file)\r\n self.modified[iteration] = time.ctime(os.path.getmtime(f\"{self.fol}\"))\r\n self.created[iteration] = time.ctime(os.path.getctime(f\"{self.fol}\"))\r\n for path, dirs, files in os.walk(self.fol):\r\n for fol in files:\r\n fpath = os.path.join(path, fol)\r\n total_size += os.path.getsize(fpath)\r\n self.size[iteration] = total_size\r\n iteration += 
1\r\n return self.files, self.size, self.created, self.modified", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def listFiles(self):\n pass", "def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret", "def get_files(self):\n m = []\n for post in self:\n m.append(post.FileName)\n return list(sorted(set(m), reverse=True))", "def get_files(self):\r\n return self._filelist", "def all_changed_files(self):\n return [path_to_file_type(os.path.join(self.path, p)) for p in self.changed_paths() if p]", "def get_mtime(self):\n return max(asset.get_mtime() for asset in self._assets)", "def most_recent_file(folder):\n files = ErrorLog.get_files(folder)\n files_with_mod_dates = [[os.path.abspath(file),\n datetime.datetime.fromtimestamp(os.path.getmtime(file))] # modified date\n for file in files]\n if not files_with_mod_dates:\n return None, None\n most_recent_file = files_with_mod_dates[0][0]\n most_recent_file_date = files_with_mod_dates[0][1]\n for file, mod_date in files_with_mod_dates:\n if mod_date > most_recent_file_date:\n most_recent_file = file\n most_recent_file_date = mod_date\n return most_recent_file, most_recent_file_date" ]
[ "0.73492944", "0.7112882", "0.70843965", "0.7018853", "0.6814776", "0.67374945", "0.67360747", "0.6734983", "0.66534", "0.66412646", "0.6602865", "0.65822446", "0.6576336", "0.65412134", "0.6509738", "0.6460812", "0.63975394", "0.639307", "0.6381997", "0.63674754", "0.6314161", "0.6300249", "0.6263737", "0.6253995", "0.6250782", "0.6241938", "0.6232058", "0.62308836", "0.6207549", "0.61646366" ]
0.7201854
1
Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`.
def _upload_modified_files( self, working_dir: Union[str, os.PathLike], repo_id: str, files_timestamps: Dict[str, float], commit_message: Optional[str] = None, token: Optional[Union[bool, str]] = None, create_pr: bool = False, revision: str = None, ): if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Config" in self.__class__.__name__: commit_message = "Upload config" elif "Tokenizer" in self.__class__.__name__: commit_message = "Upload tokenizer" elif "FeatureExtractor" in self.__class__.__name__: commit_message = "Upload feature extractor" elif "Processor" in self.__class__.__name__: commit_message = "Upload processor" else: commit_message = f"Upload {self.__class__.__name__}" modified_files = [ f for f in os.listdir(working_dir) if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f] ] # filter for actual files + folders at the root level modified_files = [ f for f in modified_files if os.path.isfile(os.path.join(working_dir, f)) or os.path.isdir(os.path.join(working_dir, f)) ] operations = [] # upload standalone files for file in modified_files: if os.path.isdir(os.path.join(working_dir, file)): # go over individual files of folder for f in os.listdir(os.path.join(working_dir, file)): operations.append( CommitOperationAdd( path_or_fileobj=os.path.join(working_dir, file, f), path_in_repo=os.path.join(file, f) ) ) else: operations.append( CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file) ) if revision is not None: create_branch(repo_id=repo_id, branch=revision, token=token, exist_ok=True) logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}") return create_commit( repo_id=repo_id, operations=operations, commit_message=commit_message, token=token, create_pr=create_pr, revision=revision, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_release_files():\n version = get_release_version()\n target = sf_files + sourceforge_target_dir(version)\n\n print()\n print(\"Uploading release files...\")\n print(\" Source:\", release_path)\n print(\" Target: \" + target)\n print(\" Files: \" + ', '.join(glob.glob('*')))\n print()\n call_rsync(\n username,\n \"\",\n path.join(release_path, \"*\"),\n target\n )\n print()", "def update_local_repo(case_dict, ignore_logs, ignore_timing):\n # ---------------------------------------------------------------------\n logger.debug(\"update_local_repo\")\n from_dir = case_dict[\"CASEROOT\"]\n to_dir = case_dict[\"archive_temp_dir\"]\n\n compare_dir_trees(from_dir, to_dir, case_dict[\"archive_list\"])\n\n # check if ignore_logs is specified\n if ignore_logs:\n os.chdir(to_dir)\n if os.path.isdir(\"./logs\"):\n try:\n shutil.rmtree(\"./logs\")\n except OSError:\n logger.warning(\n 'in \"update_local_repo\" - Unable to remove \"logs\" in archive dir.'\n )\n\n cmd = [\"svn\", \"delete\", \"./logs\"]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n msg = _call_template.substitute(\n function=\"update_lcoal_repo\",\n cmd=cmd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)\n\n if os.path.isdir(\"./postprocess/logs\"):\n os.chdir(\"./postprocess\")\n try:\n shutil.rmtree(\"./logs\")\n except OSError:\n logger.warning(\n 'in \"update_local_repo\" - '\n 'Unable to remove \"postprocess/logs\" in archive dir.'\n )\n\n cmd = [\"svn\", \"delete\", \"./logs\"]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n msg = _call_template.substitute(\n function=\"update_lcoal_repo\",\n cmd=cmd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)\n else:\n # add log files\n if os.path.exists(\"{0}/logs\".format(from_dir)):\n if not os.path.exists(\"{0}/logs\".format(to_dir)):\n os.makedirs(\"{0}/logs\".format(to_dir))\n os.chdir(os.path.join(from_dir, \"logs\"))\n for filename in glob.glob(\"*.*\"):\n update_repo_add_file(\n filename,\n os.path.join(from_dir, \"logs\"),\n os.path.join(to_dir, \"logs\"),\n )\n\n if os.path.exists(\"{0}/postprocess/logs\".format(from_dir)):\n if not os.path.exists(\"{0}/postprocess/logs\".format(to_dir)):\n os.makedirs(\"{0}/postprocess/logs\".format(to_dir))\n os.chdir(os.path.join(from_dir, \"postprocess/logs\"))\n for filename in glob.glob(\"*.*\"):\n update_repo_add_file(\n filename,\n os.path.join(from_dir, \"postprocess\", \"logs\"),\n os.path.join(to_dir, \"postprocess\", \"logs\"),\n )\n\n # check if ignore_timing is specified\n if ignore_timing:\n os.chdir(case_dict[\"archive_temp_dir\"])\n if os.path.isdir(\"./timing\"):\n try:\n shutil.rmtree(\"./timing\")\n except OSError:\n logger.warning(\n 'in \"update_local_repo\" - Unable to remove \"timing\" in archive dir.'\n )\n\n cmd = [\"svn\", \"delete\", \"./timing\"]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n msg = _call_template.substitute(\n function=\"update_lcoal_repo\",\n cmd=cmd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)\n else:\n # add timing files\n if os.path.exists(\"{0}/timing\".format(from_dir)):\n if not os.path.exists(\"{0}/timing\".format(to_dir)):\n os.makedirs(\"{0}/timing\".format(to_dir))\n os.chdir(os.path.join(from_dir, \"timing\"))\n for filename in glob.glob(\"*.*\"):\n update_repo_add_file(\n filename,\n 
os.path.join(from_dir, \"timing\"),\n os.path.join(to_dir, \"timing\"),\n )", "def uploadRepoFiles(self, key):\n\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n url = f\"https://zenodo.org/api/deposit/depositions/{self.nbDetails[key]['repoInfo']['id']}/files?access_token={ACCESS_TOKEN}\"\n\n outputs = []\n for fileIn in self.nbDetails[key]['repoFiles']:\n # Basic schema... will need to run on remote however.\n\n data = {'name': Path(fileIn).name}\n files = {'file': open(fileIn, 'rb')}\n r = requests.post(url, data=data, files=files)\n\n if r.ok:\n print(f\"File upload OK: {fileIn}\")\n else:\n print(f\"File upload failed: {fileIn}\")\n\n outputs.append([r.ok, r.json()])\n\n self.nbDetails[key]['repoFilesUpload'] = outputs\n\n # return 'Not implemented'", "def localfiles_for_update(self, localfiles, obsfiles):\n upload_local_files = []\n obs_dict = {}\n for key, mtime, size in obsfiles:\n obs_dict[key.strip('/')] = mtime\n\n for localfile in localfiles:\n filepath, key = localfile\n fullkey = key + '/' + os.path.basename(filepath)\n fullkey = fullkey.strip('/')\n if fullkey in obs_dict.keys():\n localfile_timestamp = os.path.getmtime(filepath)\n obsfile_timestamp = time.mktime(time.strptime(obs_dict[fullkey], \"%Y/%m/%d %H:%M:%S\"))\n\n if localfile_timestamp > obsfile_timestamp:\n upload_local_files.append(localfile)\n else:\n upload_local_files.append(localfile)\n return upload_local_files", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]):\n return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)}", "def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)", "def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()", "def add_files(self, files, commit_msg):\n paths = []\n for rpath in files:\n path = os.path.join(self.repodir, rpath)\n paths.append(path)\n with open(path, 'w') as f:\n f.write(files[rpath])\n if paths:\n self.git_cmd(['add'] + paths)\n self.commit(commit_msg)", "def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n 
payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def populate_local_repo(case_dict, ignore_logs, ignore_timing):\n # ---------------------------------------------------------------------\n logger.debug(\"populate_local_repo\")\n os.chdir(case_dict[\"CASEROOT\"])\n\n # loop through the archive_list and copy to the temp archive dir\n for archive in case_dict[\"archive_list\"]:\n if os.path.exists(archive):\n if os.path.isdir(archive):\n try:\n target = case_dict[\"archive_temp_dir\"] + \"/\" + archive\n shutil.copytree(\n archive,\n target,\n symlinks=False,\n ignore=shutil.ignore_patterns(*_ignore_patterns),\n )\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=archive,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n else:\n try:\n shutil.copy2(archive, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=archive,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with .xml as the suffix\n xml_files = glob.glob(\"*.xml\")\n for xml_file in xml_files:\n if os.path.isfile(xml_file):\n try:\n shutil.copy2(xml_file, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=xml_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with .xml as the suffix from the postprocess directory\n if os.path.isdir(\"./postprocess\"):\n pp_path = \"{0}/{1}\".format(case_dict[\"archive_temp_dir\"], \"postprocess\")\n if not os.path.exists(pp_path):\n os.mkdir(pp_path)\n xml_files = glob.glob(\"./postprocess/*.xml\")\n for xml_file in xml_files:\n if os.path.isfile(xml_file):\n try:\n shutil.copy2(xml_file, pp_path)\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=xml_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with user_nl_ as the prefix\n user_files = glob.glob(\"user_nl_*\")\n for user_file in user_files:\n if os.path.isfile(user_file):\n try:\n shutil.copy2(user_file, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=user_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with Depends as the prefix\n conf_files = glob.glob(\"Depends.*\")\n for conf_file in conf_files:\n if os.path.isfile(conf_file):\n try:\n shutil.copy2(conf_file, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=conf_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # check if ignore_logs is 
specified\n if ignore_logs:\n os.chdir(case_dict[\"archive_temp_dir\"])\n if os.path.isdir(\"./logs\"):\n try:\n shutil.rmtree(\"./logs\")\n except OSError:\n logger.warning(\n 'in \"populate_local_repo\" - Unable to remove \"logs\" in archive_temp_dir.'\n )\n if os.path.isdir(\"./postprocess/logs\"):\n os.chdir(\"./postprocess\")\n try:\n shutil.rmtree(\"./logs\")\n except OSError:\n logger.warning(\n 'in \"populate_local_repo\" - '\n 'Unable to remove \"postprocess/logs\" in archive_temp_dir.'\n )\n os.chdir(case_dict[\"CASEROOT\"])\n\n # check if ignore_timing is specified\n if ignore_timing:\n os.chdir(case_dict[\"archive_temp_dir\"])\n if os.path.isdir(\"./timing\"):\n try:\n shutil.rmtree(\"./timing\")\n except OSError:\n logger.warning(\n 'in \"populate_local_repo\" - Unable to remove \"timing\" in archive_temp_dir.'\n )\n os.chdir(case_dict[\"CASEROOT\"])", "def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()", "def upload_files(metadata, dir, rclone_path, remote_name):\n\n images = []\n today = datetime.datetime.today().strftime('%Y-%m-%d')\n\n # send images to the Drive\n for image in metadata:\n # prevent sending the same image twice (if two faces are detected)\n if image not in images:\n images.append(image)\n image_path = os.path.join(dir, image['image_name'])\n subprocess.run([rclone_path, 'copy', image_path, '{}:{}'.format(remote_name, today)])\n\n # upload metadata.json to the Drive\n subprocess.run([rclone_path, 'copy', METADATA_FILE, '{}:{}'.format(remote_name, today)])\n os.remove(METADATA_FILE)", "def sync_dir(self):\n\n # mark the trajectories that we have seen\n trajectories = os.listdir(self.trajectory_dir)\n \n for trajectory_file in trajectories:\n\n if trajectory_file not in self.seen_trajectories:\n\n created = self.upload_trajectory(trajectory_file)\n self.seen_trajectories.add(trajectory_file)\n\n if created is True:\n print \"Total of %s solved trajectories\" % \\\n SolvedTrajectory.objects.count(), created", "def upload_directory(self, directory_path, do_timestamps=True, pkey=1):\n\n # The data_dict stores all the information on uploaded files\n # and there respective structures.\n # TODO: Think of a better name for the data_dict.\n files_to_upload = glob.glob(directory_path + \"*\")\n timestamp = self.create_timestamps()\n # For all files in directory.\n for pathname in files_to_upload:\n filename = os.path.basename(pathname)\n\n # Split the '.'s as well cause they are file extensions.\n # We just want to get the first two parts of the name.\n splitname = filename.replace(\".\", \"_\").split(\"_\")\n data_name = splitname[0] + \"_\" + splitname[1]\n module_supplier_id = splitname[2]\n\n assert len(splitname) == 4,\\\n \"Bad file name in folder: 
{filename}\".format(filename=filename)\n assert data_name in self.types.keys(),\\\n (\"File type {filetype} does not have proper type format. \"\n \"Are you sure you spelt it right?\").format(\n filetype=data_name\n )\n\n # Timestamp files if nessecary.\n if do_timestamps:\n upload_filename = timestamp + filename\n\n\n # Create the file upload and get ID.\n up_id = self.create_file_upload(\n upload_filename,\n self.pub_user,\n module_supplier_id\n )\n assert type(up_id) == int,\\\n \"Bad upload ID response: Not an integer!\"\n\n # Create the file download and get ID.\n down_id = self.create_file_download(\n upload_filename,\n self.pub_user,\n module_supplier_id\n )\n assert type(down_id) == int,\\\n \"Bad download ID response: Not an integer!\"\n\n # Actually upload the file to the server.\n self.upload_file(\n pathname,\n upload_filename\n )\n\n # Save the data for later use.\n # Update data_dict.\n self.data_dict[data_name] = {\n 'filepath': pathname,\n 'upload_name': upload_filename,\n 'upload_id': up_id,\n 'download_id': down_id,\n 'module_supplier_id': module_supplier_id,\n }\n\n print(\"Uploaded directory\")", "def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. 
Restart the database to continue.\")", "def _test_upload_dir_contents(self, filenames):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n for filename in filenames:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, filename),\n posixpath.join(remote_dest_dir, filename)))\n with open(os.path.join(local_src_dir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')", "def rsync_files(*paths, **kwargs):\n user = whoami()\n local_path = path.join(LOCAL_FILES_DIR, *paths)\n remote_path = posixpath.join(\"/\", *paths)\n remote_temp_path = posixpath.join(STAGE_DIR, *paths)\n mkdir_for(remote_temp_path, \"%s:root\" % user)\n rsync_project(remote_temp_path, path.join(local_path, \"*\"), delete=True)\n sudo(\"mkdir -p %s && rsync -r --delete %s %s\" % (remote_path, posixpath.join(remote_temp_path, \"*\"), remote_path))\n # sudo(\"rm -r %s\" % (remote_temp_path))", "def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None\n for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):\n paths = [os.path.join(dirpath, filename) for filename in filenames]\n files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)", "def submitUploads(self, local = False):\n\n # Set and upload files to repo using uploadRepoFiles()\n if local:\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.uploadRepoFiles(key)\n\n else:\n # Upload on remote machine.\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"{Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['uploadNohup']).as_posix()} \\\n {Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['upload']).as_posix()} \\\n {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name} {ACCESS_TOKEN}\",\n warn = True, timeout = 10)\n\n print(f\"Log file set: {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name}\")\n # Remote upload set to run via nohup... 
will need to pull logs later.\n\n # Publish\n\n # return 'Not implemented'", "def on_modified(self, event):\n \n if not event.is_directory: \n\n file_name = os.path.basename(event.src_path)\n \n if file_name not in self.ignore_files:\n parent = os.path.dirname(event.src_path)\n file_id = list(filter(lambda f: f[\"name\"] == file_name, self.filesystem[parent][\"files\"]))[0][\"id\"]\n self.gapy.update_file(file_id, path=parent)\n self.gapy.logger.info(\"The file {} was modified, the content was updated\".format(file_name, parent))\n print(f\"\\nThe file {file_name} was modified and synchronized\")", "def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,\n files):\n\n def UploadFile(filename, file_id, content, is_binary, status, is_base):\n \"\"\"Uploads a file to the server.\"\"\"\n file_too_large = False\n if is_base:\n type = \"base\"\n else:\n type = \"current\"\n if len(content) > MAX_UPLOAD_SIZE:\n print (\"Not uploading the %s file for %s because it's too large.\" %\n (type, filename))\n file_too_large = True\n content = \"\"\n checksum = md5(content).hexdigest()\n if options.verbose > 0 and not file_too_large:\n print \"Uploading %s file for %s\" % (type, filename)\n url = \"/%d/upload_content/%d/%d\" % (int(issue), int(patchset), file_id)\n form_fields = [(\"filename\", filename),\n (\"status\", status),\n (\"checksum\", checksum),\n (\"is_binary\", str(is_binary)),\n (\"is_current\", str(not is_base)),\n ]\n if file_too_large:\n form_fields.append((\"file_too_large\", \"1\"))\n if options.email:\n form_fields.append((\"user\", options.email))\n ctype, body = EncodeMultipartFormData(form_fields,\n [(\"data\", filename, content)])\n response_body = rpc_server.Send(url, body,\n content_type=ctype)\n if not response_body.startswith(\"OK\"):\n StatusUpdate(\" --> %s\" % response_body)\n sys.exit(1)\n\n patches = dict()\n [patches.setdefault(v, k) for k, v in patch_list]\n for filename in patches.keys():\n base_content, new_content, is_binary, status = files[filename]\n file_id_str = patches.get(filename)\n if file_id_str.find(\"nobase\") != -1:\n base_content = None\n file_id_str = file_id_str[file_id_str.rfind(\"_\") + 1:]\n file_id = int(file_id_str)\n if base_content != None:\n UploadFile(filename, file_id, base_content, is_binary, status, True)\n if new_content != None:\n UploadFile(filename, file_id, new_content, is_binary, status, False)", "def uploaded(event, context):\n dt = datetime.utcnow()\n # NOTE: the event might include multiple records\n for r in event['Records']:\n file_id = r['s3']['object']['key']\n file = {\n 'id': file_id,\n 'size': r['s3']['object']['size'],\n 'type': get_s3_file_type(file_id),\n 'uploaded_at': dt,\n }\n if runtime_context.STORE:\n file['stored_at'] = dt\n FileModel.update(file)\n LOGGER.debug('Files item updated (uploaded). 
service=ddb method=update_item id={}'.format(file_id))\n return {\n \"statusCode\": 200\n }", "def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,\r\n files):\r\n\r\n def UploadFile(filename, file_id, content, is_binary, status, is_base):\r\n \"\"\"Uploads a file to the server.\"\"\"\r\n file_too_large = False\r\n if is_base:\r\n type = \"base\"\r\n else:\r\n type = \"current\"\r\n if len(content) > MAX_UPLOAD_SIZE:\r\n print (\"Not uploading the %s file for %s because it's too large.\" %\r\n (type, filename))\r\n file_too_large = True\r\n content = \"\"\r\n checksum = md5(content).hexdigest()\r\n if options.verbose > 0 and not file_too_large:\r\n print \"Uploading %s file for %s\" % (type, filename)\r\n url = \"/%d/upload_content/%d/%d\" % (int(issue), int(patchset), file_id)\r\n form_fields = [(\"filename\", filename),\r\n (\"status\", status),\r\n (\"checksum\", checksum),\r\n (\"is_binary\", str(is_binary)),\r\n (\"is_current\", str(not is_base)),\r\n ]\r\n if file_too_large:\r\n form_fields.append((\"file_too_large\", \"1\"))\r\n if options.email:\r\n form_fields.append((\"user\", options.email))\r\n ctype, body = EncodeMultipartFormData(form_fields,\r\n [(\"data\", filename, content)])\r\n response_body = rpc_server.Send(url, body,\r\n content_type=ctype)\r\n if not response_body.startswith(\"OK\"):\r\n StatusUpdate(\" --> %s\" % response_body)\r\n sys.exit(1)\r\n\r\n patches = dict()\r\n [patches.setdefault(v, k) for k, v in patch_list]\r\n for filename in patches.keys():\r\n base_content, new_content, is_binary, status = files[filename]\r\n file_id_str = patches.get(filename)\r\n if file_id_str.find(\"nobase\") != -1:\r\n base_content = None\r\n file_id_str = file_id_str[file_id_str.rfind(\"_\") + 1:]\r\n file_id = int(file_id_str)\r\n if base_content != None:\r\n UploadFile(filename, file_id, base_content, is_binary, status, True)\r\n if new_content != None:\r\n UploadFile(filename, file_id, new_content, is_binary, status, False)", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def post_on_github(params, logger=None):\r\n\r\n try:\r\n logger.debug(\"[*] Trying to upload file(s) {f} in {r} for handle {h}\".format(\r\n f=params[\"to_be_uploaded_file_list\"], r=params[\"repo\"], h=params[\"user\"]))\r\n\r\n g = Github(params[\"user\"], params[\"password\"])\r\n\r\n repo = g.get_user().get_repo(params[\"repo\"])\r\n file_list = params[\"to_be_uploaded_file_list\"]\r\n\r\n file_names = [x.rsplit(\"/\", 1)[1] for x in file_list]\r\n if params[\"commit_message\"] is None:\r\n commit_message = 'KML-file update {}'.format(\r\n randint(0, 100) * randint(0, 100) / randint(1, 100))\r\n else:\r\n commit_message = params[\"commit_message\"]\r\n\r\n master_ref = repo.get_git_ref('heads/' + str(params[\"branch\"]))\r\n master_sha = master_ref.object.sha\r\n base_tree = repo.get_git_tree(master_sha)\r\n\r\n element_list = list()\r\n for i, entry in enumerate(file_list):\r\n with 
open(entry) as input_file:\r\n data = input_file.read()\r\n element = InputGitTreeElement(\r\n file_names[i], '100644', 'blob', data)\r\n element_list.append(element)\r\n\r\n tree = repo.create_git_tree(element_list, base_tree)\r\n parent = repo.get_git_commit(master_sha)\r\n\r\n commit = repo.create_git_commit(commit_message, tree, [parent])\r\n master_ref.edit(commit.sha)\r\n except Exception as e:\r\n logger.critical(\"Exception: {}\".format(e))\r\n return False\r\n\r\n logger.info(\"[*] Uploading successful!\")\r\n return True", "def upload(all_files, session):\n remote_directory = unique_path('cli-import')\n log.info('uploading files to %s' % remote_directory)\n\n for filename in all_files:\n callback = _progress_callback\n log.info(\"Uploading %s\" % filename)\n session.uploadWrapper(filename, remote_directory, callback=callback)\n if callback:\n print('')\n return remote_directory", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)" ]
[ "0.6326092", "0.59037274", "0.5897512", "0.58474", "0.5663813", "0.56284285", "0.5628149", "0.5566654", "0.5565086", "0.5512359", "0.54885167", "0.5485811", "0.5406561", "0.5382631", "0.53670764", "0.5350603", "0.53416187", "0.5329326", "0.53228813", "0.529926", "0.5288088", "0.52562577", "0.5250996", "0.524343", "0.5229408", "0.5211464", "0.51933306", "0.51826686", "0.5161649", "0.515554" ]
0.8192738
0
Sends telemetry that helps track example usage.
def send_example_telemetry(example_name, *example_args, framework="pytorch"): if is_offline_mode(): return data = {"example": example_name, "framework": framework} for args in example_args: args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None} if "model_name_or_path" in args_as_dict: model_name = args_as_dict["model_name_or_path"] # Filter out local paths if not os.path.isdir(model_name): data["model_name"] = args_as_dict["model_name_or_path"] if "dataset_name" in args_as_dict: data["dataset_name"] = args_as_dict["dataset_name"] elif "task_name" in args_as_dict: # Extract script name from the example_name script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "") script_name = script_name.replace("_no_trainer", "") data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}" headers = {"user-agent": http_user_agent(data)} try: r = requests.head(HUGGINGFACE_CO_EXAMPLES_TELEMETRY, headers=headers) r.raise_for_status() except Exception: # We don't want to error in case of connection errors of any kind. pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def on_test_begin(self, logs=None):", "def setup(bot: Bot) -> None:\n bot.add_cog(Latency(bot))", "def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)", "def test_basic_asgi_call(self):\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs)", "def test_send(self):\n # Required to get useful test names\n super(TestCisPlyOutput_local, self).test_send()", "def test_examples(self) -> types.Channel:\n\n return self._example_gen.outputs.examples", "def handleTelemetry(self):\n\t\tprint(\"*****************handleTelemetry\")\n\t\tself.cpuUtilPct = self.cpuUtilTask.getTelemetryValue() # Get CPU usage performance\n\t\tself.memUtilPct = self.memUtilTask.getTelemetryValue() # Get Memory usage performance\n\t\tsysData = SystemPerformanceData()\n\t\tsysData.setCpuUtilization(self.cpuUtilPct)\n\t\tsysData.setMemoryUtilization(self.memUtilPct)\n\t\tself.dataMessageListener.handleSystemPerformanceMessage(sysData)\n\t\tlogging.info('CPU utilization is %s percent, and memory utilization is %s percent.', str(self.cpuUtilPct), str(self.memUtilPct))\n\t\t# Log out the usage performance", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def test_telemetry_logger(caplog):\n telemetry_logger = utils.TelemetryLogger()\n assert (\n telemetry_logger.instrumentation_key == \"aaefce9e-d109-4fac-bb9f-8277c68e91ac\"\n )\n assert telemetry_logger.enable_telemetry\n\n telemetry_logger = utils.TelemetryLogger(enable_telemetry=False)\n assert not telemetry_logger.enable_telemetry\n with caplog.at_level(\"INFO\"):\n telemetry_logger.log_trace(message=\"A unit test message. Please ignore it.\")\n assert (\n \"Sending trace log messages to application insight has been disabled.\"\n in caplog.text\n )\n\n telemetry_logger = utils.TelemetryLogger(enable_telemetry=True)\n telemetry_logger.log_trace(\n message=\"A unit test message of shrike.build. 
Please ignore it.\",\n level=logging.INFO,\n )", "def send(event):\r\n dog_stats_api.increment('track.send.count')\r\n\r\n for name, backend in backends.iteritems():\r\n with dog_stats_api.timer('track.send.backend.{0}'.format(name)):\r\n backend.send(event)", "def test_otoroshi_controllers_adminapi_analytics_controller_global_stats(self):\n pass", "def upload_telemetry_data():\n gen_log.info(\"upload_telemetry_data\")\n try:\n data=[{attr:random.randint(20,100)} for attr in attributes if attr!='temperature']\n # data={\"temperature\":random.randint(20,100), \n # \"humidity\":random.randint(20,100),\n # \"other\":random.randint(-60,60),\n # \"active\": False}\n url=url_prefix+'/v1/{}/telemetry'.format(token)\n res=yield AsyncHTTPClient().fetch(url,method='POST',body=json.dumps(data),headers=headers)\n #res=requests.post(url,data=json.dumps(data),headers=headers)\n gen_log.info(res.code)\n except Exception as ex:\n gen_log.error(ex)\n IOLoop.current().add_timeout(time.time()+1,upload_telemetry_data)", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def test_autosample(self):\n \n # Start data subscribers.\n #self._start_data_subscribers(6)\n #self.addCleanup(self._stop_data_subscribers) \n \n # Set up a subscriber to collect error events.\n #self._start_event_subscriber('ResourceAgentResourceStateEvent', 7)\n #self.addCleanup(self._stop_event_subscriber) \n \n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.IDLE)\n\n cmd = AgentCommand(command=ResourceAgentEvent.RUN)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.COMMAND)\n\n cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)\n retval = self._ia_client.execute_resource(cmd)\n \n gevent.sleep(15)\n \n cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)\n retval = self._ia_client.execute_resource(cmd)\n \n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n\n #self._async_event_result.get(timeout=CFG.endpoint.receive.timeout)\n #self.assertGreaterEqual(len(self._events_received), 6)\n\n #self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)\n #self.assertGreaterEqual(len(self._samples_received), 6)", "def runAnalytics():\n #gets OAuth from the API\n analytics = get_Analytics_service()\n #get the object return from the API\n #send that object to print out useful fields\n response = get_report(analytics)\n print_response(response)", "async def test_successful_samples(self):\n self.set_source_parameter(\"test_result\", [\"success\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"242\", entities=[])", "def send_log():\n log.info(f\"UUID={UUID}\")\n log.info(f\"SPLIT={SPLIT}\")\n log.info(f\"BATCH_SIZE={BATCH_SIZE}\")\n log.info(f\"EPOCHS={EPOCHS}\")\n log.info(f\"PATIENCE={PATIENCE}\")\n log.info(f\"X_FREQ={X_FREQ}\")\n 
log.info(f\"LOOK_BACK={LOOK_BACK}\")\n log.info(f\"LOOK_AHEAD={LOOK_AHEAD}\")\n log.info(f\"KERNEL_SIZE={KERNEL_SIZE}\")\n log.info(f\"FILTERS={FILTERS}\")\n log.info(f\"L1L2={L1L2}\")\n log.info(f\"D1={D1}\")\n log.info(f\"D2={D2}\")\n log.info(f\"DOUT={DOUT}\")\n log.info(f\"PLOT={PLOT}\")\n log.info(f\"SHUFFLE={SHUFFLE}\")", "def sendMeasurement(self, metric, value, source, timestamp=None):\n sys.stdout.write('{0} {1} {2} {3}\\n'.format(metric, value, source, timestamp).decode('utf-8'))\n sys.stdout.flush()", "def __send_all(self):\n\n offset = self.app_id * 10\n\n print(\"Start run {} - {} @ {} with {} tests\".format(self.app_id,\n self.test_run.name,\n self.test_run.date,\n self.test_run.total))\n\n status_dict = {}\n # Test run name\n status_dict[offset + self.PIN_NAME] = self.test_run.name\n # Test run start datetime\n status_dict[offset + self.PIN_DATE] = self.test_run.date\n # Test run advance status string\n status_dict[offset + self.PIN_STATUS_TEXT] = \"{}/{}\".format(self.test_run.actual,\n self.test_run.total)\n # Test run advance status percent\n percent = self.test_run.actual / self.test_run.total * 100\n status_dict[offset + self.PIN_STATUS_GRAPH] = percent\n # Test run result type numbers\n status_dict[offset + self.PIN_TYPES] = \"S{} F{} B{}\".format(self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked)\n # Test run led TODO manage color\n status_dict[offset + self.PIN_LED] = 255\n\n self.post_dict(status_dict)", "def get_telemetry ():\n telemetry = OrderedDict()\n\n telemetry[\"ip_addr\"] = socket.gethostbyname(socket.gethostname())\n\n telemetry[\"mem_free\"] = psutil.virtual_memory().free\n\n telemetry[\"cpu_num\"] = psutil.NUM_CPUS\n\n x = psutil.cpu_times()\n telemetry[\"cpu_times\"] = OrderedDict([ (\"user\", x.user), (\"system\", x.system), (\"idle\", x.idle) ])\n\n x = psutil.disk_usage(\"/tmp\")\n telemetry[\"disk_usage\"] = OrderedDict([ (\"free\", x.free), (\"percent\", x.percent) ])\n\n x = psutil.disk_io_counters()\n telemetry[\"disk_io\"] = OrderedDict([ (\"read_count\", x.read_count), (\"write_count\", x.write_count), (\"read_bytes\", x.read_bytes), (\"write_bytes\", x.write_bytes), (\"read_time\", x.read_time), (\"write_time\", x.write_time) ])\n\n x = psutil.network_io_counters()\n telemetry[\"network_io\"] = OrderedDict([ (\"bytes_sent\", x.bytes_sent), (\"bytes_recv\", x.bytes_recv), (\"packets_sent\", x.packets_sent), (\"packets_recv\", x.packets_recv), (\"errin\", x.errin), (\"errout\", x.errout), (\"dropin\", x.dropin), (\"dropout\", x.dropout) ])\n\n return telemetry", "def train():\n import trace\n trace.train()", "def run_experiment():\n pass", "def log_event(event):\r\n tracker.send(event)", "def test_send(self):\n # Required to get useful test names\n super(TestCisObjOutput_local, self).test_send()", "async def module_send_event(self, event: str, ctx, *args, **kwargs):\n self.logger.debug(f\"Sending event '{event}', {ctx=}, {args=}, {kwargs=}\")\n for module in self._features.values():\n method = getattr(module.handle, f\"module_on_{event}\", None)\n if callable(method):\n await method(ctx, *args, **kwargs)", "def sample(self, context: Context) -> T:\n ...", "def send(self, count: int):\n return self.analytics.send(self.anal_name, count)", "def test_gentest(self):\n test = databench.testing.AnalysisTest(Yodler)\n yield test.trigger('echo', ['light', 'red'])\n self.assertIn(('data', {'light': 'red'}), test.emitted_messages)" ]
[ "0.5905889", "0.58216053", "0.5645284", "0.5576144", "0.5565876", "0.5565876", "0.55629486", "0.555112", "0.55271477", "0.5478809", "0.54242194", "0.5422521", "0.54215646", "0.5384052", "0.5356486", "0.5323799", "0.5301311", "0.529773", "0.5288258", "0.5286861", "0.5278613", "0.52646774", "0.5238881", "0.5191506", "0.5187488", "0.51866156", "0.5145957", "0.5143472", "0.51333326", "0.5132768" ]
0.71745557
0
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
def convert_file_size_to_int(size: Union[int, str]): if isinstance(size, int): return size if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_bytes_size(size_str):\n\n m = BYTES_REGEX.fullmatch(size_str.lower())\n if m:\n number = int(m.group(1))\n\n if m.group(2) is not None:\n unit = m.group(2)\n conversion = SIZE_UNITS.get(unit)\n if conversion:\n return conversion * number\n return number\n else:\n raise ValueError(\"Invalid size string: {}\".format(size_str))", "def anySizeToBytes(size_string):\n # separate integer from unit\n try:\n size, unit = size_string.split()\n except Exception:\n try:\n size = size_string.strip()\n unit = ''.join([c for c in size if c.isalpha()])\n if len(unit) > 0:\n size = size[:-len(unit)]\n except Exception:\n return -1\n if len(size) == 0:\n return -1\n size = float(size)\n if len(unit) == 0:\n return int(size)\n short_unit = unit.upper()[0]\n\n # convert\n units_dict = {'T': 40, 'G': 30, 'M': 20, 'K': 10}\n if short_unit in units_dict:\n size = size * 2**units_dict[short_unit]\n return int(size)", "def parse_size(size_str):\n try:\n return int(size_str)\n except ValueError, e:\n pass\n\n try:\n num = int(size_str[:-1])\n except ValueError, e:\n raise VMBuilderUserError(\"Invalid size: %s\" % size_str)\n\n if size_str[-1:] == 'g' or size_str[-1:] == 'G':\n return num * 1024\n if size_str[-1:] == 'm' or size_str[-1:] == 'M':\n return num\n if size_str[-1:] == 'k' or size_str[-1:] == 'K':\n return num / 1024", "def size_human2byte(s_str):#{{{\n s_byte = None\n if s_str.isdigit():\n s_byte = int(s_str)\n else:\n s_str = s_str.upper()\n match = re.match(r\"([0-9]+)([A-Z]+)\", s_str, re.I)\n if match:\n items = match.groups()\n size = int(items[0])\n if items[1] in [\"B\"]:\n s_byte = size\n elif items[1] in [\"K\", \"KB\"]:\n s_byte = size*1024\n elif items[1] in [\"M\", \"MB\"]:\n s_byte = size*1024*1024\n elif items[1] in [\"G\", \"GB\"]:\n s_byte = size*1024*1024*1024\n else:\n print(\"Bad maxsize argument:\", s_str, file=sys.stderr)\n return -1\n else:\n print(\"Bad maxsize argument:\", s_str, file=sys.stderr)\n return -1\n return s_byte", "def _size_to_bytes(size):\n\tunits = 'KMGTPEZY' # note that position of letter is same as power - 1\n\tmatch = re.search(r'^\\s*([-+]?\\s*[0-9]*\\.?[0-9]*)\\s*([' + units + r']?\\s*B?\\s*S?)\\s*', size, re.IGNORECASE)\n\tif match is None or match.group(1) == '':\n\t\traise ValueError(\"size string not in proper format 'number [kmgtpezy]': \" + size)\n\tmem_size = float(re.sub(r'\\s*', '', match.group(1)))\n\tunit = re.sub(r'\\s*', '', match.group(2)).upper()\n\tunit = re.sub(r'B?S?$', '', unit) # remove trailing units symbol\n\tif unit == '':\n\t\tunit_pow = 0\n\telse:\n\t\tunit_pow = units.find(unit) + 1\n\tbyte_size = int(round(mem_size * (1024 ** unit_pow)))\n\treturn byte_size", "def get_size(size):\n if size.isdigit():\n return int(size)\n\n def do_get_size(num, unit):\n u = units[unit]\n if num.find('.') == -1:\n return int(num) * u\n return int(float(num) * u)\n\n s = size.strip().upper()\n if s.find(' ') == -1:\n num, unit = re.sub(r\"([\\d.]+)\", r\"\\1 \", s).split()\n else:\n num, unit = s.split()\n\n try:\n return do_get_size(num, unit)\n except KeyError:\n\traise Exception('unknown size unit[%s]' % size)", "def bytes_from_str( size_str ):\n unit_conversions = { char: 1024**power for ( power, char ) in enumerate( [ \"B\", \"K\", \"M\", \"G\", \"T\" ] ) }\n try:\n coeff = unit_conversions[ size_str.upper()[-1] ]\n size_str = size_str[:-1]\n except KeyError:\n coeff = 1\n try:\n size = float( size_str )\n except ValueError:\n print( \"Invalid size string: {}\".format( size_str ) )\n exit( -1 )\n return coeff * size", "def 
convertFromBytes(size, unit):\n\tif (unit == 'kb'):\n\t\treturn size / 10000\n\telif (unit == 'mb'):\n\t\treturn size / 1000000\n\telif (size == 'gb'):\n\t\treturn size / 1000000000", "def bytes_to_size(size):\n if not size >> 10 or size < 0:\n return str(size)\n elif not size >> 20:\n return '{:.2f}KB'.format(size / 1024.0)\n elif not size >> 30:\n return '{:.2f}MB'.format(size / (1024.0 ** 2))\n elif not size >> 40:\n return '{:.2f}GB'.format(size / (1024.0 ** 3))\n else:\n return '{:.2f}TB'.format(size / (1024.0 ** 4))", "def parse_size(text, unit):\n\n text = text.strip()\n text = text.upper()\n unit = unit.upper()\n\n # First, handle the suffixes\n if text.endswith('B'):\n text = text[:-1]\n if text.endswith('I'):\n text = text[:-1]\n\n if not text:\n return ValueError('Empty size')\n\n if text[-1] in _SIZE_FACTORS:\n factor = _SIZE_FACTORS[text[-1]]\n text = text[:-1]\n else:\n factor = _SIZE_FACTORS[unit]\n\n try:\n value = float(text) * factor\n except ValueError:\n raise ValueError(\n 'Cannot parse \"{}\" as {}iB value.'.format(text, unit)\n )\n\n if value % _SIZE_FACTORS[unit]:\n raise ValueError('Value must be multiple of 1 {}iB'.format(unit))\n return int(value / _SIZE_FACTORS[unit])", "def parse_size(text, unit):\n\n text = text.strip()\n text = text.upper()\n unit = unit.upper()\n\n # First, handle the suffixes\n if text.endswith('B'):\n text = text[:-1]\n if text.endswith('I'):\n text = text[:-1]\n\n if not text:\n return ValueError('Empty size')\n\n if text[-1] in _SIZE_FACTORS:\n factor = _SIZE_FACTORS[text[-1]]\n text = text[:-1]\n else:\n factor = _SIZE_FACTORS[unit]\n\n try:\n value = float(text) * factor\n except ValueError:\n raise ValueError(\n 'Cannot parse \"{}\" as {}iB value.'.format(text, unit)\n )\n\n if value % _SIZE_FACTORS[unit]:\n raise ValueError('Value must be multiple of 1 {}iB'.format(unit))\n return int(value / _SIZE_FACTORS[unit])", "def baseSize_convert(baseSize_string): \r\n # Convert input genome size to int\r\n if baseSize_string[-1].upper() == 'K':\r\n baseSize = float(baseSize_string[0:-1]) * 1000\r\n elif baseSize_string[-1].upper() == 'M':\r\n baseSize = float(baseSize_string[0:-1]) * 1000000\r\n elif baseSize_string[-1].upper() == 'G':\r\n baseSize = float(baseSize_string[0:-1]) * 1000000000\r\n else:\r\n baseSize = float(baseSize)\r\n \r\n return int(baseSize)", "def _disk_size_in_gb(_string):\n try:\n value = int(_string)\n except ValueError as e:\n raise argparse.ArgumentTypeError(str(e))\n if value <= 0:\n raise argparse.ArgumentTypeError('Size must be positive value')\n return value", "def convert_file_size_string(value):\n # list of file format sizes\n file_format_sizes = (\"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n # dictionary mapping to multiplier\n file_format_scale = {\"B\" : 1,\n \"kB\" : 1e3,\n \"MB\" : 1e6,\n \"GB\" : 1e9,\n \"TB\" : 1e12,\n \"EB\" : 1e15,\n \"ZB\" : 1e18,\n \"YB\" : 1e21}\n if isinstance(value, str):\n if value.endswith(file_format_sizes):\n suffix = value[-2:]\n size = int(value[:-2])\n elif value[-1] == \"B\":\n suffix = \"B\"\n size = int(value[:-1])\n else:\n suffix = \"B\"\n size = int(value)\n # multiply by scalar\n size *= file_format_scale[suffix]\n return size\n else:\n return value", "def size_in_mb(size_in_bytes):\n if size_in_bytes < 10**6:\n return size_in_bytes // 1000\n else:\n return size_in_bytes // 10**6", "def human_to_bytes(size):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P')\n unit = size[-1:].strip().upper()\n if unit == \"B\":\n # Strip off trailing 'b' and see if we've 
got another unit\n size = size[:-1]\n unit = size[-1:].strip().upper()\n if unit in symbols:\n num = size[:-1]\n else:\n unit = \"B\"\n num = size\n else:\n # Assume size in bytes if no units specified?\n unit = \"B\"\n num = size\n assert num.isdigit() and unit in symbols\n num = float(num)\n prefix = {symbols[0]:1}\n for i, size in enumerate(symbols[1:]):\n prefix[size] = 1 << (i+1)*10\n return int(num * prefix[unit])", "def to_bytes(size):\n size2bytes = {\n \"b\":1, \"bytes\":1, \"byte\":1,\n \"k\":1024, \"kib\":1024, \"kb\":1000,\n \"m\": 1024**2, \"mib\": 1024**2, \"mb\": 1000**2,\n \"g\": 1024**3, \"gib\": 1024**3, \"gb\": 1000**3,\n \"t\": 1024**4, \"tib\": 1024**4, \"tb\": 1000**4,\n \"p\": 1024**5, \"pib\": 1024**5, \"pb\": 1000**5,\n \"e\": 1024**6, \"eib\": 1024**6, \"eb\": 1000**6,\n \"z\": 1024**7, \"zib\": 1024**7, \"zb\": 1000**7,\n \"y\": 1024**8, \"yib\": 1024**8, \"yb\": 1000**8\n }\n \n size = size.replace(' ','')\n match = re.search('(?P<size>[0-9.]+)(?P<units>[a-zA-Z]+)$', size)\n \n if match:\n human_units = match.group('units').lower()\n human_units = human_units.lstrip().rstrip()\n scaling_factor = size2bytes[human_units]\n bytes = int(math.ceil(scaling_factor * float(match.group('size'))))\n else:\n # Cannot parse units,\n # cannot convert value\n # into bytes\n return None\n \n return bytes", "def convert_size(size_bytes):\n # Sizes range from B to YiB, \n # warning larger sizes storage\n # may results in blackhole \n size_name = (\n \"B\", \"KiB\", \"MiB\", \n \"GiB\", \"TiB\", \"PiB\", \n \"EiB\", \"ZiB\", \"YiB\"\n )\n if size_bytes == 0: \n return \"0B\"\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"{0}{1}\".format(s, size_name[i])", "def filter_storage_size_num(size_str):\n\n # pattern: '^[1-9][\\d\\.]*[MGT]B?$', multiplier=1000 (not KiB)\n if size_str.endswith('B'):\n size_str = size_str[:-1]\n try:\n size_num = 1000000\n for multiplier in ['M', 'G', 'T']:\n if size_str.endswith(multiplier):\n return '{:.2f}'.format(size_num * float(size_str[:-1]))\n size_num = size_num * 1000\n return '{:.2f}'.format(float(size_str))\n except ValueError as ex:\n logging.error(size_str + \" is not a valid size string\")\n raise", "def __get_size_as_number(self, size_arg, size_string):\n _method_name = '__get_size_as_number'\n\n match = self.__size_regex.match(size_string)\n if not match:\n ex = exception_helper.create_alias_exception('WLSDPLY-08308', size_arg, size_string)\n self._logger.throwing(ex, class_name=self._class_name, method_name=_method_name)\n raise ex\n number = int(match.group(1))\n multiplier = self.__get_size_multiplier(match.group(2))\n return number * multiplier", "def size_to_gb(self, value):\n nb = re.search(\"[0-9]+\", value)\n if nb:\n nb = int(re.search(\"[0-9]+\", value).group())\n else:\n return 0\n if \"MB\" in value:\n return nb / 1024 if nb else 0\n elif \"GB\" in value:\n return nb\n else:\n return 0", "def convert_unit(size_in_bytes, unit):\n if unit == 'KB':\n return size_in_bytes/1024\n elif unit == 'MB':\n return size_in_bytes/(1024*1024)\n elif unit == 'GB':\n return size_in_bytes/(1024*1024*1024)\n else:\n return size_in_bytes", "def convert_unit_size_to_num(size, unit=None):\n if unit:\n unit = MemoryUnit.validate_unit(unit)\n else:\n unit = MemoryUnit.UNIT_SIZE_DEFAULT\n log.info(_('A memory unit is not provided for size; using the '\n 'default unit %(default)s.') % {'default': 'B'})\n regex = re.compile('(\\d*)\\s*(\\w*)')\n result = regex.match(str(size)).groups()\n 
if result[1]:\n unit_size = MemoryUnit.validate_unit(result[1])\n converted = int(str_to_num(result[0])\n * MemoryUnit.UNIT_SIZE_DICT[unit_size]\n * math.pow(MemoryUnit.UNIT_SIZE_DICT\n [unit], -1))\n log.info(_('Given size %(size)s is converted to %(num)s '\n '%(unit)s.') % {'size': size,\n 'num': converted, 'unit': unit})\n else:\n converted = (str_to_num(result[0]))\n return converted", "def parse_size(size):\n if isinstance(size, int):\n return size\n elif isinstance(size, str):\n if size.isdigit():\n return int(size)\n return None", "def human_readable_to_bytes(value):\n value = value.lower()\n if value[-2:] == 'ib':\n # Assume IEC suffix.\n suffix = value[-3:].lower()\n else:\n suffix = value[-2:].lower()\n has_size_identifier = (\n len(value) >= 2 and suffix in SIZE_SUFFIX)\n if not has_size_identifier:\n try:\n return int(value)\n except ValueError:\n raise ValueError(\"Invalid size value: %s\" % value)\n else:\n multiplier = SIZE_SUFFIX[suffix]\n return int(value[:-len(suffix)]) * multiplier", "def str2gib_size(s):\n size_in_bytes = str2size(s)\n return size_in_bytes // units.Gi", "def memstr_to_kbytes(text):\r\n kilo = 1024\r\n units = dict(K=1, M=kilo, G=kilo ** 2)\r\n try:\r\n size = int(units[text[-1]] * float(text[:-1]))\r\n except (KeyError, ValueError):\r\n raise ValueError(\r\n \"Invalid literal for size give: %s (type %s) should be \"\r\n \"alike '10G', '500M', '50K'.\" % (text, type(text))\r\n )\r\n return size", "def parse_size(size,b=1024,u='B',pre=['']+[p for p in'KMGTPEZY']):\n intsize, unit = extract_num_unit(size)\n\n # Account for 10B vs 10KB when looking for base\n if len(unit) == len(u):\n base = unit\n else:\n base = unit[1:]\n\n # Check if we know this unit's base, otherwise use default\n if base in unit_base:\n b = unit_base[base]\n pow = { k+base:v for v, k in enumerate(pre) }\n\n return float(intsize)*(b**pow[unit])", "def _parseDiskSize(self, diskSizeParam): \\\n # pylint: disable=no-self-use\n if diskSizeParam.endswith('TB'):\n return int(float(diskSizeParam[:-2]) * 1000000)\n\n if diskSizeParam.endswith('GB'):\n return int(float(diskSizeParam[:-2]) * 1000)\n elif diskSizeParam.endswith('MB'):\n # Must be an integer\n return int(diskSizeParam[:-2])\n\n return int(diskSizeParam)", "def humanbytes(size):\n # https://stackoverflow.com/a/49361727/4723940\n if not size:\n return \"\"\n # 2 ** 10 = 1024\n power = 2**10\n raised_to_pow = 0\n dict_power_n = {0: \"\", 1: \"Ki\", 2: \"Mi\", 3: \"Gi\", 4: \"Ti\"}\n while size > power:\n size /= power\n raised_to_pow += 1\n return str(round(size, 2)) + \" \" + dict_power_n[raised_to_pow] + \"B\"" ]
[ "0.84322685", "0.8161158", "0.8068183", "0.803176", "0.803071", "0.78486603", "0.78093255", "0.7693891", "0.7610743", "0.7565714", "0.7565714", "0.7534851", "0.7485024", "0.7442531", "0.744205", "0.7435005", "0.7359864", "0.7353351", "0.7342231", "0.7331826", "0.7291998", "0.7272052", "0.72575134", "0.725365", "0.72460604", "0.723058", "0.7145148", "0.70835704", "0.70517343", "0.705141" ]
0.8204714
1
Returns a list of all cached files with appropriate metadata.
def get_all_cached_files(cache_dir=None): if cache_dir is None: cache_dir = TRANSFORMERS_CACHE else: cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_files = [] for file in os.listdir(cache_dir): meta_path = os.path.join(cache_dir, f"{file}.json") if not os.path.isfile(meta_path): continue with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"].replace('"', "") cached_files.append({"file": file, "url": url, "etag": etag}) return cached_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_all_cache_files(self):\n files = set()\n dir_tree = os.walk(self.config.get('cachedir', self.CACHEDIR))\n for dirpath, _, filenames in dir_tree:\n for file_name in filenames:\n if 'cache' in file_name:\n files.add(os.path.join(dirpath, file_name))\n return files", "def list_cached():\n for json_name in cached_files():\n source_name = get_source_file_name(json_name)\n yield (json_name, source_name)", "def files():\n return get_cached(\"files.json\")", "def find_cache_files():\n files = []\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"*.pyc\"):\n files.append(os.path.join(root, filename))\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"__pycache__\"):\n files.append(os.path.join(root, filename))\n\n return files", "def cached_files():\n for (dir_path, _dir_names, file_names) in os.walk(CACHE):\n for file_name in file_names:\n if is_json_file(file_name):\n yield os.path.join(dir_path, file_name)", "def _list_dir(self):\n return [os.path.join(self.cache_dir, fn)\n for fn in os.listdir(self.cache_dir)]", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def listFiles(self):\n pass", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def files(self):\n files = []\n if self.package_type == 'package':\n file_data = dict([(k, self[k]) \\\n for k in ['size', 'sha1', 'sha256', 'md5sum']])\n file_data['name'] = self['filename'].split('/')[-1]\n files.append(file_data)\n else:\n for d in self['files']:\n file_data = d.copy()\n # Get checksum data as well...\n for key in ['sha1', 'sha256']:\n for data in self['checksums-' + key]:\n if file_data['name'] == data['name']:\n file_data[key] = data[key]\n files.append(file_data)\n return files", "def files(self) -> \"FileMetadataList\":\n from cognite.client.data_classes import FileMetadataList\n\n return self._retrieve_related_resources(FileMetadataList, self._cognite_client.files)", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def get_request_candidates(self):\n return os.listdir(self.cache_dir_)", "def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n elif isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n if not os.path.isdir(cache_dir):\n return []\n\n cached_models = []\n for file in os.listdir(cache_dir):\n if file.endswith(\".json\"):\n meta_path = os.path.join(cache_dir, file)\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"]\n if url.endswith(\".bin\"):\n size_MB = os.path.getsize(meta_path.strip(\".json\")) / 1e6\n cached_models.append((url, etag, size_MB))\n\n return cached_models", "def list_of(self):\r\n self.files = os.listdir(self.p)\r\n self.size = [0] * len(self.files)\r\n self.created = [0] * len(self.files)\r\n self.modified = [0] * len(self.files)\r\n total_size = 0\r\n iteration = 0\r\n for file in self.files:\r\n self.fol = os.path.join(self.p, file)\r\n self.modified[iteration] = time.ctime(os.path.getmtime(f\"{self.fol}\"))\r\n self.created[iteration] = time.ctime(os.path.getctime(f\"{self.fol}\"))\r\n for path, dirs, files in os.walk(self.fol):\r\n for fol in files:\r\n fpath = os.path.join(path, fol)\r\n total_size += os.path.getsize(fpath)\r\n self.size[iteration] = total_size\r\n iteration += 1\r\n return 
self.files, self.size, self.created, self.modified", "def get_files(self):\r\n return self._filelist", "def get_files(self):\n return self._files.values()", "def get_files(self):\n\n self.files = []\n retriever_methods = [\n m\n for m in rtorrent9.file.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # 2nd arg can be anything, but it'll return all files in torrent\n # regardless\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"f.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n offset_method_index = retriever_methods.index(\n rtorrent9.rpc.find_method(\"f.offset\")\n )\n\n # make a list of the offsets of all the files, sort appropriately\n offset_list = sorted([r[offset_method_index] for r in results])\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n # get proper index positions for each file (based on the file\n # offset)\n f_index = offset_list.index(results_dict[\"offset\"])\n\n self.files.append(\n File(self._rt_obj, self.info_hash, f_index, **results_dict)\n )\n\n return self.files", "def get_all(self) -> Generator:\n\n for filename in self.list_files():\n yield self.get(filename)", "def files(self, **kwargs) -> \"FileMetadataList\":\n return self._cognite_client.files.list(asset_ids=[self.id], **kwargs)", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []", "def get_meta_of_files(session=konfuzio_session()) -> List[dict]:\n url = get_documents_meta_url()\n result = []\n\n while True:\n r = retry_get(session, url)\n data = r.json()\n if isinstance(data, dict) and 'results' in data.keys():\n result += data['results']\n if 'next' in data.keys() and data['next']:\n url = data['next']\n else:\n break\n else:\n result = data\n break\n\n sorted_documents = sorted(result, key=itemgetter('id'))\n return sorted_documents", "def retrieve_all_files(self):\n result = utilities.rscandir(\n self.folder(), ignore_dirs=[\".git\"])\n\n return result", "def get_cache_file_list(self,\n file_list_obj=None,\n file_info_class=FileInfo,\n file_list_class=FileList):\n if not file_list_obj:\n file_list_obj = file_list_class()\n temp_ = self.read_pickle_object_in_file()\n if temp_:\n for tup_ in temp_:\n finf_ = file_info_class(in_tuple=tup_)\n fn_ = finf_.filename\n self.cache_file_list_dict[fn_] = finf_\n file_list_obj.append(finf_)\n return file_list_obj", "def find_archives(self):\n pattern = re.compile(r'^[0-9A-F]{40}\\.tar$', re.IGNORECASE)\n for entry in self.context.list_entries(self.cache_directory):\n if pattern.match(entry):\n yield os.path.join(self.cache_directory, entry)", "def files(self):\n return self._files.items()", "def listFiles(self):\n current_month = NoaaCycle.last_month()\n # 2do archive contain extra path: data/noaa/metar/2011-* ; better is 2011-*\n dir_regex = os.path.join(self.download_dir, \"%s-*\" % current_month)\n self.archive_name = os.path.join(self.download_dir, current_month)\n return glob.glob(dir_regex)", "def cache_file_metadata(self, filenames):\n file_metadata = {}\n for fn in filenames:\n metadata = parse(fn)\n metadata['fn'] = fn[:-4]\n file_metadata_summary = self.gen_file_metadata_summary(metadata)\n file_metadata[file_metadata_summary] = metadata\n return 
file_metadata", "def get_list_of_comitted_files():\n files = []\n output = []\n try:\n output = subprocess.check_output(['git','diff-index', '--name-status', '--cached','HEAD']\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError:\n print(\"Error diff files get: trace %s\" % subprocess.CalledProcessError)\n return files\n\n for result in output.split(\"\\n\"):\n logging.info(result)\n if result != '':\n match = modified.match(result)\n if match:\n files.append(match.group('name'))\n\n return files", "def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed" ]
[ "0.7865767", "0.7818694", "0.7672085", "0.7453912", "0.74362546", "0.7339646", "0.70547336", "0.69582987", "0.692106", "0.68153465", "0.6725137", "0.6692921", "0.6681838", "0.66680914", "0.6637055", "0.6628075", "0.66022044", "0.6594799", "0.6579378", "0.65584", "0.6520238", "0.65101284", "0.65095323", "0.64498276", "0.6445087", "0.6439403", "0.64391756", "0.6432613", "0.64205647", "0.64188284" ]
0.81654704
0
Extract repo_name, revision and filename from an url.
def extract_info_from_url(url):
    search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url)
    if search is None:
        return None
    repo, revision, filename = search.groups()
    cache_repo = "--".join(["models"] + repo.split("/"))
    return {"repo": cache_repo, "revision": revision, "filename": filename}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_url(repo_url: str) -> List[str]:\n try:\n return re.findall(r\"github\\.com/([^/]+)/([^\\/?]+)\", repo_url, re.I)[0]\n except IndexError:\n raise AnalyzerError(\"Incorrect repository URL\")", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def package_name_from_url(url):\n\n url_repo_part = url.split('/')[-1]\n\n if url_repo_part.endswith('.git'):\n return url_repo_part[:-4]\n\n return url_repo_part", "def get_reponame_from_git_url(url: str) -> Optional[str]:\n repo_url = parse_git_repo(url)\n if repo_url:\n return repo_url.repo\n return None", "def get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename", "def SplitScmUrl(url):\r\n url_split = url.split('@')\r\n scm_url = url_split[0]\r\n scm_rev = 'HEAD'\r\n if len(url_split) == 2:\r\n scm_rev = url_split[1]\r\n return (scm_url, scm_rev)", "def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment", "def GetVersionFromUrl(url):\n return __ParseUrl(url)[2].split('/')[3]", "def url_file_name(url):\r\n return url[url.rfind('/') + 1:]", "def _parse_url(self, url):\n url_prefix = self.URL_PREFIX\n assert(url[:len(url_prefix)] == url_prefix)\n key, file_attrs = url[len(url_prefix):].split('/', 1)\n file_, attrs = parse_url_opts(file_attrs)\n return key, file_, attrs", "def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())", "def get_filename_from_url(url: str) -> str:\n return os.path.basename(urllib.parse.urlparse(urllib.parse.unquote_plus(url)).path)", "def parse_filename(url):\n # extract the URL path\n url_path = urlparse.urlparse(url).path\n filename = url_path.split('/')[-1]\n\n # make loose assumption the file name is for an HTML page\n if len(filename) < 1:\n filename = 'index.html'\n\n return filename", "def url_filename(url):\n return os.path.basename(urlparse.urlparse(url).path)", "def _parse_url(url: str) -> Optional[str]:\n match = re.search(r\"pastecord.com(?:/raw|/documents)?/(\\w+)(?:\\.\\w+)?\", url)\n if match is None:\n return None\n return match.group(1)", "def _get_file_name(url: str) -> str:\n url = url.strip('/')\n result = findall(r'/(\\w+\\.\\w+)[?|$]', url)\n if result:\n return result[-1]\n return url.split('/')[-1]", "def svn_url(svninfo=None):\n if svninfo is None:\n svninfo = svn_info()\n return svninfo.find('entry/url').text", "def parse_ref(url_path):\n ref = url_path.lstrip('/')\n if not ref:\n ref = os.environ.get('DEFAULT_GIT_REF', 'HEAD').strip()\n return ref", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def owner_project_from_github_url(url):\n if not re.match('https://github.com/([a-zA-Z0-9-_]*)/[a-zA-Z0-9-_]*', url):\n print(str(url) + \"is not a valid url!\")\n exit(-1)\n elements = url.split('/')\n project_name = elements[-1]\n 
organization_name = elements[-2]\n return (organization_name, project_name)", "def _get_filename_from_url(self) -> Optional[str]:\n file_name_portion = None\n\n right_portion = self.url.rsplit(\"/\", 1)\n if len(right_portion) == 2:\n # split any potential query params - these start with \"?\"\"\n file_name_portion = right_portion[1].split(\"?\")[0].strip()\n\n if len(file_name_portion) == 0:\n file_name_portion = None\n\n return file_name_portion", "def fileId_from_url(url):\r\n raw_fileId = re.findall(\"~[A-z.]+/[0-9]+\", url)[0][1:]\r\n return raw_fileId.replace('/', ':')", "def fileId_from_url(url):\r\n raw_fileId = re.findall(\"~[0-z.]+/[0-9]+\", url)[0][1: ]\r\n return raw_fileId.replace('/', ':')", "def get_username_from_git_url(url: str) -> Optional[str]:\n repo_url = parse_git_repo(url)\n if repo_url:\n return repo_url.username\n return None", "def parse_url(url):\n url_parts = url.split('/')\n webcam_name = url_parts[-3] + 'CAM' + url_parts[-2]\n file_ext = url[-5:-1]\n last_update = 0.\n return {\n 'url': url[:-1], # Skip end of line\n 'name': webcam_name,\n 'imgpath': os.path.join(WEBCAM_DIR, webcam_name, '%d' + file_ext),\n 'last_update': last_update\n }", "def _get_file_url (url, path) :\n path = path + \"/\" + url.replace (\"/\", \"!\").replace (\":\",\"\").replace (\".\",\"-\")\n spl = path.split (\"-\")\n if len (spl) >= 2 :\n ext = spl [len (spl)-1].lower ()\n if 2 <= len (ext) <= 3 and ext in [\"png\", \"jpg\", \"zip\", \"txt\", \"gif\", \"py\", \"cpp\", \"gz\", \"pdf\", \"tif\", \"py\", \"html\", \"h\"] :\n spl = path.split (\"-\")\n spl = spl [:len(spl)-1]\n path = \"-\".join (spl) + \".\" + ext\n return path", "def process_url(url):\n parsed = urlparse(url)\n if parsed.scheme:\n return parsed.netloc, parsed.path\n else:\n host_part = parsed.path\n hostname = host_part.partition(\"/\")[0]\n path = \"/\" + host_part.partition(\"/\")[2]\n return hostname, path", "def extract_api_name(url):\n host = RE_HOST.sub('\\\\1', url)\n return host", "def get_maven_id_from_url(url):\n burl = utils.strip_leading_slash(url)\n artifact_id = utils.get_last_url_segment(burl)\n group_id = get_group_id(burl)\n return group_id, artifact_id" ]
[ "0.66521394", "0.6614812", "0.65995854", "0.65686816", "0.6470143", "0.6419749", "0.6403617", "0.6361332", "0.6344536", "0.63396895", "0.63210946", "0.63057446", "0.6293681", "0.624197", "0.61405224", "0.611934", "0.61118877", "0.6109042", "0.61074966", "0.60993886", "0.6092359", "0.60879666", "0.60466117", "0.6044558", "0.59907115", "0.5961171", "0.59148824", "0.5902959", "0.5880818", "0.58781433" ]
0.7024231
0
Remove, if they exist, file, file.json and file.lock
def clean_files_for(file):
    for f in [file, f"{file}.json", f"{file}.lock"]:
        if os.path.isfile(f):
            os.remove(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def cleanUp(self, f):\n os.system('rm ' + f)", "def tearDown(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")", "def tearDown(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")", "def tearDown(self):\n if os.path.exists('file.json'):\n os.remove(\"file.json\")", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def tearDown(self):\n try:\n remove(\"file.json\")\n except:\n pass", "def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)", "def remove(path):", "def remove_lock_file():\n # pylint: disable=global-statement\n global LOCK_FILENAME\n\n if LOCK_FILENAME is not None and os.path.isfile(LOCK_FILENAME):\n os.unlink(LOCK_FILENAME)", "def remove(self):\n self.remove_file()", "def remove_file(self, path):\n pass", "def removeLocks():\n global lockFnames\n for lockFname in lockFnames:\n if isfile(lockFname):\n logging.debug('Removing lockfile %s' % lockFname)\n os.remove(lockFname)\n\n lockFnames = []", "def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return", "def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)", "def remove_file(path: str) -> None:\n\tremove(path)", "def remove_file(self):\n if self.file_exists:\n os.remove(self.file_name)", "def rm_file(file_):\n Path(file_).unlink(missing_ok=True)", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def remove_all_state_files():\n for state_file in Path(CONFIG_DIR).glob('**/state.json'):\n state_file.unlink()", "def _remove_unique_file(self):\n if self._uniquefile_created:\n self._unlink(self.uniquefile)\n self._uniquefile_created = False\n self._p(\"Unique file deleted: %s\" % self.uniquefile)", "def _clear_file(file_name):\n if os.path.exists(file_name):\n os.remove(file_name)", "def _remove_all_manifest_files(self):\n manifest_files = Path(self.manifest_dir).glob(\"**/elyra-component-manifest-*.json\")\n for file in manifest_files:\n os.remove(str(file))", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n 
for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()", "def remove_temporary_settings():\n if os.path.exists(\"settings.json\"):\n os.remove(\"settings.json\")", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def remove(path: str) -> None:\n if Stat.isfile(path):\n Stat.forget(path)\n os.remove(path)\n elif Stat.exists(path):\n Stat.forget(path)\n shutil.rmtree(path)", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()" ]
[ "0.7176565", "0.71468484", "0.7132706", "0.71127635", "0.71127635", "0.71077317", "0.7105861", "0.7046819", "0.70287746", "0.6958039", "0.6896235", "0.68671364", "0.68619233", "0.6854565", "0.6853768", "0.6840403", "0.68193984", "0.6813936", "0.680045", "0.6765633", "0.6734554", "0.67333835", "0.6721752", "0.6711471", "0.6700949", "0.668347", "0.6667689", "0.66494656", "0.66406953", "0.66380125" ]
0.8273665
0
Move file to repo following the new huggingface hub cache organization.
def move_to_new_cache(file, repo, filename, revision, etag, commit_hash):
    os.makedirs(repo, exist_ok=True)
    # refs
    os.makedirs(os.path.join(repo, "refs"), exist_ok=True)
    if revision != commit_hash:
        ref_path = os.path.join(repo, "refs", revision)
        with open(ref_path, "w") as f:
            f.write(commit_hash)
    # blobs
    os.makedirs(os.path.join(repo, "blobs"), exist_ok=True)
    blob_path = os.path.join(repo, "blobs", etag)
    shutil.move(file, blob_path)
    # snapshots
    os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True)
    os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True)
    pointer_path = os.path.join(repo, "snapshots", commit_hash, filename)
    huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path)
    clean_files_for(file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_file(self, ctx):\n pass", "def rupture(url, outpath=None, branch='master', dirname=None, release=None):\n try:\n file, filename = _download(\n url, outpath=outpath, \n dirname=dirname, branch=branch, \n release=release\n )\n base, cs = _unzip(filename)\n _delete(filename)\n if release or branch != 'master':\n return\n to_find = \"{}/{}-{}\".format(base, file, branch)\n _newname = dirname or file\n shutil.move(to_find, base+\"/\"+_newname)\n except Exception as e:\n six.print_(traceback.format_exc())\n six.print_(\"Cannot download the repo. Could you check the repo url ?\")", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def move_from_temp_directory(self):", "def update_from_repo():\n\treturn", "def replace(self):\n if self.success is False:\n raise TaskError('not ready')\n try:\n temp_src = '/tmp/' + str(random.randint(10000, 99999)) + '.mp3'\n os.move(self.source, temp_src)\n os.move(self.target, self.source)\n os.unlink(temp_src)\n except OSError as e:\n print(e)", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def update(filepath, github_account):\n repo = _git.clone_from_github(\n _repo_path(), join(filepath, _repo_name()), github_account=github_account)\n if _update_disco(repo, github_account) > 0:\n repo.push()", "def push(target):\n if target is None:\n target = getcwd()\n\n target = path.abspath(target)\n\n dot_chunk = load_chunk(target)\n src = dot_chunk[\"src\"]\n source = load_source(src)\n\n copy(target, source)", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def _move_self_to(self, new_dir=None, new_name=None):\n if self.is_downloaded:\n if new_dir and not new_name:\n shutil.move(self._download_path, os.path.join(new_dir, self.download_filename))\n elif new_name and not new_dir:\n shutil.move(self._download_path, os.path.join(self.download_dir, new_name))\n elif new_name and new_dir:\n shutil.move(self._download_path, os.path.join(new_dir, new_name))", "def move_file_to_config(path):\n destination = str(os.path.expanduser('~')) +'/.config/hackerjobs/'\n shutil.copy(path,destination)", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def move_to(self, file_name, to_dir, change_name_to=None):\n raise NotImplementedError", "def move_to(self, path: str) -> None:\n self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path))\n os.rename(self._file_path, self._new_path)\n self._file_was_moved = True", "def move():\n os.system('sudo mv {} {}'.format(TMP_HOSTS_FILE, HOSTS_FILE))", "def move(self,fileName,destDir):\n self.unload(fileName)\n 
FileInfos.move(self,fileName,destDir)", "def update_source(self):\n cwd = None\n if os.path.exists(self.path):\n cwd = self.path\n cmd = 'git fetch && git reset --hard origin/master'\n else:\n cmd = 'git clone %s %s' % (self.repo_url, self.path)\n Command(cmd, cwd=cwd)", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def testMoveAndCopyFile(self):\n try:\n remoteLocator = self.__pathPdbxDictionaryFile\n fn = self.__fileU.getFileName(remoteLocator)\n # _, fn = os.path.split(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n # Test copy file\n dPath2 = os.path.join(self.__workPath, \"tdir\")\n ok = self.__fileU.mkdir(dPath2)\n self.assertTrue(ok)\n lPath2 = os.path.join(dPath2, fn)\n ok = self.__fileU.put(lPath, lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertTrue(ok)\n # Remove copied file (to test moving file next)\n ok = self.__fileU.remove(lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertFalse(ok)\n # Test move file\n ok = self.__fileU.replace(lPath, lPath2)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertFalse(ok)\n ok = self.__fileU.exists(lPath2)\n self.assertTrue(ok)\n # Now clean up files and dirs\n ok = self.__fileU.remove(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.remove(dPath2)\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def add_file(self, path):\n self.git_repo.index.add([path])", "def syncfolder():", "def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))", "def _move_chunk(self, args: MigrationArgs) -> None:\n def move_command():\n self._mongo_client.admin.command(\"moveChunk\", args.collection, find={SHARD_KEY: args.shard_key},\n to=args.shard, _secondaryThrottle=False, _waitForDelete=True)\n self._try_until_done(move_command)\n self._chunks[args.collection][args.shard_key] = args.shard\n logging.info(f\"MongoAgent: Moved chunk {args.shard_key} of collection {args.collection} to {args.shard}\")", "def flush_repo():\n server = get_server()\n run(\"rm -rf %(project_name)s\" % env)\n git.clone()\n server.setup()", "def bump_upstream_repos_shas(path):\n filelist = find_yaml_files(path)\n for filename in filelist:\n print(\"Working on %s\" % filename)\n bump_upstream_repos_sha_file(filename)", "def move_file(source, destination):\n shutil.move(source, destination)", "def move_to(self, file_name, to_dir, change_name_to=None):\n self._check_filename(file_name)\n src = posixpath.join(server_setup.LOCAL_DIR, file_name)\n file_name = file_name if change_name_to is None else change_name_to\n dest = posixpath.join(self.root, to_dir, file_name)\n print(f\"--> Moving file {src} to {dest}\")\n self._check_file_exists(dest, should_exist=False)\n self.copy(src, dest)\n self.remove(src)", "def move_by(self, path, env=None):\n env = self._find_env(env)\n old_pos = self.position(env)\n new_pos = [p + c for p, c in zip(old_pos, path)]\n env.move_agent(self, new_pos)" ]
[ "0.63573015", "0.5870837", "0.58038634", "0.57403123", "0.5718693", "0.56007093", "0.55890596", "0.546481", "0.54431987", "0.5442263", "0.54101485", "0.5409202", "0.5406879", "0.54016227", "0.5373094", "0.5357668", "0.53532946", "0.5349556", "0.5305648", "0.5292807", "0.5284817", "0.52801096", "0.5279251", "0.5277796", "0.52618086", "0.52551436", "0.52495944", "0.5248341", "0.52353305", "0.5224054" ]
0.652527
0
Subtracts CLEAN model from data
def subtract_model(self, outfile, del_script=True):
    os.system('cp -r {} {}'.format(self.ms, outfile))
    ct.subtract_model(outfile, delete=del_script)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtract(self,*datas):\n\t\tdatas = list(datas)\n\t\tresult = datas.pop(0)\n\t\tfor data in datas:\n\t\t\tresult -= data\n\t\treturn result", "def original_clean():\n # load the data\n dataset = np.genfromtxt(\"wdbc.data\", dtype=np.float, delimiter=',', usecols=(2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27,\n 28, 29, 30, 31), encoding=None)\n labels = np.genfromtxt(\"wdbc.data\", dtype=None, delimiter=',', usecols=(1), encoding=None)\n temp_labels = np.zeros(len(labels))\n for i in range(len(labels)):\n if labels[i] == 'B':\n temp_labels[i] = 0\n else:\n temp_labels[i] = 1\n # normalize\n temp_data = normalize(dataset)\n return temp_data, temp_labels", "def cleaning (data):", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def clean(self):", "def subtract(self,ctSub):\n\n # First confirm eligible for subtraction\n if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):\n raise Exception(\"Can't subtract because not meshed the same\")\n\n ctResult = copy.deepcopy(ctSub)# copy the class\n\n \n # Original method\n # ctResult.u = self.u - ctSub.u\n # ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')\n\n # New method\n ctResult.u_mesh = self.u_mesh - ctSub.u_mesh\n ctResult.v_mesh = self.v_mesh - ctSub.v_mesh\n ctResult.w_mesh = self.w_mesh - ctSub.w_mesh\n ctResult.u_cubed = self.u_cubed - ctSub.u_cubed\n\n\n return ctResult", "def clean(c):", "def clean(args):\n with_dataset(args, Dataset._clean)", "def clean(self):\n if self.reloading:\n self.cleaned = pd.concat(\n [self.raw[0: self.brkIdx1+1],\n self.raw[self.brkIdx3+1: self.brkIdx4+1]])\n else:\n self.cleaned = self.raw[0: self.brkIdx1+1]\n self.cleaned.reset_index(drop=True, inplace=True) # update idx\n # -- Cubic spline that passes through the data\n sigmaLog = np.log10(self.cleaned['stress'][1:])\n cs = CubicSpline(x=sigmaLog, y=self.cleaned['e'][1:])\n self.eSigmaV = float(cs(np.log10(self.sigmaV))) # void ratio at sigmaV\n return", "def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def apply_preprocess_y(self,Y):\n Y = super(Diff_Generator, self).apply_preprocess_y(Y)\n Y[:,1:,1:,:] = Y[:,1:,1:,:] - Y[:,1:,:-1,:] # 0 correspond to the upper layer and is always 0\n return Y", "def resetmodel(self):\n for key, value in self._dentsvertsdata.items():\n value.free()\n self._dentsvertsdata.clear()", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.nan\n\n # Evaluate the proton flux data\n pcols = ['pflux_47-68', 'pflux_115-195', 'pflux_310-580',\n 'pflux_795-1193', 'pflux_1060-1900']\n self[self.data['status_p'] > max_status, pcols] = np.nan\n\n # Include both fluxes and the anisotropy index in the removal eval\n eval_cols = ecols + pcols\n eval_cols.append('anis_ind')\n\n # Remove lines without any good data\n good_cols = 
(np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)\n bad_index = good_cols[good_cols == 0].index\n self.data = self.data.drop(index=bad_index)\n\n return", "def normalize_dataset(self):", "def _de_transform(self, data):\r\n mean, variance = self._input_statistics.overall_feature_moments\r\n return data * variance + mean", "def clean(self):\n pass", "def subtract(self, other):\n return self.as_dataframe(subtract(self.data, other.data))", "def subtract(self, m): \n f = m.negate()\n return self.add(f)", "def clean_up_data(self):\n pass", "def clean(self):\n raise NotImplementedError", "def clean_data(self, opz):\n# pdb.set_trace()\n mask = (opz['Opzetstuk Noord (°)']<-1) | (opz['Opzetstuk Noord (°)']>100)\n opz = opz.drop(opz.loc[mask].index)\n opz['open'] = opz[\"Opzetstuk Noord (°)\"].apply(lambda x: 1 if x < 80 else 0)\n #Deze klopt niet. We hebben het moment nodig van opengaan en het moment van dichtgaat. Moment van openen is: wanneer de verandering van de aantal graden >1 graad is. Moment van sluiten is de laatste verandering totdat het niet meer veranderd. Zie ook code van Pieter in C#.\n opz['diff'] = opz['open'].diff()\n beweegt=opz[opz['diff']!=0]\n return beweegt", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n self.data = self.data[self.data['status'] <= max_status]\n\n return", "def clean(self):\n\n pass", "def removeEquate(self, data: ghidra.program.model.listing.Data) -> None:\n ...", "def cleanup(adata, del_prediction=False, del_2nd_moments=False):\n\n if \"pca_fit\" in adata.uns_keys():\n adata.uns[\"pca_fit\"] = None\n if \"velocyto_SVR\" in adata.uns_keys():\n adata.uns[\"velocyto_SVR\"][\"SVR\"] = None\n if \"umap_fit\" in adata.uns_keys():\n adata.uns[\"umap_fit\"][\"fit\"] = None\n if \"velocity_pca_fit\" in adata.uns_keys():\n adata.uns[\"velocity_pca_fit\"] = None\n if \"kmc\" in adata.uns_keys():\n adata.uns[\"kmc\"] = None\n if \"kinetics_heatmap\" in adata.uns_keys():\n adata.uns.pop(\"kinetics_heatmap\")\n if \"hdbscan\" in adata.uns_keys():\n adata.uns.pop(\"hdbscan\")\n\n VF_keys = [i if i.startswith(\"VecFld\") else None for i in adata.uns_keys()]\n for i in VF_keys:\n if i is not None and \"VecFld2D\" in adata.uns[i].keys():\n del adata.uns[i][\"VecFld2D\"]\n\n fate_keys = [i if i.startswith(\"fate\") else None for i in adata.uns_keys()]\n for i in fate_keys:\n if i is not None:\n if adata.uns[i][\"init_cells\"] is not None:\n adata.uns[i][\"init_cells\"] = list(adata.uns[i][\"init_cells\"])\n if \"prediction\" in adata.uns[i].keys():\n if del_prediction:\n del adata.uns[i][\"prediction\"]\n if \"VecFld_true\" in adata.uns[i].keys():\n if adata.uns[i][\"VecFld_true\"] is not None:\n del adata.uns[i][\"VecFld_true\"]\n\n if del_2nd_moments:\n from .tools.utils import remove_2nd_moments\n\n remove_2nd_moments(adata)\n\n return adata", "def clean_data(self):\n return self.instance.data", "def _clean_results(self):\n\t\tif self.file_type == \"Automobile\":\n\t\t\tcols = [\"Year\", \"Mileage\", \"Price\"]\n\t\t\tself.data.Mileage.replace([',', 'mi.', 'nan', ' '], '', regex=True, inplace=True) # Fix mileage column\n\t\t\tself.data.Price.replace([',', '\\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)\n\t\t\tself.data[cols] = self.data[cols].apply(pd.to_numeric, errors='coerce') # Coerces errors into NaN values\n\t\t\tself.data.drop(self.data[self.data.Year < 2000].index, inplace=True) # Remove cars made before 
2000\n\t\t\tself.data.drop(self.data[self.data.Price > 30000].index, inplace=True) # Remove cars over $30,000\n\t\t\tself.data.drop(self.data[(self.data.Mileage < 1000) | (self.data.Mileage > 300000)].index, inplace=True) # Remove cars with over 300,000 miles\n\t\t\tself.data['Age'] = 2018 - self.data['Year'] # Change years to Age\n\t\telif self.file_type == \"Apartment\":\n\t\t\tself.data.Area.replace(['ft2'], '', regex=True, inplace=True) # Remove ft2 from square footage column\n\t\t\tself.data.Price.replace([',', '\\$'], '', regex=True, inplace=True) # Always fix price column (, and $ removed)\n\t\telse:\n\t\t\tself.data['Street'], self.data['City'], self.data['State'] = self.data['Address'].str.split(',', 2).str\n\t\t\tdel self.data.Address\n\t\t\tself.data.drop(self.data[self.data.Price > 1000000].index, inplace=True) # Remove houses worth more than $1 million\n\n\t\tself.data.replace('^\\s*$', np.nan, regex=True, inplace=True) # Replace all empty values with np.NaN\n\t\tself.data = self.data.dropna(axis=1, how='all') # Remove Null Columns\n\t\tself.data = self.data.apply(pd.to_numeric, errors='ignore') # Coerces errors into NaN values" ]
[ "0.60749555", "0.5962677", "0.59040093", "0.58570546", "0.58570546", "0.58570546", "0.58430606", "0.57087916", "0.5620488", "0.5617179", "0.56085736", "0.55911463", "0.5569164", "0.5562851", "0.55291617", "0.5525032", "0.54715806", "0.54459673", "0.5407834", "0.53850883", "0.53633916", "0.53616035", "0.53598017", "0.53530717", "0.535129", "0.53127855", "0.5289293", "0.5260386", "0.5245111", "0.52415746" ]
0.6773486
0
Uncover blanks at this location
def uncover_blanks(self, row, col):
    checked = {}
    to_be_checked = []
    to_be_checked.append((row, col))
    while len(to_be_checked) > 0:
        sq_row, sq_col = to_be_checked.pop()
        if checked.has_key((sq_row, sq_col)):
            continue
        checked[(sq_row, sq_col)] = True
        if not self.valid_square(sq_row, sq_col):
            continue
        if self.array[sq_row][sq_col].visible is True:
            continue
        square = self.array[sq_row][sq_col]
        square.visible = True
        self.squares_left -= 1
        if square.type == SquareType.BLANK:
            start_row = sq_row-1
            start_col = sq_col-1
            end_row = sq_row+1
            end_col = sq_col+1
            for i in range(start_row, end_row+1):
                for j in range(start_col, end_col+1):
                    if not checked.has_key((i, j)):
                        to_be_checked.append((i, j))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def blank(self):\n pass", "def check_and_clear_rows(self):\n # if board is full, then there will be a '#' in the first row\n if '#' in self.board[0]:\n return 'Game Over! Top has been reached.'\n for row in xrange(self.height):\n # if any given row is full, then that row won't have any blank spaces\n if not ' ' in self.board[row]:\n del self.board[row]\n self.board.insert(0, [' '] * self.width)", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def autoreveal_empty_spaces(self, position):\n revealed = []\n zero_spaces = []\n check_stack = [position]\n checked = []\n\n while len(check_stack) > 0:\n pos = x, y = check_stack.pop()\n if self.get_num_mines_around_position(x, y) == 0:\n zero_spaces.append(pos)\n \n # Add spaces around\n for ay in range(y-1, y+2):\n for ax in range(x-1, x+2):\n if ay >= 0 and ax >= 0 and ay < len(self.mine_map) and ax < len(self.mine_map[ay]): # Don't check spaces that are outside of the array\n apos = ax, ay\n if apos not in checked:\n check_stack.append(apos)\n revealed.append(apos)\n checked.append(pos)\n \n self.revealed.extend(revealed)", "def clean(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n if x < -self.gap:\n self.del_asteroid(i)", "def ignorableWhitespace(self, data):\n pass", "def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)", "def check_for_empty(self):\n return ' ' in self.game_board", "def _area_is_empty(self, screen: Screen, write_position: WritePosition) -> bool:\n wp = write_position\n\n for y in range(wp.ypos, wp.ypos + wp.height):\n if y in screen.data_buffer:\n row = screen.data_buffer[y]\n\n for x in range(wp.xpos, wp.xpos + wp.width):\n c = row[x]\n if c.char != \" \":\n return False\n\n return True", "def blank(data):\n return data['crop']*0;", "def _find_empty_cell(self):\n\n for r, row in enumerate(self._board):\n for c, cell in enumerate(row):\n if cell is None:\n return r, c", "def ignore_whitespaces(self):\n\n whitespaces = [' ', '\\t', '\\n', '\\r']\n while self.index < self.length and self.xtext[self.index] in whitespaces:\n self.index += 1", "def is_blank(self):\n return not any(self._1 in _row for _row in self._pixels)", "def remove_checker(self, col):\n for r in range(self.height):\n if self.slots[r][col] != ' ':\n self.slots[r][col] = ' '\n break", "def reveal_blank_cells(self, cellObject):\n blankcells = [cellObject]\n investigated = []\n while len(blankcells) != 0: # while more blank cells to expose\n cell = blankcells[0]\n blankcells.remove(cell)\n investigated.append(cell)\n coord = cell.get_coord()\n for h in range(max(0, coord[0] - 1), min(coord[0] + 2, self.height)): # loop through adjacent cells\n for w in range(max(0, coord[1] - 1), min(coord[1] + 2, self.width)):\n index = self.cellCoords.index((h, w))\n targetcell = self.nonBombcells[index]\n targetcell.auto_expose()\n if targetcell.is_blank() and targetcell not in investigated: # found new blank cell\n blankcells.append(targetcell)", "def clean(self):\n self.board_values = np.zeros((self.size, self.size))\n self.tiles_taken[:, :] = 
False", "def empty(self):\n return [cell for cell in self.compact if not cell.peg]", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()", "def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)", "def _empty_cell(self, i_row, i_col):\n return self._board[i_row][i_col] == \" \"", "def empty(self):", "def calc_empty(self):\n empty = 0\n for x in range(0, self.tot_col):\n for y in range(1, self.tot_rows + 1):\n if self.file_list[y][x] == '':\n empty += 1\n #print(csv_list[y][x] + ' %s %s' % (x, y))\n return empty", "def clear_unknowns(td):\n td.unknown = 0\n td.unknown1 = 0\n td.unknown2 = 0\n # td.unknown3 = 0 # light orientation.", "def uncover(self, loc: tuple[int, int]) -> None:\n self.field.uncover(loc)\n\n if self.field.is_triggered():\n self.game_over = True\n [queue.clear() for queue in\n (self.clear_queue, self.auto_queue, self.hyper_queue)]\n self.status_label.config(text=GAME_OVER_MSG)\n return\n\n self.safes_left -= 1\n\n if self.field.is_all_clear():\n self.win = True\n self.status_label.config(text=ALL_CLEAR_MSG)\n\n if loc in self.clear_queue:\n self.clear_queue.remove(loc)\n\n if self.field[loc].surrounding_mines == 0:\n block = Block(self.field, loc)\n self.clear_queue.add_batch(block.unknown_neighbors,\n emphasis=self.emphasis[\"add_batch\"],\n color=\"new_clear\")\n elif self.auto_solving.get():\n block = Block(self.field, loc)\n useful_neighbors = block.naked_neighbors\n useful_neighbors.add(loc)\n [self.hyper_queue.remove(cell) for cell in useful_neighbors]\n self.auto_queue.add_batch(useful_neighbors,\n emphasis=self.emphasis[\"add_batch\"],\n color=\"new_auto\")\n self.auto_queue.clean_up(emphasis=self.emphasis[\"redundant\"])\n self.hyper_queue.clean_up(emphasis=self.emphasis[\"redundant\"])\n\n if not self.clear_queue.is_busy:\n self.clear_queue.is_busy = True\n self.process(self.clear_queue)", "def skipWhiteSpace(self):\n pass", "def get_empty_cells(board):\n empty_cells = [idx for idx, e in enumerate(board) if e == ' ']\n return empty_cells", "def PartiallyEmpty(self):\n return None==self.piecesToRecover", "def get_empty_positions(self):\n\n empty_positions = []\n\n for i in range(self._dimension):\n for j in range(self._dimension):\n if self._board[i][j] == ' ':\n empty_positions.append((i, j))\n\n return empty_positions", "def getBlank(self):\n return self.tiles[-1]" ]
[ "0.64885885", "0.6289629", "0.6288845", "0.6187194", "0.61361164", "0.612752", "0.6098215", "0.6069186", "0.5969753", "0.5950767", "0.5934857", "0.59044385", "0.5894175", "0.5876831", "0.5842342", "0.58088094", "0.57956934", "0.5768279", "0.5730342", "0.5727449", "0.57215166", "0.5716021", "0.5714944", "0.5677564", "0.56754816", "0.56673265", "0.5650426", "0.56432295", "0.56335384", "0.56327075" ]
0.65439826
0
Returns load balancer name for the current environment.
def get_balancer_name(self):
    return '{}-{}'.format(
        self.config['namespace'],
        self.get_current_env(),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_balancer_name(self) -> str:\n return pulumi.get(self, \"load_balancer_name\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def environment_label(self) -> str:\n return self._environment_label", "def load_balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def name(self):\n return self._env_name", "def load_balancer_id(self):\n return self._load_balancer_id", "def get_name(self):\n \n return 'Loop-Back'", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']", "def get_name():\n return config.APP_NAME", "def load_balancer_type(self) -> Optional[pulumi.Input['CloudRunConfigLoadBalancerType']]:\n return pulumi.get(self, \"load_balancer_type\")", "def balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"balancer_id\")", "def get_elb_name ( base_name, app_name ) :\n max_len = 32\n name = base_name + '-' + app_name.upper( ) + '-LB'\n if len( name ) > max_len :\n name = base_name + '-' + app_name.upper( )\n if len( name ) > max_len :\n raise NameError( 'ELB Name ' + name + ' exceeds limit of ' + str( max_len ) )\n\n return name", "def get_balancer_dns(self):\n return self.get_balancer_info()['DNSName']", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def load_balancer_profile(self) -> Optional[pulumi.Input['LoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")", "def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")", "def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None", "def getJobName():\n return os.environ['LCATR_JOB']", "def tracing_name(name: Optional[str] = None) -> str:\n if name is None:\n name = settings.SERVICE_NAME\n return f\"{name}.{settings.ENVIRONMENT.lower()}\"", "def describe_balancer(ctx):\n data = self.get_balancer_info()\n if data is not None:\n ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} does not exist.'.format(self.get_balancer_name()))", "def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")", "def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")", "def get_load_name(self, labware_id: str) -> str:\n definition = self.get_definition(labware_id)\n 
return definition.parameters.loadName" ]
[ "0.8761447", "0.71948916", "0.71948916", "0.71948916", "0.7018782", "0.6654602", "0.66361946", "0.6568133", "0.6568133", "0.65211004", "0.6516881", "0.64701533", "0.64618856", "0.6360919", "0.62249076", "0.6202724", "0.6179302", "0.61681116", "0.61583096", "0.6115745", "0.60407245", "0.602952", "0.60132617", "0.5965234", "0.59335124", "0.5863235", "0.5843292", "0.58188903", "0.58188903", "0.5778203" ]
0.8589218
1
Returns load balancer security group short name.
def get_security_group_short_name(self):
    return self.config['security_group']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_target_group_fully_qualified_name(self, short_name):\n return '{}-{}'.format(\n self.get_balancer_name(),\n short_name,\n )", "def name(self):\n return f\"{self._group.friendly_name} {GROUP_SUFFIX}\"", "def server_group_name(self) -> str:\n return pulumi.get(self, \"server_group_name\")", "def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")", "def get_lb_secgrp_name ( base_name, app_name ) :\n return get_secgrp_name( base_name, get_lb_secgrp_type( app_name ) )", "def get_target_group_name(self, short_name):\n app_env = self.get_current_env()\n full_name = self.get_target_group_fully_qualified_name(short_name)\n namespace = self.config['namespace']\n\n if len(full_name) <= 32:\n return full_name\n elif len(namespace) + 10 <= 32:\n env_target_hash = hashlib.md5((short_name + app_env).encode()).hexdigest()[:9]\n return '{}-{}'.format(namespace, env_target_hash)\n else:\n return hashlib.md5(full_name.encode()).hexdigest()", "def subnet_group_name(self) -> str:\n return pulumi.get(self, \"subnet_group_name\")", "def resource_group_name(self) -> str:\n return pulumi.get(self, \"resource_group_name\")", "def _get_sg_name(sg_name, session):\n return session.resource(\"ec2\").SecurityGroup(sg_name).group_name", "def get_group_name(self):\n return self.groupname", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def get_sg_name(ec2,s_id):\n name = \"\"\n try:\n security_group = ec2.SecurityGroup(s_id)\n # name = security_group.description\n name = security_group.group_name\n except:\n name = s_id\n\n return name", "def subnet_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subnet_group_name\")", "def ad_group_name(self):\n\n return self._ad_group_name", "def ad_group_name(self):\n\n return self._ad_group_name", "def get_resource_group_name(self):\n return self.instance_metadata.resource_group_name", "def group_name(self):\n\n return self._group_name", "def _group_name(cls, group=None):\n suffix = f\"{cls.__module__}.{cls.__qualname__}\"\n if group is not None:\n suffix += \"-\" + group\n\n # Wrap the suffix into SHA256 to guarantee that the length of\n # the group name is limited. 
Otherwise Channels will complain\n # about that the group name is wrong (actually is too long).\n suffix_sha256 = hashlib.sha256()\n suffix_sha256.update(suffix.encode(\"utf-8\"))\n\n return f\"{GraphqlWsConsumer.group_name_prefix}-{suffix_sha256.hexdigest()}\"", "def get_resource_group_name(self) -> str:\n # read the original value passed by the command\n resource_group_name = self.raw_param.get(\"resource_group_name\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return resource_group_name" ]
[ "0.73476905", "0.69869596", "0.6923902", "0.6921061", "0.69105417", "0.68615884", "0.68233114", "0.6768074", "0.67503476", "0.67094326", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6699743", "0.6653211", "0.66195184", "0.66156757", "0.66156757", "0.66156715", "0.65750945", "0.6573473", "0.65446216" ]
0.8458865
0
Returns the unique name of the target group for the current environment. The difference between this function and `get_target_group_fully_qualified_name` is that fits the name into 32 characters.
def get_target_group_name(self, short_name):
    app_env = self.get_current_env()
    full_name = self.get_target_group_fully_qualified_name(short_name)
    namespace = self.config['namespace']
    if len(full_name) <= 32:
        return full_name
    elif len(namespace) + 10 <= 32:
        env_target_hash = hashlib.md5((short_name + app_env).encode()).hexdigest()[:9]
        return '{}-{}'.format(namespace, env_target_hash)
    else:
        return hashlib.md5(full_name.encode()).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def target_group_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group_identifier\")", "def get_target_group_fully_qualified_name(self, short_name):\n return '{}-{}'.format(\n self.get_balancer_name(),\n short_name,\n )", "def target_group_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_group_identifier\")", "def target_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group\")", "def target_group_identifier(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group_identifier\")", "def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")", "def group_name(self):\n\n return self._group_name", "def _group_name(cls, group=None):\n suffix = f\"{cls.__module__}.{cls.__qualname__}\"\n if group is not None:\n suffix += \"-\" + group\n\n # Wrap the suffix into SHA256 to guarantee that the length of\n # the group name is limited. Otherwise Channels will complain\n # about that the group name is wrong (actually is too long).\n suffix_sha256 = hashlib.sha256()\n suffix_sha256.update(suffix.encode(\"utf-8\"))\n\n return f\"{GraphqlWsConsumer.group_name_prefix}-{suffix_sha256.hexdigest()}\"", "def target_group(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group\")", "def get_group_name(self):\n return self.groupname", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def name(self):\n return f\"{self._group.friendly_name} {GROUP_SUFFIX}\"", "def group_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"group_name\")", "def group_name(self):\n return \"device-%s\" % self.id", "def log_group_name(self):\n return self._get_param(CW_LOGS_CFN_PARAM_NAME)", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def managed_network_group_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"managed_network_group_name\")", "def server_group_name(self) -> str:\n return pulumi.get(self, \"server_group_name\")" ]
[ "0.7647652", "0.74390054", "0.7291971", "0.7287171", "0.7284457", "0.70294935", "0.69971144", "0.6987528", "0.693263", "0.6911461", "0.68637323", "0.68637323", "0.68078226", "0.6770091", "0.66889703", "0.6640256", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.66363704", "0.6581339", "0.6552565" ]
0.8356981
0
Returns target groups configuration.
def get_target_groups_config(self):
    return self.config['target_groups']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_target_groups_info(self):\n target_groups_config = self.get_target_groups_config()\n groups_info = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n data = self.get_target_group_info(short_name)\n if data is not None:\n groups_info[target_group_name] = data\n\n return groups_info", "def describe_target_groups(ctx):\n data = self.get_target_groups_info()\n ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def target_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group\")", "def get_targetgroups(self):\r\n result = {}\r\n for row in self._db().select(self._db.targetgroup.ALL):\r\n result[row.id] = {}\r\n result[row.id][\"data\"] = dict(row)\r\n result[row.id][\"members\"] = []\r\n try:\r\n members = result[row.id][\"data\"]['targets']\r\n for member in json.loads(members):\r\n member_data = self._db(self._db.target.id==int(member)\r\n ).select().first()\r\n result[row.id][\"members\"].append(dict(member_data))\r\n except:\r\n result[row.id][\"members\"] = []\r\n return result", "def authenticator_groups_config(self) -> 'outputs.AuthenticatorGroupsConfigResponse':\n return pulumi.get(self, \"authenticator_groups_config\")", "def targets(self): # type: () -> t.List[HostConfig]\n return self.host_settings.targets", "def get_target_pool_configs(self):\n return self.compute.targetPools().get(\n project=self.project,\n region=self.region,\n targetPool=self.target_pool_name).execute()", "def _config_table(self):\n return self.targets", "def create_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n vpc_id = self.get_vpc_id()\n response_data = {}\n\n for short_name in target_groups_config.keys():\n target_group_name = self.get_target_group_name(short_name)\n\n if self.target_group_exists(short_name):\n self.logger.info(f'Target group {target_group_name} exists, skipping creation.')\n continue\n\n response = self.client.create_target_group(\n Name=target_group_name,\n VpcId=vpc_id,\n **target_groups_config[short_name],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n self.logger.info(f'Target group {target_group_name} created.')\n response_data[target_group_name] = response['TargetGroups']\n\n return response_data", "def get_groups(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return []\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n groups = []\n for group in config_json[\"groups\"]:\n groups.append(group[\"name\"])\n return groups", "def target_group(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group\")", "def groups(self):\n\n group_config = {}\n\n # legacy way of threating any dict as a potential\n # group config (pre #44 implementation)\n # supported until vaping 2.0\n\n for k,v in list(self.config.items()):\n if isinstance(v, collections.Mapping):\n group_config[k] = v\n\n # explicit groups object (#44 implementation)\n\n for 
_group_config in self.config.get(\"groups\",[]):\n group_config[_group_config[\"name\"]] = _group_config\n\n return group_config", "def forward_group_configs(self) -> Sequence['outputs.GetRulesRuleRuleActionForwardGroupConfigResult']:\n return pulumi.get(self, \"forward_group_configs\")", "def _get_check_groups(self, group=None):\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups", "def target_pools(self) -> pulumi.Output[Optional[List[str]]]:\n return pulumi.get(self, \"target_pools\")", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def get_registered_analysis_targets() -> Dict[str, Any]:\n result: Dict[str, Any] = {}\n for groupindex in config.targets:\n result[groupindex] = {}\n result[groupindex][\"path\"] = os.path.dirname(config.targets[groupindex])\n result[groupindex][\"projects\"] = get_analysis_target_index(groupindex)\n return result", "def forward_group_configs(self) -> Sequence['outputs.GetListenersListenerDefaultActionForwardGroupConfigResult']:\n return pulumi.get(self, \"forward_group_configs\")", "def forward_group_config(self) -> 'outputs.ListenerDefaultActionForwardGroupConfig':\n return pulumi.get(self, \"forward_group_config\")", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def delete_target_groups(self):\n target_groups_config = self.get_target_groups_config()\n\n for short_name in target_groups_config.keys():\n if not self.target_group_exists(short_name):\n self.logger.info('Target group {} does not exists, nothing to delete.'.format(\n self.get_target_group_name(short_name)\n ))\n continue\n\n response = self.client.delete_target_group(\n TargetGroupArn=self.get_target_group_arn(short_name)\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n self.logger.info('Target group {} deleted.'.format(self.get_target_group_name(short_name)))", "def grouping_configuration(self) -> Optional['outputs.GroupingConfigurationResponse']:\n return pulumi.get(self, \"grouping_configuration\")", "def health_check_configs(self) -> Sequence['outputs.GetServerGroupsGroupHealthCheckConfigResult']:\n return pulumi.get(self, \"health_check_configs\")", "def get_target_group_info(self, short_name):\n try:\n response = self.client.describe_target_groups(\n Names=[self.get_target_group_name(short_name)],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['TargetGroups'][0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {balancer} target group {group}.'.format(\n balancer=self.get_balancer_name(),\n group=self.get_target_group_name(short_name)\n ))\n return None" ]
[ "0.74112564", "0.672198", "0.65918154", "0.65918154", "0.65918154", "0.65918154", "0.65918154", "0.65918154", "0.6472177", "0.6409221", "0.6371832", "0.63339555", "0.63301975", "0.6296605", "0.6258392", "0.61293525", "0.60561275", "0.60237026", "0.60043335", "0.6001119", "0.59922755", "0.5928959", "0.5925248", "0.5914482", "0.5908503", "0.58890706", "0.584116", "0.582836", "0.57964444", "0.5793261" ]
0.9180246
0
Returns load balancer details for the current environment.
def get_balancer_info(self):
    try:
        response = self.client.describe_load_balancers(
            Names=[self.get_balancer_name()],
        )
        assert response['ResponseMetadata']['HTTPStatusCode'] == 200

        vpc_id = self.get_vpc_id()
        balancers = [balancer for balancer in response['LoadBalancers']
                     if balancer['VpcId'] == vpc_id]

        return balancers[0]
    except ClientError:
        self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe_balancer(ctx):\n data = self.get_balancer_info()\n if data is not None:\n ctx.info('Load balancer {} details:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} does not exist.'.format(self.get_balancer_name()))", "def load_balancer_name(self) -> str:\n return pulumi.get(self, \"load_balancer_name\")", "def get_local_lbs(self):\r\n mask = ('mask[loadBalancerHardware[datacenter],ipAddress]')\r\n return self.account.getAdcLoadBalancers(mask=mask)", "def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_profile(self) -> Optional[pulumi.Input['LoadBalancerProfileArgs']]:\n return pulumi.get(self, \"load_balancer_profile\")", "def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def get_elbs(elbclient):\r\n try:\r\n resp = elbclient.describe_load_balancers()\r\n return list(map(\r\n lambda x:x['LoadBalancerName'],\r\n resp['LoadBalancerDescriptions']\r\n ))\r\n except Exception as ex:\r\n print(ex.message)\r\n return None", "def load_balancer_id(self):\n return self._load_balancer_id", "def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']", "def get(self, request):\n conn = get_sdk_connection(request)\n lb_list = _sdk_object_to_list(conn.load_balancer.load_balancers(\n project_id=request.user.project_id))\n if request.GET.get('full') and neutron.floating_ip_supported(request):\n add_floating_ip_info(request, lb_list)\n return {'items': lb_list}", "def load_balancer_billing_configs(self) -> Sequence['outputs.GetLoadBalancersBalancerLoadBalancerBillingConfigResult']:\n return pulumi.get(self, \"load_balancer_billing_configs\")", "def http_load_balancing(self) -> Optional[pulumi.Input['HttpLoadBalancingArgs']]:\n return pulumi.get(self, \"http_load_balancing\")", "def get_load_balancer_ip(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n\n lb_ip = cluster[\"load_balancers\"][0][\"ip\"]\n return lb_ip", "def load_balancing(self) -> pulumi.Output['outputs.FrontdoorOriginGroupLoadBalancing']:\n return pulumi.get(self, \"load_balancing\")", "def load_balancing(self) -> pulumi.Input['FrontdoorOriginGroupLoadBalancingArgs']:\n return pulumi.get(self, \"load_balancing\")", "def load_balancer_type(self) -> Optional[pulumi.Input['CloudRunConfigLoadBalancerType']]:\n return pulumi.get(self, \"load_balancer_type\")", "def list_loadbalancers(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-loadbalancers option\"\n )\n\n ret = {}\n conn = get_conn()\n datacenter = get_datacenter(conn)\n\n for item in conn.list_loadbalancers(datacenter[\"id\"])[\"items\"]:\n lb = {\"id\": item[\"id\"]}\n lb.update(item[\"properties\"])\n ret[lb[\"name\"]] = lb\n\n return ret", "def load_balancer_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def create_balancer(self):\n app_env = self.get_current_env()\n balancer_name = self.get_balancer_name()\n 
subnet_ids = self.get_subnet_ids()\n\n response = self.client.create_load_balancer(\n Name=balancer_name,\n Subnets=subnet_ids,\n SecurityGroups=[self.get_security_group_id(self.get_security_group_short_name())],\n Scheme='internet-facing',\n Tags=[\n {\n 'Key': 'chops-aws-project',\n 'Value': self.get_aws_project_name(),\n },\n {\n 'Key': 'environment',\n 'Value': app_env,\n },\n ],\n Type='application',\n IpAddressType='ipv4',\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['LoadBalancers'][0]", "def load_balancing(self) -> Optional[pulumi.Input['FrontdoorOriginGroupLoadBalancingArgs']]:\n return pulumi.get(self, \"load_balancing\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancer_id\")", "def get_balancer_dns(self):\n return self.get_balancer_info()['DNSName']", "def get(self, request, loadbalancer_id):\n conn = get_sdk_connection(request)\n loadbalancer = conn.load_balancer.find_load_balancer(loadbalancer_id)\n loadbalancer_dict = _get_sdk_object_dict(loadbalancer)\n if request.GET.get('full') and neutron.floating_ip_supported(request):\n add_floating_ip_info(request, [loadbalancer_dict])\n return loadbalancer_dict", "def bgp_service_load_balancer_configuration(self) -> Optional[pulumi.Input['BgpServiceLoadBalancerConfigurationArgs']]:\n return pulumi.get(self, \"bgp_service_load_balancer_configuration\")", "def find_elb ( elb_conn, elb_name ) :\n try :\n elb_r = elb_conn.get_all_load_balancers( load_balancer_names = [ elb_name ] )\n if len( elb_r ) > 0 :\n return elb_r[ 0 ]\n except :\n return None" ]
[ "0.72443384", "0.7015506", "0.6776904", "0.66980827", "0.6632059", "0.6632059", "0.6632059", "0.6629249", "0.6578861", "0.6483858", "0.63487846", "0.6314404", "0.6303139", "0.6239039", "0.6202509", "0.6192813", "0.61762923", "0.6086847", "0.60727555", "0.6039273", "0.6030724", "0.6002725", "0.59836996", "0.5918922", "0.5914119", "0.5914119", "0.5913092", "0.59038126", "0.5890126", "0.58896977" ]
0.7646479
0
Returns whether load balancer exists in the current environment.
def balancer_exists(self):
    return self.get_balancer_info() is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_load_balancer_enabled(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n return cluster[\"environment\"][\"sg_lb_enabled\"]", "def lb_lookup(session, lb_name):\n if session is None:\n return None\n\n lb_name = lb_name.replace('.', '-')\n\n client = session.client('elb')\n response = client.describe_load_balancers()\n\n for i in range(len(response['LoadBalancerDescriptions'])):\n if (response['LoadBalancerDescriptions'][i]['LoadBalancerName']) == lb_name:\n return True\n return False", "def validate_load_balancer(self, is_prod: bool = True) -> bool:\n if is_prod:\n env = \"prod\"\n else:\n env = \"dev\"\n\n response = self.autoscaling.describe_load_balancer_target_groups(\n AutoScalingGroupName=f'saints-xctf-server-{env}-asg'\n )\n\n load_balancers = response.get('LoadBalancerTargetGroups')\n\n return all([\n len(load_balancers) == 2,\n load_balancers[0].get('State') == 'InService',\n 'targetgroup/saints-xctf-lb-target-http' in load_balancers[0].get('LoadBalancerTargetGroupARN'),\n load_balancers[1].get('State') == 'InService',\n 'targetgroup/saints-xctf-lb-target' in load_balancers[1].get('LoadBalancerTargetGroupARN'),\n ])", "def is_load_balancer_with_two_clusters_enabled(cluster_config):\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"two_sg_cluster_lb_enabled\"]\n except KeyError:\n return False", "def prod_load_balancer_running(self) -> None:\n self.assertTrue(self.validate_load_balancer(is_prod=self.prod_env))", "def exists(_env):\n return True", "def exists(_env):\n return True", "def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )", "def in_runtime(self):\n\n return self.is_valid_platform() and self['ENVIRONMENT']", "def contains_addr(self, addr):\n return self.find_loadable_containing(addr) is not None", "def is_active(self):\n if self.load_status == \"I\":\n return True\n return False", "def has(self) -> bool:\n\n return self.scopefunc() in self.registry", "def has_app_name(self, name):\n return name in self._proxies.keys()", "def load_balanced(self) -> Optional[bool]:\n return self._load_balanced", "def available(self) -> bool:\n return self._router.available", "def exists(self, url):\n return (self.base_path / url).exists()", "def is_env_active():\n\n if sys.prefix == sys.base_prefix:\n print(\"Virtual environment is not active, exiting...\\n\")\n sys.exit(1)\n\n print(\"Virtual environment is active, proceeding...\\n\")", "def exists(self):\n return bool(get_zone_by_name(self.get_name(refresh=False)))", "def is_loaded(self):\n return os.path.exists(IPMIService.IPMI_DEV)", "def is_existing(self):\n return self.backend.is_existing", "def load_balancing(self) -> Optional[pulumi.Input['FrontdoorOriginGroupLoadBalancingArgs']]:\n return pulumi.get(self, \"load_balancing\")", "def has_pool ( self ):\n return self._poolstack", "def is_config_exist(self) -> bool:\n pass", "def exists(self):\n return True", "def exists(self):\n return True", "def is_config_exist(self) -> bool:\n return True", "def _is_ha_config(hdfs_site):\n name_services = hdfs_site.get('dfs.nameservices', None)\n if name_services:\n for ns in name_services.split(\",\"):\n if hdfs_site.get('dfs.ha.namenodes.'+ns):\n return True\n return False", "def is_bound(proxy):\n try:\n current_object(proxy)\n except UnboundProxyError:\n return False\n else:\n return True", "def load_balancer_type(self) -> 
Optional[pulumi.Input['CloudRunConfigLoadBalancerType']]:\n return pulumi.get(self, \"load_balancer_type\")", "def _pool_exists(self, client_id, pool_name):\n pools = self.__pools.get(client_id, [])\n for pool in pools:\n if pool.name == pool_name:\n return True\n return False" ]
[ "0.695405", "0.67182565", "0.6568076", "0.6403442", "0.6346334", "0.5996281", "0.5996281", "0.59851", "0.59643495", "0.5877768", "0.5831705", "0.58173513", "0.58082914", "0.5740544", "0.5653013", "0.5634984", "0.5630054", "0.5595231", "0.5571937", "0.55607104", "0.55493975", "0.5547547", "0.55383825", "0.55271876", "0.55271876", "0.5515096", "0.5508916", "0.5507812", "0.5504977", "0.5495199" ]
0.82090664
0
Returns load balancer ARN for the current environment.
def get_balancer_arn(self):
    return self.get_balancer_info()['LoadBalancerArn']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def load_balancer_name(self) -> str:\n return pulumi.get(self, \"load_balancer_name\")", "def load_balancer_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def load_balancer_id(self) -> str:\n return pulumi.get(self, \"load_balancer_id\")", "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "def config_rule_arn(self) -> str:\n return pulumi.get(self, \"config_rule_arn\")", "def config_rule_arn(self) -> str:\n return pulumi.get(self, \"config_rule_arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def arn(self) -> str:\n return pulumi.get(self, \"arn\")", "def get_ssl_certificate_arn(environment):\n name = Constants['SslCertificateName'][environment]\n\n certificates = ACM.list_certificates(CertificateStatuses=[ 'ISSUED' ])['CertificateSummaryList']\n arns = [ c['CertificateArn'] for c in certificates if c['DomainName'] == name ]\n\n if len(arns) == 0:\n raise Exception('Missing certificate %s on AWS. Please create it and then re-run this script.' % name)\n\n return arns[0]", "def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")", "def arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"arn\")" ]
[ "0.69737446", "0.67671096", "0.63797647", "0.62352544", "0.62352544", "0.62352544", "0.6182437", "0.6182437", "0.61434364", "0.61434364", "0.61218286", "0.61218286", "0.61218286", "0.61218286", "0.61218286", "0.604468", "0.60387963", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735", "0.5940735" ]
0.76236105
0
Returns load balancer DNS name for the current environment.
def get_balancer_dns(self):
    return self.get_balancer_info()['DNSName']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_balancer_name(self):\n return '{}-{}'.format(\n self.config['namespace'],\n self.get_current_env(),\n )", "def load_balancer_name(self) -> str:\n return pulumi.get(self, \"load_balancer_name\")", "def domain_dns_name(self):\n domain_dn = self.get_default_basedn()\n return domain_dn.canonical_str().split('/')[0]", "def service_dns_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_dns_name\")", "def host_dns_name(self):\n res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])\n return str(res[0]['dNSHostName'][0])", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def forest_dns_name(self):\n forest_dn = self.get_root_basedn()\n return forest_dn.canonical_str().split('/')[0]", "def service_dns_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_dns_name\")", "def get_hostname():\n return re.split(\"\\.\", env.host)[0]", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def get_hostname() -> str:\n if config.config is not None and \"hostname\" in config.config.get(\"base\", dict()):\n return config.config[\"base\"][\"hostname\"]\n\n return socket.gethostname().split(\".\")[0]", "def get_hostname(self):\n return self.name", "def name(self):\n return self._env_name", "def get_domain_name(self):\n return self.domain_name.get_text()", "def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)", "def ad_fqdn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ad_fqdn\")", "def get_host_name():\n return socket.gethostname()", "def get_daemon_name(cls):\n\n return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY]", "def db_proxy_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_proxy_name\")", "def bucket_dual_stack_domain_name(self) -> str:\n ...", "def gethostname():\n if socket.gethostname().find('.') >= 0:\n host = socket.gethostname()\n else:\n host = socket.gethostbyaddr(socket.gethostname())[0]\n return host", "def get_server_name(self):\n configured_value = self.charm_config[\"server-name\"]\n if configured_value:\n return configured_value\n else:\n fqdn = socket.getfqdn()\n return fqdn", "async def get_hostname(self):\n ngc = await self.middleware.call('network.configuration.config')\n if 'hostname_virtual' in ngc:\n failover_status = await self.middleware.call('failover.status')\n if failover_status == 'MASTER':\n return ngc['hostname_virtual']\n elif failover_status == 'BACKUP':\n return None\n else:\n return ngc['hostname_local']", "def dns_fqdn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"dns_fqdn\")", "def getDbHostName():\n\n if \"DB_HOST\" in controller.CONF.keys():\n return controller.CONF[\"DB_HOST\"]\n\n return basedefs.DB_HOST", "def get_fqdn():\n return socket.getfqdn()", "def host_name(self) -> str:\n return self._values.get('host_name')" ]
[ "0.76529366", "0.75585383", "0.7015553", "0.6898756", "0.6807746", "0.6806018", "0.6806018", "0.67674685", "0.67267495", "0.66026753", "0.6525063", "0.6525063", "0.6525063", "0.650673", "0.64512", "0.6416112", "0.6397409", "0.63273877", "0.63170844", "0.63055694", "0.6283905", "0.62815225", "0.62589467", "0.6227177", "0.6223552", "0.6213416", "0.61971134", "0.6196886", "0.6183583", "0.61586684" ]
0.75997025
1
Returns all target groups details for the current environment.
def get_target_groups_info(self):
    target_groups_config = self.get_target_groups_config()
    groups_info = {}

    for short_name in target_groups_config.keys():
        target_group_name = self.get_target_group_name(short_name)
        data = self.get_target_group_info(short_name)
        if data is not None:
            groups_info[target_group_name] = data

    return groups_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_target_groups_config(self):\n return self.config['target_groups']", "def describe_target_groups(ctx):\n data = self.get_target_groups_info()\n ctx.info('Target groups details for load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def get_targetgroups(self):\r\n result = {}\r\n for row in self._db().select(self._db.targetgroup.ALL):\r\n result[row.id] = {}\r\n result[row.id][\"data\"] = dict(row)\r\n result[row.id][\"members\"] = []\r\n try:\r\n members = result[row.id][\"data\"]['targets']\r\n for member in json.loads(members):\r\n member_data = self._db(self._db.target.id==int(member)\r\n ).select().first()\r\n result[row.id][\"members\"].append(dict(member_data))\r\n except:\r\n result[row.id][\"members\"] = []\r\n return result", "def getGroups():\r\n return Group.getGroups()", "def get_groups(self):\n return Client._get(self)", "def groups(self):\n return self.get_data(\"groups\")", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def get_pingroups(self):\n return self.groups[:]", "def get_all_groups(self):\n return self.groups + ['all']", "def get_target_group_info(self, short_name):\n try:\n response = self.client.describe_target_groups(\n Names=[self.get_target_group_name(short_name)],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n return response['TargetGroups'][0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {balancer} target group {group}.'.format(\n balancer=self.get_balancer_name(),\n group=self.get_target_group_name(short_name)\n ))\n return None", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return []", "def all_groups(self):\n return self._all_groups", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def list_groups(self):\n return self.get_admin(\"groups\")", "def get_environments(self):\n environments = list()\n for group in self._ncfile.groups:\n environments.append( str(group) )\n return environments", "def get_groups_using_technique():\n global groups_using_technique\n\n if not groups_using_technique:\n groups_using_technique = rsh.groups_using_technique(get_srcs())\n \n return groups_using_technique", "def target_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group\")", "def get_group_names(self):\r\n return self.groups.keys()", "def getSourceGroups(self):\n ret = self.jsonRequest(\"/api/v1/sourceGroup/getSourceGroups\", {})\n return ret", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def groups(self):\r\n return resources.Groups(self)" ]
[ "0.7435148", "0.7379804", "0.7054704", "0.67164725", "0.6578198", "0.6524327", "0.6510144", "0.6486742", "0.6458211", "0.6452521", "0.6412666", "0.63932836", "0.63932836", "0.63932836", "0.6388009", "0.63451225", "0.6304259", "0.6304259", "0.6304259", "0.6304259", "0.6304259", "0.6304259", "0.63020253", "0.6251115", "0.6239313", "0.62338483", "0.6230097", "0.62087953", "0.6178469", "0.6175364" ]
0.76235884
0
Returns whether target group exists in the current environment.
def target_group_exists(self, short_name):
    return self.get_target_group_info(short_name) is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False", "def is_group(self, group_name):\n\n return group_name in self._group", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def has_node_groups(self, namespace=None):\n try:\n return bool(self._source(namespace).reverse_upcall)\n except GroupResolverSourceError:\n return False", "def is_group(obj) -> bool:\n return hasattr(obj, IOConstants.GROUP_ATTR_NAME)", "def has_group(user, group_name):\n return user.groups.filter(name=group_name).exists()", "def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False", "def does_group_exist(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n sanitised_group = args.group.replace('/', '-')\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == sanitised_group:\n return 0\n \n return 1", "def group_exists(self, path_to_group, groupname):\n self.open_db()\n try:\n group = self.h5file.get_node(path_to_group,\n name=groupname)\n except tb.NoSuchNodeError:\n group = False\n return group", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def has_group_address(self, group_address):\n return self.switch.has_group_address(group_address)", "def is_group(self):\n # Implemented from template for osid.resource.Resource.is_group_template\n return self._my_map['group']", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def is_in_group(user, group_name):\n return is_in_group_user_id(user.id, group_name)", "def is_group(self):\n return self._is_group", "def is_group(group_name):\n\n try:\n r_json = requests.get(\n 'https://api.rozklad.org.ua/v2/groups/{}'.format(group_name)).json()\n message_text = r_json['message']\n if message_text == 'Ok':\n return True\n elif message_text == 'Group not found':\n return False\n else:\n logger.error(message_text)\n except ConnectionError as error_text:\n logger.error(error_text)\n except IndexError as error_text:\n logger.error(error_text)", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def check_group_pack(self, cr, uid, context=None):\n return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')", "def check_ldap_group_existence(group_id):\n endpoint = f\"/identities/groups/{group_id}\"\n http_response = call_rest_api(endpoint, \"head\", 
**config.DEFAULT_REST_KWARGS)\n if http_response.status_code == 200: # 200 = 'OK. Group exists.'\n return True\n return False", "def can_group(self) -> bool:\n return (\n self.all_icon is not None and\n self.all_name is not None\n )", "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def is_country_groups_link_present(self):\n return self.is_element_present(self.country_groups_locator)", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def security_group_exists(self, sg_id=None, name=None):\n if sg_id:\n return sg_id in [sg.id for sg in self.get_all_security_groups()]\n elif name:\n return name in [sg.name for sg in self.get_all_security_groups()]", "def belongs_to(self, group):\n return self in group.users", "def has_target(self):\n return self._has_target", "def has(self, target):\n return target in self.by_target" ]
[ "0.7729518", "0.7035008", "0.6886645", "0.6784272", "0.6755214", "0.67159396", "0.6622473", "0.65812206", "0.6573095", "0.64742124", "0.6403617", "0.6397194", "0.63116044", "0.6310388", "0.6310108", "0.62999773", "0.6253467", "0.62442166", "0.61922187", "0.61876094", "0.61838394", "0.6170511", "0.6160562", "0.61523956", "0.60185724", "0.6016597", "0.5945339", "0.59156185", "0.59070253", "0.58958846" ]
0.8102559
0
Returns target group ARN for the current environment.
def get_target_group_arn(self, short_name):
    target_group_info = self.get_target_group_info(short_name)
    return target_group_info['TargetGroupArn']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_group_arn(self) -> str:\n return jsii.get(self, \"logGroupArn\")", "def log_group_arn(self) -> str:\n return jsii.get(self, \"logGroupArn\")", "def group_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"group_arn\")", "def target_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group\")", "def target_group_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_group_identifier\")", "def log_group_arn(self) -> str:\n ...", "def target_group(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group\")", "def target_group_identifier(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"target_group_identifier\")", "def target_arn(self) -> str:\n return self._values.get('target_arn')", "def target_group_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_group_identifier\")", "def get_target_group_name(self, short_name):\n app_env = self.get_current_env()\n full_name = self.get_target_group_fully_qualified_name(short_name)\n namespace = self.config['namespace']\n\n if len(full_name) <= 32:\n return full_name\n elif len(namespace) + 10 <= 32:\n env_target_hash = hashlib.md5((short_name + app_env).encode()).hexdigest()[:9]\n return '{}-{}'.format(namespace, env_target_hash)\n else:\n return hashlib.md5(full_name.encode()).hexdigest()", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def resource_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_name\")", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def _get_target_group(self):\n return self.__target_group", "def resource_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group\")" ]
[ "0.7156454", "0.7156454", "0.68402165", "0.6673433", "0.6616204", "0.6345443", "0.61582327", "0.61299807", "0.6107626", "0.61024284", "0.60479337", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.59156257", "0.5854411", "0.5854411", "0.5854411", "0.5854411", "0.5854411", "0.5854411", "0.5846369" ]
0.75529647
0
Creates load balancer in the current environment.
def create_balancer(self):
    app_env = self.get_current_env()
    balancer_name = self.get_balancer_name()
    subnet_ids = self.get_subnet_ids()

    response = self.client.create_load_balancer(
        Name=balancer_name,
        Subnets=subnet_ids,
        SecurityGroups=[self.get_security_group_id(self.get_security_group_short_name())],
        Scheme='internet-facing',
        Tags=[
            {
                'Key': 'chops-aws-project',
                'Value': self.get_aws_project_name(),
            },
            {
                'Key': 'environment',
                'Value': app_env,
            },
        ],
        Type='application',
        IpAddressType='ipv4',
    )
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200

    return response['LoadBalancers'][0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(ctx):\n create_target_groups(ctx)\n create_balancer(ctx)\n create_listeners(ctx)\n\n ctx.info('Load balancers setup completed.')", "def create_balancer(ctx):\n if not self.balancer_exists():\n data = self.create_balancer()\n ctx.info('Successfully created load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)\n else:\n ctx.info('Load balancer {} already exists, nothing to create.'.format(\n self.get_balancer_name()\n ))", "def create_loadbalancer(self, context, lb):\n super(ArrayDeviceDriverV2, self).create_loadbalancer(context, lb)\n deployment_model = self._get_setting(\n lb.tenant_id, \"lbaas_settings\", \"deployment_model\"\n )\n if deployment_model == \"PER_LOADBALANCER\":\n self.update_loadbalancer(context, lb, None)", "def post(self, request):\n return create_loadbalancer(request)", "def create_loadbalancer(self, context, loadbalancer, driver_name):\n LOG.info(\"Received request 'Create Loadbalancer' for LB:%(lb)s \"\n \"with driver:%(driver_name)s\",\n {'lb': loadbalancer['id'],\n 'driver_name': driver_name})\n arg_dict = {'context': context,\n lb_const.LOADBALANCER: loadbalancer,\n 'driver_name': driver_name\n }\n self._send_event(lb_const.EVENT_CREATE_LOADBALANCER_V2, arg_dict,\n serialize=True, binding_key=loadbalancer['id'],\n key=loadbalancer['id'])", "def create(self, params):\n return self.make_client_call('create_load_balancer_policy', params)", "def ensure_load_balancer_created(vpc, security_group, subnet1, subnet2, target_group_arn, ssl_certificate_arn, environment):\n name = environment + '-load-balancer'\n\n # If it already exists, create returns the existing data\n response = ELB.create_load_balancer(\n Name=name,\n Subnets=[ subnet1.id, subnet2.id ],\n SecurityGroups=[ security_group.id ],\n IpAddressType='dualstack',\n Tags=[\n { 'Key': 'Name', 'Value': name },\n { 'Key': 'Environment', 'Value': environment }\n ]\n )\n\n load_balancer = response['LoadBalancers'][0]\n arn = load_balancer['LoadBalancerArn']\n\n # There seems to be no harm in creating listeners if they already exist\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{ 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n ELB.create_listener(\n LoadBalancerArn=arn,\n Protocol='HTTPS',\n Port=443,\n SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',\n Certificates=[ { 'CertificateArn': ssl_certificate_arn } ],\n DefaultActions=[ { 'Type': 'forward', 'TargetGroupArn': target_group_arn } ]\n )\n\n return load_balancer", "def get_create_load_balancer_flow(self, load_balancer_id, topology, project_id,\n listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(vthunder_tasks.VthunderInstanceBusy(\n requires=a10constants.COMPUTE_BUSY))\n\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n lb_create_flow.add(a10_database_tasks.CheckExistingVthunderTopology(\n requires=constants.LOADBALANCER,\n inject={\"topology\": topology}))\n\n # Attaching vThunder to LB in database\n if topology == constants.TOPOLOGY_ACTIVE_STANDBY:\n lb_create_flow.add(*self._create_active_standby_topology())\n LOG.info(\"TOPOLOGY === \" + str(topology))\n elif topology == constants.TOPOLOGY_SINGLE:\n lb_create_flow.add(*self._create_single_topology())\n LOG.info(\"TOPOLOGY === \" + 
str(topology))\n else:\n LOG.error(\"Unknown topology: %s. Unable to build load balancer.\",\n topology)\n raise exceptions.InvalidTopology(topology=topology)\n\n # IMP: Now creating vThunder config here\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n vthunder = self._vthunder_repo.get_vthunder_by_project_id(db_apis.get_session(),\n project_id)\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(\n self.get_post_lb_vthunder_association_flow(\n post_amp_prefix, load_balancer_id, topology, vthunder,\n mark_active=(not listeners)))\n lb_create_flow.add(a10_database_tasks.CountLoadbalancersWithFlavor(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER),\n provides=a10constants.LB_COUNT_FLAVOR))\n lb_create_flow.add(vthunder_tasks.AllowL2DSR(\n requires=(constants.SUBNET, constants.AMPHORA,\n a10constants.LB_COUNT_FLAVOR, constants.FLAVOR_DATA)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA)))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_flow=True))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_vthunder_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n\n return lb_create_flow", "def create_loadbalancer(call=None, kwargs=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"The create_address function must be called with -f or --function.\"\n )\n\n if kwargs is None:\n kwargs = {}\n\n conn = get_conn()\n datacenter_id = get_datacenter_id()\n loadbalancer = LoadBalancer(\n name=kwargs.get(\"name\"), ip=kwargs.get(\"ip\"), dhcp=kwargs.get(\"dhcp\")\n )\n\n response = conn.create_loadbalancer(datacenter_id, loadbalancer)\n _wait_for_completion(conn, response, 60, \"loadbalancer\")\n\n return response", "def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)", "def add(env, identifier, **args):\n\n mgr = SoftLayer.LoadBalancerManager(env.client)\n uuid, _ = mgr.get_lbaas_uuid_id(identifier)\n\n new_listener = {\n 'backendPort': args.get('backport'),\n 'backendProtocol': args.get('backprotocol') if args.get('backprotocol') else args.get('frontprotocol'),\n 'frontendPort': args.get('frontport'),\n 'frontendProtocol': args.get('frontprotocol'),\n 'loadBalancingMethod': args.get('method'),\n 'maxConn': args.get('connections', None),\n 'sessionType': args.get('sticky'),\n 
'tlsCertificateId': args.get('sslcert')\n }\n\n try:\n mgr.add_lb_listener(uuid, new_listener)\n click.secho(\"Success\", fg='green')\n except SoftLayerAPIError as exception:\n click.secho(f\"ERROR: {exception.faultString}\", fg='red')", "def create_listeners(ctx):\n data = self.create_listeners()\n ctx.info('Created listeners for load balancer {}:'.format(\n self.get_balancer_name()\n ))\n ctx.pp.pprint(data)", "def create_gwlb(gwlb_name, subnet_id_list):\n logging.info(f\"Creating gateway load balancer: {gwlb_name}\")\n waiter = elbv2.get_waiter('load_balancer_available')\n try:\n response = elbv2.create_load_balancer(\n Name=gwlb_name,\n Subnets=subnet_id_list,\n Tags=[{'Key': 'Name', 'Value': gwlb_name}],\n Type='gateway'\n )\n gwlb_arn = response['LoadBalancers'][0]['LoadBalancerArn']\n logging.info(\"Waiting for GWLB's state to change to available\")\n waiter.wait(\n LoadBalancerArns=[gwlb_arn],\n WaiterConfig={\n 'Delay': 15,\n 'MaxAttempts': 40\n }\n )\n return response, gwlb_arn\n except ClientError as e:\n logging.error(e)\n return None", "def create_elb(tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n elb_name=None, s3_logs_bucket=None,\n tls_priv_key=None, tls_fullchain_cert=None,\n region_name=None, dry_run=False):\n if not elb_name:\n elb_name = '%selb' % _clean_tag_prefix(tag_prefix)\n\n elb_client = boto3.client('elbv2', region_name=region_name)\n resp = elb_client.create_load_balancer(\n Name=elb_name,\n Subnets=[subnet['SubnetId'] for subnet in web_subnet_by_cidrs.values()\n if subnet],\n SecurityGroups=[\n moat_sg_id,\n ],\n Scheme='internet-facing',\n Type='application',\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix}])\n load_balancer = resp['LoadBalancers'][0]\n load_balancer_arn = load_balancer['LoadBalancerArn']\n load_balancer_dns = load_balancer['DNSName']\n LOGGER.info(\"%s found/created application load balancer %s available at %s\",\n tag_prefix, load_balancer_arn, load_balancer_dns)\n\n attributes = [{\n 'Key': 'deletion_protection.enabled',\n 'Value': 'true'\n }, {\n #pylint:disable=line-too-long\n #https://stackoverflow.com/questions/58848623/what-does-alb-consider-a-valid-header-field\n 'Key': 'routing.http.drop_invalid_header_fields.enabled',\n 'Value': 'true'\n }]\n if s3_logs_bucket:\n attributes += [{\n 'Key': 'access_logs.s3.enabled',\n 'Value': 'true'\n }, {\n 'Key': 'access_logs.s3.bucket',\n 'Value': s3_logs_bucket\n }, {\n 'Key': 'access_logs.s3.prefix',\n 'Value': 'var/log/elb'\n }]\n\n update_load_balancer_attributes = False\n resp = elb_client.describe_load_balancer_attributes(\n LoadBalancerArn=load_balancer_arn)\n for attr in attributes:\n for curr_attr in resp['Attributes']:\n if attr['Key'] == curr_attr['Key']:\n if attr['Value'] != curr_attr['Value']:\n update_load_balancer_attributes = True\n break\n if update_load_balancer_attributes:\n resp = elb_client.modify_load_balancer_attributes(\n LoadBalancerArn=load_balancer_arn,\n Attributes=attributes)\n LOGGER.info(\"%s updated attributes for load balancer %s\",\n tag_prefix, load_balancer_arn)\n else:\n LOGGER.info(\"%s found expected attributes for load balancer %s\",\n tag_prefix, load_balancer_arn)\n\n try:\n resp = elb_client.create_listener(\n LoadBalancerArn=load_balancer_arn,\n Protocol='HTTP',\n Port=80,\n DefaultActions=[{\n \"Type\": \"redirect\",\n \"RedirectConfig\": {\n \"Protocol\": \"HTTPS\",\n \"Port\": \"443\",\n \"Host\": \"#{host}\",\n \"Path\": \"/#{path}\",\n \"Query\": \"#{query}\",\n \"StatusCode\": \"HTTP_301\"\n }\n }])\n LOGGER.info(\"%s created HTTP application load 
balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'DuplicateListener':\n raise\n LOGGER.info(\"%s found HTTP application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n\n # We will need a default TLS certificate for creating an HTTPS listener.\n default_cert_location = None\n resp = elb_client.describe_listeners(\n LoadBalancerArn=load_balancer_arn)\n for listener in resp['Listeners']:\n if listener['Protocol'] == 'HTTPS':\n for certificate in listener['Certificates']:\n if 'IsDefault' not in certificate or certificate['IsDefault']:\n default_cert_location = certificate['CertificateArn']\n LOGGER.info(\"%s found default TLS certificate %s\",\n tag_prefix, default_cert_location)\n break\n if not default_cert_location:\n if tls_priv_key and tls_fullchain_cert:\n resp = _store_certificate(\n tls_fullchain_cert, tls_priv_key,\n tag_prefix=tag_prefix, region_name=region_name,\n dry_run=dry_run)\n default_cert_location = resp['CertificateArn']\n else:\n LOGGER.warning(\"default_cert_location is not set and there are no\"\\\n \" tls_priv_key and tls_fullchain_cert either.\")\n\n try:\n resp = elb_client.create_listener(\n LoadBalancerArn=load_balancer_arn,\n Protocol='HTTPS',\n Port=443,\n Certificates=[{'CertificateArn': default_cert_location}],\n DefaultActions=[{\n 'Type': 'fixed-response',\n 'FixedResponseConfig': {\n 'MessageBody': '%s ELB' % tag_prefix,\n 'StatusCode': '200',\n 'ContentType': 'text/plain'\n }\n }])\n LOGGER.info(\n \"%s created HTTPS application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'DuplicateListener':\n raise\n LOGGER.info(\"%s found HTTPS application load balancer listener for %s\",\n tag_prefix, load_balancer_arn)\n\n return load_balancer_arn", "def pre_loadbalancer_pool_create(self, resource_dict):\n pass", "def create(\n self,\n name, # type: str\n load_balancer_type, # type: LoadBalancerType\n algorithm=None, # type: Optional[LoadBalancerAlgorithm]\n services=None, # type: Optional[List[LoadBalancerService]]\n targets=None, # type: Optional[List[LoadBalancerTarget]]\n labels=None, # type: Optional[Dict[str, str]]\n location=None, # type: Optional[Location]\n network_zone=None, # type: Optional[str]\n public_interface=None, # type: Optional[bool]\n network=None # type: Optional[Union[Network,BoundNetwork]]\n ):\n # type: (...) 
-> CreateLoadBalancerResponse:\n data = {\"name\": name, \"load_balancer_type\": load_balancer_type.id_or_name}\n if network is not None:\n data[\"network\"] = network.id\n if public_interface is not None:\n data[\"public_interface\"] = public_interface\n if labels is not None:\n data[\"labels\"] = labels\n if algorithm is not None:\n data[\"algorithm\"] = {\"type\": algorithm.type}\n if services is not None:\n service_list = []\n for service in services:\n service_list.append(self.get_service_parameters(service))\n data[\"services\"] = service_list\n\n if targets is not None:\n target_list = []\n for target in targets:\n target_data = {\n \"type\": target.type,\n \"use_private_ip\": target.use_private_ip\n }\n if target.type == \"server\":\n target_data['server'] = {\"id\": target.server.id}\n elif target.type == \"label_selector\":\n target_data['label_selector'] = {\"selector\": target.label_selector.selector}\n elif target.type == \"ip\":\n target_data['ip'] = {\"ip\": target.ip.ip}\n target_list.append(target_data)\n\n data[\"targets\"] = target_list\n\n if network_zone is not None:\n data[\"network_zone\"] = network_zone\n if location is not None:\n data[\"location\"] = location.id_or_name\n\n response = self._client.request(url=\"/load_balancers\", method=\"POST\", json=data)\n\n return CreateLoadBalancerResponse(load_balancer=BoundLoadBalancer(self, response[\"load_balancer\"]),\n action=BoundAction(self._client.actions, response['action']))", "def process_load_balancer_in_dev ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n iam_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n params,\n aws_account_type,\n app_visibility = None,\n public_dns_cname = None,\n public_tcp_ports = [],\n app_tcp_ports = [],\n use_ssl = False,\n ssl_hostname = None\n ) :\n\n if not use_ssl : \n print( \"Do not create load balancer since use_ssl is False\" );\n return (None, None, None, None);\n \n\n if not app_name :\n app_name = params[ 'app-name' ]\n\n # in dev vpc, initialize local variables\n if use_ssl:\n app_visibility = 'PUBLIC'\n\n if not public_dns_cname :\n public_dns_cname = ssl_hostname\n\n if len( public_tcp_ports ) == 0 :\n public_tcp_ports = [443]\n \n if len( app_tcp_ports ) == 0 : \n app_tcp_ports = [8080]\n\n if app_visibility == 'PUBLIC' :\n subnet_type = 'PRIVATE' # Public apps have app LB's that sit private. 
The PROXY LB is public.\n elif app_visibility == 'HBO' :\n subnet_type = 'PUBLIC' # HBO apps have app LB's that site public.\n elif app_visibility == 'PRIVATE' :\n subnet_type = 'PRIVATE'\n else :\n subnet_type = params[ 'subnet-type' ]\n\n if not public_dns_cname :\n public_dns_cname = params.get( 'public-dns-alias' )\n\n create = params.get( 'create', 'NO' )\n if create == 'YES':\n print \"Creating load balancer security group.\"\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n if not lb_secgrp :\n lb_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, app_name ),\n 'Controls access to the ' + app_name + ' LB' )\n\n ## set deep as False, because there is no dev nat security group\n remove_all_rules( ec2_conn, [ lb_secgrp ] , deep=False, base_name=base_name)\n\n ## reload the security group after removing the rules\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n \n health_check_port = params.get( 'health-check-port', 8080 )\n health_check_url = params.get( 'health-check-url' )\n if not health_check_url :\n health_check_url = '/' + app_name + '/ping.html'\n\n ## Figure out if we need to find the SSL cert.\n ssl_cert_arn = None\n if use_ssl :\n cert = get_aws_ssl_certificate( iam_conn, ssl_cert_name )\n if cert :\n ssl_cert_arn = cert.arn\n else :\n print \"ERROR: Use SSL was specified, but could not find certificate matching host: \" + ssl_cert_name\n sys.exit( 5 )\n\n ## Generate the correct listener rules\n listeners = [ ( 80, 8080, 'http' ) ] # Default listener\n if params.get( 'listener-rules' ) :\n listeners = []\n for listener_rule in params[ 'listener-rules' ] :\n if params[ 'protocol' ] == 'https' :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ],\n ssl_cert_arn) )\n else :\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ] ) )\n\n ##\n ## FIX: There is a bug here where the public ports are supposed to be set on the proxy if\n ## app_visibility is PUBLIC. 
Don't have time to fix/regression test now...\n ##\n elif len( public_tcp_ports ) == len( app_tcp_ports ) and len( public_tcp_ports ) > 0 :\n listeners = []\n for public_port, app_port in zip( public_tcp_ports, app_tcp_ports ) :\n if public_port == 443 :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( public_port, app_port, 'https', ssl_cert_arn ) )\n else :\n listeners.append( ( public_port, app_port, 'http' ) )\n\n\n ## find subnet in dev vpc.\n ## TODO: should we define subnet-cidr prarameter to get subnet?\n subnets = vpc_conn.get_all_subnets( filters = [ ( \"vpcId\", [ vpc.id ] ) ] ) \n \n\n print \"Creating load balancer.\"\n elb = create_elb( elb_conn,\n get_elb_name( base_name, app_name ),\n subnets,\n listeners,\n lb_secgrp,\n health_check_port,\n health_check_url,\n subnet_type == 'PUBLIC' )\n \n elb = find_elb(elb_conn, elb.name)\n \n if params.get( 'monitors' ) :\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, params[ 'monitors' ] )\n\n if subnet_type == 'PUBLIC' :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, elb.dns_name )\n else :\n # create dna alias for internal elb in dev vpc.\n dns_alias = create_dns_name( base_name, app_name + '.internal' )\n print \"Configuring DNS name for load balancer: \" + dns_alias\n set_dns_cname( r53_conn, dns_alias, elb.dns_name )\n\n if app_visibility == 'HBO' :\n for port in public_tcp_ports :\n lb_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n elif app_visibility == 'PUBLIC' :\n print \"Creating public load balancer.\"\n lb_public_name = app_name + '-PB'\n\n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n \n if not lb_public_secgrp :\n lb_public_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, lb_public_name ),\n 'Controls access to the ' + lb_public_name + ' load balancer.' 
)\n\n ## set deep as False, because there is no dev nat security group\n remove_all_rules( ec2_conn, [ lb_public_secgrp ], deep=False, base_name=base_name) \n \n ## reload the security group after removing the rules\n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n\n \n for port in public_tcp_ports :\n lb_public_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n lb_public_listeners = [ ( 80, 80, 'http' ) ]\n if use_ssl :\n lb_public_listeners = [ ( 443, 8080, 'https', ssl_cert_arn ) ]\n\n public_elb = create_elb( elb_conn,\n get_elb_name( base_name, lb_public_name ),\n subnets,\n lb_public_listeners,\n lb_public_secgrp,\n health_check_port,\n health_check_url,\n True )\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, proxy_lb_monitor_rules )\n\n if public_dns_cname :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, public_elb.dns_name )\n else :\n public_dns_cname = ''\n else :\n elb = find_elb( elb_conn, get_elb_name( base_name, app_name ) )\n print \"Processing load-balancer actions.\"\n for action_param in params.get( 'actions', [] ) :\n if action_param[ 'type' ] == 'RESTART_INSTANCES' :\n restart_elb_instances( ec2_conn, elb_conn, elb, params.get( 'restart-smoothly', 'YES' ) == 'YES' )\n\n lb_secgrp = find_group( ec2_conn, base_name, get_lb_secgrp_type( app_name ) )\n dns_alias = None\n \n lb_public_name = app_name + '-PB' \n lb_public_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, lb_public_name ))\n\n return ( elb, lb_secgrp, dns_alias, lb_public_secgrp )", "def _create_body(self, name, port=None, protocol=None, nodes=None,\n virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,\n connectionLogging=None, connectionThrottle=None, healthMonitor=None,\n metadata=None, timeout=None, sessionPersistence=None,\n httpsRedirect=None):\n required = (virtual_ips, port, protocol)\n if not all(required):\n raise exc.MissingLoadBalancerParameters(\"Load Balancer creation \"\n \"requires at least one virtual IP, a protocol, and a port.\")\n nodes = utils.coerce_to_list(nodes)\n virtual_ips = utils.coerce_to_list(virtual_ips)\n bad_conditions = [node.condition for node in nodes\n if node.condition.upper() not in (\"ENABLED\", \"DISABLED\")]\n if bad_conditions:\n raise exc.InvalidNodeCondition(\"Nodes for new load balancer must be \"\n \"created in either 'ENABLED' or 'DISABLED' condition; \"\n \"received the following invalid conditions: %s\" %\n \", \".join(set(bad_conditions)))\n node_dicts = [nd.to_dict() for nd in nodes]\n vip_dicts = [vip.to_dict() for vip in virtual_ips]\n body = {\"loadBalancer\": {\n \"name\": name,\n \"port\": port,\n \"protocol\": protocol,\n \"nodes\": node_dicts,\n \"virtualIps\": vip_dicts,\n \"algorithm\": algorithm or \"RANDOM\",\n \"halfClosed\": halfClosed,\n \"accessList\": accessList,\n \"connectionLogging\": connectionLogging,\n \"connectionThrottle\": connectionThrottle,\n \"healthMonitor\": healthMonitor,\n \"metadata\": metadata,\n \"timeout\": timeout,\n \"sessionPersistence\": sessionPersistence,\n \"httpsRedirect\": httpsRedirect,\n }}\n return body", "def pre_loadbalancer_member_create(self, resource_dict):\n pass", "def create(self, region, **kwargs):\n params = {\n \"region\": region.id if isinstance(region, Base) else region,\n }\n params.update(kwargs)\n\n result = self.client.post(\"/nodebalancers\", data=params)\n\n if not \"id\" in 
result:\n raise UnexpectedResponseError(\n \"Unexpected response when creating Nodebalaner!\", json=result\n )\n\n n = NodeBalancer(self.client, result[\"id\"], result)\n return n", "def process_load_balancer ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n r53_conn,\n iam_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n params,\n aws_account_type,\n app_visibility = None,\n public_dns_cname = None,\n public_tcp_ports = [],\n app_tcp_ports = [],\n use_ssl = False,\n ssl_hostname = None\n ) :\n\n if not app_name :\n app_name = params[ 'app-name' ]\n\n if app_visibility == 'PUBLIC' :\n subnet_type = 'PRIVATE' # Public apps have app LB's that sit private. The PROXY LB is public.\n elif app_visibility == 'HBO' :\n subnet_type = 'PUBLIC' # HBO apps have app LB's that site public.\n elif app_visibility == 'PRIVATE' :\n subnet_type = 'PRIVATE'\n else :\n subnet_type = params[ 'subnet-type' ]\n\n if not public_dns_cname :\n public_dns_cname = params.get( 'public-dns-alias' )\n\n create = params.get( 'create', 'NO' )\n if create == 'YES':\n print \"Creating load balancer security group.\"\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n if not lb_secgrp :\n lb_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, app_name ),\n 'Controls access to the ' + app_name + ' LB' )\n remove_all_rules( ec2_conn, [ lb_secgrp ] , deep=True, base_name=base_name)\n ## reload the security group after removing the rules\n lb_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, app_name ))\n \n health_check_port = params.get( 'health-check-port', 8080 )\n health_check_url = params.get( 'health-check-url' )\n if not health_check_url :\n health_check_url = '/' + app_name + '/ping.html'\n\n ## Figure out if we need to find the SSL cert.\n ssl_cert_arn = None\n if use_ssl :\n cert = get_aws_ssl_certificate( iam_conn, ssl_cert_name )\n if cert :\n ssl_cert_arn = cert.arn\n else :\n print \"ERROR: Use SSL was specified, but could not find certificate matching host: \" + ssl_cert_name\n sys.exit( 5 )\n\n ## Generate the correct listener rules\n listeners = [ ( 80, 8080, 'http' ) ] # Default listener\n if params.get( 'listener-rules' ) :\n listeners = []\n for listener_rule in params[ 'listener-rules' ] :\n if params[ 'protocol' ] == 'https' :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ],\n ssl_cert_arn) )\n else :\n listeners.append( ( params[ 'incoming-port' ],\n params[ 'outgoing-port' ],\n params[ 'protocol' ] ) )\n ##\n ## FIX: There is a bug here where the public ports are supposed to be set on the proxy if\n ## app_visibility is PUBLIC. 
Don't have time to fix/regression test now...\n ##\n elif len( public_tcp_ports ) == len( app_tcp_ports ) and len( public_tcp_ports ) > 0 :\n listeners = []\n for public_port, app_port in zip( public_tcp_ports, app_tcp_ports ) :\n if public_port == 443 :\n if not ssl_cert_arn :\n print \"ERRROR: https protocol specified, but use_ssl was NOT specified.\"\n sys.exit( 5 )\n listeners.append( ( public_port, app_port, 'https', ssl_cert_arn ) )\n else :\n listeners.append( ( public_port, app_port, 'http' ) )\n\n\n print \"Creating load balancer.\"\n elb = create_elb( elb_conn,\n get_elb_name( base_name, app_name ),\n get_vpc_subnets( vpc_conn, vpc, subnet_type ),\n listeners,\n lb_secgrp,\n health_check_port,\n health_check_url,\n subnet_type == 'PUBLIC' )\n \n elb = find_elb(elb_conn, elb.name)\n \n if params.get( 'monitors' ) :\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, params[ 'monitors' ] )\n\n if subnet_type == 'PUBLIC' :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, elb.dns_name )\n else :\n dns_alias = create_internal_elb_dns_name( base_name, app_name )\n print \"Configuring DNS name for load balancer: \" + dns_alias\n set_dns_cname( r53_conn, dns_alias, elb.dns_name )\n\n if app_visibility == 'HBO' :\n for port in public_tcp_ports :\n lb_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = hbo_cidr_list ) \n\n elif app_visibility == 'PUBLIC' :\n print \"Creating proxy load balancer.\"\n proxy_type = app_name + '-PX'\n proxy_secgrp = find_secgrp(ec2_conn, get_secgrp_name( base_name, proxy_type ))\n if not proxy_secgrp :\n proxy_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_secgrp_name( base_name, proxy_type ),\n 'Controls access to the ' + proxy_type + ' servers.' )\n \n lb_proxy_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, proxy_type ))\n \n if not lb_proxy_secgrp :\n lb_proxy_secgrp = create_secgrp( ec2_conn,\n vpc,\n get_lb_secgrp_name( base_name, proxy_type ),\n 'Controls access to the ' + proxy_type + ' load balancer.' 
)\n\n remove_all_rules( ec2_conn, [ lb_proxy_secgrp, proxy_secgrp ], deep=True, base_name=base_name) \n ## reload the security group after removing the rules\n lb_proxy_secgrp = find_secgrp(ec2_conn, get_lb_secgrp_name( base_name, proxy_type ))\n proxy_secgrp = find_secgrp(ec2_conn, get_secgrp_name( base_name, proxy_type ))\n\n \n ##\n ## FIX: In reality, we need to set the group rules between lb_proxy and proxy to match\n ## the listener ports that were passed in/configured.\n ##\n grant_ssh_access( ec2_conn, [ proxy_secgrp ], find_group( ec2_conn, base_name, 'NAT' ) )\n \n \n ## proxy server port is always 80\n ## updated by yliu, 2014/6/13\n ##if use_ssl :\n ## proxy_port = 443\n ##else :\n ## proxy_port = 80\n proxy_port = 80\n\n ## backend elb port that the proxy server passes request to \n if use_ssl :\n proxy_to_elb_port = 443\n else :\n proxy_to_elb_port = 80\n\n grant_grp_access( ec2_conn, [ lb_proxy_secgrp ], proxy_secgrp, proxy_port )\n grant_grp_access( ec2_conn, [ proxy_secgrp ], lb_secgrp, proxy_to_elb_port )\n for port in public_tcp_ports :\n lb_proxy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr ) \n\n proxy_listeners = [ ( 80, 80, 'http' ) ]\n if use_ssl :\n proxy_listeners = [ ( 443, proxy_port, 'https', ssl_cert_arn ) ]\n\n proxy_elb = create_elb( elb_conn,\n get_elb_name( base_name, proxy_type ),\n get_vpc_subnets( vpc_conn, vpc, 'PUBLIC' ),\n proxy_listeners,\n lb_proxy_secgrp,\n proxy_port,\n '/robots.txt',\n True )\n add_monitors_to_elb( cloudwatch_conn, base_name, app_name, base_topicarn, proxy_lb_monitor_rules )\n\n if public_dns_cname :\n print \"Setting public DNS alias for load balancer.\"\n set_dns_cname( r53_conn, public_dns_cname, proxy_elb.dns_name )\n else :\n public_dns_cname = ''\n\n print \"Creating proxy instances.\"\n proxy_ami = get_ami_by_name( ec2_conn, proxy_ami_name )\n subnets = get_vpc_subnets( vpc_conn, vpc, 'PRIVATE' )\n\n ## direct proxy server to access backend elb over given protocol\n ## added by yliu, 2014/6/13\n if use_ssl :\n app_elb_protocol = 'https'\n else :\n app_elb_protocol = 'http'\n \n proxy_userdata = get_proxy_userdata( public_dns_cname, elb.dns_name, app_elb_protocol, app_name )\n proxy_instances = []\n \n proxy_keypair = get_keypair_name( aws_account_type, vpc.region.name, \"APACHE\" )\n \n for subnet in subnets : \n instance = launch_instance_vpc( ec2_conn,\n proxy_ami,\n base_name = base_name,\n instance_type = proxy_type,\n keypair = proxy_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = proxy_secgrp.id ,\n subnet_id = subnet.id,\n user_data = proxy_userdata,\n public_ip = False )\n proxy_instances.append( instance )\n\n print \"Setting alarms on the proxy\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, 'PROXY', base_topicarn, proxy_monitor_rules )\n \n proxy_instance_ids = [ i.id for i in proxy_instances ]\n\n print \"Waiting for proxy instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, proxy_instance_ids )\n\n print \"Adding the new proxy instances into the load balancer.\"\n \n status = swap_elb_instances( elb_conn = elb_conn,\n elb = proxy_elb,\n new_instance_ids = proxy_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = False )\n\n else :\n elb = find_elb( elb_conn, get_elb_name( base_name, app_name ) )\n print \"Processing load-balancer actions.\"\n for action_param in params.get( 'actions', [] ) :\n if action_param[ 'type' ] == 
'RESTART_INSTANCES' :\n restart_elb_instances( ec2_conn, elb_conn, elb, params.get( 'restart-smoothly', 'YES' ) == 'YES' )\n\n lb_secgrp = find_group( ec2_conn, base_name, get_lb_secgrp_type( app_name ) )\n dns_alias = None\n\n return ( elb, lb_secgrp, dns_alias )", "def get_create_rack_vthunder_load_balancer_flow(\n self, vthunder_conf, device_dict, topology, listeners=None, pools=None):\n\n f_name = constants.CREATE_LOADBALANCER_FLOW\n lb_create_flow = linear_flow.Flow(f_name)\n\n lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(\n requires=constants.LOADBALANCER_ID))\n lb_create_flow.add(database_tasks.ReloadLoadBalancer(\n requires=constants.LOADBALANCER_ID,\n provides=constants.LOADBALANCER))\n\n # device-name flavor support\n lb_create_flow.add(a10_database_tasks.GetFlavorData(\n rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},\n provides=constants.FLAVOR_DATA))\n lb_create_flow.add(vthunder_tasks.GetVthunderConfByFlavor(\n inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,\n a10constants.DEVICE_CONFIG_DICT: device_dict},\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,\n a10constants.DEVICE_CONFIG_DICT, constants.FLAVOR_DATA),\n provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))\n lb_create_flow.add(vthunder_tasks.HandleACOSPartitionChange(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG),\n provides=a10constants.VTHUNDER_CONFIG))\n lb_create_flow.add(\n a10_database_tasks.CheckExistingThunderToProjectMappedEntries(\n requires=(\n constants.LOADBALANCER,\n a10constants.VTHUNDER_CONFIG,\n a10constants.USE_DEVICE_FLAVOR)))\n lb_create_flow.add(\n self.vthunder_flows.get_rack_vthunder_for_lb_subflow(\n vthunder_conf=a10constants.VTHUNDER_CONFIG,\n prefix=constants.ROLE_STANDALONE,\n role=constants.ROLE_STANDALONE))\n post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW\n lb_create_flow.add(\n self.get_post_lb_rack_vthunder_association_flow(\n post_amp_prefix, topology, mark_active=(not listeners)))\n lb_create_flow.add(nat_pool_tasks.NatPoolCreate(\n requires=(constants.SUBNET, constants.LOADBALANCER,\n a10constants.VTHUNDER, constants.FLAVOR_DATA)))\n lb_create_flow.add(virtual_server_tasks.CreateVirtualServerTask(\n requires=(constants.LOADBALANCER, a10constants.VTHUNDER,\n constants.FLAVOR_DATA),\n provides=a10constants.STATUS))\n\n if pools:\n for pool in pools:\n lb_create_flow.add(self._pool_flows.get_fully_populated_create_pool_flow(\n topology, pool, vthunder_conf=vthunder_conf, device_dict=device_dict))\n\n if listeners:\n sf_name = a10constants.FULLY_POPULATED_LISTENER_CREATE\n for listener in listeners:\n lb_create_flow.add(\n self._listener_flows.get_rack_fully_populated_create_listener_flow(\n topology, listener))\n\n lb_create_flow.add(database_tasks.MarkLBActiveInDB(\n name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,\n mark_subobjects=True,\n requires=constants.LOADBALANCER))\n\n lb_create_flow.add(vthunder_tasks.WriteMemory(\n requires=a10constants.VTHUNDER))\n lb_create_flow.add(a10_database_tasks.SetThunderUpdatedAt(\n requires=a10constants.VTHUNDER))\n return lb_create_flow", "def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance 
%s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))", "def create_pool(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n pool = conn.load_balancer.create_pool(\n protocol=data['pool']['protocol'],\n lb_algorithm=data['pool']['lb_algorithm'],\n session_persistence=data['pool'].get('session_persistence'),\n listener_id=kwargs['listener_id'],\n loadbalancer_id=kwargs['loadbalancer_id'],\n name=data['pool'].get('name'),\n description=data['pool'].get('description'),\n admin_state_up=data['pool'].get('admin_state_up'),\n tls_enabled=data['pool'].get('tls_enabled'),\n # Replace empty string by None (uses default tls cipher string)\n tls_ciphers=data['pool'].get('tls_ciphers') or None,\n )\n\n if data.get('members'):\n args = (request, kwargs['loadbalancer_id'], add_member)\n kwargs = {'callback_kwargs': {'pool_id': pool.id,\n 'index': 0}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n elif data.get('monitor'):\n args = (request, kwargs['loadbalancer_id'], create_health_monitor)\n kwargs = {'callback_kwargs': {'pool_id': pool.id}}\n thread.start_new_thread(poll_loadbalancer_status, args, kwargs)\n\n return _get_sdk_object_dict(pool)", "def balancer():\n pass", "def elastic_lb(template, name, instances, subnets, instance_port=443, load_balancer_port=443, instance_proto=\"HTTPS\",\n load_balancer_proto='HTTPS', securitygroups=None, health_check=None, scheme=None):\n\n elasticlb = elb.LoadBalancer(name,\n template=template,\n Subnets=[Ref(r) for r in subnets],\n SecurityGroups=[Ref(r) for r in securitygroups],\n ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(Enabled=True, Timeout=300),\n CrossZone=True,\n Instances=[Ref(r.title) for r in instances]\n )\n\n listener = elb.Listener()\n listener.LoadBalancerPort = load_balancer_port\n listener.InstancePort = 
instance_port\n listener.Protocol = load_balancer_proto\n listener.InstanceProtocol = instance_proto\n\n if load_balancer_proto == 'HTTPS':\n listener.SSLCertificateId = PROD_OPS_CERTIFICATE\n\n elasticlb.Listeners = [listener]\n\n if health_check:\n elasticlb.HealthCheck = health_check\n\n if scheme:\n elasticlb.Scheme = scheme\n\n return elasticlb", "def create_bridge(self, num_ifaces: int) -> Bridge:\n testutils.log.info(\n \"---------------------- Creating a namespace ----------------------\",\n )\n random.seed(datetime.now().timestamp())\n bridge = Bridge(uuid.uuid4())\n result = bridge.create_virtual_env(num_ifaces)\n if result != testutils.SUCCESS:\n bridge.ns_del()\n testutils.log.error(\n \"---------------------- Namespace creation failed ----------------------\",\n )\n raise SystemExit(\"Unable to create the namespace environment.\")\n testutils.log.info(\n \"---------------------- Namespace successfully created ----------------------\"\n )\n return bridge", "def new_instance(cls,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'GlobalLoadBalancersV1':\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n authenticator\n )\n service.configure_service(service_name)\n return service", "def app_elb(template, name, subnets, instances, vpc, instance_port=443, load_balancer_port=443, instance_proto='HTTPS',\n load_balancer_proto='HTTPS', securitygroups=None):\n\n applb = elbv2.LoadBalancer(name,\n template=template,\n Subnets=[Ref(r) for r in subnets],\n SecurityGroups=[Ref(r) for r in securitygroups],\n )\n\n targetgroup = elbv2.TargetGroup(title=name + 'targetgroup',\n template=template,\n Port=instance_port,\n Protocol=instance_proto,\n VpcId=Ref(vpc),\n Targets=[elbv2.TargetDescription(Id=Ref(r)) for r in instances],\n HealthCheckIntervalSeconds=10,\n # HealthCheckPath=\"/\",\n # HealthCheckPort=\"traffic-port\",\n # HealthCheckProtocol=\"HTTP\",\n # HealthCheckTimeoutSeconds=5,\n # UnhealthyThresholdCount=10,\n # HealthyThresholdCount=2,\n )\n\n elbv2.Listener(title=(name + 'listener'),\n template=template,\n DefaultActions=[elbv2.Action(TargetGroupArn=Ref(targetgroup), Type='forward')],\n LoadBalancerArn=Ref(applb),\n Port=load_balancer_port,\n Protocol=load_balancer_proto,\n )\n\n return applb", "def _configure_manager(self):\n self._manager = CloudLoadBalancerManager(self,\n resource_class=CloudLoadBalancer,\n response_key=\"loadBalancer\", uri_base=\"loadbalancers\")" ]
[ "0.7753503", "0.7641294", "0.7280807", "0.7179058", "0.67555755", "0.67294407", "0.65474385", "0.64428174", "0.63757575", "0.63649774", "0.61280835", "0.6105206", "0.60898733", "0.60526896", "0.6042839", "0.5977197", "0.5939473", "0.5935822", "0.5933579", "0.58584756", "0.5833675", "0.5775257", "0.5765348", "0.5726141", "0.56559354", "0.56406057", "0.5634717", "0.56206834", "0.56161815", "0.55569446" ]
0.7866335
0
Creates target groups for the current environment.
def create_target_groups(self): target_groups_config = self.get_target_groups_config() vpc_id = self.get_vpc_id() response_data = {} for short_name in target_groups_config.keys(): target_group_name = self.get_target_group_name(short_name) if self.target_group_exists(short_name): self.logger.info(f'Target group {target_group_name} exists, skipping creation.') continue response = self.client.create_target_group( Name=target_group_name, VpcId=vpc_id, **target_groups_config[short_name], ) assert response['ResponseMetadata']['HTTPStatusCode'] == 200 self.logger.info(f'Target group {target_group_name} created.') response_data[target_group_name] = response['TargetGroups'] return response_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def ensure_target_group_created(vpc, environment):\n name = environment + '-web'\n\n # If it already exists, create returns the existing data\n response = ELB.create_target_group(\n Name=name,\n Protocol='HTTP',\n Port=9000,\n VpcId=vpc.id,\n Matcher={\n 'HttpCode': '200,301'\n }\n )\n\n arn = response['TargetGroups'][0]['TargetGroupArn']\n\n return arn", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def setup_group_workspaces(context):\n if context.readDataFile(\"marker.txt\") is None:\n return\n\n portal = context.getSite()\n if \"groups\" not in portal.objectIds():\n\n groups = portal[\n portal.invokeFactory(\"Folder\",id=\"groups\")]\n\n # set default properties\n groups.setTitle(\"groups\")\n groups.setDescription(\"Group workspaces container.\")\n groups._getWorkflowTool().doActionFor(groups, \"publish\" \"\")\n groups.setExcludeFromNav(True)\n groups.update() \n logger.info(\"Groups container created.\")", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n 
('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def create_targetgroup(self, name, desc, tags, targets=None):\r\n # 
http://zimbabwenewsonline.com/top_news/2495.html \r\n prev_row = self._db(self._db.targetgroup.name==name).select().first()\r\n if prev_row:\r\n self._db.targetgroup.update(id=prev_row.id, name=name, desc=desc,\r\n tags=tags, targets=targets)\r\n ret_id = prev_row.id\r\n else:\r\n ret_id = self._db.targetgroup.insert(name=name, desc=desc,\r\n tags=tags, targets=targets)\r\n\r\n self._db.commit()\r\n return ret_id", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def update_target_groups():\n\n # detect which region the explorer(s) are located\n for j in range(NUM_OF_SHARDS):\n key_explorer = \"explorers_\" + str(j)\n array_instance_ip = parse_network_config(key_explorer)\n array_instance_id = retrieve_instance_id(array_instance_ip)\n\n reg = retrieve_instance_region(array_instance_ip[0])\n # all nodes registered for the same endpoints should be located in the same region, if not, exit\n verify_nodes_same_region(reg, array_instance_ip)\n\n elbv2_client = boto3.client('elbv2', region_name=reg)\n\n array_target_group = create_name_target_group(j, ID_DOMAIN_NAME)\n pp.pprint(array_target_group)\n\n # 1/3 - retrieve target group arn\n print(\"==== retrieve target group arn\")\n dict_tg_arn = dict()\n for tg in array_target_group:\n resp = elbv2_client.describe_target_groups(Names=[tg])\n tg_arn = resp[\"TargetGroups\"][0][\"TargetGroupArn\"]\n dict_tg_arn[tg] = tg_arn\n pp.pprint(dict_tg_arn)\n\n # 2/3 - find all the instances\n print(\"==== find all the instances current registered\")\n dict_tg_instanceid = defaultdict(list)\n for tg in array_target_group:\n resp = elbv2_client.describe_target_health(TargetGroupArn=dict_tg_arn[tg])\n num_of_targets = len(resp[\"TargetHealthDescriptions\"])\n for k in range(num_of_targets):\n instance_id = resp[\"TargetHealthDescriptions\"][k][\"Target\"][\"Id\"]\n dict_tg_instanceid[tg].append(instance_id)\n pp.pprint(dict_tg_instanceid)\n\n # 3/3 - deregister all instances, then we can have a clean and nice target group\n print(\"==== deregister all instances\")\n for tg in array_target_group:\n for instance_id in dict_tg_instanceid[tg]:\n try:\n resp = elbv2_client.deregister_targets(TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance_id}])\n except Exception as e:\n print(\"Unexpected error to deregister the instance: %s\" % e)\n\n # 3/3 - register instances into the tg\n print(\"==== register all instances\")\n # outer for loop: loop through 2 tg, https and wss\n # inner loop: add every single instance id into each tg\n for tg in array_target_group:\n for instance in array_instance_id:\n response = elbv2_client.register_targets(\n TargetGroupArn=dict_tg_arn[tg],\n Targets=[{'Id': instance, }, ]\n )", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def groups(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for group in extracted:\n # pylint: disable=no-member\n self.groups.add(group)", "def test_create_device_group(self):\n pass", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in 
[1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def create_groups(groups):\n for group_name in groups:\n try:\n Group.objects.get_or_create(name=group_name)\n except Exception as e:\n raise CouldNotCreateGroup(group_name, e)", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def test_create_resource_group(self):\n pass", "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def gen_group(group_name=None, group_vars={}):\n group = Group(name=group_name)\n for key, value in group_vars.iteritems():\n group.set_variable(key, value)\n return group", 
"def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "def setup_spritegroups(self):\n self.ground_step_pipe_group = pygame.sprite.Group(self.ground_group,\n self.pipe_group,\n self.step_group)", "def create_groups(self, group_names):\n groups = {}\n for group_name in group_names:\n groups[group_name] = Group(name=group_name)\n groups[group_name].save()\n return groups", "def test_system_group_create(audreyvars, tunnel_requested, system_groups):\n server = audreyvars[\"KATELLO_HOST\"]\n login = audreyvars.get(\"KATELLO_USER\", \"admin\")\n org = audreyvars.get(\"KATELLO_ORG\", \"redhat\")\n password = audreyvars.get(\"KATELLO_PASS\", \"admin\")\n\n # If using a tunnel to access ec2, an alternative port is needed\n if tunnel_requested:\n port = audreyvars.get(\"SSH_TUNNEL_KATELLO_PORT\", 1443)\n else:\n port = audreyvars.get(\"KATELLO_PORT\", 443)\n\n # Query existing system groups\n current_group_names = [g.get('name') for g in common.katello.system_group_query(server, port, org, login, password)]\n\n # Determine whether groups were created\n new_group_ids = []\n for group_name in system_groups:\n if group_name not in current_group_names:\n result_dict = common.katello.system_group_create(server, port, org, login, password, group_name)\n new_group_ids.append(result_dict.get('id'))\n\n if len(new_group_ids) == 0:\n pytest.skip(msg=\"System groups already exist, no groups created\")", "def register_target_commands():\n for plugin_cls in env.plugins.values():\n # Add target group to root commands. E.g. create `nursery vbox`\n cli.add_command(plugin_cls.cli_entry_func, plugin_cls.cli_entry_func.name)\n\n for cmd in TargetPlugin.root_actions:\n if cmd in plugin_cls.root_command_map:\n # Find the corresponding group in this module to add the target's\n # command group to. E.g. Find `nursery up` and add `vbox` to it.\n globals()[f\"{cmd}_cmd\"].add_command(\n plugin_cls.root_command_map[cmd], plugin_cls.cli_entry_func.name\n )" ]
[ "0.77843726", "0.66990286", "0.6696909", "0.6282211", "0.6119517", "0.6050414", "0.5997116", "0.59821665", "0.59821665", "0.5956269", "0.59005904", "0.5763568", "0.5756899", "0.57399017", "0.5738739", "0.5734707", "0.5720554", "0.57122076", "0.5707017", "0.56966335", "0.56955004", "0.5686349", "0.5670442", "0.5613169", "0.5610815", "0.5587007", "0.55781955", "0.5557992", "0.5486684", "0.54599273" ]
0.69275194
1
Deletes target groups for the current environment.
def delete_target_groups(self): target_groups_config = self.get_target_groups_config() for short_name in target_groups_config.keys(): if not self.target_group_exists(short_name): self.logger.info('Target group {} does not exists, nothing to delete.'.format( self.get_target_group_name(short_name) )) continue response = self.client.delete_target_group( TargetGroupArn=self.get_target_group_arn(short_name) ) assert response['ResponseMetadata']['HTTPStatusCode'] == 200 self.logger.info('Target group {} deleted.'.format(self.get_target_group_name(short_name)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_target_groups(ctx):\n self.delete_target_groups()\n ctx.info('Deleted target groups for the load balancer {}:'.format(self.get_balancer_name()))", "def test_delete_groups(self):\n pass", "def del_from_groups(self, username, groups):\n pass", "def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)", "def target_remove():\r\n try:\r\n target_id = request.post_vars[\"target\"]\r\n group_id = request.post_vars[\"group\"]\r\n except KeyError:\r\n pass\r\n else:\r\n result = gl.remove_from_targetgroup(target_id, group_id)\r\n if result:\r\n return response.json({'success': 'true'})\r\n return response.json({'success': 'false'})", "def RemoveDeviceGroups(self, devices):\n for d in devices:\n # delete the group from the device tree\n group = self._groups.pop(d.run_target.group.name, None)\n if not group:\n continue\n # delete all devices under the group from the _run_target_index\n for d_in_group in self._ListGroupDevices(group):\n self._run_target_index[d_in_group.run_target.name].pop(\n d_in_group.device_serial)", "def destroy(self, context=None):\n self.dbapi.destroy_nodegroup(self.cluster_id, self.uuid)\n self.obj_reset_changes()", "def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1", "def test_delete_group(self):\n pass", "def test_delete_group(self):\n pass", "def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except ObjectDoesNotExist:\n pass", "def do_del_group(dbsync, group):\n pass", "def delete_targetgroup(self, group_id):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).delete()\r\n self._db.commit()\r\n return result", "def test_080_group_delete(self):\n\n testflow.step(RMV_GRP_MSG, TEST_GROUP_DELETE)\n assert GROUP_CLI.run(\n 'delete',\n TEST_GROUP_DELETE\n )[0], \"Failed to delete group '%s'\" % TEST_GROUP_DELETE", "def delete_groups_and_permissions_for_recipes(apps, schema_editor):\n Group = apps.get_model('auth', 'Group')\n Permission = apps.get_model('auth', 'Permission')\n # Delete the recipe_submitters group.\n recipe_submitters = Group.objects.get(name='recipe_submitters')\n recipe_submitters.delete()\n # Remove permissions for recipes to the dcc groups.\n recipe_permissions = Permission.objects.filter(content_type__app_label='recipes',\n content_type__model__in=('unitrecipe', 'harmonizationrecipe'))\n developers = Group.objects.get(name='dcc_developers')\n developers.permissions.remove(*recipe_permissions)\n analysts = Group.objects.get(name='dcc_analysts')\n analysts.permissions.remove(*recipe_permissions)", "def delete_all_groups(self):\n DELETED = 204\n for group in self.get_list_groups():\n codes = [\n self.delete_all_group_member(group[\"id\"]).status_code,\n self.delete_group(group[\"id\"]).status_code\n ]\n\n res = filter(lambda a: a != DELETED, codes)\n if res:\n return res[0]\n\n return DELETED", "def test_delete_resource_group(self):\n pass", "def test_products_ref_groups_delete(self):\n pass", "def test_groups_group_ref_delete(self):\n pass", "def test_api_v1_groups_id_delete(self):\n pass", "def test_ipam_vlan_groups_delete(self):\n pass", "def 
test_groups_group_users_delete(self):\n pass", "def test_groups_group_users_delete(self):\n pass", "def test_delete_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.delete_topic_groups(group_id, topic_id)", "def delete_algorithm_groups_hook(*_, instance: Algorithm, using, **__):\n try:\n instance.editors_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.users_group.delete(using=using)\n except ObjectDoesNotExist:\n pass", "def test_TC_44383_DELETE_Groups_Id(self, context):\n # Define a test step\n with pytest.allure.step(\"\"\"First create group using request POST /groups.\"\"\"):\n # Test case configuration\n edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails(\n configAdminCanEdit=True,\n configurations=[],\n deliveryLoadBalancePolicy='PROXIMITY_MATCHES',\n dnsName='10.1.25.46',\n edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'],\n id='GroupD1',\n members=[{\n 'id': 'POST_veDevices_AllConfigAdminMulticastTrue'\n }],\n name='GroupD1',\n originLoadBalancePolicy='DNS_NAME',\n provisioningPolicy='ALL_MEMBERS',\n proximityDetails=None,\n visibleInAllConfigurations=True)\n\n # createEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n response = check(\n context.cl.Groups.createEntity(\n body=edgeDeviceGroupDetails\n )\n )\n\n\n # Define a test step\n with pytest.allure.step(\"\"\"Now verify that user is able to delete the group on providing 'Id' parameter using request DELETE /groups{id}.\"\"\"):\n\n # deleteEntity the Groups.\n # The `check` call validates return code\n # and some of the swagger schema.\n # Most schema checks are disabled.\n check(\n context.cl.Groups.deleteEntity(\n id='GroupD1'\n )\n )", "def cleanup_user_groups(event):\n name = event.object.name\n\n if name.startswith(\"group:\"):\n principals = get_principals()\n users_groups = [p for p in principals if name in principals[p].groups]\n for user_or_group in users_groups:\n principals[user_or_group].groups.remove(name)\n\n DBSession.query(LocalGroup).filter(\n LocalGroup.principal_name == name).delete()", "def delete_worker_groups(cls, args, config):\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns worker delete name\")\n return\n config.delete_object(name=args[0], kind='WorkerGroup')", "def deleteGroup(groupName):\r\n Group.deleteGroup(groupName)", "def test_delete_group(self, inventoryloader):\n cg = inventoryloader.count_groups()\n ch = inventoryloader.count_hosts()\n inventoryloader.del_group('glance_api')\n assert 'glance_api' not in inventoryloader.groups['glance_all'].children\n assert 'glance_api' not in inventoryloader.hosts['localhost'].groups\n assert 'glance_api' not in inventoryloader.groups\n assert inventoryloader.count_groups() == cg -1\n assert inventoryloader.count_hosts() == ch" ]
[ "0.8269436", "0.66177106", "0.65208447", "0.6448851", "0.62806493", "0.62591887", "0.62582904", "0.62301564", "0.62272596", "0.62272596", "0.62231916", "0.6211974", "0.6204757", "0.6118375", "0.6099915", "0.60728055", "0.60700786", "0.6055358", "0.6038605", "0.60032064", "0.60022706", "0.59947526", "0.59947526", "0.5991384", "0.5989297", "0.598691", "0.5984235", "0.5946432", "0.5944882", "0.5934852" ]
0.80087006
1