query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, 30 items) | negative_scores (list, 30 items) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Takes conversation text and calculates the confidence score using Watson Tone Analyzer | def analyze_tone(conversation):
tone_analyzer = ToneAnalyzerV3Beta(username=WATSON_USERNAME,password=WATSON_PASSWORD,version=WATSON_API_VERSION)
tone_response = tone_analyzer.tone(conversation)
confidence = tone_response['document_tone']['tone_categories'][1]['tones'][1]['score']
return confidence | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def analyze(self, text):\n\n # start from 0 for each Analyser variable\n self.positives = 0\n self.negatives = 0\n\n # precise self text value\n self.text = text\n\n # declare a tokenased word\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n\n # indicate the length of list tokens\n size = len(tokens)\n\n # all the word stuff to ckeck\n for word in tokens:\n\n # chaque mots est converti en mot sans majuscule\n word = str.lower(word)\n\n linespos = [line.rstrip('\\n') for line in open('positive-words.txt')]\n linesneg = [line.rstrip('\\n') for line in open('negative-words.txt')]\n\n # check for positive or negative or neutral words\n if word in linespos:\n self.positives += 1\n elif word in linesneg:\n self.negatives += 1\n else:\n continue\n\n # score calculculated and reurned\n score = self.positives - self.negatives\n\n return score",
"def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score",
"def analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score",
"def sentiment_analyzer(text):\n\n\tlower_text = text.lower()\n\t\t\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tsent_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tsent_index += lower_text.count(positive_words[x])\n\tfor x in range(len(negative_words)):\n\t\tsent_index -= lower_text.count(negative_words[x])\n\tif '!' in text:\n\t\tsent_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tsent_index *= hashtag_scaling * lower_text.count('#') + 1\n\tsent_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\t\t\n\treturn sent_index",
"def detect_text(img):\n \n with io.open(img, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n response = client.text_detection(image=image) # returns TextAnnotation\n df = pd.DataFrame(columns=['description'])\n texts = response.text_annotations\n for text in texts:\n df = df.append(\n dict(\n \n description= clean_text (text.description)\n ),\n ignore_index=True\n )\n \n porter = PorterStemmer()\n\n try:\n text= (df['description'][0])\n text = porter.stem(text)\n except IndexError:\n text = 'i am neutral'\n # print (analyze(text))\n \n \n # print(df['description'])\n print(text)\n if len (text.split())<3:\n text = 'i am neutral'\n\n sentiment_dict= analyze2(text) \n if sentiment_dict >= 0.008: \n Category.append('Positive') \n return('Positive') \n\n elif (sentiment_dict > - 0.008) & (sentiment_dict < 0.008): \n Category.append('Random')\n return('Random')\n\n elif (sentiment_dict <= -0.008):\n Category.append('Negative')\n return('Negative')",
"def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score",
"def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score",
"def polarity_scores(self, text):\n # convert emojis to their textual descriptions\n text_token_list = text.split()\n \n text_no_emoji_lst = []\n \n for token in text_token_list:\n if token in self.emojis:\n # get the textual description\n description = self.emojis[token]\n text_no_emoji_lst.append(description)\n else:\n text_no_emoji_lst.append(token)\n text = \" \".join(x for x in text_no_emoji_lst)\n \n sentitext = SentiText(text)\n \n sentiments = []\n words_and_emoticons = sentitext.words_and_emoticons\n \n for item in words_and_emoticons:\n valence = 0\n i = words_and_emoticons.index(item)\n # check for vader_lexicon words that may be used as modifiers or negations\n \n if item.lower() in BOOSTER_DICT:\n sentiments.append(valence)\n continue\n if (i < len(words_and_emoticons) - 1 and item.lower() == \"kind\" and\n words_and_emoticons[i + 1].lower() == \"of\"):\n sentiments.append(valence)\n continue\n \n sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)\n \n\n sentiments = self._but_check(words_and_emoticons, sentiments)\n\n valence_dict = self.score_valence(sentiments, text)\n return valence_dict",
"def sentiment_analysis(name, dictionary):\n\ttone_analyzer = ToneAnalyzerV3(\n\t\t username='2ed2f0c6-1722-472d-9126-224897b991af',\n\t\t password='UcuSde1YmeK6',\n\t\t version='2016-05-19')\n\tl = open(name + '.txt')\n\tlines = l.readlines()\n\tfeel_dict = {'Anger':1.0,'Fear':2.0, 'Sadness':3.0, 'Disgust':4.0,'Joy':5.0, 'Excitement':6.0}\n\tdictionary[name] = []\n\tfor i in lines:\n\t\t#print('-----------------')\n\t\t#print(i)\n\t\tmax_score = 0.0\n\t\tmax_feel = ''\n\t\ttone = tone_analyzer.tone(i, 'emotion')\n\t\tfor feel in tone['document_tone']['tone_categories']:\n\t\t\tfor feeling in feel['tones']:\n\t\t\t\tif feeling['score'] > max_score:\n\t\t\t\t\tmax_score = feeling['score']\n\t\t\t\t\tmax_feel = feeling['tone_name']\n\t\t#print(max_score, max_feel)\n\t\t#blob1 = TextBlob(i, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\t\tif max_feel != '':\n\t\t\ttweet_tbu = db.Tweet.objects(rating=feel_dict[max_feel]).first()\n\t\t\tdict_tbu = {}\n\t\t\tif tweet_tbu:\n\t\t\t\tdict_tbu = mongo_to_dict(tweet_tbu)\n\t\t\t\tprint('exists')\n\t\t\t\tprint(dict_tbu)\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict = {}\n\t\t\t\t\tnew_dict['tweet'] = dict_tbu['tweet']\n\t\t\t\t\tnew_dict['tweet'].append(i[0:-2])\n\t\t\t\t\ttweet_tbu.update(**new_dict)\n\t\t\t\t\ttweet_tbu.reload()\n\t\t\telse:\n\t\t\t\tprint('not exists - with max')\n\t\t\t\tnew_dict = {}\n\t\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\t\tif max_feel != '':\n\t\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\t\telse:\n\t\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\t\tprint(new_dict)\n\t\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\t\tnew_tweet.save()\n\t\telse:\n\t\t\tprint('not exists - without')\n\t\t\tnew_dict = {}\n\t\t\tnew_dict['tweet'] = [i[0:-1]]\n\t\t\tif max_feel != '':\n\t\t\t\tnew_dict['rating'] = feel_dict[max_feel]\n\t\t\telse:\n\t\t\t\tnew_dict['rating'] = 0.0\n\t\t\tprint(new_dict)\n\t\t\tnew_tweet = db.Tweet(**new_dict)\n\t\t\tnew_tweet.save()\n\tresult = db.Tweet.objects()\n\treturn(result)",
"def main(text):\n\n # Get CHATBOT response from the user input.\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n\n # Get polarity score from CHATBOT response.\n analysis = VADER_ANALYZER.polarity_scores(text)\n\n # Change polarity score relatively to a audible frequency.\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n\n # Send OSC message, to be listened to by pd.\n CLIENT.send_message(\"/filter\", freq)\n\n # Log conversation.\n exchange = {text: bot_response}\n _log_conversation(\"conversation.db\", exchange)",
"def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment",
"def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]",
"def analyze(self, text):\n\n tknzr = nltk.tokenize.TweetTokenizer()\n words = tknzr.tokenize(text)\n \n score = 0\n \n for word in words:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n else:\n continue\n \n return score",
"def analyze(text):\n client = language_service_client.LanguageServiceClient()\n\n # with open(movie_review_filename, 'r') as review_file:\n # Instantiates a plain text document.\n \n # content = text.read()\n content=text\n document = language_v1.types.Document(\n content=content,\n type=enums.Document.Type.PLAIN_TEXT,\n language='en'\n )\n # type='PLAIN_TEXT',\n # )\n \n try:\n response = client.analyze_sentiment(\n document=document,\n encoding_type='UTF32',\n )\n sentiment = response.document_sentiment\n return (sentiment.score)\n except InvalidArgument:\n sentiment=0.0\n return sentiment",
"def recognize(self, audio):\n response = self.service.Recognize(cloud_speech_extended_pb2.RecognizeRequest(\n config=cloud_speech_extended_pb2.RecognitionConfig(\n # There are a bunch of config options you can specify. See https://goo.gl/KPZn97 for the full list.\n encoding='LINEAR16', # one of LINEAR16, FLAC, MULAW, AMR, AMR_WB\n sample_rate_hertz=audio.frame_rate, # the rate in hertz\n # See https://g.co/cloud/speech/docs/languages for a list of supported languages.\n language_code=self.settings.language, # a BCP-47 language tag\n enable_word_time_offsets=self.settings.time_offsets, # if true, return recognized word time offsets\n max_alternatives=self.settings.max_alternatives, # maximum number of returned hypotheses\n ),\n audio=cloud_speech_extended_pb2.RecognitionAudio(\n uri=None,\n content=audio.raw_data\n )\n ), self.settings.deadline)\n\n # Print the recognition result alternatives and confidence scores.\n results = []\n\n # for result in response.results:\n if len(response.results) > 0:\n result = response.results[0] # TODO: check why here we have list of results ?, when it is possible ?\n\n alternative = result.alternatives[0]\n alignment = [] #\n confirmed_results = []\n if self.settings.time_offsets:\n word_indices = [j for j in range(len(alternative.words)) if\n alternative.words[j].word != '<eps>']\n\n if len(word_indices) > 0:\n confirmed_results.append([alternative.words[i].word for i in word_indices])\n else: # alignment was not returned\n confirmed_results.append(alternative.transcript)\n\n alignment.append(\n [[alternative.words[i].start_time, alternative.words[i].end_time] for i in\n word_indices])\n results.append({\n 'transcript': ' '.join(confirmed_results),\n 'confidence': alternative.confidence,\n 'alignment': alignment,\n })\n else:\n results.append({\n 'transcript': alternative.transcript,\n 'confidence': alternative.confidence\n })\n\n return results",
"def ocr_core(filename):\n\n\n #text = pytesseract.image_to_string(Image.open(filename)) # We'll use Pillow's Image class to open the image and pytesseract to detect the string in the image\n \n \n sentiment_dict= analyser.polarity_scores(text) \n \n \n \n \n # print(\"sentence was rated as \", sentiment_dict['neg']*100, \"% Negative\") \n # print(\"sentence was rated as \", sentiment_dict['neu']*100, \"% Neutral\") \n # print(\"sentence was rated as \", sentiment_dict['pos']*100, \"% Positive\") \n \n\n if sentiment_dict['compound'] >= 0.08 : \n Category.append('Positive') \n print('Positive') \n \n elif (sentiment_dict['compound'] > - 0.08) & (sentiment_dict['compound'] < 0.08): \n Category.append('Random')\n print('Random')\n \n elif (sentiment_dict['compound'] <= -0.08):\n Category.append('Negative')\n print('Negative')\n \n #return text",
"def analyze(self, text):\n score =0\n token = TweetTokenizer()\n tokens = token.tokenize(text)\n for token in tokens:\n if token.lower() in self.pos_list:\n score+=1\n elif token.lower() in self.neg_list:\n score-=1\n\n return score",
"def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process sentiment for article. Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment",
"def get_vader_sent(text):\n \n text = str(text)\n responses = analyser.polarity_scores(text)\n sent = responses['compound']\n \n return sent",
"def analyze(self, text):\n #Check each word in text\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n total_score = 0\n #Sum the total score\n for token in tokens:\n token = token.lower()\n if token in self.positives:\n total_score = total_score + 1\n elif token in self.negatives:\n total_score = total_score - 1\n else:\n total_score = total_score + 0\n \n return total_score",
"def final_result(self, hyp, confidence):\n msg = String()\n msg.data = str(hyp.lower())\n rospy.loginfo(\n 'Detected string: %s',\n msg.data\n )\n # Stop recogniser until started again by hotword/reasoning\n self.stop()\n self.pub.publish(msg)\n self.split_text_into_logic_parts(msg.data)",
"def parse(self, text: str):\n sentiment_score = 0\n msg = text.rstrip(\",\\r\\n \")\n\n # phrase the message\n # 1. exactly match the phrase\n phrase_list = []\n for phrase in self._afinn_phrase:\n pattern = '[\\s]' + phrase + \"[!.,\\'\\\"]\"\n phrase_match = re.compile(pattern)\n temp = phrase_match.findall(msg)\n if len(temp) == 1:\n phrase_list.append(temp)\n\n # 2. throw away phrases matched and split the remaining words by blank space and .!?'\"\n phrase_split = re.compile(self._reg_affin_phrase_str)\n words = phrase_split.split(msg)\n\n # 3. exactly match the words\n word_list = []\n word_match = re.compile(\"^[a-z]+$\")\n for word in words:\n temp = word_match.findall(word)\n if len(temp) == 1:\n word_list.append(temp[0])\n word_list += phrase_list\n for word in word_list:\n try:\n if word in self._afinn.keys():\n sentiment_score += self._afinn[word]\n except Exception:\n pass\n\n return sentiment_score",
"def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment",
"def passion_analyzer(text):\n\n\tlower_text = text.lower()\n\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tpassion_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tpassion_index += (lower_text.count(positive_words[x]))**2\n\tfor x in range(len(negative_words)):\n\t\tpassion_index -= (lower_text.count(negative_words[x]))**2\n\tif '!' in text:\n\t\tpassion_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tpassion_index *= hashtag_scaling * lower_text.count('#') + 1\n\tpassion_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\n\n\t\t\n\treturn math.sqrt(passion_index)",
"def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat",
"def sentiment_analysis(self, text):\n\n body = {'text': text}\n body = json.dumps(body)\n url = self.base_url + '/language-service/phoenix-language/nlp/sentiment'\n headers = {\"ApiKey\": self.api_key, \"Content-type\": \"application/json\"}\n response = requests.post(url=url, data=body, headers=headers).json()\n return response",
"def get_subjectivity(text):\n blob = TextBlob(text)\n return blob.sentiment[1]",
"def test_word_confidences(self):\n self._api.SetImageFile(self._image_file)\n words = self._api.AllWords()\n self.assertEqual(words, [])\n self._api.Recognize()\n words = self._api.AllWords()\n confidences = self._api.AllWordConfidences()\n self.assertEqual(len(words), len(confidences))\n mapped_confidences = self._api.MapWordConfidences()\n self.assertEqual([v[0] for v in mapped_confidences], words)\n self.assertEqual([v[1] for v in mapped_confidences], confidences)",
"def analyze(self, text):\n\n text = tknzr.tokenize(text)\n\n score = 0\n \n for word in text:\n if self.positiveWords.count(word.lower()) > 0:\n score += 1\n elif self.negativeWords.count(word.lower()) > 0:\n score -= 1\n \n return score",
"def analyze(self, text):\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent"
]
| [
"0.65629125",
"0.643105",
"0.638353",
"0.6379149",
"0.63738453",
"0.635537",
"0.6338651",
"0.63331306",
"0.6307086",
"0.6306271",
"0.62828654",
"0.6266538",
"0.62520146",
"0.6215256",
"0.621029",
"0.6186863",
"0.61214954",
"0.61082006",
"0.6096289",
"0.6078521",
"0.60707635",
"0.6056865",
"0.6047594",
"0.60382825",
"0.6024456",
"0.60195345",
"0.59928983",
"0.5989262",
"0.5984712",
"0.59376377"
]
| 0.7215387 | 0 |
Return a dictionary of crime categories and sum of crimes. | def get_crimes_by_category(self):
result = {}
for crime in self.crimes:
cat_name = crime.category.category_name
if cat_name in result:
result[cat_name] += 1
else:
result[cat_name] = 1
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum_crimes(cs:CrimeStatistics)-> int:\n # return 0 # stub\n #template from atomic\n crimes_total = (cs.violent_crimes+cs.property_crimes+cs.arson)\n return crimes_total",
"def get_coordinates_by_category(self):\n\n result = {}\n for crime in self.crimes:\n cat_name = crime.category.category_name.strip()\n if cat_name in result:\n result[cat_name].append((crime.latitude, crime.longitude))\n else:\n result[cat_name] = [(crime.latitude, crime.longitude)]\n \n return result",
"def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}",
"def get_category_scores(category: Category):\r\n solutions = Solution.objects.filter(challenge__category=category).select_related(\"user\").select_related(\"challenge\")\r\n d = dict()\r\n\r\n for sol in solutions:\r\n d[sol.user] = d.get(sol.user, 0) + sol.get_score()\r\n \r\n return d",
"def crime_list(loc:List[CrimeStatistics])->List[int]:\n # return [] #stub\n #template from List[CrimeStatistics]\n # crime_count is all the seperate university total crimes seen so far\n crime_count = [] #type: List[int]\n for cs in loc:\n crime_count.append(sum_crimes(cs))\n return crime_count",
"def censuses(self, *scales: int) -> Mapping[int, Census]:\n return {\n # Map id to the whole census object\n census.id: census\n # The dict is created from a generating parsing the nodes\n for census in (\n Census.from_xml(node)\n # an XML node can be used as an iterator, where it yields children\n for node in self.shards_xml(\n \"census\",\n # we want to grab all the values, since a Census requires them\n mode=joined_parameter(\"score\", \"rank\", \"rrank\", \"prank\", \"prrank\"),\n # gets all the different stats for us\n scale=joined_parameter(*(str(scale) for scale in scales))\n if scales\n else \"all\",\n )[\"census\"]\n )\n }",
"def extract_crime_category(self,match):\n \n\n #\n # wouldn't be calling this function if we didn't already know there's a match\n assert(match!=None)\n\n #\n # extract crime category\n line=match.string\n start_index=match.start('crime')\n stop_index=match.end('crime')\n crime_key=line[start_index:stop_index]\n crime_key=crime_key.lower()\n\n my_logger.debug('match(%d,%d)=%s' % (start_index,stop_index,crime_key))\n \n return crime_key",
"def count_categories(businesses):\n\t\tcategories = {}\n\t\tfor business in businesses:\n\t\t\tcats_string = business['categories']\n\t\t\tcats_list = []\n\t\t\tif cats_string:\n\t\t\t\tcats_list = strip_categories(cats_string)\n\t\t\tfor cat in cats_list:\n\t\t\t\tif cat != 'Restaurants':\n\t\t\t\t\tif cat in categories:\n\t\t\t\t\t\tcategories[cat] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tcategories[cat] = 1\n\t\treturn categories",
"def count_risk_categories(data):\n results = Counter([row['risk_category'] for row in data])\n if '' in results:\n results['No Violations'] = results['']\n del results['']\n return results",
"def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat",
"def fetch_incident_by_category_and_resolution(parsed_data):\n incident_counter = dict()\n\n for incident in parsed_data:\n category = incident['Category']\n resolution = incident['Resolution']\n if category in incident_counter:\n incident_counter[category][0] += 1\n if resolution == \"NONE\":\n incident_counter[category][1] += 1\n else:\n if resolution == \"NONE\":\n incident_counter[category] = [1, 1]\n else:\n incident_counter[category] = [1, 0]\n\n return incident_counter",
"def get_categories_group(self):\n m = {}\n for post in self:\n for cat in post.Categories:\n if cat not in m:\n m[cat] = []\n m[cat].append(post)\n return m",
"def income_cat(housing):\n logging.info(\"Creating Income Category.....\")\n housing[\"income_cat\"] = pd.cut(\n housing[\"median_income\"],\n bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],\n labels=[1, 2, 3, 4, 5],\n )\n return housing",
"def get_categories(race_name, event_discipline):\n # FIXME - need to handle pro/elite (cat 0) for MTB\n # FIXME - MTB categories are a disaster and probably need a completely different set of patterns\n cat_match = CATEGORY_RE.search(race_name)\n age_match = AGE_RANGE_RE.search(race_name)\n if age_match:\n return []\n elif cat_match:\n cats = cat_match.group(1).lower().replace('pro', '1')\n if cats in ['beginner', 'novice']:\n cats = '5'\n elif cats == 'c':\n cats = '4'\n elif cats == 'b':\n cats = '3'\n elif cats == 'a':\n cats = '1/2'\n elif cats == 'a/b':\n cats = '1/2/3'\n elif cats == 'b/c':\n cats = '3/4'\n return list(set(int(c) for c in cats.split('/')))\n else:\n return []",
"def get_word_count_category(self):\n word_count_category_dict = dict()\n from capstoneproject.models.models.category import Category\n for cat in Category.categories.all():\n word_count_category_dict[cat.name] = dict()\n\n word_count_dict = self._create_word_count_dict()\n for word, count in word_count_dict.items():\n from capstoneproject.models.models.word import Word\n word_model = Word.words.get_word(word=word)\n for word_cat in word_model.get_categories():\n word_count_category_dict[word_cat][word] = count\n\n return word_count_category_dict",
"def getCategoryCounts(self, query):\n facade = self._getFacade()\n results = facade.getCategoryCounts(query)\n total = sum(result['count'] for result in results)\n return {'results': results,\n 'total': total}",
"def _per_cls_summarize(self):\n per_class_result = {}\n for catId in range(len(self.cats)):\n resultDet = self._summarize_with_cat(f1=False, catIdx=catId)\n\n if catId in self.FPParams.catsWithAttributes:\n results = self._summarize_with_cat(catIdx=catId)\n resultF1 = self._summarize_with_cat(iou=False, catIdx=catId)\n\n per_class_result[self.cats[catId][\"name\"]] = {\n \"iou_f1\": results, \"f1\": resultF1, \"iou\": resultDet,\n }\n else:\n per_class_result[self.cats[catId][\"name\"]] = {\"iou\": resultDet}\n\n return per_class_result",
"def returns_by_category(self):\n cate_weights = self.weights_by_category\n cate_returns = {}\n for cate in self.unique_category:\n if cate_weights[cate] == 0:\n cate_returns[cate] = 0\n else:\n cate_returns[cate] = (self.returns[self.category == cate] *\n self.weights[self.category == cate]).sum()/cate_weights[cate]\n return pd.Series(cate_returns, index=self.unique_category)",
"def site_to_category():\n return {\"UNEW\": 1, \"USFD\": 2, \"CAU\": 3, \"TASMC\": 4, \"RBMF\": 5}",
"def get_categories(mapping):\n categories = []\n \n for idx, name in mapping.items(): \n temp = {'id':idx, 'name':name, 'supercategory':'NA'}\n categories.append(temp)\n \n return categories",
"def summarizeNuclideCategories(self):\n runLog.info(\n \"Nuclide categorization for cross section temperature assignments:\\n\"\n + tabulate.tabulate(\n [\n (\n \"Fuel\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"fuel\"]\n ),\n ),\n (\n \"Coolant\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"coolant\"]\n ),\n ),\n (\n \"Structure\",\n createFormattedStrWithDelimiter(\n self._nuclideCategories[\"structure\"]\n ),\n ),\n ],\n headers=[\"Nuclide Category\", \"Nuclides\"],\n tablefmt=\"armi\",\n )\n )",
"def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)",
"def create_crime_cat(df):\n df['CrimeType'] = ''\n for ct in crime_type_cols:\n c_int = df[ct].astype('int32')\n sub = c_int[c_int == 1]\n df.CrimeType.iloc[sub.index] = ct",
"def _get_counts(self, X: np.ndarray) -> Dict[int, np.ndarray]:\n return {f: np.bincount(X[:, f].astype(int), minlength=n_cat) for f, n_cat in\n self.categories_per_feature.items()}",
"def getClassCounts(column, uniqueVal, decision, yes, no , total):\r\n dataDict = {} # a dictionary of labels\r\n for val in uniqueVal:\r\n label1 = val + '/Y'\r\n label2 = val + '/N'\r\n dataDict[label1] = 0; dataDict[label2] = 0\r\n for dec, at in zip(decision, column):\r\n if at == val and dec == 'No':\r\n dataDict[label2] += 1\r\n if at == val and dec == 'Yes':\r\n dataDict[label1] += 1\r\n dataDict[val] = (dataDict[label2]+ dataDict[label1])/ total\r\n dataDict[label2] = dataDict[label2] / no\r\n dataDict[label1] = dataDict[label1] / yes\r\n return dataDict",
"def getNuclideCategories(self):\n if not self._nuclideCategories:\n coolantNuclides = set()\n fuelNuclides = set()\n structureNuclides = set()\n for c in self.iterComponents():\n # get only nuclides with non-zero number density\n # nuclides could be present at 0.0 density just for XS generation\n nuclides = [\n nuc for nuc, dens in c.getNumberDensities().items() if dens > 0.0\n ]\n if c.getName() == \"coolant\":\n coolantNuclides.update(nuclides)\n elif \"fuel\" in c.getName():\n fuelNuclides.update(nuclides)\n else:\n structureNuclides.update(nuclides)\n structureNuclides -= coolantNuclides\n structureNuclides -= fuelNuclides\n remainingNuclides = (\n set(self.parent.blueprints.allNuclidesInProblem)\n - structureNuclides\n - coolantNuclides\n )\n fuelNuclides.update(remainingNuclides)\n self._nuclideCategories[\"coolant\"] = coolantNuclides\n self._nuclideCategories[\"fuel\"] = fuelNuclides\n self._nuclideCategories[\"structure\"] = structureNuclides\n self.summarizeNuclideCategories()\n\n return (\n self._nuclideCategories[\"coolant\"],\n self._nuclideCategories[\"fuel\"],\n self._nuclideCategories[\"structure\"],\n )",
"def get_bilan_conso_per_year(self):\n qs = self.get_cerema_cities().aggregate(\n **{f\"20{f[3:5]}\": Sum(f) / 10000 for f in Cerema.get_art_field(\"2011\", \"2020\")}\n )\n return qs",
"def weights_by_category(self):\n cate_weights = {}\n for cate in self.unique_category:\n cate_weights[cate] = self.weights[self.category == cate].sum()\n return pd.Series(cate_weights, index=self.unique_category)",
"def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )",
"def histogram_decades(our_data):\n decade_dict = {}\n for album in our_data:\n decade = int(album['year'])//10\n if decade in decade_dict:\n decade_dict[decade] += 1\n else:\n decade_dict[decade] = 1\n return decade_dict"
]
| [
"0.644157",
"0.61237645",
"0.5664426",
"0.56131846",
"0.556375",
"0.55603516",
"0.5524103",
"0.5414242",
"0.5403029",
"0.5379142",
"0.53707236",
"0.5358697",
"0.53495306",
"0.5344111",
"0.5340817",
"0.5312425",
"0.53085023",
"0.53033715",
"0.5292981",
"0.5292524",
"0.5218506",
"0.519007",
"0.51885694",
"0.51549083",
"0.5130482",
"0.5107055",
"0.5105929",
"0.5101328",
"0.508634",
"0.5050996"
]
| 0.8110491 | 0 |
Return a dictionary of crime categories and list of coordinates. | def get_coordinates_by_category(self):
result = {}
for crime in self.crimes:
cat_name = crime.category.category_name.strip()
if cat_name in result:
result[cat_name].append((crime.latitude, crime.longitude))
else:
result[cat_name] = [(crime.latitude, crime.longitude)]
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_crimes_by_category(self):\n\n result = {}\n for crime in self.crimes:\n cat_name = crime.category.category_name\n if cat_name in result:\n result[cat_name] += 1\n else:\n result[cat_name] = 1\n \n return result",
"def total_crimes_in_bounds(user_coords):\n\n crimes_coords = {'crimes': []}\n\n # takes in user_coords point a and point b\n # in order to determine top left and bottom right coordinates.\n point_a = user_coords['point_a']\n point_b = user_coords['point_b']\n\n # compare latitude to see what's the top coord, tupleize\n # add 0.005 to latitude, and subtract 0.02 to longitude\n top_left_coord = {'lat': max(point_a['lat'], point_b['lat']) + 0.005,\n 'lng': min(point_a['lng'], point_b['lng']) - 0.02}\n\n # subtract 0.005 to latitude, and add 0.02 to longitude\n bottom_right_coord = {'lat': min(point_a['lat'], point_b['lat']) - 0.005,\n 'lng': max(point_a['lng'], point_b['lng']) + 0.02}\n\n # once the bounds are generated, we will want to do a query for all of the\n # geohashes that are within those bounds. Let's do that now.\n # some raw sql to get the center coords of geohash\n geohash_in_bounds_sql = \"SELECT *, \" + \\\n \"ST_AsText(ST_PointFromGeoHash(geohash)) AS lat_lng \" + \\\n \"FROM nyc_crimes_by_geohash \" + \\\n \"WHERE ST_Contains(\" + \\\n \"ST_MakeBox2D(\" + \\\n \"ST_Point(%f, %f), ST_Point(%f, %f)), ST_PointFromGeoHash(geohash));\" \\\n % (top_left_coord['lat'], top_left_coord['lng'],\n bottom_right_coord['lat'], bottom_right_coord['lng'])\n # execute the raw sql, there should be many\n geohash_in_bounds_query = db.engine.execute(geohash_in_bounds_sql).fetchall()\n\n for row in geohash_in_bounds_query:\n # strip the lat, lngs before putting them in\n # some string splitting to extract data\n location = row[4].strip(\"POINT(\").rstrip(\")\").split()\n latitude = location[0]\n longitude = location[1]\n\n format_loc_dict = {'latitude': latitude, 'longitude': longitude,\n 'total_crimes': row[2]}\n\n # append to crimes_coords inner list\n crimes_coords['crimes'].append(format_loc_dict)\n\n return crimes_coords",
"def continents_and_cities(self):\r\n list_all = col.defaultdict(list)\r\n for code, node in self.vertices.items():\r\n list_all[node.continent].append(node.name)\r\n return list_all",
"def get_categories(mapping):\n categories = []\n \n for idx, name in mapping.items(): \n temp = {'id':idx, 'name':name, 'supercategory':'NA'}\n categories.append(temp)\n \n return categories",
"def get_categories(self) -> tuple:\n return self.categories",
"def censuses(self, *scales: int) -> Mapping[int, Census]:\n return {\n # Map id to the whole census object\n census.id: census\n # The dict is created from a generating parsing the nodes\n for census in (\n Census.from_xml(node)\n # an XML node can be used as an iterator, where it yields children\n for node in self.shards_xml(\n \"census\",\n # we want to grab all the values, since a Census requires them\n mode=joined_parameter(\"score\", \"rank\", \"rrank\", \"prank\", \"prrank\"),\n # gets all the different stats for us\n scale=joined_parameter(*(str(scale) for scale in scales))\n if scales\n else \"all\",\n )[\"census\"]\n )\n }",
"def available_categories(self):\n return list(self.landmarks.keys())",
"def get_city_points(city):\n for item in coordinate_list:\n if item[0] == city:\n return (item[1], item[2])",
"def getNuclideCategories(self):\n if not self._nuclideCategories:\n coolantNuclides = set()\n fuelNuclides = set()\n structureNuclides = set()\n for c in self.iterComponents():\n # get only nuclides with non-zero number density\n # nuclides could be present at 0.0 density just for XS generation\n nuclides = [\n nuc for nuc, dens in c.getNumberDensities().items() if dens > 0.0\n ]\n if c.getName() == \"coolant\":\n coolantNuclides.update(nuclides)\n elif \"fuel\" in c.getName():\n fuelNuclides.update(nuclides)\n else:\n structureNuclides.update(nuclides)\n structureNuclides -= coolantNuclides\n structureNuclides -= fuelNuclides\n remainingNuclides = (\n set(self.parent.blueprints.allNuclidesInProblem)\n - structureNuclides\n - coolantNuclides\n )\n fuelNuclides.update(remainingNuclides)\n self._nuclideCategories[\"coolant\"] = coolantNuclides\n self._nuclideCategories[\"fuel\"] = fuelNuclides\n self._nuclideCategories[\"structure\"] = structureNuclides\n self.summarizeNuclideCategories()\n\n return (\n self._nuclideCategories[\"coolant\"],\n self._nuclideCategories[\"fuel\"],\n self._nuclideCategories[\"structure\"],\n )",
"def get_categories_group(self):\n m = {}\n for post in self:\n for cat in post.Categories:\n if cat not in m:\n m[cat] = []\n m[cat].append(post)\n return m",
"def CentralityPoint2D(graph, numberOfPoints, typePlot):\n points = dict()\n\n c_eigenvector = nx.katz_centrality(graph)\n c_eigenvector = heapq.nlargest(\n numberOfPoints, list(\n c_eigenvector.values()))\n max_eigenvector = max(c_eigenvector)\n points['Eigenvalues'] = c_eigenvector\n\n c_betweenness = nx.betweenness_centrality(graph)\n c_betweenness = heapq.nlargest(\n numberOfPoints, list(\n c_betweenness.values()))\n max_betweenness = max(c_betweenness)\n points['Betweenness'] = c_betweenness\n\n c_closeness = nx.closeness_centrality(graph)\n c_closeness = heapq.nlargest(numberOfPoints, list(c_closeness.values()))\n max_closeness = max(c_closeness)\n points['Closeness'] = c_closeness\n\n c_harmonic = nx.harmonic_centrality(graph)\n c_harmonic = heapq.nlargest(numberOfPoints, list(c_harmonic.values()))\n max_harmonic = max(c_harmonic)\n points['Harmonic'] = c_harmonic\n\n glCoe = GlobalClusteringCoefficient(graph)\n\n points['Mix'] = (max_eigenvector, max_harmonic, max_betweenness)\n points['Mix2'] = (max_eigenvector, glCoe, max_closeness)\n points['Mix3'] = (max_eigenvector, glCoe, max_harmonic)\n points['Mix4'] = (\n max_eigenvector,\n glCoe,\n SquareClusteringCoefficient(graph))\n\n return points[typePlot]",
"def __generate_dict_of_keys_to_classification__(self):\n dict_of_assigned_citations = {}\n # duplicating citation dataset to filter as matches go on meaning\n # it should result in quicker allocation\n # can be removed to reduce memory load at expense of speed\n list_of_unassigned = []\n for key in self.dict_of_keywords:\n list_of_current_key = []\n for citation_instance in self.array_of_citations:\n if key == citation_instance.get_classification():\n list_of_current_key.append(citation_instance)\n if \"Unassigned\" == citation_instance.get_classification():\n list_of_unassigned.append(citation_instance)\n dict_of_assigned_citations[key] = list_of_current_key\n dict_of_assigned_citations[\"Unassigned\"] = list_of_unassigned\n return dict_of_assigned_citations",
"def get_categories_enumerated_key_map(self):\n return dict(enumerate([c.name for c in self.categories]))",
"def get_subcat_axes():\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT category, sub_category, function, bitmask, gene_name\n FROM genome_rules\n ORDER by category, sub_category\n \"\"\"\n )\n rows = cursor.fetchall()\n subcats = {}\n for k_cat, g_cat in groupby(rows, lambda r: r[0]):\n subcats[k_cat] = {}\n for k_subcat, g_subcat in groupby(g_cat, lambda r: r[1]):\n subcats[k_cat][k_subcat] = []\n for genes in g_subcat:\n subcats[k_cat][k_subcat].append(\n (genes[2], genes[3], genes[4])\n )\n return subcats",
"def get_species_list() -> list:\n c2h2_xyz = {'symbols': ('C', 'C', 'H', 'H'), 'isotopes': (12, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.203142), (0.0, -0.0, 2.265747), (-0.0, -0.0, -1.062605))}\n ch4_xyz = {'symbols': ('C', 'H', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.08744517), (1.02525314, 0.0, -0.36248173),\n (-0.51262658, 0.88789525, -0.36248173), (-0.51262658, -0.88789525, -0.36248173))}\n co2_xyz = {'symbols': ('C', 'O', 'O'), 'isotopes': (12, 16, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1594846), (0.0, 0.0, -1.1594846))}\n co_xyz = {'symbols': ('O', 'C'), 'isotopes': (16, 12), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12960815))}\n f2_xyz = {'symbols': ('F', 'F'), 'isotopes': (19, 19), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.3952041))}\n ch2o_xyz = {'symbols': ('O', 'C', 'H', 'H'), 'isotopes': (16, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.674622), (0.0, 0.0, -0.529707),\n (0.0, 0.935488, -1.109367), (0.0, -0.935488, -1.109367))}\n h2o_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.95691441), (0.92636305, 0.0, -0.23986808))}\n h2_xyz = {'symbols': ('H', 'H'), 'isotopes': (1, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.74187646))}\n hcn_xyz = {'symbols': ('C', 'N', 'H'), 'isotopes': (12, 14, 1),\n 'coords': ((0.0, 0.0, -0.500365), (0.0, 0.0, 0.65264), (0.0, 0.0, -1.566291))}\n hf_xyz = {'symbols': ('F', 'H'), 'isotopes': (19, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.91538107))}\n n2o_xyz = {'symbols': ('N', 'N', 'O'), 'isotopes': (14, 14, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12056262), (0.0, 0.0, 2.30761092))}\n n2_xyz = {'symbols': ('N', 'N'), 'isotopes': (14, 14), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.09710935))}\n nh3_xyz = {'symbols': ('N', 'H', 'H', 'H'), 'isotopes': (14, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.11289), (0.0, 0.938024, -0.263409),\n (0.812353, -0.469012, -0.263409), (-0.812353, -0.469012, -0.263409))}\n oh_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.967))}\n cl2_xyz = {'symbols': ('Cl', 'Cl'), 'isotopes': (35, 35), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1))}\n\n c2h2 = ARCSpecies(label='C2H2', smiles='C#C', multiplicity=1, charge=0)\n c2h2.initial_xyz = c2h2_xyz\n\n ch4 = ARCSpecies(label='CH4', smiles='C', multiplicity=1, charge=0)\n ch4.initial_xyz = ch4_xyz\n\n co2 = ARCSpecies(label='CO2', smiles='O=C=O', multiplicity=1, charge=0)\n co2.initial_xyz = co2_xyz\n\n co = ARCSpecies(label='CO', smiles='[C-]#[O+]', multiplicity=1, charge=0)\n co.initial_xyz = co_xyz\n\n f2 = ARCSpecies(label='F2', smiles='[F][F]', multiplicity=1, charge=0)\n f2.initial_xyz = f2_xyz\n\n ch2o = ARCSpecies(label='CH2O', smiles='C=O', multiplicity=1, charge=0)\n ch2o.initial_xyz = ch2o_xyz\n\n h2o = ARCSpecies(label='H2O', smiles='O', multiplicity=1, charge=0)\n h2o.initial_xyz = h2o_xyz\n\n h2 = ARCSpecies(label='H2', smiles='[H][H]', multiplicity=1, charge=0)\n h2.initial_xyz = h2_xyz\n\n hcn = ARCSpecies(label='HCN', smiles='C#N', multiplicity=1, charge=0)\n hcn.initial_xyz = hcn_xyz\n\n hf = ARCSpecies(label='HF', smiles='F', multiplicity=1, charge=0)\n hf.initial_xyz = hf_xyz\n\n n2o = ARCSpecies(label='N2O', smiles='[N-]=[N+]=O', multiplicity=1, charge=0)\n n2o.initial_xyz = n2o_xyz\n\n n2 = ARCSpecies(label='N2', smiles='N#N', multiplicity=1, charge=0)\n n2.initial_xyz = n2_xyz\n\n nh3 = ARCSpecies(label='NH3', smiles='N', multiplicity=1, charge=0)\n nh3.initial_xyz = nh3_xyz\n\n oh = 
ARCSpecies(label='OH', smiles='[OH]', multiplicity=2, charge=0)\n oh.initial_xyz = oh_xyz\n\n cl2 = ARCSpecies(label='Cl2', smiles='[Cl][Cl]', multiplicity=1, charge=0)\n cl2.initial_xyz = cl2_xyz\n\n species_list = [c2h2, ch4, co2, co, f2, ch2o, h2o, h2, hcn, hf, n2o, n2, nh3, oh, cl2]\n\n return species_list",
"def extract_data(cls):\n parsed_data = cls.clean_data()\n product_rows = []\n product_categories_dict = {}\n\n for category in parsed_data:\n for product in parsed_data.get(category):\n product_barcode = product.get(\"code\")\n\n # Define product attribute values list\n product_attribute_list = [product.get(\n attribute) for attribute in api.product_characteristics]\n product_rows.append(product_attribute_list)\n\n # Define a (barcode, categories) dictionary structure\n product_subcategories = product.get(\"categories_hierarchy\")\n product_categories = list(\n set(product_subcategories + [category]))\n product_categories_dict[product_barcode] = product_categories\n\n return product_rows, product_categories_dict",
"def get(self,item: str) -> List[Dict[EnumShapeCategories, Union[str, List]]]:\n polygons = []\n for i,polygon in enumerate(self.dataset[item]):\n label:int = self.attr_mapping[polygon[EnumShapeCategories.Label]]\n color_code = \"#\"+f\"{label:02x}\"*3 # conversion to hexadecimal color (#FFFFFF for white for instance)\n polygons.append({EnumShapeCategories.Label:color_code,EnumShapeCategories.Points:polygon[EnumShapeCategories.Points]})\n return polygons",
"def _make_category_groups(data_struct):\n groups = {}\n for cat in set(data_struct[\"Objects\"]): \n \n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n for dn in data_names:\n for idx in indices:\n groups[cat] = data_struct[dn][idx]\n return(groups)",
"def get_categories(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get payees from database\n cur.execute(\"SELECT * FROM categories\")\n cats_data = cur.fetchall()\n\n # convert into a list of payee dictionaries\n cats_list = []\n [cats_list.append({'category_id': cat[0],\n 'parent_id': cat[1],\n 'category_name': cat[2]})\n for cat in cats_data]\n\n # close the cursor\n self.close_cursor()\n\n return cats_list",
"def site_to_category():\n return {\"UNEW\": 1, \"USFD\": 2, \"CAU\": 3, \"TASMC\": 4, \"RBMF\": 5}",
"def categories(self):\n\t\treturn (sorted(self.dictData.keys()))",
"def read_cliffs(self):\n cliff_list = Cliff.list()\n rtn = {}\n\n for clf in cliff_list:\n rtn[clf] = self.read_cliff(clf)\n\n return rtn",
"def density_based_cluster(R, clusters):\n c_points = {}\n i = 0\n for cluster in clusters:\n points = set()\n for attr in cluster:\n for point in R[attr]:\n points.add(point)\n c_points[i] = points\n i += 1\n return c_points",
"def get_clusters(nombre):\n lon, lat = mydic[nombre][\"lon\"], mydic[nombre][\"lat\"]\n scaled_lon = scaler_lon.transform(np.array(lon).reshape(-1, 1))\n scaled_lat = scaler_lat.transform(np.array(lat).reshape(-1, 1))\n clusters = kmeans.predict(\n pd.DataFrame({\"x\": [l for l in scaled_lat], \"y\": [l for l in scaled_lon]})\n )\n return clusters",
"def assign_data2clusters(X, C):\n\n d_map = []\n for i in range(len(X)):\n\n d = []\n t = [0] * len(C)\n\n for c in C:\n d.append(get_distance(X.iloc[i], c))\n\n min_id = d.index(min(d))\n t[min_id] = min_id\n d_map.append(t)\n\n return d_map",
"def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score",
"def categories(self) -> List[Category]:\n return list(set(self.mapping.values()))",
"def formatting_cid_id_clusters(cid_id_list, other_id):\n # key: cid, value: list of ocns [ocn1, ocn2]\n cid_ids_dict = {}\n\n if cid_id_list:\n for cid_id in cid_id_list:\n cid = cid_id.get(\"cid\")\n id = cid_id.get(other_id)\n if cid in cid_ids_dict:\n cid_ids_dict[cid].append(id)\n else:\n cid_ids_dict[cid] = [id]\n\n return cid_ids_dict",
"def cloth_category(cloth_txt):\n category_cloth = {}\n linecount = 0\n with open(cloth_txt, 'r') as file:\n for linetext in file:\n line = linetext.rstrip(' \\n')\n if linecount > 1:\n line_attributes = line.split(\" \")\n category_cloth.update({line_attributes[0]: (linecount - 2, int(line_attributes[-1]))})\n linecount += 1\n return category_cloth",
"def get_coordinates(self):\n x_houses = []\n y_houses = []\n\n x_batt = []\n y_batt = []\n\n # turn dict to list so we can iterate through\n houses_list = list(self.houses.values())\n batteries_list = list(self.batteries.values())\n\n # for every house save coordinates to lists\n for house in houses_list:\n x_houses.append(house.x)\n y_houses.append(house.y)\n\n # for every battery save coordinates to lists\n for battery in batteries_list:\n x_batt.append(battery.x)\n y_batt.append(battery.y)\n\n return x_houses, y_houses, x_batt, y_batt"
]
| [
"0.6538004",
"0.603138",
"0.5898592",
"0.58867985",
"0.5820991",
"0.56723803",
"0.5661209",
"0.55859613",
"0.5483333",
"0.5474434",
"0.54702455",
"0.54404277",
"0.5436366",
"0.54201627",
"0.53648657",
"0.5358755",
"0.531535",
"0.52981925",
"0.529552",
"0.528204",
"0.5281181",
"0.5258838",
"0.52465296",
"0.5224519",
"0.52161163",
"0.52129084",
"0.5206699",
"0.52050495",
"0.5202562",
"0.5198779"
]
| 0.8434634 | 0 |
return the route id | def get_route_id(self):
return self.route_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RouteID(self):\n route_id = self.SR\n if self.RRT:\n route_id += self.RRT\n if self.RRQ:\n route_id += self.RRQ\n return route_id",
"def route_map_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"route_map_id\")",
"def route_idx(self):\n return self._route_idx",
"def route_map_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"route_map_id\")",
"def route_table_id(self):\n return self._route_table_id",
"def transit_router_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_id\")",
"def _get_id(self):\n return self.id",
"def getID():",
"def transit_router_route_table_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_route_table_id\")",
"async def get_id(self, tag_name):\n response = await self.describe(tag_name)\n if response['RouteTables']:\n return response['RouteTables'][0][\"RouteTableId\"]\n else:\n raise RtbDoesntExists",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def getid(data):\n return int(data.split('/')[-1])",
"def get_route_by_id(self, route_id):\n route = self.admin_repository.get_route_by_id(route_id)\n if route:\n print('''Route Id: {}\\nRoute: {}\\n\n '''.format(route[0], route[1]))\n return route\n else:\n print(\"Invalid Route Id\")\n return False",
"def getID(self) -> int:\n ...",
"def get_id(self, url):\n return url.split('/')[-1]",
"def transit_router_attachment_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_attachment_id\")",
"def transit_router_route_table_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_route_table_id\")",
"def transit_router_route_table_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_route_table_id\")",
"def id(self):\n return self.getattr('id')",
"def get_id(self):\n return self.get_api_endpoint()",
"def get_id(self, resource):\n try:\n return resource.href.split('/')[-1]\n except AttributeError:\n return resource['href'].split('/')[-1]",
"def get_id(self):\n return self.id"
]
| [
"0.7587031",
"0.7087121",
"0.694256",
"0.690186",
"0.68969196",
"0.67987394",
"0.67987394",
"0.67798674",
"0.67798674",
"0.6695325",
"0.6695325",
"0.66084903",
"0.66054034",
"0.6564537",
"0.6547674",
"0.65456295",
"0.65456295",
"0.65456295",
"0.65456295",
"0.6486264",
"0.6469691",
"0.64648527",
"0.6463187",
"0.64564997",
"0.6433649",
"0.6433649",
"0.6353711",
"0.6349259",
"0.63464206",
"0.63457483"
]
| 0.85870904 | 0 |
Make causal mask used for bidirectional self-attention. | def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_attention_mask_3d_causal(source_mask, target_mask):\n causal_mask = make_inference_history_mask_3d(target_mask)\n mask = make_attention_mask_3d(source_mask, target_mask)\n mask = mask * causal_mask\n # invert mask for Megatron\n return mask < 0.5",
"def get_causal_mask(model_dim):\n attn_shape = (1, model_dim, model_dim)\n mask = torch.triu(torch.ones(attn_shape, dtype=torch.uint8), diagonal=1) # 1 for subsequent positions\n return mask == 0 # True for attending positions",
"def causal_attention_mask(nd, ns, dtype):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n return tf.cast(m, dtype)",
"def _to_bert_self_attention_mask(matrix):\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [batch_size, 1, 1])\n return matrix",
"def mask(self):",
"def make_attention_mask(source_block, target_block):\n mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)\n mask = mask.astype(np.int64)\n # (source_length, target_length)\n return mask",
"def _to_bert_encdec_attention_mask(matrix):\n if self.multi_channel_cross_attention:\n matrix = tf.expand_dims(matrix, axis=2)\n matrix = tf.tile(matrix, [1, 1, decoder_length, 1])\n else:\n matrix = tf.squeeze(matrix, axis=[1])\n matrix = tf.tile(matrix, [1, decoder_length, 1])\n return matrix",
"def causal_attention(queries, keys, values, scale: bool = True):\n mask_shape = (queries.shape[0], keys.shape[0])\n mask = np.triu(np.full(mask_shape, fill_value=-np.inf), k=1)\n attention_score = dp_attention(queries, keys, values, mask, scale=scale)\n return attention_score",
"def actual_causation():\n # fmt: off\n tpm = np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n ])\n cm = np.array([\n [1, 1],\n [1, 1],\n ])\n # fmt: on\n return Network(tpm, cm, node_labels=('OR', 'AND'))",
"def cbam_block(cbam_feature, ratio=8):\n\n cbam_feature = channel_attention(cbam_feature, ratio)\n cbam_feature = spatial_attention(cbam_feature)\n return cbam_feature",
"def gen_attention_mask(q_seq_len, k_seq_len):\n return torch.triu(\n torch.full((q_seq_len, k_seq_len), float('-inf')),\n diagonal=1)",
"def cbam_block(cbam_feature, ratio=8):\n\n\tcbam_feature = channel_attention(cbam_feature, ratio)\n\tcbam_feature = spatial_attention(cbam_feature)\n\treturn cbam_feature",
"def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask",
"def cmask(self):\n mask = np.zeros(18)\n if 'full' in self.CONS: mask[:] = 1\n if 'f0' in self.CONS: mask[0] = 1\n if 'f1' in self.CONS: mask[1:4] = 1\n if 'f2' in self.CONS: mask[4:10] = 1\n if 'vx' in self.CONS: mask[10] = 1\n if 'vy' in self.CONS: mask[11] = 1\n if 'vz' in self.CONS: mask[12] = 1\n if 'TG' in self.CONS: mask[13:18] = 1\n return mask>0",
"def Mask(self) -> int:",
"def build_attention_mask(input_ids): \n attention_masks = [] \n\n # 1 for input and 0 for pad\n for seq in input_ids: \n attention_masks.append([float(i>0) for i in seq])\n\n return attention_masks",
"def cbam_block(cbam_feature, ratio=8, name=None):\n cbam_feature = channel_attention(cbam_feature, ratio, name=name + '_channel')\n cbam_feature = spatial_attention(cbam_feature, name=name + '_spatial')\n\n return cbam_feature",
"def make_inference_attention_mask_3d(source_block, target_block, pad_id):\n # mask = (target_block[:, None, :] != pad_id) * (source_block[:, :, None] != pad_id)\n return make_attention_mask_3d(source_block != pad_id, target_block != pad_id)",
"def set_mask_continuum(self, _=None):\n self.set_mask_type(\"cont\")",
"def _compute_causal_padding(self):\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n\n if self.data_format == 'channels_last':\n if self.rank == 1:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n elif self.rank == 2:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0]]\n elif self.rank == 3:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0], [0, 0]]\n else:\n raise ValueError()\n return causal_padding\n else:\n raise ValueError('No support for NCHW yet')",
"def causative(self, polite=False, positive=False, kanji=False):\n if self.group == 'ichidan':\n base = self.kanji if kanji else self.kana\n base = re.sub('る$', 'さ', base)\n else:\n nai_form = self._nai(kanji=kanji)\n base = re.sub('ない$', '', nai_form)\n if polite:\n suffix = 'せます' if positive else 'せません'\n else:\n suffix = 'せる' if positive else 'せない'\n return base + suffix",
"def map_caesar(key, plaintext):\n letters = string.ascii_lowercase\n mask = letters[key:] + letters[:key]\n transtab = str.maketrans(letters, mask)\n return plaintext.translate(transtab)",
"def make_fc_mask(self) -> Tensor:\n\n mask = torch.ones(self.num_experts, self.num_experts, device=self.device)\n mask = mask - torch.eye(self.num_experts, device=self.device)\n mask = mask / (self.num_experts - 1)\n\n mask = mask.view(1, self.num_experts, self.num_experts, 1)\n mask = mask.expand(self.batch_size, -1, -1, self.hidden_size)\n\n return mask",
"def make_attention_mask_3d(source_mask, target_mask):\n mask = target_mask[:, None, :] * source_mask[:, :, None]\n return mask",
"def attention_mask(x):\n mask = torch.zeros(len(x), len(x[0]))\n for i in range(len(x)):\n try:\n index = np.where(x[i]==1)[0][0]\n mask[i][index:] = -np.inf\n except:\n pass\n return mask",
"def add_mask_layer(self):\n return Masking(mask_value=self.mask_value, input_shape=(self.max_sequence_size, 1))",
"def setCollidemask(self):\n friends = self.createFriendlyEmpireList()\n mask = 0\n for id in friends:\n id = int(id)\n mask = mask + (1<<id)\n self.collidemask = mask",
"def make_subsequent_mask(embedding_sequence: torch.Tensor) -> torch.Tensor:\n sequence_length = embedding_sequence.size(1)\n attn_shape = (sequence_length, sequence_length)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype(\"uint8\")\n subsequent_mask = torch.from_numpy(subsequent_mask) == 0\n subsequent_mask = subsequent_mask.byte()\n subsequent_mask = subsequent_mask.to(embedding_sequence.device)\n return subsequent_mask",
"def one_mask(self):\n accum = 0\n for i in range(self.data.itemsize):\n accum += (0xAA << (i << 3))\n return accum",
"def build_attention_mask_3d(source_mask, target_mask, attn_mask_type):\n if attn_mask_type == AttnMaskType.padding:\n mask = build_attention_mask_3d_padding(source_mask, target_mask)\n elif attn_mask_type == AttnMaskType.causal:\n mask = build_attention_mask_3d_causal(source_mask, target_mask)\n else:\n raise ValueError(f\"Unsupported attention mask attn_mask_type = {attn_mask_type}\")\n\n return mask"
]
| [
"0.6701625",
"0.5880225",
"0.5806054",
"0.5805493",
"0.5771913",
"0.5734302",
"0.5636548",
"0.56013423",
"0.556381",
"0.54872257",
"0.54100525",
"0.5408892",
"0.53997034",
"0.53633064",
"0.53322333",
"0.5319295",
"0.5303393",
"0.5284447",
"0.5255125",
"0.5236887",
"0.52152634",
"0.51720446",
"0.5145546",
"0.5119048",
"0.51073414",
"0.50954294",
"0.50776917",
"0.5074204",
"0.50689745",
"0.50613856"
]
| 0.62958115 | 1 |
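
The `_make_causal_mask` document above builds an additive mask: allowed positions hold 0, blocked positions hold the dtype's most negative value, and cached (past) positions stay visible. A minimal, self-contained sketch of the same masking logic, using hypothetical sizes rather than anything from the dataset:

import torch

# Minimal sketch mirroring the _make_causal_mask record above: build a 4x4
# causal mask with one step of cached context (past_key_values_length=1).
tgt_len, past = 4, 1
dtype = torch.float32
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
cond = torch.arange(tgt_len)
# Zero the positions each query may attend to (itself and earlier positions).
mask.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
# Cached (past) positions are always visible, so prepend zero columns.
mask = torch.cat([torch.zeros(tgt_len, past, dtype=dtype), mask], dim=-1)
print(mask)  # 0.0 where attention is allowed, dtype-min elsewhere
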
Rotates half the hidden dims of the input. | def rotate_half(x):
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fix_dimension(self, rot: tf.Tensor) -> tf.Tensor:\n even_n = [i for i in range(0, self.circuit_model.nqubit * 2, 2)]\n odd_n = [i for i in range(1, self.circuit_model.nqubit * 2, 2)]\n perm = even_n + odd_n\n rot = tf.transpose(rot, perm=perm)\n rot = tf.reshape(rot, shape=(2 ** self.circuit_model.nqubit, 2 ** self.circuit_model.nqubit))\n return rot",
"def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )",
"def rotate90(self):",
"def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()",
"def rotate(self):\n pass",
"def mirror(img):\n return img[:, ::-1]",
"def orient_img_hwd(data, slice_axis):\n if slice_axis == 0:\n return data.transpose(2, 1, 0)\n elif slice_axis == 1:\n return data.transpose(2, 0, 1)\n elif slice_axis == 2:\n return data",
"def rotate(self):\n tmp = self.width\n self.width = self.height\n self.height = tmp\n self.rotated = not self.rotated",
"def rotate(X):\n return X",
"def steerright(self):\n self.direction = self.direction-self.steering\n if self.direction < 0:\n self.direction = 360-90\n self.image, self.rect = rot_center(self.image_orig,self.rect,self.direction)",
"def rotate(q, v):\n if v.ndim == 1:\n qv = np.append(v,0)\n else:\n qv = np.hstack([v,np.zeros((len(v),1))])\n out = mult(q,qv)\n out = mult(out, inv(q))\n return out[:,:3]",
"def _spin(self):\n center= self.rect.center\n self.dizzy= self.dizzy + 10 #12\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original\n else:\n rotate= pygame.transform.rotate\n self.image= rotate(self.original, self.dizzy)\n self.rect= self.image.get_rect(center= center)",
"def change_orientation(self):\n self.shape = self.shape.T",
"def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")",
"def flip(h):\n return np.flip(h)",
"def flip(h):\n return np.flip(h)",
"def rotate_inplace(arr):\n\n # Base case: array if empty or 1 x 1\n if arr.size < 2:\n return arr\n\n # Check that array is square\n m,n = arr.shape\n assert(m == n)\n\n # Rotate the outside layer\n for i in range(n-1):\n \n # top = arr[0,i]\n # right = arr[i,n-1]\n # bottom = arr[n-1,n-1-i]\n # left = arr[n-1-i,0]\n\n temp = arr[i,n-1] # save right\n arr[i,n-1] = arr[0,i] # top to right\n arr[0,i] = arr[n-1-i,0] # left to top\n arr[n-1-i,0] = arr[n-1,n-1-i] # bottom to left\n arr[n-1,n-1-i] = temp # right to bottom\n \n # Recursively rotate the other layers\n rotate_inplace(arr[1:n-1,1:n-1])\n\n return arr",
"def pad_distort_im_fn(x,output_size=None):\n \n assert len(output_size) == 3\n b = np.zeros(output_size)\n height = output_size[0]\n width = output_size[1]\n o = int((height-28)/2)\n w = int((width-28)/2)\n b[o:o+28, w:w+28] = x\n x = b\n x = rotation(x, rg=30, is_random=True, fill_mode='nearest')\n x = shear(x, 0.05, is_random=True, fill_mode='nearest')\n x = shift(x, wrg=0.25, hrg=0.25, is_random=True, fill_mode='nearest')\n x = zoom(x, zoom_range=(0.95, 1.05))\n return x",
"def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir",
"def _spin(self):\n center = self.rect.center\n self.dizzy += 12 # rotate 12 degree clockwise\n\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original # reset the image to its original ones after rotated\n else:\n self.image = pygame.transform.rotate(self.original, self.dizzy)\n\n self.rect = self.image.get_rect()\n self.rect.center = center # make sure the image would not move when spinning",
"def rotate(volume):\n\n def scipy_rotate(volume_):\n angles = [-20, -10, -5, 5, 10, 20]\n angle = random.choice(angles)\n volume_ = ndimage.rotate(input=volume_, angle=angle, reshape=False)\n volume_[volume_ < 0] = 0\n volume_[volume_ > 1] = 1\n\n return volume_\n\n augmented_volume = tf.numpy_function(scipy_rotate, [volume], tf.float32)\n\n return augmented_volume",
"def rotation_inv(R: np.array) -> np.array:\n return R.T",
"def flip_rotate(img):\r\n\r\n choice = int(8*np.random.rand())\r\n \r\n if choice == 0:\r\n return img\r\n if choice == 1:\r\n return np.rot90(img, 1)\r\n if choice == 2:\r\n return np.rot90(img, 2)\r\n if choice == 3:\r\n return np.rot90(img, 3)\r\n if choice == 4:\r\n return np.flip(img, 0)\r\n if choice == 5:\r\n return np.flip(img, 1)\r\n if choice == 6:\r\n return np.flip(np.rot90(img, 1), 0)\r\n if choice == 7:\r\n return np.flip(np.rot90(img, 1), 1)",
"def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')",
"def _rotate_image_90(self, img: ndarray, k: int) -> ndarray:\n if img.shape[0] < img.shape[1]:\n self.y = np.rot90(img, k)\n return self.y\n else:\n return img",
"def _r270(self,m):\n return np.rot90(m,3)",
"def rotate(self, m):\n n = len(m)\n for i in range(n//2):\n for j in range(i,n-i-1):\n m[j][~i],m[~i][~j],m[~j][i],m[i][j] = \\\n m[i][j],m[j][~i],m[~i][~j],m[~j][i]",
"def _maybe_rotate_dims(self, x, rotate_right=False):\n needs_rotation_const = tensor_util.constant_value(self._needs_rotation)\n if needs_rotation_const is not None and not needs_rotation_const:\n return x\n ndims = array_ops.rank(x)\n n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims\n return array_ops.transpose(\n x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))",
"def _augment(img):\r\n return flip(img, axis=2)",
"def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n # o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], [ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r"
]
| [
"0.6447909",
"0.5587377",
"0.5508792",
"0.54771554",
"0.5473802",
"0.5468499",
"0.54290074",
"0.54104304",
"0.5391904",
"0.5381794",
"0.5374847",
"0.536035",
"0.53580415",
"0.5299022",
"0.5298043",
"0.5298043",
"0.5251104",
"0.52426225",
"0.5236352",
"0.5229538",
"0.5224368",
"0.5223207",
"0.5212485",
"0.5208778",
"0.5208546",
"0.51984394",
"0.5172968",
"0.51688534",
"0.5162228",
"0.5146807"
]
| 0.64703625 | 0 |
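
The `rotate_half` document above is the helper that rotary position embeddings apply to query/key vectors. A small worked example with illustrative input values (not taken from the dataset):

import torch

def rotate_half_demo(x):
    # Same operation as the rotate_half document above: split the last
    # dimension in half and swap the halves, negating the second one.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

print(rotate_half_demo(torch.tensor([1.0, 2.0, 3.0, 4.0])))
# tensor([-3., -4.,  1.,  2.])
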
Returns a gaussian random number | def _get_gaussian_random(self):
u1 = generateRandom()
u2 = generateRandom()
if u1 < 1e-6:
u1 = 1e-6
return sqrt(-2 * log(u1)) * cos(2 * pi * u2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y",
"def rand_gauss(n=100, mu=[1, 1], sigma=[0.1, 0.1]):\n d = len(mu)\n res = np.random.randn(n, d)\n return np.array(res * sigma + mu)",
"def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)",
"def random_normal():\r\n return inverse_normal_cdf(random.random())",
"def gaussian(var):\n stddev = np.sqrt(var)\n return stats.norm(0, stddev)",
"def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)",
"def random_normal():\n return inverse_normal_cdf(random.random())",
"def generate_gaussian_random_number(mean=0.0, variance=1.0, size=1):\n\n gaussian_array = np.random.normal(mean, variance, size)\n\n return gaussian_array",
"def get_value(self) -> float:\n return random.gauss(self._mu, self._sigma)",
"def normal(mean, std):\n\n return random.gauss(mean, std)",
"def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)",
"def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r",
"def gauss_sample(num, stdev):\n sample = np.random.normal(0, stdev, num)\n sample = sample.round().astype(int)\n return sample",
"def rand_sample_gauss():\n mean = float(NUM_UNIQUE_VALUES + 1) / 2\n while True:\n r = random.normalvariate(mean, DIST_PARAM)\n value = int(round(r))\n # Rejection sampling to cut off Gaussian to within [1, NUM_UNIQUE_VALUES]\n if 1 <= value <= NUM_UNIQUE_VALUES:\n break\n\n return value # true client value",
"def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))",
"def random_glove_generator(emb_mean, emb_stddev):\n x = np.random.normal(loc=0.0, scale=1.0, size=len(emb_mean))\n x_rand = np.multiply(x, emb_stddev) + emb_mean\n return x_rand",
"def gaussianDist(self, x, mu, var):\n val = 1/(math.sqrt(2 * math.pi * var)) * math.exp(-1 * (x - mu)**2 / (2*var))\n return val",
"def gaussianOneSample(self, mu=0., sig=1., size=1):\n\n thisSample = np.random.normal(mu, sig, size)\n\n return thisSample",
"def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)",
"def sampleGaussian(self, mu, log_sigma):\n # reparameterization trick\n epsilon = tf.random_normal(tf.shape(log_sigma), name=\"epsilon\")\n return mu + epsilon * tf.exp(log_sigma) # N(mu, I * sigma**2)",
"def random_gaussian(img, mu=0.0, sigma=4.0):\n\n out = np.copy(img.astype(np.float))\n rows, cols, depth = img.shape\n noise = np.random.normal(mu, sigma, (rows, cols))\n for dim in range(depth):\n out[:, :, dim] = img[:, :, dim] + noise\n out[out > 255] = 255\n out[out < 0] = 0\n out = out.astype(np.uint8)\n\n return out",
"def convert_gaussian_random(g, op, block):\n\n mean = op.attr(\"mean\")\n std = op.attr(\"std\")\n shape = op.attr(\"shape\")\n seed = op.attr(\"seed\")\n dtype = op.attr(\"dtype\")\n dtype = _convert_dtype_value(dtype)\n out = _op.random.normal(key=seed, shape=shape, dtype=dtype, mean=mean, scale=std)\n g.add_node(op.output(\"Out\")[0], out)",
"def get_standard_normal_distribution():\n return np.random.normal(0, 1)",
"def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)))",
"def calculateGaussian(x, mean, stdev):\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)",
"def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background",
"def gaussian(gp_link=None, variance=2, D=None, N=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Identity()\r\n analytical_mean = True\r\n analytical_variance = True # ?\r\n return noise_models.gaussian_noise.Gaussian(gp_link, analytical_mean,\r\n analytical_variance, variance=variance, D=D, N=N)",
"def random():\r\n return R.NextDouble()",
"def gaussian(\n shape: Iterable[int],\n mean: float = 0,\n std: float = 1,\n comp_node: Optional[CompNode] = None,\n comp_graph: Optional[CompGraph] = None,\n) -> Tensor:\n comp_node, comp_graph = _use_default_if_none(comp_node, comp_graph)\n seed = _random_seed_generator().__next__()\n return mgb.opr.gaussian_rng(\n shape, seed=seed, mean=mean, std=std, comp_node=comp_node, comp_graph=comp_graph\n )",
"def gaussian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * np.exp(-4 * np.log(2) * ((x - x0) / fwhm) ** 2)"
]
| [
"0.76417845",
"0.74124026",
"0.74117035",
"0.7400872",
"0.73885965",
"0.73865706",
"0.7355771",
"0.73550445",
"0.7354002",
"0.7345279",
"0.7341788",
"0.7283857",
"0.72462964",
"0.722161",
"0.72148955",
"0.71684784",
"0.7155654",
"0.70906794",
"0.69348806",
"0.6895141",
"0.68883425",
"0.685776",
"0.6846629",
"0.6815515",
"0.6759583",
"0.6751452",
"0.672991",
"0.6712667",
"0.66905725",
"0.6685373"
]
| 0.8234892 | 0 |
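
The `_get_gaussian_random` document above is the Box-Muller transform: two uniform draws become one standard-normal draw. A self-contained sketch of the same math, where `random.random` stands in for the record's `generateRandom()` (assumed to be a uniform [0, 1) source):

import random
from math import sqrt, log, cos, pi

def gaussian_sample(uniform=random.random):
    # Box-Muller transform, as in the _get_gaussian_random document above.
    # `uniform` stands in for the record's generateRandom(), assumed to
    # return a float in [0, 1).
    u1 = max(uniform(), 1e-6)  # clamp away from 0 so log(u1) stays finite
    u2 = uniform()
    return sqrt(-2 * log(u1)) * cos(2 * pi * u2)

print([round(gaussian_sample(), 3) for _ in range(5)])  # ~N(0, 1) draws
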
Returns the displacement texture, storing the 3D displacement in the RGB channels | def get_displacement_texture(self):
return self.displacement_tex | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c",
"def GetOutTextureCoord(self):\n ...",
"def ci(x, y, z):\n\n return (x * 16 + z) * CHUNK_HEIGHT + y",
"def GetInTextureCoord(self):\n ...",
"def rgb_to_xyz(image: tf.Tensor) -> tf.Tensor:\n r, g, b = tf.unstack(image, axis=-1)\n var_r = r / 255\n var_g = g / 255\n var_b = b / 255\n\n var_r = tf.where(var_r > 0.04045, tf.pow((var_r + 0.055) / 1.055, 2.4),\n var_r / 12.92)\n var_g = tf.where(var_g > 0.04045, tf.pow((var_g + 0.055) / 1.055, 2.4),\n var_g / 12.92)\n var_b = tf.where(var_b > 0.04045, tf.pow((var_b + 0.055) / 1.055, 2.4),\n var_b / 12.92)\n var_r = var_r * 100\n var_g = var_g * 100\n var_b = var_b * 100\n\n x = var_r * 0.4124 + var_g * 0.3576 + var_b * 0.1805\n y = var_r * 0.2126 + var_g * 0.7152 + var_b * 0.0722\n z = var_r * 0.0193 + var_g * 0.1192 + var_b * 0.9505\n\n image_xyz = tf.stack([x, y, z], axis=-1)\n return image_xyz",
"def generate_lut(self):\n colormap = self.get_colormap()\n\n if self.test:\n self.print_colormap(self.name, colormap)\n\n return self.generate_spi3d_from_evs(colormap)",
"def get_map_3d_tex(self, size, filename = None, charPos = None):\n mod = self.world_size / size\n image = PNMImage(size, size)\n for x in xrange(size):\n for y in xrange(size):\n px = x * mod\n py = y * mod\n height = self[px, py]\n if height <= 0:\n color = (abs(height) / 50) + 50\n if color > 255:\n color = 255\n image.setPixel(x, y, (0, 0, 255-color))\n else:\n if height <= self.config.low_mount_level[1]:\n color = height / 20\n r = 0\n g = 50+color\n b = 0\n image.setPixel(x, y, (r, g, b))\n elif height > self.config.low_mount_level[1]:\n color = height / 50\n r = color\n g = color\n b = color\n if r > 255:\n r = 255\n if g > 255:\n r = 255\n if b > 255:\n b = 255\n image.setPixel(x, y, (r, g, b))\n\n if filename != None:\n image.write(filename)\n\n if charPos != None:\n charX, charY = charPos\n for x in xrange(-1, 2):\n for y in xrange(-1, 2):\n image.setPixel(int(charX/mod)+x, int(charY/mod)+y, (255, 0, 0))\n\n texture = Texture()\n texture.load(image)\n return texture",
"def getState(game):\n pixels = pygame.surfarray.array3d(game.screen)[:]\n pixels = np.array([pixels], dtype=float)\n\n # Here we will preprocess the pixel data\n bitsize = game.screen.get_bitsize() / 4\n pixels *= 1 / 2**bitsize # Normalize to [0..1]\n\n return pixels",
"def color_temp(self):\n return self._color_temp",
"def get_normal_texture(self):\n return self.normal_tex",
"def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py",
"def get_dress(self,stack=False):\r\n \"\"\"takes input rgb----> return PNG\"\"\"\r\n name = self.imageid\r\n file = cv2.imread(name)\r\n file = tf.image.resize_with_pad(file,target_height=512,target_width=512)\r\n rgb = file.numpy()\r\n file = np.expand_dims(file,axis=0)/ 255.\r\n seq = self.model.predict(file)\r\n seq = seq[3][0,:,:,0]\r\n seq = np.expand_dims(seq,axis=-1)\r\n c1x = rgb*seq\r\n c2x = rgb*(1-seq)\r\n cfx = c1x+c2x\r\n dummy = np.ones((rgb.shape[0],rgb.shape[1],1))\r\n rgbx = np.concatenate((rgb,dummy*255),axis=-1)\r\n rgbs = np.concatenate((cfx,seq*255.),axis=-1)\r\n if stack:\r\n stacked = np.hstack((rgbx,rgbs))\r\n return stacked\r\n else:\r\n return rgbs",
"def generate_lut(self):\n colormap = self.get_colormap()\n\n if self.block_type == \"equidistant\":\n ev_colormap = colors.colormap_to_ev_blocks_equidistant(colormap, self.exposure_values)\n if self.test:\n self.print_colormap(self.name, ev_colormap)\n return self.generate_spi3d_from_evs(ev_colormap)\n elif self.block_type == \"centered\":\n ev_colormap = colors.colormap_to_ev_blocks_centered(colormap, self.exposure_values)\n if self.test:\n self.print_colormap(self.name, ev_colormap)\n return self.generate_spi3d_from_evs(ev_colormap)\n elif self.block_type == \"stretched\":\n ev_colormap = colors.colormap_to_ev_blocks_stretched(colormap, self.exposure_values)\n if self.test:\n self.print_colormap(self.name, ev_colormap)\n return self.generate_spi3d_from_evs(ev_colormap)",
"def lab_to_xyz(image: tf.Tensor) -> tf.Tensor:\n l, a, b = tf.unstack(image, axis=-1)\n\n var_y = (l + 16) / 116\n var_x = a / 500 + var_y\n var_z = var_y - b / 200\n var_x = tf.where(tf.pow(var_x, 3) > 0.008856, tf.pow(var_x, 3),\n (var_x - 16 / 116) / 7.787)\n var_y = tf.where(tf.pow(var_y, 3) > 0.008856, tf.pow(var_y, 3),\n (var_y - 16 / 116) / 7.787)\n var_z = tf.where(tf.pow(var_z, 3) > 0.008856, tf.pow(var_z, 3),\n (var_z - 16 / 116) / 7.787)\n\n refx = 95.047\n refy = 100.00\n ref_z = 108.883\n\n x = var_x * refx\n y = var_y * refy\n z = var_z * ref_z\n xyz_image = tf.stack([x, y, z], axis=-1)\n return xyz_image",
"def translation_3D(img, trans_x, trans_y, trans_z, cval=0.):\n \n if trans_x > 0:\n img[trans_x:,...] = img[:-trans_x,...] \n img[:trans_x,...] = cval\n elif trans_x < 0:\n img[:trans_x,...] = img[-trans_x:,...] \n img[trans_x:,...] = cval\n \n if trans_y > 0:\n img[:,trans_y:,:,:] = img[:,:-trans_y,:,:] \n img[:,:trans_y,:,:] = cval\n elif trans_y < 0:\n img[:,:trans_y,:,:] = img[:,-trans_y:,:,:] \n img[:,trans_y:,:,:] = cval\n \n if trans_z > 0:\n img[...,trans_z:,:] = img[...,:-trans_z,:] \n img[...,:trans_z,:] = cval\n elif trans_z < 0:\n img[...,:trans_z,:] = img[...,-trans_z:,:] \n img[...,trans_z:,:,:] = cval\n \n return img",
"def generate_lut(self):\n colormap = self.get_colormap()\n\n if self.test:\n self.print_colormap(self.name, colormap)\n\n if self.centered:\n return self.generate_spi3d_from_colormap(colormap, centered=True)\n else:\n return self.generate_spi3d_from_colormap(colormap, centered=False)",
"def luminance(self):\n \n return (self.r + self.g + self.b) // 3",
"def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b",
"def render(self):\n np_img = np.array(self.prev_img, dtype=np.uint8)\n np_img = np.swapaxes(np_img, 0, 2)\n return np_img",
"def diffuse(self) -> float:\n return self.GetDiffuse()",
"def get_image(self, p, t, c, z):\n assert p in self.position_map.keys(), \\\n \"Position index {} doesn't exist in map\".format(p)\n pos = self.get_zarr(p)\n return pos[t, c, z]",
"def GetTextureDimensions(self):\n ...",
"def get_colors(self):\n x = np.linspace(0, 1, self.length)\n y = x**self.gamma\n\n value = np.linspace(0, 1, len(self.colors))\n r = np.interp(y, value, self.colors[:,0])\n g = np.interp(y, value, self.colors[:,1])\n b = np.interp(y, value, self.colors[:,2])\n\n return np.dstack((r, g, b)).reshape(len(r), 3).astype(np.uint8)",
"def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image",
"def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)",
"def template_zone(self):\n ratio = 32\n shape = [self.cam.height, self.cam.width]\n img = self.cam.frame[shape[0] / 2 - 3 * shape[0] / ratio:shape[0] / 2 + 3 * shape[0] / ratio,\n shape[1] / 2 - 3 * shape[1] / ratio:shape[1] / 2 + 3 * shape[1] / ratio]\n return img",
"def pix(fixture_position):\n virtual = proj.dot(fixture_position)\n return virtual[:2]/virtual[2]",
"def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y",
"def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])",
"def gradXY(image):\n\treturn image[:-1,:-1]-image[:-1,1:],image[:-1,:-1]-image[1:,:-1]"
]
| [
"0.57808304",
"0.5724655",
"0.5575158",
"0.55007845",
"0.5495631",
"0.5442849",
"0.53449386",
"0.53127766",
"0.52633506",
"0.5257021",
"0.52545923",
"0.52391744",
"0.5231013",
"0.5198924",
"0.51982945",
"0.51770604",
"0.51602405",
"0.51500976",
"0.5144836",
"0.51301616",
"0.511812",
"0.5105197",
"0.51035",
"0.51009434",
"0.50959533",
"0.5087562",
"0.50774944",
"0.50657994",
"0.50617445",
"0.50437576"
]
| 0.7183238 | 0 |
Returns the normal texture, storing the normal in world space in the RGB channels | def get_normal_texture(self):
return self.normal_tex | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_normal(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n u = np.array([1, 0])\n return np.dot(r, u)",
"def normal(self, uv):\n res = GeomLProp_SLProps(self.surface(), uv[0], uv[1], 1, 1e-9)\n if not res.IsNormalDefined():\n return (0, 0, 0)\n normal = geom_utils.gp_to_numpy(res.Normal())\n if self.reversed():\n normal = -normal\n return normal",
"def normal(self, position):\n return self._normal",
"def world_normal_images(self):\n if not hasattr(self, '_world_normal_images'):\n cam_normals = self.cam_normal_images.copy()\n cam2world = self.dodeca_cam2world.copy()\n world_normals = []\n for i in range(20):\n im_i = cam_normals[i, ...]\n # Normals are transformed by the inverse transpose\n cam2world_invt = np.linalg.inv(cam2world[i, ...]).T\n im_i = geom_util_np.apply_4x4(im_i, cam2world_invt, are_points=False)\n nrm = np.linalg.norm(im_i, axis=-1, keepdims=True) + 1e-10\n im_i /= nrm\n mask = np_util.make_mask(self.depth_images[0, i, ...])\n world_normals.append(\n np_util.zero_by_mask(mask, im_i).astype(np.float32))\n self._world_normal_images = np.stack(world_normals)\n return self._world_normal_images",
"def GetNormal(self):\n ...",
"def norm(self):\n return self.pixels.norm",
"def normal(self) -> Vector:\n return self._normal",
"def get_normals(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n us = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])\n nsyms = 4 if self.halfexts[0] == self.halfexts[1] else 2\n return [(np.dot(r, u), nsyms) for u in us]",
"def world_to_camera_normals(inverted_camera_quaternation, world_normals):\n exr_x, exr_y, exr_z = world_normals[0], world_normals[1], world_normals[2]\n camera_normal = np.empty([exr_x.shape[0], exr_x.shape[1], 3], dtype=np.float32)\n for i in range(exr_x.shape[0]):\n for j in range(exr_x.shape[1]):\n pixel_camera_normal = _multiply_quaternion_vec3(inverted_camera_quaternation,\n [exr_x[i][j], exr_y[i][j], exr_z[i][j]])\n camera_normal[i][j][0] = pixel_camera_normal[0]\n camera_normal[i][j][1] = pixel_camera_normal[1]\n camera_normal[i][j][2] = pixel_camera_normal[2]\n\n camera_normal = camera_normal.transpose(2, 0, 1)\n return camera_normal",
"def Normal(self):\n return Vector(self.normal)",
"def draw_normal(self):\n means, scale = self.get_means_and_scales()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T",
"def get_face_normal(self):\n if self.mesh is None:\n self.load_mesh()\n self.mesh.set_face_normal()",
"def norm(self):\n return self._color_mapper",
"def glorot_normal(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='truncated_normal', seed=seed)",
"def unit_normals(self):\n return np.stack(self.centers_cartesian(), axis=-1)",
"def normalize_data(self):\n\t\tfull_matrix = self.balance_clases()\n\t\ttexture_matrix = Normalizer().fit_transform(X=full_matrix[:,range(0,24)])\n\n\t\treturn texture_matrix",
"def normal_to_world(self, local_normal: Vector) -> Vector:\n # This will convert to one group space up if there is a parent group.\n normal = self.transform.inverse().transpose() * local_normal\n normal.w = 0\n normal = normal.normalize()\n\n if self.parent:\n normal = self.parent.normal_to_world(normal)\n return normal",
"def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()",
"def get_norma(self):\n return self.norma",
"def tlwh_norm(self):\n ret = self.tlwh()\n ret[0] /= self.im_shape[1] # im_shape: [height, width]\n ret[1] /= self.im_shape[0]\n ret[2] /= self.im_shape[1]\n ret[3] /= self.im_shape[0]\n return ret",
"def surface_norm(self, pt):\n\n return self.normal.normalize()",
"def normal(self, point):\n return self._normal.dup()",
"def normalize(self):\r\n\r\n nlen = 1.0/math.sqrt(self*self)\r\n return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)",
"def normalize_temp(temp):\n\n\tnorme = np.empty(temp.shape[2])\n\tfor i in range(temp.shape[2]):\n\t\tnorme[i] = np.linalg.norm(temp[:, :, i])\n\t\ttemp[:, :, i] = temp[:, :, i] / norme[i]\n\treturn (norme)",
"def normal(m):\n if type(m) is nn.Linear or type(m) is nn.Conv2d:\n nn.init.normal_(m.weight)",
"def normalize(self):\n return (self.r / 255.0, self.g / 255.0, self.b / 255.0, self.a / 255.0)",
"def gen_world2local(normal):\n last_dim_i = normal.ndim - 1\n\n z = np.array((0, 0, 1), dtype=float)\n\n # Tangents\n t = np.cross(normal, z)\n if (t == 0).all(axis=-1).any():\n raise ValueError((\n \"Found (0, 0, 0) tangents! Possible reasons: normal colinear with \"\n \"(0, 0, 1); normal is (0, 0, 0)\"))\n t = normalize_vec(t, axis=last_dim_i)\n\n # Binormals\n # No need to normalize because normals and tangents are orthonormal\n b = np.cross(normal, t)\n\n # Rotation matrices\n rot = np.stack((t, b, normal), axis=last_dim_i)\n # So that at each location, we have a 3x3 matrix whose ROWS, from top to\n # bottom, are world tangents, binormals, and normals\n\n return rot",
"def twoDNormal(self):\n return vector((-1) * self.y, self.x, 0)",
"def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector",
"def normalise(self):\n return self.map_channels(lambda component: float(component) / 255.0)"
]
| [
"0.6566784",
"0.6541615",
"0.6512049",
"0.64429337",
"0.6339668",
"0.6276351",
"0.6200656",
"0.6118488",
"0.61088574",
"0.607771",
"0.60631305",
"0.6046111",
"0.6028556",
"0.60196614",
"0.60163665",
"0.5973171",
"0.5903069",
"0.5851999",
"0.58481455",
"0.57989216",
"0.5756422",
"0.5739427",
"0.5727994",
"0.57088494",
"0.5707559",
"0.56814957",
"0.5678037",
"0.5659992",
"0.56534",
"0.56413394"
]
| 0.78915715 | 0 |
Convert (rows, cols) raster row and column indices to geographic coordinates based on the affine transformation of the raster. | def rowcol_to_xy(rows, cols, affine):
# make it a 3x3 matrix
aff_array = numpy.array(affine).reshape((3, 3))
    # check that rows and cols are indeed 1-D vectors
rows = validate.is_vector(rows)
cols = validate.is_vector(cols)
# filler
layers = numpy.ones_like(rows)
vector = numpy.array([cols, rows, layers])
# compute xy
xy = numpy.dot(aff_array, vector)[:2, :]
return xy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __affine_geo_transformation(x, y, gtr):\n\n # https://gdal.org/user/raster_data_model.html#affine-geotransform\n # Affine transformation rewritten for rasterio:\n gtr_x = gtr[2] + (x + 0.5) * gtr[0] + (y + 0.5) * gtr[1]\n gtr_y = gtr[5] + (x + 0.5) * gtr[3] + (y + 0.5) * gtr[4]\n\n return gtr_x, gtr_y",
"def pixel2coord(self, x, y):\n # NEED TO CHANGE TO USE INVERSE TRANSFORM COEFFS\n # partly taken from Sean Gillies \"affine.py\"\n a,b,c,d,e,f = self.coordspace_transform\n det = a*e - b*d\n idet = 1 / float(det)\n ra = e * idet\n rb = -b * idet\n rd = -d * idet\n re = a * idet\n newx = (x*ra + y*rb + (-c*ra - f*rb) )\n newy = (x*rd + y*re + (-c*rd - f*re) )\n return newx,newy",
"def _indices_to_coords(c,r):\n\n column = _index_to_column(c)\n row = r + 1\n\n return {'c': column, 'r': row, 'coord': f'{column}{row}'}",
"def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, geometry)",
"def pixel2coords(self, x, y):\n xoff, a, b, yoff, d, e = self.geotransform()\n\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return (xp, yp)",
"def calc_affine(df):\n\tx0 = df.columns[0]\n\ty0 = df.index[0]\n\tdx = df.columns[1] - df.columns[0]\n\tdy = df.index[1] - df.index[0]\n\t\n\tt = affine.Affine(dx, 0, x0 , 0, dy ,y0 - dy) \n\t# y0 - dy because anker point is in the south!\n\treturn t",
"def read_affine(file):\n data = open(file, 'r').read()\n data = data.split('\\n')\n for i in range(1, 5):\n data[i] = data[i].split(':')\n int_lon = np.fromstring(data[1][1], dtype='float', sep=',')\n int_lat = np.fromstring(data[2][1], dtype='float', sep=',')\n Nlon = len(int_lon) - 1\n Nlat = len(int_lat) - 1\n data[3][1] = data[3][1].split(',')\n data[4][1] = data[4][1].split(',')\n lon_transform = np.zeros((Nlon, 2))\n lat_transform = np.zeros((Nlat, 2))\n for i in range(Nlon):\n data[3][1][i] = data[3][1][i].split(' ')\n lon_transform[i] = [data[3][1][i][0], data[3][1][i][1]]\n for i in range(Nlat):\n data[4][1][i] = data[4][1][i].split(' ')\n lat_transform[i] = [data[4][1][i][0], data[4][1][i][1]]\n lon_transform = np.array(lon_transform).astype('float')\n lat_transform = np.array(lat_transform).astype('float')\n return int_lon, int_lat, lon_transform, lat_transform",
"def GetGeoTransform(raster_path):\n \n #open a GDAL object containig the raster\n gdal_img = gdal.Open(raster_path)\n \n #extract basic geospatial data\n ulx, xres, xskew, uly, yskew, yres = gdal_img.GetGeoTransform()\n \n #calculate lower right coordinates from upper left coordinates and raster size\n lrx = ulx + (gdal_img.RasterXSize * xres)\n lry = uly + (gdal_img.RasterYSize * yres)\n \n geoinfo = {'ulx': ulx,\n 'lrx': lrx,\n 'uly': uly,\n 'lry': lry,\n 'xres': xres,\n 'xskew': xskew,\n 'yres': yres,\n 'yskew': yskew\n }\n \n return geoinfo",
"def affine_transform(geom, matrix):\n if geom.is_empty:\n return geom\n if len(matrix) == 6:\n ndim = 2\n a, b, d, e, xoff, yoff = matrix\n if geom.has_z:\n ndim = 3\n i = 1.0\n c = f = g = h = zoff = 0.0\n matrix = a, b, c, d, e, f, g, h, i, xoff, yoff, zoff\n elif len(matrix) == 12:\n ndim = 3\n a, b, c, d, e, f, g, h, i, xoff, yoff, zoff = matrix\n if not geom.has_z:\n ndim = 2\n matrix = a, b, d, e, xoff, yoff\n else:\n raise ValueError(\"'matrix' expects either 6 or 12 coefficients\")\n\n def affine_pts(pts):\n \"\"\"Internal function to yield affine transform of coordinate tuples\"\"\"\n if ndim == 2:\n for x, y in pts:\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n yield (xp, yp)\n elif ndim == 3:\n for x, y, z in pts:\n xp = a * x + b * y + c * z + xoff\n yp = d * x + e * y + f * z + yoff\n zp = g * x + h * y + i * z + zoff\n yield (xp, yp, zp)\n\n # Process coordinates from each supported geometry type\n if geom.type in ('Point', 'LineString', 'LinearRing'):\n return type(geom)(list(affine_pts(geom.coords)))\n elif geom.type == 'Polygon':\n ring = geom.exterior\n shell = type(ring)(list(affine_pts(ring.coords)))\n holes = list(geom.interiors)\n for pos, ring in enumerate(holes):\n holes[pos] = type(ring)(list(affine_pts(ring.coords)))\n return type(geom)(shell, holes)\n elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':\n # Recursive call\n # TODO: fix GeometryCollection constructor\n return type(geom)([affine_transform(part, matrix)\n for part in geom.geoms])\n else:\n raise ValueError('Type %r not recognized' % geom.type)",
"def xy_to_rowcol(x, y, affine):\n # affine might be an Affine object, might be 3x3 array or\n # 9-element vector. Let's first make damn sure is a 9-element\n # vector\n aff_array = numpy.array(affine).reshape(9)\n\n # now be damn sure we have an Affine object, and reverse it to\n # go from coordinates to indices\n affine = ~Affine(*aff_array[:6])\n\n # compute the y, x values\n yx = numpy.floor(rowcol_to_xy(y, x, affine))\n\n # convert to int and flip to xy\n xy = yx.astype(int)[::-1]\n return xy",
"def transformAffine(self, coords):\n coordsshape = coords.shape\n dims = coordsshape[0] + 1\n coords = coords.reshape((len(coords), -1))\n coords = np.concatenate((coords, np.ones((1, len(coords[0])))), 0)\n affine = np.eye(dims)\n # now transform first to center:\n meanvec = np.mean(coords, 1)\n center = np.eye(dims)\n center[:-1, -1] = -meanvec[:-1]\n affine = np.matmul(center, affine)\n\n if np.sum(self.shift):\n affine[:-1, -1] += (self.deformrandomstate.rand(dims - 1) - 0.5) * np.float32(self.shift)\n if np.max(self.scaling) > 1:\n scales = np.ones(dims)\n # scales[:-1] = (self.deformrandomstate.rand(dims-1)-0.5)*(self.scaling-1.0/self.scaling)+(self.scaling+1/self.scaling)/2\n scales[:-1] = self.scaling ** (self.deformrandomstate.rand(dims - 1) * 2 - 1)\n scales = np.diag(scales)\n # print(scales)\n affine = np.matmul(scales, affine)\n if np.sum(self.rotation):\n affine = self._rotate(affine)\n # move back to location:\n center[:-1, -1] = -center[:-1, -1]\n affine = np.matmul(center, affine)\n # now appyl to coords:\n coords = np.matmul(affine, coords)\n coords = coords[:-1]\n coords = coords.reshape(coordsshape)\n return coords",
"def _pixel_to_map(coordinates, geotransform):\n coordinates_map = np.empty(coordinates.shape)\n coordinates_map[..., 0] = (\n geotransform[0]\n + geotransform[1] * coordinates[..., 0]\n + geotransform[2] * coordinates[..., 1]\n )\n coordinates_map[..., 1] = (\n geotransform[3]\n + geotransform[4] * coordinates[..., 0]\n + geotransform[5] * coordinates[..., 1]\n )\n return coordinates_map",
"def transform_coords(x, y, w, h, nw, nh):\r\n return ((((x / w) - 0.5) * nw), (((h - y) / h) - 0.5) * nh)",
"def _affine_coordinates(self, Vrep_object):\n if '_affine_coordinates_pivots' not in self.__dict__:\n v_list = [ vector(v) for v in self.Vrepresentation() ]\n if len(v_list)>0:\n origin = v_list[0]\n v_list = [ v - origin for v in v_list ]\n coordinates = matrix(v_list)\n self._affine_coordinates_pivots = coordinates.pivots()\n \n v = list(Vrep_object)\n if len(v) != self.ambient_dim():\n raise ValueError('Incorrect dimension: '+str(v))\n\n return vector(self.field(), [ v[i] for i in self._affine_coordinates_pivots ])",
"def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2",
"def Pixel2World(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xdist = geoMatrix[1]\r\n ydist = geoMatrix[5]\r\n coorX = (ulX + (x * xdist))\r\n coorY = (ulY + (y * ydist))\r\n return (coorX, coorY)",
"def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", \"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)",
"def world2Pixel(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xDist = geoMatrix[1]\r\n yDist = geoMatrix[5]\r\n rtnX = geoMatrix[2]\r\n rtnY = geoMatrix[4]\r\n # pixel = int((x - ulX) / xDist)\r\n # line = int((ulY - y) / xDist)\r\n # Floor for x and ceiling for y seems to produce the best looking output\r\n #\t(for one test case, may want to change later to np.round?)\r\n pixx = np.round((x - ulX) / xDist, decimals=0).astype(np.int)\r\n pixy = np.round((ulY - y) / xDist, decimals=0).astype(np.int)\r\n\r\n return pixx, pixy",
"def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)",
"def transform_coordinates(coords):\n # WGS 84 reference coordinate system parameters\n A = 6378.137 # major axis [km]\n E2 = 6.69437999014e-3 # eccentricity squared\n\n coords = prepare_coords(coords)\n\n # convert to radiants\n lat_rad = np.radians(coords[:, 0])\n lon_rad = np.radians(coords[:, 1])\n\n # convert to cartesian coordinates\n r_n = A / (np.sqrt(1 - E2 * (np.sin(lat_rad) ** 2)))\n x = r_n * np.cos(lat_rad) * np.cos(lon_rad)\n y = r_n * np.cos(lat_rad) * np.sin(lon_rad)\n z = r_n * (1 - E2) * np.sin(lat_rad)\n\n return np.column_stack((x, y, z))",
"def get_affine_transform(gps_coords, pdr_coords):\n # Compute similarity Xp = s A X + b\n X = np.array(pdr_coords)\n Xp = np.array(gps_coords)\n T = tf.superimposition_matrix(X.T, Xp.T, scale=True)\n\n A, b = T[:3, :3], T[:3, 3]\n s = np.linalg.det(A)**(1. / 3)\n A /= s\n return s, A, b",
"def map(self,Affine,i):\n map_x = np.zeros([self.num,self.d])\n for k in range(self.num):\n map_x[k,:] = Affine.apply(i,self.pick(k))\n Mapped = Model_Points(map_x)\n return Mapped",
"def _raster_index_to_coords(i, j, bounds = [[-100, -100], [100, 100]],\n dx = 1, dy = 1):\n x = (j+0.5)*dx + bounds[0][0]\n y = (i+0.5)*dy + bounds[0][1]\n return x, y",
"def gen_gps_to_coords(lat,lon,rows,cols,min_lat,max_lat,min_lon,max_lon):\n\n if (lat <= min_lat or lat >= max_lat or lon <= min_lon or lon >= max_lon):\n return (-1,-1)\n\n lat_step = abs(max_lat-min_lat)/rows\n lon_step = abs(max_lon-min_lon)/cols\n\n lat_spot = int((max_lat-lat)/lat_step)\n lon_spot = int((lon-min_lon)/lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)",
"def ind2coord(self, index):\n\n # assert (index >= 0)\n # assert(index < self.n - 1)\n\n col = index // self.rows\n row = index % self.rows\n\n return [row, col]",
"def _build_geotransform(self, i, j):\n assert isinstance(i, int), (\"i is not an integer\")\n assert isinstance(j, int), (\"j is not an integer\")\n x_origin, x_res, x_ignore, y_origin, y_ignore, y_res = (\n self.image_metadata.geotransform)\n # integer conversion to reduce floating point error\n new_x_origin = self._calculate_origin(x_origin, x_res, self.offset, j)\n new_y_origin = self._calculate_origin(y_origin, y_res, self.offset, i)\n geotransform = (new_x_origin, x_res, x_ignore, new_y_origin, \n y_ignore, y_res) \n return geotransform",
"def node_to_coords(self,node_num):\n row = (node_num - 1) / self.cols\n col = (node_num - 1) % self.cols\n return (row,col)",
"def get_affine_transform_2d(gps_coords, pdr_coords):\n X = np.array(pdr_coords)\n Xp = np.array(gps_coords)\n\n # Estimate 2d similarity to align to GPS\n T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False)\n s = np.linalg.det(T[:2, :2]) ** 0.5\n A = np.eye(3)\n A[:2, :2] = T[:2, :2] / s\n b = np.array([\n T[0, 2],\n T[1, 2],\n Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment\n ])\n\n return s, A, b",
"def convertView2Geo(self, x, y):\n\n # x_pix is from left map edge, y_pix from top map edge\n x_pix = x + self.view_offset_x\n y_pix = y + self.view_offset_y\n\n lon = self.map_llon + x_pix/self.ppd_x\n lat = self.map_tlat - y_pix/self.ppd_y\n\n return (lon, lat)",
"def read_affine(df):\n SliceThickness = [df.SliceThickness]\n PixelSpacing = _string_to_list_of_floats(df.PixelSpacing)\n ImageOrientationPatient = _string_to_list_of_floats(df.ImageOrientationPatient)\n ImagePositionPatient = _string_to_list_of_floats(df.ImagePositionPatient)\n\n Zooms = np.array(PixelSpacing+SliceThickness, dtype=float)\n ImageOrientationPatient = np.array(ImageOrientationPatient, dtype=float)\n ImagePositionPatient = np.array(ImagePositionPatient, dtype=float)\n \n ijk2ras = extract_cosines(ImageOrientationPatient)\n\n ijk2ras = (ijk2ras*np.array([-1,-1,1])).T\n ImagePositionPatient = ImagePositionPatient*np.array([-1,-1,1])\n\n affine = np.stack((ijk2ras[:,0]*Zooms[0],\n ijk2ras[:,1]*Zooms[1],\n ijk2ras[:,2]*Zooms[2],\n ImagePositionPatient), axis=1)\n\n return np.vstack((affine,[[0,0,0,1]]))"
]
| [
"0.7374341",
"0.63459325",
"0.6277448",
"0.62138504",
"0.61124843",
"0.61005765",
"0.6083403",
"0.6077274",
"0.6064563",
"0.60560596",
"0.5998631",
"0.59866047",
"0.5951056",
"0.5938208",
"0.5902591",
"0.589468",
"0.5876953",
"0.5858457",
"0.58484095",
"0.5841645",
"0.5835178",
"0.58304113",
"0.5825501",
"0.58252376",
"0.582401",
"0.5820251",
"0.5816614",
"0.57565236",
"0.5751212",
"0.57493544"
]
| 0.646609 | 1 |
Convert (x, y) coordinates to raster column and row indices based on the affine transformation of the raster. | def xy_to_rowcol(x, y, affine):
# affine might be an Affine object, might be 3x3 array or
    # 9-element vector. Let's first make damn sure it is a 9-element
# vector
aff_array = numpy.array(affine).reshape(9)
# now be damn sure we have an Affine object, and reverse it to
# go from coordinates to indices
affine = ~Affine(*aff_array[:6])
# compute the y, x values
yx = numpy.floor(rowcol_to_xy(y, x, affine))
# convert to int and flip to xy
xy = yx.astype(int)[::-1]
return xy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pixel2coord(self, x, y):\n # NEED TO CHANGE TO USE INVERSE TRANSFORM COEFFS\n # partly taken from Sean Gillies \"affine.py\"\n a,b,c,d,e,f = self.coordspace_transform\n det = a*e - b*d\n idet = 1 / float(det)\n ra = e * idet\n rb = -b * idet\n rd = -d * idet\n re = a * idet\n newx = (x*ra + y*rb + (-c*ra - f*rb) )\n newy = (x*rd + y*re + (-c*rd - f*re) )\n return newx,newy",
"def xy2ind(self, x, y):\n return self.sub2ind(*self.xy2sub(x, y))",
"def pixel_to_coords(self, x, y):\n rx, ry = self.size\n nx = (x / rx - 0.5) * self.scale + self.center[0]\n ny = ((ry - y) / ry - 0.5) * self.scale + self.center[1]\n nz = self.center[2]\n return [nx, ny, nz]",
"def xy_to_index(x, y):\n index = y * columns + x\n return index",
"def xy2ind(x, y, xdim):\n if isinstance(x, np.ndarray):\n return x + (y * xdim)\n else:\n return int(x) + int(y) * xdim",
"def pixel2coords(self, x, y):\n xoff, a, b, yoff, d, e = self.geotransform()\n\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return (xp, yp)",
"def xy2sub(self, x, y):\n # ix = int(round(x/self.Lx*(self.Nx-1)))\n # iy = int(round(y/self.Ly*(self.Ny-1)))\n ix = (np.array(x) / self.Lx*(self.Nx-1)).round().astype(int)\n iy = (np.array(y) / self.Ly*(self.Ny-1)).round().astype(int)\n return ix, iy",
"def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)",
"def xy2ij(self, x, y=None):\n pass",
"def _raster_index_to_coords(i, j, bounds = [[-100, -100], [100, 100]],\n dx = 1, dy = 1):\n x = (j+0.5)*dx + bounds[0][0]\n y = (i+0.5)*dy + bounds[0][1]\n return x, y",
"def xy_to_rowcol(self, x, y):\n col = int((x - self.board_lft_x) / self.next_square)\n row = int((self.board_top_y - y) / self.next_square)\n return [row, col]",
"def map_to_matrix(x, y):\n x_pos = round(x * ((MATRIX_SIZE_X - 1)/(FRAME_W - 1)))\n y_pos = round(y * ((MATRIX_SIZE_Y - 1)/(FRAME_H - 1)))\n\n x_pos = (MATRIX_SIZE_X - 1) - x_pos #invert x direction (left and right) to account for camera perspective\n\n return x_pos, y_pos",
"def coord2pixel(self, x, y):\n a,b,c,d,e,f = self.coordspace_transform\n newx,newy = (x*a + y*b + c, x*d + y*e + f)\n return int(newx),int(newy)",
"def ind2coord(self, index):\n\n # assert (index >= 0)\n # assert(index < self.n - 1)\n\n col = index // self.rows\n row = index % self.rows\n\n return [row, col]",
"def image_coordinates(self, temp):\n iy = np.array((temp.y[:,None]-self.extent[2])/self.spacing[1],dtype=np.int64)\n ix = np.array((temp.x[None,:]-self.extent[0])/self.spacing[0],dtype=np.int64)\n return (iy,ix)",
"def coor2idx(x, y):\r\n a = round(x/4000,0)*4000\r\n b = (round_down(y/4000,0)+0.5)*4000\r\n i = int((a - 24000)/4000) + 1\r\n j = int((b - 22000)/4000) + 1\r\n return i, j",
"def xy_to_idx(self, xs, ys, mask=None, mask_outside=False, nodata=-1):\n _, ncol = self.shape\n r, c = self.rowcol(xs, ys, mask=mask, mask_outside=mask_outside, nodata=nodata)\n mask = r != nodata\n idx = np.full(r.shape, nodata, dtype=int)\n idx[mask] = r[mask] * ncol + c[mask]\n return idx",
"def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]",
"def coord2ind(self, coord):\n\n [row, col] = coord\n\n assert (row < self.rows)\n assert (col < self.cols)\n\n return col * self.rows + row",
"def _indices_to_coords(c,r):\n\n column = _index_to_column(c)\n row = r + 1\n\n return {'c': column, 'r': row, 'coord': f'{column}{row}'}",
"def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)",
"def _to_maze_coord(self, x, y):\n maze = self._get_maze()\n x = int(x / _MAZE_CELL_SIZE)\n y = int(y / _MAZE_CELL_SIZE)\n y = maze.shape[1] - y - 1\n return x, y",
"def to_indices(self, world_coords):\n (x, y, z) = world_coords\n (cx, cy, cz) = self.center\n\n px = x - cx\n py = y - cy\n pz = z - cz\n\n # project u and v\n # TODO: Can this be done with some built-in Numpy operation?\n (ux, uy, uz) = self.u_dir\n (vx, vy, vz) = self.v_dir\n u = px * ux + py * uy + pz * uz\n v = px * vx + py * vy + pz * vz\n\n # rescale to pixel coordinates. u is up, v is down\n (H, W, _) = self.shape\n i = rescale(-1, 1, H - 1, 0, v)\n j = rescale(-1, 1, 0, W -1, u)\n\n return (i, j)",
"def widget2imgcoords(self, x,y):\n\t\tif self._w2i_matrix is None: self._calc_matrix()\n\t\treturn self._w2i_matrix.transform_point(x,y)",
"def __affine_geo_transformation(x, y, gtr):\n\n # https://gdal.org/user/raster_data_model.html#affine-geotransform\n # Affine transformation rewritten for rasterio:\n gtr_x = gtr[2] + (x + 0.5) * gtr[0] + (y + 0.5) * gtr[1]\n gtr_y = gtr[5] + (x + 0.5) * gtr[3] + (y + 0.5) * gtr[4]\n\n return gtr_x, gtr_y",
"def world2Pixel(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xDist = geoMatrix[1]\r\n yDist = geoMatrix[5]\r\n rtnX = geoMatrix[2]\r\n rtnY = geoMatrix[4]\r\n # pixel = int((x - ulX) / xDist)\r\n # line = int((ulY - y) / xDist)\r\n # Floor for x and ceiling for y seems to produce the best looking output\r\n #\t(for one test case, may want to change later to np.round?)\r\n pixx = np.round((x - ulX) / xDist, decimals=0).astype(np.int)\r\n pixy = np.round((ulY - y) / xDist, decimals=0).astype(np.int)\r\n\r\n return pixx, pixy",
"def coordinates_to_imgpts(x, y):\n pts = np.array([np.flipud(np.transpose(np.vstack([x, y])))])\n return pts",
"def rowcol_to_xy(rows, cols, affine):\n\n # make it a 3x3 matrix\n aff_array = numpy.array(affine).reshape((3, 3))\n\n # check that rows amd cols are indeed 1-D vectors\n rows = validate.is_vector(rows)\n cols = validate.is_vector(cols)\n\n # filler\n layers = numpy.ones_like(rows)\n vector = numpy.array([cols, rows, layers])\n\n # compute xy\n xy = numpy.dot(aff_array, vector)[:2, :]\n return xy",
"def rowcol2XY(row,col,CCD):\n pixscale = 0.015 #mm/pix\n X = CCD[1]+1024*pixscale-(col*pixscale+pixscale/2.)\n Y = CCD[2]+2048*pixscale-(row*pixscale+pixscale/2.)\n return X,Y",
"def node_to_coords(self,node_num):\n row = (node_num - 1) / self.cols\n col = (node_num - 1) % self.cols\n return (row,col)"
]
| [
"0.7161936",
"0.6930285",
"0.6911302",
"0.6767629",
"0.6755321",
"0.6702426",
"0.6593449",
"0.6577924",
"0.65381414",
"0.64958143",
"0.6494961",
"0.6420091",
"0.6297772",
"0.62835073",
"0.62815624",
"0.62698585",
"0.6257687",
"0.62332827",
"0.61786354",
"0.61742276",
"0.6152893",
"0.6152418",
"0.6139715",
"0.61298734",
"0.6112179",
"0.60887843",
"0.60692376",
"0.6053672",
"0.6025894",
"0.602454"
]
| 0.720692 | 0 |
gets api credentials using keyring returns a list [user name, password] | def get_credentials(service_name="dataforSeo", uname="[email protected]"):
pw = keyring.get_password(service_name, uname)
return [uname, pw] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetUserCredentials(self):\r\n # Create a local alias to the email variable to avoid Python's crazy\r\n # scoping rules.\r\n global keyring\r\n email = self.email\r\n if email is None:\r\n email = GetEmail(\"Email (login for uploading to %s)\" % self.server)\r\n password = None\r\n if keyring and not email in self.accounts_seen:\r\n try:\r\n password = keyring.get_password(self.host, email)\r\n except:\r\n # Sadly, we have to trap all errors here as\r\n # gnomekeyring.IOError inherits from object. :/\r\n print \"Failed to get password from keyring\"\r\n keyring = None\r\n if password is not None:\r\n print \"Using password from system keyring.\"\r\n self.accounts_seen.add(email)\r\n else:\r\n password = getpass.getpass(\"Password for %s: \" % email)\r\n if keyring:\r\n answer = raw_input(\"Store password in system keyring?(y/N) \").strip()\r\n if answer == \"y\":\r\n keyring.set_password(self.host, email, password)\r\n self.accounts_seen.add(email)\r\n return (email, password)",
"def get_creds():\n with open(CREDS_PATH, 'r') as creds_file:\n creds = json.load(creds_file)\n return creds['uname'], creds['pword']",
"def get_user_credentials(connection):\n\n response = connection.get_json('user')\n user_data = response.get('user', None)\n if user_data is None:\n raise SAPCliError('gCTS response does not contain \\'user\\'')\n\n config_data = user_data.get('config', None)\n if config_data is None:\n return []\n\n user_credentials = [cred for cred in config_data if cred['key'] == 'USER_AUTH_CRED_ENDPOINTS']\n return json.loads(user_credentials[0]['value'])",
"def list_credentials(user):\n return Credentials.list_credentials(user)",
"def get_auth(self):\n # Only return accepted keys from the auth_keys dictionary\n # This is to prevent exceptions thrown from keystone session\n returnDict = {}\n for key in self.creds:\n if key in self.auth_keys[self.api_version]:\n returnDict[key] = self.creds[key]\n return returnDict",
"def _auth():\n with open('api-credentials.txt', 'r') as f:\n lines = f.read().splitlines()\n credentials = lines[0] + ':' + lines[1]\n return 'Basic ' + base64.b64encode(credentials.encode()).decode()",
"def auth(self):\n return self.creds(\"[email protected]\", cookie=\"USERTOKEN: authcookie\")",
"def GetUserCredentials():\n email = options.email\n if email is None:\n email = GetEmail(\"Email (login for uploading to %s)\" % options.server)\n password = getpass.getpass(\"Password for %s: \" % email)\n return (email, password)",
"def get_credentials():\n username = input(\"Username: \")\n password = getpass.getpass(prompt='Password: ')\n return username, password",
"def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url",
"def list_credentials(self, **_params):\r\n return self.get(self.credentials_path, params=_params)",
"def list_credentials():\n creds = load_auth()\n max_username_len = max([len(c.username) for c in creds]) if len(creds) > 0 else 1\n long_format = f\"{{:{max_username_len}}} for {{}}\"\n for cred in creds:\n if len(cred.hostname) > 0:\n print(str.format(long_format, cred.username, cred.hostname))\n else:\n print(cred.username)\n if len(creds) == 0 and os.isatty(1):\n print(\"No credentials configured\")",
"def get_auth():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n apikey = config.get('auth', 'apikey')\n return (\"apikey\", apikey)",
"def get_appengine_credentials():\n return get_credentials()",
"def getcreds():\n global user\n global password\n if not user:\n user = input(\"Please enter your username:\\n\")\n if not password:\n password = getpass.getpass(\"Please enter password:\\n\")",
"def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_creds(cred_fpath=None, api_path=None):\n if cred_fpath is not None:\n print(\"reading keys from credentials file\")\n keys = pd.read_csv(cred_fpath, sep=\"=\")\n myAccessKey = keys.loc['aws_access_key_id ']['[default]'].strip()\n mySecretKey = keys.loc['aws_secret_access_key ']['[default]'].strip()\n myToken = \"\"\n else:\n r = requests.get(api_path)\n creds = r.json()\n myAccessKey = creds[\"AccessKeyId\"]\n mySecretKey = creds[\"SecretAccessKey\"]\n myToken = creds[\"Token\"]\n return myAccessKey, mySecretKey, myToken",
"def get_credentials(config, auth_index):\n with open(config) as fp:\n jconfig = json.load(fp)\n\n # Attempt to read authentification details from config file.\n try:\n c_key = jconfig['Authentication'][auth_index]['consumer_key']\n c_secret = jconfig['Authentication'][auth_index]['consumer_secret']\n a_token = jconfig['Authentication'][auth_index]['access_token']\n a_secret = (\n jconfig['Authentication'][auth_index]['access_secret']\n )\n\n except Exception as e:\n # logging.error(str(e))\n print(str(e))\n sys.exit(ERROR)\n\n return c_key, c_secret, a_token, a_secret",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_jira_auth() -> Tuple[str, str]:\n jira_auth = json.loads(\n s3.get_object(\n Bucket=\"beckon-devops\", Key=\"credentials/beckon_credentials.json\"\n )[\"Body\"].read()\n ).get(\"jira\", {})\n\n return jira_auth.get(\"user\", \"\"), jira_auth.get(\"password\", \"\")",
"def get_creds():\n\tcredentials = None\n\tif os.path.exists('token.pickle'):\n\t\twith open('token.pickle', 'rb') as token:\n\t\t\tcredentials = pickle.load(token)\n\t# If there are no (valid) credentials available, let the user log in.\n\tif not credentials or not credentials.valid:\n\t\tif credentials and credentials.expired and credentials.refresh_token:\n\t\t\tcredentials.refresh(Request())\n\t\telse:\n\t\t\tflow = InstalledAppFlow.from_client_secrets_file('config/sa.json', SCOPES)\n\t\t\tcredentials = flow.run_local_server(port=0)\n\t\t# Save the credentials for the next run\n\t\twith open('token.pickle', 'wb') as token:\n\t\t\tpickle.dump(credentials, token)\n\treturn credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def credentials(self):\n return CurrentProject().config.credentials[self.key]",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = os.path.expanduser('/home/pi/')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_account_credentials(call):\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd",
"def get_auth(context):\n\n headers = context['headers']\n auth_info = {\n \"type\": \"basic\",\n \"basic\": {\n \"user\": headers['api_key'],\n \"password\": \"X\"\n }\n }\n auth = Auth().get_auth(auth_info)\n\n return auth",
"def newcred(self):\n return {'login': input('username: '),\n 'password': getpass.getpass()}"
]
| [
"0.70940423",
"0.70528555",
"0.70423704",
"0.6831092",
"0.6688551",
"0.6600526",
"0.6581192",
"0.6568028",
"0.65603054",
"0.65516466",
"0.6522141",
"0.6507727",
"0.6496433",
"0.6482267",
"0.6443662",
"0.6442605",
"0.64323187",
"0.640169",
"0.63583547",
"0.63330716",
"0.6287448",
"0.62833154",
"0.6278887",
"0.6278871",
"0.62620836",
"0.62533134",
"0.6246779",
"0.623516",
"0.6230237",
"0.6212979"
]
| 0.7742585 | 0 |
checks connection with api by verifying error codes and number of tasks between post_data/requests and response | def check_api_connection(post_data, response) -> list:
# check status code
if response['status_code'] != 20000:
raise ConnectionError(f"Status code is not ok: {response['status_message']}")
# check
id_list = []
for a, b in zip(post_data.values(), response['tasks']):
if a['keyword'] != b['data']['keyword']:
raise ConnectionError("task is missing")
else:
id_list.append(b['id'])
return id_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")",
"async def _perform_api_post_request(self, url, **kwargs):\n error = ''\n json = {}\n max_retries = 5\n for retries in range(max_retries):\n async with self._session.post(url, **kwargs) as resp:\n self.request_count += 1\n status = resp.status\n if resp.status == 504:\n error = 'API timeout'\n self.retry_count += 1\n continue\n try:\n resp.raise_for_status()\n except ClientResponseError:\n error = f'{resp.status}: {resp.reason}'\n continue\n try:\n json = await resp.json()\n except ContentTypeError:\n error = 'Unable to decode JSON'\n self.retry_count += 1\n status = 0\n continue\n json['request_datetime'] = datetime.now()\n break\n\n if retries == max_retries - 1 and error:\n logger.warning(error)\n\n return json, status",
"def test_default_unsuccessful_verify_request(self, cred):\n # make the initial request\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n assert len(resp.json()['request_id']) <= 32\n # now enter invalid verify code 3 times to terminate verification process\n # first invalid code check\n request_id = resp.json()['request_id']\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '16'\n assert resp.json()['request_id'] == request_id\n assert resp.json()['error_text'] == code_does_not_match_msg\n # second invalid check\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '16'\n assert resp.json()['request_id'] == request_id\n assert resp.json()['error_text'] == code_does_not_match_msg\n # third invalid check\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '17'\n assert 'request_id' not in resp.json().keys()\n assert resp.json()['error_text'] == workflow_terminated_msg",
"async def check_config(self) -> None:\n try:\n await self._check_api()\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))",
"def verify_get_response(self, status):\n validate(status, STATUS)\n self.assertTrue(status['database_connection']['connected'])\n self.assertTrue(status['redis_connection']['connected'])\n self.assertEqual(status['missing_workers'], [])\n self.assertNotEqual(status['online_workers'], [])\n self.assertNotEqual(status['versions'], [])",
"def run():\n check_active_requests()\n start_downloads()\n check_download_attempts()\n numsuccess = verify_files()\n recover_failed_downloads()\n check_downloading_requests()\n acknowledge_downloaded_files()\n if can_request_more():\n make_request()\n return numsuccess",
"def run(self,base_url):\n\n url = base_url + self.endpoint\n\n if self.method.upper() == \"GET\":\n r = requests.get(url)\n\n elif self.method.upper() == \"POST\":\n\n if self.payload is not None:\n r = requests.post(url, json=self.payload)\n else:\n r = requests.post(url)\n\n else:\n msg = \"Malformed test. Allowed methods are GET and POST\"\n return get_failure_object(msg)\n\n try:\n\n resp = r.json()\n\n except ValueError as e:\n\n msg = \"Could not decode JSON from response.\"\n return get_failure_object(msg)\n\n try:\n\n # Run all checks for expected exact JSON response values\n for check_key in self.expected_values:\n\n exp_val = self.expected_values[check_key]\n\n if exp_val != resp[check_key]:\n\n msg = \"Expected value '%s' at key '%s' but got '%s'.\" \\\n % (str(exp_val), str(check_key), str(resp[check_key]))\n\n return get_failure_object(msg)\n\n # Run all checks for expected types in JSON response\n for check_key in self.expected_types:\n\n exp_type = self.expected_types[check_key]\n val = resp[check_key]\n\n if exp_type == \"string\":\n type_res = test_expected_type(val, str)\n\n elif exp_type == \"int\":\n type_res = test_expected_type(val, int)\n\n elif exp_type == \"float\":\n type_res = test_expected_type(val, float)\n\n else:\n msg = \"Malformed test. Expected types allowed: 'str',\\\n 'int', 'float'\"\n return {\"status\": \"FAILED\", \"error_msg\": msg}\n\n if type_res == False:\n msg = get_expected_type_error_message(check_key, val, exp_type)\n return get_failure_object(msg)\n\n return {\"status\":\"PASSED\"}\n\n except KeyError as e:\n msg = \"Expected key '%s' not found.\" % str(e.args[0])\n return get_failure_object(msg)",
"def _CheckConnect(self):\n try:\n resp = requests.get(self._target_url, timeout=2)\n if resp.headers['Maximum-Bytes']:\n self._max_bytes = int(resp.headers['Maximum-Bytes'])\n return resp.status_code == 200\n except requests.exceptions.ConnectionError:\n return False\n except Exception as e:\n self.exception('Unexpected test connect failure: %s', str(e))\n return False",
"def test_default_unsuccessful_verify_request(self, cred):\n # make the initial request\n resp = requests.get(verify_url.format('xml', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n assert tree[0].tag == 'request_id' and len(tree[0].text) <= 32\n assert tree[1].tag == 'status' and tree[1].text == '0'\n # now enter invalid verify code 3 times to terminate verification process\n # first invalid code check\n request_id = tree[0].text\n resp = requests.get(check_url.format('xml', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n assert tree[0].tag == 'request_id' and tree[0].text == request_id\n assert tree[1].tag == 'status' and tree[1].text == '16'\n assert tree[2].tag == 'error_text' and tree[2].text == code_does_not_match_msg\n # second invalid check\n resp = requests.get(check_url.format('xml', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n assert tree[0].tag == 'request_id' and tree[0].text == request_id\n assert tree[1].tag == 'status' and tree[1].text == '16'\n assert tree[2].tag == 'error_text' and tree[2].text == code_does_not_match_msg\n # third invalid check\n resp = requests.get(check_url.format('xml', cred[0], cred[1],\n request_id, '00000'))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'text/plain'\n assert resp.text.startswith('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n tree = ElementTree.fromstring(resp.text)\n # assert 'request_id' not in [child.tag for child in tree]\n assert tree[1].tag == 'status' and tree[1].text == '17'\n assert tree[2].tag == 'error_text' and tree[2].text == workflow_terminated_msg",
"def status(event):\n e = ''\n\n try:\n logger.setLevel(event.get('loglevel'))\n logging.getLogger('urllib3').setLevel(event.get('loglevel'))\n except:\n pass\n try:\n pool = urllib3.PoolManager()\n except Exception as e:\n raise CreatePoolManagerFailure(e)\n\n if event.get('url', None) is None:\n raise AttributeError(\"url not specified\")\n\n # The code doesn't know how to handle POST\n # The code doesn't know how to handle these yet\n\n st = time.perf_counter()\n try:\n response = pool.request(\n event.get('method', 'GET'),\n event.get('url', None),\n retries=int(event.get('retries', 3)),\n timeout=float(event.get('timeout', 3)))\n except Exception as e:\n raise HttpRequestError(e)\n\n responseTime = (time.perf_counter() - st) * 1000\n\n logger.debug(\"checking endpoint: %s:%s status=%s bytes=%s time=%.3fms\",\n event.get('method', 'GET'),\n event.get('url', None), response.status,\n response._fp_bytes_read, responseTime)\n\n if response.status >= 200 and response.status <= 299:\n statusMessage = \"2xx\"\n elif response.status >= 300 and response.status <= 399:\n statusMessage = \"3xx\"\n elif response.status >= 400 and response.status <= 499:\n statusMessage = \"4xx\"\n elif response.status >= 500 and response.status <= 599:\n statusMessage = \"5xx\"\n endpointStatus = response.status\n\n ts = datetime.datetime.timestamp(datetime.datetime.now())\n \n logging.getLogger('urllib3').setLevel(logging.WARNING)\n \n return {\n 'statusCode': 200,\n 'body': \"OK\",\n 'url': event.get('url', None),\n 'error': e,\n 'timestamp': ts,\n 'endpoint': {\n 'status': endpointStatus,\n 'message': statusMessage,\n 'time': responseTime\n }\n }",
"def test_get_counturingErr(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, COUNTURING_ERR_IDX, COUNTURING_ERR_SUB)\n param_obj = self.__dict__[servo_type]._get_counturingErr()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in counturingErr...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue",
"def check_status():\n try:\n return HTTPClient().fetch(\"https://api.random.org/\").code == 200\n except Exception: # pylint: disable=broad-except\n return False",
"def test_connection(self, **kwargs):\n try:\n url = \"{0}\".format(self.base_url)\n response = requests.request(\"GET\", url)\n if response.status_code < 500:\n return True\n else:\n return False\n except KeyError:\n return False",
"def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()",
"def test_get_errorCode(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ERROR_CODE_IDX, ERROR_CODE_SUB)\n param_obj = self.__dict__[servo_type]._get_errorCode()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in errorCode...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue",
"def validate_connection(self):\n __method_name = inspect.currentframe().f_code.co_name\n res = self.pull(\n url=self.base_url + consts.OAUTH2_ENDPOINT,\n auth=HTTPBasicAuth(self.client_id, self.client_secretkey),\n data={\"grant_type\": \"client_credentials\"},\n method=\"POST\",\n )\n if res and res.get(\"access_token\"):\n self.session.headers[\"Authorization\"] = \"bearer {}\".format(\n res.get(\"access_token\")\n )\n self.applogger.info(\n \"{}(method={}) : {} : Validation successful.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return\n self.applogger.error(\n \"{}(method={}) : {} : Error occurred while fetching the access token from the response. \"\n 'Key \"access_token\" was not found in the API response.'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n raise Exception(\n \"Error occurred while fetching the access token from the response. \"\n 'Key \"access_token\" was not found in the API response.'\n )",
"def test_API(self):\n print(\"Test API ...\")\n t0 = time.time()\n c = 0\n for trip_headsign in TRIP_HEADSIGN:\n for stop in STOP_A:\n payload = {'format': 'json', 'route_id': \"A\", 'trip_headsign': trip_headsign, 'stop_name': stop}\n req = requests.get('https://applications002.brest-metropole.fr/WIPOD01/Transport/REST/getRemainingTimes',params=payload)\n if len(req.text) < 100 : #API answer 189 characters if it works well\n print(\"API not responding for parameters : {}, {} \".format(trip_headsign, stop))\n c += 1\n else :\n print(\"Params : {}, {} : {}\".format(trip_headsign, stop, req.text))\n duration = time.time() - t0\n print(\"END OF TEST : duration : {} s, {} requests failed\".format(duration, c))",
"def test_post_error_parameters(self):\n data_github = {\"version_control\": \"github\", \"scm_commit\": \"AA\", \"oper\": \"AA\", \"hcnarb\": \"AA\", \"enabled\": \"AA\"}\n data_git = {\"version_control\": \"git\", \"scm_commit\": \"AA\", \"oper\": \"AA\", \"hcnarb\": \"AA\", \"enabled\": \"AA\"}\n\n for data in [data_git, data_github]:\n resp = self.client.post(\"/tracking\", json=data, content_type=\"application/json\", headers=self.auth)\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.INPUT_PARAMETERS_ERROR, resp_dict.get(\"code\"), msg=\"Error in status code return\"\n )\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.INPUT_PARAMETERS_ERROR),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(resp_dict.get(\"data\"), None, msg=\"Error in data information return\")",
"def common_http_validator(method=None,url=None,data=None,header=None):\r\n status_code = 500\r\n error_msg = None\r\n response_data = None\r\n\r\n try:\r\n req = requests.request(method=method,url=url,data=data,headers=header)\r\n # print req.request.method #Getting the method\r\n\r\n except (requests.RequestException,requests.HTTPError,requests.ConnectionError,requests.Timeout) as e:\r\n error_msg = 'Connection/Timeout/General Exception: {}'.format(e)\r\n\r\n except Exception as e:\r\n error_msg = 'Connection/Timeout/General Exception: {}'.format(e)\r\n\r\n else:\r\n status_code = req.status_code\r\n response_data = req.content\r\n\r\n return status_code, response_data, error_msg",
"def test_1():\n\tassert api_call().status_code == 200",
"def _check_status_errors(self, request):\n if request.get_method() not in SUPPORTED_METHODS:\n if DEBUG_LEVEL >= 0:\n print \"Unsupported request method: {}\".format(request.get_method())\n # send code 405\n response = HTTPResponse.HTTPResponse(version=1.0, status_code=405,\n phrase=\"Method Not Allowed\")\n headers = HTTPHeaders.HTTPHeaders()\n public_response_functions.add_default_headers(headers)\n headers[\"Content-Length\"] = \"0\"\n response.set_headers(headers)\n self._client.send(response.build_response())\n return -1\n\n full_file_path = self._get_full_path(request)\n # check if the root abs path is the first substring at the\n # start, if not send forbidden response\n if self._is_restricted(full_file_path):\n if DEBUG_LEVEL >= 0:\n print \"Client tried to access {} which is restricted\".format(full_file_path)\n self._client.send(self._get_restricted_error())\n return 1\n\n if not path.isfile(full_file_path):\n if DEBUG_LEVEL >= 0:\n print \"File: {} not found\".format(full_file_path)\n # send 404 Not Found response\n self._client.send(self._get_404_response())\n return 1\n\n return 0",
"def validate(base_url, keys, throttle, mdrate, mderrors, cterrors, max_file_size, tmpdir):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise Validate; base_url:{a}, throttle:{b}, mdrate:{c}, mderrors:{d}, cterrors:{e}, max_file_size:{f}, tmpdir:{g}\".format(x=tname, a=base_url, b=throttle, c=mdrate, d=mderrors, e=cterrors, f=max_file_size, g=tmpdir))\n\n mdopts = [\"mdonly\", \"md+ct\"]\n mdprobs = [mdrate, 1 - mdrate]\n\n mderroropts = [\"error\", \"ok\"]\n mderrorprobs = [mderrors, 1 - mderrors]\n\n cterroropts = [\"error\", \"ok\"]\n cterrorprobs = [cterrors, 1 - cterrors]\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n # print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n mdtype = _select_from(mderroropts, mderrorprobs)\n # print \"MD: \" + mdtype\n\n # generate a notification which may or may not have an error\n note = _make_notification(error=mdtype==\"error\")\n # print note\n\n # determine whether we're going to send some content\n hasct = _select_from(mdopts, mdprobs)\n # print \"CT: \" + hasct\n file_handle = None\n filepath = None\n cterr = \"ok\"\n if hasct == \"md+ct\":\n # determine if the content should have an error\n cterr = _select_from(cterroropts, cterrorprobs)\n #print \"CTERR:\" + cterr\n filepath = _get_file_path(tmpdir, max_file_size, error=cterr==\"error\")\n #print \"File\" + filepath\n file_handle = open(filepath)\n\n app.logger.debug(\"Thread:{x} - Validate request for Account:{y} Type:{z} MD:{a} CT:{b}\".format(x=tname, y=api_key, z=hasct, a=mdtype, b=cterr))\n\n # make the validate request (which will throw an exception more often than not, because that's what we're testing)\n try:\n j.validate(note, file_handle)\n app.logger.debug(\"Thread:{x} - Validate request resulted in success\".format(x=tname))\n except:\n app.logger.error(\"Thread:{x} - Validate request resulted in expected exception\".format(x=tname))\n\n # cleanup after ourselves\n if filepath is not None:\n file_handle.close()\n os.remove(filepath)\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE - Fatal exception '{y}'\".format(x=tname, y=e.message))",
"def check(self):\n self.__check_request_limit()",
"async def _perform_api_request(self, url, **kwargs):\n error = ''\n json = {}\n max_retries = 5\n for retries in range(max_retries):\n async with self._session.get(url, **kwargs) as resp:\n self.request_count += 1\n status = resp.status\n if resp.status == 504:\n error = 'API timeout'\n self.retry_count += 1\n continue\n try:\n resp.raise_for_status()\n except ClientResponseError:\n error = f'{resp.status}: {resp.reason}'\n continue\n try:\n json = await resp.json()\n except ContentTypeError:\n error = 'Unable to decode JSON'\n self.retry_count += 1\n status = 0\n continue\n json['request_datetime'] = datetime.now()\n break\n\n if retries == max_retries - 1 and error:\n logger.warning(error)\n\n return json, status",
"def test_check_status(self):\n post_json = {\"submission_id\": self.status_check_submission_id}\n # Populating error info before calling route to avoid changing last update time\n\n with create_app().app_context():\n sess = GlobalDB.db().session\n populate_submission_error_info(self.status_check_submission_id)\n\n response = self.app.post_json(\"/v1/check_status/\", post_json, headers={\"x-session-id\": self.session_id})\n\n self.assertEqual(response.status_code, 200, msg=str(response.json))\n self.assertEqual(response.headers.get(\"Content-Type\"), \"application/json\")\n json = response.json\n # response ids are coming back as string, so patch the jobIdDict\n job_id_dict = {k: str(self.jobIdDict[k]) for k in self.jobIdDict.keys()}\n job_list = json[\"jobs\"]\n approp_job = None\n cross_job = None\n for job in job_list:\n if str(job[\"job_id\"]) == str(job_id_dict[\"appropriations\"]):\n # Found the job to be checked\n approp_job = job\n elif str(job[\"job_id\"]) == str(job_id_dict[\"cross_file\"]):\n # Found cross file job\n cross_job = job\n\n # Must have an approp job and cross-file job\n self.assertNotEqual(approp_job, None)\n self.assertNotEqual(cross_job, None)\n # And that job must have the following\n self.assertEqual(approp_job[\"job_status\"], \"ready\")\n self.assertEqual(approp_job[\"job_type\"], \"csv_record_validation\")\n self.assertEqual(approp_job[\"file_type\"], \"appropriations\")\n self.assertEqual(approp_job[\"filename\"], \"approp.csv\")\n self.assertEqual(approp_job[\"file_status\"], \"complete\")\n self.assertIn(\"missing_header_one\", approp_job[\"missing_headers\"])\n self.assertIn(\"missing_header_two\", approp_job[\"missing_headers\"])\n self.assertIn(\"duplicated_header_one\", approp_job[\"duplicated_headers\"])\n self.assertIn(\"duplicated_header_two\", approp_job[\"duplicated_headers\"])\n # Check file size and number of rows\n self.assertEqual(approp_job[\"file_size\"], 2345)\n self.assertEqual(approp_job[\"number_of_rows\"], 567)\n\n # Check error metadata for specified error\n rule_error_data = None\n for data in approp_job[\"error_data\"]:\n if data[\"field_name\"] == \"header_three\":\n rule_error_data = data\n self.assertIsNotNone(rule_error_data)\n self.assertEqual(rule_error_data[\"field_name\"], \"header_three\")\n self.assertEqual(rule_error_data[\"error_name\"], \"rule_failed\")\n self.assertEqual(rule_error_data[\"error_description\"], \"A rule failed for this value.\")\n self.assertEqual(rule_error_data[\"occurrences\"], \"7\")\n self.assertEqual(rule_error_data[\"rule_failed\"], \"Header three value must be real\")\n self.assertEqual(rule_error_data[\"original_label\"], \"A1\")\n # Check warning metadata for specified warning\n warning_error_data = None\n for data in approp_job[\"warning_data\"]:\n if data[\"field_name\"] == \"header_three\":\n warning_error_data = data\n self.assertIsNotNone(warning_error_data)\n self.assertEqual(warning_error_data[\"field_name\"], \"header_three\")\n self.assertEqual(warning_error_data[\"error_name\"], \"rule_failed\")\n self.assertEqual(warning_error_data[\"error_description\"], \"A rule failed for this value.\")\n self.assertEqual(warning_error_data[\"occurrences\"], \"7\")\n self.assertEqual(warning_error_data[\"rule_failed\"], \"Header three value looks odd\")\n self.assertEqual(warning_error_data[\"original_label\"], \"A2\")\n\n rule_error_data = None\n for data in cross_job[\"error_data\"]:\n if data[\"field_name\"] == \"header_four\":\n rule_error_data = data\n\n 
self.assertEqual(rule_error_data[\"source_file\"], \"appropriations\")\n self.assertEqual(rule_error_data[\"target_file\"], \"award\")\n\n # Check submission metadata\n self.assertEqual(json[\"cgac_code\"], \"SYS\")\n self.assertEqual(json[\"reporting_period_start_date\"], \"Q1/2016\")\n self.assertEqual(json[\"reporting_period_end_date\"], \"Q1/2016\")\n\n # Check submission level info\n self.assertEqual(json[\"number_of_errors\"], 17)\n self.assertEqual(json[\"number_of_rows\"], 667)\n\n # Get submission from db for attribute checks\n submission = sess.query(Submission).filter(\n Submission.submission_id == self.status_check_submission_id).one()\n\n # Check number of errors and warnings in submission table\n self.assertEqual(submission.number_of_errors, 17)\n self.assertEqual(submission.number_of_warnings, 7)\n\n # Check that submission was created today, this test may fail if run right at midnight UTC\n self.assertEqual(json[\"created_on\"], datetime.utcnow().strftime(\"%m/%d/%Y\"))\n self.assertEqual(json[\"last_updated\"], submission.updated_at.strftime(\"%Y-%m-%dT%H:%M:%S\"))",
"def test_get_error_parameters(self):\n with app.app_context():\n data_github = {\n \"version_control\": \"github\",\n \"scm_repo\": \"BB\",\n \"scm_branch\": \"BB\",\n \"scm_commit\": \"BB\",\n \"repo\": \"BB1\",\n \"branch\": \"BB1\",\n \"enabled\": True\n }\n\n data_git = {\n \"version_control\": \"github\",\n \"scm_repo\": \"BB\",\n \"scm_branch\": \"BB\",\n \"scm_commit\": \"BB\",\n \"repo\": \"BB2\",\n \"branch\": \"BB2\",\n \"enabled\": True\n }\n\n for data_insert in [data_github, data_git]:\n create_tracking(data_insert)\n\n resp = self.client.get(\"/tracking?oper=B&chcnsrb=B\")\n\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.INPUT_PARAMETERS_ERROR, resp_dict.get(\"code\"), msg=\"Error in status code return\"\n )\n\n self.assertIn(\"msg\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(\n ResponseCode.CODE_MSG_MAP.get(ResponseCode.INPUT_PARAMETERS_ERROR),\n resp_dict.get(\"msg\"),\n msg=\"Error in status code return\"\n )\n\n self.assertIn(\"data\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(resp_dict.get(\"data\"), None, msg=\"Error in data information return\")",
"def _handle_error(self, path, reqs, headers, get=True):\n call = requests.get if get else requests.post\n resp = None\n dump = json.dumps(reqs)\n wait = self.config.start_reconnect_wait\n while resp is None:\n if wait > self.config.max_reconnect_wait:\n raise Exception(\"To many reconnect attempts\")\n time.sleep(wait)\n try:\n resp = call(path, dump, headers=headers)\n except requests.exceptions.ConnectionError:\n resp = None\n wait *= 2\n return resp",
"def check(self):\n try:\n response = requests.head(self.url)\n except requests.exceptions.RequestException:\n return False, \"darkRed\", \"🛑 Connection Error\"\n return self._status_is_okay(response.status_code)",
"def check_valid_http_response(url, token=None):\r\n tries = 6\r\n error = None\r\n for i in range(tries):\r\n try:\r\n if token:\r\n headers = {\"Authorization\": \"Bearer {}\".format(token)}\r\n response = requests.get(url, timeout=10, headers=headers)\r\n else:\r\n response = requests.get(url, timeout=10)\r\n\r\n if response.status_code == 200:\r\n return response.text\r\n elif i >= 5:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n elif response.status_code >= 400 or response.status_code < 200:\r\n print('Response code = {}, sleep and try again'.format(response.status_code))\r\n time.sleep(3)\r\n except requests.exceptions.Timeout as e:\r\n error = e\r\n time.sleep(3)\r\n if error:\r\n raise error\r\n else:\r\n raise Exception('Unexpected case happen!')",
"def response_validator(url_dict, host_name_ip, api_endpoint):\r\n for key, value in url_dict.items():\r\n url_framed = url_framer_or_formatter(value.strip(),host_name_ip) + api_endpoint\r\n logger.debug(\"{} Executing request for {}::{} {}\".format(\"#\" * 20, key,url_framed, \"#\" * 20))\r\n status_code, response_data, error_msg = common_http_validator(method='GET', url=url_framed)\r\n if status_code == 200:\r\n logger.debug(\"{} ok status obtained with response message as {}\".format(status_code,json.loads(response_data)['status']))\r\n else:\r\n logger.debug(\"{} status with response as {} and exception message as {}\".format(status_code,response_data,error_msg))\r\n\r\n logger.debug(\"{} Request execution completed for {}::{} {}\".format(\"#\" * 20, key,url_framed, \"#\" * 20))"
]
| [
"0.6313354",
"0.6221306",
"0.6189494",
"0.6163173",
"0.61223114",
"0.6110808",
"0.60752267",
"0.60510343",
"0.6002983",
"0.5962135",
"0.59314",
"0.5928525",
"0.5911796",
"0.59043324",
"0.5895254",
"0.58699054",
"0.5863783",
"0.5842427",
"0.58288515",
"0.5811535",
"0.57947063",
"0.57908934",
"0.57737434",
"0.57685655",
"0.57524925",
"0.57362044",
"0.5735247",
"0.5725132",
"0.57154655",
"0.57077855"
]
| 0.7266906 | 0 |
Convert an object serialization that was generated by the object serializer into a vocabulary handle. | def deserialize(self, descriptor: Dict, data: List) -> ObjectHandle:
return VocabularyHandle(
values=set(data),
name=descriptor['name'],
namespace=descriptor['namespace'],
label=descriptor.get('label'),
description=descriptor.get('description')
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_serialize_vocabulary():\n # Serialize minimal vocabulary handle.\n v = VocabularyHandle(values={'A', 'B', 'C'}, name='my_vocab')\n doc, data = VocabularyFactory().serialize(v)\n vocab = VocabularyFactory().deserialize(descriptor=doc, data=data)\n assert vocab.name == 'my_vocab'\n assert vocab.namespace is None\n assert vocab.label is None\n assert vocab.description is None\n assert vocab.values == {'A', 'B', 'C'}\n # Serialize maximal vocabulary handle.\n v = VocabularyHandle(\n values={'A', 'B', 'C'},\n name='my_vocab',\n namespace='mynamespace',\n label='My Name',\n description='Just a test'\n )\n doc, data = VocabularyFactory().serialize(v)\n vocab = VocabularyFactory().deserialize(descriptor=doc, data=data)\n assert vocab.name == 'my_vocab'\n assert vocab.namespace == 'mynamespace'\n assert vocab.label == 'My Name'\n assert vocab.description == 'Just a test'\n assert vocab.values == {'A', 'B', 'C'}",
"def deserialize(self, obj):\n raise NotImplementedError",
"def serialize(self, object: VocabularyHandle) -> Tuple[Dict, List]:\n return object.to_dict(), list(object.values)",
"def _deserialize_object(value):\n return value",
"def _deserialize(self, handle):\n raise NotImplementedError",
"def serialize(self, obj):\n return obj",
"def to_representation(self, obj):\n return self._choices[obj]",
"def getVocabulary(self): # real signature unknown; restored from __doc__\n pass",
"def bundle_instance(obj):\n\n content, contents = osl_encode(obj, True)\n # should be a bunch of documents, not just one.\n bundle = [json.dumps(c) for c in contents]\n return bundle",
"def deserialize_object(d):\n pass",
"def vocabulary(self, config=Config()):\n raise NotImplementedError(\"Class %s doesn't implement vocabulary()\" % self.__class__.__name__)",
"def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')",
"def serialize(self, obj):\n pass",
"def cast(obj: 'itkLightObject') -> \"itkVTKPolyDataReaderMF2 *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_cast(obj)",
"def from_dict(cls, dikt: dict) -> 'Vocabulary':\n return util.deserialize_model(dikt, cls)",
"def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)",
"def from_pickle(pkl):\n assert os.path.exists(pkl), f\"{pkl} not exists\"\n with open(pkl, 'rb') as f:\n vocab = pickle.load(f)\n \n return vocab",
"def _get_vocab_id_list(self, json_obj):\n return json_obj",
"def getVocabulary(vocabulary_id):\n relex_web = getSite().restrictedTraverse('relex_web')\n key = KEY_STORAGE + \".\" + vocabulary_id\n vocabulary = json.loads(getattr(relex_web, key, \"[]\"))\n return vocabulary",
"def save(cls, ob):\n return cls._save_to_avos(cls.__name__, ob)",
"def create_vocab(input_iter, min_frequency):\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(\n FLAGS.max_sentence_len,\n min_frequency=min_frequency,\n tokenizer_fn=tokenizer_fn)\n\n vocab_processor.fit(input_iter)\n return vocab_processor",
"def __init__(self, obj):\n if isinstance(obj, str):\n # The schema given is some kind of handle which we try to open\n self.data = self._get_schema_content(obj)\n else:\n self.data = obj",
"def serialize(obj):\n return serialization_manager.serialize(obj)",
"def reverse_vocab(self):\n return self._id2token",
"def reverse_vocab(self):\n return self._id2token",
"def cast(obj: 'itkLightObject') -> \"itkVTKPolyDataReaderMD2 *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_cast(obj)",
"def vocabulary(self):\n return self._vocabulary",
"def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self",
"def setVocabulary(self, vocabulary): # real signature unknown; restored from __doc__\n pass",
"def _post_deserialize (self):\n pass"
]
| [
"0.62096655",
"0.5864106",
"0.56451684",
"0.56001896",
"0.55511445",
"0.5335712",
"0.5323669",
"0.52616173",
"0.52586997",
"0.5231618",
"0.5181435",
"0.518052",
"0.51429373",
"0.51414454",
"0.51291484",
"0.5112931",
"0.5105933",
"0.50208855",
"0.50041157",
"0.4997506",
"0.49743494",
"0.4963626",
"0.49326688",
"0.49293575",
"0.49293575",
"0.49257445",
"0.49107182",
"0.4896446",
"0.4880549",
"0.48689204"
]
| 0.65390325 | 0 |
Compute the mewe estimator for misspecified models Inputs | def mewe_misspecified(M,N,m,n,target):
output = []
for k in tqdm(range(0,M)):
# Allocate space for output
mewe_store = np.zeros((len(n),target['thetadim']))
mewe_runtimes = np.zeros(len(n))
mewe_evals = np.zeros(len(n))
# generate all observations and sets of randomness to be used
if target["observed_law"] == "Gamma":
obs_all = np.random.gamma(true_theta[0], true_theta[1],np.max(n))
elif target["observed_law"] == "Cauchy":
obs_all = np.random.standard_cauchy(np.max(n))
else :
return("Not implemented law")
break
        # the line above is modified to generate a contaminated sample
# generate the synthetic randomness, sort.
randomness = [target['generate_randomness'](m) for i in range(N)]
for i in range(0,len(n)):
# subset observations and sort
obs = obs_all[:n[i]]
sort_obs = np.sort(obs)
sort_obs_mult = np.repeat(sort_obs, m / n[i], axis = 0)
# Define the objective to be minimized to find the MEWE
def obj1(theta):
if(theta[1] < 0 ):
out = 10e6
else :
wass_dists = [target['dist'](sort_obs_mult, np.sort(target['simulation'](theta, x))) for x in randomness]
out = np.mean(wass_dists)
return out
# Optimization
t_mewe = time.process_time()
mewe = minimize(fun = obj1, x0 = true_theta)
t_mewe = time.process_time() - t_mewe
# Save the results
mewe_store[i] = mewe.x
mewe_runtimes[i] = t_mewe
mewe_evals[i] = mewe.nit
output_cbind = np.c_[mewe_store, mewe_runtimes, mewe_evals, n, np.arange(len(n))]
output.append(output_cbind)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def estimator(envs, model, seed=99999, model_type=\"rf\", _idx=None, name=None):\n\n #wandb.watch(model, log='all')\n wandb.init(project='rgm_single', reinit=True, tags=[flags.wandb_tag])\n wandb.config.update(flags)\n wandb.config.actual_measure = name\n\n if flags.optim == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=flags.lr)\n elif flags.optim == 'sgdm':\n optimizer = optim.SGD(model.parameters(), lr=flags.lr, momentum=0.9)\n else:\n optimizer = optim.SGD(model.parameters(), lr=flags.lr)\n\n risks = None\n for step in range(flags.steps):\n for env in envs.values():\n logits = model(env['X'])\n env['mse'] = mean_mse(logits, env['Y'])\n env['irm_penalty'] = penalty(logits, env['Y'])\n\n risks = torch.stack([e['mse'] for e in envs.values()])\n\n risk_weightings = (~torch.lt(risks, risks.max())).float().detach()\n robustness_penalty = (risks * risk_weightings).mean()\n\n train_mse = risks.mean()\n rex_penalty = risks.var()\n irmv1_penalty = torch.stack([e['irm_penalty'] for e in envs.values()]).mean()\n\n weight_norm = torch.tensor(0.)\n for w in model.parameters():\n weight_norm += w.norm().pow(2)\n\n # minmax\n loss = robustness_penalty\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if model.w.data.numpy().item() < 0.0:\n if flags.nonnegative_weights_only:\n model.w.data = torch.Tensor([[0.0]])\n runs_zeros[r][_idx] = np.ones((1,))\n\n wandb.log({'loss': loss.cpu().item(),\n 'train_mse': train_mse.cpu().item(),\n 'irmv1_penalty': irmv1_penalty.cpu().item(),\n 'rex_penalty': rex_penalty.cpu().item(),\n 'robustness_penalty': robustness_penalty.cpu().item(),\n 'risk_argmax': risks.argmax().cpu().item(),\n 'risk_max': risks.max().cpu().item(),\n 'risk_min': risks.min().cpu().item(),\n 'risk_range': (risks.max() - risks.min()).cpu().item(),\n 'weight': model.w.squeeze().cpu().item(),\n 'weight_grad': model.w.grad.squeeze().cpu().item(),\n 'bias': model.b.squeeze().cpu().item() if flags.bias else 0.0,\n 'bias_grad': model.b.grad.squeeze().cpu().item() if flags.bias else 0.0,\n })\n wandb.join()\n np.save(f'results/regression/risks/{flags.selected_single_measure}__{flags.env_split}__{flags.exp_type}__{flags.only_bias__ignore_input}.npy', risks.detach().numpy())\n return model",
"def test_predict_uncertain_inputs(self):\n X = np.linspace(-5,5, 10)[:, None]\n Y = 2*X + np.random.randn(*X.shape)*1e-3\n m = GPy.models.BayesianGPLVM(Y, 1, X=X, kernel=GPy.kern.Linear(1), num_inducing=1)\n m.Gaussian_noise[:] = 1e-4\n m.X.mean[:] = X[:]\n m.X.variance[:] = 1e-5\n m.X.fix()\n m.optimize()\n X_pred_mu = np.random.randn(5, 1)\n X_pred_var = np.random.rand(5, 1) + 1e-5\n from GPy.core.parameterization.variational import NormalPosterior\n X_pred = NormalPosterior(X_pred_mu, X_pred_var)\n # mu = \\int f(x)q(x|mu,S) dx = \\int 2x.q(x|mu,S) dx = 2.mu\n # S = \\int (f(x) - m)^2q(x|mu,S) dx = \\int f(x)^2 q(x) dx - mu**2 = 4(mu^2 + S) - (2.mu)^2 = 4S\n Y_mu_true = 2*X_pred_mu\n Y_var_true = 4*X_pred_var\n Y_mu_pred, Y_var_pred = m.predict_noiseless(X_pred)\n np.testing.assert_allclose(Y_mu_true, Y_mu_pred, rtol=1e-3)\n np.testing.assert_allclose(Y_var_true, Y_var_pred, rtol=1e-3)",
"def E_step_precompute(self, model_params, my_suff_stat, my_data):",
"def emm(dataset):\r\n\r\n ####################### CONFIGURE THIS ##############################\r\n\r\n #Define subgroup\r\n #subgroup = dataset[(dataset['dvce_type'] == 'Tablet')]\r\n subgroup = dataset[(dataset['os_timezone'].str.contains(\"Asia\") & (dataset['os_name'].str.contains(\"iPhone\")))]\r\n\r\n #Define target 1\r\n target1 = 'revenue'\r\n\r\n #Define target 2\r\n target2 = 'new_buttons'\r\n\r\n #####################################################################\r\n\r\n logging.info(\"Exceptional Model Mining. (Two targets)\")\r\n\r\n lengthDataset = len(dataset)\r\n logging.debug('Examples of the dataset {}'.format(lengthDataset)) \r\n logging.debug('Examples of subgroup: {} ({:.2f}%)'.format(len(subgroup), (len(subgroup)/lengthDataset) * 100))\r\n correlationTargets = phi_coefficient (dataset,target1,target2)\r\n logging.debug('Correlation of the two targets: {:.2f}'.format(correlationTargets))\r\n \r\n evaluate(QualityMeasure.SCD,ModelClass.PhiCoefficient,dataset,subgroup,target1,target2)",
"def get_model(n_obs=100, ess=50, ug=None, seed_obs=None):\n if ug is None:\n ug = np.zeros((4, 4))\n ug[0, 1:3] = 1\n ug[1:3, 0] = 1\n\n m = elfi.new_model()\n priors = []\n dag, node_ordering, oc = ug_to_dag(ug)\n para_mat = mn_para_mat(ug)\n combs_to_node = 2 ** np.sum(dag, axis=0)\n n_dim = np.sum(combs_to_node).astype(int)\n alpha = ess / 2 / oc.shape[0] * np.ones(n_dim)\n no_connections = np.where(np.sum(dag, axis=0) == 0)[0].astype(int)\n alpha[no_connections] = ess / 2\n\n for i in np.arange(n_dim):\n name_prior = 'a_{}'.format(i)\n prior_beta = elfi.Prior('beta',\n alpha[i],\n alpha[i],\n model=m,\n name=name_prior)\n priors.append(prior_beta)\n\n sim_fn = partial(gmn_simulate,\n ug=ug,\n n=n_obs,\n ess=ess,\n dag=dag,\n node_ordering=node_ordering,\n oc=oc,\n para_mat=para_mat)\n a_true = 0.2 * np.ones((n_dim, 1))\n y = sim_fn(a_true)\n\n elfi.Simulator(sim_fn, *priors, observed=y, name='GMN')\n elfi.Summary(sumstats, m['GMN'], oc.shape[0], n_obs, name='S')\n elfi.Distance('euclidean', m['S'], name='d')\n\n return m",
"def model_test(nu, fsigma_T, fsigma_P, models_in, amps_in, params_in, models_fit, label):\n # Generate fake data with some \"true\" parameters\n (D_vec, Ninv) = gen_data(nu, fsigma_T, fsigma_P, models_in, amps_in, params_in)\n Ninv_sqrt = np.matrix(linalg.sqrtm(Ninv))\n (dust_params, sync_params, cmb_params) = params_in\n (dust_amp, sync_amp, cmb_amp) = amps_in\n \n # Beam model\n beam_mat = np.identity(3*len(nu))\n\n # Set-up MCMC\n dust_guess = np.array([1.6, 20.])\n sync_guess = np.array([-3.])\n cmb_guess = np.array([])\n guess = np.concatenate((dust_guess, sync_guess, cmb_guess))\n #ndim = len(dust_guess) + len(sync_guess) + len(cmb_guess)\n \n # Run MCMC sampler on this model\n t0 = time.time()\n dust_params_out, sync_params_out, cmb_params_out, samples \\\n = mcmc(guess, nu, D_vec, Ninv, beam_mat, models_fit, label)\n print \"MCMC run in %d sec.\" % (time.time() - t0)\n \n # Estimate error on recovered CMB amplitudes\n (F_fg, F_cmb, F) = F_matrix(nu, dust_params_out, sync_params_out, cmb_params_out, models_fit)\n H = F_fg.T*Ninv*F_fg\n x_mat = np.linalg.inv(F.T*beam_mat.T*Ninv*beam_mat*F)*F.T*beam_mat.T*Ninv*D_vec # Equation A3\n \n U, Lambda, VT = np.linalg.svd(Ninv_sqrt*F_fg, full_matrices=False) # Equation A14\n \n print \"-\"*30\n print \"F_cmb.T\", F_cmb.T.shape\n print \"Ninv_sqrt\", Ninv_sqrt.shape\n print \"F_cmb\", F_cmb.shape\n print \"I\", np.identity(U.shape[0]).shape\n print \"U\", U.shape\n print \"U.T\", U.T.shape\n print \"-\"*30\n \n \n \n N_eff_inv_cmb = F_cmb.T*Ninv_sqrt*(np.matrix(np.identity(U.shape[0])) - U*U.T)*Ninv_sqrt*F_cmb # Equation A16\n N_eff_cmb = np.linalg.inv(N_eff_inv_cmb)\n cmb_noise = np.array([N_eff_cmb[0,0], N_eff_cmb[1,1], N_eff_cmb[2,2]])\n\n gls_cmb = x_mat[0:3,0]\n cmb_chisq = (np.matrix(cmb_amp).T - gls_cmb).T*N_eff_inv_cmb*(np.matrix(cmb_amp).T - gls_cmb)\n \n # Output triangle plots for dust\n if label != None:\n if (models_fit[0] == 'mbb' and models_fit[1] == 'pow'):\n if (models_in[0] == 'mbb'):\n fig = corner.corner(samples, truths=[dust_params[0], dust_params[1], sync_params[0]],\n labels=[r\"$\\beta_d$\", r\"$T_d$\",r\"$\\alpha_s$\"])\n else :\n fig = corner.corner(samples, labels=[r\"$\\beta_d$\", r\"$T_d$\",r\"$\\alpha_s$\"])\n else :\n print 'Error! Not configured for this plot!'\n exit()\n fig.savefig('triangle_' + label + '.png')\n plt.close('all')\n \n # Run multinest sampler\n #multinest(nu, D_vec, Ninv, beam_mat, ndim, models_fit, label)\n \n return gls_cmb, cmb_chisq, cmb_noise",
"def test_difficulties_eps_multi(self):\n well_w = self.get_w_well_behaviour()\n\n def get_beamformer(A, B):\n return get_mvdr_vector_souden(\n A, B,\n return_ref_channel=True\n )\n\n for args in [\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n ]:\n w, ref_channel = get_beamformer(*args)\n assert ref_channel == 2, ref_channel\n np.testing.assert_allclose(\n w,\n np.array([[0., 0., 0.], well_w])\n )\n\n for args in [\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n ]:\n with tc.assert_raises(AssertionError):\n get_beamformer(*args)",
"def mewe(M,N,m,n,target):\r\n\t\r\n\toutput = []\r\n\tfor k in range(0,M):\r\n\t\tprint(k)\r\n\t\t# Allocate space for output\r\n\t\tmewe_store = np.zeros((len(n),target['thetadim']))\r\n\t\tmewe_runtimes = np.zeros(len(n))\r\n\t\tmewe_evals = np.zeros(len(n))\r\n\t\t\r\n\t\t# generate all observations and sets of randomness to be used\r\n\t\t\r\n\t\tobs_rand = target['generate_randomness'](np.max(n))\r\n\t\tobs_all = target['observation'](target['true_theta'], false_theta, epsilon,obs_rand)\r\n\t\t\r\n\t\t# generate the synthetic randomness, sort.\r\n\t\t\r\n\t\trandomness = [target['generate_randomness'](m) for i in range(N)]\r\n\t\t\r\n\t\tfor i in range(0,len(n)):\r\n\t\t\t# subset observations and sort\r\n\t\t\tobs = obs_all[:n[i]]\r\n\t\t\tsort_obs = np.sort(obs)\r\n\t\t\tsort_obs_mult = np.repeat(sort_obs, m / n[i], axis = 0)\r\n\t\t\t\r\n\t\t\t# Define the objective to be minimized to find the MEWE\r\n\t\t\t\r\n\t\t\tdef obj1(theta):\r\n\t\t\t\tif(theta[1] < 0 ):\r\n\t\t\t\t\tout = 10e6\r\n\t\t\t\telse :\r\n\t\t\t\t\twass_dists = [target['dist'](sort_obs_mult, np.sort(target['robservation'](theta, x))) for x in randomness]\r\n\t\t\t\t\tout = np.mean(wass_dists)\r\n\t\t\t\t\r\n\t\t\t\treturn out\r\n\t\t\t\t\r\n\t\t\t# Optimization\r\n\t\t\t\r\n\t\t\tt_mewe = time.process_time()\r\n\t\t\tmewe = minimize(fun = obj1, x0 = true_theta)\r\n\t\t\tt_mewe = time.process_time() - t_mewe\r\n\t\t\t\r\n\t\t\t# Save the results\r\n\t\t\tmewe_store[i] = mewe.x\r\n\t\t\tmewe_runtimes[i] = t_mewe\r\n\t\t\tmewe_evals[i] = mewe.nit\r\n\t\t\r\n\t\toutput_cbind = np.c_[mewe_store, mewe_runtimes, mewe_evals, n, np.arange(len(n))]\r\n\t\toutput.append(output_cbind)\r\n\t\t\r\n\treturn output",
"def compute_weights(self):\n # Do the leave-one-out experiments\n loocv = np.zeros((self.M, self.nump))\n for i in range(self.M):\n for j in range(self.nump):\n loocv[i, j] = self.surrogate_list[i][j].eval(self.x[j, :])\n\n # Compute the model characteristics\n corr_coeff = np.ones(self.M)\n for i in range(self.M):\n corr_coeff[i] = np.corrcoef(np.vstack(\n (loocv[i, :], self.get_fx().flatten())))[0, 1]\n\n root_mean_sq_err = np.ones(self.M)\n for i in range(self.M):\n root_mean_sq_err[i] = 1.0 / math.sqrt(\n self._mean_squared_error(self.get_fx().flatten(), loocv[i, :]))\n\n mean_abs_err = np.ones(self.M)\n for i in range(self.M):\n mean_abs_err[i] = 1.0 / self._mean_abs_err(\n self.get_fx().flatten(), loocv[i, :])\n\n # Make sure no correlations are negative\n corr_coeff[np.where(corr_coeff < 0.0)] = 0.0\n if np.max(corr_coeff) == 0.0:\n corr_coeff += 1.0\n\n # Normalize the test statistics\n corr_coeff /= np.sum(corr_coeff)\n root_mean_sq_err /= np.sum(root_mean_sq_err)\n mean_abs_err /= np.sum(mean_abs_err)\n\n # Create mass functions based on the model characteristics\n m1 = self._prob_to_mass(corr_coeff)\n m2 = self._prob_to_mass(root_mean_sq_err)\n m3 = self._prob_to_mass(mean_abs_err)\n\n # Compute pignistic probabilities from Dempster-Shafer theory\n pignistic = m1.combine_conjunctive([m2, m3]).to_dict()\n self.weights = np.ones(self.M)\n for i in range(self.M):\n self.weights[i] = pignistic.get(str(i+1))",
"def computeModel(self, name, data):\n\t\tif name == \"Weibull\":\n\t\t\tmodel = Weibull(data)\n\t\t\tnamePL = \"Weibull\"\n\n\t\telif name == \"Exponential\":\n\t\t\tmodel = Exponential(data)\n#\t\t\tnamePL = \"Wykładniczy (Goel-Okumoto)\"\n\t\t\tnamePL = \"Wykładniczy\"\n\n\t\telif name == \"Gamma\":\n\t\t\tmodel = Gamma(data)\n#\t\t\tnamePL = \"Gamma (S-kształtny)\"\n\t\t\tnamePL = \"Gamma\"\n\n\t\telif name == \"Logarithmic\":\n\t\t\tmodel = Logarithmic(data)\n#\t\t\tnamePL = \"Logarytmiczny (Musa-Okumoto)\"\n\t\t\tnamePL = \"Logarytmiczny\"\n\n\t\telif name == \"Power\":\n\t\t\tmodel = Power(data)\n#\t\t\tnamePL = \"Potęgowy (Duane)\"\n\t\t\tnamePL = \"Potęgowy\"\n\n\t\telse:\n\t\t\traise Exception, \"Nieznany model '%s'\" % name\n\n\t\tprint \"\\n---\\n\", name, \"estimating...\",\n\n\t\ttry:\n\t\t\tparam, chi, ret, akaike = model.fit()\n\t\texcept Exception, e:\n\t\t\tprint \"Wyjatek!\", e\n\t\t\treturn [None, \"brak\", \"brak\", INF, name, namePL, INF]\n\n\t\t# oblicza pozostale wartosci wykresu\n\t\tbeg = len(ret) + 1\n\t\tend = beg + int((self.white2 - self.black2)/self.granSec) + 1\n\t\tcont = model.calculate(param, range(beg, end))\n\t\tret.extend(cont)\n\n\t\tprint \" alfa,beta=\", param,\n\t\tprint \" chi=\", chi\n\t\tprint \" akaike=\", akaike\n\t\tprint ret\n\n\t\t# formatowanie\n\t\talfa = round(param[0], 4)\n\t\tif len(param) == 2: \n\t\t\tbeta = round(param[1], 4) \n\t\telse: \n\t\t\tbeta = ' ';\t\t# nie ma beta w expotencjalnym\n\t\tchi = int(chi)\n\t\takaike = round(akaike, 2)\n\n\t\treturn [ret, alfa, beta, chi, name, namePL, akaike]",
"def weber_MS(I,J,x,y,w):\n M = max([((x[i]-x[j])**2 + (y[i]-y[j])**2) for i in I for j in I])\n model = Model(\"weber - multiple source\")\n X,Y,v,u = {},{},{},{}\n xaux,yaux,uaux = {},{},{}\n for j in J:\n X[j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"X(%s)\"%j)\n Y[j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"Y(%s)\"%j)\n for i in I:\n v[i,j] = model.addVar(vtype=\"C\", name=\"v(%s,%s)\"%(i,j))\n u[i,j] = model.addVar(vtype=\"B\", name=\"u(%s,%s)\"%(i,j))\n xaux[i,j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"xaux(%s,%s)\"%(i,j))\n yaux[i,j] = model.addVar(lb=-model.infinity(), vtype=\"C\", name=\"yaux(%s,%s)\"%(i,j))\n uaux[i,j] = model.addVar(vtype=\"C\", name=\"uaux(%s,%s)\"%(i,j))\n\n\n\n for i in I:\n model.addCons(quicksum(u[i,j] for j in J) == 1, \"Assign(%s)\"%i)\n for j in J:\n model.addCons(xaux[i,j]*xaux[i,j] + yaux[i,j]*yaux[i,j] <= v[i,j]*v[i,j], \"MinDist(%s,%s)\"%(i,j))\n model.addCons(xaux[i,j] == (x[i]-X[j]), \"xAux(%s,%s)\"%(i,j))\n model.addCons(yaux[i,j] == (y[i]-Y[j]), \"yAux(%s,%s)\"%(i,j))\n model.addCons(uaux[i,j] >= v[i,j] - M*(1-u[i,j]), \"uAux(%s,%s)\"%(i,j))\n\n model.setObjective(quicksum(w[i]*uaux[i,j] for i in I for j in J), \"minimize\")\n\n\n model.data = X,Y,v,u\n return model",
"def mult_reads_gmm(reads, training_reads, components):\n\n\tprediction_zero_100 = 0\n\tprediction_one_100 = 0\n\tprediction_zero_200 = 0\n\tprediction_one_200 = 0\n\n\tbase_opts = ['A', 'C', 'G', 'T']\n\n\n\tmodel = mixture.GMM(n_components=components, covariance_type='spherical')\n\tnum_reads = len(reads)\n\n\ttraining_reads = [read.get_read().replace('\\'', '') for read in training_reads]\n\n\tread_input = [read.get_read().replace('\\'', '') for read in reads]\n\t# alignment_inputs = []\n\t# alignment_inputs.extend(read.get_alignments())\n\n\t# Generates observations\n\t# bases are converted to their ascii character values\n\tread_list = []\n\tfor read in read_input:\n\t\tread_char = [convert_letter(c) for c in read]\n\t\tread_list.append(read_char)\n\n\tobservations = []\n\t\n\tfor alignment in training_reads:\n\t\talignment_list = [convert_letter(c) for c in alignment] \n\t\tobservations.append( alignment_list )\n\t# for base_index, base in enumerate(read_main):\n\t# \tbase_observations = [ord(base)]\n\t# \tfor alignment in alignments:\n\t# \t\tbase_observations.append(ord(alignment[base_index]))\n\n\t# \tobservations.append(base_observations)\n\n\tmodel.fit(observations)\n\tmeans = np.round(model.means_, 2)\n\tcovars = np.round(model.covars_, 2)\n\tconverted_means = []\n\tfor num_list in means:\n\t\t# convert to nearest acceptable letter\n\t\t#char_means = [chr(int(n)) for n in num_list]\n\t\tchar_means = [convert_to_letter(n) for n in num_list]\n\t\tconverted_means.append(char_means)\n\t\n\tpredictions = model.predict(read_list)\n\n\tread_predictions = []\n\tfor index, prediction in enumerate(predictions):\n\t\tmapping = [prediction, reads[index]]\n\t\tread_predictions.append(mapping)\n\t\n\n\tfor read_pr in read_predictions:\n\t\t\n\t\tprediction = read_pr[0]\n\t\t# def filt(x): return x[0] == prediction\n\t\t# matches = filter(filt, read_predictions)\n\t\tpr = prediction\n\t\trps = int(float(read_pr[1].get_position()))\n\t\t# print '\\n'\n\t\t# print prediction\n\t\t# print 'Converted Means: '\n\t\t# print ''.join(converted_means[prediction])\n\t\t# print 'Actual Read'\n\t\t# print read_pr[1].get_read()\n\t\t# print read_pr[1].get_position()\n\t\t# print 'Matches'\n\t\t# for m in matches:\n\t\t# \tprint m[1].get_read() + ' Position: ' + m[1].get_position()\n\t\t# \tm[1].print_read()\n\n\t\tif pr == 0:\n\t\t\tif rps == 100:\n\t\t\t\tprediction_zero_100 = prediction_zero_100 + 1\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprediction_zero_200 = prediction_zero_200 + 1\n\t\t\t\t\n\t\telse:\n\t\t\tif rps == 100:\n\t\t\t\tprediction_one_100 = prediction_one_100 + 1\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprediction_one_200 = prediction_one_200 + 1\n\t\t\t\t\n\n\tprint '\\n-------------Predictions---------------------'\n\tprint 'Prediction: 0 Position: 100 Num: ' + str(prediction_zero_100)\n\tprint 'Prediction: 1 Position: 100 Num: ' + str(prediction_one_100)\n\tprint 'Prediction: 0 Position: 200 Num: ' + str(prediction_zero_200)\n\tprint 'Prediction: 1 Position: 200 Num: ' + str(prediction_one_200)\n\n\tprint '\\n------Means: -----------'\n\tfor mean in converted_means:\n\t\tprint ''.join(mean) \n\n\t# for index, prediction in enumerate(predictions):\n\t# \tprint 'Read: '\n\t# \tprint reads[index].get_read()\n\t# \tprint 'Prediction: '\n\t# \tprint prediction\n\t# \tprint converted_means[prediction]\n\t# \tprint 'Means: '\n\t# \tprint means[prediction]\n\t# \tprint covars[prediction]\n\t# \tprint '----------------------------------------\\n'\n\n\n\t# posteriors = model.predict_proba(read_list)\n\t# print 
model.get_params(deep=True)\n\t# sample = model.sample()\n\t# print [convert_to_letter(n) for n in sample[0]]",
"def test_michaelis_menten_fit(self):\n res = michaelis_menten_fit([22])\n self.assertFloatEqual(res,1.0,eps=.01)\n res = michaelis_menten_fit([42])\n self.assertFloatEqual(res,1.0,eps=.01)\n res = michaelis_menten_fit([34],num_repeats=3,params_guess=[13,13])\n self.assertFloatEqual(res,1.0,eps=.01)\n res = michaelis_menten_fit([70,70],num_repeats=5)\n self.assertFloatEqual(res,2.0,eps=.01)",
"def test_nme_evaluate(self):\n # test when norm_mode = 'use_norm_item'\n # test norm_item = 'box_size' like in `AFLWDataset`\n norm_item = 'box_size'\n nme_metric = NME(norm_mode='use_norm_item', norm_item=norm_item)\n aflw_meta_info = dict(from_file='configs/_base_/datasets/aflw.py')\n aflw_dataset_meta = parse_pose_metainfo(aflw_meta_info)\n nme_metric.dataset_meta = aflw_dataset_meta\n\n data_batch, data_samples = self._generate_data(\n batch_size=4, num_keypoints=19, norm_item=norm_item)\n nme_metric.process(data_batch, data_samples)\n nme = nme_metric.evaluate(4)\n target = {'NME': 0.0}\n self.assertDictEqual(nme, target)\n\n # test when norm_mode = 'keypoint_distance'\n # when `keypoint_indices = None`,\n # use default `keypoint_indices` like in `Horse10Dataset`\n nme_metric = NME(norm_mode='keypoint_distance')\n horse10_meta_info = dict(\n from_file='configs/_base_/datasets/horse10.py')\n horse10_dataset_meta = parse_pose_metainfo(horse10_meta_info)\n nme_metric.dataset_meta = horse10_dataset_meta\n\n data_batch, data_samples = self._generate_data(\n batch_size=4, num_keypoints=22)\n nme_metric.process(data_batch, data_samples)\n nme = nme_metric.evaluate(4)\n\n target = {'NME': 0.0}\n self.assertDictEqual(nme, target)\n\n # test when norm_mode = 'keypoint_distance'\n # specify custom `keypoint_indices`\n keypoint_indices = [2, 4]\n nme_metric = NME(\n norm_mode='keypoint_distance', keypoint_indices=keypoint_indices)\n coco_meta_info = dict(from_file='configs/_base_/datasets/coco.py')\n coco_dataset_meta = parse_pose_metainfo(coco_meta_info)\n nme_metric.dataset_meta = coco_dataset_meta\n\n data_batch, data_samples = self._generate_data(\n batch_size=2, num_keypoints=17)\n nme_metric.process(data_batch, data_samples)\n nme = nme_metric.evaluate(2)\n\n target = {'NME': 0.0}\n self.assertDictEqual(nme, target)",
"def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights",
"def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)",
"def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['epsilon', 'dual', 'loss', 'tol', 'fit_intercept',\n 'intercept_scaling', 'max_iter'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)",
"def _preprocess_input(self, dataset):\n masker = self.masker or dataset.masker\n\n mask_img = masker.mask_img or masker.labels_img\n if isinstance(mask_img, str):\n mask_img = nib.load(mask_img)\n\n # Ensure that protected values are not included among _required_inputs\n assert \"aggressive_mask\" not in self._required_inputs.keys(), \"This is a protected name.\"\n\n if \"aggressive_mask\" in self.inputs_.keys():\n LGR.warning(\"Removing existing 'aggressive_mask' from Estimator.\")\n self.inputs_.pop(\"aggressive_mask\")\n\n # A dictionary to collect masked image data, to be further reduced by the aggressive mask.\n temp_image_inputs = {}\n\n for name, (type_, _) in self._required_inputs.items():\n if type_ == \"image\":\n # If no resampling is requested, check if resampling is required\n if not self.resample:\n check_imgs = {img: nib.load(img) for img in self.inputs_[name]}\n _check_same_fov(**check_imgs, reference_masker=mask_img, raise_error=True)\n imgs = list(check_imgs.values())\n else:\n # resampling will only occur if shape/affines are different\n # making this harmless if all img shapes/affines are the same as the reference\n imgs = [\n resample_to_img(nib.load(img), mask_img, **self._resample_kwargs)\n for img in self.inputs_[name]\n ]\n\n # input to NiFtiLabelsMasker must be 4d\n img4d = concat_imgs(imgs, ensure_ndim=4)\n\n # Mask required input images using either the dataset's mask or the estimator's.\n temp_arr = masker.transform(img4d)\n\n # An intermediate step to mask out bad voxels.\n # Can be dropped once PyMARE is able to handle masked arrays or missing data.\n nonzero_voxels_bool = np.all(temp_arr != 0, axis=0)\n nonnan_voxels_bool = np.all(~np.isnan(temp_arr), axis=0)\n good_voxels_bool = np.logical_and(nonzero_voxels_bool, nonnan_voxels_bool)\n\n data = masker.transform(img4d)\n\n temp_image_inputs[name] = data\n if \"aggressive_mask\" not in self.inputs_.keys():\n self.inputs_[\"aggressive_mask\"] = good_voxels_bool\n else:\n # Remove any voxels that are bad in any image-based inputs\n self.inputs_[\"aggressive_mask\"] = np.logical_or(\n self.inputs_[\"aggressive_mask\"],\n good_voxels_bool,\n )\n\n # Further reduce image-based inputs to remove \"bad\" voxels\n # (voxels with zeros or NaNs in any studies)\n if \"aggressive_mask\" in self.inputs_.keys():\n n_bad_voxels = (\n self.inputs_[\"aggressive_mask\"].size - self.inputs_[\"aggressive_mask\"].sum()\n )\n if n_bad_voxels:\n LGR.warning(\n f\"Masking out {n_bad_voxels} additional voxels. \"\n \"The updated masker is available in the Estimator.masker attribute.\"\n )\n\n for name, raw_masked_data in temp_image_inputs.items():\n self.inputs_[name] = raw_masked_data[:, self.inputs_[\"aggressive_mask\"]]",
"def test_gaussian_em():\n fname = \"gmm-3-10-0.7.npz\"\n gmm = GaussianMixtureModel.generate( fname, 3, 3 )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n N, n = 1e6, 1e5\n\n\n X = gmm.sample( N, n )\n\n algo = GaussianMixtureEM(k, d)\n\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n\n M_ = closest_permuted_matrix( M, M_ )\n w_ = closest_permuted_vector( w, w_ )\n\n print w, w_\n\n print norm( M - M_ )/norm(M)\n print abs(S - S_).max()\n print norm( w - w_ ) \n\n assert( norm( M - M_ )/norm(M) < 1e-1 )\n assert (abs(S - S_) < 1 ).all()\n assert( norm( w - w_ ) < 1e-2 )",
"def createSignalModelExponential(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n #Modeling these parameters this way is why wf needs to be normalized\n exp_rate = Uniform('exp_rate', lower=0, upper=.1)\n exp_scale = Uniform('exp_scale', lower=0, upper=.1)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n## @deterministic(plot=False, name=\"test2\")\n## def adjusted_scale(s=switchpoint, s1=exp_scale):\n## out = np.empty(len(data))\n## out[:s] = s1\n## out[s:] = s1\n## return out\n#\n# scale_param = adjusted_scale(switchpoint, exp_scale)\n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):\n out = np.zeros(len(data))\n out[s:] = scale * ( np.exp(r * (timestamp[s:] - s)) - 1.)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()",
"def test_height_and_fwhm_expression_evalution_in_builtin_models():\n mod = models.GaussianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.LorentzianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.SplitLorentzianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.0)\n params.update_constraints()\n\n mod = models.VoigtModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=1.0)\n params.update_constraints()\n\n mod = models.PseudoVoigtModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, fraction=0.5)\n params.update_constraints()\n\n mod = models.MoffatModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, beta=0.0)\n params.update_constraints()\n\n mod = models.Pearson7Model()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0)\n params.update_constraints()\n\n mod = models.StudentsTModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.BreitWignerModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, q=0.0)\n params.update_constraints()\n\n mod = models.LognormalModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.DampedOscillatorModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.DampedHarmonicOscillatorModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.ExponentialGaussianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.SkewedGaussianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.SkewedVoigtModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0,\n skew=0.0)\n params.update_constraints()\n\n mod = models.DonaichModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.StepModel()\n for f in ('linear', 'arctan', 'erf', 'logistic'):\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, form=f)\n params.update_constraints()\n\n mod = models.RectangleModel()\n for f in ('linear', 'arctan', 'erf', 'logistic'):\n params = mod.make_params(amplitude=1.0, center1=0.0, sigma1=0.0,\n center2=0.0, sigma2=0.0, form=f)\n params.update_constraints()",
"def test_ebm_unknown_value_at_predict():\n X = np.array(\n [[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]],\n dtype=np.uint8,\n )\n\n X_test = np.array([[0, 1, 0, 0], [0, 0, 1, 1], [1, 0, 0, 0]], dtype=np.uint8)\n\n y = np.array([0, 1, 1, 1, 1], dtype=np.uint8)\n\n clf = ExplainableBoostingClassifier()\n clf.fit(X, y)\n clf.predict(X_test)\n\n valid_ebm(clf)",
"def __init__(self, \n num_vars, \n num_hidden,\n training_inputs = None,\n algorithm = None,\n algorithm_dict = None,\n batch_size = None,\n use_momentum = None,\n W0= None, \n b0= None, \n bhid0 = None,\n zero_diag = True,\n symmetric = True,\n report_p_tilda =False,\n learn_biases = True,\n test_mode= False,\n training = True):\n \n self.num_vars = num_vars\n \n self.num_hidden = num_hidden\n \n self.batch_size = batch_size\n \n self.zero_diag = zero_diag\n \n self.algorithm = algorithm\n \n self.num_samples = 0\n \n self.num_u_gibbs = 0\n \n self.gibbs_steps = 0\n \n self.resample = False\n \n self.uniform = False\n \n self.mixture = False\n \n self.mix_params = []\n \n self.m_params = []\n \n self.mf_steps = 0\n \n self.alpha = 0\n \n self.learn_biases = learn_biases\n \n if isinstance(algorithm_dict, dict):\n \n for param in algorithm_dict.keys():\n \n if param == 'resample':\n \n self.resample = algorithm_dict[param]\n \n if param == 'mf_steps':\n \n self.mf_steps = algorithm_dict[param]\n \n if param == \"gibbs_steps\":\n \n self.gibbs_steps = algorithm_dict[param]\n \n if param == \"num_samples\":\n \n self.num_samples = algorithm_dict[param]\n \n if param == \"num_u_gibbs\":\n \n self.num_u_gibbs = algorithm_dict[param]\n \n if param == \"uniform\":\n \n self.uniform = algorithm_dict[param] \n \n if param == \"mixture\":\n \n self.mixture = algorithm_dict[param] \n \n if param == \"mix_params\":\n \n self.mix_params = algorithm_dict[param] \n \n if param == \"alpha\" and algorithm_dict[param] != None:\n #### alpha defines transition rate from\n #### uniform to mean-field distribution\n self.alpha = algorithm_dict[param] \n \n self.m_params = (1-self.alpha)*0.5*np.ones([1,self.num_vars])+\\\n self.alpha*np.mean(training_inputs,0)\n \n self.use_momentum = use_momentum\n \n self.report_p_tilda = report_p_tilda\n \n self.side = int(np.sqrt(self.num_vars))\n \n self.np_rand_gen = np.random.RandomState(1234)\n \n self.theano_rand_gen =\\\n theano.sandbox.rng_mrg.MRG_RandomStreams(self.np_rand_gen.randint(2**30))\n \n #self.theano_rand_gen =\\\n #T.shared_randomstreams.RandomStreams(self.np_rand_gen.randint(2**30))\n \n theano.config.exception_verbosity = 'high'\n \n self.node_indices = \\\n theano.shared(np.arange(self.num_vars), name=\"node_indices\")\n \n self.x = T.matrix('x')\n \n self.x_tilda = T.matrix('x_tilda')\n \n self.sampler_theta = T.matrix('sampler_theta')\n \n self.symmetric = symmetric\n \n if training:\n \n if self.num_hidden ==0:\n \n self.num_x2 = self.num_vars\n \n elif self.num_hidden > 0 :\n \n self.num_x2 = self.num_hidden\n \n self.updates = OrderedDict()\n \n self.N_train = training_inputs.shape[0]\n \n self.train_inputs = theano.shared(np.asarray(training_inputs,\n dtype=theano.config.floatX),\n borrow= True)\n \n self.learning_rate = T.dscalar('learning_rate')\n \n if self.mixture:\n \n print(\"Importance distribution was specified as mixture\"+\\\n \" of Bernoulli products\")\n \n if self.mix_params == []:\n print(\"Error: parameters defining mixture means were\"+\\\n \" not provided\")\n sys.exit()\n \n self.set_mixture_means(inputs = training_inputs)\n \n if use_momentum:\n \n print(\"Will add momentum term to gradient computations\")\n \n self.momentum = T.dscalar('learning_rate')\n \n self.grad_vec = {}\n \n self.grad_vec['W'] = theano.shared(np.zeros([self.num_vars, self.num_x2],\n dtype = theano.config.floatX), name = 'W_momentum', borrow = True)\n \n if self.num_hidden > 0:\n \n self.grad_vec['bhid'] = theano.shared(np.zeros([self.num_x2],\n dtype = 
theano.config.floatX), name = 'b_momentum', borrow = True)\n \n self.grad_vec['b'] = theano.shared(np.zeros([self.num_vars],\n dtype = theano.config.floatX), name = 'b_momentum', borrow = True)\n \n if test_mode:\n \n b_init =self.np_rand_gen.uniform(0,1, num_vars)\n \n W_init =self.np_rand_gen.uniform(0,1, size = (num_vars, num_vars))\n \n # also tested ones\n # b_init = np.ones(num_vars)\n \n # W_init = np.ones([num_vars, num_vars])\n \n self.b_init= np.asarray(b_init, dtype = theano.config.floatX)\n \n self.W_init= np.asarray(W_init, dtype = theano.config.floatX)\n \n self.b = theano.shared(self.b_init, name='b', borrow = False)\n \n self.W = theano.shared(self.W_init, name='W', borrow = False)\n \n print(\"Initialized with test mode\")\n \n else:\n \n if W0 is None:\n \n if self.num_hidden > 0:\n \n W0_init =\\\n self.np_rand_gen.uniform(\n -4*np.sqrt(6.0/(self.num_vars+self.num_hidden)),\\\n 4*np.sqrt(6.0 /(self.num_vars + self.num_hidden)), \n size = (num_vars, self.num_hidden)\n )\n \n W0 = np.asarray(W0_init, dtype = theano.config.floatX) \n \n if self.num_hidden == 0:\n \n # different W initializations: \n \n # W0_init =\\\n # self.np_rand_gen.uniform(-np.sqrt(3.0/(num_vars)),\\\n # np.sqrt(3.0 / (num_vars)), size = (num_vars, num_vars))\n \n # W0_init =\\\n # self.np_rand_gen.uniform(-0.00000001,\\\n # 0.00000001, size = (num_vars, num_vars))\n \n W0_init = 0.00000001*\\\n self.np_rand_gen.normal(size = (num_vars, self.num_x2)) \n \n W0 = np.asarray(W0_init, dtype = theano.config.floatX)\n \n if self.symmetric:\n \n W0 = (W0 + np.transpose(W0))/2.0\n \n if self.zero_diag:\n \n W0 = W0 - np.diag(np.diag(W0))\n \n self.W = theano.shared(value= W0, name='W', borrow=True)\n \n if self.num_hidden == 0:\n \n test_W = self.W.get_value() \n \n assert sum(np.diag(test_W)) == 0.0\n \n assert (test_W == np.transpose(test_W)).all() == True\n \n else:\n print(\"W is initialized with provided array\")\n self.W = theano.shared(value= W0, name='W', borrow=True)\n \n if b0 is None:\n \n bias_init = np.zeros(num_vars, dtype = theano.config.floatX)\n \n self.b = theano.shared(value= bias_init, name='b', borrow=True)\n \n else:\n print(\"b vector is initialized with provided vector\")\n self.b = theano.shared(value= b0, name='b', borrow=True)\n \n if bhid0 is None and self.num_hidden > 0:\n \n hbias_init = np.zeros(self.num_hidden, dtype = theano.config.floatX)\n \n self.bhid = theano.shared(value= hbias_init, name='bhid', borrow=True)\n \n elif (bhid0 is not None) and (self.num_hidden > 0):\n print(\"bhid vector is initialized with provided vector\") \n self.bhid = theano.shared(value= bhid0, name='bhid', borrow=True)\n \n self.theta = [self.W, self.b]\n \n if self.num_hidden > 0 :\n \n self.theta.append(self.bhid)\n \n self.train_set = set(range(self.N_train))\n \n self.minibatch_set = T.ivector('minibatch_set')\n \n self.sample_set = T.ivector('sample_set')\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n self.x_gibbs= theano.shared(np.ones([self.batch_size,self.num_vars],\n dtype=theano.config.floatX),\n borrow = True, name= \"x_gibbs\")\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n self.persistent_gibbs =\\\n theano.shared(np.ones([self.batch_size,self.num_hidden],\n dtype=theano.config.floatX),\n borrow = True, \n name= \"persistent_gibbs\")\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0:\n \n init_mf_vis = self.np_rand_gen.uniform(0, \n 1, \n size =(self.num_vars,1))\n \n init_mf_vis = np.asarray(init_mf_vis, dtype = theano.config.floatX)\n \n 
self.mf_vis_p = theano.shared(init_mf_vis, \n name= \"mf_vis_p\", \n borrow= True)\n \n if self.num_hidden > 0:\n \n init_mf_hid = \\\n self.np_rand_gen.uniform(0, 1, size =(self.num_hidden,1))\n \n init_mf_hid = np.asarray(init_mf_hid, \n dtype = theano.config.floatX)\n \n self.mf_hid_p = theano.shared(init_mf_hid, \n name= \"mf_hid_p\", \n borrow= True)\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0: \n \n if self.num_hidden ==0: \n self.x_gibbs= theano.shared(np.ones([self.batch_size,self.num_vars],\n dtype=theano.config.floatX),\n borrow = True, name= \"x_gibbs\")",
"def test_difficulties_without_eps_multi(self):\n\n def get_beamformer(A, B):\n return get_mvdr_vector_souden(\n A, B,\n eps=0,\n return_ref_channel=True\n )\n\n for args in [\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n ]:\n with tc.assert_raises(AssertionError):\n get_beamformer(*args)",
"def test_nonparam(self):\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedNonParamDML(model_y=automl_model_reg(),\n model_t=automl_model_clf(),\n model_final=automl_model_sample_weight_reg(), featurizer=None,\n discrete_treatment=True)\n est.fit(Y, T, X=X)\n _ = est.effect(X)",
"def test_input_data_after_fitting(mcycle_X_y):\n X, y = mcycle_X_y\n weights = np.ones_like(y)\n\n X_nan = deepcopy(X)\n X_nan[0] = X_nan[0] * np.nan\n\n y_nan = deepcopy(y.values)\n y_nan[0] = y_nan[0] * np.nan\n\n weights_nan = deepcopy(weights)\n weights_nan[0] = weights_nan[0] * np.nan\n\n gam = LinearGAM()\n\n with pytest.raises(ValueError):\n gam.fit(X_nan, y, weights)\n with pytest.raises(ValueError):\n gam.fit(X, y_nan, weights)\n with pytest.raises(ValueError):\n gam.fit(X, y, weights_nan)\n gam = gam.fit(X, y)\n\n # test X is nan\n with pytest.raises(ValueError):\n gam.predict(X_nan)\n with pytest.raises(ValueError):\n gam.predict_mu(X_nan)\n with pytest.raises(ValueError):\n gam.confidence_intervals(X_nan)\n with pytest.raises(ValueError):\n gam.prediction_intervals(X_nan)\n with pytest.raises(ValueError):\n gam.partial_dependence(X_nan)\n with pytest.raises(ValueError):\n gam.deviance_residuals(X_nan, y, weights)\n with pytest.raises(ValueError):\n gam.loglikelihood(X_nan, y, weights)\n with pytest.raises(ValueError):\n gam.gridsearch(X_nan, y, weights)\n with pytest.raises(ValueError):\n gam.sample(X_nan, y)\n\n # test y is nan\n with pytest.raises(ValueError):\n gam.deviance_residuals(X, y_nan, weights)\n with pytest.raises(ValueError):\n gam.loglikelihood(X, y_nan, weights)\n with pytest.raises(ValueError):\n gam.gridsearch(X, y_nan, weights)\n with pytest.raises(ValueError):\n gam.sample(X, y_nan, weights=weights, n_bootstraps=2)\n\n # test weights is nan\n with pytest.raises(ValueError):\n gam.deviance_residuals(X, y, weights_nan)\n with pytest.raises(ValueError):\n gam.loglikelihood(X, y, weights_nan)\n with pytest.raises(ValueError):\n gam.gridsearch(X, y, weights_nan)\n with pytest.raises(ValueError):\n gam.sample(X, y, weights=weights_nan, n_bootstraps=2)",
"def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['nu','C', 'kernel', 'degree', 'gamma', 'coef0',\n 'tol', 'cache_size', 'shrinking', 'max_iter'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)",
"def test_w_and_without():\n A = Node(\"A\", [\"B\"], {\"B\": np.array([[1,0],[1,.1]])})\n B = Node(\"B\", [], {})\n net = CyberNet([A,B])\n T=10\n data = gen_data(T, net, {\"A\": \"normal\", \"B\":\"normal\"})\n logn_fact = gen_logn_fact(data)\n pdata_no_a = prob_model_no_attacker(net, data, T, logn_fact)\n pdata_a = prob_model_given_data_times(net, data, {}, T, logn_fact,\n {\"A\": \"normal\",\n \"B\":\"normal\"})\n np.testing.assert_almost_equal(pdata_no_a, pdata_a)\n\n np.testing.assert_almost_equal(np.log(poisson.pmf(len(data[0]), 10)), pdata_a)",
"def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)",
"def a_test_mh():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('M-H',nsims=300)\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)"
]
| [
"0.5930371",
"0.58639264",
"0.5833472",
"0.57596225",
"0.5755409",
"0.5754674",
"0.57527477",
"0.57213014",
"0.56541514",
"0.5622811",
"0.5616077",
"0.56119925",
"0.56081957",
"0.5604912",
"0.5601415",
"0.5573714",
"0.55722886",
"0.55464065",
"0.55427945",
"0.5542184",
"0.5538889",
"0.5521212",
"0.551263",
"0.5499466",
"0.54978204",
"0.54754955",
"0.5457875",
"0.545747",
"0.54515016",
"0.5429691"
]
| 0.6397469 | 0 |
Initiates the report object attached to the flowcell and sequencing run but not attached to any pipelines as of yet. | def __init__(self,config,key=int(-1),flowcell=None,seq_run=None,base_output_dir=None,process_name='flowcell_reports',**kwargs):
        if flowcell is not None:
GenericProcess.__init__(self,config,key=key,process_name=process_name,**kwargs)
            if base_output_dir is None:
self.base_output_dir = config.get('Common_directories','flowcell_reports')
else:
self.base_output_dir = base_output_dir
self.flowcell_key = flowcell.key
self.sequencing_run_key = seq_run.key
self.sequencing_run_type = seq_run.run_type
self.pipelines = None
numbers = config.get('Flowcell_reports','numbers').split(',')
for number in numbers:
setattr(self,'flowcell_report_' + str(number) + '_key',None)
self.state = 'Running' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,config,sample_keys=None,number=None,key=int(-1),flowcell=None,input_dir=None,base_output_dir=None,output_dir=None,date=strftime(\"%Y%m%d\",localtime()),time=strftime(\"%H:%M:%S\",localtime()),process_name='flowcell_report',complete_file=None,**kwargs):\n if flowcell is None:\n flowcell = Flowcell(config,key=\"dummy_flowcell_key\")\n if flowcell.__class__.__name__ != \"Flowcell\":\n raise Exception(\"Trying to start a flowcell statistics reports object on a non-flowcell.\")\n if output_dir is None:\n if base_output_dir is None:\n base_output_dir = config.get('Common_directories','flowcell_reports')\n self.output_dir = os.path.join(os.path.join(base_output_dir,flowcell.key + \"_reports\"),str(number))\n else:\n self.output_dir = output_dir\n if complete_file is None:\n self.complete_file = os.path.join(self.output_dir,\"report_\" + str(number) + \".complete\")\n else:\n self.complete_file = complete_file\n QsubProcess.__init__(self,config,key=key,input_dir=input_dir,base_output_dir=base_output_dir,output_dir=self.output_dir,date=date,time=time,process_name=process_name,complete_file=self.complete_file,**kwargs)\n self.flowcell_key = flowcell.key\n if sample_keys is None:\n self.sample_keys = \"\"\n else:\n self.sample_keys = \";\".join(sample_keys)\n self.number = number\n #List of samples from the project\n self.all_samples_file = os.path.join(self.output_dir,'all_samples.ls')\n if self.key != -1:\n write_list_file(sample_keys,self.all_samples_file,original_list_file=config.get('Filenames','all_samples'))\n self.current_samples_file = os.path.join(self.output_dir,'current_samples.ls')\n if self.key != -1:\n write_list_file(sample_keys,self.current_samples_file)\n #Output files\n self.full_report = os.path.join(self.output_dir,'all_samples_report.csv')\n self.current_report = os.path.join(self.output_dir,'current_samples_report.csv')\n self.concordance_jpeg = os.path.join(self.output_dir,'concordance_vs_depth.jpeg')\n self.dbsnp_jpeg = os.path.join(self.output_dir,'dbsnp_vs_depth.jpeg')\n self.greater_than_10x_jpeg = os.path.join(self.output_dir,'greater_than_10x_vs_depth.jpeg')\n self.zero_coverage_jpeg = os.path.join(self.output_dir,'zero_coverage_vs_depth.jpeg')\n self.hethomratio_jpeg = os.path.join(self.output_dir,'hethomratio_vs_depth.jpeg')\n self.reads_jpeg = os.path.join(self.output_dir,'reads_vs_depth.jpeg')\n self.report_pdf = os.path.join(self.output_dir,self.flowcell_key + '_report.pdf')\n #Flag to keep track if report has been sent\n self.report_sent = False",
"def init_report(self, report):\n report.text('warning', 'init_report() not implemented for this class.')",
"def init_report(self, report):\n super(InformedPlannerHierarchy, self).init_report(report)\n if True:\n self.cover.draw_embeddings(report.section('embeddings'))\n \n self.display_distancetree(report.section('distancetree'))",
"def _initialise(self):\n if self._running:\n raise RuntimeError('Already initialised.')\n\n # Propose x0 as first points\n # Note proposal is multiple points this time!\n self._current = None\n self._current_log_pdfs = None\n self._proposed = self._x0\n self._proposed.setflags(write=False)\n\n # Number of chains left to update in this cycle\n self._remaining = np.arange(self._n_chains)\n\n # Update sampler state\n self._running = True",
"def initialize_report(output_dir,\n subject_name='Subject',\n log=True,\n filename='report',\n prepreproc_undergone=\"\",\n dcm2nii=False,\n deleteorient=False,\n fwhm=None, anat_fwhm=None,\n slice_timing=False,\n realign=False,\n coregister=False,\n coreg_func_to_anat=False,\n segment=False,\n normalize=False,\n dartel=False,\n command_line=None,\n has_func=True\n ):\n report_outfile = os.path.join(output_dir, '{}.html'.format(filename))\n\n report_dict = {}\n report_dict['preproc_undergone'] = generate_preproc_steps_docstring(\n dcm2nii=dcm2nii,\n deleteorient=deleteorient,\n slice_timing=slice_timing,\n realign=realign,\n coregister=coregister,\n segment=segment,\n normalize=normalize,\n fwhm=fwhm, anat_fwhm=anat_fwhm,\n dartel=dartel,\n coreg_func_to_anat=coreg_func_to_anat,\n prepreproc_undergone=prepreproc_undergone,\n has_func=has_func\n )\n report_dict['subject_name'] = subject_name\n report_dict['start_time'] = strftime(\"%d-%b-%Y %H:%M:%S\", gmtime())\n report_dict['end_time'] = \"STILL RUNNING...\"\n report_text = embed_in_HTML('report_template.html', report_dict)\n report_HTML = HTMLDocument(report_text).save_as_html(report_outfile)\n\n if log:\n # create a separate HTML with all the logs\n log_outfile = os.path.join(output_dir, '{}_log.html'.format(filename))\n log_HTML = HTMLDocument(\"<html><body>\").save_as_html(log_outfile)\n return report_outfile, log_outfile\n else:\n return report_outfile, None",
"def _initialize_reporter(self):\n self._reporter.open(mode='w')\n self._reporter.write_thermodynamic_states(self._thermodynamic_states,\n self._unsampled_states)\n\n # Store run metadata and ReplicaExchange options.\n self._store_options()\n self._reporter.write_dict('metadata', self._metadata)\n\n # Store initial conditions. This forces the storage to be synchronized.\n self._report_iteration()",
"def _InitializeVizier(self):\n p = self.params\n self._should_report_metrics = False\n\n reporting_job = self._task_params.cluster.reporting_job\n job_split = self._task_params.cluster.reporting_job.split('/')\n\n if len(job_split) != 2:\n # The default setting for reporting job is 'evaler'. This is not valid\n # for use with program. We only warn only since we may not be in a vizier\n # setting.\n tf.logging.info('reporting_job should be of the form '\n 'program_name/dataset_name with exactly one / '\n f'instead got {reporting_job}')\n return\n\n vizier_program_name, vizier_dataset_name = job_split\n if p.name == vizier_program_name and p.dataset_name == vizier_dataset_name:\n tf.logging.info(f'Adding reporting for {reporting_job}')\n self._should_report_metrics = True",
"def sequencePreparation(self):\n #Calculation of the number of frames in function of the duration + LED list for the acquisition\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n else:\n print('Please select a valid mode of led sequence initialization')\n #Sending nb of frames to initialize the progress bar\n if type(self.nbFrames) == int:\n self.nbFramesSig.emit(self.nbFrames)\n\n print('acquisition Side : ', self.expRatio)\n #Saving the configuration of the experiment file (.json)\n self.savePath = cfgFileSaving(self.experimentName,\n self.nbFrames,\n self.duration,\n self.expRatio,\n self.acquMode,\n self.seqMode,\n self.rgbLedRatio,\n self.greenFrameInterval,\n round(1/self.cycleTime,2), #framerate\n self.folderPath,\n self.colorMode,\n self.mmc,\n 'Zyla') #WARNING > modulabilty (there is a way to get device label but it's not so easy)\n\n #initialization of the acquisition saving files : .tif (frames) and .txt (metadata)\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.experimentName,\n self.nbFrames,\n self.maxFrames)\n #send all informations to each LED driver\n self.arduinoSync()",
"def _initialise_run(self) -> None:",
"def __send_reports__(self,config,mockdb):\n numbers = config.get('Flowcell_reports','numbers').split(',')\n for number in numbers:\n flowcell_report_key = getattr(self,'flowcell_report_' + str(number) + '_key')\n if flowcell_report_key is None:\n continue\n report = mockdb['FlowcellStatisticReport'].objects[flowcell_report_key]\n if report.report_sent is True: #If the report is already sent, next.\n continue\n if not report.__is_complete__(): #If the qsub script is still running, next.\n continue\n if self.sequencing_run_type == 'RapidRun' and str(number) == '16':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n elif self.sequencing_run_type == 'HighThroughputRun' and str(number) == '64':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n else:\n recipients = config.get('Flowcell_reports','subset_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"subset_report\")\n files = []\n files.append(report.report_pdf)\n files.append(report.full_report)\n files.append(report.current_report)\n send_email(subject,body,recipients=recipients,files=files)\n report.__finish__()\n report.report_sent = True\n return 1",
"def initialize(self) -> None:\n self._step = self._start_step\n self._tebd_propagator = compute_tebd_propagator(\n system_chain=self._system_chain,\n time_step=self._parameters.dt/2.0,\n epsrel=self._parameters.epsrel,\n order=self._parameters.order)\n self._results = {}\n self._t_mps = PtTebdBackend(\n gammas=self._initial_augmented_mps.gammas,\n lambdas=self._initial_augmented_mps.lambdas,\n epsrel=self._parameters.epsrel,\n config=self._backend_config)\n self._init_results()\n self._apply_controls(step=self.step, post=False)\n self._append_results()",
"def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()",
"def __init__(self):\n super(CyclomaticEagle)\n super().__init__()\n self.cmd = \"\"\n self.report_path = None",
"def start(self, report):\r\n self.report = report\r\n self.report.open()\r\n\r\n self._main_root_workunit = WorkUnit(run_tracker=self, parent=None, labels=[],\r\n name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)\r\n self.register_thread(self._main_root_workunit)\r\n self._main_root_workunit.start()\r\n self.report.start_workunit(self._main_root_workunit)",
"def _prepare(self, setup):\n # Initialise cell\n self.cell = self.celltype(model=self._model)\n for rec in setup.record_variables:\n self.cell.record(*rec)\n if 'injected_currents' in setup.conditions:\n for loc, current in setup.conditions['injected_currents'].items():\n getattr(self.cell, loc).inject_current(current)\n if 'voltage_clamps' in setup.conditions:\n for loc, voltages in setup.conditions['voltage_clamps'].items():\n getattr(self.cell, loc).voltage_clamp(voltages)\n if 'synaptic_spikes' in setup.conditions:\n for loc, syn, spkes in setup.conditions['synaptic_spikes'].items():\n getattr(self.cell, loc).synaptic_stimulation(spkes, syn)",
"def _setUp(self):\n self.numOfMeasurements = round(self.config.period / self.config.pollInterval, 0)\n # list of pairs (componentPID, componentName)\n componentsInfo = self._getComponentsInfo()\n for compName, compPID in componentsInfo.items():\n self._setUpProcessDetailAndMeasurements(compPID, compName)",
"def setupRun(self):\n \n # TBD: for now ignores the \"content\" of a protocol parameter\n \n run = self.run\n protocolInterface = self.protocolInterface\n \n for interfaceParameter in protocolInterface.sortedInterfaceParameters():\n nmrCalcObject = self.makeNmrCalcData(run, interfaceParameter)\n \n self.close()",
"def do(self):\r\n self.dlCsvReport()\r\n self.dlXlsReport()",
"def begin(self):\n\n env = self.context.lookup(\"/environment\")\n\n self._test_results_dir = env[\"output_directory\"]\n self._starttime = env[\"starttime\"]\n self._runid = env[\"runid\"]\n\n self._result_filename = os.path.join(self._test_results_dir, \"testrun_results.jsos\")\n self._summary_filename = os.path.join(self._test_results_dir, \"testrun_summary.json\")\n self._import_errors_filename = os.path.join(self._test_results_dir, \"import_errors.jsos\")\n\n return",
"def initialize_reporting(self):\n reporting_params = self.reporting_params\n reporting_params[\"heartbeat_path\"] = self.result_paths[\"current_heartbeat\"]\n reporting_handler = ReportingHandler(**reporting_params)\n\n #################### Make Unified Logging Globally Available ####################\n G.log = reporting_handler.log\n G.debug = reporting_handler.debug\n G.warn = reporting_handler.warn",
"def __init__(self, runId, pipelineName, topic, brokerHost, \n datasetType=None, reportAllPossible=True, brokerPort=None):\n JobOfficeClient.__init__(self, runId, pipelineName, brokerHost,\n brokerPort=brokerPort)\n\n self.datasetType = datasetType\n self.reportAllPossible = reportAllPossible\n \n self.dataSender = utils.EventSender(self.runId, topic, brokerHost,\n self.getOriginatorId(), brokerPort)",
"def prepare_acquisition(self):\n self.lib.PrepareAcquisition()",
"def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space_graph):\n self.AGENT_MOVEMENT_TYPE = 'Continuous'\n self.AGENT_NAME = 'Random'\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = state_space\n self.solution_report = solution_report # Python makes call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)",
"def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)",
"def __init__(self, time_lapse_polygons, raster_template, facility_id, from_break, to_break, scratch_folder):\r\n self.time_lapse_polygons = time_lapse_polygons\r\n self.raster_template = raster_template\r\n self.facility_id = facility_id\r\n self.from_break = from_break\r\n self.to_break = to_break\r\n self.scratch_folder = scratch_folder\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n self.scratch_gdb = None # Set later\r\n\r\n # Setup the class logger. Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"PercAccPoly\")\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"logFile\": self.log_file,\r\n \"polygons\": None # Set later\r\n }",
"def initiateAnalysis(self,):\n\n #\n # Imports\n #\n import os\n import sys\n\n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n \n #\n # for logmessages\n #\n tmpLogMessages = ['----------------\\n']\n tmpLogMessage = self.createLogHeader()\n tmpLogMessages.append(tmpLogMessage)\n #print tmpLogMessage\n \n #\n # check analysis path\n #\n if os.path.isdir(self.analysisPath):\n tmpLogMessage = 'WARNING: the analysis path already exists.\\n'\n print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n else:\n tmpLogMessage = 'Creating directory \"'+self.analysisPath+'\".\\n'\n #print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n os.makedirs(self.analysisPath)\n \n #\n # create the logfile\n #\n tmpLogMessages += self.openLogfileConnection()\n \n #\n # write tmpLogMessages to logfile\n #\n SEAseqPipeLine.logfile.write(''.join(tmpLogMessages))\n \n #\n # create the database\n #\n self.database.create()\n \n #\n # add run to runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n return 0",
"def _run_interface(self, runtime):\n try:\n runtime = super(\n ReportCapableInterface, self)._run_interface(runtime)\n except NotImplementedError:\n pass # the interface is derived from BaseInterface\n\n # leave early if there's nothing to do\n if not self.inputs.generate_report:\n return runtime\n\n self._out_report = os.path.abspath(self.inputs.out_report)\n self._post_run_hook(runtime)\n\n # check exit code and act consequently\n NIWORKFLOWS_LOG.debug('Running report generation code')\n\n if hasattr(runtime, 'returncode') and runtime.returncode not in [0, None]:\n self._generate_error_report(\n errno=runtime.get('returncode', None))\n else:\n self._generate_report()\n NIWORKFLOWS_LOG.info('Successfully created report (%s)',\n self._out_report)\n\n return runtime",
"def run(self):\n logging.info('Perform automatic annotations')\n\n # The procedure outcome report file.\n self.__param.set_report_filename(self.__log_report.get_filename())\n self.__log_report.increment()\n\n # Create the progress bar then run the annotations\n wx.BeginBusyCursor()\n p = sppasAnnotProgressDialog()\n self.__manager.annotate(self.__param, p)\n p.close()\n wx.EndBusyCursor()\n\n self.__update_log_text()\n self.Refresh()\n\n # send to parent\n evt = DataChangedEvent(data=self.__param.get_workspace())\n evt.SetEventObject(self)\n wx.PostEvent(self.GetParent(), evt)",
"def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()",
"def gReport(self, event):\n \n reports.createReports()"
]
| [
"0.64415294",
"0.59275347",
"0.5872562",
"0.58031285",
"0.5752513",
"0.5710506",
"0.5687142",
"0.5601731",
"0.5572101",
"0.5545518",
"0.55392045",
"0.5517224",
"0.5504108",
"0.54795575",
"0.5469122",
"0.54628384",
"0.5459609",
"0.54314554",
"0.54296523",
"0.5393991",
"0.5387264",
"0.53826135",
"0.538157",
"0.5379044",
"0.5378412",
"0.5375396",
"0.53693205",
"0.53656137",
"0.53554034",
"0.5341324"
]
| 0.62101597 | 1 |
Connects the report with a pipeline by recording the pipeline key and pipeline obj_type in a string. | def __add_pipeline__(self,pipeline):
if not re.search('Pipeline',pipeline.obj_type):
raise Exception("Trying to add non-pipeline key to flowcell statistics reports")
        if self.pipelines is not None:
self.pipelines += ';'
self.pipelines += str(pipeline.key) + ":" + pipeline.obj_type
else:
self.pipelines = str(pipeline.key) + ":" + pipeline.obj_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipeline_id(self):\n pass",
"def pipeline_id(self, pipeline_id):\n\n self._pipeline_id = pipeline_id",
"def _set_pipeline_cfg(self, field, value):",
"def __init__(self, pipeline, config=None):\n self.config = config\n self.pipeline = pipeline",
"def _setup_pipeline_cfg(self):",
"def pipeline_id(self, pipeline_id: PipelineId):\r\n self._pipeline_id = pipeline_id",
"def pipeline_id(self, pipeline_id: PipelineId):\r\n self._pipeline_id = pipeline_id",
"def __current_pipeline_list__(self,mockdb):\n pipelines = []\n if self.pipelines is None:\n return pipelines\n pipelines_dict = self.pipelines.split(';')\n for d in pipelines_dict:\n pipeline_key, obj_type = d.split(':')\n try:\n\t\tpipeline = mockdb[obj_type].objects[int(pipeline_key)]\n except KeyError:\n sys.exit(\"Key error in determining pipeline for report.\\n\")\n pipelines.append(pipeline)\n return pipelines",
"def register_pipelines(self) -> Dict[str, Pipeline]:\n de_pipeline = de.create_pipeline()\n ds_pipeline = ds.create_pipeline()\n return {\n \"de\": de_pipeline,\n \"ds\": ds_pipeline,\n \"__default__\": de_pipeline + ds_pipeline,\n }",
"def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe",
"def get_pipeline(tag=None):\n\n\n data_science_pipeline = (\n # interdiction_baseline_call_pl()\n # + interdiction_baseline_parse_pl()\n #+ interdiction_community_pl()\n #+ interdiction_community_parse_pl()\n #+ dijkstra_prep_paths_pl()\n #+ dijkstra_parse_paths_pl()\n #+ dijkstra_reachable_pl()\n #+ dijkstra_shortest_paths_pl()\n + dijkstra_pypy_pickle_pl()\n + dijkstra_pypy_paths_pl()\n + dijkstra_make_adj_pl()\n #+ dijkstra_opt()\n + dijkstra_flow()\n + sds_counterfactual_pl()\n + supply_interdiction_pl()\n + post_supply_interdiction_pl()\n )\n \n if tag:\n if type(tag)==str:\n return Pipeline([n for n in data_science_pipeline.nodes if tag in n.tags])\n elif type(tag)==list:\n return Pipeline([n for n in data_science_pipeline.nodes if len(n.tags - set(tag)) < len(n.tags)])\n \n else:\n return data_science_pipeline",
"def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n FIELD_MAPPINGS = [dict(field='/id', columnName='id'),\n dict(field='/name', columnName='name')]\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n jdbc_producer.set_attributes(default_operation=operation,\n table_name=table_name,\n field_to_column_mapping=FIELD_MAPPINGS,\n stage_on_record_error='STOP_PIPELINE')\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_producer\n record_deduplicator >> trash\n\n return pipeline_builder.build(title=pipeline_title)",
"def on_pipeline_from_string(code: str) -> PipelineInspectorBuilder:\n return PipelineInspectorBuilder(python_code=code)",
"def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)",
"def default_pipeline(pipeline=None):\n defaults = {\n \"pipeline\": {},\n \"settings\": {},\n \"output\": {\"format\": \"netcdf\", \"filename\": None, \"format_kwargs\": {}},\n # API Gateway\n \"url\": \"\",\n \"params\": {},\n }\n\n # merge defaults with input pipelines, if supplied\n if pipeline is not None:\n pipeline = {**defaults, **pipeline}\n pipeline[\"output\"] = {**defaults[\"output\"], **pipeline[\"output\"]}\n pipeline[\"settings\"] = {**defaults[\"settings\"], **pipeline[\"settings\"]}\n else:\n pipeline = defaults\n\n # overwrite certain settings so that the function doesn't fail\n pipeline[\"settings\"][\"ROOT_PATH\"] = \"/tmp\"\n pipeline[\"settings\"][\"LOG_FILE_PATH\"] = \"/tmp/podpac.log\"\n\n return pipeline",
"def get_pipeline_summary(pipeline):\n confounds = {\"wm\": \"WM\",\n \"csf\": \"CSF\",\n \"gs\": \"GS\",\n \"acompcor\": \"aCompCor\",\n \"aroma\": \"ICA-AROMA\",\n \"spikes\": \"Spikes\"}\n\n pipeline_list = []\n\n for conf, conf_name in confounds.items():\n\n if conf == \"aroma\":\n raw = YES if pipeline[conf] else NO\n temp_deriv = NA\n quad_terms = NA\n elif conf == \"spikes\":\n raw = YES if pipeline[conf] else NO\n temp_deriv = NA\n quad_terms = NA\n elif conf == 'acompcor':\n raw = YES if pipeline['confounds'][conf] else NO\n temp_deriv = NA\n quad_terms = NA\n else:\n raw = YES if pipeline[\"confounds\"][conf] else NO\n\n if not pipeline[\"confounds\"][conf]:\n temp_deriv = NO\n quad_terms = NO\n\n if isinstance(pipeline[\"confounds\"][conf], dict):\n\n if pipeline[\"confounds\"][conf]['temp_deriv']:\n temp_deriv = YES\n\n if pipeline[\"confounds\"][conf]['quad_terms']:\n quad_terms = YES\n\n pipeline_dict = {\"Confound\": conf_name,\n \"Raw\": raw,\n \"Temp. deriv.\": temp_deriv,\n \"Quadr. terms\": quad_terms}\n pipeline_list.append(pipeline_dict)\n\n return(pipeline_list)",
"def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)",
"def main(): # pragma: no cover\n parser = argparse.ArgumentParser(\"Gets the pipeline definition for the pipeline script.\")\n\n parser.add_argument(\n \"-n\",\n \"--module-name\",\n dest=\"module_name\",\n type=str,\n help=\"The module name of the pipeline to import.\",\n )\n parser.add_argument(\n \"-f\",\n \"--file-name\",\n dest=\"file_name\",\n type=str,\n default=None,\n help=\"The file to output the pipeline definition json to.\",\n )\n parser.add_argument(\n \"-kwargs\",\n \"--kwargs\",\n dest=\"kwargs\",\n default=None,\n help=\"Dict string of keyword arguments for the pipeline generation (if supported)\",\n )\n args = parser.parse_args()\n\n if args.module_name is None:\n parser.print_help()\n sys.exit(2)\n\n try:\n pipeline = get_pipeline_driver(args.module_name, args.kwargs)\n content = pipeline.definition()\n if args.file_name:\n with open(args.file_name, \"w\") as f:\n f.write(content)\n else:\n print(content)\n except Exception as e: # pylint: disable=W0703\n print(f\"Exception: {e}\")\n sys.exit(1)",
"def get_pipeline(request):\n id = request.GET['id']\n obj = db.jobs.find_one({\"_id\": ObjectId(id)})\n if obj:\n options = obj['target_config']\n if not options or not isinstance(options[options.keys()[0]], dict):\n new_api = False\n else:\n new_api = True\n complete = options.copy()\n options = dict(map(\n lambda kv: (kv[0], kv[1]['value']),\n options.items()\n ))\n\n # Customisation ----------------- #\n if 'customer' not in options:\n options['mode'] = 'all'\n else:\n cust = options.pop('customer')\n if isinstance(cust, list):\n if len(cust) == 2:\n options['mode'] = 'all'\n else:\n options['mode'] = cust[0].lower()\n else:\n if cust.lower() in ['both', 'all']:\n options['mode'] = 'all'\n else:\n options['mode'] = cust.lower()\n\n if 'language' in options and options['language'].lower() in ['both', 'all']:\n options.pop('language')\n\n if 'purchase_month' in options:\n options['purchase_month'] = [monthDict[a] for a in options['purchase_month']]\n\n if 'channel' in options:\n options['channel'] = map(lambda k: lasttouch_dict[k], options['channel'])\n # ------------------------------- #\n\n if new_api:\n pipeline = {\n 'required': [],\n 'additional': []\n }\n # complete.pop('customer', '')\n for k, v in complete.items():\n if k in options:\n pipeline[v['co_type']].append(k)\n pipeline['required'].append('customer')\n\n else:\n pipeline = [k for k, v in options.items() if k != 'mode']\n pipeline.append('customer')\n\n return jsonResponse({\"pipeline\": pipeline, \"options\": options})\n else:\n raise Http404",
"def get_pipeline_from_run(self, pipeline_run):\n db = self.mongo_client.metalearning\n collection = db.pipelines\n pipeline_doc = collection.find({\"$and\": [{\"id\": pipeline_run[\"pipeline\"][\"id\"]},\n {\"digest\": pipeline_run[\"pipeline\"][\"digest\"]}]})[0]\n return pipeline_doc",
"def _dup_pipeline_name_cfg(self):",
"def create_dataflow(self, pipeline: Dict) -> Dict[str, Union[float, Dict]]:\n raise NotImplementedError",
"def create(self, request):\n pipeline_inputs = load_request(request)\n serializer = self.serializer_class(data=pipeline_inputs, context={'request': request})\n try:\n project = Project.objects.get(id=int(pipeline_inputs[\"project\"]))\n except Project.DoesNotExist:\n return Response(\"No project found for id: {}\".format(int(pipeline_inputs[\"project\"])), status=status.HTTP_400_BAD_REQUEST)\n if project.owner != request.user:\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n if serializer.is_valid():\n serializer.save()\n pipeline = serializer.data\n if \"metadata\" not in pipeline_inputs.keys():\n pipeline_inputs[\"metadata\"] = None\n a = Pipeline.objects.get(pk=int(pipeline[\"id\"]))\n m = Metadata(a, pipeline_inputs[\"metadata\"])\n meta = m.set_metadata(\"PipelineMetadata\")\n pipeline[\"metadata\"] = meta\n if pipeline:\n return Response(pipeline, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def _generate_pipeline(self, job):\n # Generate actions (one per job) and resources\n resources = self._generate_job_resources(job)\n action = self._generate_job_action(job)\n\n pipeline = {\n # Ordered list of actions to execute\n \"actions\": [action],\n # resources required for execution\n \"resources\": resources,\n # Technical question - difference between resource and action environment\n # For now we will set them to be the same.\n \"environment\": self._generate_environment(),\n }\n\n # \"timeout\": string in seconds (3.5s) is not included (defaults to 7 days)\n return pipeline",
"def append_pipeline(self, pipeline, proba=None, repeat=None):\n self._action_list.append({'name': PIPELINE_ID, 'pipeline': pipeline,\n 'proba': proba, 'repeat': repeat})",
"def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )",
"def __init__(self, pipeline_configuration):\n\n assert isinstance(pipeline_configuration, type(rs.config()))\n self._context = rs.context()\n self._available_devices = enumerate_connected_devices(self._context)\n self._enabled_devices = {}\n self._config = pipeline_configuration\n self._frame_counter = 0\n self._profile_pipe = \"\"",
"def to_pipeline_spec(self) -> pipeline_spec_pb2.PipelineSpec:\n # import here to aviod circular module dependency\n from kfp.compiler import compiler_utils\n from kfp.compiler import pipeline_spec_builder as builder\n from kfp.dsl import pipeline_channel\n from kfp.dsl import pipeline_task\n from kfp.dsl import tasks_group\n\n args_dict = {}\n pipeline_inputs = self.inputs or {}\n\n for arg_name, input_spec in pipeline_inputs.items():\n args_dict[arg_name] = pipeline_channel.create_pipeline_channel(\n name=arg_name,\n channel_type=input_spec.type,\n is_artifact_list=input_spec.is_artifact_list)\n\n task = pipeline_task.PipelineTask(self, args_dict)\n\n # instead of constructing a pipeline with pipeline_context.Pipeline,\n # just build the single task group\n group = tasks_group.TasksGroup(\n group_type=tasks_group.TasksGroupType.PIPELINE)\n group.tasks.append(task)\n\n group.name = uuid.uuid4().hex\n\n pipeline_name = self.name\n task_group = group\n\n pipeline_outputs = {}\n pipeline_output_spec = self.outputs or {}\n\n for arg_name, output_spec in pipeline_output_spec.items():\n pipeline_outputs[\n arg_name] = pipeline_channel.create_pipeline_channel(\n name=arg_name,\n channel_type=output_spec.type,\n task_name=task.name)\n\n utils.validate_pipeline_name(pipeline_name)\n\n pipeline_spec = pipeline_spec_pb2.PipelineSpec()\n pipeline_spec.pipeline_info.name = pipeline_name\n pipeline_spec.sdk_version = f'kfp-{kfp.__version__}'\n # Schema version 2.1.0 is required for kfp-pipeline-spec>0.1.13\n pipeline_spec.schema_version = '2.1.0'\n\n # if we decide to surface component outputs to pipeline level,\n # can just assign the component_spec_proto directly to .root\n component_spec_proto = builder._build_component_spec_from_component_spec_structure(\n self)\n pipeline_spec.root.CopyFrom(component_spec_proto)\n\n builder._build_dag_outputs(\n component_spec=pipeline_spec.root, dag_outputs=pipeline_outputs)\n\n deployment_config = pipeline_spec_pb2.PipelineDeploymentConfig()\n root_group = task_group\n\n task_name_to_parent_groups, group_name_to_parent_groups = compiler_utils.get_parent_groups(\n root_group)\n\n def get_inputs(task_group: tasks_group.TasksGroup,\n task_name_to_parent_groups):\n inputs = collections.defaultdict(set)\n if len(task_group.tasks) != 1:\n raise ValueError(\n f'Error compiling component. Expected one task in task group, got {len(task_group.tasks)}.'\n )\n only_task = task_group.tasks[0]\n if only_task.channel_inputs:\n for group_name in task_name_to_parent_groups[only_task.name]:\n inputs[group_name].add((only_task.channel_inputs[-1], None))\n return inputs\n\n inputs = get_inputs(task_group, task_name_to_parent_groups)\n\n builder.build_spec_by_group(\n pipeline_spec=pipeline_spec,\n deployment_config=deployment_config,\n group=root_group,\n inputs=inputs,\n outputs=collections.defaultdict(\n dict), # empty -- no sub-DAG outputs to surface\n dependencies={}, # no dependencies for single-component pipeline\n rootgroup_name=root_group.name,\n task_name_to_parent_groups=task_name_to_parent_groups,\n group_name_to_parent_groups=group_name_to_parent_groups,\n name_to_for_loop_group={}, # no for loop in single-component pipeline\n platform_spec=pipeline_spec_pb2.PlatformSpec(\n ), # no PlatformSpec single-component pipeline\n is_compiled_component=True,\n )\n\n return pipeline_spec",
"def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )",
"def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p"
]
| [
"0.5497617",
"0.53822815",
"0.5282645",
"0.52708805",
"0.52638996",
"0.5247205",
"0.5247205",
"0.5194562",
"0.51895684",
"0.5155318",
"0.51416343",
"0.5130589",
"0.5123417",
"0.5112902",
"0.50113124",
"0.50014526",
"0.49835396",
"0.49828",
"0.49824667",
"0.496152",
"0.49433082",
"0.49234092",
"0.49030426",
"0.4897567",
"0.48905745",
"0.4857302",
"0.48565093",
"0.48482823",
"0.48479038",
"0.4847887"
]
| 0.7252879 | 0 |
Uses the pipelines string stored in self to generate a list of pipeline objects. | def __current_pipeline_list__(self,mockdb):
pipelines = []
if self.pipelines is None:
return pipelines
pipelines_dict = self.pipelines.split(';')
for d in pipelines_dict:
pipeline_key, obj_type = d.split(':')
try:
                pipeline = mockdb[obj_type].objects[int(pipeline_key)]
except KeyError:
sys.exit("Key error in determining pipeline for report.\n")
pipelines.append(pipeline)
return pipelines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipelines(self) -> list:\n if not self._pipelines:\n if \"pipelines\" not in self._pipeline_definition:\n raise ValueError(\"Pipeline is missing 'pipelines' field.\")\n elif len(self._pipeline_definition[\"pipelines\"]) == 0:\n raise ValueError(\"Pipeline has zero length 'pipelines' field.\")\n\n pipelines: list = list()\n for pipeline in self._pipeline_definition[\"pipelines\"]:\n pipelines.append(Pipeline(pipeline))\n\n self._pipelines = pipelines\n\n return self._pipelines",
"def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)",
"def pipelines(self):\r\n return pipelines.Pipelines(self)",
"def transform(stuff, pipelines=DEFAULT_PIPELINE_NAMES):\n global _pipelines\n for name in pipelines:\n p = load_pipeline(name)\n stuff = p.transform(stuff)\n return stuff",
"def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)",
"def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines",
"def register_pipelines(self) -> Dict[str, Pipeline]:\n de_pipeline = de.create_pipeline()\n ds_pipeline = ds.create_pipeline()\n return {\n \"de\": de_pipeline,\n \"ds\": ds_pipeline,\n \"__default__\": de_pipeline + ds_pipeline,\n }",
"def _build_pipeline(\n self, cfg: Dict, domain_file: Text\n ) -> List[Policy]:\n\n pipeline = []\n\n for i in cfg['policies']:\n policy_name=i['name']\n policy=registered_policies[policy_name](domain_file)\n pipeline.append(policy)\n\n return pipeline",
"def get_pipeline(tag=None):\n\n\n data_science_pipeline = (\n # interdiction_baseline_call_pl()\n # + interdiction_baseline_parse_pl()\n #+ interdiction_community_pl()\n #+ interdiction_community_parse_pl()\n #+ dijkstra_prep_paths_pl()\n #+ dijkstra_parse_paths_pl()\n #+ dijkstra_reachable_pl()\n #+ dijkstra_shortest_paths_pl()\n + dijkstra_pypy_pickle_pl()\n + dijkstra_pypy_paths_pl()\n + dijkstra_make_adj_pl()\n #+ dijkstra_opt()\n + dijkstra_flow()\n + sds_counterfactual_pl()\n + supply_interdiction_pl()\n + post_supply_interdiction_pl()\n )\n \n if tag:\n if type(tag)==str:\n return Pipeline([n for n in data_science_pipeline.nodes if tag in n.tags])\n elif type(tag)==list:\n return Pipeline([n for n in data_science_pipeline.nodes if len(n.tags - set(tag)) < len(n.tags)])\n \n else:\n return data_science_pipeline",
"def parse_simpler_pipeline(self, full_pipeline):\n pipeline_steps = full_pipeline[\"steps\"]\n simple_pipeline = []\n for pipeline_step in pipeline_steps:\n pipeline_step_name = pipeline_step[\"primitive\"][\"python_path\"]\n inputs_list = []\n for key, value in pipeline_step[\"arguments\"].items():\n string_name = value[\"data\"]\n pipeline_step_inputs = self.parse_input_string(string_name)\n inputs_list.append(pipeline_step_inputs)\n # add info to our pipeline\n simple_pipeline.append({\"name\": pipeline_step_name, \"inputs\": inputs_list})\n\n return simple_pipeline",
"def pipelines(self):\n return PipelineManager(session=self._session)",
"def preparePipelines(self):\n\n # Construct the differnent states making up the pipeline\n\n # Input assembly state describes how primitives are assembled\n # This pipeline will assemble vertex data as a triangle lists (though we only use one triangle)\n inputAssemblyState = vk.VkPipelineInputAssemblyStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,\n topology = vk.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST\n )\n # Rasterization state\n rasterizationState = vk.VkPipelineRasterizationStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,\n polygonMode = vk.VK_POLYGON_MODE_FILL,\n cullMode = vk.VK_CULL_MODE_NONE,\n frontFace = vk.VK_FRONT_FACE_COUNTER_CLOCKWISE,\n depthClampEnable = vk.VK_FALSE,\n rasterizerDiscardEnable = vk.VK_FALSE,\n depthBiasEnable = vk.VK_FALSE,\n lineWidth = 1.0\n )\n # Color blend state describes how blend factors are calculated (if used)\n # We need one blend attachment state per color attachment (even if blending is not used\n blendAttachmentState = vk.VkPipelineColorBlendAttachmentState(\n colorWriteMask = 0xf,\n blendEnable = vk.VK_FALSE\n )\n colorBlendState = vk.VkPipelineColorBlendStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,\n attachmentCount = 1,\n pAttachments = [blendAttachmentState]\n )\n # Viewport state sets the number of viewports and scissor used in this pipeline\n # Note: This is actually overriden by the dynamic states (see below)\n viewportState = vk.VkPipelineViewportStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,\n viewportCount = 1,\n scissorCount = 1\n )\n # Enable dynamic states\n # Most states are baked into the pipeline, but there are still a few dynamic states that can be changed within a command buffer\n #To be able to change these we need do specify which dynamic states will be changed using this pipeline. 
Their actual states are set later on in the command buffer.\n # For this example we will set the viewport and scissor using dynamic states\n dynamicStateEnables = [vk.VK_DYNAMIC_STATE_VIEWPORT, vk.VK_DYNAMIC_STATE_SCISSOR]\n dynamicState = vk.VkPipelineDynamicStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,\n dynamicStateCount = len(dynamicStateEnables),\n pDynamicStates = dynamicStateEnables\n )\n\n # Depth and stencil state containing depth and stencil compare and test operations\n # We only use depth tests and want depth tests and writes to be enabled and compare with less or equal\n opState = vk.VkStencilOpState(\n failOp = vk.VK_STENCIL_OP_KEEP,\n passOp = vk.VK_STENCIL_OP_KEEP,\n compareOp = vk.VK_COMPARE_OP_ALWAYS\n )\n depthStencilState = vk.VkPipelineDepthStencilStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,\n depthTestEnable = vk.VK_TRUE,\n depthWriteEnable = vk.VK_TRUE,\n depthCompareOp = vk.VK_COMPARE_OP_LESS_OR_EQUAL,\n depthBoundsTestEnable = vk.VK_FALSE,\n stencilTestEnable = vk.VK_FALSE,\n front = opState,\n back = opState\n )\n # Multi sampling state\n # This example does not make use fo multi sampling (for anti-aliasing), the state must still be set and passed to the pipeline\n multisampleState = vk.VkPipelineMultisampleStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,\n rasterizationSamples = vk.VK_SAMPLE_COUNT_1_BIT,\n pSampleMask = None\n )\n # Vertex input descriptions\n # Specifies the vertex input parameters for a pipeline\n #Vertex input binding\n # This example uses a single vertex input binding at binding point 0 (see vkCmdBindVertexBuffers)\n vertexInputBinding = vk.VkVertexInputBindingDescription(\n binding = 0,\n stride = self.vertexShape.size * self.vertexShape.itemsize,\n inputRate = vk.VK_VERTEX_INPUT_RATE_VERTEX\n )\n # Input attribute bindings describe shader attribute locations and memory layouts\n vertexInputAttributs = []\n # These match the following shader layout (see triangle.vert):\n # layout (location = 0) in vec3 inPos;\n # layout (location = 1) in vec3 inColor;\n # Attribute location 0: Position\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 0,\n # Position attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = 0 # offsetof(vertexShape, position)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 1,\n # Color attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = self.vertexShape[0].size * self.vertexShape.itemsize # offsetof(vertexShape, color)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n\n # Vertex input state used for pipeline creation\n vertexInputState = vk.VkPipelineVertexInputStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,\n vertexBindingDescriptionCount = 1,\n pVertexBindingDescriptions = [vertexInputBinding],\n vertexAttributeDescriptionCount = len(vertexInputAttributs),\n pVertexAttributeDescriptions = vertexInputAttributs\n )\n # Shaders\n shaderStages = []\n # Vertex shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_VERTEX_BIT,\n # Load binary SPIR-V shader\n module = 
vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.vert.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n # Fragment shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_FRAGMENT_BIT,\n # Load binary SPIR-V shader\n module = vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.frag.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n\n # Assign the pipeline states to the pipeline creation info structure\n pipelineCreateInfo = vk.VkGraphicsPipelineCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,\n # The layout used for this pipeline (can be shared among multiple pipelines using the same layout)\n layout = self.pipelineLayout,\n # Renderpass this pipeline is attached to\n renderPass = self.renderPass,\n pVertexInputState = vertexInputState,\n pInputAssemblyState = inputAssemblyState,\n pRasterizationState = rasterizationState,\n pColorBlendState = colorBlendState,\n pMultisampleState = multisampleState,\n pViewportState = viewportState,\n pDepthStencilState = depthStencilState,\n pDynamicState = dynamicState,\n stageCount = len(shaderStages),\n pStages = shaderStages\n )\n # Create rendering pipeline using the specified states\n self.pipelines = vk.vkCreateGraphicsPipelines(self.device, self.pipelineCache, 1, [pipelineCreateInfo], None)\n try:\n self.pipeline = self.pipelines[0]\n except TypeError:\n self.pipeline = self.pipelines\n # Shader modules are no longer needed once the graphics pipeline has been created\n vk.vkDestroyShaderModule(self.device, shaderStages[0].module, None)\n vk.vkDestroyShaderModule(self.device, shaderStages[1].module, None)",
"def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)",
"def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline",
"def _create_pipeline(self):\n # Add boxes to the pipeline\n if self.box_tag not in self.proto:\n raise Exception(\n \"Box defined in '{0}' has no '<{1}>' declared.\".format(\n self._xmlfile, self.box_tag))\n switch_descs = []\n for box_item in self.proto[self.box_tag]:\n for box_type in box_item.keys():\n\n # Create processing boxes (can be iterative)\n if box_type == self.box_names[0]:\n for boxdesc in box_item[box_type]:\n self._add_box(boxdesc)\n # Create switch boxes\n elif box_type == self.box_names[1]:\n for switchdesc in box_item[box_type]:\n switch_descs.append(switchdesc)\n # Unrecognize box type\n else:\n raise ValueError(\n \"Box structure: '{0}' defined in '{1}' is not \"\n \"supported. Supported boxes are '{2}'.\".format(\n json.dumps(box_item, indent=2), self._xmlfile,\n self.box_names))\n\n # Add switch to the pipeline\n for switchdesc in switch_descs:\n self._add_switch(switchdesc)\n\n # Add links between boxes\n if self.link_tag not in self.proto:\n raise Exception(\n \"Box defined in '{0}' has no '<{1}>' declared.\".format(\n self._xmlfile, self.link_tag))\n for link_item in self.proto[self.link_tag]:\n inner_tag = self.link_tag[:-1]\n for linkdesc in link_item[inner_tag]:\n if is_io_control(linkdesc[self.link_attributes[0]]):\n linktype = \"input\"\n elif is_io_control(linkdesc[self.link_attributes[1]]):\n linktype = \"output\"\n else:\n linktype = \"link\"\n self._add_link(linkdesc, linktype)",
"def create_pipeline(self, primitives, hyperparameters=None):\n\n self.primitive = self.check_path(primitives)\n\n if hyperparameters is not None:\n hyperparameters = self.check_path_hyperparameters(hyperparameters)\n pipeline = MLPipeline(self.primitive, hyperparameters)\n else:\n pipeline = MLPipeline(self.primitive)\n return pipeline",
"def _instantiate_pipeline(self, pipeline_url, input_file, output_file, params):\n pipeline_resource = self.bqSession.fetchxml(pipeline_url, view='short')\n out_pipeline_file = os.path.join(self.options.stagingPath, 'pipeline.json')\n out_error_file = os.path.join(self.options.stagingPath, 'dream3d_error.txt')\n pipeline_url = self.bqSession.service_url('blob_service', path=pipeline_resource.get('resource_uniq'))\n self.bqSession.fetchblob(pipeline_url, path=os.path.join(self.options.stagingPath, 'pipeline_uninit.json'))\n pipeline_file = os.path.join(self.options.stagingPath, 'pipeline_uninit.json')\n with open(pipeline_file, 'r') as fi:\n pipeline = json.load(fi)\n # replace all placeholders in pipeline template\n _replace_placeholders(pipeline, input_file, output_file, params)\n # write out pipeline to provided file\n with open(out_pipeline_file, 'w') as fo:\n json.dump(pipeline, fo)\n return out_pipeline_file, out_error_file",
"def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)",
"def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\")",
"def pipeline(self):\n return stanza.Pipeline(dir=TEST_MODELS_DIR, processors=\"tokenize,ner\", package={\"ner\": [\"ncbi_disease\", \"ontonotes\"]})",
"def get_registered_pipelines(self, request, context):\n \n repl = RegisteredPipelinesReply()\n for e in self._ppl.keys():\n repl.add(e)\n return repl",
"def on_pipeline_from_string(code: str) -> PipelineInspectorBuilder:\n return PipelineInspectorBuilder(python_code=code)",
"def parse_steps(steps, max_n_pipes=200):\n im_lst_of_tpls = _lst_of_tpls(steps[0], parse_imputing)\n pp_lst_of_tpls = _lst_of_tpls(steps[1], parse_preproc)\n dr_lst_of_tpls = _lst_of_tpls(steps[2], parse_dimred)\n\n # When parsing clustering options, take care of error-generating parameters\n cl_lst_of_tpls = _lst_of_tpls(\n steps[3], parse_clustering,\n filt=(lambda x: x.get('affinity', '') in ['manhattan', 'precomputed']\n and x.get('linkage', '') == 'ward'))\n\n # Generate the list of list of tuples (i.e. the list of pipelines)\n pipes = modified_cartesian(im_lst_of_tpls, pp_lst_of_tpls, dr_lst_of_tpls,\n cl_lst_of_tpls, pipes_mode=True)\n for pipe in pipes:\n logging.info(\"Generated pipeline: \\n %s \\n\", pipe)\n logging.info(\"*** %d pipeline(s) generated ***\", len(pipes))\n\n # Get only the first max_n_pipes\n if len(pipes) > max_n_pipes:\n logging.warning(\"Maximum number of pipelines reached. \"\n \"I'm keeping the first %d\", max_n_pipes)\n pipes = pipes[:max_n_pipes]\n\n return pipes",
"def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline",
"def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline",
"def _setup_pipeline_cfg(self):",
"def parse_slide_pipes(slide_desc_str):\n tile_tokens = slide_desc_str.split(\"-\")\n return tile_tokens",
"def build(self, pipe_model, allow_flow_reversal):\n\n self.pipe_model = pipe_model\n\n try:\n cls = pip.str_to_pipe(pipe_model)\n except AttributeError:\n cls = None\n\n if cls:\n obj = cls(name=self.name,\n start_node=self.start_node.name,\n end_node=self.end_node.name, length=self.length,\n allow_flow_reversal=allow_flow_reversal,\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n else:\n obj = None\n\n if obj is None:\n raise ValueError(\"%s is not a valid class name! (pipe %s)\" % (\n pipe_model, self.name))\n\n self.logger.info(\n 'Pipe model {} added to {}'.format(pipe_model, self.name))\n\n return obj",
"def set_inputs(string):\n global list_of_inputs\n\n list_of_inputs = string.split(\"\\n\")",
"def append(self, pipeline):\n for stage in pipeline.pipe:\n self._pipe.append(stage)\n return self"
]
| [
"0.64970875",
"0.6224952",
"0.6171325",
"0.59650576",
"0.59019244",
"0.5801626",
"0.5700288",
"0.5635955",
"0.561687",
"0.5602918",
"0.558298",
"0.55374354",
"0.54890347",
"0.5482137",
"0.5480424",
"0.5476726",
"0.5439776",
"0.5429055",
"0.54281485",
"0.5370522",
"0.5369393",
"0.53413475",
"0.5340302",
"0.5324436",
"0.5300614",
"0.5285285",
"0.5273457",
"0.52730304",
"0.52557164",
"0.52498454"
]
| 0.6630521 | 0 |
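A minimal, self-contained sketch of the 'pipeline_key:object_type' serialization that __current_pipeline_list__ in the record above splits apart. The parse helper mirrors the split(';') / split(':') logic; the mock_objects dictionary and the sample keys are hypothetical stand-ins for the mockdb[obj_type].objects lookup, not values taken from the dataset.

from typing import Any, Dict, Iterator, Tuple

def parse_pipeline_entries(pipelines: str) -> Iterator[Tuple[int, str]]:
    # Entries are joined by ';' and each entry is '<pipeline_key>:<object_type>'.
    for entry in pipelines.split(';'):
        pipeline_key, obj_type = entry.split(':')
        yield int(pipeline_key), obj_type

# Hypothetical stand-in for the mockdb[obj_type].objects lookup used in the record above.
mock_objects: Dict[str, Dict[int, Any]] = {
    "QualityControlPipeline": {101: "pipeline object 101"},
    "StandardPipeline": {102: "pipeline object 102"},
}

for key, obj_type in parse_pipeline_entries("101:QualityControlPipeline;102:StandardPipeline"):
    print(obj_type, mock_objects[obj_type][key])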
Returns a list of sample keys associated with pipelines that have completed. | def __completed_samples_list__(self,mockdb):
sample_keys = []
for pipeline in self.__current_pipeline_list__(mockdb):
if pipeline.__is_complete__():
sample_keys.append(pipeline.sample_key)
return sample_keys | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def expected_log_keys(learner: adaptive.BaseLearner) -> list[str]:\n # Check if the result contains the expected keys\n expected_keys = [\n \"elapsed_time\",\n \"overhead\",\n \"npoints\",\n \"cpu_usage\",\n \"mem_usage\",\n ]\n if not _at_least_adaptive_version(\"0.16.0\", raises=False) and not isinstance(\n learner,\n adaptive.SequenceLearner,\n ):\n # The loss cache for SequenceLearner was introduced in adaptive 0.16.0\n # see https://github.com/python-adaptive/adaptive/pull/411\n expected_keys.append(\"latest_loss\")\n return expected_keys",
"def getTestSets():\n return list(_testsetdict.keys())",
"def keys(self):\n return self._sequence",
"def keys(self):\n return self._sequence[:]",
"async def keys(self) -> Iterable[str]:",
"def get_completed_outputs(self):\n return [o for o in self.get_outputs(flatten=True) if o.exists()]",
"def get_completed_outputs(self):\n return [o for o in self.get_outputs() if o.get_status() == Constants.DONE]",
"def get_reference_parkeys(self):\n dataset_parkeys = self.get_required_parkeys()\n if \"reference_to_dataset\" in self.header:\n for reference, dataset in self.reference_to_dataset.items():\n if dataset in dataset_parkeys:\n dataset_parkeys.append(reference)\n return tuple(sorted(set(dataset_parkeys)))",
"def completed_puzzles(self):\r\n from foldit.models import PuzzleComplete\r\n\r\n return sorted(\r\n PuzzleComplete.completed_puzzles(self.system.anonymous_student_id),\r\n key=lambda d: (d['set'], d['subset']))",
"def get_key_list(self) -> list:\n return self.key_functs.keys()",
"def create_sample_listing():\n entries = []\n for task_name, dataset in chain(MCBackgroundsSampleDictionaryUnordered, SignalMCSampleDictionaryUnordered, DataDictionaryMuonChannelUnordered):\n matching_output = [x for x in TaskDictionaryNameUnordered if x[0] == task_name]\n if len(matching_output) > 1:\n raise RuntimeError(\"More than 1 match for %s\" % task_name)\n if len(matching_output) == 0:\n print \"No match for task %s\" % task_name\n continue\n output_name = matching_output[0][1]\n entries.append(Sample(task_name, dataset, output_name))\n\n return entries",
"def SampleIds(self):\r\n return sorted(self._metadata.keys())",
"def get_group_keys(self):\r\n if len(self.conflicting_exclusives) == 0:\r\n return [\"<none>\"]\r\n else:\r\n return self.key_to_targets.keys()",
"def dependent_keys(tasks, complete=False):\n out = set()\n errors = set()\n stack = list(tasks)\n while stack:\n ts = stack.pop()\n key = ts.key\n if key in out:\n continue\n if not complete and ts.who_has:\n continue\n if ts.exception is not None:\n errors.add(key)\n if not complete:\n continue\n\n out.add(key)\n stack.extend(ts.dependencies)\n return out, errors",
"def get_computed_stats_keys():\n\n # order not important\n result = [NUM_KITS_KEY, NUM_SAMPLES_KEY,\n NUM_SAMPLES_RECEIVED_KEY, NUM_UNIQUE_SOURCES_KEY,\n NUM_PARTIALLY_RETURNED_KITS_KEY,\n NUM_FULLY_RETURNED_KITS_KEY, NUM_KITS_W_PROBLEMS_KEY]\n\n num_status_keys = get_status_num_keys()\n result.extend(num_status_keys)\n return result",
"def keys(self):\n return self.WaveNameMap.keys()",
"async def get_keys(self):\n return self.dict.keys()",
"def key_usages(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"key_usages\")",
"def keys(self):\n return self.params.keys()",
"def keys(self, installer_context):\n keys = set()\n for source in self.sources:\n keys.update(set(source.keys(installer_context)))\n return list(keys)",
"def get_capture_keys(pen_id: int, start_date: str, end_date: str, inbound_bucket=INBOUND_BUCKET) -> List:\n\n site_id = PEN_SITE_MAPPING[pen_id]\n dates = get_dates_in_range(start_date, end_date)\n capture_keys = []\n for date in dates:\n print('Getting capture keys for pen_id={}, date={}...'.format(pen_id, date))\n for hour in DAYTIME_HOURS_GMT:\n hr = str(hour).zfill(2)\n s3_prefix = 'environment=production/site-id={}/pen-id={}/date={}/hour={}'.format(site_id, pen_id,\n date, hr)\n\n\n generator = s3.get_matching_s3_keys(inbound_bucket, prefix=s3_prefix,\n subsample=1.0,\n suffixes=['capture.json'])\n\n these_capture_keys = [key for key in generator]\n capture_keys.extend(these_capture_keys)\n\n return capture_keys",
"def keys(self):\n\t\treturn tuple(self.dist.keys())",
"def get_registered_output_keys(self):\n return tuple(self._output_keys)",
"def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys",
"def getSamplesList(self):\n return self.sample_names",
"def source_keys(self):\n for source_key in self._counts.keys():\n yield source_key",
"def get_keys(self):\r\n return self._keys",
"def get_evaluated_keys(self) -> List[str]:\n return force_list(self.evaluated_keys)",
"def keys(self):\n return",
"def get_test_key(points_history): # TODO: remove after development\n\n import random\n random_key = random.choice(list(points_history.keys()))\n\n return random_key"
]
| [
"0.58077824",
"0.57011235",
"0.55749696",
"0.55166054",
"0.5502152",
"0.5502144",
"0.5501071",
"0.54757553",
"0.5468525",
"0.54596",
"0.5451035",
"0.54484963",
"0.54165906",
"0.53382456",
"0.5324936",
"0.532412",
"0.53238577",
"0.5314545",
"0.52536863",
"0.5247326",
"0.52377856",
"0.52294135",
"0.5213607",
"0.5193686",
"0.5181528",
"0.51780295",
"0.5165171",
"0.51626164",
"0.51605445",
"0.51474935"
]
| 0.81249654 | 0 |
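A small runnable sketch of the completion filter used by __completed_samples_list__ in the record above. The Pipeline stand-in is hypothetical and exposes only the interface the method relies on: an __is_complete__() check and a sample_key attribute.

class Pipeline:
    # Hypothetical stand-in exposing only the interface used by the method above.
    def __init__(self, sample_key: str, complete: bool) -> None:
        self.sample_key = sample_key
        self._complete = complete

    def __is_complete__(self) -> bool:
        return self._complete

pipelines = [Pipeline("Sample_1", True), Pipeline("Sample_2", False), Pipeline("Sample_3", True)]
completed = [p.sample_key for p in pipelines if p.__is_complete__()]
print(completed)  # ['Sample_1', 'Sample_3']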
Checks the number of completed samples and generates reports based on this number and what has been previously reported. Returns True only if a new report object is initialized. | def __generate_reports__(self,configs,mockdb):
sample_keys = self.__completed_samples_list__(mockdb)
n = len(sample_keys)
numbers = configs['pipeline'].get('Flowcell_reports','numbers').split(',')
numbers.sort(key=int,reverse=True)
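        # Thresholds are walked from largest to smallest, so only the largest
        # threshold reached by the completed-sample count can trigger a new report.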
flowcell = mockdb['Flowcell'].__get__(configs['system'],key=self.flowcell_key)
for number in numbers:
if n >= int(number):
if getattr(self,'flowcell_report_' + str(number) + '_key') is None:
report = mockdb['FlowcellStatisticReport'].__new__(configs['system'],sample_keys=sample_keys,flowcell=flowcell,number=number,base_output_dir=self.base_output_dir)
report.__fill_qsub_file__(configs)
report.__launch__(configs['system'])
setattr(self,'flowcell_report_' + str(number) + '_key',report.key)
return True
return False
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_report(self):\n return self.report is not None",
"def Report(self):\n return True",
"def has_more_samples(self):\n return True",
"def has_more_samples(self):\n return True",
"def has_more_samples(self):\n return True",
"def is_ready_to_reap(self):\n self.calc_progress()\n return self._num_results > 0 and (\n self._num_results == self.num_sown_batches\n )",
"def reportResult(self):\n return True;",
"def report_trial(self):\n pass",
"def check_expectations(self):\n self.load_results()\n\n for (benchmark, producer), result in self.results.items():\n if not result.reports:\n print('No results found for ' + benchmark + ' ' + producer)\n result.test_passed = False\n else:\n for report in result.reports:\n if check_benchmark_result(report, result.expectation):\n print('Test passed: ' + result.directory)\n result.test_passed = True\n else:\n print('Test failed: ' + result.directory)\n result.test_passed = False",
"def has_more_trials(self) -> bool:\r\n raise NotImplementedError",
"def should_start_analysis(self):\n return len(self.task_queue) >= self.bulk_size",
"def detect_completion(self):\n results_dir = glob.glob(f\"{self.production.rundir}\")\n if len(results_dir)>0: # dynesty_merge_result.json\n if len(glob.glob(os.path.join(results_dir[0], f\"extrinsic_posterior_samples.dat\"))) > 0:\n return True\n else:\n return False\n else:\n return False",
"def has_result(self):\n return len(self.__analysis_items) > 0",
"def check_readings(self):\n # loading data from log file\n if self.filepath is not None:\n if self.all_read is None:\n return False\n else:\n ei = self.curr_indexi + self.read_step\n if ei >= self.all_read.shape[0]:\n return False\n self.curr_read = self.all_read[self.curr_index: ei, :]\n self.curr_index = ei\n return True\n\n # stream of data from beaglebone\n # check that there is new data avalible\n isnew = not all_data.empty()\n\n if isnew:\n # read most current data\n qsize = all_data.qsize()\n curr_read = [all_data.get_nowait() for _ in range(qsize)]\n self.curr_read = np.concatenate(curr_read)\n\n return isnew",
"def complete(self):\r\n\tif self.launch_time == INVALID_TIME:\r\n\t print \"Missing probe launch time\"\r\n return False\r\n if self.received_time == INVALID_TIME:\r\n print \"Missing probe received time\"\r\n return False\r\n if self.completion_time == INVALID_TIME:\r\n print \"Missing probe completion time\"\r\n return False\r\n return True",
"def done(self):\n log.debug(\"Test run concluded\")\n if self._startTime is not None:\n self.report['startTime'] = self._startTime\n self.report['runTime'] = time.time() - self._startTime\n self.report['testsRun'] = self.testsRun\n self.report['tests'] = self._tests\n self.writeReport()",
"def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')",
"def test_basic_report(self):\n report = self.analytics.suites[testReportSuite].report\n queue = []\n queue.append(report)\n response = omniture.sync(queue)\n self.assertIsInstance(response, list)",
"def __send_reports__(self,config,mockdb):\n numbers = config.get('Flowcell_reports','numbers').split(',')\n for number in numbers:\n flowcell_report_key = getattr(self,'flowcell_report_' + str(number) + '_key')\n if flowcell_report_key is None:\n continue\n report = mockdb['FlowcellStatisticReport'].objects[flowcell_report_key]\n if report.report_sent is True: #If the report is already sent, next.\n continue\n if not report.__is_complete__(): #If the qsub script is still running, next.\n continue\n if self.sequencing_run_type == 'RapidRun' and str(number) == '16':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n elif self.sequencing_run_type == 'HighThroughputRun' and str(number) == '64':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n else:\n recipients = config.get('Flowcell_reports','subset_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"subset_report\")\n files = []\n files.append(report.report_pdf)\n files.append(report.full_report)\n files.append(report.current_report)\n send_email(subject,body,recipients=recipients,files=files)\n report.__finish__()\n report.report_sent = True\n return 1",
"def test_report(self):\n # Delete some ghost waters so they can be written out\n ghosts = [3054, 3055, 3056, 3057, 3058]\n base_gcmc_sampler.deleteGhostWaters(ghostResids=ghosts)\n\n # Report\n base_gcmc_sampler.report(base_gcmc_simulation)\n\n # Check the output to the ghost file\n assert os.path.isfile(os.path.join(outdir, 'bpti-ghost-wats.txt'))\n # Read which ghosts were written\n with open(os.path.join(outdir, 'bpti-ghost-wats.txt'), 'r') as f:\n n_lines = 0\n lines = f.readlines()\n for line in lines:\n if len(line.split()) > 0:\n n_lines += 1\n assert n_lines == 1\n ghosts_read = [int(resid) for resid in lines[0].split(',')]\n assert all(np.isclose(ghosts, ghosts_read))\n\n return None",
"def is_completed(self):\n self.logger.info(\"# dispatch completed: %s\", self.dispatch_completed)\n self.logger.info(\"@ num_queries={}, num_finished_jobs={}\".format(self.query_count, self.num_finished_jobs))\n for driver_id in sorted(self.dispatch_records.keys()):\n self.logger.info(\"driver_id={}, dispatch={}, reports={}\".format(driver_id, self.dispatch_records[driver_id], self.report_records[driver_id]))\n return self.dispatch_completed and (self.query_count == self.num_finished_jobs)",
"def ready(self):\n return self.counter > 0",
"def is_complete(self):\n is_complete = True\n \n if (type(self.N) is not IntType) or self.N < 2:\n warnings.warn('N not set up properly.')\n is_complete = False\n \n if self.m is None or len(self.m) != self.N:\n warnings.warn('m not set up properly.')\n is_complete = False\n \n if self.R is None or len(self.R) != self.N:\n warnings.warn('R not set up properly.')\n is_complete = False\n \n if self.a is None or len(self.a) != self.N - 1:\n warnings.warn('a not set up properly.')\n is_complete = False\n \n if self.force is None or len(self.force) != self.N:\n warnings.warn('force not set up properly.')\n is_complete = False\n \n if self.Delta is None or len(self.Delta) != self.N - 1:\n warnings.warn('Delta not set up properly.')\n is_complete = False\n \n if self.n is None or len(self.n) != self.N - 1:\n warnings.warn('n not set up properly.')\n is_complete = False\n \n if self.beta < 0.0:\n warnings.warn('beta not set up properly.')\n is_complete = False\n \n if self.m0 < 0.0:\n warnings.warn('m0 not set up properly.')\n is_complete = False\n \n if self.mu < 0.0:\n warnings.warn('mu not set up properly.')\n is_complete = False\n \n return is_complete",
"def check_if_schedule_finished(self):\n tot_num_tasks_scheduled = sum(self.is_task_finished[0])\n if tot_num_tasks_scheduled > 19 or self.t > 150:\n self.data_done_generating = True\n if self.t > 150:\n print('Schedule failed to create')\n print('Schedule will not be copied')\n self.did_schedule_fail = True\n else:\n print('Successful schedule created')\n # copy rows into another excel file\n # with open(self.filepath, 'r') as csvfile, open(self.writepath, 'a') as outfile:\n # data = (csv.reader(csvfile))\n # writer = csv.writer(outfile)\n # for row in data:\n # writer.writerow(row)\n #\n # with open(self.second_file_path, 'r') as csvfile, open(self.writepath2, 'a') as outfile:\n # data = (csv.reader(csvfile))\n # writer = csv.writer(outfile)\n # for row in data:\n # writer.writerow(row)\n\n print('1 schedule created.')",
"def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0",
"def can_sample(self, n_samples):\n return len(self) >= n_samples",
"def can_sample(self, n_samples):\n return len(self) >= n_samples",
"def can_sample(self, n_samples):\n return len(self) >= n_samples",
"def can_sample(self, n_samples):\n return len(self) >= n_samples",
"def is_done(self):\n return_val = False\n for name in os.listdir(self.results_dir_path):\n if name.startswith('top_genes_per_phenotype'):\n return_val = True\n return return_val"
]
| [
"0.6579948",
"0.6464013",
"0.60099596",
"0.60099596",
"0.60099596",
"0.6004687",
"0.5992304",
"0.59684485",
"0.5916682",
"0.5828449",
"0.57452166",
"0.5734676",
"0.5715299",
"0.5679798",
"0.5673723",
"0.5657325",
"0.56381965",
"0.561074",
"0.56070954",
"0.55903435",
"0.5573295",
"0.5568445",
"0.5552846",
"0.55510956",
"0.55301905",
"0.5517953",
"0.5517953",
"0.5517953",
"0.5517953",
"0.5517305"
]
| 0.6739479 | 0 |
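A compact sketch of the threshold walk performed by __generate_reports__ in the record above: report sizes are checked from largest to smallest, only the largest threshold reached can trigger a new report, and nothing happens if that threshold has already been reported. The thresholds and sample counts below are hypothetical example values.

from typing import Iterable, Optional, Set

def report_number_to_generate(n_completed: int,
                              numbers: Iterable[str],
                              already_reported: Set[str]) -> Optional[str]:
    for number in sorted(numbers, key=int, reverse=True):
        if n_completed >= int(number):
            # The largest reached threshold wins; if it was already reported, do nothing.
            return None if number in already_reported else number
    return None

print(report_number_to_generate(20, ["4", "16", "64"], {"4"}))   # -> '16'
print(report_number_to_generate(20, ["4", "16", "64"], {"16"}))  # -> None
print(report_number_to_generate(70, ["4", "16", "64"], {"16"}))  # -> '64'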
Fills the qsub file from a template. Since not all information is archived in the parent object, the function also gets additional information on the fly for the qsub file. | def __fill_qsub_file__(self,configs):
        template_file = os.path.join(configs['system'].get('Common_directories','template'),configs['pipeline'].get('Template_files','flowcell_report'))
dictionary = {}
for k,v in self.__dict__.iteritems():
dictionary.update({k:str(v)})
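        # Additional information pulled from the pipeline config on the fly
        # (these values are not archived on the parent object itself).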
dictionary.update({'post_pipeline':configs['pipeline'].get('Db_reports','post_pipeline')})
dictionary.update({'concord_script':configs['pipeline'].get('Flowcell_reports','concord_script')})
dictionary.update({'dbsnp_script':configs['pipeline'].get('Flowcell_reports','dbsnp_script')})
dictionary.update({'tenx_script':configs['pipeline'].get('Flowcell_reports','tenx_script')})
dictionary.update({'zero_script':configs['pipeline'].get('Flowcell_reports','zero_script')})
dictionary.update({'hethom_script':configs['pipeline'].get('Flowcell_reports','hethom_script')})
dictionary.update({'reads_script':configs['pipeline'].get('Flowcell_reports','reads_script')})
with open(self.qsub_file,'w') as f:
f.write(fill_template(template_file,dictionary)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)",
"def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template",
"def _create_from_template(self):\n template_file = self._helper._get_template_file_path()\n self._engine.open_file_by_path(template_file)\n self._save_current_as_new()",
"def __init__(self, template):\n\n self.template = template\n self.parsed_template = {}",
"def _fill_template(self, name, superclass):\n end = name.lower().split(\"_\")[-1]\n id_field_name = self.id_field_name or end + \"_id\"\n template = Template(self.template)\n template = template.substitute(namespace=self.module.namespace,\n module_name=self.module.name,\n name=name,\n superclass=superclass,\n group=self.module.name.lower(),\n name_lower=name.lower(),\n end=end,\n id_field_name=id_field_name)\n return template",
"def __init__(self, path_template, untrimmed_path, qualities, file_opener):\n super().__init__()\n assert '{name}' in path_template\n self.template = path_template\n self.untrimmed_path = untrimmed_path\n self.untrimmed_writer = None\n self.writers = dict()\n self.qualities = qualities\n self.file_opener = file_opener",
"def __init__(self, pageName):\n self.pageName = pageName\n self.updateFileData()\n self.template = pystache.parse(unicode(self.fileData, 'utf-8'))",
"def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result",
"def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. \"\n \"\".format(template, e.args[0]))\n return template",
"def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' % tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()",
"def update_template():\n\n # Open, and read, the template file\n with open(\"template.html\", \"r\") as f:\n soup = BeautifulSoup(f.read(), features=\"html5lib\")\n\n # Add the plots in the correct places\n for div in soup.find_all(\"div\", class_=\"plot\"):\n with open(div[\"src\"], \"r\") as f:\n plot = BeautifulSoup(f.read(), features=\"html5lib\")\n div.replace_with(plot.html.body.div)\n\n # Write the finished report to document.html\n with open(\"document.html\", \"w\") as f:\n f.write(soup.prettify())",
"def build(self):\n self.logger.debug(\"run\")\n\n self.onInit()\n self.work()\n \n self.afterWork()\n\n template = Templateengine(self.currenttemplate)\n template.readTemplateFile()\n contenttype = self.settings.contenttype \n self.defaultTemplateParameter()\n \n try:\n self.content = template.get(self.tplparam)\n except Exception as ex:\n Emergency.stop(ex)\n\n self.onDone()\n \n self.logger.debug(\"done\")",
"def _fill_template(self, name, superclass):\n template = Template(self.template)\n template = template.substitute(namespace=self.module.namespace,\n module_name=self.module.name,\n name=name,\n superclass=superclass)\n return template",
"def create_concrete_from_template(pf_template, file_number):\n\n return pf_template.replace(\"${PBS_ARRAYID}\", str(file_number))",
"def fill_template_file(filename, value_dict):\n f = open(filename, 'r')\n text = f.read()\n f.close()\n f = open(filename, 'w')\n f.write(text % value_dict)\n f.close()",
"def baseTemplate(*args, exists: bool=True, fileName: Union[AnyStr, bool]=\"\", force: bool=True,\n load: bool=True, matchFile: Union[AnyStr, bool]=\"\", silent: bool=True, unload:\n bool=True, viewList: Union[AnyStr, bool]=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass",
"def render_template(self):\n # create and expand commandline template\n tmpl_r1 = self.finditem.sub(r'{{\\2}}', self.raw_template)\n tmpl_r2 = jinja2.Template(tmpl_r1).render(self.variables)\n self.relation.script = tmpl_r2\n self.relation.template_sha256 = self.variables['template_sha256']",
"def test_populate_single_template_from_master(populated_template, datamap):\n data = parse(populated_template, datamap)\n assert data[0]['gmpp_key'] == 'Project/Programme Name'\n assert data[0]['gmpp_key_value'] == 'PROJECT/PROGRAMME NAME 9'",
"def prepare_template(self, obj):\n if self.instance_name is None and self.template_name is None:\n raise SearchFieldError(\n \"This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.\"\n )\n\n if self.template_name is not None:\n template_names = self.template_name\n\n if not isinstance(template_names, (list, tuple)):\n template_names = [template_names]\n else:\n app_label, model_name = get_model_ct_tuple(obj)\n template_names = [\n \"search/indexes/%s/%s_%s.txt\"\n % (app_label, model_name, self.instance_name)\n ]\n\n t = loader.select_template(template_names)\n return t.render({\"object\": obj})",
"def __init__(\n self,\n path_template: str,\n path_paired_template: str,\n untrimmed_name: Optional[str],\n qualities: bool,\n file_opener: FileOpener,\n ):\n super().__init__()\n assert '{name1}' in path_template and '{name2}' in path_template\n assert '{name1}' in path_paired_template and '{name2}' in path_paired_template\n self.template = path_template\n self.paired_template = path_paired_template\n self.untrimmed_name = untrimmed_name\n self.writers = dict() # type: Dict[Tuple[str, str], Any]\n self.qualities = qualities\n self.file_opener = file_opener",
"def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)",
"def input_template(template, fields):\n editor = os.environ.get('EDITOR', '/usr/bin/vim')\n with tempfile.NamedTemporaryFile('w+t') as ofile:\n ofile.write(template % fields)\n ofile.flush()\n user_command = '%s %s' % (editor, ofile.name)\n if os.system(user_command) != 0:\n raise Error('Error acquiring user input (command was %r).' % user_command)\n with open(ofile.name, 'r') as ifile:\n filled_template = ifile.read()\n\n fields = dict(parse_template(filled_template))\n return fields",
"def __init__(self, path, template):\n super(GenerateSpectrum, self).__init__(path)\n self._template = template",
"def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')",
"def __init__(self, *args):\n _ida_fpro.qfile_t_swiginit(self, _ida_fpro.new_qfile_t(*args))",
"def edit(self, connection_id, arguments, template):\n context = self.context\n self.connection_id = str(connection_id)\n arguments = str(arguments)\n self.arguments_src = arguments\n self._arg = Aqueduct.parse(arguments)\n if not isinstance(template, (str, unicode)):\n template = str(template)\n self.src = template\n self.template = t = context.template_class(template)\n t.cook()\n context._v_query_cache={}, Bucket()",
"def process_tempita(fromfile):\n if not fromfile.endswith('.in'):\n raise ValueError(\"Unexpected extension: %s\" % fromfile)\n\n from_filename = tempita.Template.from_filename\n template = from_filename(fromfile,\n encoding=sys.getdefaultencoding()) \n\n content = template.substitute()\n\n outfile = os.path.splitext(fromfile)[0]\n with open(outfile, 'w') as f:\n f.write(content)",
"def __init__(self, tmp_json):\n super(Template, self).__init__(tmp_json)",
"def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):\r\n self.name = name\r\n self.source = source.read() if hasattr(source, 'read') else source\r\n self.filename = source.filename if hasattr(source, 'filename') else None\r\n self.lookup = map(os.path.abspath, lookup)\r\n self.encoding = encoding\r\n self.settings = self.settings.copy() # Copy from class variable\r\n self.settings.update(settings) # Apply\r\n if not self.source and self.name:\r\n self.filename = self.search(self.name, self.lookup)\r\n if not self.filename:\r\n raise TemplateError('Template %s not found.' % repr(name))\r\n if not self.source and not self.filename:\r\n raise TemplateError('No template specified.')\r\n self.prepare(**self.settings)",
"def _prepare_template(self, obj, needs_request=False):\r\n if self.instance_name is None and self.template_name is None:\r\n raise SearchFieldError(\"This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.\")\r\n\r\n if self.template_name is not None:\r\n template_names = self.template_name\r\n\r\n if not isinstance(template_names, (list, tuple)):\r\n template_names = [template_names]\r\n else:\r\n template_names = ['search/indexes/%s/%s_%s.txt' % (obj._meta.app_label, obj._meta.module_name, self.instance_name)]\r\n\r\n t = loader.select_template(template_names)\r\n ctx = {'object': obj}\r\n if needs_request:\r\n request = rf.get(\"/\")\r\n request.session = {}\r\n ctx['request'] = request\r\n return t.render(Context(ctx))"
]
| [
"0.6398042",
"0.59318715",
"0.57331526",
"0.56946903",
"0.56846076",
"0.5610546",
"0.5556413",
"0.5502752",
"0.546865",
"0.54102975",
"0.5395212",
"0.5394656",
"0.5380861",
"0.5331941",
"0.5326681",
"0.5309131",
"0.52896446",
"0.5275481",
"0.524707",
"0.52117896",
"0.5208709",
"0.51388615",
"0.5117937",
"0.51152873",
"0.5111921",
"0.51036197",
"0.5073658",
"0.5066433",
"0.50433177",
"0.50361526"
]
| 0.71228755 | 0 |
Simple test to check that raw target data does NOT leak during validation. | def test_for_leakage(self):
src, trg = next(iter(self.validation_loader))
trg_mem = trg.clone().detach()
result = greedy_decode(self.model, src, 20, trg)
self.assertNotEqual(result[0, 1, 0], trg_mem[0, 1, 0])
self.assertEqual(result[0, 1, 1], trg_mem[0, 1, 1])
self.assertEqual(result[0, 1, 2], trg_mem[0, 1, 2])
loss = pytorch_criterion_dict["MSE"](trg, trg_mem)
self.assertNotEqual(result[0, 1, 0], result[0, 4, 0])
self.assertGreater(loss, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)",
"def testCheckSourceCopyOperation_FailContainsData(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,\n 134, 0, 0, 'foo')",
"def check_data(raw_data, nsamples, verbose):\n if verbose:\n print 'raw_data', raw_data\n print 'raw_data.shape', raw_data.shape\n\n uc_timings, uc_run_counts = np.unique(raw_data[:,0], return_counts=True)\n if verbose:\n print 'uc_timings', uc_timings\n print 'uc_run_counts', uc_run_counts\n\n n_of_runs = set(uc_run_counts)\n if len(n_of_runs) != 1 and len(n_of_runs) != 2:\n raise AssertionError(\n 'Something bad happened!\\nn_of_runs = {0}\\nlen(n_of_runs) = '\n '{1}'.format(n_of_runs, len(n_of_runs))\n )\n\n if len(n_of_runs) == 2:\n if np.diff(list(n_of_runs))[0] != 1:\n raise AssertionError(\n 'Something bad happened!\\nn_of_runs = '\n '{0}\\nnp.diff(list(n_of_runs))[0] = '\n '{1}'.format(n_of_runs, np.diff(list(n_of_runs))[0])\n )\n n_incomplete_pulse = np.sum(uc_run_counts == np.max(list(n_of_runs)))\n if verbose:\n print 'n_incomplete_pulse', n_incomplete_pulse\n clean_data = raw_data[:-n_incomplete_pulse]\n else:\n clean_data = raw_data\n\n if verbose:\n print 'clean_data', clean_data\n print 'clean_data.shape', clean_data.shape\n\n return clean_data",
"def testCheckSourceCopyOperation_FailBlockCountsMismatch(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,\n None, 0, 1, 'foo')",
"def test_missing_data_sources(self):",
"def valid(self, target):",
"def test_raw_existence(self):\n\n # RNA - raw layer required\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ],\n )\n\n # ATAC - raw layer not required\n # The assignment above makes X to not be raw: self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n # The following line makes it to be scATAC-seq data (EFO:0010891)\n # Missing raw data in atac-seq data is allowed, thus the following should not return an error message\n self.validator.errors = []\n self.validator.adata.obs[\"assay_ontology_term_id\"] = \"EFO:0010891\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])",
"def test_check_new_data_saved_size_limit_exceeded(self):\n ldump = [[b'\\x01.cafe', 123, 512], [b'\\x00.bbbb', 123, 128], [b'\\x01.babe', 124, 128]]\n self.session.get.side_effect = [pickle.dumps(ldump), True, True, True, True]\n self.sut.init()\n self.sut.track(b'\\x01.ffff', 640)\n ldump.append([b'\\x01.ffff', self.sut.index['keys'][b'\\x01.ffff']['saved_at'], 640])\n ldump = ldump[1:]\n self.loop.run_until_complete(self.sut.check())\n Mock.assert_called_once_with(self.repository.blockchain.remove_block, b'cafe')\n Mock.assert_called_once_with(self.session.put, b'cache_index', pickle.dumps(ldump))",
"def test_data_source_soaps_post(self):\n pass",
"def test_process_data(self):\n pass",
"def _check_data_correct(self, expected_data, _):\n data_len = len(expected_data)\n for retry_count in range(self.RETRY_COUNT):\n if retry_count > 0:\n test_info.info('Previous attempts %s' % retry_count)\n try:\n data_loaded = self.board.read_target_memory(self._start, data_len)\n except:\n time.sleep(self.DELAY_BEFORE_RETRY_S)\n continue\n break\n else:\n raise Exception(\"read_target_memory() failed after %i retries\" % self.RETRY_COUNT)\n\n return _same(expected_data, data_loaded)",
"def test_validation(self):\n self.validationFails()",
"def test_trailing_data(self):",
"def test_validate_metadata_no_samples(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-no-samples-batch\",\n \"samples\": [],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)",
"def testCheckSourceCopyOperation_Pass(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n self.assertIsNone(\n payload_checker._CheckSourceCopyOperation(None, 134, 134, 'foo'))",
"def test_validate_metadata_pass(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-passing-batch\",\n \"samples\": [\n \"PTC_EXPn200908LL_L2000001\",\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNone(reason)\n\n # should not call to slack webhook\n verify(libslack.http.client.HTTPSConnection, times=0).request(...)",
"def test_has_correct_length(self) -> None:\n assert len(list(self._dataset)) == 7168",
"def test_verifyDamaged(self):\n self.testObject.content.setContent('garbage!')\n self.assertRaises(CorruptObject, self.testObject.verify)",
"def test_shapes(self):\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )",
"def test_X_normalization_not_raw(self):\n\n # Assign a real value to X while X_normalization is 'none'\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n print(\"FOO\", self.validator.warnings)\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear \"\n \"to have raw counts (integers)\"\n ],\n )",
"def test_validate(self):\n pass",
"def test_data_object_del(self):\n pass",
"def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()",
"def test_validate_metadata_blank_samples(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_samples = [\n {\n \"batch_name\": \"my-batch\",\n \"samples\": [],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_samples)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNotNone(reason)\n\n # should call to slack webhook once\n verify(libslack.http.client.HTTPSConnection, times=1).request(...)",
"def test_sources_not_ok_on_parse_error(self):\n measurement = self.measurement(\n self.metric(),\n sources=[\n {\n \"source_uuid\": SOURCE_ID,\n \"value\": None,\n \"total\": None,\n \"parse_error\": \"Oops!\",\n \"connection_error\": None,\n },\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertFalse(measurement.sources_ok())",
"def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))",
"def test_prevent_wrong_memory(self):\n self.assertRaises(cinv.host.Error, self.wrong_memory)",
"def test_garbage_stream(self):\r\n valid: bytes = b\"!AIVDM,1,1,,B,B43JRq00LhTWc5VejDI>wwWUoP06,0*29\"\r\n mock_file = MockFile([b\"Foo\", b\"Bar\", b\"1337\", valid])\r\n for msg in BinaryIOStream(mock_file):\r\n self.assertEqual(msg.raw, valid)",
"def load_data_clean(source='local'):\n\n train_bl, test_bl = load_data_raw(source)\n\n train = train_bl.copy()\n train = encode_dtypes(train)\n train = impute_missing(train)\n train = clean_data(train)\n\n test = test_bl.copy()\n test = encode_dtypes(test)\n test = impute_missing(test)\n test = clean_data(test)\n\n return train, test",
"def test_valid_project_curl_memory(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.assertTrue(\n cifuzz.build_fuzzers('curl',\n 'curl',\n tmp_dir,\n pr_ref='fake_pr',\n sanitizer='memory'))"
]
| [
"0.63760984",
"0.61903423",
"0.61187553",
"0.6034975",
"0.59867835",
"0.58931315",
"0.58748543",
"0.5858307",
"0.5847851",
"0.58443165",
"0.58315504",
"0.5814813",
"0.58093095",
"0.5804731",
"0.5799579",
"0.5769792",
"0.57684606",
"0.57661027",
"0.57597375",
"0.5759114",
"0.57509524",
"0.57478297",
"0.57388127",
"0.57112044",
"0.57078856",
"0.56989074",
"0.5689952",
"0.567415",
"0.5639215",
"0.5609461"
]
| 0.66377693 | 0 |
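The positive document in the row above snapshots the raw targets, runs a greedy decode, and asserts that the first feature column was not copied through. As a stand-alone sketch of that check — NumPy in place of PyTorch tensors, and `toy_greedy_decode`/`check_no_leakage` are hypothetical names invented here — the same assertions can be exercised like this:

```python
import numpy as np

def toy_greedy_decode(src, trg):
    # Stand-in decoder: it may use trg for teacher forcing on later feature
    # columns, but must derive column 0 from src rather than copying it.
    out = trg.copy()
    out[:, :, 0] = src.mean(axis=(1, 2))[:, None]
    return out

def check_no_leakage(src, trg):
    trg_mem = trg.copy()                                        # snapshot of the raw targets
    result = toy_greedy_decode(src, trg)
    assert not np.allclose(result[:, :, 0], trg_mem[:, :, 0])   # column 0 must differ
    assert np.allclose(result[:, :, 1:], trg_mem[:, :, 1:])     # other columns untouched

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    check_no_leakage(rng.normal(size=(2, 5, 3)), rng.normal(size=(2, 5, 3)))
    print("no target leakage detected")
```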
Helper to clear workers. | def clear_workers(self):
# seems sometimes that workers will cause
# print "calling destructor"
# first set the exit flag for each of the workers.
for worker in self.workers:
worker.no_exit = False
# next clear the queue, the workers might be waiting to add data to
# the queue.
# print "clearing queue"
while not self.queue.empty():
self.queue.get()
# print "queue empty, joining threads"
# now join all the workers
for worker in self.workers:
worker.join()
# print "done joining threads" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_workers(self, workers):\n for obj_ref, ev in self._tasks.copy().items():\n if ev not in workers:\n del self._tasks[obj_ref]\n del self._objects[obj_ref]\n\n # We want to keep the same deque reference so that we don't suffer from\n # stale references in generators that are still in flight\n for _ in range(len(self._fetching)):\n ev, obj_ref = self._fetching.popleft()\n if ev in workers:\n # Re-queue items that are still valid\n self._fetching.append((ev, obj_ref))",
"def workers_reset(self):\n self._post('agents/stop-workers')",
"def clear_jobs(self):\n with self._mutex:\n self._jobs = []",
"def clear(self):\n self.background_scheduler.remove_all_jobs()",
"def clearQueueAll():",
"def quit(self):\n map(lambda w: self._tasks.put(None), self._workers)\n map(lambda w: w.join(), self._workers)",
"def clear(mcs):\n mcs._JOB_REGISTRY.clear()",
"def force_exit(self):\n map(lambda w: w.force_exit(), self._workers)",
"def clean_up(self):\n if self.has_started_workers and not self.has_cleaned_up:\n self._log.log(1, \"Stopping feeder thread\")\n self.feeder_thread.stop()\n self.feeder_thread.join()\n\n self._log.log(1, \"Stopping workers\")\n for w in self.workers:\n w.stop()\n w.join()\n\n if self.is_multiprocessing:\n self._log.log(1, \"Closing/Joining process queues\")\n for q in (self.work_queue, self.results_queue):\n q.close()\n q.join_thread()\n\n self.has_cleaned_up = True",
"def terminate_workers(self):\n if self.shared_storage_worker:\n self.shared_storage_worker.set_info.remote(\"terminate\", True)\n self.checkpoint = ray.get(\n self.shared_storage_worker.get_checkpoint.remote()\n )\n if self.replay_buffer_worker:\n self.replay_buffer = ray.get(self.replay_buffer_worker.get_buffer.remote())\n\n print(\"\\nShutting down workers...\")\n\n self.self_play_workers = None\n self.test_worker = None\n self.training_worker = None\n self.reanalyse_worker = None\n self.replay_buffer_worker = None\n self.shared_storage_worker = None",
"def shutdown(self):\n self.all_workers_joined.wait() \n self.shutdown_master_thread()\n self.all_workers_joined.clear()",
"def clear(self):\n self.queue.clear()",
"def shutdown(self) -> None:\n for worker in self.remote_workers:\n worker.shutdown.remote()\n worker.__ray_terminate__.remote()",
"def cancel_workers(self):\n pass",
"def stop (self):\n for i in xrange(self.numpools):\n numworkers = self.numworkerslist[i]\n for j in xrange(numworkers):\n self.queues[i].put('__STOP__')",
"def cleanup():\n redis_client.flushall()",
"def reset(self):\n logger.info(\"resetting worker #%i\", self.myid)\n self.model.projection = self.model.projection.empty_like()\n self.finished = False",
"def clean(self):\r\n with self.mutex:\r\n now = time.time()\r\n if self.last_clean_time + self.CLEAN_INTERVAL < now:\r\n to_remove = []\r\n for (host, pool) in self.host_to_pool.items():\r\n pool.clean()\r\n if pool.size() == 0:\r\n to_remove.append(host)\r\n for host in to_remove:\r\n del self.host_to_pool[host]\r\n self.last_clean_time = now",
"def clear_all(self):\n self.clear_redis()\n self.clear_cache()",
"async def clear_all(self) -> None:",
"def clear(self):\r\n try:\r\n while not self._queue.empty():\r\n self._queue.get().close()\r\n except:\r\n pass",
"def clear_tasks(self):\n self.last_task = None\n self.tasks = []",
"def clear_queue(self):\n while not self.queue.empty():\n self.queue.get()",
"def terminateAll(self):\n with self.__queueLock:\n for queue in [self.__queue, self.__clientQueue]:\n queue.clear()\n\n for runList in [self.__running, self.__clientRunning]:\n unfinishedRuns = [run for run in runList if run is not None]\n for run in unfinishedRuns:\n run.kill()",
"def clear(self):\n self.queue = Queue()",
"def clear_queue(self):\n self.queue = deque()",
"def clear_mem(self):\n dbe.upload_trials(self.trials, self.name)\n self.trials = []",
"def clear(self):\n self.recorders = set([])\n self.reset()\n\n # Stop any currently running SpiNNaker application\n self.stop()",
"def clearQueue(targets):",
"def clear(self):\n self._multivol.deallocate(self.id)"
]
| [
"0.7626767",
"0.7542355",
"0.7180268",
"0.7154682",
"0.6952088",
"0.68525493",
"0.66856486",
"0.65819097",
"0.65408283",
"0.64911026",
"0.64703065",
"0.64379334",
"0.6434519",
"0.6424239",
"0.6419156",
"0.6379613",
"0.63488203",
"0.6319778",
"0.6311553",
"0.62962127",
"0.62816113",
"0.6244032",
"0.6237941",
"0.6196385",
"0.6167919",
"0.61355335",
"0.6135343",
"0.61259556",
"0.6120093",
"0.61031485"
]
| 0.78377396 | 0 |
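The teardown helper in the row above follows a fixed order: flag the workers to exit, drain the queue so no producer stays blocked, then join every thread. A minimal standard-library re-creation of that pattern (the `Pool` class, its sizes, and timings are invented for illustration):

```python
import queue
import threading
import time

class Pool:
    def __init__(self, n=4):
        self.queue = queue.Queue(maxsize=8)
        self.stop = threading.Event()
        self.workers = [threading.Thread(target=self._work) for _ in range(n)]
        for w in self.workers:
            w.start()

    def _work(self):
        # Workers act as producers: they keep pushing items until told to stop.
        while not self.stop.is_set():
            try:
                self.queue.put(time.time(), timeout=0.1)
            except queue.Full:
                pass

    def clear_workers(self):
        self.stop.set()                    # 1. flag every worker to exit
        while not self.queue.empty():      # 2. drain so no producer stays blocked
            self.queue.get_nowait()
        for w in self.workers:             # 3. join all workers
            w.join()

if __name__ == "__main__":
    pool = Pool()
    time.sleep(0.2)
    pool.clear_workers()
    print("all workers joined")
```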
Listens for incoming data; calls the callback when an EOL character is received, or when nothing more arrives within the frame_interval delay. | def __listener__(self):
frame_interval = 0.1
str_list = []
c = ''
while True:
with Timeout(frame_interval, False):
while True:
try:
c = self.ser.read()
except:
self.ser.close()
self.make_connection.go()
self.connection_made.wait()
str_list.append(c)
if c == "\n" or c == '':
break
received = ''.join(str_list)
str_list = []
if received:
for i in self.read_handlers:
gevent.spawn(i, received)
sleep(0.001) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n while True:\n line = self.stream.readline()\n if not len(line):\n # EOF, stop!\n break\n else:\n # Put the text on the queue, along with the time it was read.\n self.callback_queue.put(line)",
"def dataReceived(self, data):\n self.resetTimeout()\n LengthDelimitedStream.dataReceived(self, data)",
"def outReceived(self, data):\n log.msg('got %r' % data)\n lines = (self._lineBuffer + data).split(b'\\n')\n self._lineBuffer = lines.pop(-1)\n self._linesReceived.extend(lines)\n # XXX - not strictly correct.\n # We really want onOutReceived to fire after the first 'cftp>' prompt\n # has been received. (See use in OurServerCmdLineClientTests.setUp)\n if self.onOutReceived is not None:\n d, self.onOutReceived = self.onOutReceived, None\n d.callback(data)\n self.buffer += data\n self._checkForCommand()",
"def _thread_loop(self):\n while not self.stop_thread.is_set():\n # First, read a line\n try:\n line = self._read_line()\n except EvseTimeoutError:\n continue\n # Then if the line is a status change, execute the callback\n if line[:3] in ('ST ', '$ST'):\n self.callback(states[int(line.split()[1], 16)])\n # write_allowed is only cleared if the board has been reset ;\n # in this case, we should wait 1 more second before executing\n # commands in order for the board to finish booting.\n if not self.write_allowed.is_set():\n threading.Timer(1, self.write_allowed.set).start()\n continue\n # Do not write a new line if\n # the previous one isn't read and is not old enough\n previous_newline_age = 0\n while (self.newline_available.is_set() and\n previous_newline_age <= NEWLINE_MAX_AGE):\n time.sleep(SYNC_SERIAL_TIMEOUT)\n previous_newline_age += SYNC_SERIAL_TIMEOUT\n # Write the new received line\n self.newline = line\n self.newline_available.set()",
"def _ir_recv_daemon(self):\n while True:\n if (time.ticks_us()-self._prev_time) > self.waittime and self.pulse_buffer != []:\n dec = self.decode_buff()\n if self.callback:\n self.callback(dec)",
"def data_received(self, data):\n self.log.debug('data_received: {!r}'.format(data))\n self._last_received = datetime.datetime.now()\n for byte in (bytes([value]) for value in data):\n\n try:\n self.stream.feed_byte(byte)\n except (ValueError, AssertionError):\n e_type, e_value, _ = sys.exc_info()\n map(self.log.warn,\n traceback.format_exception_only(e_type, e_value))\n continue\n\n if self.stream.is_oob:\n continue\n\n # self.reader.feed_byte()\n self.shell.feed_byte(byte)",
"def data_received(self, data):\n # This may seem strange; feeding all bytes received to the **writer**,\n # and, only if they test positive, duplicating to the **reader**.\n #\n # The writer receives a copy of all raw bytes because, as an IAC\n # interpreter, it may likely **write** a responding reply.\n self._last_received = datetime.datetime.now()\n\n cmd_received = False\n for byte in data:\n try:\n recv_inband = self.writer.feed_byte(bytes([byte]))\n except:\n self._log_exception(logger.warning, *sys.exc_info())\n else:\n if recv_inband:\n # forward to reader (shell).\n self.reader.feed_data(bytes([byte]))\n\n # becomes True if any out of band data is received.\n cmd_received = cmd_received or not recv_inband\n\n # until negotiation is complete, re-check negotiation aggressively\n # upon receipt of any command byte.\n if not self._waiter_connected.done() and cmd_received:\n self._check_negotiation_timer()",
"def dataReceived(self, data):",
"def data_received(self, data):\n self.buf += data\n if b'\\n' in self.buf:\n lines = self.buf.split(b'\\n')\n self.buf = lines[-1] # whatever was left over\n for line in lines[:-1]:\n asyncio.ensure_future(self.q.put(line))\n self.msgs_recvd += 1\n if self.msgs_recvd == 4:\n self.transport.close()",
"def onRecv(self, data):\n self.stream += data\n while self.handleStream(): pass",
"def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()",
"def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")",
"def data_received(self, data):\n self.buffered += data\n while True:\n if self.have_length:\n if len(self.buffered) < self.message_length:\n break\n self._decode_message(self.buffered[:self.message_length])\n self.have_length = False\n self.buffered = self.buffered[self.message_length:]\n self.message_length = 0\n else:\n if len(self.buffered) < 4:\n break\n (self.message_length,) = struct.unpack_from(\">I\", self.buffered)\n self.buffered = self.buffered[4:]\n self.have_length = True",
"def _listener(self):\n with open(self.playback_file, \"rb\") as f:\n while self.thread_run.is_set():\n length = f.read(1)\n if len(length) == 0:\n # out of data\n break\n length = length[0]\n data = f.read(length)\n if len(data) != length:\n raise RuntimeError(\"Didn't receive the expected amount of bytes!\")\n\n # itterating over bytes gives us ints\n report = data[0]\n retval = data[1]\n payload = [d for d in data[2:]]\n if report >= 0:\n self.queue.put((report, retval, payload))\n if self.verbose:\n self.log(\"Put report {} on queue\".format(report))\n\n if self.verbose:\n self.log(\"Waiting for queue to empty...\")\n\n while self.packets_available():\n time.sleep(0.01)",
"def handle_received(self) -> None:\n self.buffer: bytes\n while len(self.buffer) >= DataHeader.length:\n header = DataHeader.decode(self.buffer, allow_excessive=True)\n if len(self.buffer) < header.size:\n _LOGGER.debug(\n \"Not enough data on data channel (has %d, expects %d)\",\n len(self.buffer),\n header.size,\n )\n break\n\n try:\n self._process_message_from_buffer(header)\n except Exception:\n _LOGGER.exception(\"failed to process data frame\")\n\n self.buffer = self.buffer[header.size :]",
"def listen_forever(self):\n self.listening_event.set()\n self.shutdown_event.clear()\n try:\n while not self.shutdown_event.is_set():\n frame = self.connection.read()\n if frame:\n self.log.debug(\"Processing frame: %s\" % frame)\n self.dispatch_frame(frame)\n except:\n self.log.exception(\"Error receiving data; aborting listening loop.\")\n raise\n finally:\n self.listening_event.clear()",
"def data_received(self, data: str) -> None:\n logger.debug('Received: {}'.format(data))\n try:\n self.buffer += data.decode()\n except:\n logger.exception('Could not decode data from client')\n\n idx = self.buffer.find('\\r\\n')\n\n while idx >= 0: # While there are separators\n frame = self.buffer[:idx + 2].strip() # Extract the JSON object\n self.buffer = self.buffer[idx + 2:] # Removes the JSON object from the buffer\n\n self.on_frame(frame) # Process the frame\n idx = self.buffer.find('\\r\\n')\n\n if len(self.buffer) > 4096 * 1024 * 1024: # If buffer is larger than 4M\n logger.warning('Buffer to large')\n self.buffer = ''\n self.transport.close()",
"def on_data_received(self, data):\n # pylint: disable=too-many-branches,too-many-statements\n\n if self.is_receiving_data is True:\n self._buffer += data\n return\n\n try:\n self.is_receiving_data = True\n self._buffer += data\n\n # Keep looping while we have unprocessed data\n # We start processing only once we have an entire field\n # (e.g. 'id=value') in the buffer, otherwise wait for more\n # data.\n # The problem with the current approach is that if there is a\n # binary field with an incorrect length, we may read past\n # the end of the message.\n # BUGBUG: Need to fix this. A quick hack may be to\n # try to peek to see what the tag id is and do something\n # with that. On the other hand this may just be a problem\n # with the protocol (should probably specify a maximum\n # allowable length of a binary field as a sanity check)\n while (len(self._buffer) > 0 and\n self._buffer.find(b'\\x01', self._binary_length + 1) != -1):\n\n # Need to make sure that we have the entire binary field\n # before continuing the processing\n if (self._binary_length > 0 and\n len(self._buffer) < self._binary_length):\n break\n\n # break up the field\n delim = self._buffer.find(b'\\x01', self._binary_length + 1)\n field = self._buffer[:delim]\n self._buffer = self._buffer[delim+1:]\n\n tag_id, value = self._parse_field(field)\n\n # Is this the start of a message?\n if tag_id == 8:\n if self.is_parsing:\n raise FIXParserError('unexpected tag: 8')\n self.is_parsing = True\n elif not self.is_parsing:\n raise FIXParserError('message must start with tag 8')\n\n if self._debug:\n log_text(self._logger.debug, None,\n f\"tag {tag_id} = {repr(value)}\")\n\n self._update_length(field, tag_id, value)\n self._update_checksum(field, tag_id, value)\n self._update_binary(field, tag_id, value)\n\n # The tag value gets assigned here. Due to grouping\n # the container where the update takes place gets\n # changed\n # self._message[tag_id] = value\n self._update_field(tag_id, value)\n\n # Is this the end of a message?\n if tag_id == 10:\n self._receiver.on_message_received(self._message,\n self._message_length,\n self._checksum)\n self.reset()\n\n except FIXLengthTooLongError as err:\n self.reset(flush_buffer=True)\n self._receiver.on_error_received(err)\n except FIXParserError as err:\n self.reset(flush_buffer=True)\n self._receiver.on_error_received(err)\n finally:\n self.is_receiving_data = False",
"def _listener(self):\n while self.thread_run.is_set():\n if self._serial_bytes_available() >= len(self.MAGIC_HEADER) and \\\n self._check_for_start():\n report, retval, payload = self._receive_packet()\n if report >= 0:\n self.queue.put((report, retval, payload))\n if self.verbose:\n self.log(\"Put report {} on queue\".format(report))",
"def __async_read_callback(self, data, err) -> None:\n if err != 0:\n logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()",
"def rawDataReceived(self, data):\n self._buffer.append(data)\n self._bufferLength += len(data)\n\n if self._bufferLength >= self._expectedLength:\n receivedData = ''.join(self._buffer)\n expectedData = receivedData[:self._expectedLength]\n extraData = receivedData[self._expectedLength:]\n\n self._buffer = None\n self._bufferLength = None\n self._expectedLength = None\n\n self.datagramReceived(expectedData)\n self.setLineMode(extraData)",
"def dataReceived(self, data):\n if not self.disconnected:\n self.protocol.dataReceived(data)",
"def _on_connect(self, stream_reader, stream_writer):\n # Sometimes the remote side doesn't send the newline for the first\n # prompt. This causes our prompt matching to fail. Here we inject a\n # newline to normalize these cases. This keeps our prompt processing\n # simple.\n super().data_received(b\"\\n\")\n self._session._session_connected(stream_reader, stream_writer)",
"def stream_callback(self, in_data, frame_count, time_info, status_flags):\n time_delta = time.time() - self.last_frame_timestamp\n if time_delta == 0:\n time_delta = sys.float_info.min\n fps = int(1.0 / time_delta)\n self.last_frame_timestamp = time.time()\n if DEBUG:\n self.fps_deque.append(fps)\n avg_fps = 0\n for frame_fps in self.fps_deque:\n avg_fps += frame_fps\n avg_fps /= len(self.fps_deque)\n self.log_fps(self.last_frame_timestamp, fps)\n sys.stdout.write(\"\\r{} FPS\".format(avg_fps))\n sys.stdout.flush()\n self.data = np.frombuffer(in_data, dtype=np.float32)\n if self.binned:\n self.binned_fft()\n self.fft_callback(self.fft_bins_y)\n else:\n self.fft()\n self.fft_callback(self.spec_y)\n if self.send_osc:\n self.send_fft_osc()\n return (None, pyaudio.paContinue)",
"def lineReceived(self,line):\n print \"data received:\",line,self.delimiter",
"def data_available(self):\n\n self.run = True\n self.serial.reset_input_buffer()\n while self.run:\n if self.serial.in_waiting:\n data: str = self.serial.readline().decode(\"utf-8\")\n data = data.replace(\">>>\", \"\").lstrip()\n\n if len(data) > 0:\n self.output_text.config(state=NORMAL)\n self.output_text.insert(END, data)\n self.output_text.see(END)\n self.output_text.config(state=DISABLED)\n else:\n time.sleep(0.1)",
"def read_handler(socket, buf):\n while True:\n message = socket.recv(BUFFER_SIZE)\n if not message:\n break\n logging.debug(\"receiving data : %s\", message)\n\n try:\n message = json.loads(message)\n\n # handle callback functions\n if recv_data_callback is not None:\n event = message[\"event\"]\n data = message[\"data\"]\n recv_data_callback(event, data)\n except ValueError:\n logging.error(\"message must be json serialized\")\n\n buf.appendleft(message)\n socket.close()",
"def _receive_data(self, txt) -> None:\n\n if self.client_recv:\n self.numrecvs.next()\n _MTTRACE(\"RECV:\\n[%s]\\n\", txt)\n self.client_recv(txt, self.loopbacknode)\n else:\n self.numforcedrops.next()",
"def _listen(self):\n if not self.is_connected:\n self.connect()\n\n while True:\n data = self.recv()\n ping = PING_RE.match(data)\n if ping:\n self.handle_ping(ping.group(1))\n else:\n result = self.handle_message(data)\n\n if result:\n print(result)\n\n time.sleep(1)",
"def data_received(self, data):\n pass"
]
| [
"0.65784127",
"0.6168723",
"0.6104151",
"0.60715026",
"0.6070223",
"0.60645175",
"0.6064057",
"0.5994367",
"0.5993057",
"0.5985244",
"0.594018",
"0.5926664",
"0.59176093",
"0.5900453",
"0.58874613",
"0.58724445",
"0.5861622",
"0.58127826",
"0.5803357",
"0.5771629",
"0.57334226",
"0.5725302",
"0.56722933",
"0.5668473",
"0.5649677",
"0.5626492",
"0.5584699",
"0.5577506",
"0.55771554",
"0.55606997"
]
| 0.6448255 | 1 |
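The listener in the row above frames its input on a newline or an empty read after a short timeout, then fans each frame out to its read handlers. Below is a rough equivalent with no pyserial or gevent dependency — `FakeSerial`, `listen`, and the sample bytes are all assumptions made for this sketch:

```python
import io
import time

class FakeSerial:
    # A tiny stand-in for a serial port: read(1) returns one byte at a time
    # and b"" once the buffer is exhausted.
    def __init__(self, data):
        self._buf = io.BytesIO(data)
    def read(self, n=1):
        time.sleep(0.001)
        return self._buf.read(n)

def listen(ser, handlers, frames=2):
    for _ in range(frames):
        chars = []
        while True:
            c = ser.read(1)
            chars.append(c)
            if c in (b"\n", b""):       # end of line, or nothing more to read
                break
        frame = b"".join(chars)
        if frame.strip():
            for handle in handlers:     # dispatch the assembled frame
                handle(frame)

if __name__ == "__main__":
    listen(FakeSerial(b"ok 42\ntemp 21.5\n"), [lambda f: print("frame:", f)])
```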
Run a depth-first search from the start page down to a specified depth (n). | def depth_first_search(self, start_page: str, n: int = 1):
url = name_to_url(start_page)
i = self.add_node(WikiNode(url, 0))
stack = deque()
stack.append(i)
while stack:
nd = self.nodes[stack.pop()]
if nd.level > n:
continue
for link in nd.links:
if link not in self.node_map:
j = self.add_node(WikiNode(link, nd.level + 1))
stack.append(j) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def depth_limited_search(initial_state, goal_state, limit):\n\n return recursive_dls(createRootNode(initial_state), goal_state, limit)",
"def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1",
"def probe_stack(depth = 10):\n if depth == 0:\n return\n probe_stack(depth - 1)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()",
"def depth_first_traversal(self, start):\n return self.recursive_dft(start, [])",
"def depth_first_search(start, finish_line, next_moves):\n frontier = Stack()\n frontier.push(Move(start, None))\n searched = {start}\n full_search = []\n while not frontier.stuck:\n loc = frontier.pop()\n active = loc.current\n full_search.append(active)\n if finish_line(active):\n final_path = get_path(\"Depth First:\", loc)\n return final_path[1:], full_search[1:-1]\n for space in next_moves(active):\n if space not in searched:\n searched.add(space)\n frontier.push(Move(space, loc))\n return None, None",
"def depthFirstSearch(problem):\n container = util.Stack() \n return depthOrBreadthFirstSearch(problem, container)",
"def fn(x):\n nonlocal ans \n if x < ans: \n if min(depth) == n: ans = x # all tiled\n else: \n i = min(depth)\n j = jj = depth.index(i) # (i, j)\n while jj < m and depth[jj] == depth[j]: jj += 1\n k = min(n - i, jj - j)\n for kk in reversed(range(1, k+1)): \n for jj in range(j, j+kk): depth[jj] += kk\n fn(x+1)\n for jj in range(j, j+kk): depth[jj] -= kk",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n '''\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState((2,2))\n print \"Start's successors:\", problem.getSuccessors((1,1))\n suc=problem.getSuccessors(problem.getStartState())\n actionList=[]\n stateList=[]\n import random\n randomNum=random.randrange(0,len(suc),1)\n \n \n print len(suc)\n #for i in range(1000):\n while not problem.isGoalState(suc[randomNum][0]):\n\tprint randomNum\n\trandomNum=random.randrange(0,len(suc),1)\n\trandomAction=suc[randomNum][1]\n\t\n \t#print randomNum\n\tif suc[randomNum][0] not in stateList:\n\t\tstateList.append(suc[randomNum][0])\n\t\tactionList.append(randomAction)\n \t\tsuc=problem.getSuccessors(suc[randomNum][0]) \n \n #actionList.append(suc[randomNum][0])\n #if kiki==0:\n print actionList\n \n return actionList\n\n\n #util.raiseNotDefined()\n '''\n return DFS(problem,problem.getStartState(),[])",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)",
"def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)",
"def depth_first(board, depth):\n for mole in moles(board):\n new_board = hit(board, mole)\n if new_board == 0:\n return (mole,)\n elif depth > 0:\n result = depth_first(new_board, depth-1)\n if result:\n return (mole,) + result \n return False",
"def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in visited_nodes:\n nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()",
"def iterative_depth_search(self, board, player, t_max=30, min_depth=4, stop_at_depth=False):\n\n\t\tt_elapsed = 0.0\n\t\tbest_move, max_depth = None, 1\n\t\talpha, beta = -float('inf'), float('inf')\n\n\t\twhile max_depth <= min_depth or t_elapsed <= t_max:\n\t\t\tif stop_at_depth and max_depth > min_depth:\n\t\t\t\tbreak\n\n\t\t\tstart = time.time()\n\t\t\tbest_moves, best_val = self.alpha_beta_search(board, alpha, beta, player, 0, max_depth)\n\t\t\tt_elapsed += time.time() - start\n\t\t\tmax_depth += 1\n\t\t\tself.update()\n\n\t\t\t# Checkmate found.\n\t\t\tif abs(best_val) == float('inf'):\n\t\t\t\tself.moves_til_checkmate = len(best_moves)\n\t\t\t\tbreak\n\n\t\tbest_move = best_moves[0]\n\n\t\treturn best_move, best_val",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"",
"def crawl(self, q, depth = 0):\n\n\t\tindex = 1\n\t\tlast_results = None\n\n\t\twhile True:\n\t\t\tif index == 1:\n\t\t\t\tstart = 0\n\n\t\t\telse:\n\t\t\t\tstart = (index - 1) * 10\n\n\t\t\ttry:\n\t\t\t\tresults = self.search(q, start, 10)\n\n\t\t\texcept: continue\n\n\t\t\tif not results:\n\t\t\t\tbreak\n\n\t\t\tif last_results == results:\n\t\t\t\tbreak\n\n\t\t\tlast_results = results\n\n\t\t\tyield results\n\n\t\t\tif index == depth:\n\t\t\t\tbreak\n\n\t\t\tindex = index + 1",
"def depthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\tfrontera = util.Stack()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino",
"def depth_first_search(problem):\n fringe = util.Stack()\n return general_search(problem, fringe)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # current path stack\n path_stack = util.Stack()\n action_stack = util.Stack()\n path_stack.push(problem.getStartState())\n\n # visited (so don't )\n visited = []\n visited.append(problem.getStartState())\n\n i = 0\n while not path_stack.isEmpty():\n\n # check goal state\n if problem.isGoalState(path_stack.list[-1]): # check if goal\n return action_stack.list\n\n # get next possible state (choose first in list)\n successors = problem.getSuccessors(path_stack.list[-1])\n forward=False\n for successor in successors:\n ss,aa,_ = successor\n if ss not in visited:\n\n path_stack.push(ss)\n action_stack.push(aa)\n visited.append(ss) # you don't pop visited\n forward=True\n break\n\n # backtrack\n if forward==False:\n path_stack.pop()\n action_stack.pop()\n\n i+=1\n #if i==25:\n # import pdb; pdb.set_trace()\n #print(path_stack.list)",
"def depth_limited_search(problem, limit):\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path",
"def _loop_depth(self, start, connections):\n # This is just a slightly modified breadth-first search\n visited = {start: 1}\n frontier = [start]\n\n limit = []\n while len(frontier):\n node = frontier.pop(0)\n prev_depth = visited[node]\n if prev_depth >= self.depth:\n limit.append(node)\n continue\n\n for x in connections[node]:\n if x in visited:\n continue\n visited[x] = prev_depth + 1\n frontier.append(x)\n return limit",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Stack()\n\n # Retrieve the init state\n initState = (problem.getStartState(), ['Stop'], 0)\n open.push(initState)\n closed = []\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0]\n currPath = currState[1]\n currCost = currState[2]\n\n if problem.isGoalState(currPos):\n return currPath[1:]\n else:\n closed.append(currPos)\n if currState not in closed:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath+[each[1]], currCost+each[2])\n open.push(temp)\n return False",
"def dfs(x):\n if x <= n:\n ans.append(x)\n for xx in range(10): dfs(10*x + xx)",
"def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n #Stack to hold the node that have been visited along with the path taken from the start node to reach that node.\n stack = Stack()\n #Set to hold the node explored.\n explorednode = set()\n #Get the start node.\n startnode = problem.getStartState()\n #Push the starting node on the Stack along with an empty set to know the direction in order to reach the node.\n stack.push((startnode,[]))\n #Loop till the stack is empty\n while stack.isEmpty() is not True:\n #Pop the currentnode and the direction from the stack\n currentnode, direction = stack.pop()\n #We will now add the node to set of explored node.\n explorednode.add(currentnode)\n #If the node is the goal. We made it!!\n if problem.isGoalState(currentnode):\n #print currentnode, direction\n #The direction holds the way to reach till the goal from the start node.\n #print direction\n return direction\n #Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n #If the successor(child) is not explored\n if successor not in explorednode:\n #Add the successor to the stack along with the path to reach it.\n stack.push((successor, direction + [action]))",
"def recursive_search(i, F, t, s, explored, leaders, order):\n x = len(explored)\n if x % 10 == 0:\n print(\"Length of explored: {}\".format(x))\n explored.append(i)\n if order == 2:\n leaders[i] = s\n arc_list = db.Database.find_one(collection=\"biggraph\", query={\"key\": i})\n if arc_list:\n for node in arc_list['value']:\n if node not in explored:\n F, t, leaders, explored = recursive_search(node, F, t, s, explored, leaders, order)\n if order == 1:\n t += 1\n F[i] = t\n return F, t, leaders, explored",
"def _dfsearch_recursive(self, footprint):\n self.visited[footprint] = 1\n self.temp_component.append(footprint)\n for neighbour in self.neighbours[footprint]:\n if self.visited[neighbour] == 0:\n self._dfsearch(neighbour)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]"
]
| [
"0.6694698",
"0.614414",
"0.6132657",
"0.60701245",
"0.6066893",
"0.595954",
"0.5902348",
"0.5713828",
"0.5664526",
"0.56568605",
"0.5646772",
"0.56425816",
"0.56281155",
"0.5602723",
"0.5602259",
"0.5561969",
"0.5543314",
"0.5535719",
"0.55312914",
"0.5527265",
"0.55212325",
"0.55186474",
"0.5494138",
"0.54876214",
"0.5466281",
"0.5455387",
"0.5454633",
"0.5429046",
"0.54280084",
"0.54211515"
]
| 0.7672866 | 0 |
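The crawler in the row above runs an iterative, depth-limited DFS: pop a node, skip it if it is past depth n, and push unseen neighbours at depth + 1. A self-contained sketch of the same traversal over a hypothetical `LINKS` graph instead of live Wikipedia pages:

```python
from collections import deque

# A canned link graph stands in for live page fetches.
LINKS = {
    "Graph": ["Tree", "Edge"],
    "Tree": ["Leaf"],
    "Edge": [],
    "Leaf": [],
}

def depth_first_search(start, n=1):
    seen = {start: 0}                 # page -> depth at which it was first reached
    stack = deque([start])
    while stack:
        page = stack.pop()
        level = seen[page]
        if level > n:                 # past the depth limit: record it, don't expand
            continue
        for link in LINKS.get(page, []):
            if link not in seen:
                seen[link] = level + 1
                stack.append(link)
    return seen

if __name__ == "__main__":
    print(depth_first_search("Graph", n=1))
    # -> {'Graph': 0, 'Tree': 1, 'Edge': 1, 'Leaf': 2}
```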
Caches current values of this scaler. | def cache(self):
self.cached_mu = self.mu.eval()
self.cached_var = self.var.eval()
self.cached_count = self.count.eval() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cache_values(self):\n width = self.width.current\n center = self.horizontal.current\n\n # If center would need to be rounded, increase the width by 1 to make a smoother fade between LEDs.\n if center % 2 != 0:\n width += 1\n # Moving right\n if center < self.horizontal.target:\n center += 1\n # Moving left\n else:\n center -= 1\n\n self.pi_inc = PI_2 / width\n self.first_led = round(center - (width / 2))\n self.last_led = self.first_led + width\n\n # Start at the bottom of the curve, to provide a smooth fade up\n self.start_x = (width * -1.25) * self.pi_inc",
"def cache(self):\n return {'output': self.output, 'series': self.series}",
"def _reset_cache(self):\n self._tick_positions = array([], dtype=float)\n self._tick_extents = array([], dtype=float)\n self._cache_valid = False\n return",
"def _cache_data(self):\n while self._run:\n try:\n values = self._data_streamer.get_data_current_state()\n for parameter, mapping_method in self._mapping.items():\n value = values[parameter]\n mapped_notes = self._data_streamer.get_mapper_for_param(parameter, mapping_method[0]).map(value)\n self._value_queues[parameter].put((value,mapped_notes))\n except Exception, e:\n print e.message",
"def _reload_values(self):\r\n raise NotImplementedError",
"def cache_all(self):\n if not self._cached_all:\n poss = range(len(self))\n uuids = self.vars['uuid']\n\n cls_names = self.variables['cls'][:]\n samples_idxss = self.variables['samples'][:]\n subchanges_idxss = self.variables['subchanges'][:]\n mover_idxs = self.variables['mover'][:]\n details_idxs = self.variables['details'][:]\n try:\n input_samples_vars = self.variables['input_samples']\n except KeyError:\n # BACKWARD COMPATIBILITY: REMOVE IN 2.0\n input_samples_idxss = [[] for _ in samples_idxss]\n else:\n input_samples_idxss = input_samples_vars[:]\n\n [self._add_empty_to_cache(*v) for v in zip(\n poss,\n uuids,\n cls_names,\n samples_idxss,\n input_samples_idxss,\n mover_idxs,\n details_idxs)]\n\n [self._load_partial_subchanges(c, s) for c, s in zip(\n self,\n subchanges_idxss)]\n\n self._cached_all = True",
"def cache(self):\r\n data = self.input.data\r\n f, cache = tsa.cache_fft(data,\r\n self.ij,\r\n lb=self.lb,\r\n ub=self.ub,\r\n method=self.method,\r\n prefer_speed_over_memory=self.prefer_speed_over_memory,\r\n scale_by_freq=self.scale_by_freq)\r\n\r\n return cache",
"async def cached_values(cls):\n if cls.__cached__ is None:\n cls.__cached__ = await cls.values()\n return cls.__cached__",
"def _prepare_cache(self, value):\n\n return deepcopy(value)",
"def set_cache(self, val):\n pass",
"def warmup_cache(self):\n self.get_whitespace_changes()\n self.get_cvsheader_changes()\n self.get_unmodified_changes()\n self.get_used_changes()\n self.get_zapped_changes()\n self.get_undecided_changes()",
"def cacheSimSpeeds(self):\n self._cached_start_speed = self.sim_state['start_speed']\n self._cached_top_speed = self.sim_state['top_speed']\n self._cached_cutoff_speed = self.sim_state['cutoff_speed']",
"def reset_values(self):\n\n self.values = []",
"def _clear_cache(self):\n super(ShootingSolver, self)._clear_cache()\n self.__numeric_jacobian = None\n self.__numeric_system = None\n self.__ivp = None",
"def process_values(self):\r\n \r\n if self.padding>0:\r\n channels = np.zeros((self.vals.shape[0], self.vals.shape[1]+self.padding))\r\n channels[:, 0:self.vals.shape[0]] = self.vals\r\n else:\r\n channels = self.vals\r\n vals_mat = self.skel.to_xyz(channels.flatten())\r\n self.vals = np.zeros_like(vals_mat)\r\n # Flip the Y and Z axes\r\n self.vals[:, 0] = vals_mat[:, 0].copy()\r\n self.vals[:, 1] = vals_mat[:, 2].copy()\r\n self.vals[:, 2] = vals_mat[:, 1].copy()",
"def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints",
"def reload_cache(self):\n self.data = self.read_data_cache()",
"def _reset(self):\n self._values = {}",
"def _get_cached_value(self, value):\n\n if self._refreshable is True and self.is_expired is False:\n self.refresh()\n\n return deepcopy(value)",
"def _cache_state(self, instance):\n if instance.pk:\n instance.__cache_data = dict((f, getattr(instance, f)) for f in self.cache_fields)\n else:\n instance.__cache_data = UNSAVED",
"def reset_values(self):\n\n self.values = np.array([])",
"def array(self):\n if self._ind < self.shape[0]:\n return self._values[:self._ind]\n if not self._cached:\n ind = self._ind % self.shape[0]\n self._cache[:self.shape[0] - ind] = self._values[ind:]\n self._cache[self.shape[0] - ind:] = self._values[:ind]\n self._cached = True\n return self._cache",
"def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()",
"def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])",
"def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])",
"def reset(self) -> None:\n self._dist['current'] = np.copy(self._dist['initial'])",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache"
]
| [
"0.68166256",
"0.6293583",
"0.6013564",
"0.5906083",
"0.58870524",
"0.5878629",
"0.5794002",
"0.5788975",
"0.5729468",
"0.5700146",
"0.5641709",
"0.5626525",
"0.562225",
"0.5617957",
"0.5611663",
"0.5600222",
"0.5584668",
"0.55765784",
"0.55575067",
"0.5549433",
"0.5529761",
"0.55256474",
"0.55004823",
"0.54977375",
"0.54977375",
"0.54852206",
"0.5465381",
"0.5465381",
"0.5465381",
"0.5465381"
]
| 0.6696546 | 1 |
Loads values from the cache | def load_cache(self):
self.mu.load(self.cached_mu)
self.var.load(self.cached_var)
self.count.load(self.cached_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_cache():\n return {}",
"def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return",
"def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))",
"def getData(self, local_cache):",
"def _retrieveCachedData(self):",
"def reload_cache(self):\n self.data = self.read_data_cache()",
"def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']",
"def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result",
"def cache(self):\n self.cached_mu = self.mu.eval()\n self.cached_var = self.var.eval()\n self.cached_count = self.count.eval()",
"def __read_cache(self, fileName):\n if self.__log:\n self.__logger.info(f\"Cache hit - {fileName}\")\n # Cache hit\n with open(fileName, \"rb\") as f:\n content = self.__handle_decompression(f.read())\n variables = pickle.loads(content)\n\n # Move node to front\n node = os.path.relpath(fileName, \"cache\")\n self.__shift_node(node)\n\n return variables",
"def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache",
"def fill_cache(cache, values_dict):\n cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)",
"def retrieve_cached_records(self):\r\n return u.load_cached_data(self.records_cache_path)",
"async def cached_values(cls):\n if cls.__cached__ is None:\n cls.__cached__ = await cls.values()\n return cls.__cached__",
"def __getitem__(self,key):\n result = None\n # check if it's tin the cache first\n if key in self._cache:\n result = self._cache[key]\n else:\n # it's not in the cache so retrieve it\n result = self._get_from_tree(key)\n # remove None values\n result = [x for x in result if x is not None]\n self._cache[key] = result\n\n return result",
"def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)",
"def load_prev_val_cache():\n try:\n f = open(Filenames.VALUE_CACHE, 'r')\n j = json_load_as_ascii(f)\n except:\n print \"The previous value cache (%s) doesn't exist, or its JSON is corrupt. Creating empty.\" % (\n Filenames.VALUE_CACHE\n )\n f = open(Filenames.VALUE_CACHE, 'w')\n j = {}\n json.dump(j, f)\n f.close()\n return j",
"def get_cache(self, key):\n return self.r.get(key)",
"async def get() -> list:\n if _cache is None:\n await _update()\n return _cache",
"def load_cache(self, filename=None):\n try:\n if not os.path.getsize(self._cache_filename(filename)):\n print(\"On-disk cache empty\")\n return\n\n with open(self._cache_filename(filename), \"rb\") as fh:\n cached = pickle.load(fh)\n self.name_cache = cached.name_cache\n self.run_cache = cached.run_cache\n self.row_cache = cached.row_cache\n self.extend(cached)\n print(\"On-disk cache loaded\")\n except OSError: # (FileNotFoundError is Python 3 only)\n print(\"On-disk cache not found\")",
"def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}",
"def __getitem__(self, key):\n if self._cache is None:\n self._cache = self.storer.read()\n\n try:\n return self._cache[key].values\n except KeyError:\n return self._get_constant_array(key)",
"def load(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss",
"def load(self):\n all_ = self._fetch_features()\n features = {f.name: f for f in all_}\n self._cache = {n: self._state(features.get(n))\n for n in FEATURES.keys()}",
"def get(key, fxn_load):\n CacheLayers.fill_request_cache()\n\n if not request_cache.cache.get(key):\n request_cache.cache[key] = fxn_load()\n\n return request_cache.cache[key]",
"def __loadCacheTokens(self):\n with open(self.configFile, \"r\") as f:\n self.orgConf.readfp(f)\n if not self.cacheSection in self.orgConf:\n return\n t = self.orgConf[self.cacheSection]\n if self.ACCESS_TOKEN in t:\n self.accessToken = t[self.ACCESS_TOKEN]\n if self.REFRESH_TOKEN in t:\n self.refreshToken = t[self.REFRESH_TOKEN]",
"def get(self, key):\n return self.cache_data.get(key)",
"def retrieve_cached_decisions(self):\r\n return u.load_cached_data(self.decisions_cache_path)",
"def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)",
"def get(self, key):\n # Initialize key variables\n result = self.cache.get(key)\n\n # Return\n return result"
]
| [
"0.7575775",
"0.7280087",
"0.7223931",
"0.7177263",
"0.70998794",
"0.70036113",
"0.69161636",
"0.67685753",
"0.67554885",
"0.6736556",
"0.66704935",
"0.66339123",
"0.65642005",
"0.6550079",
"0.65268654",
"0.64958376",
"0.6484298",
"0.6474443",
"0.64711505",
"0.6470632",
"0.6460502",
"0.6450383",
"0.6420358",
"0.6419692",
"0.63777804",
"0.6369842",
"0.63549364",
"0.63521266",
"0.634942",
"0.63292974"
]
| 0.82253915 | 0 |
Run spark job that will associate vendor's users (loaded from REST API) with our users (loaded from MySQL) | def run(sc, logger):
start = time.time()
# Spark SQL Context
sqlContext = sql.context.SQLContext(sc)
# 1) Practices - Small table so no need to partition it, could even broadcast across all executors
# Spark SQL JDBC Options: https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html
practicesRDD = getPracticesRDD(sqlContext)
# 1) Debug Practices
logger.info("-------------")
logger.info("1a) Practices RDD: %s (%s partition(s))" % (practicesRDD, practicesRDD.getNumPartitions()))
logger.info("1b) Count Practices: %s \n" % practicesRDD.count())
# 2) User Table - Big table so need to read in batches, so partition the table by id
# Spark SQL JDBC Options: https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html
usersRDD = getUsersRDD(sqlContext)
# 2) Debug Users
logger.info("-------------")
logger.info("2a) Users RDD: %s (%s partition(s))" % (usersRDD, usersRDD.getNumPartitions()))
logger.info("2b) Count Users: %s \n" % usersRDD.count())
# 3) Join Users and Practices (user.practice_id = user_practice.id)
joinedUsersRDD = usersRDD \
.join(practicesRDD) \
.persist(StorageLevel.MEMORY_AND_DISK) \
.map(lambda row: ((row[1][0].firstname, row[1][0].lastname, row[1][1].location, row[1][0].specialty), UserReference(row[1][0].id, row[1][0].last_active_date)))
# 3) Debug Joined Users
logger.info("-------------")
logger.info("3a) Joined Users RDD: %s (%s partition(s))" % (joinedUsersRDD, joinedUsersRDD.getNumPartitions()))
logger.info("3b) Count Joined Users: %s \n" % joinedUsersRDD.count())
# 4) Vendor Users
vendorUsersRDD = vendor.getVendorUsers(sc) \
.persist(StorageLevel.MEMORY_AND_DISK) \
.map(lambda row: ((row["firstName"], row["lastName"], row["practiceLocation"], row["specialty"]), row))
# 4) Debug Vendor Users
logger.info("-------------")
logger.info("4a) Vendor Users RDD: %s (%s partition(s))" % (vendorUsersRDD, vendorUsersRDD.getNumPartitions()))
logger.info("4b) Count Vendor Users: %s \n" % vendorUsersRDD.count())
    # 5) Enriched Users (associate our user record with the vendor user)
# Since we already have our users loaded in our db, just gonna load the vendor user with a pointer to our user
enrichedRDD = joinedUsersRDD \
.rightOuterJoin(vendorUsersRDD) \
.persist(StorageLevel.MEMORY_AND_DISK) \
.map(lambda row: enrich(row))
# 5) Debug Enriched Users
logger.info("-------------")
logger.info("5a) Enriched Users RDD: %s (%s partition(s))" % (enrichedRDD, enrichedRDD.getNumPartitions()))
logger.info("5b) Count Enriched Users: %s \n" % enrichedRDD.count())
# 6) Save to disk in JSON
logger.info("-------------")
# Calculate how long it took for the job to run
end = time.time()
elapsed = end - start
# Job Output (write to stdout and output.txt)
output = JobOutput(logger, elapsed, usersRDD, vendorUsersRDD, enrichedRDD)
output.write() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def myjob(spark: SparkSession, **kwargs):\n df = spark.read.csv(\"spark-data/climatewatch-usemissions.csv\")\n\n df.show()",
"def main():\n sc = pyspark.SparkContext(conf=sparkConf())\n sql = pyspark.SQLContext(sc)\n args = parse_args()\n cleanOutputDir(args.output)\n users = os.listdir(args.input)\n map(lambda user: parseUser(user, args, sql, args.partitions), users)\n corpora_stats(args.output)\n append_corpus(args.output)",
"def spark_user(self, sparkUser):\n self.executor_env(key='SPARK_USER', value=sparkUser)\n return self",
"def spark(self, *args, **kwargs):\n self.spark_submit(*args, **kwargs)",
"def execute(self, context):\n\n self._hook = SparkSubmitHook(\n conf=self._conf,\n conn_id=self._conn_id,\n ssh_conn_id=self._ssh_conn_id,\n files=self._files,\n py_files=self._py_files,\n driver_classpath=self._driver_classpath,\n jars=self._jars,\n java_class=self._java_class,\n packages=self._packages,\n exclude_packages=self._exclude_packages,\n repositories=self._repositories,\n total_executor_cores=self._total_executor_cores,\n executor_cores=self._executor_cores,\n executor_memory=self._executor_memory,\n driver_memory=self._driver_memory,\n keytab=self._keytab,\n principal=self._principal,\n name=self._name,\n num_executors=self._num_executors,\n application_args=self._application_args,\n env_vars=self._env_vars,\n verbose=self._verbose,\n dataeng_spark=self.dataeng_spark,\n dataeng_spark_pyenv_path=self.dataeng_spark_pyenv_path\n\n )\n self._hook.submit(self._application)",
"def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)",
"def _do_action_import_user_info(self):\n self._run_express_job(\n \"org.kiji.tutorial.load.UserInfoImporter\",\n options=\"--user-info ml-100k/u.user\"\n )\n self._scan_table(\"users\")",
"def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)",
"def parseUser(user, args, sql, numPartitions):\n print(\"parsing user {}\".format(user))\n df = parse_raw(sql, args.input, user)\n df.repartition(numPartitions)\n df = select(df)\n df = df.rdd.map(lambda x: mapRow(x)).toDF()\n df = format(df, args.format, args.documentformat)\n writeToFile(df, args.parallelized, args.output, user, args.format, args.documentformat)",
"def submit_job_emr(self, context):\n # Get snowflake connection details based on conn_id\n self.hook = SnowFlakeHook(conn_id=self.conn_id)\n self.conn = self.hook.get_conn()\n\n # Update the parameters for the spark job\n # to use the snowflake conn details\n import base64\n self.parameters.update({'account_name': self.conn.host,\n 'database': self.conn.schema,\n 'username': self.conn.login,\n 'password': base64.b64encode(self.conn.password),\n 'warehouse': self.conn.extra_dejson.get('warehouse', ''),\n 'role': self.conn.extra_dejson.get('role', '')})\n\n # Set spark job related configs if provided\n spark_configs = self.parameters.get('spark_configs', ' ')\n if self.packages:\n spark_packages=self.packages\n else:\n spark_packages = ' --packages net.snowflake:snowflake-jdbc:3.4.2,net.snowflake:spark-snowflake_2.11:2.2.8 '\n geniesnowflake_sparkjob = 's3://nike-emr-bin/' + self.env_type + '/common/scripts/geniesnowflake_sparkload.py '\n\n import json\n self.command_json = json.dumps(self.parameters)\n self.conn_id = self.emr_conn_id\n self.command = ' --master yarn --deploy-mode cluster ' + \\\n spark_configs + \\\n spark_packages + \\\n geniesnowflake_sparkjob + \\\n self.command_json\n super(GenieSnowflakeOperator, self).execute(context)\n self.conn_id = self.snow_conn_id",
"def main():\n # create a Spark session\n spark = create_spark_session()\n\n # set input & output data locations\n input_data = \"data/\"\n output_data = \"results/\"\n\n # Gather/read the datasets\n df_visits = spark.read.parquet(\"data/immigration_data\")\n df_demo = spark.read.csv(\"data/us-cities-demographics.csv\", sep=\";\", header=True)\n df_airports = spark.read.csv(\"data/airport-codes_csv.csv\", header=True)\n df_airport_codes = get_airport_codes(spark)\n df_countries = get_countries(spark)\n df_states = get_states(spark)\n df_visa = get_visa(spark)\n\n # clean the datasets\n df_airports_clean = clean_airport_codes(spark,df_airports)\n df_demo_clean= clean_demographics(spark,df_demo)\n df_visits_clean = clean_immigration_data(spark, df_visits, df_airport_codes, df_countries, df_states, df_visa)\n\n # load the fact and dimensions in parquet files\n load_dimensions(output_data, df_countries, df_states, df_visa, df_demo_clean, df_airports_clean)\n load_fact(spark,output_data, df_visits_clean)\n\n # run validation checks\n validate_dimensions(spark,['dim_visa','dim_state','dim_country','dim_us_demo','dim_airports'],output_data)\n validate_fact(spark,'fact_visits',output_data)",
"def process(self):\n if not hasattr(self, \"spark\") or self.spark is None:\n self.log.info(\"PySpark has failed to initialize. No mapper will be processed\")\n return\n\n # Define the date range to read from Cassandra\n from_date = datetime.datetime(2020, 1, 1, 0, 0)\n to_date = datetime.datetime.now()\n\n # - Since the \"user_logs\" store extremely large data (around tens of millions rows per day),\n # it is hard to store all data on a local computer's RAM\n # - Instead, we will divide the date range in multiple \"days\", and perform needed\n # aggregations per day, and union into a final results. This will save a lot of RAM\n # and make it possible to read billions of records of event logs using just one normal\n # machine (running Spark in local mode)\n #\n # Below is a process reading 20 billions of rows in the event logs using a 8 GB RAM PC\n diff = to_date - from_date\n days_diff = math.ceil(diff.days + diff.seconds / 86400)\n df_final_results = None\n\n for i in range(days_diff + 1):\n to = from_date + datetime.timedelta(days=1) - datetime.timedelta(hours=1)\n df_temp = self.create_log_dataframe(from_date, to) \\\n .rdd \\\n .filter(lambda row: _filter_btn_add_to_cart_clicked(row)) \\\n .map(lambda row: _map_to_output(row)) \\\n .toDF(\"date\") \\\n .groupBy(\"date\") \\\n .count()\n\n if df_final_results is None:\n df_final_results = df_temp\n else:\n df_final_results = df_final_results.union(df_temp)\n\n df_final_results.show(truncate=False)\n \"\"\"\n The showed results represent the total Add-To-Cart Button click per day, example:\n \n +--------+--------+\n | date | count |\n +--------+--------+\n |20200613| 13 |\n +--------+--------+\n |20200614| 212 |\n +--------+--------+\n |20200615| 131 |\n +--------+--------+\n |20200616| 1 |\n +--------+--------+\n |20200617| 26 |\n +--------+--------+\n \"\"\"",
"def getUsersRDD(sqlContext):\n # Currently the id field ranges from '0' to '1000000'.\n # To avoid loading it all in memory, partition on the id field (100 partitions, about 10k records per partition).\n # Also setting fetch size to 10,000 to avoid multiple database calls per partition.\n # All records from a single partition will come in a single query.\n # If we need to use less memory, we can increase the # of partitions and decrease the lower/uppper bounds.\n # We are also relying on Spark to spill to disk if no memory is available.\n from db import *\n return sqlContext \\\n .read \\\n .format(\"jdbc\") \\\n .options(\n driver=driver,\n url=url,\n dbtable=\"user\",\n user=user,\n password=password,\n fetchSize=10000,\n numPartitions=100,\n partitionColumn=\"id\",\n lowerBound=0,\n upperBound=1000000\n ) \\\n .load() \\\n .rdd \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: (row.practice_id, row)) # We are setting practice_id as the key here because we'll use that to join with user_practice table",
"def read_kafka():\n hosts = \"192.168.30.141:6667,192.168.30.140:6667,192.168.30.139:6667\"\n topic = \"sfy_v2.User\"\n\n spark = SparkSession.builder.master('yarn').appName(\"GetUsers\").getOrCreate()\n\n events = spark.readStream.format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", hosts) \\\n .option(\"startingOffsets\", \"earliest\") \\\n .option(\"subscribe\", topic) \\\n .option(\"failOnDataLoss\", False) \\\n .load()\n events = events.selectExpr(\"CAST(value AS STRING)\")\n schema = StructType([\n StructField(\"mobile\", StringType(), True),\n StructField(\"name\", StringType(), True),\n StructField(\"created_time\", TimestampType(), True),\n ])\n\n data = events.select(from_json(events.value, schema).alias(\"User\")) \\\n .selectExpr(\"User.mobile\", \"User.name\", \"User.created_time\")\n data.createOrReplaceTempView(\"NewUser\")\n user_count = data.withWatermark(\"created_time\", \"10 minutes\") \\\n .groupBy(window(\"created_time\", \"5 minutes\", \"5 minutes\")).count() \\\n .selectExpr(\"window.start\", \"window.end\", \"count\")\n\n # spark.sql(\"select * from User4\").show()\n\n # query1 = user_count \\\n # .selectExpr(\"CAST(key AS STRING)\", \"CAST(value AS STRING)\") \\\n # .writeStream \\\n # .format(\"kafka\") \\\n # .option(\"kafka.bootstrap.servers\", hosts) \\\n # .option(\"topic\", \"user_count\") \\\n # .option(\"checkpointLocation\", \"/data/spark/checkpoint\") \\\n # .outputMode(\"update\") \\\n # .trigger(processingTime='5 minutes') \\\n # .queryName(\"user_count\") \\\n # .start()\n\n # query1 = user_count \\\n # .writeStream \\\n # .outputMode(\"append\") \\\n # .format(\"console\") \\\n # .trigger(processingTime='5 minutes') \\\n # .queryName(\"User1\") \\\n # .start()\n\n query1 = user_count \\\n .writeStream \\\n .outputMode(\"update\") \\\n .format(\"console\") \\\n .trigger(processingTime='5 minutes') \\\n .queryName(\"User1\") \\\n .start()\n\n query1.awaitTermination()\n query1.stop()",
"def main():\n # Initiate Spark Session\n spark = create_spark_session()\n \n # Data files\n # Root Data Path\n # Uncomment below line for AWS S3\n #input_data = \"s3a://udacity-dend\"\n # Uncomment below line for local files\n input_data = \"data\"\n\n # Warehouse\n # Root WH\n # Uncomment below line for AWS S3\n #output_data = \"s3a://jerryespn-project-out\"\n # Uncomment below line for local files\n output_data = \"spark-warehouse\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)",
"def create_sparksession():\n return SparkSession.builder.\\\n appName(\"Transforming the historical parking occupancy and blockface datasets\").\\\n getOrCreate()",
"def process_log_data(spark, input_data, output_data):\n \n # get filepath to log data file\n log_data = os.path.join(input_data,\"log_data/*/*/*.json\")\n\n\n # read log data file\n df = spark.read.json(log_data)\n \n # filter by actions for song plays\n #df = \n\n # extract columns for users table \n users_table = df['userId', 'firstName', 'lastName', 'gender', 'level'].dropDuplicates()\n \n # write users table to parquet files\n users_table.write.parquet(os.path.join(output_data, 'users.parquet'), 'overwrite')\n print(\"--- users.parquet completed ---\")\n\n # create timestamp column from original timestamp column\n get_timestamp = udf(lambda x: datetime.fromtimestamp( (x/1000.0) ), T.TimestampType())\n # create datetime column from original timestamp column\n get_datetime = udf(lambda x: datetime.fromtimestamp(int(int(x)/1000)))\n get_hour = udf(lambda x: x.hour, T.IntegerType()) \n get_day = udf(lambda x: x.day, T.IntegerType()) \n get_week = udf(lambda x: x.isocalendar()[1], T.IntegerType()) \n get_month = udf(lambda x: x.month, T.IntegerType()) \n get_year = udf(lambda x: x.year, T.IntegerType()) \n get_weekday = udf(lambda x: x.weekday(), T.IntegerType()) \n\n df = df.withColumn(\"timestamp\", get_timestamp(df.ts))\n df = df.withColumn('start_time', get_datetime(df.ts))\n df = df.withColumn(\"hour\", get_hour(df.timestamp))\n df = df.withColumn(\"day\", get_day(df.timestamp))\n df = df.withColumn(\"week\", get_week(df.timestamp))\n df = df.withColumn(\"month\", get_month(df.timestamp))\n df = df.withColumn(\"year\", get_year(df.timestamp))\n df = df.withColumn(\"weekday\", get_weekday(df.timestamp))\n \n \n # extract columns to create time table\n time_columns = ['start_time', 'hour', 'day', 'week', 'month', 'year', 'weekday'] \n \n # write time table to parquet files partitioned by year and month\n time_table = df[time_columns]\n \n # write time table to parquet files partitioned by year and month\n time_table.write.partitionBy('year', 'month').parquet(os.path.join(output_data, 'time.parquet'), 'overwrite')\n print(\"--- time.parquet completed ---\")\n \n # read in song data to use for songplays table\n df_songs = spark.read.parquet(os.path.join(output_data, 'songs.parquet'))\n \n df_songplays = df_songs.join(df, (df_songs.title == df.song)).where(df.page == 'NextSong').orderBy(df.timestamp)\n # extract columns from joined song and log datasets to create songplays table \n songplays_table = df_songplays['timestamp', 'userId', 'level', 'song_id', 'artist_id', 'sessionId', 'location', 'userAgent']\n songplays_table.select(monotonically_increasing_id().alias('songplay_id')).collect()\n\n # write songplays table to parquet files partitioned by year and month\n songplays_table\\\n .withColumn(\"year\", get_year(songplays_table.timestamp))\\\n .withColumn(\"month\", get_month(songplays_table.timestamp))\\\n .write\\\n .partitionBy('year', 'month')\\\n .parquet(os.path.join(output_data, 'songplays.parquet'), 'overwrite')\n \n print(\"--- songplays.parquet completed ---\")\n print(\"*** process_log_data completed ***\\n\\nEND\")",
"def process_log_data(spark, input_data, output_data):\n\n # get filepath to log data file\n log_data = os.path.join( input_data, \"log-data/*/*/*.json\")\n\n # read log data file\n df = spark.read.json(log_data)\n\n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n \n # USERS TABLE\n # extract columns for users table\n users_table = df.select(\"userId\",\"firstName\",\"lastName\",\"gender\",\"level\").dropDuplicates(['userId'])\n \n print( \"HERE users_table sample:\\n\")\n users_table.show(5)\n # write users table to parquet files\n users_table.write.parquet(os.path.join(output_data, \"users/\") , mode=\"overwrite\")\n\n # TIME TABLE\n # create timestamp column from original timestamp column\n get_start_time = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%Y-%m-%d %H:%M:%S'))\n get_hour = udf(lambda x: datetime.fromtimestamp(x / 1000.0).hour)\n get_day = udf(lambda x: datetime.fromtimestamp(x / 1000.0).day)\n get_week = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%W'))\n get_month = udf(lambda x: datetime.fromtimestamp(x / 1000.0).month)\n get_year = udf(lambda x: datetime.fromtimestamp(x / 1000.0).year)\n get_weekday = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%A'))\n\n df = df.withColumn('start_time', get_start_time(df['ts']))\n df = df.withColumn('hour', get_hour(df['ts']))\n df = df.withColumn('day', get_day(df['ts']))\n df = df.withColumn('week', get_week(df['ts']))\n df = df.withColumn('month', get_month(df['ts']))\n df = df.withColumn('year', get_year(df['ts']))\n df = df.withColumn('week_day', get_weekday(df['ts'])).dropDuplicates(['start_time'])\n\n df.createOrReplaceTempView(\"time_table\")\n \n time_columns = ['start_time', 'hour', 'day', 'week', 'month', 'year', 'week_day']\n\n # extract columns to create time table\n time_table = spark.sql(\"\"\"\n SELECT start_time, hour, day, week, month, year, week_day\n FROM time_table\n \"\"\").toDF(*time_columns)\n \n print( \"HERE time_table sample:\\n\")\n time_table.show(5)\n # write time table to parquet files partitioned by year and month\n time_table.write.parquet(os.path.join(output_data, \"time_table/\"), mode='overwrite', partitionBy=[\"year\",\"month\"])\n\n # SONGPLAYS TABLE\n # add monotonically increasing id column\n df = df.withColumn('songplay_id', functions.monotonically_increasing_id())\n df.createOrReplaceTempView(\"songplays_table\")\n\n # song df\n song_data = os.path.join( input_data, \"song_data/*/*/*/*.json\")\n song_df = spark.read.json(song_data).dropDuplicates()\n song_df.createOrReplaceTempView(\"songs_table\")\n\n song_columns = ['songplay_id', 'start_time', 'userId', 'level', 'sessionId', 'location', 'userAgent', 'year', 'month',\n 'length', 'song_id', 'artist_id', 'title', 'artist_name', 'duration']\n\n # extract columns to create time table\n songplays_table = spark.sql(\n \"\"\"\n SELECT sp.songplay_id, sp.start_time, sp.userId, sp.level, sp.sessionId, sp.location, sp.userAgent, sp.year, \n sp.month, sp.length, s.song_id, s.artist_id, s.title, s.artist_name, s.duration\n FROM songplays_table AS sp \n JOIN songs_table AS s ON sp.song = s.title AND sp.artist = s.artist_name AND sp.length = s.duration\n \"\"\").toDF(*song_columns)\n \n print( \"HERE songplays_table sample:\\n\")\n songplays_table.show(5)\n # write songplays table to parquet files partitioned by year and month\n songplays_table.write.parquet(os.path.join(output_data, \"songplays/\"), mode=\"overwrite\", partitionBy=[\"year\",\"month\"])",
"def main():\n bootstrapping.CommandStart('dataflow-sql', component_id='dataflow-sql')\n bootstrapping.CheckUpdates('dataflow-sql')\n update_manager.UpdateManager.EnsureInstalledAndRestart(\n ['dataflow-sql'], command=__file__)\n java_bin = java.RequireJavaInstalled('Dataflow SQL')\n bootstrapping.ExecuteJavaClass(\n java_bin,\n jar_dir=_JAR_DIR,\n main_jar=_MAIN_JAR,\n main_class=_CLASSNAME,\n main_args=['-nn', 'DFSQL', '-u', 'jdbc:beam:userAgent=DataflowSQL'])",
"def main(): \n spark = create_spark_session()\n print(\"Spark Session Created\")\n\n #Print S3 bucket location\n s3_bucket=os.environ[\"s3_bucket\"]\n s3_bucket = s3_bucket.replace(\"'\", \"\")\n \n print (s3_bucket)\n \n #Invoke Functions to process data\n process_data(spark, s3_bucket)",
"def launch_training_job(master_nodes, trainset_date, opts, ec2_opts):\n # TODO: check whether HDFS is running\n # TODO: check whether YARN is running\n master = master_nodes[0].public_dns_name\n print(\"Setting up HDFS on the cluster..\")\n ssh(host=master, opts=ec2_opts, command=\"chmod u+x /root/spark-ec2/setup_pricer_data.sh\")\n ssh(host=master, opts=ec2_opts, command=\"/root/spark-ec2/setup_pricer_data.sh\")\n print(\"Running trainer with train date={d}..\".format(d=trainset_date))\n ssh(host=master, opts=ec2_opts, command=\"chmod u+x /root/spark-ec2/run_aws_trainer.sh\")\n ssh(host=master, opts=ec2_opts, command=\"nohup /root/spark-ec2/run_aws_trainer.sh {d} 2>&1 </dev/null |tee log.aws_trainer\".format(d=trainset_date))\n print(\"Trainer was launched successfully..\")",
"def run(self):\n self.export_users()",
"def main():\n mvip, user, user_pass, mvip_node = get_inputs()\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\n payload = build_payload()\n response_json = connect_cluster(headers, url, payload)\n account_table = create_table(response_json)\n print(account_table)",
"def main():\n spark_it_up()",
"def main():\n # start Spark application and get Spark session, logger and config\n spark = SparkSession \\\n .builder \\\n .appName(\"PokemonBasicETLOperations\") \\\n .config(\"spark.eventLog.enabled\", True) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n print('PokemonBasicETLOperations ETL is up-and-running')\n \n # execute ETL pipeline\n pokemon = extract(spark)\n max_attack_per_type,agg_legend_poke,special_criteria_poke = transform(pokemon)\n load(max_attack_per_type,agg_legend_poke,special_criteria_poke)\n\n print('PokemonBasicETLOperations ETL job is finished')\n spark.stop()\n return None",
"def emr_run_spark():\n\n try:\n response = emr.run_job_flow(\n Name=\"Lab Spark Cluster\",\n LogUri=log_uri,\n ReleaseLabel='emr-5.28.0',\n Instances={\n 'MasterInstanceType': 'm5.xlarge',\n 'SlaveInstanceType': 'r5.2xlarge',\n 'InstanceCount': 4,\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n 'Ec2SubnetId': subnet_id,\n 'EmrManagedMasterSecurityGroup': master_sg,\n 'EmrManagedSlaveSecurityGroup': slave_sg,\n 'ServiceAccessSecurityGroup': service_access_sg\n },\n Applications=[\n {\n 'Name': 'Spark'\n }\n ],\n BootstrapActions=[\n {\n 'Name': 'Maximize Spark Default Config',\n 'ScriptBootstrapAction': {\n 'Path': 's3://support.elasticmapreduce/spark/maximize-spark-default-config',\n }\n },\n {\n 'Name': 'Install boto3',\n 'ScriptBootstrapAction': {\n 'Path': f's3://{lab_bucket}/spark/conf/install_python_modules.sh',\n }\n }\n ],\n Steps=[\n {\n 'Name': 'Setup Debugging',\n 'ActionOnFailure': 'TERMINATE_CLUSTER',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['state-pusher-script']\n }\n },\n {\n 'Name': 'setup - copy files',\n 'ActionOnFailure': 'CANCEL_AND_WAIT',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['aws', 's3', 'cp', f's3://{lab_bucket}/spark/main.py', '/home/hadoop/']\n }\n },\n {\n 'Name': 'Run Spark',\n 'ActionOnFailure': 'CANCEL_AND_WAIT',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['spark-submit', '/home/hadoop/main.py', lab_bucket, s3_data_repo]\n }\n }\n ],\n Configurations=[\n {\n 'Classification': 'spark-env',\n \"Configurations\": [\n {\n \"Classification\": \"export\",\n \"Properties\": {\n \"PYSPARK_PYTHON\": \"/usr/bin/python3\"\n }\n }\n ]\n }\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole',\n Tags=[\n {\n 'Key': 'Project',\n 'Value': 'Data Lake Quickstart'\n },\n {\n 'Key': 'Prefix',\n 'Value': prefix_name\n }\n ]\n )\n\n return response\n\n except ClientError as error:\n logger.error(\"The error occurred when configure emr to run spark\")\n logger.exception(error)",
"def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)",
"def train(self, train_set):\n\n class TrainJob(mp.Process):\n def __init__(self, func, result_list, *args):\n super().__init__()\n self.func = func\n self.args = args\n self.res = result_list\n\n def run(self):\n self.res.append(self.func(*self.args))\n\n self._user_log = pd.DataFrame(train_set)\n self._user_log.columns = ['user_id', 'item_id']\n self._user_log.drop_duplicates(inplace=True)\n '''Calculate user model'''\n manager = mp.Manager()\n res_list = manager.list()\n user_ids = self._user_log['user_id'].drop_duplicates().values.tolist()\n part = 2\n cpus = cpu_count()\n job_list = []\n jobs = int(cpus / part) # Use 1/2 of the cpus\n if jobs <= 0:\n jobs = 1\n part_ids_num = int((len(user_ids) + jobs - 1) / jobs)\n for i in range(jobs):\n part_ids = user_ids[i * part_ids_num:i * part_ids_num + part_ids_num]\n j = TrainJob(self._build_user_model, res_list, part_ids)\n job_list.append(j)\n j.start()\n for job in job_list:\n job.join()\n for ids_dict in res_list:\n for key in ids_dict.keys():\n self._user_vector[key] = ids_dict[key]\n return self",
"def spark(tmp_path_factory, app_name=\"Sample\", url=\"local[*]\"):\n\n with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:\n config = {\n \"spark.local.dir\": td,\n \"spark.sql.shuffle.partitions\": 1,\n \"spark.sql.crossJoin.enabled\": \"true\",\n }\n spark = start_or_get_spark(app_name=app_name, url=url, config=config)\n yield spark\n spark.stop()",
"def main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"data/analytics\"\n \n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)"
]
| [
"0.60789704",
"0.5929695",
"0.57598954",
"0.5682684",
"0.56565857",
"0.5645783",
"0.55553067",
"0.55104584",
"0.5424839",
"0.5386339",
"0.53613216",
"0.53369313",
"0.5331455",
"0.52619505",
"0.5234679",
"0.51690274",
"0.51419294",
"0.5136389",
"0.5131487",
"0.5121975",
"0.51138556",
"0.5111919",
"0.509977",
"0.5091188",
"0.50651276",
"0.5044565",
"0.5031035",
"0.5030926",
"0.5023632",
"0.502137"
]
| 0.62689376 | 0 |
DB Table (user) RDD | def getUsersRDD(sqlContext):
# Currently the id field ranges from '0' to '1000000'.
# To avoid loading it all in memory, partition on the id field (100 partitions, about 10k records per partition).
# Also setting fetch size to 10,000 to avoid multiple database calls per partition.
# All records from a single partition will come in a single query.
    # If we need to use less memory, we can increase the # of partitions and decrease the lower/upper bounds.
# We are also relying on Spark to spill to disk if no memory is available.
from db import *
return sqlContext \
.read \
.format("jdbc") \
.options(
driver=driver,
url=url,
dbtable="user",
user=user,
password=password,
fetchSize=10000,
numPartitions=100,
partitionColumn="id",
lowerBound=0,
upperBound=1000000
) \
.load() \
.rdd \
.persist(StorageLevel.MEMORY_AND_DISK) \
.map(lambda row: (row.practice_id, row)) # We are setting practice_id as the key here because we'll use that to join with user_practice table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getPracticesRDD(sqlContext):\n from db import *\n return sqlContext \\\n .read \\\n .format(\"jdbc\") \\\n .options(\n driver=driver,\n url=url,\n dbtable=\"user_practice\",\n user=user,\n password=password\n ) \\\n .load() \\\n .rdd \\\n .map(lambda row: (row.id, row)) # We'll use the id to join with the users rdd defined above",
"def example_data_users():\n\n #Empty out existing data\n User.query.delete()\n\n celia = User(user_id=2, first_name='Celia', last_name='Waggoner',\n email=\"[email protected]\", password=\"123\", city=\"San Francisco\",\n state=\"CA\", zipcode='94110',\n amenities_pref=1, cleanliness_pref=5, class_size_pref=10,\n class_schedule_pref=5, class_pace_pref=1)\n pam = User(user_id=3, first_name='Pam', last_name='Geick',\n email=\"[email protected]\", password=\"456\", city=\"Rocky River\",\n state=\"OH\", zipcode='44116',\n amenities_pref=1, cleanliness_pref=1, class_size_pref=1,\n class_schedule_pref=1, class_pace_pref=1)\n amber = User(user_id=4, first_name='Amber', last_name='Lynn',\n email=\"[email protected]\", password=\"789\", city=\"Brooklyn\",\n state=\"NY\", zipcode='11201',\n amenities_pref=10, cleanliness_pref=10, class_size_pref=10,\n class_schedule_pref=10, class_pace_pref=10)\n\n db.session.add_all([celia, pam, amber])\n db.session.commit()",
"def parseUser(user, args, sql, numPartitions):\n print(\"parsing user {}\".format(user))\n df = parse_raw(sql, args.input, user)\n df.repartition(numPartitions)\n df = select(df)\n df = df.rdd.map(lambda x: mapRow(x)).toDF()\n df = format(df, args.format, args.documentformat)\n writeToFile(df, args.parallelized, args.output, user, args.format, args.documentformat)",
"def user_login(self, user_name, password):\n\n sql = \"SELECT * FROM USERSTABLE\"\n\n db_cursor.execute(sql)\n users_result = db_cursor.fetchall()\n users = []\n\n\n for user_value in users_result:\n user_value = {\n \n \"location\" : user_value[0],\n \"happeningOn\" : user_value[1],\n \"topics\" : user_value[2]\n\n }\n users.append(user_value)\n\n return users",
"def db_users():\n return [\n {\"name\": \"Cathy\", \"email\": \"cathy@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"Marry\", \"email\": \"marry@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"John\", \"email\": \"john@\", \"group\": \"guest\", \"password\": \"12345\"},\n ]",
"def get_all_users():\n\n # create a db engine\n conn_url = os.getenv(\"DATABASE_URL\")\n engine = create_engine(conn_url, echo=True)\n session_maker = sessionmaker(bind=engine)\n session = session_maker()\n\n # get all ratings\n users = session.query(User).all()\n return pd.DataFrame(\n [\n [\n user.id,\n user.username,\n user.password,\n user.last_trained_on,\n user.tbl_rating_user_id,\n ]\n for user in users\n ],\n columns=[\n \"id\",\n \"username\",\n \"password\",\n \"last_trained_on\",\n \"tbl_rating_user_id\",\n ],\n )",
"def getUserItemMatrix(self):\n\t\tdf = self.getrating()\n\n\t\trows_index = df.user_id.unique()\n\t\tcolumn_index = df.venue_id.unique() \n\n\t\trow_len = len(rows_index)\n\t\tcol_len = len(column_index)\n\n\t\tX = lil_matrix((row_len, col_len))\n\t\trow_map = dict(zip(rows_index, range(row_len)))\n\t\tcol_map = dict(zip(column_index, range(col_len)))\n\n\t\t# Get mapping table for rows and columns\n\t\td = {}\n\t\td[\"row\"] = row_map\n\t\td[\"col\"] = col_map\n\n\t\tfor index, row in df.iterrows():\n\t\t\tX[d[\"row\"][row[\"user_id\"]], d[\"col\"][row[\"venue_id\"]]] = row[\"Rating\"]\n\n\t\tX = X.tocsr() # Allow efficient row slicing\n\n\t\treturn [d,X]",
"def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]",
"def get_users_from_table():\n # Connect to database\n conn = psycopg2.connect(DATABASE_URL, sslmode='require')\n # Open a cursor to perform db operations\n cur = conn.cursor()\n # Query the table\n cur.execute(\"\"\"\n SELECT *\n FROM test \n ;\n \"\"\"\n )\n rows = cur.fetchall()\n # Commit and close connection\n conn.commit()\n cur.close()\n conn.close()\n return rows",
"def populate_table(self, username = \"\"):\n db_acces = DbMethods()\n users = db_acces.select_users(username)\n\n self.result_table.setRowCount(len(users))\n\n for i in range(len(users)):\n user = users[i]\n item_user = QTableWidgetItem(user[\"username\"])\n self.result_table.setItem(i, 0, item_user)",
"def query_all_users():\n ddb = boto3.resource(\"dynamodb\")\n tb = ddb.Table(os.environ.get(\"TABLE_NAME\"))\n return tb.scan()",
"def get_user_activity(userid: int) -> list:\n usertable = metadata.tables['users']\n bookstable = metadata.tables['books']\n reviewstable = metadata.tables['book_reviews']\n\n fields = ('isbn', 'title', 'author', 'year', 'image_url', 'rating', 'review')\n\n query = select([\n bookstable.c.isbn,\n bookstable.c.title,\n bookstable.c.author,\n bookstable.c.year,\n bookstable.c.image_url,\n reviewstable.c.rating,\n reviewstable.c.review\n ]).select_from(\n reviewstable.join(\n usertable).join(\n bookstable)).where(\n usertable.c.id == userid\n )\n return [{k: v for k, v in zip(fields, res)} for res in conn.execute(query).fetchall()]",
"def _fetch_sample_data_from_user_query(self) -> TableData:\n rnd = self.session.execute(f\"{self._profile_sample_query}\")\n try:\n columns = [col.name for col in rnd.cursor.description]\n except AttributeError:\n columns = list(rnd.keys())\n return TableData(\n columns=columns,\n rows=[list(row) for row in rnd.fetchmany(100)],\n )",
"def QueryAllUsers():\n conn = engine.connect()\n outmsg = \"\"\n if CheckTblNameExist(\"lineuser\"):\n result_db = conn.execute(\"select * from lineuser\")\n for row in result_db:\n outstring = f\" [(userid: {row.userid}),\"\\\n f\" (username: {row.username}),\"\\\n f\" (usertoken: {row.usertoken})]\"\n if outmsg == \"\":\n outmsg = outstring\n else:\n outmsg = outmsg + \",\\n\" + outstring\n conn.close()\n return outmsg\n else:\n conn.close()\n return \"Table Not Exist\"",
"def fetch_user_from_db(self) -> pd.DataFrame:\n query = f\"select * from {TABLE} where user_handle = {self.user}\"\n return db_main.read_table(DATABASE_ENV, query)",
"def hash_data(self):\r\n # you will need to use self.A for this method\r\n func = self.functions\r\n # For each row of file, it will create hash vectors and will map that hash function to the RDD\r\n query = self.A.map(lambda q: q + ([f(q[0]) for f in func],))\r\n #print(query.take(1))\r\n return query\r\n #raise NotImplementedError\r\n raise NotImplementedError",
"def db_table(self):",
"def extract_ratings_by_uid(dataset, user_id):\n new_dataset = dataset.filter(lambda x: x[0] != user_id) \\\n .repartition(numPartitions) \\\n .cache()\n\n user_ratings = dataset.filter(lambda x: x[0] == user_id) \\\n .repartition(numPartitions) \\\n .cache()\n\n# debug\n print \"Count of user ratings: \", user_ratings.count()\n\n return new_dataset, user_ratings",
"def get_user_list(dataset):\n res = dataset\\\n .map(lambda x: x[0])\\\n .collect()\n return list(set(res))",
"def user_list(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT id FROM users ORDER BY id DESC\")\n record = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n return record",
"def run(sc, logger):\n start = time.time()\n\n # Spark SQL Context\n sqlContext = sql.context.SQLContext(sc)\n\n # 1) Practices - Small table so no need to partition it, could even broadcast across all executors\n # Spark SQL JDBC Options: https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html\n practicesRDD = getPracticesRDD(sqlContext)\n\n # 1) Debug Practices\n logger.info(\"-------------\")\n logger.info(\"1a) Practices RDD: %s (%s partition(s))\" % (practicesRDD, practicesRDD.getNumPartitions()))\n logger.info(\"1b) Count Practices: %s \\n\" % practicesRDD.count())\n\n # 2) User Table - Big table so need to read in batches, so partition the table by id\n # Spark SQL JDBC Options: https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html\n usersRDD = getUsersRDD(sqlContext)\n\n # 2) Debug Users\n logger.info(\"-------------\")\n logger.info(\"2a) Users RDD: %s (%s partition(s))\" % (usersRDD, usersRDD.getNumPartitions()))\n logger.info(\"2b) Count Users: %s \\n\" % usersRDD.count())\n\n # 3) Join Users and Practices (user.practice_id = user_practice.id)\n joinedUsersRDD = usersRDD \\\n .join(practicesRDD) \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: ((row[1][0].firstname, row[1][0].lastname, row[1][1].location, row[1][0].specialty), UserReference(row[1][0].id, row[1][0].last_active_date)))\n\n # 3) Debug Joined Users\n logger.info(\"-------------\")\n logger.info(\"3a) Joined Users RDD: %s (%s partition(s))\" % (joinedUsersRDD, joinedUsersRDD.getNumPartitions()))\n logger.info(\"3b) Count Joined Users: %s \\n\" % joinedUsersRDD.count())\n\n # 4) Vendor Users\n vendorUsersRDD = vendor.getVendorUsers(sc) \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: ((row[\"firstName\"], row[\"lastName\"], row[\"practiceLocation\"], row[\"specialty\"]), row))\n\n # 4) Debug Vendor Users\n logger.info(\"-------------\")\n logger.info(\"4a) Vendor Users RDD: %s (%s partition(s))\" % (vendorUsersRDD, vendorUsersRDD.getNumPartitions()))\n logger.info(\"4b) Count Vendor Users: %s \\n\" % vendorUsersRDD.count())\n\n # 4) Enriched Users (associate our user record with the vendor user)\n # Since we already have our users loaded in our db, just gonna load the vendor user with a pointer to our user\n enrichedRDD = joinedUsersRDD \\\n .rightOuterJoin(vendorUsersRDD) \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: enrich(row))\n\n # 5) Debug Enriched Users\n logger.info(\"-------------\")\n logger.info(\"5a) Enriched Users RDD: %s (%s partition(s))\" % (enrichedRDD, enrichedRDD.getNumPartitions()))\n logger.info(\"5b) Count Enriched Users: %s \\n\" % enrichedRDD.count())\n\n # 6) Save to disk in JSON\n logger.info(\"-------------\")\n\n # Calculate how long it took for the job to run\n end = time.time()\n elapsed = end - start\n\n # Job Output (write to stdout and output.txt)\n output = JobOutput(logger, elapsed, usersRDD, vendorUsersRDD, enrichedRDD)\n output.write()",
"def get_users(self):\n cmd = \"\"\" SELECT %s FROM %s; \"\"\" %(COL_WALLETS_NAME,\n TABLE_WALLETS)\n self.__dbcursor.execute(cmd)\n return [row[0] for row in self.__dbcursor.fetchall()]",
"def example_data():\n\n db.create_all()\n\n\n #Empty out data from previous runs\n User.query.delete()\n Book.query.delete()\n Rating.query.delete()\n\n #Add sample users, books, and ratings\n\n #sample users\n user1 = User(user_id=1, email='[email protected]', password='password')\n user2 = User(user_id=2, email='[email protected]', password='password')\n user3 = User(user_id=3, email='[email protected]', password='password')\n user4 = User(user_id=4, email='[email protected]', password='password')\n user5 = User(user_id=5, email='[email protected]', password='password')\n\n #sample books\n book1 = Book(book_id=7627, work_id=16683183, isbn='0007331789', title='Death of Kings (The Saxon Stories, #6)', author='Bernard Cornwell')\n book2 = Book(book_id=7695, work_id=16947613, isbn='0007350430', title='The Time of My Life', author='Cecelia Ahern')\n book3 = Book(book_id=69, work_id=15524542, isbn='0007442912', title='Insurgent (Divergent #2)', author='Veronica Roth')\n book4 = Book(book_id=3327, work_id=23906880, isbn='0007491433', title='The Shock of the Fall', author='Nathan Filer')\n book5 = Book(book_id=8387, work_id=67116, isbn='0099464691', title='The White Lioness (Kurt Wallander, #3)', author='Henning Mankell')\n\n\n #sample ratings\n rating1 = Rating(rating_id=1, book_id=7627, user_id=1, score=5)\n rating2 = Rating(rating_id=2, book_id=7627, user_id=2, score=5)\n rating3 = Rating(rating_id=3, book_id=7627, user_id=3, score=3)\n rating4 = Rating(rating_id=4, book_id=7627, user_id=4, score=3)\n rating5 = Rating(rating_id=5, book_id=7627, user_id=5, score=1)\n rating6 = Rating(rating_id=6, book_id=8387, user_id=1, score=5)\n rating7 = Rating(rating_id=7, book_id=8387, user_id=2, score=5)\n rating8 = Rating(rating_id=8, book_id=8387, user_id=3, score=3)\n rating9 = Rating(rating_id=9, book_id=8387, user_id=4, score=3)\n rating10 = Rating(rating_id=10, book_id=8387, user_id=5, score=1)\n rating11 = Rating(rating_id=11, book_id=69, user_id=5, score=5)\n rating12 = Rating(rating_id=12, book_id=3327, user_id=5, score=5)\n rating13 = Rating(rating_id=13, book_id=3327, user_id=2, score=5)\n\n #Add all to session and commit\n db.session.add_all([user1, user2, user3, user4, user5, book1, book2, book3, \n book4, book5, rating1, rating2, rating3, rating4, \n rating5, rating6, rating7, rating8, rating9, rating10, rating11,\n rating12, rating13])\n db.session.commit()",
"def _create_table_user(cur) -> None:\n cur.execute('''\n CREATE TABLE IF NOT EXISTS user\n (id INTEGER PRIMARY KEY, name TEXT)\n ''')",
"def query_one(\n self, table_name_users, table_name_activities, table_name_trackpoints\n ):\n\n query = (\n \"SELECT UserCount.NumUsers, ActivitiesCount.NumActivities, TrackpointCount.NumTrackpoints FROM \"\n \"(SELECT COUNT(*) as NumUsers FROM %s) AS UserCount,\"\n \"(SELECT COUNT(*) as NumActivities FROM %s) AS ActivitiesCount,\"\n \"(SELECT COUNT(*) as NumTrackpoints FROM %s) AS TrackpointCount\"\n )\n\n self.cursor.execute(\n query % (table_name_users, table_name_activities,\n table_name_trackpoints)\n )\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n return rows",
"def create_dataset(self, *args, **kwargs):\n dataset = super().create_dataset(*args, **kwargs)\n length = len(dataset._data)\n dataset.append_col([self.request.user.id] * length,\n header=\"source_user\")\n return dataset",
"def create_dataset(self, *args, **kwargs):\n dataset = super().create_dataset(*args, **kwargs)\n length = len(dataset._data)\n dataset.append_col([self.request.user.id] * length,\n header=\"source_user\")\n return dataset",
"def users():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Users', level=1)\r\n users = get_qlik_sense.get_users()\r\n num_of_users = len(users)\r\n table = document.add_table(rows=num_of_users+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'userId'\r\n row.cells[1].text = 'userDirectory'\r\n row.cells[2].text = 'name'\r\n row.cells[3].text = 'roles'\r\n row.cells[4].text = 'inactive'\r\n row.cells[5].text = 'removed externally'\r\n row.cells[6].text = 'blacklisted'\r\n for user in range(num_of_users):\r\n row = table.rows[user+1]\r\n row.cells[0].text = str(users[user][0])\r\n row.cells[1].text = str(users[user][1])\r\n row.cells[2].text = str(users[user][2])\r\n row.cells[3].text = str(users[user][3])\r\n row.cells[4].text = str(users[user][4])\r\n row.cells[5].text = str(users[user][5])\r\n row.cells[6].text = str(users[user][6])\r\n document.add_page_break()",
"def process_log_data(spark, input_data, output_data):\n\n # get filepath to log data file\n log_data = os.path.join( input_data, \"log-data/*/*/*.json\")\n\n # read log data file\n df = spark.read.json(log_data)\n\n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n \n # USERS TABLE\n # extract columns for users table\n users_table = df.select(\"userId\",\"firstName\",\"lastName\",\"gender\",\"level\").dropDuplicates(['userId'])\n \n print( \"HERE users_table sample:\\n\")\n users_table.show(5)\n # write users table to parquet files\n users_table.write.parquet(os.path.join(output_data, \"users/\") , mode=\"overwrite\")\n\n # TIME TABLE\n # create timestamp column from original timestamp column\n get_start_time = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%Y-%m-%d %H:%M:%S'))\n get_hour = udf(lambda x: datetime.fromtimestamp(x / 1000.0).hour)\n get_day = udf(lambda x: datetime.fromtimestamp(x / 1000.0).day)\n get_week = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%W'))\n get_month = udf(lambda x: datetime.fromtimestamp(x / 1000.0).month)\n get_year = udf(lambda x: datetime.fromtimestamp(x / 1000.0).year)\n get_weekday = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%A'))\n\n df = df.withColumn('start_time', get_start_time(df['ts']))\n df = df.withColumn('hour', get_hour(df['ts']))\n df = df.withColumn('day', get_day(df['ts']))\n df = df.withColumn('week', get_week(df['ts']))\n df = df.withColumn('month', get_month(df['ts']))\n df = df.withColumn('year', get_year(df['ts']))\n df = df.withColumn('week_day', get_weekday(df['ts'])).dropDuplicates(['start_time'])\n\n df.createOrReplaceTempView(\"time_table\")\n \n time_columns = ['start_time', 'hour', 'day', 'week', 'month', 'year', 'week_day']\n\n # extract columns to create time table\n time_table = spark.sql(\"\"\"\n SELECT start_time, hour, day, week, month, year, week_day\n FROM time_table\n \"\"\").toDF(*time_columns)\n \n print( \"HERE time_table sample:\\n\")\n time_table.show(5)\n # write time table to parquet files partitioned by year and month\n time_table.write.parquet(os.path.join(output_data, \"time_table/\"), mode='overwrite', partitionBy=[\"year\",\"month\"])\n\n # SONGPLAYS TABLE\n # add monotonically increasing id column\n df = df.withColumn('songplay_id', functions.monotonically_increasing_id())\n df.createOrReplaceTempView(\"songplays_table\")\n\n # song df\n song_data = os.path.join( input_data, \"song_data/*/*/*/*.json\")\n song_df = spark.read.json(song_data).dropDuplicates()\n song_df.createOrReplaceTempView(\"songs_table\")\n\n song_columns = ['songplay_id', 'start_time', 'userId', 'level', 'sessionId', 'location', 'userAgent', 'year', 'month',\n 'length', 'song_id', 'artist_id', 'title', 'artist_name', 'duration']\n\n # extract columns to create time table\n songplays_table = spark.sql(\n \"\"\"\n SELECT sp.songplay_id, sp.start_time, sp.userId, sp.level, sp.sessionId, sp.location, sp.userAgent, sp.year, \n sp.month, sp.length, s.song_id, s.artist_id, s.title, s.artist_name, s.duration\n FROM songplays_table AS sp \n JOIN songs_table AS s ON sp.song = s.title AND sp.artist = s.artist_name AND sp.length = s.duration\n \"\"\").toDF(*song_columns)\n \n print( \"HERE songplays_table sample:\\n\")\n songplays_table.show(5)\n # write songplays table to parquet files partitioned by year and month\n songplays_table.write.parquet(os.path.join(output_data, \"songplays/\"), mode=\"overwrite\", partitionBy=[\"year\",\"month\"])",
"def load_users():\n\n print \"User\"\n\n for row in list(open(\"Users.csv\"))[1:]:\n name, zipcode, email = row.strip().split(\",\")\n\n user = User(name=name, \n zipcode=zipcode, email=email)\n\n db.session.add(user)\n\n db.session.commit()"
]
| [
"0.67998576",
"0.5744766",
"0.5728073",
"0.5698959",
"0.56625485",
"0.5611208",
"0.5545845",
"0.5541903",
"0.5511742",
"0.54902637",
"0.5478023",
"0.546456",
"0.5460285",
"0.54463327",
"0.54219323",
"0.53919584",
"0.53734076",
"0.53692913",
"0.53442574",
"0.5303176",
"0.5291779",
"0.52899325",
"0.5280382",
"0.527371",
"0.52704674",
"0.52629644",
"0.52629644",
"0.526199",
"0.5259207",
"0.52541935"
]
| 0.7346135 | 0 |
DB Table (user_practice) RDD | def getPracticesRDD(sqlContext):
from db import *
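    # The db module supplies the JDBC connection settings used below: driver, url, user, password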
return sqlContext \
.read \
.format("jdbc") \
.options(
driver=driver,
url=url,
dbtable="user_practice",
user=user,
password=password
) \
.load() \
.rdd \
.map(lambda row: (row.id, row)) # We'll use the id to join with the users rdd defined above | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUsersRDD(sqlContext):\n # Currently the id field ranges from '0' to '1000000'.\n # To avoid loading it all in memory, partition on the id field (100 partitions, about 10k records per partition).\n # Also setting fetch size to 10,000 to avoid multiple database calls per partition.\n # All records from a single partition will come in a single query.\n # If we need to use less memory, we can increase the # of partitions and decrease the lower/uppper bounds.\n # We are also relying on Spark to spill to disk if no memory is available.\n from db import *\n return sqlContext \\\n .read \\\n .format(\"jdbc\") \\\n .options(\n driver=driver,\n url=url,\n dbtable=\"user\",\n user=user,\n password=password,\n fetchSize=10000,\n numPartitions=100,\n partitionColumn=\"id\",\n lowerBound=0,\n upperBound=1000000\n ) \\\n .load() \\\n .rdd \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: (row.practice_id, row)) # We are setting practice_id as the key here because we'll use that to join with user_practice table",
"def spark_recommendations(filename, user, products, separator = '\\t', n = 10):\n sc = pyspark.SparkContext('loc', 'pyspark_rec')\n aData = sc.textFile(filename)\n data = aData.map(lambda line: np.array([float(x) for x in line.split(separator)])) \n # to do this, it assumes that each line of the file consists of [user, product, rating]\n \n numIterations = 20\n aModel = pyspark.mllib.recommendation.ALS.train(data, n, numIterations)\n aRDDresults = aModel.predict(user, products)\n\n return aModel, aRDDresults",
"def run(sc, logger):\n start = time.time()\n\n # Spark SQL Context\n sqlContext = sql.context.SQLContext(sc)\n\n # 1) Practices - Small table so no need to partition it, could even broadcast across all executors\n # Spark SQL JDBC Options: https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html\n practicesRDD = getPracticesRDD(sqlContext)\n\n # 1) Debug Practices\n logger.info(\"-------------\")\n logger.info(\"1a) Practices RDD: %s (%s partition(s))\" % (practicesRDD, practicesRDD.getNumPartitions()))\n logger.info(\"1b) Count Practices: %s \\n\" % practicesRDD.count())\n\n # 2) User Table - Big table so need to read in batches, so partition the table by id\n # Spark SQL JDBC Options: https://spark.apache.org/docs/latest/sql-data-sources-jdbc.html\n usersRDD = getUsersRDD(sqlContext)\n\n # 2) Debug Users\n logger.info(\"-------------\")\n logger.info(\"2a) Users RDD: %s (%s partition(s))\" % (usersRDD, usersRDD.getNumPartitions()))\n logger.info(\"2b) Count Users: %s \\n\" % usersRDD.count())\n\n # 3) Join Users and Practices (user.practice_id = user_practice.id)\n joinedUsersRDD = usersRDD \\\n .join(practicesRDD) \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: ((row[1][0].firstname, row[1][0].lastname, row[1][1].location, row[1][0].specialty), UserReference(row[1][0].id, row[1][0].last_active_date)))\n\n # 3) Debug Joined Users\n logger.info(\"-------------\")\n logger.info(\"3a) Joined Users RDD: %s (%s partition(s))\" % (joinedUsersRDD, joinedUsersRDD.getNumPartitions()))\n logger.info(\"3b) Count Joined Users: %s \\n\" % joinedUsersRDD.count())\n\n # 4) Vendor Users\n vendorUsersRDD = vendor.getVendorUsers(sc) \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: ((row[\"firstName\"], row[\"lastName\"], row[\"practiceLocation\"], row[\"specialty\"]), row))\n\n # 4) Debug Vendor Users\n logger.info(\"-------------\")\n logger.info(\"4a) Vendor Users RDD: %s (%s partition(s))\" % (vendorUsersRDD, vendorUsersRDD.getNumPartitions()))\n logger.info(\"4b) Count Vendor Users: %s \\n\" % vendorUsersRDD.count())\n\n # 4) Enriched Users (associate our user record with the vendor user)\n # Since we already have our users loaded in our db, just gonna load the vendor user with a pointer to our user\n enrichedRDD = joinedUsersRDD \\\n .rightOuterJoin(vendorUsersRDD) \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: enrich(row))\n\n # 5) Debug Enriched Users\n logger.info(\"-------------\")\n logger.info(\"5a) Enriched Users RDD: %s (%s partition(s))\" % (enrichedRDD, enrichedRDD.getNumPartitions()))\n logger.info(\"5b) Count Enriched Users: %s \\n\" % enrichedRDD.count())\n\n # 6) Save to disk in JSON\n logger.info(\"-------------\")\n\n # Calculate how long it took for the job to run\n end = time.time()\n elapsed = end - start\n\n # Job Output (write to stdout and output.txt)\n output = JobOutput(logger, elapsed, usersRDD, vendorUsersRDD, enrichedRDD)\n output.write()",
"def example_data():\n\n db.create_all()\n\n\n #Empty out data from previous runs\n User.query.delete()\n Book.query.delete()\n Rating.query.delete()\n\n #Add sample users, books, and ratings\n\n #sample users\n user1 = User(user_id=1, email='[email protected]', password='password')\n user2 = User(user_id=2, email='[email protected]', password='password')\n user3 = User(user_id=3, email='[email protected]', password='password')\n user4 = User(user_id=4, email='[email protected]', password='password')\n user5 = User(user_id=5, email='[email protected]', password='password')\n\n #sample books\n book1 = Book(book_id=7627, work_id=16683183, isbn='0007331789', title='Death of Kings (The Saxon Stories, #6)', author='Bernard Cornwell')\n book2 = Book(book_id=7695, work_id=16947613, isbn='0007350430', title='The Time of My Life', author='Cecelia Ahern')\n book3 = Book(book_id=69, work_id=15524542, isbn='0007442912', title='Insurgent (Divergent #2)', author='Veronica Roth')\n book4 = Book(book_id=3327, work_id=23906880, isbn='0007491433', title='The Shock of the Fall', author='Nathan Filer')\n book5 = Book(book_id=8387, work_id=67116, isbn='0099464691', title='The White Lioness (Kurt Wallander, #3)', author='Henning Mankell')\n\n\n #sample ratings\n rating1 = Rating(rating_id=1, book_id=7627, user_id=1, score=5)\n rating2 = Rating(rating_id=2, book_id=7627, user_id=2, score=5)\n rating3 = Rating(rating_id=3, book_id=7627, user_id=3, score=3)\n rating4 = Rating(rating_id=4, book_id=7627, user_id=4, score=3)\n rating5 = Rating(rating_id=5, book_id=7627, user_id=5, score=1)\n rating6 = Rating(rating_id=6, book_id=8387, user_id=1, score=5)\n rating7 = Rating(rating_id=7, book_id=8387, user_id=2, score=5)\n rating8 = Rating(rating_id=8, book_id=8387, user_id=3, score=3)\n rating9 = Rating(rating_id=9, book_id=8387, user_id=4, score=3)\n rating10 = Rating(rating_id=10, book_id=8387, user_id=5, score=1)\n rating11 = Rating(rating_id=11, book_id=69, user_id=5, score=5)\n rating12 = Rating(rating_id=12, book_id=3327, user_id=5, score=5)\n rating13 = Rating(rating_id=13, book_id=3327, user_id=2, score=5)\n\n #Add all to session and commit\n db.session.add_all([user1, user2, user3, user4, user5, book1, book2, book3, \n book4, book5, rating1, rating2, rating3, rating4, \n rating5, rating6, rating7, rating8, rating9, rating10, rating11,\n rating12, rating13])\n db.session.commit()",
"def __init__(self, spark_session, dataset_folder_path):\n self.df = [None for i in range(10)]\n self.model = [None for i in range(10)]\n\n logger.info(\"Starting up the Recommendation Engine: \")\n self.spark_session = spark_session\n\n # Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n self.file_index = 0\n while True:\n filename = 'result' + str(self.file_index) + '.txt'\n dataset_file_path = os.path.join(dataset_folder_path,filename)\n exist_file = os.path.isfile(dataset_file_path)\n\n if exist_file:\n logger.info(self.file_index)\n self.df[self.file_index] = spark_session.read.csv(dataset_file_path,header=None, inferSchema=True)\n self.df[self.file_index] = self.df[self.file_index].selectExpr(\"_c1 as Title\" , \"_c3 as Userscore\", \"_c5 as Username\")\n self.df[self.file_index] = self.df[self.file_index].select(self.df[self.file_index].Username,self.df[self.file_index].Title,self.df[self.file_index].Userscore)\n\n logger.info(\"can load data\")\n\n stringindexer = StringIndexer(inputCol='Username',outputCol='UserId')\n stringindexer.setHandleInvalid(\"keep\")\n model = stringindexer.fit(self.df[self.file_index])\n indexed = model.transform(self.df[self.file_index]) \n\n stringindexer_item = StringIndexer(inputCol='Title',outputCol='GameId')\n stringindexer_item.setHandleInvalid(\"keep\") \n model = stringindexer_item.fit(indexed)\n indexed = model.transform(indexed)\n\n logger.info(\"sucess conver\")\n\n self.df[self.file_index] = indexed.select(indexed.Username,indexed.UserId,indexed.Title,indexed.GameId,indexed.Userscore.cast(\"int\"))\n logger.info(\"get data frame\")\n\n \n self.df[self.file_index].show()\n self.file_index+=1\n else:\n break\n\n self.__train_model()",
"def example_data_users():\n\n #Empty out existing data\n User.query.delete()\n\n celia = User(user_id=2, first_name='Celia', last_name='Waggoner',\n email=\"[email protected]\", password=\"123\", city=\"San Francisco\",\n state=\"CA\", zipcode='94110',\n amenities_pref=1, cleanliness_pref=5, class_size_pref=10,\n class_schedule_pref=5, class_pace_pref=1)\n pam = User(user_id=3, first_name='Pam', last_name='Geick',\n email=\"[email protected]\", password=\"456\", city=\"Rocky River\",\n state=\"OH\", zipcode='44116',\n amenities_pref=1, cleanliness_pref=1, class_size_pref=1,\n class_schedule_pref=1, class_pace_pref=1)\n amber = User(user_id=4, first_name='Amber', last_name='Lynn',\n email=\"[email protected]\", password=\"789\", city=\"Brooklyn\",\n state=\"NY\", zipcode='11201',\n amenities_pref=10, cleanliness_pref=10, class_size_pref=10,\n class_schedule_pref=10, class_pace_pref=10)\n\n db.session.add_all([celia, pam, amber])\n db.session.commit()",
"def _fetch_sample_data_from_user_query(self) -> TableData:\n rnd = self.session.execute(f\"{self._profile_sample_query}\")\n try:\n columns = [col.name for col in rnd.cursor.description]\n except AttributeError:\n columns = list(rnd.keys())\n return TableData(\n columns=columns,\n rows=[list(row) for row in rnd.fetchmany(100)],\n )",
"def example_data():\n\n #add user, business, comment, rating, tips, question, answer\n sample_user = User(username='ilkay', \n password=argon2.hash('123Qwe/'),\n email='[email protected]')\n sample_business = Business(business_id='IBZbaTy-_Ds7GITu4QimHQ', \n business_name='Wildhaven Ranch', \n business_type='zoo',\n latitude=34.256787,\n longitude=-117.161389)\n sample_favorite = Favorite(user=sample_user,\n business=sample_business)\n sample_comment = Comment(user=sample_user, \n business=sample_business,\n comment='hi there')\n sample_rating = Rating(user=sample_user, \n business=sample_business,\n rating=5)\n sample_tip_b = BusinessTip(user=sample_user, \n business=sample_business,\n business_tip='bring wet towels')\n sample_tip_t = TripTip(user=sample_user,\n trip_tip='bring toys')\n sample_question = Question(question='Favorite color?')\n sample_answer = Answer(question=sample_question,\n user=sample_user,\n answer='blue')\n sample_route = Route(user=sample_user,\n start=\"Sunnyvale, CA\",\n end=\"Universal City, CA\")\n sample_stopover = Stopover(route=sample_route,\n business=sample_business,\n latitude=34.256787,\n longitude=-117.161389)\n\n db.session.add_all([sample_user,\n sample_business, \n sample_rating, \n sample_comment,\n sample_tip_b,\n sample_tip_t,\n sample_question,\n sample_answer,\n sample_favorite,\n sample_route,\n sample_stopover])\n db.session.commit()",
"def load_data():\n\tscores = pd.read_csv('../data/user_assessment_scores.csv')\n\tviews = pd.read_csv('../data/user_course_views.csv')\n\ttags = pd.read_csv('../data/course_tags.csv')\n\tinterests = pd.read_csv('../data/user_interests.csv')\n\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tscores.to_sql('scores', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\tviews.to_sql('views', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\ttags.to_sql('tags', engine, if_exists='replace', index=False, index_label='course_id')\n\t\tinterests.to_sql('interests', engine, if_exists='replace', index=False, index_label='user_handle')\n\texcept:\n\t\tprint('Error occured while inserting into database')\n\tfinally:\n\t\tif engine:\n\t\t\tengine.close()\n\treturn scores, views, tags, interests",
"def load_ratings():\n\n print \"Ratings\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n user_id = int(user_id)\n movie_id = int(movie_id)\n score = int(score)\n\n #from rating class take the movie_id and make it equal to the movie_id \n #from the for loop above. We are calling it to make an instance of the rating\n #class\n rating = Rating(movie_id=movie_id, user_id=user_id, score=score)\n \n #We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n #Once we're done, we should commit our work\n db.session.commit()",
"def parseUser(user, args, sql, numPartitions):\n print(\"parsing user {}\".format(user))\n df = parse_raw(sql, args.input, user)\n df.repartition(numPartitions)\n df = select(df)\n df = df.rdd.map(lambda x: mapRow(x)).toDF()\n df = format(df, args.format, args.documentformat)\n writeToFile(df, args.parallelized, args.output, user, args.format, args.documentformat)",
"def add_to_rating_db(table, user_list):\n client, db = open_db_connection()\n db[table].remove()\n for user in user_list:\n net_id = user.replace(\"\\r\\n\", \"\").encode(\"utf-8\")\n db[table].insert({\"ta\": net_id, \"_id\": net_id, \"score\":random.random()*5})\n close_db_connection(client)",
"def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Answer.query.delete()\n Question.query.delete()\n\n # Add sample users, answers and questions\n cat = User(user_name=\"Cat\", email=\"[email protected]\", password=\"abc\")\n dog = User(user_name=\"Dog\", email=\"[email protected]\", password=\"abc\")\n horse = User(user_name=\"Horse\", email=\"[email protected]\", password=\"abc\")\n\n db.session.add_all([cat, dog, horse])\n db.session.commit()\n\n question_1 = Question(question_id=\"q1\", title=\"Should we save the planet?\", description=\" \", user_id=3)\n question_2 = Question(question_id=\"q2\", title=\"Is recycling pointless?\", description=\" \", user_id=3)\n question_3 = Question(question_id=\"q3\", title=\"Mustard or Ketchup?\", description=\" \", user_id=1)\n\n db.session.add_all([question_1, question_2, question_3])\n db.session.commit()\n\n answer_1 = Answer(question_id=\"q1\", user_id=1, body=\"Yes, I agree.\")\n answer_2 = Answer(question_id=\"q2\", user_id=2, body=\"No, I disagree.\")\n answer_3 = Answer(question_id=\"q3\", user_id=3, body=\"Hrm, I'm indifferent.\")\n\n db.session.add_all([answer_1, answer_2, answer_3])\n db.session.commit()",
"def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()",
"def get_training_data(db_conn):\n return pd.read_sql('''select * from churn_model.churn_data;''', db_conn)",
"def all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)",
"def process_log_data(spark, input_data, output_data):\n\n # get filepath to log data file\n log_data = os.path.join( input_data, \"log-data/*/*/*.json\")\n\n # read log data file\n df = spark.read.json(log_data)\n\n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n \n # USERS TABLE\n # extract columns for users table\n users_table = df.select(\"userId\",\"firstName\",\"lastName\",\"gender\",\"level\").dropDuplicates(['userId'])\n \n print( \"HERE users_table sample:\\n\")\n users_table.show(5)\n # write users table to parquet files\n users_table.write.parquet(os.path.join(output_data, \"users/\") , mode=\"overwrite\")\n\n # TIME TABLE\n # create timestamp column from original timestamp column\n get_start_time = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%Y-%m-%d %H:%M:%S'))\n get_hour = udf(lambda x: datetime.fromtimestamp(x / 1000.0).hour)\n get_day = udf(lambda x: datetime.fromtimestamp(x / 1000.0).day)\n get_week = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%W'))\n get_month = udf(lambda x: datetime.fromtimestamp(x / 1000.0).month)\n get_year = udf(lambda x: datetime.fromtimestamp(x / 1000.0).year)\n get_weekday = udf(lambda x: datetime.fromtimestamp(x / 1000.0).strftime('%A'))\n\n df = df.withColumn('start_time', get_start_time(df['ts']))\n df = df.withColumn('hour', get_hour(df['ts']))\n df = df.withColumn('day', get_day(df['ts']))\n df = df.withColumn('week', get_week(df['ts']))\n df = df.withColumn('month', get_month(df['ts']))\n df = df.withColumn('year', get_year(df['ts']))\n df = df.withColumn('week_day', get_weekday(df['ts'])).dropDuplicates(['start_time'])\n\n df.createOrReplaceTempView(\"time_table\")\n \n time_columns = ['start_time', 'hour', 'day', 'week', 'month', 'year', 'week_day']\n\n # extract columns to create time table\n time_table = spark.sql(\"\"\"\n SELECT start_time, hour, day, week, month, year, week_day\n FROM time_table\n \"\"\").toDF(*time_columns)\n \n print( \"HERE time_table sample:\\n\")\n time_table.show(5)\n # write time table to parquet files partitioned by year and month\n time_table.write.parquet(os.path.join(output_data, \"time_table/\"), mode='overwrite', partitionBy=[\"year\",\"month\"])\n\n # SONGPLAYS TABLE\n # add monotonically increasing id column\n df = df.withColumn('songplay_id', functions.monotonically_increasing_id())\n df.createOrReplaceTempView(\"songplays_table\")\n\n # song df\n song_data = os.path.join( input_data, \"song_data/*/*/*/*.json\")\n song_df = spark.read.json(song_data).dropDuplicates()\n song_df.createOrReplaceTempView(\"songs_table\")\n\n song_columns = ['songplay_id', 'start_time', 'userId', 'level', 'sessionId', 'location', 'userAgent', 'year', 'month',\n 'length', 'song_id', 'artist_id', 'title', 'artist_name', 'duration']\n\n # extract columns to create time table\n songplays_table = spark.sql(\n \"\"\"\n SELECT sp.songplay_id, sp.start_time, sp.userId, sp.level, sp.sessionId, sp.location, sp.userAgent, sp.year, \n sp.month, sp.length, s.song_id, s.artist_id, s.title, s.artist_name, s.duration\n FROM songplays_table AS sp \n JOIN songs_table AS s ON sp.song = s.title AND sp.artist = s.artist_name AND sp.length = s.duration\n \"\"\").toDF(*song_columns)\n \n print( \"HERE songplays_table sample:\\n\")\n songplays_table.show(5)\n # write songplays table to parquet files partitioned by year and month\n songplays_table.write.parquet(os.path.join(output_data, \"songplays/\"), mode=\"overwrite\", partitionBy=[\"year\",\"month\"])",
"def test_sample_rows():\n ratings = lktu.ml_test.ratings\n ratings = ratings.set_index('user') ##forces non-unique index\n with pytest.raises(ValueError):\n for split in xf.sample_rows(ratings, partitions=5, size=1000):\n pass",
"def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Trip.query.delete()\n Entry.query.delete()\n Category.query.delete()\n Share.query.delete()\n\n # Add sample data\n user1 = User(email='[email protected]', password=bcrypt.hashpw('user1'.encode('utf8'), bcrypt.gensalt(9)), name='One')\n user2 = User(email='[email protected]', password=bcrypt.hashpw('user2'.encode('utf8'), bcrypt.gensalt(9)), name='Two')\n trip1 = Trip(location='Spain', date='08/09/2017', name='Abroad Trip', user_id=1)\n entry1 = Entry(trip_id=1, name='Tibidabo', address='08035 Barcelona, Spain', notes='Fun day trip!',\n type_id=1)\n category1 = Category(name='Attraction')\n share1 = Share(viewer_id=2, trip_id=1)\n\n db.session.add_all([user1, user2, trip1, entry1, category1, share1])\n db.session.commit()",
"def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame:\n data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question'])\n print(data_frame_pandas)\n return data_frame_pandas",
"def extract_ratings_by_uid(dataset, user_id):\n new_dataset = dataset.filter(lambda x: x[0] != user_id) \\\n .repartition(numPartitions) \\\n .cache()\n\n user_ratings = dataset.filter(lambda x: x[0] == user_id) \\\n .repartition(numPartitions) \\\n .cache()\n\n# debug\n print \"Count of user ratings: \", user_ratings.count()\n\n return new_dataset, user_ratings",
"def tableName():\n return \"people\"",
"def read_data(file_path, sparkContext):\n data_rdd = sparkContext \\\n .textFile(file_path) \\\n .map(eval) \\\n .map(lambda x: (x[0], x[1]))\n return data_rdd",
"def get_data(self):\n\n self.cur.execute('SELECT year, sex, education, score from vocabulary_scores;')\n scores = dict()\n education = dict()\n count = dict()\n\n for row in self.cur :\n if row[0] in scores:\n if row[1] in scores[row[0]]:\n scores[row[0]][row[1]] += int(row[3])\n education[row[0]][row[1]] += int(row[2])\n count[row[0]][row[1]] += 1\n else:\n scores[row[0]][row[1]] = int(row[3])\n education[row[0]][row[1]] = int(row[2])\n count[row[0]][row[1]] = 1\n else:\n # scores[year] = {gender: score}\n scores[row[0]] = {row[1]: int(row[3])}\n education[row[0]] = {row[1]: int(row[2])}\n count[row[0]] = {row[1]: 1}\n\n scores, education = self.average_scores(scores, education, count)\n\n return scores, education",
"def ustvari_tabele(cur):\n cur.execute(\"\"\"\n CREATE TABLE izdelki (\n sifra INTEGER PRIMARY KEY AUTOINCREMENT,\n ime STRING NOT NULL\n UNIQUE,\n zaloga INTEGER NOT NULL\n CHECK (zaloga >= 0),\n opis STRING,\n trenutna_cena DOUBLE\n );\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE narocila (\n st_narocila INTEGER PRIMARY KEY AUTOINCREMENT,\n datum_narocila DATE,\n datum_prejetja DATE,\n partner STRING\n );\n\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE partnerji (\n ddv INTEGER PRIMARY KEY,\n ime STRING,\n naslov STRING,\n drzava STRING\n );\n\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE kosarica (\n st_narocila INTEGER,\n sifra_izdelka INTEGER,\n cena DOUBLE NOT NULL,\n popust DOUBLE,\n kolicina DOUBLE NOT NULL,\n PRIMARY KEY (\n st_narocila,\n sifra_izdelka\n )\n );\n\n \"\"\")\n cur.execute(\"\"\"\n CREATE TABLE ponudba (\n partner INTEGER,\n izdelek INTEGER,\n PRIMARY KEY (\n partner,\n izdelek\n )\n );\n \"\"\")",
"def create_ratings_table(connection):\n table_sql = 'create table ' \\\n 'ratings(id serial primary key, userId integer, movieId integer, ' \\\n 'rating real, timestamp integer) '\n create_table(connection, table_sql)",
"def add_student_data(connection,fname,lname,class_n,marks):\r\n with connection:\r\n connection.execute(INSERT_STUDENT,(fname,lname,class_n,marks))",
"def db_data4test():\n administrators = {\n 'field': ['name', 'password'],\n 'data': [\n ('admin', '123'),\n ]\n }\n\n countries = {\n 'field': 'name',\n 'data': [\n 'China',\n 'India'\n ]\n }\n\n positions = {\n 'field': 'name',\n 'data': [\n 'Software EngineerSystem Analyst',\n 'Business Analyst',\n 'Technical support',\n 'Network Engineer',\n 'Technical Consultant',\n 'Web Developer',\n 'Software Test'\n ]\n }\n\n users = {\n 'field': ['name', 'password'],\n 'data': [\n ('test', '123456'),\n ('test2', '123456'),\n ('test3', '123456')\n ]\n }\n\n user_infos = {\n 'field': [\n 'name', 'first_name', 'last_name', 'position', 'company',\n 'nationality', 'tobe_contacted', 'skills_have', 'skills_learned'\n ],\n 'data': [\n (\n 'test', 'Huang', 'Xiao', 'Business Analyst',\n 'Global Consulting Services', 'China', 1,\n '3months Python Subject',\n 'Advanced Python through on-job training'\n ),\n (\n 'test2', 'Yong', 'Wu', 'Business Analyst',\n 'REA', 'China', 0,\n '3 months Datawarehousing',\n 'Project management skill'\n ),\n ]\n }\n\n return {\n 'administrator': administrators,\n 'country': countries,\n 'position': positions,\n 'user': users,\n 'user_info': user_infos\n }",
"def process_log_data(spark, input_data, output_data):\n # get filepath to log data file\n log_data = input_data + \"log_data/*/*/*.json\"\n\n # read log data file\n df = spark.read.json(log_data)\n \n # print out the schema in tree format\n print(\"---------- Print out the schema of log dataset in tree format: ----------\")\n df.printSchema()\n \n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n\n # extract columns for users table \n # users attributes: user_id, first_name, last_name, gender, level\n users_table = df.select(\"userId\", \"firstName\", \"lastName\", \"gender\", \"level\").distinct()\n \n # show first 10 rows in users table\n print(\" ---------- Show first 10 rows of users table ----------\")\n users_table.show(10)\n \n # write users table to parquet files\n output_data_users = os.path.join(output_data_users, \"users_table.parquet\")\n if path.exists(output_data_users):\n users_table.write.parquet(path = output_data_users, \n mode = \"overwrite\")\n else:\n users_table.write.parquet(path = output_data_users, \n mode = \"append\")\n \n # read parquet file and check the first 10 rows of partitioned parquet dataframes\n df_users_parquet = spark.read.parquet(\"users_table.parquet\")\n print(\" ---------- Show first 10 rows of users table parquet file ----------\")\n df_users_parquet.show(10)\n\n # create datetime column from original timestamp column\n # divide timestamp by 1000 to convert from milliseconds to seconds\n get_datetime = udf(lambda x: datetime.fromtimestamp(x / 1000), TimestampType())\n df = df.withColumn(\"start_time\", get_datetime(df.ts))\n \n # time table attributes: start_time, hour, day, week, month, year, weekday\n get_hour = udf(lambda x: x.hour) \n df = df.withColumn(\"hour\", get_hour(df.start_time)) # create hour column\n \n get_day = udf(lambda x: x.day)\n df = df.withColumn(\"day\", get_day(df.start_time)) # create day column\n \n get_week = udf(lambda x: x.isocalendar()[1])\n df = df.withColumn(\"week\", get_week(df.start_time)) # create week number column\n \n get_month = udf(lambda x: x.month)\n df = df.withColumn(\"month\", get_month(df.start_time)) # create month column\n \n get_year = udf(lambda x: x.year)\n df = df.withColumn(\"year\", get_year(df.start_time)) # create year column\n \n get_weekday = udf(lambda x: x.weekday())\n df = df.withColumn(\"weekday\", get_weekday(df.start_time)) # create weekday column\n \n # extract columns to create time table\n time_table = df.select(df.columns[-7:])\n \n # show first 10 rows of time table \n print(\" ---------- Show first 10 rows of time table ----------\")\n time_table.show(10)\n\n # write time table to parquet files partitioned by year and month\n out_path_time = os.path.join(output_data, \"time_table.parquet\")\n if path.exists(out_path_time):\n time_table.write.parquet(path = out_path_time, \n partitionBy = (\"year\", \"month\"),\n mode = \"overwrite\")\n else:\n time_table.write.parquet(path = out_path_time, \n partitionBy = (\"year\", \"month\"),\n mode = \"append\")\n\n # read parquet file and check the first 10 rows of partitioned parquet dataframes\n df_time_parquet = spark.read.parquet(\"time_table.parquet\")\n print(\" ---------- Show first 10 rows of time table parquet file ----------\")\n df_time_parquet.show(10)\n\n # read in song data to use for songplays table\n song_df = spark.read.parquet(\"songs_table.parquet\")\n \n # inner join df with song_df by song's name\n cond = [df.song == song_df.title]\n df_join = df.join(song_df, cond, \"inner\")\n \n # 
extract columns from joined song and log datasets to create songplays table \n # songplays attributes: songplay_id, start_time, user_id, level, song_id, \n # artist_id, session_id, location, user_agent\n songplays_table = df_join.select(\"start_time\", \"userId\", \"level\", \"song_id\", \n \"artist_id\", \"sessionId\", \"location\", \n \"userAgent\").distinct()\n \n # create songplay_id column with auto_increment\n songplays_table.withColumn(\"songplay_id\", monotonically_increasing_id())\n \n # show first 10 rows of songplays table \n print(\" ---------- Show first 10 rows of songplays table ----------\")\n songplays_table.show(10)\n \n # append year and month column into songplays_table\n songplays_table = songplays_table.withColumn(\"year\", get_year(df.start_time))\n songplays_table = songplays_table.withColumn(\"month\", get_month(df.start_time))\n \n # write songplays table to parquet files partitioned by year and month\n out_path_songplays = os.path.join(output_data, \"songplays_table.parquet\")\n if path.exists(out_path_songplays):\n songplays_table.write.parquet(path = out_path_songplays, \n partitionBy = (\"year\", \"month\"),\n mode = \"overwrite\")\n else:\n songplays_table.write.parquet(path = out_path_songplays, \n partitionBy = (\"year\", \"month\"),\n mode = \"append\")",
"def create_test_and_train_sets(input_file):\n \n input_df = pd.read_csv(input_file, sep=',', header=0)\n df_items = pd.DataFrame({'productId': input_df.productId.unique()})\n df_sorted_items = df_items.sort_values('productId').reset_index()\n pds_items = df_sorted_items.productId\n \n df_user_items = input_df.groupby(['userId', 'productId']).agg({'Expense': 'sum'})\n \n # create a list of (userId, productId, Expense) ratings, where userId and productId are 0-indexed\n current_u = -1\n ux = -1\n pv_ratings = []\n user_ux = []\n \n for timeonpg in df_user_items.itertuples():\n user = timeonpg[0][0]\n item = timeonpg[0][1]\n if user != current_u:\n user_ux.append(user)\n ux += 1\n current_u = user\n ix = pds_items.searchsorted(item)[0]\n pv_ratings.append((ux, ix, timeonpg[1]))\n\n # convert ratings list and user map to np array\n pv_ratings = np.asarray(pv_ratings)\n user_ux = np.asarray(user_ux)\n \n # create train and test coos matrixes\n tr_sparse, test_sparse = _create_sparse_train_and_test(pv_ratings, ux + 1, df_items.size)\n \n return user_ux, pds_items.as_matrix(), tr_sparse, test_sparse"
]
| [
"0.7516853",
"0.5981532",
"0.5914595",
"0.5749075",
"0.5651898",
"0.5564147",
"0.5504629",
"0.5466716",
"0.53867334",
"0.5365914",
"0.53556013",
"0.53233206",
"0.53024125",
"0.52994925",
"0.5294507",
"0.5259196",
"0.5251913",
"0.52423704",
"0.52172804",
"0.52047914",
"0.52028966",
"0.51865286",
"0.5180042",
"0.5173224",
"0.5162959",
"0.5148702",
"0.5146894",
"0.51109546",
"0.50833696",
"0.5082015"
]
| 0.7766232 | 0 |
Modify cherrypy.response status, headers, and body to represent self. CherryPy uses this internally, but you can also use it to create an HTTPRedirect object and set its output without raising the exception. | def set_response(self):
import cherrypy
response = cherrypy.response
response.status = status = self.status
if status in (300, 301, 302, 303, 307):
response.headers['Content-Type'] = "text/html"
# "The ... URI SHOULD be given by the Location field
# in the response."
response.headers['Location'] = self.urls[0]
# "Unless the request method was HEAD, the entity of the response
# SHOULD contain a short hypertext note with a hyperlink to the
# new URI(s)."
msg = {300: "This resource can be found at <a href='%s'>%s</a>.",
301: "This resource has permanently moved to <a href='%s'>%s</a>.",
302: "This resource resides temporarily at <a href='%s'>%s</a>.",
303: "This resource can be found at <a href='%s'>%s</a>.",
307: "This resource has moved temporarily to <a href='%s'>%s</a>.",
}[status]
response.body = "<br />\n".join([msg % (u, u) for u in self.urls])
# Previous code may have set C-L, so we have to reset it
# (allow finalize to set it).
response.headers.pop('Content-Length', None)
elif status == 304:
# Not Modified.
# "The response MUST include the following header fields:
# Date, unless its omission is required by section 14.18.1"
# The "Date" header should have been set in Response.__init__
# "...the response SHOULD NOT include other entity-headers."
for key in ('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Location', 'Content-MD5',
'Content-Range', 'Content-Type', 'Expires',
'Last-Modified'):
if key in response.headers:
del response.headers[key]
# "The 304 response MUST NOT contain a message-body."
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
elif status == 305:
# Use Proxy.
# self.urls[0] should be the URI of the proxy.
response.headers['Location'] = self.urls[0]
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
else:
raise ValueError("The %s status code is unknown." % status) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_response(self):\r\n import cherrypy\r\n \r\n response = cherrypy.response\r\n \r\n clean_headers(self.status)\r\n \r\n # In all cases, finalize will be called after this method,\r\n # so don't bother cleaning up response values here.\r\n response.status = self.status\r\n tb = None\r\n if cherrypy.request.show_tracebacks:\r\n tb = format_exc()\r\n response.headers['Content-Type'] = \"text/html\"\r\n \r\n content = self.get_error_page(self.status, traceback=tb,\r\n message=self._message)\r\n response.body = content\r\n response.headers['Content-Length'] = len(content)\r\n \r\n _be_ie_unfriendly(self.status)",
"def process_response(self, response):\n self.set_status(response.code) # code\n for name, value in response.headers.items(): # headers except restricted\n if name.lower() not in self.RESTRICTED_HEADERS:\n self.set_header(name, value)\n if response.code not in self.RESTRICT_SEND_BODY_ON_CODE: # body\n self.write(response.body)",
"def startResponse(self, status, headers, excInfo=None):\r\n if self.started and excInfo is not None:\r\n raise excInfo[0], excInfo[1], excInfo[2]\r\n self.status = status\r\n self.headers = headers\r\n self.reactor.callInThread(\r\n responseInColor, self.request, status, headers\r\n )\r\n return self.write",
"def _finalize_response(self, response):\n\n res = HttpResponse(content=response.content,\n content_type=self._get_content_type())\n # status_code is set separately to allow zero\n res.status_code = response.code\n return res",
"def _render(cls, request, code, ctype, msg):\r\n request.setResponseCode(code)\r\n request.setHeader('content-type', ctype)\r\n request.write(msg)\r\n request.finish()",
"def challenge(self, request, response, **kw):\n response.setStatus('200')\n response.setHeader('Content-Type', 'text/html')\n response.setBody(self.body)\n\n # Keep HTTPResponse.exception() from further writing on the\n # response body, without using HTTPResponse.write()\n response._locked_status = True\n response.setBody = self._setBody # Keep response.exception\n return True",
"def start_response_wrapper(self, status, response_headers, exc_info=None):\n response_headers = response_headers + self.response_headers\n return self.start_response(status, response_headers, exc_info)",
"def render(self):\n\n # If the content type is not specified, we set\n # it to text/html as the default\n if 'content-type' not in map(lambda x:x.lower(), self.headers):\n self.headers['Content-Type'] = 'text/html'\n\n # Set headers as list of tuples\n self.headers = [(k, v) for k, v in self.headers.items()]\n\n # httplib.responses maps the HTTP 1.1 status codes to W3C names.\n # Output example: '200 OK' or '404 Not Found'\n resp_code = '{} {}'.format(self.code, httplib.responses[self.code])\n\n if str(self.code)[0] in ['4', '5'] and not self.data:\n self.make_response(resp_code, self.headers)\n return resp_code.encode('utf-8')\n\n try:\n data = bytes(self.data).encode('utf-8')\n except UnicodeDecodeError:\n data = bytes(self.data)\n \n self.make_response(resp_code, self.headers)\n return data",
"def process_raw_response(self):\n non_excepts = self.non_exceptionals\n raw = self.raw_response\n\n #if the raw respones is an urllib2 error act accordingly.\n if isinstance(raw, non_excepts):\n self.error = raw\n if isinstance(raw, HTTPError):\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n else:\n #its a url error nothing to do\n pass\n\n else:\n #only urllib.addinfourl type should be now be possible\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n self.body = \"\".join(raw.readlines())",
"def start_response(self, status, headers, exc_info = None):\r\n if self.started_response:\r\n if not exc_info:\r\n raise AssertionError(\"WSGI start_response called a second \"\r\n \"time with no exc_info.\")\r\n else:\r\n try:\r\n raise exc_info[0], exc_info[1], exc_info[2]\r\n finally:\r\n exc_info = None\r\n self.started_response = True\r\n self.status = status\r\n self.outheaders.extend(headers)\r\n return self.write",
"def to_response(self):\n return make_response(self.res, self.status)",
"def to_response(self):\n return make_response(self.res, self.status)",
"def process_response(self, request, response):\n return response",
"def process_response(self, request, response):\n return response",
"def make_response(self, rv):\n status_or_headers = headers = None\n if isinstance(rv, tuple):\n rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))\n\n if rv is None:\n raise ValueError('View function did not return a response')\n\n if isinstance(status_or_headers, (dict, list)):\n headers, status_or_headers = status_or_headers, None\n\n if not isinstance(rv, self.response_class):\n if isinstance(rv, six.text_type):\n rv = self.response_class(rv, status=status_or_headers)\n else:\n raise ValueError('Content must be a string')\n\n if status_or_headers is not None:\n if isinstance(status_or_headers, six.text_type):\n # FIXME: I'm pretty sure Django's reason_phrase is *just* the\n # 'OK' in '200 OK', whereas Flask allows passing '200 OK'\n rv.reason_phrase = status_or_headers\n else:\n rv.status = status_or_headers\n\n if headers:\n # HttpResponse doesn't take a headers kwarg, so we must set each\n # header manually with rv[header] = value\n if isinstance(headers, dict):\n headers_iter = six.iteritems(headers)\n elif isinstance(headers, list):\n headers_iter = headers\n else:\n raise ValueError('headers must be dict, list, or None')\n\n for header, value in headers_iter:\n rv[header] = value\n\n return rv",
"def render_response(self, context, result):\n\t\tcontext.response = result\n\t\treturn True",
"def redirect(cls, dest, code = 302):\r\n dest = cls.format_output_url(dest)\r\n c.response.headers['Location'] = dest\r\n c.response.status_code = code\r\n return c.response",
"def __response__(self) -> requests.Response:\n return self._self_response",
"def process_response(self, response):\n return response",
"def redirect( self, url, code = 303):\n self.res.status = code\n self.res.location = url\n self.res.content_type = 'text/html'\n self.res.content_length = None\n self.start_response(self.res.status, self.res.headerlist)\n return ['']",
"def Proxy(self, status, headers, exc_info=None):\n self.call_context['status'] = status\n self.call_context['headers'] = headers\n self.call_context['exc_info'] = exc_info\n\n return self.body_buffer.write",
"def create_response(self, status, statusmsg, body):\n self.response.setStatus(status, statusmsg)\n return body",
"def encode(self, response):\n encode_as = response.whichEncoding()\n if encode_as == ENCODE_KVFORM:\n wr = self.responseFactory(body=response.encodeToKVForm())\n if isinstance(response, Exception):\n wr.code = HTTP_ERROR\n elif encode_as == ENCODE_URL:\n location = response.encodeToURL()\n wr = self.responseFactory(\n code=HTTP_REDIRECT, headers={'location': location})\n elif encode_as == ENCODE_HTML_FORM:\n wr = self.responseFactory(code=HTTP_OK, body=response.toHTML())\n else:\n # Can't encode this to a protocol message. You should probably\n # render it to HTML and show it to the user.\n raise EncodingError(response)\n return wr",
"def response_handling(self) -> global___Snippet.SimpleResponseHandling:",
"def response_handling(self) -> global___Snippet.SimpleResponseHandling:",
"def _handle_response(self, response):\n self.client.status = response.code\n self.response_headers = headers = response.headers\n # XXX This workaround (which needs to be improved at that) for possible\n # bug in Twisted with new client:\n # http://twistedmatrix.com/trac/ticket/5476\n if self._method.upper() == 'HEAD' or response.code == NO_CONTENT:\n return succeed('')\n receiver = self.receiver_factory()\n receiver.finished = d = Deferred()\n receiver.content_length = response.length\n response.deliverBody(receiver)\n if response.code >= 400:\n d.addCallback(self._fail_response, response)\n return d",
"def send_response(self, status, body=''):\n\n logger.debug(\"Sending cap response to viewer: Status:%s Body:%s\" % (status, body))\n\n self.response.status = status\n self.response.body = body\n return self.response(self.environ, self.start)",
"def finalize_response(self, response):\n if self.request.is_ajax() and response.status_code == 302:\n if self.ajax_catch_redirects:\n return http.HttpResponse(\n json.dumps(\n {\n 'redirect': response['location'],\n 'result': self.result_text,\n }\n ),\n content_type='application/json',\n )\n return response",
"def response_handling(self) -> global___Snippet.StreamingResponseHandling:",
"def response_handling(self) -> global___Snippet.StreamingResponseHandling:"
]
| [
"0.80094314",
"0.64838946",
"0.64483994",
"0.64255804",
"0.64048517",
"0.6346558",
"0.6325356",
"0.62837017",
"0.6198728",
"0.618486",
"0.609106",
"0.609106",
"0.6029773",
"0.6029773",
"0.60140204",
"0.59765",
"0.597243",
"0.5957101",
"0.59427726",
"0.5942422",
"0.5940268",
"0.5928732",
"0.5919371",
"0.58641887",
"0.58641887",
"0.58290106",
"0.5818014",
"0.5803697",
"0.5794678",
"0.5794678"
]
| 0.79674995 | 1 |
Remove any headers which should not apply to an error response. | def clean_headers(status):
import cherrypy
response = cherrypy.response
# Remove headers which applied to the original content,
# but do not apply to the error page.
respheaders = response.headers
for key in ["Accept-Ranges", "Age", "ETag", "Location", "Retry-After",
"Vary", "Content-Encoding", "Content-Length", "Expires",
"Content-Location", "Content-MD5", "Last-Modified"]:
if respheaders.has_key(key):
del respheaders[key]
if status != 416:
# A server sending a response with status code 416 (Requested
# range not satisfiable) SHOULD include a Content-Range field
# with a byte-range-resp-spec of "*". The instance-length
# specifies the current length of the selected resource.
# A response with status code 206 (Partial Content) MUST NOT
# include a Content-Range field with a byte-range- resp-spec of "*".
if respheaders.has_key("Content-Range"):
del respheaders["Content-Range"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DeleteResponseHeader(self, name):\n assert name.islower()\n self._wpr_response.original_headers = \\\n [x for x in self._wpr_response.original_headers if x[0].lower() != name]",
"def get204(self):\n bad = ('content-length', 'content-type')\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 204\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)",
"def del_header(self, name):\n key = name.upper()\n if key not in _RESPONSE_HEADER_DICT:\n key = name\n if key in self._headers:\n del self._headers[key]",
"def sanitize_headers(headers):\n auth_header = headers.pop(\"Authorization\", None)\n if auth_header:\n _logger.warning(\n f\"Possible fraud: Authorization header was set to {auth_header}\"\n )\n userinfo_header = headers.pop(\"X-Userinfo\", None)\n if userinfo_header:\n _logger.warning(\n f\"Possible fraud: X-Userinfo header was set to {userinfo_header}\"\n )",
"def delete_headers(self, ):\n if self.AttributeNames.HEADERS in self.attrs:\n del self.attrs[self.AttributeNames.HEADERS]\n return self",
"def clear_headers(self):\r\n\r\n # Remove things from the old dict as well\r\n self.reply_headers.clear()\r\n\r\n self.__reply_header_list[:] = []",
"def RemoveResponseHeaderDirectives(self, name, directives_blacklist):\n response_headers = self.GetResponseHeadersDict()\n if name not in response_headers:\n return\n new_value = []\n for header_name in response_headers[name].split(','):\n if header_name.strip().lower() not in directives_blacklist:\n new_value.append(header_name)\n if new_value:\n self.SetResponseHeader(name, ','.join(new_value))\n else:\n self.DeleteResponseHeader(name)",
"def scrub_headers(self, header_dict):\n return self.__headers_scrubber(header_dict)",
"def fix_deletion_http_response_HttpResponseBase_private_headers(utils):\n from django.http.response import HttpResponseBase\n\n @property\n def _headers(self):\n return self.headers._store # Maps lower_case to (normal_case, value)\n\n utils.inject_attribute(HttpResponseBase, \"_headers\", _headers)",
"def get304(self):\n bad = ('allow', 'content-encoding', 'content-language',\n 'content-length', 'content-md5', 'content-range',\n 'content-type', 'last-modified') # + c-location, expires?\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 304\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):",
"def check_no_header_response(response: HTTPResponse) -> bool:\n return response.status_code == 422",
"def scrub_headers(headers):\n if isinstance(headers, dict):\n headers = headers.items()\n headers = [\n (parse_header_string(key), parse_header_string(val))\n for (key, val) in headers\n ]\n if not logger_settings.get('redact_sensitive_headers', True):\n return dict(headers)\n if logger_settings.get('reveal_sensitive_prefix', 16) < 0:\n logger_settings['reveal_sensitive_prefix'] = 16\n return {key: safe_value(key, val) for (key, val) in headers}",
"def forget(self, request):\n return self._get_challenge_headers(request, check_stale=False)",
"def add_headers(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n return r",
"def response_headers():\n # Pending swaggerUI update\n # https://github.com/swagger-api/swagger-ui/issues/3850\n headers = MultiDict(request.args.items(multi=True))\n response = jsonify(list(headers.lists()))\n\n while True:\n original_data = response.data\n d = {}\n for key in response.headers.keys():\n value = response.headers.get_all(key)\n if len(value) == 1:\n value = value[0]\n d[key] = value\n response = jsonify(d)\n for key, value in headers.items(multi=True):\n response.headers.add(key, value)\n response_has_changed = response.data != original_data\n if not response_has_changed:\n break\n return response",
"def process_response(self, response):\n self.set_status(response.code) # code\n for name, value in response.headers.items(): # headers except restricted\n if name.lower() not in self.RESTRICTED_HEADERS:\n self.set_header(name, value)\n if response.code not in self.RESTRICT_SEND_BODY_ON_CODE: # body\n self.write(response.body)",
"def deauthorize():\n\tPAYLOAD_HEADERS.pop('Authorization', None)",
"def process_request_headers(request):\n request.headers.setdefault('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/51.0.2704.103 Safari/537.36')\n if 'redirect_urls' not in request.meta:\n request.headers['Referer'] = None",
"def write_missing_duplicated_headers(self, missing_headers, duplicated_headers, bucket_name, error_filename):\n with self.get_writer(bucket_name, error_filename, self.header_report_headers, self.is_local) as writer:\n for header in duplicated_headers:\n writer.write([\"Duplicated header\", header])\n for header in missing_headers:\n writer.write([\"Missing header\", header])\n writer.finish_batch()",
"def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response",
"def _setHeaders(self):\r\n if not self.headers_set:\r\n self.headers_set = 1\r\n for key in self.headers_out.keys():\r\n self._response.setHeader(key, self.headers_out[key])\r\n self._response.setContentType(self.content_type)",
"def missing_header_fields():\n auth_token = get_auth_token()\n\n headers = '{\"Host\": \"$host\",\"Date\": \"DATE\",'\n headers += '\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, token=auth_token)",
"def _filter_headers(self):\n return [\"Authorization\"]",
"def test_response_exempt(self):\n\n def xframe_exempt_response(request):\n response = HttpResponse()\n response.xframe_options_exempt = True\n return response\n\n def xframe_not_exempt_response(request):\n response = HttpResponse()\n response.xframe_options_exempt = False\n return response\n\n with override_settings(X_FRAME_OPTIONS=\"SAMEORIGIN\"):\n r = XFrameOptionsMiddleware(xframe_not_exempt_response)(HttpRequest())\n self.assertEqual(r.headers[\"X-Frame-Options\"], \"SAMEORIGIN\")\n\n r = XFrameOptionsMiddleware(xframe_exempt_response)(HttpRequest())\n self.assertIsNone(r.headers.get(\"X-Frame-Options\"))",
"def response_headers(self, extra_headers=None):\n headers_copy = self.headers.copy()\n\n if extra_headers:\n headers_copy.update(extra_headers)\n\n headers = \"\"\n\n for h in headers_copy:\n headers += \"%s: %s\\r\\n\" % (h, headers_copy[h])\n\n return headers.encode()",
"def trim_headers(all_headers, relevant_headers=[\"From\", \"To\", \"Subject\", \"Date\"]):\n data = {}\n for header in all_headers:\n if header['name'] in relevant_headers:\n data[header['name']] = header['value']\n\n return data",
"def add_custom_headers(self, headers):\n headers_to_remove = [x for x in headers if x.lower() in [y.lower() for y in self.headers]]\n for header in headers_to_remove:\n headers.pop(header, None)\n headers.update(self.headers)"
]
| [
"0.70664626",
"0.6766832",
"0.67029774",
"0.65894616",
"0.65733063",
"0.6347384",
"0.62985134",
"0.62751406",
"0.6233765",
"0.620523",
"0.62022334",
"0.62022334",
"0.61543894",
"0.6121957",
"0.60959095",
"0.59708506",
"0.59368455",
"0.5910138",
"0.5903606",
"0.5784915",
"0.5726826",
"0.5704613",
"0.5697093",
"0.56916493",
"0.5679225",
"0.5672053",
"0.56537366",
"0.5646113",
"0.5626942",
"0.5616227"
]
| 0.7672321 | 0 |
Modify cherrypy.response status, headers, and body to represent self. CherryPy uses this internally, but you can also use it to create an HTTPError object and set its output without raising the exception. | def set_response(self):
import cherrypy
response = cherrypy.response
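        # Strip entity headers left over from the original response; they do not apply to the error page (see clean_headers above)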
clean_headers(self.status)
# In all cases, finalize will be called after this method,
# so don't bother cleaning up response values here.
response.status = self.status
tb = None
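        # Only capture a traceback when show_tracebacks is enabled for this request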
if cherrypy.request.show_tracebacks:
tb = format_exc()
response.headers['Content-Type'] = "text/html"
content = self.get_error_page(self.status, traceback=tb,
message=self._message)
response.body = content
response.headers['Content-Length'] = len(content)
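        # IE replaces short error bodies with its own "friendly" pages, so pad the response to a minimum size if needed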
_be_ie_unfriendly(self.status) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_response(self):\r\n import cherrypy\r\n response = cherrypy.response\r\n response.status = status = self.status\r\n \r\n if status in (300, 301, 302, 303, 307):\r\n response.headers['Content-Type'] = \"text/html\"\r\n # \"The ... URI SHOULD be given by the Location field\r\n # in the response.\"\r\n response.headers['Location'] = self.urls[0]\r\n \r\n # \"Unless the request method was HEAD, the entity of the response\r\n # SHOULD contain a short hypertext note with a hyperlink to the\r\n # new URI(s).\"\r\n msg = {300: \"This resource can be found at <a href='%s'>%s</a>.\",\r\n 301: \"This resource has permanently moved to <a href='%s'>%s</a>.\",\r\n 302: \"This resource resides temporarily at <a href='%s'>%s</a>.\",\r\n 303: \"This resource can be found at <a href='%s'>%s</a>.\",\r\n 307: \"This resource has moved temporarily to <a href='%s'>%s</a>.\",\r\n }[status]\r\n response.body = \"<br />\\n\".join([msg % (u, u) for u in self.urls])\r\n # Previous code may have set C-L, so we have to reset it\r\n # (allow finalize to set it).\r\n response.headers.pop('Content-Length', None)\r\n elif status == 304:\r\n # Not Modified.\r\n # \"The response MUST include the following header fields:\r\n # Date, unless its omission is required by section 14.18.1\"\r\n # The \"Date\" header should have been set in Response.__init__\r\n \r\n # \"...the response SHOULD NOT include other entity-headers.\"\r\n for key in ('Allow', 'Content-Encoding', 'Content-Language',\r\n 'Content-Length', 'Content-Location', 'Content-MD5',\r\n 'Content-Range', 'Content-Type', 'Expires',\r\n 'Last-Modified'):\r\n if key in response.headers:\r\n del response.headers[key]\r\n \r\n # \"The 304 response MUST NOT contain a message-body.\"\r\n response.body = None\r\n # Previous code may have set C-L, so we have to reset it.\r\n response.headers.pop('Content-Length', None)\r\n elif status == 305:\r\n # Use Proxy.\r\n # self.urls[0] should be the URI of the proxy.\r\n response.headers['Location'] = self.urls[0]\r\n response.body = None\r\n # Previous code may have set C-L, so we have to reset it.\r\n response.headers.pop('Content-Length', None)\r\n else:\r\n raise ValueError(\"The %s status code is unknown.\" % status)",
"def process_raw_response(self):\n non_excepts = self.non_exceptionals\n raw = self.raw_response\n\n #if the raw respones is an urllib2 error act accordingly.\n if isinstance(raw, non_excepts):\n self.error = raw\n if isinstance(raw, HTTPError):\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n else:\n #its a url error nothing to do\n pass\n\n else:\n #only urllib.addinfourl type should be now be possible\n self.status_code = raw.code\n self.headers = dict(raw.headers)\n self.body = \"\".join(raw.readlines())",
"def _finalize_response(self, response):\n\n res = HttpResponse(content=response.content,\n content_type=self._get_content_type())\n # status_code is set separately to allow zero\n res.status_code = response.code\n return res",
"def __init__(self, msg):\n\n super(HTTPError, self).__init__(msg)\n self.msg = msg",
"def __init__(self, exception):\n self.wrapped_exc = exception\n for key, value in self.wrapped_exc.headers.items():\n self.wrapped_exc.headers[key] = str(value)\n self.status_int = exception.status_int",
"def __init__(self, reason=None):\n self.reason = reason\n super(HTTPError, self).__init__(reason)",
"def make_response(self, rv):\n status_or_headers = headers = None\n if isinstance(rv, tuple):\n rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))\n\n if rv is None:\n raise ValueError('View function did not return a response')\n\n if isinstance(status_or_headers, (dict, list)):\n headers, status_or_headers = status_or_headers, None\n\n if not isinstance(rv, self.response_class):\n if isinstance(rv, six.text_type):\n rv = self.response_class(rv, status=status_or_headers)\n else:\n raise ValueError('Content must be a string')\n\n if status_or_headers is not None:\n if isinstance(status_or_headers, six.text_type):\n # FIXME: I'm pretty sure Django's reason_phrase is *just* the\n # 'OK' in '200 OK', whereas Flask allows passing '200 OK'\n rv.reason_phrase = status_or_headers\n else:\n rv.status = status_or_headers\n\n if headers:\n # HttpResponse doesn't take a headers kwarg, so we must set each\n # header manually with rv[header] = value\n if isinstance(headers, dict):\n headers_iter = six.iteritems(headers)\n elif isinstance(headers, list):\n headers_iter = headers\n else:\n raise ValueError('headers must be dict, list, or None')\n\n for header, value in headers_iter:\n rv[header] = value\n\n return rv",
"def _handle_response(self, response):\n if response.status_code >= 500:\n raise ServerError(response.content, response.status_code)\n elif response.status_code >= 300:\n raise ClientError(response.json(), response.status_code)\n\n return Response(response)",
"def render(self):\n\n # If the content type is not specified, we set\n # it to text/html as the default\n if 'content-type' not in map(lambda x:x.lower(), self.headers):\n self.headers['Content-Type'] = 'text/html'\n\n # Set headers as list of tuples\n self.headers = [(k, v) for k, v in self.headers.items()]\n\n # httplib.responses maps the HTTP 1.1 status codes to W3C names.\n # Output example: '200 OK' or '404 Not Found'\n resp_code = '{} {}'.format(self.code, httplib.responses[self.code])\n\n if str(self.code)[0] in ['4', '5'] and not self.data:\n self.make_response(resp_code, self.headers)\n return resp_code.encode('utf-8')\n\n try:\n data = bytes(self.data).encode('utf-8')\n except UnicodeDecodeError:\n data = bytes(self.data)\n \n self.make_response(resp_code, self.headers)\n return data",
"def process_response(self, response):\n self.set_status(response.code) # code\n for name, value in response.headers.items(): # headers except restricted\n if name.lower() not in self.RESTRICTED_HEADERS:\n self.set_header(name, value)\n if response.code not in self.RESTRICT_SEND_BODY_ON_CODE: # body\n self.write(response.body)",
"def __init__(self, content, status):\n Exception.__init__(self)\n self.status = status\n self.content = content",
"def get_error_response():\n response = HTTPResponse.HTTPResponse(version=1.0, status_code=500,\n phrase=\"Internal Error\")\n headers = HTTPHeaders.HTTPHeaders()\n add_default_headers(headers)\n headers[\"Content-Length\"] = str(0)\n headers[\"Connection\"] = \"close\"\n response.set_headers(headers)\n\n return response.build_response()",
"def startResponse(self, status, headers, excInfo=None):\r\n if self.started and excInfo is not None:\r\n raise excInfo[0], excInfo[1], excInfo[2]\r\n self.status = status\r\n self.headers = headers\r\n self.reactor.callInThread(\r\n responseInColor, self.request, status, headers\r\n )\r\n return self.write",
"def __init__(self, status_code='', resp_content=''):\n if resp_content:\n message = \"Supplai server returned [{}] status code\".format(\n status_code)\n\n if \"errorCode\" in resp_content:\n message += \", and API returned [{}] error code\".format(\n resp_content[\"errorCode\"])\n\n if \"errorMessage\" in resp_content:\n message += \", with message: ({})\".format(\n resp_content[\"errorMessage\"])\n\n else:\n message = \"Supplai server returned a internal server error\"\n\n super().__init__(message)",
"def __init__(self, result, data='', msg='', safe=True, status=200):\n\n content = {'code': result, 'data': data, 'msg': msg}\n super(ResultResponse, self).__init__(content, status=status, safe=safe)",
"def _handle_response(self, response):\n self.client.status = response.code\n self.response_headers = headers = response.headers\n # XXX This workaround (which needs to be improved at that) for possible\n # bug in Twisted with new client:\n # http://twistedmatrix.com/trac/ticket/5476\n if self._method.upper() == 'HEAD' or response.code == NO_CONTENT:\n return succeed('')\n receiver = self.receiver_factory()\n receiver.finished = d = Deferred()\n receiver.content_length = response.length\n response.deliverBody(receiver)\n if response.code >= 400:\n d.addCallback(self._fail_response, response)\n return d",
"def to_response(self):\n return make_response(self.res, self.status)",
"def to_response(self):\n return make_response(self.res, self.status)",
"def challenge(self, request, response, **kw):\n response.setStatus('200')\n response.setHeader('Content-Type', 'text/html')\n response.setBody(self.body)\n\n # Keep HTTPResponse.exception() from further writing on the\n # response body, without using HTTPResponse.write()\n response._locked_status = True\n response.setBody = self._setBody # Keep response.exception\n return True",
"def start_response_wrapper(self, status, response_headers, exc_info=None):\n response_headers = response_headers + self.response_headers\n return self.start_response(status, response_headers, exc_info)",
"def __init__(self, content: Union[dict, list, tuple], status_code: int = 200):\n self.status_code = HTTPStatus(status_code)\n try:\n self.content = dumps(content).encode()\n except TypeError:\n raise ValueError(\"Response content must be JSON serializable\")\n\n self.headers = Headers()\n self.headers.add_header(\n \"content-type\", f\"{self.media_type}; charset={self.charset}\"\n )\n self.headers.add_header(\"content-length\", str(len(self.content)))",
"def do(self, *args, **kwargs):\n try:\n return super().do(*args, **kwargs)\n except ResponseError as e:\n self.handle_error(e)",
"def __init__(self, content = None, *args, **kwargs):\n super(Response, self).__init__(content, *args, **kwargs)",
"def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp",
"def from_response(response, body=None):\n cls = _code_map.get(response.status, HTTPException)\n if body:\n details = body.replace('\\n\\n', '\\n')\n return cls(details=details)\n\n return cls()",
"def __call__(self, status_code, headers, body):\n self.result_obj['status_code'] = status_code\n self.result_obj['headers'] = dict(headers)\n self.result_obj['body'] = body\n \n return self.result_obj",
"def __call__(self, status_code, headers, body):\n self.result_obj['status_code'] = status_code\n self.result_obj['headers'] = dict(headers)\n self.result_obj['body'] = body\n \n return self.result_obj",
"def create_response(self, status, statusmsg, body):\n self.response.setStatus(status, statusmsg)\n return body",
"def _render(cls, request, code, ctype, msg):\r\n request.setResponseCode(code)\r\n request.setHeader('content-type', ctype)\r\n request.write(msg)\r\n request.finish()",
"def __init__(self, status_code, response_obj):\r\n self._status_code = status_code\r\n self._response_error = \"\"\r\n self._original_response = response_obj\r\n \r\n if type(response_obj) is bytes:\r\n response_obj = response_obj.decode(\"utf-8\", errors='ignore')\r\n\r\n if type(response_obj) is str:\r\n try:\r\n self._response = json.loads(response_obj)\r\n except:\r\n self._response = response_obj\r\n elif type(response_obj) is dict:\r\n try:\r\n self._response = json.loads(json.dumps(response_obj))\r\n except:\r\n self._response = response_obj\r\n else:\r\n self._response = response_obj"
]
| [
"0.7279636",
"0.6841792",
"0.6596323",
"0.6478856",
"0.63977695",
"0.6384832",
"0.63642305",
"0.6360515",
"0.6346353",
"0.6292077",
"0.62432635",
"0.6235534",
"0.62161624",
"0.61403537",
"0.6117794",
"0.6098712",
"0.60923874",
"0.60923874",
"0.60763764",
"0.60666084",
"0.6066026",
"0.6033722",
"0.60321844",
"0.6030375",
"0.60243195",
"0.60242283",
"0.60242283",
"0.60132515",
"0.5976253",
"0.5974311"
]
| 0.79920655 | 0 |
Return an HTML page, containing a pretty error response. status should be an int or a str. kwargs will be interpolated into the page template. | def get_error_page(status, **kwargs):
import cherrypy
try:
code, reason, message = _http.valid_status(status)
except ValueError, x:
raise cherrypy.HTTPError(500, x.args[0])
# We can't use setdefault here, because some
# callers send None for kwarg values.
if kwargs.get('status') is None:
kwargs['status'] = "%s %s" % (code, reason)
if kwargs.get('message') is None:
kwargs['message'] = message
if kwargs.get('traceback') is None:
kwargs['traceback'] = ''
if kwargs.get('version') is None:
kwargs['version'] = cherrypy.__version__
for k, v in kwargs.iteritems():
if v is None:
kwargs[k] = ""
else:
kwargs[k] = _escape(kwargs[k])
# Use a custom template or callable for the error page?
pages = cherrypy.request.error_page
error_page = pages.get(code) or pages.get('default')
if error_page:
try:
if callable(error_page):
return error_page(**kwargs)
else:
return file(error_page, 'rb').read() % kwargs
except:
e = _format_exception(*_exc_info())[-1]
m = kwargs['message']
if m:
m += "<br />"
m += "In addition, the custom error page failed:\n<br />%s" % e
kwargs['message'] = m
return _HTTPErrorTemplate % kwargs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):\n\n return self._render(\n request = request,\n template = str(status),\n status = status,\n context = {\n 'error': kwargs\n },\n headers = headers,\n prefix_template_path = prefix_template_path\n )",
"def error_page(self, error_message: str, status: int = 400):\n self.set_status(status)\n self.render('error.jinja2', error=error_message)",
"def error(status_code, status_message=None,\n content_type='text/plain; charset=utf-8',\n headers=None, content=None):\n if status_message is None:\n status_message = httplib.responses.get(status_code, 'Unknown Error')\n\n if content is None:\n content = status_message\n\n content = util.pad_string(content)\n\n return static_page(content,\n status=(status_code, status_message),\n content_type=content_type,\n headers=headers)",
"def write_error(self, status_code, **kwargs):\n reason = \"Unknown Error\"\n\n # Get information about the triggered exception\n self.application.gs_globals[\"exception_fulltext\"] = repr(sys.exc_info())\n\n # Get the status code and error reason\n if status_code in list(ERROR_CODES):\n reason = ERROR_CODES[status_code]\n try:\n if \"exc_info\" in kwargs:\n _, error, _ = kwargs[\"exc_info\"]\n reason = error.reason\n except AttributeError:\n pass\n\n # Return JSON if this is an API call\n if \"/api/v1/\" in self.request.uri:\n jsondict = {\n \"page_title\": \"Error {}: {}\".format(status_code, reason),\n \"error_status\": status_code,\n \"error_reason\": reason,\n \"error_exception\": self.application.gs_globals[\"exception_fulltext\"],\n }\n self.set_header(\"Content-type\", \"application/json\")\n self.write(json.dumps(jsondict))\n\n # Render the error template\n else:\n t = self.application.loader.load(\"error_page.html\")\n self.write(\n t.generate(\n gs_globals=self.application.gs_globals,\n status=status_code,\n reason=reason,\n user=self.get_current_user(),\n )\n )",
"def error_page(e):\n \n return render_template('error-page.html'), 404",
"def error(status, msg, *args):\n logger.error(msg.format(*args))\n return web.Response(status=status, text=msg.format(*args))",
"def error(message, code=400):\n return render_template(\"error.html\", top=code, bottom=message)",
"def error_view_handler(request, exception, status):\n if status not in [400, 403, 404, 500]:\n status = 500\n\n return render(\n request,\n template_name=\"richie/error.html\",\n status=status,\n context={\n \"error\": exception,\n \"status\": status,\n \"title\": CONTEXT_ERRORS[status][\"title\"],\n \"content\": CONTEXT_ERRORS[status][\"content\"],\n },\n )",
"def page_error(e):\n\n return render_template('404.html')",
"def make_error( title, *args, **kwargs ):\n blocks = list()\n blocks.append( '<h1>{}</h1>'.format( title ) )\n if args:\n blocks.append( '<h4>{}</h4>'.format( args[ 0 ] ) )\n for arg in args[ 1 : ]:\n blocks.append( '<p>{}</p>'.format( arg ) )\n if kwargs:\n dl = list()\n for key, value in kwargs.items():\n dl.append( '<dt>{}</dt><dd>{}</dd>'.format( key, value ) )\n blocks.append( '<dl>\\n{}\\n</dl>'.format( '\\n'.join( dl ) ) )\n return _html.format(\n title = title,\n head = '',\n body = '\\n'.join( blocks )\n )",
"def key_error_page(e):\n return render_template(\"index.html\", error=e), 500",
"def error():\n return render_template(\"error.html\", **locals())",
"def bad_request(e):\n return render_template(\"400.html\", page_title=400)",
"def error():\n return render_template(\"404.html\")",
"def write_error(self, status_code, **kwargs):\n\n self.set_header(\"content-type\", \"text/plain; charset=UTF-8\")\n if status_code == 400:\n self.write(\n f\"HTTP {status_code}: Could not service this request \"\n f\"because of invalid request parameters.\"\n )\n elif status_code == 401:\n self.write(\n f\"HTTP {status_code}: Could not service this request \"\n f\"because of invalid request authentication token or \"\n f\"violation of host restriction.\"\n )\n elif status_code == 429:\n self.set_header(\"Retry-After\", \"180\")\n self.write(\n f\"HTTP {status_code}: Could not service this request \"\n f\"because the set rate limit has been exceeded.\"\n )\n else:\n self.write(f\"HTTP {status_code}: Could not service this request.\")\n\n if not self._finished:\n self.finish()",
"def renderHTTP_exception(request, failure):",
"def error(errornum):\n return render_template('error.html', errornum=errornum)",
"def httperror( status_code=500, message=b'' ):",
"def render_error(self, template, *args, **kwargs):\n self._render(template, sys.stderr, *args, **kwargs)",
"def send_error(text, status='500 Internal Server Error'):\n print 'Status: %s' % status\n print 'Content-type: text/plain'\n print\n print text\n sys.exit(0)",
"def error_handler(result_code, resp):\n if result_code == 1:\n return render_template(\n \"error.html\", error=resp[\"error\"]\n )\n elif result_code == 2:\n return render_template(\n \"rate_exceed.html\", seconds=resp[\"retry_after\"]\n )\n elif result_code == 3:\n return render_template(\n \"not_found.html\"\n )\n elif result_code == 4:\n return render_template(\n \"service_unavailable.html\", seconds=resp[\"retry_after\"]\n )\n else:\n return render_template(\n \"error.html\", error=resp[\"error\"]\n )",
"def assertErrorPage(self, status, message=None, pattern=''):\n\n # This will never contain a traceback\n page = cherrypy._cperror.get_error_page(status, message=message)\n\n # First, test the response body without checking the traceback.\n # Stick a match-all group (.*) in to grab the traceback.\n def esc(text):\n return re.escape(ntob(text))\n epage = re.escape(page)\n epage = epage.replace(\n esc('<pre id=\"traceback\"></pre>'),\n esc('<pre id=\"traceback\">') + b'(.*)' + esc('</pre>'))\n m = re.match(epage, self.body, re.DOTALL)\n if not m:\n self._handlewebError(\n 'Error page does not match; expected:\\n' + page)\n return\n\n # Now test the pattern against the traceback\n if pattern is None:\n # Special-case None to mean that there should be *no* traceback.\n if m and m.group(1):\n self._handlewebError('Error page contains traceback')\n else:\n if (m is None) or (\n not re.search(ntob(re.escape(pattern), self.encoding),\n m.group(1))):\n msg = 'Error page does not contain %s in traceback'\n self._handlewebError(msg % repr(pattern))",
"def internal_error(e):\n return render_template(\"errors/500.html\"), 500",
"def use_error_page(func):\n @wraps(func)\n def wrapper(*args, **kwrds):\n try:\n return func(*args, **kwrds)\n except:\n try:\n etype, evalue, etrace = sys.exc_info()\n app_logger().error(\"ERROR HANDLER => %s: %s\\n%s\\n\", etype, evalue, etrace)\n errfmt = traceback.format_exception(etype, evalue, etrace)\n txtpre = \"Unexpected error:\"\n txtpost = '\\n'.join(errfmt) if current_app.debug else evalue\n return template(\"error.html\", errortext=txtpre+txtpost)\n except:\n app_logger().error(\"ERROR IN ERROR HANDLER - PUNTING - %s: %s\\n%s\\n\" % sys.exc_info())\n return abort(500) # Double Whoops!\n return wrapper",
"def page_not_found(error):\n return '<h1> 404 - Not Found</h1>', 404",
"def error(\n status=500,\n message=\"Internal Server Error\"\n):\n return make_response(\n jsonify(error=message),\n status,\n )",
"def write_error(self, status_code, **kwargs):\n self.finish(\"Error %d - %s\" % (status_code, kwargs['message']))",
"def server_error(e):\n return render_template('500.html'), 500",
"def server_error(request):\n response = render(request, '500.html')\n response.status_code = 500\n\n return response",
"def error(self, request):\n if self.debug:\n import cgitb\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n cgitb.html(sys.exc_info()))\n else:\n errorpage = \"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>Unhandled Exception</title>\n</head><body>\n<h1>Unhandled Exception</h1>\n<p>An unhandled exception was thrown by the application.</p>\n</body></html>\n\"\"\"\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n errorpage)"
]
| [
"0.7486714",
"0.71014893",
"0.70575845",
"0.6884129",
"0.68273413",
"0.66129935",
"0.6582311",
"0.6527852",
"0.63832134",
"0.6349418",
"0.62882286",
"0.6281123",
"0.6267836",
"0.62180173",
"0.6196485",
"0.61800444",
"0.6087222",
"0.6086159",
"0.6083011",
"0.60501426",
"0.6045828",
"0.6040284",
"0.60335535",
"0.6016566",
"0.5998782",
"0.59764796",
"0.59529024",
"0.5946424",
"0.59449893",
"0.59401345"
]
| 0.75789016 | 0 |
Return exc (or sys.exc_info if None), formatted. | def format_exc(exc=None):
if exc is None:
exc = _exc_info()
if exc == (None, None, None):
return ""
import traceback
return "".join(traceback.format_exception(*exc)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')",
"def _FormatException(exc):\n return ''.join(traceback.format_exception_only(type(exc), exc))",
"def exc_info(): # real signature unknown; restored from __doc__\n pass",
"def exc_message(exc_info):\n exc = exc_info[1]\n if exc is None:\n # str exception\n result = exc_info[0]\n else:\n try:\n result = str(exc)\n except UnicodeEncodeError:\n try:\n result = unicode(exc) # flake8: noqa\n except UnicodeError:\n # Fallback to args as neither str nor\n # unicode(Exception(u'\\xe6')) work in Python < 2.6\n result = exc.args[0]\n return result",
"def format_exception_only(exc):\r\n exc_type = type(exc)\r\n\r\n stype = exc_type.__qualname__\r\n smod = exc_type.__module__\r\n if smod not in (\"__main__\", \"builtins\"):\r\n stype = smod + '.' + stype\r\n try:\r\n _str = str(exc)\r\n except:\r\n _str = \"<unprintable {} object>\".format(exc_type.__name__)\r\n\r\n if _str == \"None\" or not _str:\r\n line = \"{}\\n\".format(stype)\r\n else:\r\n line = \"{}: {}\\n\".format(stype, _str)\r\n return line",
"def formatException(self, exc_info):\n keys = [\"type\", \"value\", \"frame\", \"filename\", \"lineno\", \"function\", \"text\"]\n type_, value, trcbk = exc_info\n rows = []\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n values = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n rows.append(dict(zip(keys, values)))\n\n return str(CustomEncoder().encode(rows))",
"def _get_traceback(self, exc_info=None):\n import traceback\n import sys\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))",
"def formatException(self, exc_info):\n traces = traceback.format_exception(*exc_info)\n return \"\\n\".join(traces)",
"def format_stack_trace(exc_info):\n if exc_info[0] is None:\n return ''\n lines = traceback.format_exception(*exc_info)\n return ''.join(line for line in lines)",
"def exc_info_to_str(exc_info):\r\n return ''.join(traceback.format_exception(*exc_info))",
"def _get_traceback(self, exc_info):\n import traceback\n return '<br/>'.join(traceback.format_exception(*(exc_info or sys.exc_info())))",
"def exc_info(self):\n return self._exc_info",
"def __exc_info(self):\n exctype, excvalue, tb = sys.exc_info()\n if sys.platform[:4] == 'java': ## tracebacks look different in Jython\n return (exctype, excvalue, tb)\n return (exctype, excvalue, tb)",
"def get_exception():\n trace = ''\n exception = ''\n exc_list = traceback.format_exception_only(sys.exc_info()[0],\n sys.exc_info()[1])\n for entry in exc_list:\n exception += entry\n tb_list = traceback.format_tb(sys.exc_info()[2])\n for entry in tb_list:\n trace += entry\n return '%s\\n%s' % (exception, trace)",
"def exc_message(exc_info):\n exc = exc_info[1]\n if exc is None:\n # str exception\n result = exc_info[0]\n else:\n try:\n result = str(exc)\n except UnicodeEncodeError:\n try:\n result = str(exc)\n except UnicodeError:\n # Fallback to args as neither str nor\n # unicode(Exception(u'\\xe6')) work in Python < 2.6\n result = exc.args[0]\n return xml_safe(result)",
"def formatException(self, exc_info):\n type_, value, trcbk = exc_info\n\n for pos, frame in enumerate(traceback.extract_tb(trcbk)):\n row = [\n type_.__name__,\n value,\n pos,\n frame.filename,\n frame.lineno,\n frame.name,\n frame.line,\n ]\n self.writer.writerow(row)\n\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()",
"def format_debug(e):\n _, _, tb = sys.exc_info()\n return '1: {doc} \\n2: {exec_info} \\n3: {exec_0} \\n 4: {exec_1} \\n5: {lineno} \\n6: {stack}'.format(\n doc=e.__doc__,\n exec_info=sys.exc_info(),\n exec_0=sys.exc_info()[0],\n exec_1=sys.exc_info()[1],\n lineno=traceback.tb_lineno(sys.exc_info()[2]),\n stack=traceback.print_tb(tb))",
"def formatException(self, exc_info):\n result = super(OneLineExceptionFormatter, self).formatException(exc_info)\n return repr(result) # or format into one line however you want to",
"def format_exc(etype, evalue, etb, context=5, tb_offset=0):\r\n # some locals\r\n try:\r\n etype = etype.__name__\r\n except AttributeError:\r\n pass\r\n\r\n # Header with the exception type, python version, and date\r\n pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable\r\n date = time.ctime(time.time())\r\n pid = 'PID: %i' % os.getpid()\r\n\r\n head = '%s%s%s\\n%s%s%s' % (etype, ' ' * (75 - len(str(etype)) - len(date)),\r\n date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),\r\n pyver)\r\n\r\n # Flush cache before calling inspect. This helps alleviate some of the\r\n # problems with python 2.3's inspect.py.\r\n linecache.checkcache()\r\n # Drop topmost frames if requested\r\n try:\r\n records = _fixed_getframes(etb, context, tb_offset)\r\n except:\r\n raise\r\n print('\\nUnfortunately, your original traceback can not be '\r\n 'constructed.\\n')\r\n return ''\r\n\r\n # Get (safely) a string form of the exception info\r\n try:\r\n etype_str, evalue_str = map(str, (etype, evalue))\r\n except:\r\n # User exception is improperly defined.\r\n etype, evalue = str, sys.exc_info()[:2]\r\n etype_str, evalue_str = map(str, (etype, evalue))\r\n # ... and format it\r\n exception = ['%s: %s' % (etype_str, evalue_str)]\r\n frames = format_records(records)\r\n return '%s\\n%s\\n%s' % (head, '\\n'.join(frames), ''.join(exception[0]))",
"def exc_info(self):\n ei = self._exc_info\n if ei is not None and ei[0] is not None:\n return (\n ei[0],\n ei[1],\n # The pickled traceback may be None if we couldn't pickle it.\n load_traceback(ei[2]) if ei[2] else None\n )",
"def get_traceback_stxt():\n #/\n exc_cls, exc_obj, tb_obj = sys.exc_info()\n\n #/\n txt_s = traceback.format_exception(exc_cls, exc_obj, tb_obj)\n\n #/\n res = ''.join(txt_s)\n\n return res",
"def last_exception():\n exc_type, exc_value, exc_traceback = sys.exc_info()\n return ''.join(traceback.format_exception(exc_type, exc_value,\n exc_traceback))",
"def create_log(self, exc):\n return self.formatter.formatException(exc)",
"def exception(self):\n return self._exc_info[1] if self._exc_info is not None else None",
"def exception(self):\n exc_type, exc_value, exc_tb = sys.exc_info()\n cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],\n log_message=traceback.format_exc())",
"def fancy_traceback(exc: Exception) -> str:\n text = \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n return f\"```py\\n{text[-4086:]}\\n```\"",
"def tidy_error(ex=None) -> str:\r\n from sys import exc_info\r\n from os.path import join, abspath, dirname\r\n from traceback import extract_tb, format_list, format_exception_only\r\n\r\n show = join(dirname(abspath(__file__)), '')\r\n\r\n def _check_file(name):\r\n return name and name.startswith(show)\r\n\r\n def _print(typ, value, tb): # If not debug, generator expression: filter trace to my files.\r\n show = extract_tb(tb) if DEBUG else (fs for fs in extract_tb(tb, limit=3) if _check_file(fs.filename))\r\n fmt = format_list(show) + format_exception_only(typ, value)\r\n return ''.join((f.strip('\"\\'').replace('\\\\n', '') for f in fmt))\r\n\r\n args = ex or exc_info()\r\n return _print(*args)",
"def exc_log_str(exception) -> str:\n return \"{}: {!s}\".format(type(exception).__name__, exception)",
"def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)",
"def sys_exc_info(self, for_hidden=False):\n return self.gettopframe()._exc_info_unroll(self.space, for_hidden)"
]
| [
"0.74286795",
"0.7414739",
"0.7375478",
"0.7347589",
"0.7310034",
"0.72899365",
"0.7289055",
"0.72446114",
"0.7233393",
"0.7170941",
"0.71380997",
"0.70984167",
"0.70755666",
"0.70068014",
"0.70056164",
"0.698292",
"0.6949396",
"0.69382375",
"0.688788",
"0.6868626",
"0.6820991",
"0.67896897",
"0.6744583",
"0.6721996",
"0.6675649",
"0.6670401",
"0.66619664",
"0.6613094",
"0.6574832",
"0.6571165"
]
| 0.8027619 | 0 |
Produce status, headers, body for a critical error. Returns a triple without calling any other questionable functions, so it should be as error-free as possible. Call it from an HTTP server if you get errors outside of the request. If extrabody is None, a friendly but rather unhelpful error message is set in the body. If extrabody is a string, it will be appended as-is to the body. | def bare_error(extrabody=None):
# The whole point of this function is to be a last line-of-defense
# in handling errors. That is, it must not raise any errors itself;
# it cannot be allowed to fail. Therefore, don't add to it!
# In particular, don't call any other CP functions.
body = "Unrecoverable error in the server."
if extrabody is not None:
body += "\n" + extrabody
return ("500 Internal Server Error",
[('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))],
[body]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, req):\n\n user_locale = req.best_match_language()\n # Replace the body with fault details.\n code = self.wrapped_exc.status_int\n fault_name = self._fault_names.get(code, \"computeFault\")\n explanation = self.wrapped_exc.explanation\n LOG.debug(_(\"Returning %(code)s to user: %(explanation)s\"),\n {'code': code, 'explanation': explanation})\n\n explanation = i18n.translate(explanation, user_locale)\n fault_data = {\n fault_name: {\n 'code': code,\n 'message': explanation}}\n if code == 413 or code == 429:\n retry = self.wrapped_exc.headers.get('Retry-After', None)\n if retry:\n fault_data[fault_name]['retryAfter'] = retry\n\n # 'code' is an attribute on the fault tag itself\n metadata = {'attributes': {fault_name: 'code'}}\n\n content_type = req.best_match_content_type()\n serializer = {\n 'application/json': JSONDictSerializer(),\n }[content_type]\n\n self.wrapped_exc.body = serializer.serialize(fault_data)\n self.wrapped_exc.content_type = content_type\n\n return self.wrapped_exc",
"def httperror( status_code=500, message=b'' ):",
"def error(logger_id, data):\n if data.status_code == 200:\n log.error(logger_id, _(\"CC1 - Problem with request: \") + data.url\n + _(\" obtain problem: \") + ast.literal_eval(data.text).get(DATA))\n else:\n log.error(logger_id, _(\"CC1 - Problem with request: \") + data.url)",
"def __call__(self, req):\n # Replace the body with fault details.\n code = self.wrapped_exc.status_int\n fault_name = self._fault_names.get(code, \"openwleeFault\")\n fault_data = {\n fault_name: {\n 'code': code,\n 'message': self.wrapped_exc.explanation}}\n if code == 413:\n retry = self.wrapped_exc.headers.get('Retry-After', None)\n if retry:\n fault_data[fault_name]['retryAfter'] = retry\n\n # 'code' is an attribute on the fault tag itself\n metadata = {'attributes': {fault_name: 'code'}}\n\n xml_serializer = wsgi.XMLDictSerializer(metadata)\n\n content_type = req.best_match_content_type()\n serializer = {\n 'application/xml': xml_serializer,\n 'application/json': wsgi.JSONDictSerializer(),\n }[content_type]\n\n self.wrapped_exc.body = serializer.serialize(fault_data)\n self.wrapped_exc.content_type = content_type\n _set_request_id_header(req, self.wrapped_exc.headers)\n\n return self.wrapped_exc",
"def http_error_handler(ex, req, resp, params):\n resp.body = encode.encode({\n 'status': 1,\n 'msg': 'HTTP error: ' + ex.status\n })",
"def renderHTTP_exception(request, failure):",
"def throw_error(self, error, status_code=400, **extra):\n data = dict(success=False, data=dict(message=error, **extra))\n raise ShortCircuitHttpChain(response=JsonResponse(data, status=status_code))",
"def read_tapis_http_error(http_error_object):\n h = http_error_object\n # extract HTTP response code\n code = -1\n try:\n code = h.response.status_code\n assert isinstance(code, int)\n except Exception:\n # we have no idea what happened\n code = 418\n\n # extract HTTP reason\n reason = 'UNKNOWN ERROR'\n try:\n reason = h.response.reason\n except Exception:\n pass\n\n # Tapis APIs will give JSON responses if the target web service is at all\n # capable of fulfilling the request. Therefore, try first to extract fields\n # from the JSON response, then fall back to returning the plain text from\n # the response.\n err_msg = 'Unexpected encountered by the web service'\n status_msg = 'error'\n version_msg = 'unknown'\n try:\n j = h.response.json()\n if 'message' in j:\n err_msg = j['message']\n if 'status' in j:\n status_msg = j['status']\n if 'version' in j:\n version_msg = j['version']\n except Exception:\n err_msg = h.response.text\n\n httperror = '[{}] {}; message: {}; status: {}; version: {}; response.content: {}'\n return httperror.format(code, reason, err_msg, status_msg, version_msg,\n h.response.content)",
"def handle_error(self, p_ctx, others, error, start_response):\n\n if p_ctx.transport.resp_code is None:\n p_ctx.transport.resp_code = \\\n p_ctx.out_protocol.fault_to_http_response_code(error)\n\n self.get_out_string(p_ctx)\n p_ctx.out_string = [b''.join(p_ctx.out_string)]\n\n p_ctx.transport.resp_headers['Content-Length'] = \\\n str(len(p_ctx.out_string[0]))\n self.event_manager.fire_event('wsgi_exception', p_ctx)\n\n start_response(p_ctx.transport.resp_code,\n _gen_http_headers(p_ctx.transport.resp_headers))\n\n try:\n process_contexts(self, others, p_ctx, error=error)\n except Exception as e:\n # Report but ignore any exceptions from auxiliary methods.\n logger.exception(e)\n\n return itertools.chain(p_ctx.out_string, self.__finalize(p_ctx))",
"def error_body(self):\n return self._status.error_body",
"def raise_http(httpError=HTTPInternalServerError, # type: HTTPError\n httpKWArgs=None, # type: Optional[ParamsType]\n detail=\"\", # type: Str\n content=None, # type: Optional[JSON]\n contentType=CONTENT_TYPE_JSON, # type: Str\n nothrow=False # type: bool\n ): # type: (...) -> Optional[HTTPException]\n\n # fail-fast if recursion generates too many calls\n # this would happen only if a major programming error occurred within this function\n global RAISE_RECURSIVE_SAFEGUARD_MAX\n global RAISE_RECURSIVE_SAFEGUARD_COUNT\n RAISE_RECURSIVE_SAFEGUARD_COUNT = RAISE_RECURSIVE_SAFEGUARD_COUNT + 1\n if RAISE_RECURSIVE_SAFEGUARD_COUNT > RAISE_RECURSIVE_SAFEGUARD_MAX:\n raise HTTPInternalServerError(detail=\"Terminated. Too many recursions of `raise_http`\")\n\n # try dumping content with json format, `HTTPInternalServerError` with caller info if fails.\n # content is added manually to avoid auto-format and suppression of fields by `HTTPException`\n contentType = CONTENT_TYPE_JSON if contentType == CONTENT_TYPE_ANY else contentType\n httpCode, detail, content = validate_params(httpError, HTTPError, detail, content, contentType)\n json_body = format_content_json_str(httpError.code, detail, content, contentType)\n resp = generate_response_http_format(httpError, httpKWArgs, json_body, outputType=contentType)\n\n # reset counter for future calls (don't accumulate for different requests)\n # following raise is the last in the chain since it wasn't triggered by other functions\n RAISE_RECURSIVE_SAFEGUARD_COUNT = 0\n if nothrow:\n return resp\n raise resp",
"def error_wrapper(error, errorClass):\n http_status = 0\n if error.check(TwistedWebError):\n xml_payload = error.value.response\n if error.value.status:\n http_status = int(error.value.status)\n else:\n error.raiseException()\n if http_status >= 400:\n if not xml_payload:\n error.raiseException()\n try:\n fallback_error = errorClass(\n xml_payload, error.value.status, str(error.value),\n error.value.response)\n except (ParseError, AWSResponseParseError):\n error_message = http.RESPONSES.get(http_status)\n fallback_error = TwistedWebError(\n http_status, error_message, error.value.response)\n raise fallback_error\n elif 200 <= http_status < 300:\n return str(error.value)\n else:\n error.raiseException()",
"def error(status, msg, *args):\n logger.error(msg.format(*args))\n return web.Response(status=status, text=msg.format(*args))",
"def _processGETErr(self, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httplib.BAD_REQUEST\r\n elif e.check(UnauthorizedLogin):\r\n msg = e.getErrorMessage()\r\n code = httplib.UNAUTHORIZED\r\n elif e.check(InternalError):\r\n e.printTraceback()\r\n msg = 'Internal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n\r\n self._render_GET(request, code, 'text/plain; charset=utf-8', msg)",
"def format_exception(self):\n if isinstance(self.message, dict):\n return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)",
"def finish_failed_request(self):\n # build new response to be safe\n request = get_request()\n original_response = request.response\n request.response = HTTPResponse()\n #self.log(\"caught an error (%s), reporting it.\" %\n # sys.exc_info()[1])\n\n (exc_type, exc_value, tb) = sys.exc_info()\n error_summary = traceback.format_exception_only(exc_type, exc_value)\n error_summary = error_summary[0][0:-1] # de-listify and strip newline\n\n plain_error_msg = self._generate_plaintext_error(request,\n original_response,\n exc_type, exc_value,\n tb)\n\n if not self.config.display_exceptions:\n # DISPLAY_EXCEPTIONS is false, so return the most\n # secure (and cryptic) page.\n request.response.set_header(\"Content-Type\", \"text/html\")\n user_error_msg = self._generate_internal_error(request)\n elif self.config.display_exceptions == 'html':\n # Generate a spiffy HTML display using cgitb\n request.response.set_header(\"Content-Type\", \"text/html\")\n user_error_msg = self._generate_cgitb_error(request,\n original_response,\n exc_type, exc_value,\n tb)\n else:\n # Generate a plaintext page containing the traceback\n request.response.set_header(\"Content-Type\", \"text/plain\")\n user_error_msg = plain_error_msg\n\n self.logger.log_internal_error(error_summary, plain_error_msg)\n request.response.set_status(500)\n self.session_manager.finish_failed_request()\n return user_error_msg",
"def exceptions(e):\n # NOTE: add log entry\n str(getattr(e, \"code\", \"unavailable\"))\n log_error_code = str(getattr(e, \"code\", \"unavailable\"))\n service_log.error(\n f\"{request.remote_addr} {request.method} {request.scheme} {request.full_path}\\n\"\n f\"Error code: {log_error_code}\\n\"\n f\"Stack trace: {traceback.format_exc()}\"\n )\n\n # NOTE: craft user messages\n if hasattr(e, \"code\"):\n code = int(e.code)\n\n # NOTE: return an http error for methods with no body allowed. This prevents undesired exceptions.\n NO_PAYLOAD_METHODS = \"HEAD\"\n if request.method in NO_PAYLOAD_METHODS:\n return Response(status=code)\n\n error: ServiceError\n if code == 400:\n error = ProgramHttpRequestError(e)\n elif code == 404:\n error = ProgramHttpMissingError(e)\n elif code == 405:\n error = ProgramHttpMethodError(e)\n elif code == 408:\n error = ProgramHttpTimeoutError(e)\n else:\n error = ProgramHttpServerError(e, code)\n\n return error_response(error)\n\n # NOTE: Werkzeug exceptions should be covered above, the following line is for\n # unexpected HTTP server errors.\n return error_response(e)",
"def my_log_traceback(severity=logging.CRITICAL):\n from cherrypy import _cperror\n h = [\" %s: %s\" % (k, v) for k, v in cherrypy.request.header_list]\n cherrypy.log('\\nRequest Headers:\\n' + '\\n'.join(h) + '\\n\\n' + _cperror.format_exc(), \"HTTP\", severity=severity)",
"def send_rpc_error(req, rpcreq, e):",
"def error_handler(source, prod, HEADERS):\n\n try:\n req = requests.get(source, params=prod, headers=HEADERS)\n except Timeout as e:\n print(\"\\nThe website took too long to respond. Please try after sometime.\\n\")\n sys.exit(1)\n except ConnectionError as e:\n print(\"\\nYou do not have a descent internet connection. Please check your Internet Connection and try again later.\\n\")\n sys.exit(1)\n except TooManyRedirects as e:\n print(\"\\nYour request exceeded the configured number of maximum redirections. Please try after sometime.\\n\")\n sys.exit(1)\n except Exception as e:\n print(\"\\nRequest souldn't be completed. Please try after sometime.\\n\")\n sys.exit(1)\n\n return req",
"def _error(error_msg, status_code):\n return {\n 'statusCode': status_code,\n 'body': error_msg}",
"def _raise_http_error(self, *args, **kwargs):",
"def bad_request(error):\n app.logger.warning(str(error))\n return (\n jsonify(\n status=status.HTTP_400_BAD_REQUEST, error=\"Bad Request\", message=str(error)\n ),\n status.HTTP_400_BAD_REQUEST,\n )",
"def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):\n\n return self._render(\n request = request,\n template = str(status),\n status = status,\n context = {\n 'error': kwargs\n },\n headers = headers,\n prefix_template_path = prefix_template_path\n )",
"def bad_request(error):\n message = str(error)\n app.logger.warning(message)\n return (\n jsonify(\n status=status.HTTP_400_BAD_REQUEST, error=\"Bad Request\", message=message\n ),\n status.HTTP_400_BAD_REQUEST,\n )",
"def error(status, message):\n\n headers = {\"Content-Type\":\"text/plain\"}\n\n current.log.error(message)\n raise HTTP(status, body=message, web2py_error=message, **headers)",
"def bad_request():\n return HttpError(400)",
"def make_error(err='DEFAULT', language='english'):\n json_data, code = construct_err(err_name=err, err_language=language)\n return make_response(json_data, code)",
"def post_traceback(self, req):\n debug_info = req.debug_info\n long_xml_er = formatter.format_xml(debug_info.exc_data, \n show_hidden_frames=True, show_extra_data=False, \n libraries=self.libraries)[0]\n host = req.GET['host']\n headers = req.headers\n conn = httplib.HTTPConnection(host)\n headers = {'Content-Length':len(long_xml_er), \n 'Content-Type':'application/xml'}\n conn.request(\"POST\", req.GET['path'], long_xml_er, headers=headers)\n resp = conn.getresponse()\n res = Response()\n for header, value in resp.getheaders():\n if header.lower() in ['server', 'date']: continue\n res.headers[header] = value\n res.body = resp.read()\n return res",
"def handle_error(context, inference_exception, trace):\n context.set_response_status(\n code=inference_exception.status_code,\n phrase=utils.remove_crlf(inference_exception.phrase),\n )\n return [\"{}\\n{}\".format(inference_exception.message, trace)]"
]
| [
"0.5321928",
"0.52629745",
"0.5132919",
"0.5015631",
"0.49685788",
"0.4951957",
"0.49515012",
"0.48696885",
"0.48568124",
"0.48105162",
"0.48014742",
"0.47840595",
"0.47553754",
"0.4739833",
"0.47328785",
"0.47322333",
"0.4730517",
"0.47163033",
"0.47095564",
"0.46958682",
"0.46896997",
"0.46822867",
"0.46436918",
"0.46262556",
"0.46248147",
"0.46156767",
"0.46152607",
"0.4607613",
"0.4601326",
"0.45980254"
]
| 0.6682402 | 0 |
Called when an interest for the specified name is received | def onInterest(self, prefix, interest, transport, registeredPrefixId):
interestName = interest.getName()
data = Data(interestName)
data.setContent("Hello, " + interestName.toUri())
hourMilliseconds = 3600 * 1000
data.getMetaInfo().setFreshnessPeriod(hourMilliseconds)
self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())
transport.send(data.wireEncode().toBuffer())
dump("Replied to:", interestName.toUri()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_notify(self, name):\r\n pass",
"def on_callback_myname(self, pubsub, topic, value):\n raise NotImplementedError()",
"def enter_name(self, name):\n self.name = name",
"def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result",
"def _on_interest(self, int_name, int_param, _app_param):\n if int_param.must_be_fresh:\n return\n data_bytes = self.storage.get_data_packet(int_name, int_param.can_be_prefix)\n if data_bytes == None:\n return\n self.app.put_raw_packet(data_bytes)\n logging.info(f'Read handle: serve data {Name.to_str(int_name)}')",
"def on_namreply(self, raw_msg, nicknames, **kwargs):\n pass",
"def on_action_myname(self, value):\n raise NotImplementedError()",
"def callback(name):\n f = \"on%s\" % \"\".join(x.capitalize() for x in name.split())\n return lambda: getattr(self, f)()",
"def notify(self, subject, name, value):\n if isinstance(subject, BasePart):\n if name == 'partname':\n self.__resequence()",
"async def _instrument(self, msg: dict, timestamp: float):\n for data in msg['data']:\n if 'openInterest' in data:\n ts = self.timestamp_normalize(data['timestamp'])\n await self.callback(OPEN_INTEREST, feed=self.id,\n symbol=self.exchange_symbol_to_std_symbol(data['symbol']),\n open_interest=data['openInterest'],\n timestamp=ts,\n receipt_timestamp=timestamp)",
"def acceptQuestion(self, i, name):\n\t\tprint('question is', i)\n\t\tself.server.accQuestion.emit(i, name)",
"def received_NAME(self, message=None):\n\n\t\tself.player_client.send_message(self.player_model.player.name)",
"def _name_changed ( self, name ):\n self.name_last = parse_name( name )[-1]\n self.inputs_changed()",
"def pNameChanged(self):\n\t\t \n\t\tpn_widget = self.ui.findChild(QWidget, \"p_name\")\n\t\tpatient_name = pn_widget.toPlainText()\n\t\tprint patient_name\n\t\t\n\t\t# Make a database query to check if the current name exists\n\t\t# note: query with \"like\" so that similar names can be suggested\n\t\t\n\t\t# if patient can be found, updating following things:\n\t\t# - SSN field next to patient name\n\t\t# - name, age, etc.\n\t\t# - clearing nerve_info field (sinister&dexter) to correspond summary\n\t\t#\to set CCombobox to \"Summary\"\n\t\tdb_query = True\n\t\tif db_query:\n\t\t\t# Patient with the given name has been found, setting patient data to summary view \n\t\t\tnerve_combo_box = self.ui.findChild(QWidget, \"nerve_box\")\t\t\n\t\t\tnerve_combo_box.setCurrentIndex(0)\n\t\t\tself.nerveChanged()",
"def on(self, event_name, callback):\n self.factory.on(event_name, callback)",
"def observer(oname='unknown'):\n camera.status.observer = oname.strip()\n logger.info('Observer name: '+oname)",
"def on_take(self):\n print(\"You have picked up\", self.name)",
"def Notify(self):\r\n\r\n self._owner.OnRenameTimer()",
"def __on_alarm(\n self, event_name: str, data: dict, kwargs: dict) -> None:\n self.run_in(self.__delayed_announcement, 40)",
"def handle_assistant_name(self, hermes, intent_message):\n name = self.assistant['name']\n\n result_sentence = i18n.RESULT_ASSISTANT_NAME.format(name)\n hermes.publish_end_session(intent_message.session_id, result_sentence)",
"def addInfo(self, name, information):\r\n gamethread.delayed(0, gamethread.delayed, (0, self.setSkillInfo, (name, information))) # delay by 2 ticks to allow skills to register\r\n header = \"\\n%s\\n%s\\n\\n\" % ('*' * 50, name.center(50))\r\n footer = \"\\n%s\" % (\"*\" * 50)\r\n information = information.strip() # strip whitespace at begggining and end of lines\r\n information = (header + information + footer).replace('\\n', '\\n// ')\r\n self.text(information, False)",
"def open_interest(self, open_interest):\n\n self._open_interest = open_interest",
"def notify(self, data):\n\n if 'personId' in data.keys():\n person_id = data['personId']\n if data['type'] == EventTimeLine.PERSON_CREATION:\n self._registry[person_id] = {\n 'name': data['name'],\n 'address': data['address'],\n 'status': data['status'],\n 'version': 1\n }\n\n if data['type'] == EventTimeLine.PERSON_STATUS_CHANGE:\n p = self._registry[person_id]\n p['status'] = data['newStatus']\n p['version'] += 1\n\n if data['type'] == EventTimeLine.PERSON_MOVE:\n p = self._registry[person_id]\n p['address'] = data['newAddress']\n p['version'] += 1",
"def GetCustomInterest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def OnAccept(self, event):\n pass",
"def person_name(self, person_name):\n\n self._person_name = person_name",
"def person_name(self, person_name):\n\n self._person_name = person_name",
"def on_nick(self, raw_msg, source, old_nickname, new_nickname, **kwargs):",
"def OnFacename ( self, facename ):\n self._facenames.append( facename )\n return True",
"def add_name(self, name):\n self.name = name"
]
| [
"0.60658574",
"0.5551281",
"0.5527813",
"0.5408764",
"0.53355795",
"0.5331701",
"0.5316069",
"0.5271348",
"0.52610093",
"0.5228865",
"0.52115935",
"0.5185267",
"0.5160243",
"0.51520616",
"0.51286995",
"0.5124869",
"0.51123345",
"0.50867796",
"0.50811255",
"0.5079059",
"0.50762236",
"0.50678885",
"0.4985352",
"0.49835804",
"0.49801612",
"0.4977615",
"0.4977615",
"0.4967477",
"0.49656907",
"0.49550954"
]
| 0.6637799 | 0 |
Called when forwarder can't register prefix | def onRegisterFailed(self, prefix):
dump("Register failed for prefix", prefix.toUri())
self.isDone = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def route_rejected(self, prefix, next_hop, as_path):",
"def test_prefix():\n\n dispatcher = ntelebot.dispatch.Dispatcher()\n dispatcher.add_prefix('prefix', lambda ctx: 'PREFIX')\n ctx = MockContext()\n ctx.type = 'message'\n assert dispatcher(ctx) is False\n ctx.prefix = 'prefix'\n assert dispatcher(ctx) == 'PREFIX'",
"def route_accepted(self, prefix, next_hop, as_path):",
"def test_ipam_prefixes_update(self):\n pass",
"def route_removed(self, prefix, next_hop, as_path):",
"def test_ipam_prefixes_available_prefixes_create(self):\n pass",
"def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )",
"def test_ipam_prefixes_create(self):\n pass",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def add_prefix(self, prefix, iri):\n existing = self._prefixes.get(prefix)\n if existing:\n if existing != iri:\n raise mio.MIOException('The prefix \"%s\" is already asigned to \"%s\"' % (prefix, existing))\n return\n self._prefixes[prefix] = iri",
"def post_lookup_hook(self):\n pass",
"def test_prefix(self):\n self.chck_triple('prefix')",
"def register_router(self, hostname, expire=-1):",
"def on_init(self, prefix='space', **kwargs):\n assert prefix not in (None, '')\n self.prefix = prefix",
"def empty_prefix(self):\r\n raise NotImplementedError()",
"def test_ipam_prefixes_partial_update(self):\n pass",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def set_prefix(prefix):\n PLUGINS.set_prefix(prefix)",
"def _register(self, comm, handler):",
"def on_register(cls):",
"def forward_pass(self):",
"def onRegister(setup_state):\n\tblueprint = setup_state.blueprint\n\t#if setup_state.options.get('auth') == True:\n\tif setup_state.url_prefix.startswith('/ext/'): #not really used right now\n\t\t#inside here, 'route' works but not 'before_request'\n\t\t#maybe use to register authentication-specific routes?\n\t\tprint(\"Authenticated API on {}\".format(setup_state.url_prefix))",
"def onRegisterNetworkNode(self):\n pass",
"def stillLookingForPrefix(self, prefix):\n return prefix in self._prefixToIdentifiers",
"def test_ipam_prefixes_delete(self):\n pass",
"def onRegister(self):\n pass",
"def onRegister(self):\n pass",
"def _pre_hook(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n):\n k = prefix + \"pe\"\n if k in state_dict:\n state_dict.pop(k)",
"def autoprefix(prefix):\n pl = len(prefix)\n msg = '%%(s)r: expected some name after %(prefix)r!' % locals()\n def checker(s):\n if s.startswith(prefix):\n tail = s[pl:]\n if tail:\n return prefix + dotted_name(tail)\n else:\n raise ValueError(msg % locals())\n elif s:\n return prefix + dotted_name(s)\n else:\n return ''\n return checker",
"def __warn_if_not_empty(n):\n assert isinstance(n, node.Node)\n if n.prefix is not None:\n logger.warning(\"Node.prefix is not empty, overwriting existing prefix '{}' (only one can be active at a\"\n \" time).\".format(n.prefix))"
]
| [
"0.63499",
"0.5765259",
"0.5601321",
"0.5575383",
"0.5490063",
"0.54715466",
"0.5461371",
"0.5442819",
"0.53290313",
"0.5288423",
"0.52614975",
"0.524435",
"0.5240158",
"0.52038664",
"0.5189467",
"0.51688075",
"0.5157592",
"0.51413566",
"0.512637",
"0.5113935",
"0.50908184",
"0.50847936",
"0.5082768",
"0.5049642",
"0.50436556",
"0.5036311",
"0.5036311",
"0.50069696",
"0.50015545",
"0.4998315"
]
| 0.6864617 | 0 |
Returns default metadata of type ``name``, where ``name`` is one of "tree_sequence", "edge", "site", "mutation", "mutation_list_entry", "node", "individual", or "population". | def default_slim_metadata(name):
if name == "tree_sequence":
out = {
"SLiM" : {
"model_type" : "nonWF",
"cycle" : 1,
"tick" : 1,
"file_version" : slim_file_version,
"spatial_dimensionality" : "",
"spatial_periodicity" : "",
"separate_sexes" : False,
"nucleotide_based" : False,
"stage" : "late",
"name" : "",
"description" : "",
}
}
elif name == "edge":
out = None
elif name == "site":
out = None
elif name == "mutation":
out = {
"mutation_list": []
}
elif name == "mutation_list_entry":
out = {
"mutation_type": 0,
"selection_coeff": 0.0,
"subpopulation": tskit.NULL,
"slim_time": 0,
"nucleotide": -1,
}
elif name == "node":
out = {
"slim_id": tskit.NULL,
"is_null": False,
"genome_type": 0,
}
elif name == "individual":
out = {
"pedigree_id": tskit.NULL,
"age": -1,
"subpopulation": tskit.NULL,
"sex": -1,
"flags": 0,
"pedigree_p1": tskit.NULL,
"pedigree_p2": tskit.NULL,
}
elif name == "population":
out = {
"slim_id": tskit.NULL,
"name": "default",
"description": "",
"selfing_fraction": 0.0,
"female_cloning_fraction": 0.0,
"male_cloning_fraction": 0.0,
"sex_ratio": 0.0,
"bounds_x0": 0.0,
"bounds_x1": 1.0,
"bounds_y0": 0.0,
"bounds_y1": 1.0,
"bounds_z0": 0.0,
"bounds_z1": 1.0,
"migration_records": []
}
else:
raise ValueError(
"Unknown metadata request: name should be one of 'tree_sequence', "
"'edge', 'site', 'mutation', 'mutation_list_entry', 'node', "
"'individual', or 'population'.")
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name(self) -> str:\n return self.type_data.name",
"def _getMetadataName(self):\n pass",
"def get_metadata (self, name):\n return self.metadata.get(name)",
"def get_or_create_meddra_type(self, name: str) -> MeddraType:\n return self._get_or_create_model(self.meddra_types, MeddraType, 'name', name)",
"def meta(self, name=None, text_key=None, axis_edit=None):\n if not name:\n return self._meta\n else:\n return self.describe(name, text_key=text_key, axis_edit=axis_edit)",
"def typeof(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.get(CN('meta:value-type'), 'string')\n raise KeyError(name)",
"def type_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"type_name\")",
"def type(name):",
"def getDefaultMeta(self):\n\n meta = MetaDict()\n units = UnitDict()\n\n # meta[self.getStandardIdentifier('tsSamplingRate')] = 80000\n #\n # units[self.getStandardIdentifier('tsSamplingRate')] = 'Hz'\n\n return meta, units",
"def Name(self, default=None):\n return self.data.get('name', default)",
"def getInfo(self):\n return self.name + \" [\" + self.target_type + \"]\"",
"def type_name(self) -> Optional[str]:\n return pulumi.get(self, \"type_name\")",
"def infoKindEl(self, name):\n return self.infoKinds.get(name, None)",
"def name_type(self):\n return self.tag(\"name_type\")",
"def type_name(self):\n return self.TYPE_NAMES.get(self.type, \"Unknown\")",
"def get_type_data(name):\n name = name.upper()\n try:\n return {\n 'authority': 'birdland.mit.edu',\n 'namespace': 'coordinate format',\n 'identifier': name,\n 'domain': 'Coordinate Format Types',\n 'display_name': JEFFS_COORDINATE_FORMAT_TYPES[name] + ' Coordinate Format Type',\n 'display_label': JEFFS_COORDINATE_FORMAT_TYPES[name],\n 'description': ('The type for the ' +\n JEFFS_COORDINATE_FORMAT_TYPES[name] +\n ' Geographic coordinate format.')\n }\n except KeyError:\n raise NotFound('CoordinateFormat Type: ' + name)",
"def get_extra_info(self, name, default=None):\n if name == 'handle':\n return self._handle\n else:\n return default",
"def get_type_by_name(self, name):\n raise NotImplementedError()",
"def type_name(self):\n return self._type_name",
"def name(self):\r\n if self._name_map is None:\r\n self._name_map = {}\r\n for key,value in TypeKind.__dict__.items():\r\n if isinstance(value,TypeKind):\r\n self._name_map[value] = key\r\n return self._name_map[self]",
"def animal_metadatum(animal: Animal, marker_name: str) -> Union[str, None]:\n primary_marker_list = (\n holofood_config.tables.animals_list.default_metadata_marker_columns\n )\n datum = None\n possible_marker_names = marker_name.split(\"||\")\n for possible_marker_name in possible_marker_names:\n if (\n hasattr(animal, \"primary_metadata\")\n and possible_marker_name in primary_marker_list\n ):\n try:\n datum = next(\n m\n for m in animal.primary_metadata\n if m.marker.name == possible_marker_name\n )\n except StopIteration:\n # Metadata was prefetched but didn't exist on this sample\n continue\n else:\n break\n else:\n # Metadata not prefetched\n datum = animal.structured_metadata.filter(\n marker__name=possible_marker_name\n ).first()\n if datum:\n break\n\n if datum is None:\n return None\n measurement_includes_units = str(datum.measurement).endswith(str(datum.units))\n return f'{datum.measurement}{datum.units if datum.units and not measurement_includes_units else \"\"}'",
"def get_info(self, name):\n return self.info[name]",
"def type(self):\n # by default, return my marker\n return self.typename",
"def metadata(self): # -> list[Unknown]:\n ...",
"def metadata(self): # -> list[Unknown]:\n ...",
"def __init__(self, name):\n self.type_cls = None\n\n self.name = name\n self.description = None\n self.updated = None\n self.notes = None\n self.properties = {}",
"def get_type_data(name):\n name = name.upper()\n try:\n return {\n 'authority': 'okapia.net',\n 'namespace': 'string match types',\n 'identifier': name,\n 'domain': 'String Match Types',\n 'display_name': STRING_MATCH_TYPES[name] + ' String Match Type',\n 'display_label': STRING_MATCH_TYPES[name],\n 'description': ('The string match type for the ' +\n STRING_MATCH_TYPES[name])\n }\n except KeyError:\n raise NotFound('String Type: ' + name)",
"def metadata():\n\treturn {\n\t\t\"name\": \"User Interface Type\",\n\t\t\"description\": \"Defines a type of plug-in that communicates with the user by showing information to the user and allowing the user to control the application.\",\n\t\t\"version\": 2,\n\t\t\"dependencies\": {},\n\n\t\t\"type\": { #This is a \"plug-in type\" plug-in.\n\t\t\t\"type_name\": \"userinterface\",\n\t\t\t\"api\": userinterfacetype.user_interface,\n\t\t\t\"validate_metadata\": validate_metadata\n\t\t}\n\t}",
"def PublicationType(self, default=None):\n return self.data.get('metadata', {}).get('publication_type', default)",
"def type_skeleton():\n return {\"base_type\": None,\n \"values\": {\"names\": [], \"codes\": []}}"
]
| [
"0.5959555",
"0.58948135",
"0.5858112",
"0.5682448",
"0.5663172",
"0.5657096",
"0.55724984",
"0.55313265",
"0.5495867",
"0.5495678",
"0.54869545",
"0.5458396",
"0.5452425",
"0.5430432",
"0.5417629",
"0.5389796",
"0.5383348",
"0.5370883",
"0.5369414",
"0.535739",
"0.53353643",
"0.53344226",
"0.53274405",
"0.53257024",
"0.53257024",
"0.5316552",
"0.5308838",
"0.52917945",
"0.5284701",
"0.52709585"
]
| 0.6202577 | 0 |
Tests whether the tree sequence or table collection provided is the current SLiM file format or not. If not, use `pyslim.update( )` to bring it up to date. | def is_current_version(ts, _warn=False):
out = (
isinstance(ts.metadata, dict)
and
('SLiM' in ts.metadata)
and
(ts.metadata['SLiM']['file_version'] == slim_file_version)
)
if _warn and not out:
warnings.warn(
"This tree sequence is not the current SLiM format, "
"so some operations may not work. "
"Use `pyslim.update( )` to update the tree sequence."
)
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_tables(tables):\n # First we ensure we can find the file format version number\n # in top-level metadata. Then we proceed to fix up the tables as necessary.\n if not (isinstance(tables.metadata, dict) and 'SLiM' in tables.metadata):\n # Old versions kept information in provenance, not top-level metadata.\n # Note this uses defaults on keys not present in provenance,\n # which prior to 0.5 was everything but generation and model_type.\n values = default_slim_metadata('tree_sequence')['SLiM']\n prov = None\n file_version = 'unknown'\n # use only the last SLiM provenance\n for p in tables.provenances:\n is_slim, this_file_version = slim_provenance_version(p) \n if is_slim:\n prov = p\n file_version = this_file_version\n values['file_version'] = file_version\n try:\n record = json.loads(prov.record)\n if file_version == \"0.1\":\n values['model_type'] = record['model_type']\n values['tick'] = record['generation']\n values['cycle'] = record['generation']\n else:\n if 'generation' in record['slim']:\n values['tick'] = record['slim']['generation']\n values['cycle'] = record['slim']['generation']\n for k in values:\n if k in record['parameters']:\n values[k] = record['parameters'][k]\n if k in record['slim']:\n values[k] = record['slim'][k]\n except:\n raise ValueError(\"Failed to obtain metadata from provenance.\")\n set_tree_sequence_metadata(tables, **values)\n\n file_version = tables.metadata['SLiM']['file_version']\n if file_version != slim_file_version:\n warnings.warn(\"This is a version {} SLiM tree sequence.\".format(file_version) +\n \" When you write this out, \" +\n \"it will be converted to version {}.\".format(slim_file_version))\n\n # the only tables to have metadata schema changed thus far\n # are populations, individuals, mutations, and top-level:\n old_schema = _old_metadata_schema(\"tree_sequence\", file_version)\n if old_schema is not None:\n md = tables.metadata\n new_schema = slim_metadata_schemas[\"tree_sequence\"]\n new_properties = new_schema.asdict()['properties']['SLiM']['required']\n tables.metadata_schema = new_schema\n defaults = default_slim_metadata(\"tree_sequence\")\n for k in new_properties:\n if k not in md['SLiM']:\n if k == \"tick\":\n md['SLiM']['tick'] = md['SLiM']['generation']\n md['SLiM']['cycle'] = md['SLiM']['generation']\n else:\n md['SLiM'][k] = defaults['SLiM'][k]\n tables.metadata = md\n\n old_schema = _old_metadata_schema(\"population\", file_version)\n if old_schema is not None:\n pops = tables.populations.copy()\n tables.populations.clear()\n if pops.metadata_schema == tskit.MetadataSchema(None):\n pops.metadata_schema = old_schema\n new_schema = slim_metadata_schemas[\"population\"]\n tables.populations.metadata_schema = new_schema\n defaults = default_slim_metadata(\"population\")\n # just needs recoding\n for pop in pops:\n tables.populations.append(pop)\n\n old_schema = _old_metadata_schema(\"individual\", file_version)\n if old_schema is not None:\n inds = tables.individuals.copy()\n tables.individuals.clear()\n if inds.metadata_schema == tskit.MetadataSchema(None):\n inds.metadata_schema = old_schema\n new_schema = slim_metadata_schemas[\"individual\"]\n tables.individuals.metadata_schema = new_schema\n defaults = default_slim_metadata(\"individual\")\n d = {}\n for k in [\"pedigree_p1\", \"pedigree_p2\"]:\n d[k] = defaults[k]\n for ind in inds:\n md = ind.metadata\n md.update(d)\n tables.individuals.append(ind.replace(metadata=md))\n\n old_schema = _old_metadata_schema(\"mutation\", file_version)\n if old_schema is not None:\n 
muts = tables.mutations.copy()\n tables.mutations.clear()\n if muts.metadata_schema == tskit.MetadataSchema(None):\n muts.metadata_schema = old_schema\n tables.mutations.metadata_schema = slim_metadata_schemas[\"mutation\"]\n for mut in muts:\n md = mut.metadata\n for ml in md['mutation_list']:\n ml['nucleotide'] = -1\n tables.mutations.append(mut.replace(metadata=md))\n\n if file_version == \"0.1\":\n # shift times\n slim_generation = tables.metadata['SLiM']['tick']\n node_times = tables.nodes.time + slim_generation\n tables.nodes.set_columns(\n flags=tables.nodes.flags,\n time=node_times,\n population=tables.nodes.population,\n individual=tables.nodes.individual,\n metadata=tables.nodes.metadata,\n metadata_offset=tables.nodes.metadata_offset)\n migration_times = tables.migrations.time + slim_generation\n tables.migrations.set_columns(\n left=tables.migrations.left,\n right=tables.migrations.right,\n node=tables.migrations.node,\n source=tables.migrations.source,\n dest=tables.migrations.dest,\n time=migration_times)\n\n new_record = {\n \"schema_version\": \"1.0.0\",\n \"software\": {\n \"name\": \"pyslim\",\n \"version\": pyslim_version,\n },\n \"parameters\": {\n \"command\": [\"updrade_tables\"],\n \"old_file_version\": file_version,\n \"new_file_version\": slim_file_version,\n },\n \"environment\": get_environment(),\n }\n tskit.validate_provenance(new_record)\n tables.provenances.add_row(json.dumps(new_record))\n\n set_metadata_schemas(tables)\n md = tables.metadata\n md['SLiM']['file_version'] = slim_file_version\n tables.metadata = md",
"def __is_modification_legal_in_current_mode(self):\n self.__is_collection_close()\n if self.__mode == 'r':\n from ir_log import IRLog\n IRLog.get_instance().println(\n 'Error! Cannot write to collection being opened in read mode.')\n assert False",
"def _is_probably_new_datfile_format(raw_data):\n return \"<OOI-ts:\" in raw_data",
"def _compare_survey_structures(self, db_structure, local_structure):\n if not local_structure.equals(db_structure):\n self.log.info(f\"Survey structure data is not consistent with the table in the db\")\n return False\n else:\n return True",
"def _matchDatasetToLIMS(self, pathToLIMSfile):\n\n # Detect if requires NMR specific alterations\n if 'expno' in self.sampleMetadata.columns:\n from . import NMRDataset\n NMRDataset._matchDatasetToLIMS(self,pathToLIMSfile)\n else:\n super()._matchDatasetToLIMS(pathToLIMSfile)",
"def test08(self):\n logical_file_name = \"/store/mc/Fall08/BBJets250to500-madgraph/GEN-SIM-RAW/IDEAL_/%s/%i.root\" %(uid, 0)\n #print \"WARNING : DBS cannot list INVALID file, so for now this test is commented out\"\n self.api.updateFileStatus(logical_file_name=logical_file_name, is_file_valid=0)\n #listfile\n filesInDBS=self.api.listFiles(logical_file_name=logical_file_name, detail=True)\n self.assertEqual(len(filesInDBS), 1)\n self.assertEqual(filesInDBS[0]['is_file_valid'], 0)",
"def isParallelMS(vis):\n \n msTool = mstool()\n if not msTool.open(vis):\n raise ValueError, \"Unable to open MS %s,\" % vis\n rtnVal = msTool.ismultims() and \\\n isinstance(msTool.getreferencedtables(), list)\n\n msTool.close()\n return rtnVal",
"def check_structure_is_modified(self):\n if not self.structure_has_been_modified: \n print('NEED TO MODIFY STRUCTURE BEFORE PROCEEDING FURTHER!')\n sys.exit()",
"def isISL(self):\n return True",
"def isCollection(self, path):\n # pylint: disable=E1101\n # E1101: pylint could not resolve the node_kind attribute. \n\n return self._determineItemKind(path, pysvn.node_kind.dir)",
"def checkL1Compatibility(self, inConversion=False):\n return _libsbml.SBMLDocument_checkL1Compatibility(self, inConversion)",
"def check(self, version=None):\n \n if version is None:\n print(\"assuming Stata version 13\")\n version = 13\n if version not in (11, 12, 13):\n raise ValueError(\"allowed versions are 11 through 13\")\n \n width = self.width\n nvar = self._nvar\n nobs = self._nobs\n \n chrdict = self._chrdict\n \n char_len = max([0] + [len(char) for evar, evardict in chrdict.items()\n for char in evardict.values()])\n num_val_encodings = max([0] + [len(mapping) \n for mapping in self._vallabs.values()])\n max_note_size = max([0] + [len(note) for d in chrdict.values()\n for name, note in d.items()\n if re.match(r'^note[0-9]+$', name)])\n max_num_notes = max([0] + [len([1 for name,note in d.items() \n if re.match(r'^note[0-9]+$', name)])\n for d in chrdict.values()])\n \n if '_dta' in chrdict and '_lang_list' in chrdict['_dta']:\n n_label_langs = len(chrdict['_dta'][\"_lang_list\"].split())\n else:\n n_label_langs = 0\n \n small_good = medium_good = large_good = True\n general_good = format_good = True\n \n print(\"\\nformat problems\")\n if self._ds_format == 117 and version <= 12:\n format_good = False\n print(\" format 117 cannot be opened by Stata version \" + \n str(version))\n if version < 12 and any(TB_FMT_RE.match(fmt) for fmt in self._fmtlist):\n format_good = False\n print(\" Stata version \" + str(version) + \n \" cannot understand tb format\")\n if format_good:\n print(\" none\")\n \n print(\"\\ngeneral size problems\")\n if len(self._data_label) > 80:\n general_good = False\n print(\" data label length > 80\")\n if any(len(name) > 32 for name in self._varlist):\n general_good = False\n print(\" variable name length > 32\")\n if any(len(v) > 80 for v in self._vlblist):\n general_good = False\n print(\" variable label length > 80\")\n if any(len(name) > 32 for name in self._vallabs.keys()):\n general_good = False\n print(\" value label name length > 32\")\n if any(len(valstr) > 32000 \n for mapping in self._vallabs.values()\n for valstr in mapping.values()):\n general_good = False\n print(\" value label string length > 32,000\")\n if max_num_notes > 10000:\n # limit here is set at 10000, assuming one of the notes is 'note0'\n general_good = False\n print(\" number of notes for single variable or _dta > 9,999\")\n if n_label_langs > 100:\n general_good = False\n print(\" number of label languages > 100\")\n if general_good:\n print(\" none\")\n\n print(\"\\nStata small problems\")\n if width > 800:\n small_good = False\n print(\" data set width > 800\")\n if nvar > 99:\n small_good = False\n print(\" numbar of variables > 99\")\n if nobs > 1200:\n small_good = False\n print(\" number of observations > 1,200\")\n if num_val_encodings > 1000:\n small_good = False\n print(\" number of encodings within single value label > 1,000\")\n if version == 13:\n if max_note_size > 13400:\n small_good = False\n print(\" note size > 13,400\")\n if char_len > 13400:\n small_good = False\n print(\" char length > 13,400\")\n else:\n if max_note_size > 8681:\n small_good = False\n print(\" note size > 8,681\")\n if char_len > 8681:\n small_good = False\n print(\" char length > 8,681\")\n if small_good:\n print(\" none\")\n\n print(\"\\nStata IC problems\")\n if width > 24564:\n medium_good = False\n print(\" data set width > 24,564\")\n if nvar > 2047:\n medium_good = False\n print(\" numbar of variables > 2,047\")\n if nobs > 2147483647:\n medium_good = False\n print(\" number of observations > 2,147,483,647\")\n if num_val_encodings > 65536:\n medium_good = False\n print(\" number of encodings within single 
value label > 65,536\")\n if max_note_size > 67784:\n medium_good = False\n print(\" note size > 67,784\")\n if char_len > 67784:\n medium_good = False\n print(\" char length > 67,784\")\n if medium_good:\n print(\" none\")\n\n print(\"\\nStata MP & SE problems\")\n if width > 393192:\n large_good = False\n print(\" data set width > 393,192\")\n if nvar > 32767:\n large_good = False\n print(\" numbar of variables > 32,767\")\n if nobs > 2147483647:\n large_good = False\n print(\" number of observations > 2,147,483,647\")\n if num_val_encodings > 65536:\n large_good = False\n print(\" number of encodings within single value label > 65,536\")\n if max_note_size > 67784:\n large_good = False\n print(\" note size > 67,784\")\n if char_len > 67784:\n large_good = False\n print(\" char length > 67,784\")\n if large_good:\n print(\" none\")",
"def checkL2v5Compatibility(self):\n return _libsbml.SBMLDocument_checkL2v5Compatibility(self)",
"def test_existing_current_files(self):\n\n m = Mothur(**self.init_vars)\n self.set_current_dirs(m)\n m.summary.seqs(fasta='test_fasta_1.fasta')\n m.summary.seqs()\n\n return",
"def is_mutable_collection(self, col: str) -> bool:\n if self.scope is None:\n raise ValueError(\"Can't check mutability on unbound modules\")\n return self.scope.is_mutable_collection(col)",
"def assert_modification_intention(self, db_cfg_name, collection_cfg_name):\n return self.__assert_collection_change(db_cfg_name,\n collection_cfg_name, False)",
"def is_sel_file(roi_path: Union[str, Path]) -> bool:\n # TODO: scipy.io.readsav does not close the buffer correctly\n # on finding an invalid signature.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", ResourceWarning)\n try:\n sel = scipy.io.readsav(roi_path)\n assert set(sel.keys()).issubset(\n {\n \"erasecolor\",\n \"lseltemp\",\n \"rseltemp\",\n \"region_info\",\n \"left_pos\",\n \"right_pos\",\n \"sel_file_format_major_version\",\n \"sel_file_format_minor_version\",\n \"sel_file_format_date\"\n }\n )\n return True\n except AssertionError:\n # this is apparently an IDL .sav file, but not a\n # properly-formatted .sel file\n return False\n # scipy.io.readsav uses general Exception\n except Exception as exception:\n if \"Invalid SIGNATURE\" in str(exception):\n # this is not an IDL .sav file but rather something else\n return False\n # presumably FileNotFoundError, you passed an integer, stuff\n # like that\n raise",
"def is_structured(self):\n return self.data_format in [\"dataframe\", \"flattened_dataframe\"]",
"def parsed_in_original(self, filep: Optional[str]) -> bool:\n if not filep:\n return False # pragma: no cover\n return self._parsed_by_parser_paths(filep, self.existing_paths)",
"def _is_probably_old_datfile_format(raw_data):\n return not _is_probably_new_datfile_format(raw_data) and \"UTC\" in raw_data",
"def sync(self):\n _yaml = self.hashable.yaml()\n if self.path.exists():\n _yaml_on_disk = self.path.read_text()\n if _yaml_on_disk != _yaml:\n e.code.CodingError(\n msgs=[\n \"Info file mismatch ... should never happen\",\n \"State on disk: \",\n [_yaml_on_disk],\n \"State in memory: \",\n [_yaml],\n ]\n )\n else:\n # handle info file and make it read only\n # ... write hashable info\n self.path.write_text(_yaml)\n # ... make read only as done only once\n util.io_make_path_read_only(self.path)",
"def test_flim_model(datadir):\n # ifuslot_063\n filename = datadir.join(\"test_hdf.h5\").strpath\n hdcon1 = SensitivityCubeHDF5Container(filename, flim_model=\"hdr1\")\n hdcon2 = SensitivityCubeHDF5Container(filename, flim_model=\"hdr2pt1\")\n\n scube1 = hdcon1.extract_ifu_sensitivity_cube(\"ifuslot_063\")\n scube2 = hdcon2.extract_ifu_sensitivity_cube(\"ifuslot_063\")\n\n s1 = scube1.get_f50(161.4201, 50.8822, 3470.0, 5.5)\n s2 = scube2.get_f50(161.4201, 50.8822, 3470.0, 5.5)\n\n print(s1)\n # if different models passed should be different\n assert abs(s1 - s2) > 1e-19",
"def checkL2v4Compatibility(self):\n return _libsbml.SBMLDocument_checkL2v4Compatibility(self)",
"def __assert_collection_change(self, db_cfg_name, collection_cfg_name, \n is_finished):\n import time\n from ir_config import IRConfig\n db_name = IRConfig.get_instance().get(db_cfg_name)\n collection_name = IRConfig.get_instance().get(collection_cfg_name)\n meta_collection = self.__get_meta_collection(db_name)\n res = self.__find_collection_in_meta(db_name, collection_name)\n if res.count() > 0:\n meta_collection.update({self.__meta_key_name : collection_name},\n {'$set' : {self.__meta_lastmodified_name : int(time.time()),\n self.__meta_success_name : is_finished}}) \n else:\n meta_collection.insert({self.__meta_key_name : collection_name,\n self.__meta_lastmodified_name : int(time.time()),\n self.__meta_success_name : is_finished})",
"def is_structural(self):\n\n if self.depth > 1:\n\n if (self.path[0] == \"input\") and (self.path[1] in gs.all_elements):\n\n return True\n\n return False",
"def _is_lis_output_missing(curdate, model_forcing):\n for model in [\"SURFACEMODEL\", \"ROUTING\"]:\n filename = f\"lis_fcst\"\n filename += f\"/{model_forcing}\"\n filename += f\"/{model}\"\n filename += f\"/{curdate.year:04d}{curdate.month:02d}\"\n filename += \"/LIS_HIST_\"\n filename += f\"{curdate.year:04d}{curdate.month:02d}{curdate.day:02d}\"\n filename += \"0000.d01.nc\"\n if not os.path.exists(filename):\n return True\n return False",
"def is_input_file(self):\r\n return self.depth == 0",
"def check_filekind(self):\n assert self.filekind in self.obs_package.FILEKINDS, \\\n \"Invalid filekind \" + repr(self.filekind) + \" in \" + repr(self.filename)",
"def checkL2v1Compatibility(self, inConversion=False):\n return _libsbml.SBMLDocument_checkL2v1Compatibility(self, inConversion)",
"def parsed_in_current(self, filep: Optional[str]) -> bool:\n if not filep:\n return False # pragma: no cover\n return self._parsed_by_parser_paths(filep, self.parser_paths)"
]
| [
"0.5645714",
"0.5205285",
"0.47525713",
"0.473416",
"0.46577895",
"0.4646021",
"0.46265066",
"0.4595514",
"0.4574456",
"0.45528594",
"0.45147106",
"0.45048997",
"0.44993272",
"0.44980407",
"0.4485074",
"0.4447759",
"0.44419795",
"0.44316334",
"0.44212565",
"0.4402587",
"0.43579513",
"0.43465173",
"0.43369704",
"0.43214965",
"0.43085578",
"0.43025804",
"0.42946804",
"0.42779136",
"0.42768562",
"0.4263091"
]
| 0.59775496 | 0 |
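A minimal usage sketch for the version-check document above, assuming `is_current_version` and `update` are importable from `pyslim` as the docstring suggests; the file path is a placeholder:

```python
import tskit
import pyslim

# Placeholder path to a SLiM-produced tree sequence.
ts = tskit.load("example.trees")

# If the file was written by an older SLiM/pyslim, convert it before doing anything else.
if not pyslim.is_current_version(ts, _warn=True):
    ts = pyslim.update(ts)  # returns an updated tree sequence
```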
Update tables produced by a previous version of SLiM to the current file version. Modifies the tables in place. | def update_tables(tables):
# First we ensure we can find the file format version number
# in top-level metadata. Then we proceed to fix up the tables as necessary.
if not (isinstance(tables.metadata, dict) and 'SLiM' in tables.metadata):
# Old versions kept information in provenance, not top-level metadata.
# Note this uses defaults on keys not present in provenance,
# which prior to 0.5 was everything but generation and model_type.
values = default_slim_metadata('tree_sequence')['SLiM']
prov = None
file_version = 'unknown'
# use only the last SLiM provenance
for p in tables.provenances:
is_slim, this_file_version = slim_provenance_version(p)
if is_slim:
prov = p
file_version = this_file_version
values['file_version'] = file_version
try:
record = json.loads(prov.record)
if file_version == "0.1":
values['model_type'] = record['model_type']
values['tick'] = record['generation']
values['cycle'] = record['generation']
else:
if 'generation' in record['slim']:
values['tick'] = record['slim']['generation']
values['cycle'] = record['slim']['generation']
for k in values:
if k in record['parameters']:
values[k] = record['parameters'][k]
if k in record['slim']:
values[k] = record['slim'][k]
except:
raise ValueError("Failed to obtain metadata from provenance.")
set_tree_sequence_metadata(tables, **values)
file_version = tables.metadata['SLiM']['file_version']
if file_version != slim_file_version:
warnings.warn("This is a version {} SLiM tree sequence.".format(file_version) +
" When you write this out, " +
"it will be converted to version {}.".format(slim_file_version))
# the only tables to have metadata schema changed thus far
# are populations, individuals, mutations, and top-level:
old_schema = _old_metadata_schema("tree_sequence", file_version)
if old_schema is not None:
md = tables.metadata
new_schema = slim_metadata_schemas["tree_sequence"]
new_properties = new_schema.asdict()['properties']['SLiM']['required']
tables.metadata_schema = new_schema
defaults = default_slim_metadata("tree_sequence")
for k in new_properties:
if k not in md['SLiM']:
if k == "tick":
md['SLiM']['tick'] = md['SLiM']['generation']
md['SLiM']['cycle'] = md['SLiM']['generation']
else:
md['SLiM'][k] = defaults['SLiM'][k]
tables.metadata = md
old_schema = _old_metadata_schema("population", file_version)
if old_schema is not None:
pops = tables.populations.copy()
tables.populations.clear()
if pops.metadata_schema == tskit.MetadataSchema(None):
pops.metadata_schema = old_schema
new_schema = slim_metadata_schemas["population"]
tables.populations.metadata_schema = new_schema
defaults = default_slim_metadata("population")
# just needs recoding
for pop in pops:
tables.populations.append(pop)
old_schema = _old_metadata_schema("individual", file_version)
if old_schema is not None:
inds = tables.individuals.copy()
tables.individuals.clear()
if inds.metadata_schema == tskit.MetadataSchema(None):
inds.metadata_schema = old_schema
new_schema = slim_metadata_schemas["individual"]
tables.individuals.metadata_schema = new_schema
defaults = default_slim_metadata("individual")
d = {}
for k in ["pedigree_p1", "pedigree_p2"]:
d[k] = defaults[k]
for ind in inds:
md = ind.metadata
md.update(d)
tables.individuals.append(ind.replace(metadata=md))
old_schema = _old_metadata_schema("mutation", file_version)
if old_schema is not None:
muts = tables.mutations.copy()
tables.mutations.clear()
if muts.metadata_schema == tskit.MetadataSchema(None):
muts.metadata_schema = old_schema
tables.mutations.metadata_schema = slim_metadata_schemas["mutation"]
for mut in muts:
md = mut.metadata
for ml in md['mutation_list']:
ml['nucleotide'] = -1
tables.mutations.append(mut.replace(metadata=md))
if file_version == "0.1":
# shift times
slim_generation = tables.metadata['SLiM']['tick']
node_times = tables.nodes.time + slim_generation
tables.nodes.set_columns(
flags=tables.nodes.flags,
time=node_times,
population=tables.nodes.population,
individual=tables.nodes.individual,
metadata=tables.nodes.metadata,
metadata_offset=tables.nodes.metadata_offset)
migration_times = tables.migrations.time + slim_generation
tables.migrations.set_columns(
left=tables.migrations.left,
right=tables.migrations.right,
node=tables.migrations.node,
source=tables.migrations.source,
dest=tables.migrations.dest,
time=migration_times)
new_record = {
"schema_version": "1.0.0",
"software": {
"name": "pyslim",
"version": pyslim_version,
},
"parameters": {
"command": ["updrade_tables"],
"old_file_version": file_version,
"new_file_version": slim_file_version,
},
"environment": get_environment(),
}
tskit.validate_provenance(new_record)
tables.provenances.add_row(json.dumps(new_record))
set_metadata_schemas(tables)
md = tables.metadata
md['SLiM']['file_version'] = slim_file_version
tables.metadata = md | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_vluln_table():",
"def run_sql_file(filename, connection, version, lastversion):\n cursor = connection.cursor()\n for line in open(filename):\n cursor.execute(line)\n connection.commit()\n cursor.execute(\n \"update ecs.versionTable SET ecs.versionTable.version='{}' \"\n \"where ecs.versionTable.version ='{}';\".format(version, lastversion))\n connection.commit()\n print(\"VersionTable updated. Current version is now: {}\".format(version))",
"def callUpdateTable(self):\r\n self.updateTable()",
"def db_update_files():\n _populate_table_files(File)\n _populate_table_files(Software)\n return redirect(url_for('view_index'))",
"def update(self):\n\n if not self.db: self.validate()\n\n self.logging.debug( \"update(%s)\" % (self.db) )\n\n for name in self.tables:\n self.dbs_tables[name]['md5'] = get_md5( self.dbs_tables[name]['path'] )\n\n self._get_magnitudes()\n self._get_events()",
"def update(ts):\n tables = ts.dump_tables()\n update_tables(tables)\n return tables.tree_sequence()",
"def update_table_in_file(table, source_file):\n with open(source_file, 'r') as source, \\\n tempfile.NamedTemporaryFile('w', delete=False) as temp:\n source_lines = source.readlines()\n\n table_start = index_tag_in_lines(source_lines, tag='Table Start')\n table_end = index_tag_in_lines(source_lines, tag='Table End')\n print(f'Found table_start tag at line no: {table_start}')\n print(f'Found table_end tag at line no: {table_end}')\n assert table_end > table_start, 'Table End must be after Table Start'\n\n table_written = False\n for line_no, line in enumerate(source_lines):\n if line_no <= table_start or line_no >= table_end:\n temp.write(line)\n elif not table_written: # write table once\n temp.writelines(table)\n table_written = True\n\n backup_file = source_file.with_suffix('.md.bkp')\n os.rename(source_file, backup_file)\n print(f'Original file backed up at: {backup_file}')\n\n shutil.copy(temp.name, source_file)",
"def __update_version(self):\r\n\r\n db_version = self.__get_db_version_int()\r\n if db_version == SCHEMA_VERSION:\r\n return\r\n\r\n #\r\n # Define functions for upgrading between schema versions\r\n #\r\n def update_2xto30():\r\n \"\"\"Incremental update of database from Freeseer 2.x and older to 3.0\r\n\r\n SCHEMA_VERSION is 300\r\n \"\"\"\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n def update_30to31():\r\n \"\"\"Performs incremental update of database from 3.0 and older to 3.1.\"\"\"\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old')\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_310)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time, Time, Time\r\n FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n #\r\n # Perform the upgrade\r\n #\r\n updaters = [update_2xto30, update_30to31]\r\n for updater in updaters:\r\n updater()\r\n\r\n QtSql.QSqlQuery('PRAGMA user_version = %i' % SCHEMA_VERSION)\r\n log.info('Upgraded presentations database from version {} to {}'.format(db_version, SCHEMA_VERSION))",
"def update_table(table_name):\n for filename in table_name_to_funcs[table_name][\"filename\"]:\n choose_file_to_get(table_name_to_funcs[table_name][\"file_type\"], filename)\n\n for process_func in table_name_to_funcs[table_name][\"process\"]:\n process_func()\n for to_sql_func in table_name_to_funcs[table_name][\"to_sql\"]:\n to_sql_func(update=True)",
"def updateTable(self, tableOld: str, tableNew: str, line, column):\n database = SymbolTable().useDatabase\n if not database:\n desc = f\": Database not selected\"\n ErrorController().add(4, 'Execution', desc,\n line, column)\n return\n\n dbStatement = data_mode.mode(database.mode).alterTable(database.name.lower(),\n tableOld.lower(), tableNew.lower())\n\n if dbStatement == 0:\n table = self.searchTable(database, tableOld)\n table.name = tableNew\n self.writeFile()\n DataWindow().consoleText('Query returned successfully: Table updated')\n\n elif dbStatement == 1:\n desc = f\": Can't update Table {tableOld}\"\n ErrorController().add(34, 'Execution', desc, line, column)\n\n elif dbStatement == 2:\n desc = f\": Database {database.name} does not exist\"\n ErrorController().add(35, 'Execution', desc, line, column)\n\n elif dbStatement == 3:\n desc = f\": Table {tableOld} does not exist\"\n ErrorController().add(27, 'Execution', desc, line, column)\n\n elif dbStatement == 4:\n desc = f\": Table {tableNew} already exists\"\n ErrorController().add(31, 'Execution', desc, line, column)",
"def updateJobsTable(self):\n self.checkJobsDict()\n jobdict = self.DB.meta.peatsa_jobs \n M = TableModel()\n #open job log from file\n f=open('jobstates.log','r')\n jl = pickle.load(f) \n for j in jobdict: \n jobid = jobdict[j] \n try:\n M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])\n except:\n M.addRecord(j,state='Not in DB')\n self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)\n self.jobstable.createTableFrame() \n self.log.yview('moveto', 1)\n f.close()\n return",
"def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)",
"def update_mysql_files(self):\n\n param = None\n self._check_path_availability([\"get_db_dir\", \"get_db_dir_to\"])\n self.updater.update_files(\n self.analizer.get_db_dir(),\n self.analizer.get_db_dir_to(),\n param\n )\n return self.write_debug_message(\"Mysql files upgrade is done!\\n\")",
"def UpdateS1SVs(s, Difference, WorkingSet):",
"def force_update():\n # TODO: IS THERE A WAY TO ONLY REFRESH FOR A GIVEN YEAR?\n # TODO: FIND A WAY TO DO THIS ASYNCHRONOUSLY\n print('Starting update...')\n # TODO: THIS IS A PRETTY BAD WORKAROUND. WE SHOULD FIND A WAY TO PROVIDE THE SCRIPTS WITH THE 'LANDTAGSWAHLDB' PACKAGE\n sql_path = pathlib.Path(current_app.instance_path).parent.parent / 'sql-scripts' / 'UpdateViews.sql'\n with open(sql_path) as sql_file:\n script = sql_file.read()\n db = db_context.get_db()\n db.run_script(script)\n db.commit()\n return 'Success'",
"def refresh_table_list(self):\n selection = self._lb_tables.GetSelection() #preserve table selection\n\n self._tables.clear()\n self._lb_tables.Clear()\n\n tables = self._datafile.query(sciplot.database.Query(\"SELECT TableID, Title FROM `Table`\", [], 1))[0] #get all tables from the database\n for table_id, table_title in tables:\n self._lb_tables.Append(table_title)\n self._tables.append((table_id, table_title))\n \n if selection != -1:\n self._lb_tables.SetSelection(selection)",
"def visit_table(self, sytable):\n self.current.update(sytable)",
"def visit_table(self, sytable):\n self.current.update(sytable)",
"def swap_tables(self):\n if self.stop_before_swap:\n return True\n log.info(\"== Stage 6: Swap table ==\")\n self.stop_slave_sql()\n self.execute_sql(sql.set_session_variable(\"autocommit\"), (0,))\n self.start_transaction()\n stage_start_time = time.time()\n self.lock_tables((self.new_table_name, self.table_name, self.delta_table_name))\n log.info(\"Final round of replay before swap table\")\n self.checksum_required_for_replay = False\n self.replay_changes(single_trx=True, holding_locks=True)\n # We will not run delta checksum here, because there will be an error\n # like this, if we run a nested query using `NOT EXISTS`:\n # SQL execution error: [1100] Table 't' was not locked with LOCK TABLES\n if self.mysql_version.is_mysql8:\n # mysql 8.0 supports atomic rename inside WRITE locks\n self.execute_sql(\n sql.rename_all_tables(\n orig_name=self.table_name,\n old_name=self.renamed_table_name,\n new_name=self.new_table_name,\n )\n )\n self.table_swapped = True\n self.add_drop_table_entry(self.renamed_table_name)\n log.info(\n \"Renamed {} TO {}, {} TO {}\".format(\n self.table_name,\n self.renamed_table_name,\n self.new_table_name,\n self.table_name,\n )\n )\n else:\n self.execute_sql(sql.rename_table(self.table_name, self.renamed_table_name))\n log.info(\n \"Renamed {} TO {}\".format(self.table_name, self.renamed_table_name)\n )\n self.table_swapped = True\n self.add_drop_table_entry(self.renamed_table_name)\n self.execute_sql(sql.rename_table(self.new_table_name, self.table_name))\n log.info(\"Renamed {} TO {}\".format(self.new_table_name, self.table_name))\n\n log.info(\"Table has successfully swapped, new schema takes effect now\")\n self._cleanup_payload.remove_drop_table_entry(\n self._current_db, self.new_table_name\n )\n self.commit()\n self.unlock_tables()\n self.stats[\"time_in_lock\"] = self.stats.setdefault(\"time_in_lock\", 0) + (\n time.time() - stage_start_time\n )\n self.execute_sql(sql.set_session_variable(\"autocommit\"), (1,))\n self.start_slave_sql()\n self.stats[\"swap_table_progress\"] = \"Swap table finishes\"",
"def updateTableDiff(self, table1,table2):\n logging.debug(f\"\"\"updTblDiff table_name ={table1.tableName}\"\"\")\n qry1 = table1.format_qry().replace(\"'\",\"''\")\n qry2 = table2.format_qry().replace(\"'\",\"''\")\n server1_select = table1.select.replace(\"'\",\"''\")\n server2_select = table2.select.replace(\"'\",\"''\")\n sql = f\"\"\"update {self.schemaRepo}.tablediff set\n server1_rows = {table1.numrows},\n server2_rows = {table2.numrows},\n server1_select = '{server1_select}',\n server2_select = '{server2_select}',\n server1_qry = '{qry1}',\n server2_qry = '{qry2}'\n WHERE (lower(table_name),step)\n =(lower('{table1.tableName}'),0)\"\"\"\n conn = self.connect(self.cxRepo)\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")",
"def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')",
"def update_updated_data_sqlite_db(self, table_name: str):\n # go through indicators and get updated data in dataframe\n print('start downloading queries')\n df = self.__get_updated_data(table_name)\n print('api download completed')\n\n # get list of sql queries to insert to sqlite db\n print('start creating queries')\n q_list = self.__get_sql_insert_query_list(df, table_name)\n\n # insert data to sqlite\n print('start inserting data')\n AccessDB().run_insert_query(q_list)\n return 'Process Completed'",
"def refresh(self, list_of_tables):\n self.dismod_file.refresh(list_of_tables)",
"def _update_database(self, sql_interface):\n print(\" Loading...\")\n\n # create table if it doesn't exist\n sql_interface.create_table_if_necessary()\n try:\n sql_interface.write_file_to_sql()\n except TableWritingError:\n # TODO: tell user total count of errors.\n # currently write_file_to_sql() just writes in log that file failed\n self._failed_table_count += 1\n pass",
"def update_2xto30():\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')",
"def init_table_obj(self):\n # Check the existence of original table\n if not self.table_exists(self.table_name):\n raise OSCError(\n \"TABLE_NOT_EXIST\", {\"db\": self._current_db, \"table\": self.table_name}\n )\n self._old_table = self.fetch_table_schema(self.table_name)\n self.partitions[self.table_name] = self.fetch_partitions(self.table_name)\n # The table after swap will have the same partition layout as current\n # table\n self.partitions[self.renamed_table_name] = self.partitions[self.table_name]\n # Preserve the auto_inc value from old table, so that we don't revert\n # back to a smaller value after OSC\n if self._old_table.auto_increment:\n self._new_table.auto_increment = self._old_table.auto_increment\n # We don't change the storage engine in OSC, so just use\n # the fetched instance storage engine\n self._new_table.engine = self._old_table.engine\n # Populate both old and new tables with explicit charset/collate\n self.populate_charset_collation(self._old_table)\n self.populate_charset_collation(self._new_table)",
"def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()",
"def refresh_table(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n \n #remake table ui so that new columns can be added\n self._recreate_dvl_data()\n\n #create datatable object\n datatable = sciplot.datatable.Datatable(self._datafile)\n\n #set variable ids for columns\n variable_ids = []\n variable_symbols = []\n format_strings = []\n for variable_symbol, variable_id, format_string in self._datafile.query(sciplot.database.Query(\"SELECT Variable.Symbol, Variable.VariableID, TableColumn.FormatPattern FROM Variable INNER JOIN TableColumn ON TableColumn.VariableID = Variable.VariableID WHERE TableColumn.TableID = (?);\", [table_id], 1))[0]:\n self._dvl_columns.append(self._dvl_data.AppendTextColumn(variable_symbol)) #create column header\n variable_symbols.append(variable_symbol)\n variable_ids.append(variable_id)\n format_strings.append(format_string)\n \n datatable.set_variables(variable_ids)\n\n #load constants for the datatable\n constants_table = {}\n for composite_unit_id, constant_symbol, constant_value in self._datafile.query(sciplot.database.Query(\"SELECT UnitCompositeID, Symbol, Value FROM Constant;\", [], 1))[0]:\n value = sciplot.functions.Value(constant_value) #make a value object so that the data can be formatted with the format strings\n if composite_unit_id != None:\n value.units = self._datafile.get_unit_by_id(composite_unit_id)[1]\n constants_table[constant_symbol] = constant_value\n \n #load all data from the datafile into memory\n no_exception = True\n try:\n datatable.load(constants_table)\n \n except Exception as e:\n wx.MessageBox('Couldn\\'t generate table\\n{}'.format(str(e)), type(e).__name__, wx.ICON_ERROR | wx.OK) #display error message for the user\n no_exception = False\n\n if no_exception:\n #load transposed data\n data_as_rows = datatable.as_rows()\n \n #put data into table\n for row in data_as_rows:\n formatted_row = []\n for i in range(len(row)):\n value, exponent = row[i].format(format_strings[i])\n \n if exponent is None: #not in exponential form, just display the value\n formatted_row.append(value)\n else: #exponential form, display correctly\n if int(exponent) < 0:\n sign = ''\n else:\n sign = '+'\n\n formatted_row.append('{}E{}{}'.format(value, sign, exponent))\n\n self._dvl_data.AppendItem(formatted_row) #add row to table\n \n #set column titles\n if len(data_as_rows) > 0:\n for index in range(len(data_as_rows[0])):\n column_obj = self._dvl_columns[index]\n new_col_string = variable_symbols[index]\n value_obj = data_as_rows[0][index]\n\n unit_string = self._datafile.get_unit_string(value_obj.units)\n \n if unit_string != '': #add si units to title, if there are any\n new_col_string += ': ' + unit_string\n column_obj.SetTitle(new_col_string)\n \n #set column widths\n if len(self._dvl_columns) > 0:\n col_width = (self._dvl_data.GetSize()[0] - 30) / len(self._dvl_columns)\n for col in self._dvl_columns:\n col.SetWidth(col_width)",
"def update(self, mode=\"all\"):\n\n self._check_mode(mode)\n\n mode = [\"prod\", \"staging\"] if mode == \"all\" else [mode]\n for m in mode:\n\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n\n # if m == \"staging\":\n\n table.description = self._render_template(\n Path(\"table/table_description.txt\"), self.table_config\n )\n\n # save table description\n with open(\n self.metadata_path\n / self.dataset_id\n / self.table_id\n / \"table_description.txt\",\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(table.description)\n\n # when mode is staging the table schema already exists\n table.schema = self._load_schema(m)\n fields = [\"description\", \"schema\"] if m == \"prod\" else [\"description\"]\n self.client[f\"bigquery_{m}\"].update_table(table, fields=fields)\n\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"updated\",\n )",
"def update_table(self):\r\n self.cursor.execute(\"\"\"SELECT * FROM transactions\"\"\")\r\n result = self.cursor.fetchall()\r\n self.tree.delete(*self.tree.get_children())\r\n for item in result:\r\n self.tree.insert('', 'end', text=item[0], values=item[1:])"
]
| [
"0.64403546",
"0.62191427",
"0.6161551",
"0.6083598",
"0.60682315",
"0.59898955",
"0.59780186",
"0.5969097",
"0.59629166",
"0.5858584",
"0.5825505",
"0.5784813",
"0.5734493",
"0.5684447",
"0.5658647",
"0.56252503",
"0.5621925",
"0.5621925",
"0.56180346",
"0.56000155",
"0.5599577",
"0.55482537",
"0.5545632",
"0.55261993",
"0.54809976",
"0.5479856",
"0.547499",
"0.537855",
"0.53745663",
"0.537136"
]
| 0.74815 | 0 |
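For completeness, a hedged sketch of how the in-place `update_tables` above is typically driven from a tree sequence; this mirrors the `update` wrapper that appears among the negatives, and it assumes `update_tables` is exposed at the `pyslim` package level:

```python
import tskit
from pyslim import update_tables  # assumption: importable at package level

ts = tskit.load("old_format.trees")   # placeholder path
tables = ts.dump_tables()             # mutable TableCollection copy
update_tables(tables)                 # fixes metadata and schemas in place
ts_new = tables.tree_sequence()       # rebuild a current-format tree sequence
```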
Uses a dictionary to convert a DNA sequence into the complement strand. C <-> G, T <-> A | def fast_complement(dna):
str = ''
dict = {'C':'G','G':'C','A':'T','T':'A'}
for char in dna:
if char == 'C' or char == 'G' or char == 'T' or char == 'A':
str = str + dict[char]
else :
str = 'invalid character entered, please check the input'
break
return str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reverse_complement(dna):\n str = ''\n dict = {'C': 'G', 'G': 'C', 'A': 'T', 'T': 'A'}\n for char in dna:\n if char == 'C' or char == 'G' or char == 'T' or char == 'A':\n str = str + dict[char]\n else :\n str = 'invalid character entered, please check the input'\n break\n return str[::-1]",
"def complement(s):\n\n # dictionary setup for complement\n dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n\n # make a list of letters from string\n #print(s)\n slist = list(s)\n\n # for loop of the letters and call the base_complementary dictionary\n templist = []\n for i in slist:\n templist.append(dict[i])\n\n # join the letters of the list into string and return\n compstring = \"\"\n for i in templist:\n compstring += i\n #print(compstring)\n return compstring",
"def complement_strand(dna):\n reverse_complement = \"\"\n\n for character in dna[::-1]:\n if character == \"A\":\n reverse_complement += \"T\"\n elif character == \"T\":\n reverse_complement += \"A\"\n elif character == \"C\":\n reverse_complement += \"G\"\n elif character == \"G\":\n reverse_complement += \"C\"\n\n return reverse_complement",
"def complement(seq):\n complement_dict = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}\n seq_list = list(seq)\n seq_list = [complement_dict[base] for base in seq_list]\n return ''.join(seq_list)",
"def transcribe(dna):\n str = ''\n dict = {'C': 'C', 'G': 'G', 'A': 'A', 'T': 'U'}\n for char in dna:\n if char == 'C' or char == 'G' or char == 'T' or char == 'A':\n #converting only of the valid string is encountered\n #then the string is converted accordingly\n str = str + dict[char]\n #the case for incalid string, it throws only the error\n else :\n str = 'invalid character entered, please check the input'\n break\n return str",
"def get_complement(nucleotide):\n\n nucDict={'A':'T','G':'C','T':'A','C':'G'}\n return(nucDict[nucleotide])",
"def get_complement(sequence):\n #Convert all rna_sequence to upper case:\n sequence=sequence.upper()\n # Conver RNA sequence into a list\n rna_list=list(sequence)\n #Create an empty list to store complement sequence:\n comlement_sequence=[]\n #Complement code corresponsing for all RNA bases\n complement= {'A' : 'U', 'C' : 'G', 'G': 'C', 'U': 'A'}\n # Looping through all the bases in RNA seq. to convert to its complement seq using dictionary values.\n for i in rna_list:\n comlement_sequence.append(complement[i])\n return ''.join(comlement_sequence)",
"def complement_this(seq):\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n rev_seq = ''\n for nuc in seq:\n if nuc in ['A', 'T', 'G', 'C']:\n rev_seq += compliment_dict[nuc]\n return rev_seq",
"def get_reverse_complement(dna):\n res = \"\";\n for c in dna:\n if c == 'A':\n res = 'T' + res\n elif c == 'T':\n res = 'A' + res\n elif c == 'G':\n res = 'C' + res\n elif c == 'C':\n res = 'G' + res\n return res",
"def get_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, [ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ] ))",
"def translate(codon):\n \n table = { \n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', \n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', \n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', \n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', \n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', \n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', \n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', \n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', \n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', \n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', \n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', \n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', \n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', \n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*', \n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W', \n } \n \n assert codon in table.keys(), \"Not a valid codon sequence.\"\n \n return table[codon]",
"def to_rna(DNA):\r\n\r\n \r\n return \"\".join( ( {\"G\":\"C\", \"C\":\"G\", \"T\":\"A\", \"A\":\"U\"}[nuc] for nuc in DNA))",
"def translate(rna):\n RNA_CODON_TABLE = {\"UUU\": \"F\", \"UUC\": \"F\", \"UUA\": \"L\", \"UUG\": \"L\",\n \"UCU\": \"S\", \"UCC\": \"S\", \"UCA\": \"S\", \"UCG\": \"S\",\n \"UAU\": \"Y\", \"UAC\": \"Y\", \"UAA\": \"*\", \"UAG\": \"*\",\n \"UGU\": \"C\", \"UGC\": \"C\", \"UGA\": \"*\", \"UGG\": \"W\",\n \"CUU\": \"L\", \"CUC\": \"L\", \"CUA\": \"L\", \"CUG\": \"L\",\n \"CCU\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\",\n \"CAU\": \"H\", \"CAC\": \"H\", \"CAA\": \"Q\", \"CAG\": \"Q\",\n \"CGU\": \"R\", \"CGC\": \"R\", \"CGA\": \"R\", \"CGG\": \"R\",\n \"AUU\": \"I\", \"AUC\": \"I\", \"AUA\": \"I\", \"AUG\": \"M\",\n \"ACU\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"AAU\": \"N\", \"AAC\": \"N\", \"AAA\": \"K\", \"AAG\": \"K\",\n \"AGU\": \"S\", \"AGC\": \"S\", \"AGA\": \"R\", \"AGG\": \"R\",\n \"GUU\": \"V\", \"GUC\": \"V\", \"GUA\": \"V\", \"GUG\": \"V\",\n \"GCU\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\", \"GCG\": \"A\",\n \"GAU\": \"D\", \"GAC\": \"D\", \"GAA\": \"E\", \"GAG\": \"E\",\n \"GGU\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\"}\n str = ''\n list = [rna[i:i+3] for i in range(0,len(rna),3)]\n for x in list:\n #checks if x is in key of RNA_CODON_TABLE\n if x in RNA_CODON_TABLE:\n #appends only if the value for the given key is not *\n if RNA_CODON_TABLE[x] != '*':\n str = str + RNA_CODON_TABLE[x]\n #if only one char is extra(meaning apart form the 3 pair characters available in dictionary)\n #checks if the char is in following\n elif len(x) == 1 and x in ['A','G','C','U']:\n str = str + x\n #if the char is of length 2 i.e, 2 words extra\n elif len(x) == 2 and x[0] in ['A','G','C','U'] and x[1] in ['A','G','C','U']:\n #Then appending the char to the actually converted string\n str = str + x[0]\n str = str + x[1]\n #if the char is not in the above characters then it is a unrecognised character.\n else:\n print(\"Unrecognised character:\",x)\n return str",
"def reverse_complement_strand(dna):\n reverse_complement = \"\"\n\n for character in dna[::-1]:\n if character == \"A\":\n reverse_complement += \"T\"\n elif character == \"T\":\n reverse_complement += \"A\"\n elif character == \"C\":\n reverse_complement += \"G\"\n elif character == \"G\":\n reverse_complement += \"C\"\n\n return reverse_complement",
"def get_complement(nucleotide):\n nucDict = {'A':'T', 'T':'A', 'G':'C', 'C':'G'}\n try: \n return nucDict[nucleotide]\n except KeyError:\n print 'input not a nucleotide.'\n return None",
"def get_complement(nucleotide):\n if nucleotide=='A':\n \treturn 'T'\n if nucleotide=='C':\n \treturn 'G'\n if nucleotide=='T':\n \treturn 'A'\n if nucleotide=='G':\n \treturn 'C'",
"def to_rna(dna_strand):\n rta = [] # create an empty rna string\n dna = list(dna_strand) # create a list of dna so iterating is possible\n to_replace = {'C': 'G', 'G': 'C', 'T': 'A', 'A': 'U'}\n for letter in dna:\n if letter in to_replace.keys():\n rta.append(to_replace[letter])\n return ''.join(rta) # returning the rta string",
"def get_complement(nucleotide):\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'T':\n return 'A'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'",
"def complement_RNA(RNAsequence):\n complement = \"\"\n for nucleotide in RNAsequence:\n if nucleotide == \"A\":\n complement += \"U\"\n if nucleotide == \"C\":\n complement += \"G\"\n if nucleotide == \"G\":\n complement += \"C\"\n if nucleotide == \"U\":\n complement += \"A\"\n return complement",
"def to_rna(dna=''):\n themap = {'G':'C', 'C':'G', 'T': 'A', 'A': 'U'}\n return ''.join([themap[each] for each in dna])",
"def get_complement(nucleotide):\n\n if nucleotide == 'T':\n return 'A'\n elif nucleotide == 'A':\n return 'T'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'",
"def reverse_complement_strand(dna):\n assert (is_dna(dna))\n return ''.join(_rev_mapping[nn] for nn in dna[::-1])",
"def get_reverse_complement(dna):\n reverseDNA = ''\n newDNA = ''\n for i in range(len(dna)): \n reverseDNA+= dna[-1-i]\n for k in range(len(dna)): \n if reverseDNA[k] == 'A': \n newDNA+='T'\n elif reverseDNA[k] =='T':\n newDNA+= 'A' \n elif reverseDNA[k] =='G':\n newDNA+= 'C'\n elif reverseDNA[k] =='C':\n newDNA+= 'G' \n return newDNA",
"def get_strand_complement(sequence):\n seq = sequence.upper()\n change = str.maketrans('ACGT', 'TGCA')\n return seq.translate(change)",
"def rev_comp(dna):\n return dna[::-1].translate(maketrans('ATCG', 'TAGC'))",
"def complement(seq):\n if PY3:\n table = str.maketrans('ACTGNactg', 'TGACNtgac')\n elif PY2:\n table = string.maketrans('ACTGNactg', 'TGACNtgac')\n return str(seq).translate(table)",
"def ReverseComplement1(seq):\n seq_dict = {'A':'T','T':'A','G':'C','C':'G'}\n return \"\".join([seq_dict[base] for base in reversed(seq)])",
"def get_strand_complement(sequence):\n # make the sequence upper case\n seq = sequence.upper()\n # table to change the complement characters\n change = str.maketrans('ACGT', 'TGCA')\n return seq.translate(change)",
"def reverse_complementary(seq):\n\n complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return \"\".join(complement[n] for n in reversed(seq))",
"def get_complement(nucleotide):\n #if statements change nucleotide inputs to their complementary nucleotide\n if nucleotide == \"A\":\n return \"T\"\n if nucleotide == \"T\":\n return \"A\" \n if nucleotide == \"C\":\n return \"G\"\n if nucleotide == \"G\":\n return \"C\""
]
| [
"0.7362567",
"0.6997333",
"0.69915193",
"0.68732756",
"0.6788549",
"0.67824614",
"0.6681162",
"0.6663421",
"0.65523094",
"0.6519002",
"0.6517805",
"0.6516243",
"0.6514966",
"0.6506109",
"0.6447673",
"0.6435777",
"0.6419471",
"0.6395348",
"0.63883877",
"0.63750035",
"0.6363215",
"0.6346148",
"0.6327365",
"0.6302697",
"0.62995523",
"0.62900764",
"0.62615144",
"0.62492526",
"0.62465423",
"0.6244765"
]
| 0.7598703 | 0 |
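A short usage sketch of the complement helper above; the example sequences are arbitrary and the calls rely on the `fast_complement` definition shown in this record:

```python
# Using the fast_complement definition from the document above:
print(fast_complement("ACGT"))     # -> TGCA
print(fast_complement("GATTACA"))  # -> CTAATGT
print(fast_complement("ACGX"))     # -> invalid character entered, please check the input
```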
Removes the interval of characters from a string or list inclusively, 0 based | def remove_interval(s, start, stop):
    #s[:start] keeps the characters at indices 0..start-1, i.e. everything before 'start'
    #s[stop+1:] keeps everything after index 'stop', so indices start..stop are removed inclusively
temp_list = s[:start] + s[stop+1:]
return temp_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def anything_but_range(*args:List[str]) -> str:\n return range(*args, negate=True)",
"def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy",
"def anything_but_chars(*args:List[str]) -> str:\n # TODO uniq\n chars = \"\".join(args)\n return f\"[^{chars}]\"",
"def disperse_string(solid_string):\r\n normal_list = list(solid_string)\r\n return list(itertools.chain.from_iterable(zip(normal_list, [0] * len(normal_list))))",
"def truncate(string):\n\n result = []\n\n for char in string:\n if len(result) == 0 or char != result[len(result)-1]:\n result.append(char)",
"def truncate(string):",
"def remove_letter(letter, strng):",
"def remove_boundaries(input: str, delim: str) -> str:\n output = \"\"\n for char in input:\n if char != delim:\n output += char\n return output",
"def trim(self, start, end):",
"def clean(x):\r\n return ensure_sorted_indices(remove0(x))",
"def fn(s):\n if len(s) == 1: return [s] # edge case \n if s.startswith(\"0\") and s.endswith(\"0\"): return []\n if s.startswith(\"0\"): return [s[:1] + \".\" + s[1:]]\n if s.endswith(\"0\"): return [s]\n return [s] + [s[:i] + \".\" + s[i:] for i in range(1, len(s))]",
"def remove_special_characters(string_list):",
"def clean_str(\n s: str,\n l: list,\n r: list,\n ) -> str: \n\n # Loop through every substring in the list\n for i in range(0, len(l)):\n\n # Remove all occurrences of the substring\n s = s.replace(l[i], r[i])\n\n return s",
"def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"",
"def remove_all(sub, s):\n i = 0\n while i < len(s):\n if s[i:i + len(sub)] == sub:\n s = s[:i] + s[i + len(sub):]\n i = 0\n i += 1\n return s\n\n # n = 0\n # b = len(sub)\n # m = n + b\n # if sub in s:\n # while n < len(s):\n # if s[n:m] == sub:\n # s = s[:n] + s[m:]\n # n = 0\n # else:\n # n += 1\n # m = n + b\n # return s\n # return s\n\n #another removes all subs even if it was part of already removed sub\n # wall = 0\n # new_str = \"\"\n # for i in range(len(s) - len(sub) + 1):\n # if s[i:i + len(sub)] == sub:\n # new_str += s[wall:i]\n # wall = i + len(sub)\n # return new_str + s[wall:]",
"def sufix(pattern):\n return pattern[1:len(pattern)]",
"def reduce_me(s):\n for x in range(0, len(s)):\n b = s[x:] + s[:x]\n print(b)",
"def cut(value,arg):\n return value.replace(arg, '')",
"def _removeRepetitions(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n # Remove additional caracters \n s = re.sub(r'(\\w)\\1{2,100}', r'\\1', s) \n # Remove additional white spaces \n s = re.sub( '\\s+', ' ', s ).strip() \n \n return s",
"def sequence_cleaner(sequence, alphabet):\n seq = sequence.upper()\n sequence = [base for base in seq if base in alphabet]\n return ''.join(sequence)",
"def mycut(value, arg):\r\n return value.replace(arg, '')",
"def clip_string_list(a, max_len, continue_str='…'):\n return [x if len(x) <= max_len else x[:max_len - len(continue_str)] + '…' for x in a]",
"def clean(input):\n output = input[0]\n for char in input:\n if output[-1] != char: \n output += char\n return output",
"def cut(value,arg):\n return value.replace(arg,'')",
"def cut(value,arg):\n return value.replace(arg,'')",
"def cut(value,arg):\n return value.replace(arg,'')",
"def cut(value,arg):\n return value.replace(arg,'')",
"def cut(value,arg):\n return value.replace(arg,'')",
"def clean_code(code, lengte):\n return code.zfill(lengte)",
"def fours_removed(seq):\n length = len(seq) - 4\n new_seq = seq[4:length:2]\n return new_seq"
]
| [
"0.61851144",
"0.5980326",
"0.5857507",
"0.58541673",
"0.580172",
"0.57548076",
"0.563322",
"0.5611872",
"0.5602925",
"0.55550575",
"0.55322695",
"0.55312544",
"0.5528412",
"0.54889864",
"0.5466602",
"0.54589194",
"0.5456114",
"0.5435535",
"0.54222876",
"0.5413383",
"0.5395615",
"0.53932136",
"0.53916734",
"0.5383483",
"0.5383483",
"0.5383483",
"0.5383483",
"0.5383483",
"0.5351442",
"0.53468376"
]
| 0.70485836 | 0 |
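A brief usage sketch for the interval-removal helper above (inclusive, 0-based); the inputs are arbitrary examples and the calls rely on the `remove_interval` definition in this record:

```python
# Using the remove_interval definition from the document above:
print(remove_interval("abcdefgh", 2, 4))          # -> 'abfgh'  (drops 'cde')
print(remove_interval([0, 1, 2, 3, 4, 5], 1, 3))  # -> [0, 4, 5]
```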
Generates all kmers of size k for a string s and stores them in a set | def kmer_set(s, k):
kmer = set([])
n = len(s)
    #n-k+1 is the number of possible k-mer start positions in s.
for x in range(0, n - k + 1):
kmer.add(s[x:x + k])
return kmer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clump_forming_kmers(string, k, l, t):\n clumpFormingKmers = set()\n # Initial counts of k-mers within length l window starting from the first\n # chracter of the string.\n counts = Counter([kmer for i, kmer in enumerate_kmers(string[:l], k)])\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n for i in range(1, len(string) - l + 1):\n counts[string[i-1:i-1+k]] -= 1\n counts[string[i+l-k:i+l]] += 1\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n return list(clumpFormingKmers)",
"def string_to_kmers(s: str, k: int) -> List[str]:\n for i in range(0, len(s), k):\n yield s[i:i + k]",
"def enumerate_kmers(string, k, start=0):\n for i in range(0, len(string) - k + 1):\n yield start + i, string[i:i+k]",
"def get_kmers(file, size):\n\tkmers = defaultdict(int)\n\tregex = re.compile('[' + string.punctuation + ']')\n\tfor line in open(file):\n\t\tfor word in [regex.sub('', w) for w in line.lower().split()]:\n\t\t\tnkmers = len(word) - size + 1\n\t\t\tfor kmer in [word[i:i+size] for i in range(nkmers)]:\n\t\t\t\tkmers[kmer] += 1\n\treturn kmers",
"def kmers(sequence, alphabet, k):\n mers = (''.join(c) for c in windowed(k, sequence))\n return [mer for mer in mers if all(base in set(alphabet) for base in mer)]",
"def find_kmers(in_fasta, k):\n n= len(in_fasta)-k+1\n kmers=[]\n for i in range(0, n):\n kmers.append(in_fasta[i:i+k])\n return(kmers)",
"def load_unique_kmers(n, k):\t\n\thg38, hiv1 = load_kmer_data(k)\n\n\tkmers = set()\n\tif len(hg38)+len(hiv1) < n:\n\t\tprint(\"Not enough sequences! {} < {}!\".format(len(hg38)+len(hiv1), n))\n\telse:\n\t\ti = 0\n\t\twhile len(kmers) < n:\n\t\t\tkmers.add(hg38[i][0].upper())\n\t\t\tif len(kmers) < n:\n\t\t\t\tkmers.add(hiv1[i][0].upper())\n\t\t\ti += 1\n\treturn list(kmers)",
"def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list",
"def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]",
"def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts",
"def generate_kmers(k):\n\n kmers_list = []\n kmers_tuples = itertools.product('ACGT', repeat=k)\n for kmer in kmers_tuples:\n kmers_list.append(''.join(kmer))\n\n return kmers_list",
"def kmerNeighbors(text,k):\r\n L=set()\r\n for i in range(0,len(text)-k+1):\r\n for d in range(0,k+1):\r\n L.update(Neighbors(kmer(text,i,k),d))\r\n D=dict()\r\n for l in L:\r\n D[l]=minHamm(text,l)\r\n return D",
"def build_kmers(\n sequence, \n ksize):\n\n kmers = list()\n n_kmers = len(sequence) - ksize + 1\n # Loop to store khmers in each sequence\n for i in range(n_kmers):\n kmer = sequence[i:i + ksize]\n kmers.append(kmer)\n \n return kmers, n_kmers\n\n # It is an example that needs to say the size of Kmer you would like.",
"def get_all_possible_kmers(alphabet, kmin, kmax):\n kmers = [''.join(letters) for n in range(kmin, kmax + 1)\n for letters in product(alphabet, repeat=n)]\n return kmers",
"def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)",
"def all_kmers(k):\n for i in range(0, 4 ** k):\n res = number_to_kmer(i, k)\n yield res",
"def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer",
"def count_kmers(dna: str, k: int, alphabet: str = \"ACGT\"):\n c = Counter(dna[i:i + k] for i in range(len(dna) - k + 1))\n result = []\n for k_mer in enumerate_kmers(alphabet, k):\n result.append(c[k_mer])\n return result",
"def get_all_kmers(pattern, k, ordered=False):\n ordered_kmers = [pattern[i:i + k] for i in range(len(pattern) - k + 1)]\n if ordered:\n return ordered_kmers\n return set(ordered_kmers)",
"def kmer_chunks(sequence: str, chunks: int) -> Set[str]:\n chunk_size = len(sequence) // (chunks)\n remainder = len(sequence) % (chunks)\n chunk_sizes: List[int] = remainder * [chunk_size + 1] + (chunks - remainder) * [\n chunk_size\n ]\n offset = 0\n chunk_set = set()\n for size in chunk_sizes:\n chunk_set.add(sequence[offset : offset + size])\n offset += size\n return chunk_set",
"def remove_redundant_kmers(\n search_sets: List[SearchSet],\n) -> List[Tuple[int, Optional[int], List[str]]]:\n\n kmer_search_list = []\n for start, stop, kmer_set in search_sets:\n for kmer in kmer_set:\n kmer_search_list.append((kmer, start, stop))\n minimized_search_list = minimize_kmer_search_list(kmer_search_list)\n result_dict = defaultdict(list)\n for kmer, start, stop in minimized_search_list:\n result_dict[(start, stop)].append(kmer)\n return [(start, stop, kmers) for (start, stop), kmers in result_dict.items()]",
"def generate_input(s_terms):\n qm = QuineMcCluskey()\n res = set()\n if len(s_terms) == 0:\n return res\n for term in s_terms:\n res = res | set([i for i in qm.permutations(term)])\n return res",
"def kmers_composition(dna: str, k: int, alphabet: str = \"ACGT\"):\n dna = Counter(string_to_kmers(dna, k))\n for k_mer in enumerate_kmers(alphabet, k):\n yield dna[k_mer]",
"def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]",
"def kmers_from_dna(dna, k):\n assert k >= 1\n assert len(dna) >= k\n\n assert len(dna) >= k\n for i in range(0, len(dna) - k + 1):\n kmer = dna[i:i + k]\n yield kmer",
"def GenKmers(consensus,MinLen=18,MaxLen=22):\n lengths = [i+MinLen for i in range(MaxLen+1-MinLen)]\n kmers = []\n for length in lengths:\n for i in range(len(consensus)+1 - length):\n kmer = consensus[i:i+length]\n kmers.append((i,kmer))\n return kmers",
"def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count",
"def tokenize(self, s):\n hashset = set()\n if s == '':\n return hashset\n for i in xrange(len(s) - self.ngram):\n hashset.add(s[i:i + self.ngram])\n return hashset",
"def iter_strings_k(n, k, m):\n # initial state -- all zeros\n state = np.zeros((n,), dtype=int)\n\n if k == 0:\n # that was it (!)\n return\n\n while True:\n #print(f\"next state is {state=}\")\n yield state\n\n # Update to next state. Idea is to count and carry as usual, except if\n # there are already k nonzeros in which case we count and carry by\n # ignoring all the trailing zeros. This is the algorithm described here\n # - https://stackoverflow.com/a/10458380/1694896 - adapted from bits to\n # base-m \"mits\"\n if np.count_nonzero(state) < k:\n _add_and_carry_in_place(state, m)\n continue\n\n # there are k nonzeros already, find first nonzero from least\n # significant end. See https://stackoverflow.com/a/52911347/1694896\n last_nonzero = np.max(np.nonzero(state))\n # and increment that one\n _add_and_carry_in_place(state, m, last_nonzero)\n if not np.any(state):\n # end of iteration reached, as we've gone back to the all-zero\n # state.\n return",
"def median_string_motifs(dna, k):\n min_d = math.inf\n median_candidates = {}\n kmers = set()\n for seq in dna:\n kmers |= set(bp.all_kmers(seq, k))\n for pattern in kmers:\n dist, c = min_ham_dist(pattern, dna)\n if dist < min_d:\n min_d = dist\n median_candidates = {}\n median_candidates[pattern] = c\n elif dist == min_d:\n median_candidates[pattern] = c\n return dist, median_candidates"
]
| [
"0.76189435",
"0.7363111",
"0.6979085",
"0.6867179",
"0.6815722",
"0.67906415",
"0.67450863",
"0.6658662",
"0.66486514",
"0.66284275",
"0.6626568",
"0.6619437",
"0.6613662",
"0.64602196",
"0.6456886",
"0.64392704",
"0.6331464",
"0.6325849",
"0.63245183",
"0.6272226",
"0.6261694",
"0.6208308",
"0.61924237",
"0.6180829",
"0.6138742",
"0.6120847",
"0.6104897",
"0.60719776",
"0.6042176",
"0.6012536"
]
| 0.76864225 | 0 |
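As a quick usage sketch for the kmer_set document above: the set comprehension below is a compact restatement of the same loop, and the function name, 7-base input string, and k = 3 are made up purely for illustration.

def kmer_set_sketch(s, k):
    # every window of length k, deduplicated by the set
    return {s[i:i + k] for i in range(len(s) - k + 1)}

print(sorted(kmer_set_sketch("ATCGATC", 3)))
# ['ATC', 'CGA', 'GAT', 'TCG']  -- 'ATC' occurs twice in the input but is stored once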
Generates all kmers of size k for a string s and stores them in a dictionary with the kmer (string) as the key and the number of occurrences of the kmer as the value (int). | def kmer_dict(s, k):
kmer = {}
    # n is the length of the string s
n = len(s)
for x in range(0, n - k + 1):
        #check whether this k-mer is already in the dictionary
if s[x:x+k] in kmer:
            #if it is, increment its count by 1
kmer[s[x:x + k]] += 1
else:
            #otherwise initialize its count to 1
kmer[s[x:x+k]] = 1
return kmer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts",
"def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)",
"def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd",
"def get_kmers(file, size):\n\tkmers = defaultdict(int)\n\tregex = re.compile('[' + string.punctuation + ']')\n\tfor line in open(file):\n\t\tfor word in [regex.sub('', w) for w in line.lower().split()]:\n\t\t\tnkmers = len(word) - size + 1\n\t\t\tfor kmer in [word[i:i+size] for i in range(nkmers)]:\n\t\t\t\tkmers[kmer] += 1\n\treturn kmers",
"def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count",
"def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)",
"def kmerHashMap(reads, k):\n kmers_dict = {}\n # loop through all reads\n for i in range(len(reads)):\n # loop read's bases, except for the last k, to obtain its kmers\n for j in range(1+len(reads[i])-k):\n kmer = reads[i][j:k+j]\n if kmers_dict.has_key(kmer):\n kmers_dict[kmer].add(i)\n else:\n kmers_dict[kmer] = set([i])\n \n return kmers_dict",
"def count_kmers(dna: str, k: int, alphabet: str = \"ACGT\"):\n c = Counter(dna[i:i + k] for i in range(len(dna) - k + 1))\n result = []\n for k_mer in enumerate_kmers(alphabet, k):\n result.append(c[k_mer])\n return result",
"def clump_forming_kmers(string, k, l, t):\n clumpFormingKmers = set()\n # Initial counts of k-mers within length l window starting from the first\n # chracter of the string.\n counts = Counter([kmer for i, kmer in enumerate_kmers(string[:l], k)])\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n for i in range(1, len(string) - l + 1):\n counts[string[i-1:i-1+k]] -= 1\n counts[string[i+l-k:i+l]] += 1\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n return list(clumpFormingKmers)",
"def enumerate_kmers(string, k, start=0):\n for i in range(0, len(string) - k + 1):\n yield start + i, string[i:i+k]",
"def kmerNeighbors(text,k):\r\n L=set()\r\n for i in range(0,len(text)-k+1):\r\n for d in range(0,k+1):\r\n L.update(Neighbors(kmer(text,i,k),d))\r\n D=dict()\r\n for l in L:\r\n D[l]=minHamm(text,l)\r\n return D",
"def build_kmers(\n sequence, \n ksize):\n\n kmers = list()\n n_kmers = len(sequence) - ksize + 1\n # Loop to store khmers in each sequence\n for i in range(n_kmers):\n kmer = sequence[i:i + ksize]\n kmers.append(kmer)\n \n return kmers, n_kmers\n\n # It is an example that needs to say the size of Kmer you would like.",
"def generate_all_kmers(k, ignore_N=True):\n alphabet = \"ACGT\"\n if not ignore_N:\n alphabet += \"N\"\n possible_kmers = itertools.product(alphabet, repeat=k)\n retval = collections.OrderedDict()\n for i, kmer in enumerate(possible_kmers):\n retval[''.join(kmer)] = i\n return retval",
"def kmer_composition(k, text):\r\n # TODO: your code here\r\n d = {}\r\n for i in range(len(text)-k+1):\r\n print(text[i:k+i])\r\n ''' \r\n if(text[i:k+i] in d.keys()):\r\n d[text[i:k+i]] += 1\r\n else:\r\n d[text[i:k+i]] = 1\r\n print(d)\r\n '''",
"def all_kmers(k):\n for i in range(0, 4 ** k):\n res = number_to_kmer(i, k)\n yield res",
"def kmers_composition(dna: str, k: int, alphabet: str = \"ACGT\"):\n dna = Counter(string_to_kmers(dna, k))\n for k_mer in enumerate_kmers(alphabet, k):\n yield dna[k_mer]",
"def kmer_preprocess(filename, k):\n\tkmers = {}\n\twith open(filename) as infile:\n\t\tline = infile.readline()\n\t\tseq = \"\"\n\t\tfor line in infile:\n\t\t\tfor ch in line:\n\t\t\t\tif is_valid_char(ch):\n\t\t\t\t\tseq = seq + ch\n\t\t\t\t\tif len(seq) > k:\n\t\t\t\t\t\tseq = seq[1:]\n\t\t\t\t\tif len(seq) == k:\n\t\t\t\t\t\tif seq in kmers:\n\t\t\t\t\t\t\tkmers[seq] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tkmers[seq] = 1\n\tpairs = sorted(kmers.items(), reverse=True, key=lambda x: x[1])\n\treturn pairs",
"def string_to_kmers(s: str, k: int) -> List[str]:\n for i in range(0, len(s), k):\n yield s[i:i + k]",
"def kndist(self, string): ###\n d = self.retrieve(string)\n if d == None:\n d = {}\n else:\n d = dict(d.children)\n prefixes = list(self.dist(\"\").keys())\n for w in list(d.keys()):\n d[w] = self.kncount(string + w, prefixes)\n return d",
"def count_mers(sequence, alphabet, kmin, kmax):\n alphabet = set(alphabet)\n counts = defaultdict(int)\n for kmer in get_kmers_from_sequence(sequence, kmin, kmax):\n if set(kmer).issubset(alphabet):\n counts[kmer] = counts.get(kmer, 0) + 1\n return counts",
"def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs",
"def kmers_from_dna(dna, k):\n assert k >= 1\n assert len(dna) >= k\n\n assert len(dna) >= k\n for i in range(0, len(dna) - k + 1):\n kmer = dna[i:i + k]\n yield kmer",
"def median_string_motifs(dna, k):\n min_d = math.inf\n median_candidates = {}\n kmers = set()\n for seq in dna:\n kmers |= set(bp.all_kmers(seq, k))\n for pattern in kmers:\n dist, c = min_ham_dist(pattern, dna)\n if dist < min_d:\n min_d = dist\n median_candidates = {}\n median_candidates[pattern] = c\n elif dist == min_d:\n median_candidates[pattern] = c\n return dist, median_candidates",
"def kmer_set(s, k):\n kmer = set([])\n n = len(s)\n #n-k+1 is the available range of values or probablities.\n for x in range(0, n - k + 1):\n kmer.add(s[x:x + k])\n return kmer",
"def count_kmer(gene_list, codon_seqs, R, kmer_size=3):\n\n kmer = kmer_size\n MM = 'yes'\n\n list_seqfile = list( codon_seqs.keys() )\n kmer_dict = {}\n\n for orf in gene_list:\n if orf in list_seqfile:\n current_seq = np.array(codon_seqs[orf])\n\n for pos in range(len(current_seq) - (kmer + 1) ):\n if MM == 'yes' and orf in list( mm_consensus.keys() ):\n current_mm = mm_consensus[orf]\n if np.all(current_mm[pos:(pos+kmer)]): # check that no kmer position is MM\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n elif MM == 'no':\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n new_dict = {}\n list_redundant = []\n for k in kmer_dict.keys():\n if kmer_dict[k] > R:\n if k not in list_redundant:\n \t list_redundant.append(k)\n \n return list_redundant",
"def count_kmers(file_name, k, verbose=False):\n if verbose:\n start = time.time()\n print('Counting kmers in {}'.format(file_name))\n total_kmers = 0\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n total_kmers += len(line) - k # eliminate new-line\n line_num += 1\n if verbose:\n end = time.time()\n print('{} kmers are counted in {:.2f} seconds'.format(\n total_kmers, end - start))\n return total_kmers",
"def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]",
"def generate_kmers(k):\n\n kmers_list = []\n kmers_tuples = itertools.product('ACGT', repeat=k)\n for kmer in kmers_tuples:\n kmers_list.append(''.join(kmer))\n\n return kmers_list",
"def sherlockAndAnagrams(s):\n\n dic = {}\n\n count = 0\n for i in range(len(s)):\n for j in range(i+1, len(s)+1):\n substrings = sorted(list(s[i:j]))\n joined_ss = ''.join(substrings)\n if joined_ss != '':\n if joined_ss in dic:\n count += dic[joined_ss]\n dic[joined_ss] += 1\n else:\n dic[joined_ss] = 1 \n print(dic)\n return count",
"def kmer_distance(seq1,seq2,k=3):\n seq1_set = set(count_kmers(seq1,k).keys())\n seq2_set = set(count_kmers(seq2,k).keys())\n union_seq = seq1_set.union(seq2_set)\n dissimilarity = seq1_set ^ seq2_set\n distance = len(dissimilarity)/len(union_seq)\n print(dissimilarity)\n return distance"
]
| [
"0.83377427",
"0.74581957",
"0.7419587",
"0.7218793",
"0.7165491",
"0.7158098",
"0.7143614",
"0.68497807",
"0.6764127",
"0.6763646",
"0.6749679",
"0.67431635",
"0.6696293",
"0.66370577",
"0.661861",
"0.6601165",
"0.65593135",
"0.65568435",
"0.64502054",
"0.6405555",
"0.63692814",
"0.63536423",
"0.6306333",
"0.62802374",
"0.62606996",
"0.6212165",
"0.6157194",
"0.6112153",
"0.6103937",
"0.6050672"
]
| 0.79432833 | 1 |
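A design note on the kmer_dict document above: collections.Counter produces the same {k-mer: count} mapping as the manual if/else, which gives an easy way to sanity-check the record. The function name and input string below are hypothetical.

from collections import Counter

def kmer_counts_sketch(s, k):
    # Counter tallies each length-k window, matching the explicit dictionary loop
    return Counter(s[i:i + k] for i in range(len(s) - k + 1))

print(dict(kmer_counts_sketch("ATCGATC", 3)))
# {'ATC': 2, 'TCG': 1, 'CGA': 1, 'GAT': 1}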
Prints the FIRST 10 lines of a file | def head(file_name):
#from itertools import islice
with open('../test_files/' + file_name, 'r') as infile:
        lines = infile.readlines()
        #printing the 1st 10 lines
        print('list of first 10 lines', lines[:10]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def head(filename, lines=5):\n from itertools import islice\n with open(filename, \"r\") as f:\n return list(islice(f, lines))",
"def head(filename, n=10):\n\tprint(\"[HEAD {}] {}\".format(n,filename))\n\tif filename[-3:].casefold()=='.gz':\n\t\twith gzip.open(filename, 'rt') as previewfile:\n\t\t\tprint(*(next(previewfile) for x in range(n)))\n\telse:\n\t\twith open(filename, 'r') as f:\n\t\t\tfor linenumber in range(n):\n\t\t\t\tline = f.readline()\n\t\t\t\tprint(line)\n\tprint(\"[END HEAD]\")",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, \"r\", encoding=\"utf-8\") as file1:\n lines = file1.readlines()\n if nb_lines <= 0 or nb_lines > len(lines):\n print(\"\".join(lines), end='')\n else:\n print(\"\".join(lines[:nb_lines]), end='')",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename) as file:\n n_lines = 0\n for line in file:\n n_lines += 1\n if nb_lines <= 0 or nb_lines >= n_lines:\n file.seek(0)\n for line in file:\n print(line, end=\"\")\n else:\n file.seek(0)\n for line in range(nb_lines):\n print(file.readline(), end=\"\")",
"def read_lines(filename=\"\", nb_lines=0):\n\n line_counter = 0\n with open(filename, 'r', encoding='utf-8') as my_file:\n for lines in my_file:\n line_counter += 1\n my_file.seek(0)\n if nb_lines <= 0 or nb_lines >= line_counter:\n print(my_file.read(), end=\"\")\n else:\n for i in range(nb_lines):\n print(my_file.readline(), end=\"\")",
"def file_head(f, n):\n prc = subprocess.Popen(\n \"head -n \" + str(n) + \" \" + os.path.normpath(f),\n shell=True, stdout=subprocess.PIPE)\n return prc.communicate()[0].decode().rstrip()",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, encoding=\"utf-8\") as myFile:\n if nb_lines <= 0:\n print(myFile.read(), end=\"\")\n for i in range(nb_lines):\n print(myFile.readline(), end=\"\")",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding='utf8') as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n else:\n for line in f:\n if nb_lines == 0:\n break\n print(line, end=\"\")\n nb_lines -= 1",
"def read_lines(filename=\"\", nb_lines=0):\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for line_count, lines in enumerate(f):\n pass\n if nb_lines <= 0 or nb_lines > (line_count + 1):\n f.seek(0)\n print(f.read(), end='')\n else:\n f.seek(0) # return to file beginning\n for line in range(nb_lines):\n print(f.readline(), end='')",
"def read_lines(filename=\"\", nb_lines=0):\n\n num = 0\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n num += 1\n\n with open(filename, encoding=\"utf-8\") as f:\n if nb_lines <= 0 or nb_lines >= num:\n for line in f:\n print(line, end='')\n else:\n for line in range(nb_lines):\n print(f.readline(), end='')",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n if nb_lines <= 0:\n read_data = f.read()\n print(read_data, end='')\n else:\n n_lines = 0\n for x in f:\n print(x, end='')\n n_lines += 1\n if n_lines == nb_lines:\n break",
"def __show_partial_lines(file_path, limit, cut_method):\n\n lines = fs.read_file(file_path)\n if limit > len(lines):\n raise Exception(\"Index out of bound. {0} is greater than lines in file ({1} lines)\".format(limit, len(lines)))\n for ln in cut_method(lines, limit):\n print(ln)",
"def read_lines(filename=\"\", nb_lines=0):\n number_of_lines = __import__('1-number_of_lines').number_of_lines\n count = 0\n num_file_lines = number_of_lines(filename)\n with open(filename, mode='r', encoding=\"utf=8\") as myFile:\n if nb_lines <= 0 or nb_lines >= num_file_lines:\n print(myFile.read(), end='')\n else:\n while count != nb_lines:\n print(myFile.readline(), end='')\n count += 1",
"def check_file_header(fnames, nlines=5):\n from itertools import islice\n for fname in fnames:\n print(f\"\\nPrinting header from {fname} \\n#########################################\")\n with open(fname) as f:\n head = list(islice(f, nlines))\n for line in head:\n print(line)",
"def _fileHead(self, sourceFile, destFile, maxNumLines, maxNumBytes):\n #os.system('head -n ' + str(numLines) +' \"' + sourceFile +'\" >> \"' + destFile + '\"')\n os.system('head -c ' + str(maxNumBytes) + ' \"' + sourceFile + '\" | head -n ' + str(maxNumLines) + ' >> \"' + destFile + '\"')",
"def read_lines(filename=\"\", nb_lines=0):\n\n with open(filename, encoding=\"UTF8\") as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n i = 0\n while i < nb_lines:\n print(f.readline(), end=\"\")\n i += 1",
"def pager():\n File = open(raw_input( \"Enter any file name for reading \") )\n if File is not None:\n prompt = raw_input(\"Do you wanna read the contents now... y / n \")\n counter = 1\n delimiter = 1\n m = re.search(r'y|Yes|yes|y', prompt)\n if m:\n for line in File:\n if (counter / delimiter) == 10:\n delimiter += 1\n prompted = raw_input(\"Do you wish to continue\")\n x = re.search(r'y|Yes|YES|yes', prompted)\n if x:\n print line,\n print line,\n counter += 1",
"def read_lines(filename=\"\", nb_lines=0):\n\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for lines in f:\n line_count += 1\n with open(filename, mode='r', encoding='utf-8') as f:\n if nb_lines <= 0 or nb_lines >= line_count:\n result = f.read()\n print(\"{:s}\".format(result), end=\"\")\n else:\n for j, line in enumerate(f):\n if j < nb_lines:\n print(\"{:s}\".format(line), end=\"\")",
"def print_file(self, file, line):\n self._file = file\n self._line = line\n try:\n self._file = open(file)\n except IOError:\n print \"can't open\", file \n \n n = 0\n retour =\"....\\n\"\n for line in self._file:\n n +=1\n if (self._line-5 < n) & (n < self._line+5) & (n != self._line):\n retour += line\n if n == self._line:\n retour += (\"<font color='red'><b>%s</b></font>\" % (line))\n \n return retour + \"....\"\n self._file.close()",
"def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head",
"def open_file(file,n,n1=0):\n\tfin = open(file)\n\tbook_lines = []\n\tcount = 0\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tcount += 1\n\t\tif count > n:\n\t\t\tbook_lines.append(word)\n\treturn book_lines",
"def tail(file_name):\n with open('../test_files/' + file_name, 'r') as infile:\n list = infile.readlines()\n #calculating the last 10 lines using len(list)-10:len(list)\n print('list of last 10 lines',list[len(list)-10:len(list)])",
"def print_a_line(line_count, f):\n\tprint line_count, f.readline()",
"def get_next_hundered_lines(file):\n count = 0\n result = []\n while count < 100:\n count += 1\n next_line = file.readline()\n if next_line != \"\":\n result.append(next_line)\n else:\n break\n return result",
"def read_lines(filename=\"\", nb_lines=0):\n read_buffer = []\n\n with open(filename, \"r\", encoding=\"utf-8\") as fd:\n count = 0\n for count, line in enumerate(fd):\n if (count < nb_lines or nb_lines == 0):\n read_buffer.append(line)\n else:\n break\n\n print(\"{}\".format(\"\".join(read_buffer)), end=\"\")",
"def print_a_line(line_count, f):\n print (line_count, f.readline())",
"def print_till_blank(amfile):\n # print lines until a blank is found.\n while True:\n line = amfile.readline().strip()\n print(line)\n if line == \"\":\n break\n return None",
"def print_file(f, anykey):\n\n print \"\"\n with open(f) as f:\n for line in f:\n sys.stdout.write(line)\n if anykey is True:\n cont()\n continue\n print \"\\n\"",
"def print_titles(file, inc=3):\n count = 0\n with open(file) as f:\n for line in f:\n if count == 0 or count % 3 == 0:\n print(line.strip() + '\\t 0')\n count += 1",
"def tailNLinesFromFile(file, n):\n\n if not os.path.isfile(file):\n return None\n\n command = ['tail', '-n', str(n), file]\n\n output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]\n\n return output.split('\\n')"
]
| [
"0.6985806",
"0.6973528",
"0.6691345",
"0.6686263",
"0.6660941",
"0.6617253",
"0.6611951",
"0.65246826",
"0.64488167",
"0.63853645",
"0.6347137",
"0.63329756",
"0.63230157",
"0.6308031",
"0.62810165",
"0.62323415",
"0.61938477",
"0.6095313",
"0.6090691",
"0.6051577",
"0.6046864",
"0.60099626",
"0.600965",
"0.59570175",
"0.5951882",
"0.5912754",
"0.57211334",
"0.5704043",
"0.5686798",
"0.5681429"
]
| 0.81507313 | 0 |
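The head document above reads the whole file into memory before slicing; the sketch below streams only the first ten lines with itertools.islice (the import already hinted at in that document's commented-out line). The '../test_files/' prefix is kept from the record, and 'example.txt' is a hypothetical file name.

from itertools import islice

def head_sketch(file_name, n=10):
    # islice yields at most n lines, so the rest of the file is never read
    with open('../test_files/' + file_name, 'r') as infile:
        for line in islice(infile, n):
            print(line, end='')

# head_sketch('example.txt')  # hypothetical file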
Prints the LAST 10 lines of a file | def tail(file_name):
with open('../test_files/' + file_name, 'r') as infile:
        lines = infile.readlines()
        #the last 10 lines are simply lines[-10:]
        print('list of last 10 lines', lines[-10:]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_tail(filename, n):\n result = ''\n with open(filename, 'r') as f:\n for line in (f.readlines()[-n:]):\n result += line\n\n return result",
"def tail(filepath, n):\n with open(filepath) as file_fd:\n lines = ''.join(file_fd.readlines())\n lines = lines.splitlines()[-n:]\n return lines",
"def tailFile(logFile, n):\n return tailFile2(logFile,n)",
"def tail(fname, n):\n try:\n f = open(fname, 'r')\n except IOError:\n print \"IOError: No such file or directory: '\" + fname + \"'\"\n return\n \n # NOT IMPLEMENTED...\n f.close()",
"def tail(filep, n=10):\n with open(filep) as f:\n return list(deque(f, maxlen=n))",
"def tailNLinesFromFile(file, n):\n\n if not os.path.isfile(file):\n return None\n\n command = ['tail', '-n', str(n), file]\n\n output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]\n\n return output.split('\\n')",
"def file_tail(f, n):\n prc = subprocess.Popen(\n \"tail -n \" + str(n) + \" \" + os.path.normpath(f),\n shell=True, stdout=subprocess.PIPE)\n return prc.communicate()[0].decode().rstrip()",
"def tailLines(filename,linesback):\r\n\tavgcharsperline=150\r\n\t\r\n\tfile = open(filename,'r')\r\n\twhile 1:\r\n\t\ttry: file.seek(-1 * avgcharsperline * linesback,2)\r\n\t\texcept IOError: file.seek(0) \r\n\t\tif file.tell() == 0: atstart=1 \r\n\t\telse: atstart=0\r\n\t\tlines=file.read().split(\"\\n\")\r\n\t\tif (len(lines) > (linesback+1)) or atstart: break\r\n\t\t#The lines are bigger than we thought\r\n\t\tavgcharsperline=avgcharsperline * 1.3 #Inc avg for retry\r\n\tfile.close()\r\n\t\r\n\tif len(lines) > linesback: start=len(lines)-linesback -1\r\n\telse: start=0\r\n\treturn lines[start:len(lines)-1]",
"def print_tail_from_jotter(self, n_lines):\n try:\n tail_lines = deque((), n_lines)\n with open(self._filename, 'r') as f:\n line = f.readline()\n while line:\n l = line.rstrip(\"\\n\")\n tail_lines.append(l)\n line = f.readline()\n\n for l in tail_lines:\n print(l)\n except OSError:\n pass",
"def tail_lines(fd, linesback=10):\n avgcharsperline = 75\n\n while True:\n try:\n fd.seek(-1 * avgcharsperline * linesback, 2)\n except IOError:\n fd.seek(0)\n\n if fd.tell() == 0:\n atstart = 1\n else:\n atstart = 0\n\n lines = fd.read().split(\"\\n\")\n if (len(lines) > (linesback+1)) or atstart:\n break\n\n avgcharsperline=avgcharsperline * 1.3\n\n if len(lines) > linesback:\n start = len(lines) - linesback - 1\n else:\n start = 0\n\n return lines[start:len(lines)-1]",
"def head(file_name):\n #from itertools import islice\n with open('../test_files/' + file_name, 'r') as infile:\n list = infile.readlines()\n #printing the 1st 10 lines\n print('list of first 10 lines',list[:10])",
"def tail(filename, n):\n p=subprocess.Popen(['tail','-n',str(n),filename], stdout=subprocess.PIPE)\n soutput, _=p.communicate()\n lines = soutput.decode('utf8').split('\\r')\n return lines",
"def tail(self, seconds=1, max_lines=50):\n # Read file\n with open(self.tailed_file, 'r') as file_:\n # Go to EOF and get file size\n file_.seek(0, 2)\n fsize = file_.tell()\n\n # Get position of last 10K characters, then read to the end\n file_.seek(max(fsize-10000, 0), 0)\n lines = file_.readlines() # Read to end\n\n # Print last max_lines number of lines\n lines = lines[-max_lines:]\n for line in lines:\n print(line.strip())\n\n # Process file\n with open(self.tailed_file) as file_:\n # Go to the end of file\n file_.seek(0, 2)\n while True:\n # Tail file. Exit if CTRL-C is pressed\n try:\n # Get the byte offset of the most recent file read op\n # In other words get current size of file\n curr_position = file_.tell()\n\n # Read line\n line = file_.readline().strip()\n\n # If nothing new, then sleep\n if not line:\n # Go to the current end of file\n file_.seek(curr_position)\n time.sleep(seconds)\n else:\n print(line)\n except KeyboardInterrupt:\n sys.exit(0)",
"def _seek_to_n_lines_from_end_ng(f, numlines=10):\n\tline_count = 0;\n\n\tfor line in f:\n\t\tline_count += 1;\n\tpos = line_count - numlines;\n\tif (pos >= 0):\n\t\tf.seek(pos, 0);\n\telse:\n\t\tf.seek(0, 0);",
"def tailFile2(logFile, n): \n try:\n tempFile = logFile + '.temp' \n cmd1 = 'tail -n'+str(n) + ' ' +logFile + ' > ' + tempFile\n cmd2 = 'rm ' + tempFile\n os.system(cmd1)\n f = open(tempFile)\n lines = f.readlines()\n f.close() \n os.system(cmd2)\n except:\n lines=[]\n return lines",
"def __read_last_lines(self) -> str:\n with open(LOGFILE_OPENINGS, \"r\", encoding=\"utf-8\") as f:\n last_lines = f.readlines()[-10:]\n return \" 🌸 \" + \"\\n🌸 \".join(\n map(lambda l: repr(LogLine.from_line(l)), last_lines)\n )",
"def tail(f, lines=1, _buffer=4098):\n # place holder for the lines found\n lines_found = []\n\n # block counter will be multiplied by buffer\n # to get the block size from the end\n block_counter = -1\n\n # loop until we find X lines\n while len(lines_found) < lines:\n try:\n f.seek(block_counter * _buffer, os.SEEK_END)\n except IOError: # either file is too small, or too many lines requested\n f.seek(0)\n lines_found = f.readlines()\n break\n\n lines_found = f.readlines()\n\n # we found enough lines, get out\n if len(lines_found) > lines:\n break\n\n # decrement the block counter to get the\n # next X bytes\n block_counter -= 1\n\n return lines_found[-lines:]",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename) as file:\n n_lines = 0\n for line in file:\n n_lines += 1\n if nb_lines <= 0 or nb_lines >= n_lines:\n file.seek(0)\n for line in file:\n print(line, end=\"\")\n else:\n file.seek(0)\n for line in range(nb_lines):\n print(file.readline(), end=\"\")",
"def list_recent_lines(self, num):\n return self.list_lines_gen(self.go_backward, num=num)",
"def tail(filename, run_event, starting_lines=10):\n\tf = open(filename)\n\tcurrent_size = os.stat(filename).st_size\n\t#There is no seek from the end of text files in Python 3...\n\tcur_version = sys.version_info;\n\tif (cur_version.major == 2):\n\t\t_seek_to_n_lines_from_end(f, starting_lines)\n\telse:\n\t\t_seek_to_n_lines_from_end_ng(f, starting_lines)\n\t\t#f.seek(0, 2);\n\n\t#Main tail loop...\n\twhile (run_event.is_set()):\n\t\tnew_size = os.stat(filename).st_size\n\n\t\twhere = f.tell()\n\t\tline = f.readline()\n\t\tif not line:\n\t\t\tif new_size < current_size:\n\t\t\t\t# the file was probably truncated, reopen\n\t\t\t\tf = open(filename)\n\t\t\t\tcurrent_size = new_size\n\t\t\t\tdashes = \"-\" * 20\n\t\t\t\tyield \"\\n\"\n\t\t\t\tyield \"\\n\"\n\t\t\t\tyield \"%s file was truncated %s\" % (dashes, dashes)\n\t\t\t\tyield \"\\n\"\n\t\t\t\tyield \"\\n\"\n\t\t\t\ttime.sleep(0.25)\n\t\t\telse:\n\t\t\t\ttime.sleep(0.25)\n\t\t\t\tf.seek(where)\n\t\telse:\n\t\t\tcurrent_size = new_size\n\t\t\tyield line",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, \"r\", encoding=\"utf-8\") as file1:\n lines = file1.readlines()\n if nb_lines <= 0 or nb_lines > len(lines):\n print(\"\".join(lines), end='')\n else:\n print(\"\".join(lines[:nb_lines]), end='')",
"def newtail(f, n, offset=0):\n for i, line in enumerate(f):\n print(\"newtail stats\", i, n, line, )\n if i == n:\n return line",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding='utf8') as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n else:\n for line in f:\n if nb_lines == 0:\n break\n print(line, end=\"\")\n nb_lines -= 1",
"def read_lines(filename=\"\", nb_lines=0):\n\n line_counter = 0\n with open(filename, 'r', encoding='utf-8') as my_file:\n for lines in my_file:\n line_counter += 1\n my_file.seek(0)\n if nb_lines <= 0 or nb_lines >= line_counter:\n print(my_file.read(), end=\"\")\n else:\n for i in range(nb_lines):\n print(my_file.readline(), end=\"\")",
"def tail(f, n, offset=0):\n avg_line_length = 74\n to_read = n + offset\n while 1:\n try:\n f.seek(-(avg_line_length * to_read), 2)\n except IOError:\n # woops. apparently file is smaller than what we want\n # to step back, go to the beginning instead\n f.seek(0)\n pos = f.tell()\n lines = f.read().splitlines()\n if len(lines) >= to_read or pos == 0:\n return lines[-to_read:offset and -offset or None]\n avg_line_length *= 1.3",
"def read_lines(filename=\"\", nb_lines=0):\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for line_count, lines in enumerate(f):\n pass\n if nb_lines <= 0 or nb_lines > (line_count + 1):\n f.seek(0)\n print(f.read(), end='')\n else:\n f.seek(0) # return to file beginning\n for line in range(nb_lines):\n print(f.readline(), end='')",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, encoding=\"utf-8\") as myFile:\n if nb_lines <= 0:\n print(myFile.read(), end=\"\")\n for i in range(nb_lines):\n print(myFile.readline(), end=\"\")",
"def getLastLine(f, blockSize=3072): \n \n f.seek(0,os.SEEK_END) \n \n totalBytes = f.tell() \n \n if totalBytes > blockSize: \n f.seek(-blockSize,os.SEEK_END) \n else: \n f.seek(0) \n \n lastLines = f.readlines() \n lastLine = lastLines[-1] \n \n if lastLine =='': \n lastLine = lastLines[-2] \n \n return lastLine",
"def head(filename, n=10):\n\tprint(\"[HEAD {}] {}\".format(n,filename))\n\tif filename[-3:].casefold()=='.gz':\n\t\twith gzip.open(filename, 'rt') as previewfile:\n\t\t\tprint(*(next(previewfile) for x in range(n)))\n\telse:\n\t\twith open(filename, 'r') as f:\n\t\t\tfor linenumber in range(n):\n\t\t\t\tline = f.readline()\n\t\t\t\tprint(line)\n\tprint(\"[END HEAD]\")",
"def keep_last_lines(self, num_lines):\n self.data = self.data[-num_lines:]"
]
| [
"0.74048",
"0.70670325",
"0.69681805",
"0.69455147",
"0.6894602",
"0.67591935",
"0.6747669",
"0.6644481",
"0.65139806",
"0.6490487",
"0.6489199",
"0.6370238",
"0.6363277",
"0.6346032",
"0.63062066",
"0.6291511",
"0.62899953",
"0.6274384",
"0.6269273",
"0.62548035",
"0.62216425",
"0.6213719",
"0.61753947",
"0.61728835",
"0.6136269",
"0.608717",
"0.6067143",
"0.60214275",
"0.5985947",
"0.59715897"
]
| 0.7993086 | 0 |
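For the tail document above, collections.deque with maxlen gives a constant-memory way to keep only the last ten lines while streaming; this is a sketch, not the record's implementation, and 'example.txt' is again hypothetical.

from collections import deque

def tail_sketch(file_name, n=10):
    # a bounded deque silently discards older lines as the file is streamed
    with open('../test_files/' + file_name, 'r') as infile:
        for line in deque(infile, maxlen=n):
            print(line, end='')

# tail_sketch('example.txt')  # hypothetical file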
Prints the even numbered lines of a file | def print_even(file_name):
with open('../test_files/' + file_name, 'r') as infile:
        #initialise i to 1 so that numbering starts at line 1
i = 1
for x in infile.readlines():
            #check whether the line number is even
            if i%2 == 0:
                #print the even-numbered line
                print(x)
            #increment the line counter
i+=1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_titles(file, inc=3):\n count = 0\n with open(file) as f:\n for line in f:\n if count == 0 or count % 3 == 0:\n print(line.strip() + '\\t 0')\n count += 1",
"def fc():\n try:\n revfile = sys.argv[1]\n with open(revfile) as f:\n for revnumber in f:\n revnumber = int(revnumber)\n if revnumber % 2 == 0:\n print(\"{}={}*{}\".format(revnumber, revnumber // 2, 2))\n continue\n i = 3\n while i < revnumber // 2:\n if revnumber % i == 0:\n print(\"{}={}*{}\".format(revnumber, revnumber // i, i))\n break\n i = i + 2\n if i == (revnumber // 2) + 1:\n print(\"{}={}*{}\".format(revnumber, revnumber, 1))\n except (IndexError):\n pass",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename) as file:\n n_lines = 0\n for line in file:\n n_lines += 1\n if nb_lines <= 0 or nb_lines >= n_lines:\n file.seek(0)\n for line in file:\n print(line, end=\"\")\n else:\n file.seek(0)\n for line in range(nb_lines):\n print(file.readline(), end=\"\")",
"def open_read_write(file_to_open=PROBLEM_FILE):\n\n with open(file_to_open, 'r') as working_file:\n # turns the opened file into a list using a list comprehension\n working_file = [sentence.strip() for sentence in working_file.readlines()]\n\n # iterates through the enumerated file displaying a tuple\n # of (index, string)\n for sentence in enumerate(working_file): #iterates through the file\n if sentence[0] % 2: #Checks to see if the index is divisible by 2\n print(sentence[1]) #prints the string",
"def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)",
"def splitFile(filename, n):\n in_file = open(filename)\n line = in_file.readline()\n count = 0\n while line <> \"\":\n if count < 10: num = \"0\"+str(count)\n else: num = str(count)\n f = open(\"output/\"+filename+\"-\"+num,\"w\")\n for i in range(n):\n if line == \"\": break\n f.write(line)\n line = in_file.readline()\n f.close()\n count += 1\n return count",
"def print_evens(n):\n print(f\"Printing Evens below {n}\")\n print(\"-------------------\")\n for i in range(0, n): # n iterations\n if i % 2 == 0:\n print(i)",
"def open_file(file,n,n1=0):\n\tfin = open(file)\n\tbook_lines = []\n\tcount = 0\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tcount += 1\n\t\tif count > n:\n\t\t\tbook_lines.append(word)\n\treturn book_lines",
"def read_lines(filename=\"\", nb_lines=0):\n\n num = 0\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n num += 1\n\n with open(filename, encoding=\"utf-8\") as f:\n if nb_lines <= 0 or nb_lines >= num:\n for line in f:\n print(line, end='')\n else:\n for line in range(nb_lines):\n print(f.readline(), end='')",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding='utf8') as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n else:\n for line in f:\n if nb_lines == 0:\n break\n print(line, end=\"\")\n nb_lines -= 1",
"def read_lines(filename=\"\", nb_lines=0):\n\n line_counter = 0\n with open(filename, 'r', encoding='utf-8') as my_file:\n for lines in my_file:\n line_counter += 1\n my_file.seek(0)\n if nb_lines <= 0 or nb_lines >= line_counter:\n print(my_file.read(), end=\"\")\n else:\n for i in range(nb_lines):\n print(my_file.readline(), end=\"\")",
"def read_lines(filename=\"\", nb_lines=0):\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for line_count, lines in enumerate(f):\n pass\n if nb_lines <= 0 or nb_lines > (line_count + 1):\n f.seek(0)\n print(f.read(), end='')\n else:\n f.seek(0) # return to file beginning\n for line in range(nb_lines):\n print(f.readline(), end='')",
"def position_helper():\n for file_name in file_list[:1]:\n file_bits = file_splitter(file_name)\n line_length = len(max(file_bits, key=len)) + 13\n index = 0\n print('\\n' + ('-' * line_length))\n for x in file_bits:\n print('Index ', str(index), ' = ', file_bits[index])\n index += 1\n print(('-' * line_length) + '\\n')",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, \"r\", encoding=\"utf-8\") as file1:\n lines = file1.readlines()\n if nb_lines <= 0 or nb_lines > len(lines):\n print(\"\".join(lines), end='')\n else:\n print(\"\".join(lines[:nb_lines]), end='')",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n if nb_lines <= 0:\n read_data = f.read()\n print(read_data, end='')\n else:\n n_lines = 0\n for x in f:\n print(x, end='')\n n_lines += 1\n if n_lines == nb_lines:\n break",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, encoding=\"utf-8\") as myFile:\n if nb_lines <= 0:\n print(myFile.read(), end=\"\")\n for i in range(nb_lines):\n print(myFile.readline(), end=\"\")",
"def print_file(self, file, line):\n self._file = file\n self._line = line\n try:\n self._file = open(file)\n except IOError:\n print \"can't open\", file \n \n n = 0\n retour =\"....\\n\"\n for line in self._file:\n n +=1\n if (self._line-5 < n) & (n < self._line+5) & (n != self._line):\n retour += line\n if n == self._line:\n retour += (\"<font color='red'><b>%s</b></font>\" % (line))\n \n return retour + \"....\"\n self._file.close()",
"def skiprows(fn):\n for i, line in enumerate(open(fn)):\n if line.startswith('#'):\n continue\n else:\n break\n return i",
"def splitting():\n n = 1\n with open('numbers.txt', 'r+') as f:\n f.readline()\n seek_2 = f.tell()\n seek_1 = 0\n\n while seek_1 != seek_2:\n print(n)\n n += 1\n with open('numbers.txt', 'r+') as f, open('numbers.txt', 'r+') as f_2:\n f.seek(seek_1)\n f_2.seek(seek_2)\n seek_1, seek_2 = merge(f, f_2)\n\n make_result_file(seek_1)",
"def print_a_line(line_count, f):\n\tprint line_count, f.readline()",
"def print_file(chr_list,filename):\n infile = open(filename)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n loci = int(line.strip().split()[1])\n for chr_i,chr_l in enumerate(chr_list):\n for loc in chr_l:\n if chr==chr_i and loci==loc:\n print line\n return",
"def read_lines(filename=\"\", nb_lines=0):\n number_of_lines = __import__('1-number_of_lines').number_of_lines\n count = 0\n num_file_lines = number_of_lines(filename)\n with open(filename, mode='r', encoding=\"utf=8\") as myFile:\n if nb_lines <= 0 or nb_lines >= num_file_lines:\n print(myFile.read(), end='')\n else:\n while count != nb_lines:\n print(myFile.readline(), end='')\n count += 1",
"def read_lines(filename=\"\", nb_lines=0):\n\n with open(filename, encoding=\"UTF8\") as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n i = 0\n while i < nb_lines:\n print(f.readline(), end=\"\")\n i += 1",
"def skip_lines(nb):\n if nb == -1:\n os.system('cls' if os.name=='nt' else 'clear')\n else:\n print(\"\\n\" * (nb-1))",
"def read_lines(filename=\"\", nb_lines=0):\n\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for lines in f:\n line_count += 1\n with open(filename, mode='r', encoding='utf-8') as f:\n if nb_lines <= 0 or nb_lines >= line_count:\n result = f.read()\n print(\"{:s}\".format(result), end=\"\")\n else:\n for j, line in enumerate(f):\n if j < nb_lines:\n print(\"{:s}\".format(line), end=\"\")",
"def print_a_line(line_count, f):\n print (line_count, f.readline())",
"def pager():\n File = open(raw_input( \"Enter any file name for reading \") )\n if File is not None:\n prompt = raw_input(\"Do you wanna read the contents now... y / n \")\n counter = 1\n delimiter = 1\n m = re.search(r'y|Yes|yes|y', prompt)\n if m:\n for line in File:\n if (counter / delimiter) == 10:\n delimiter += 1\n prompted = raw_input(\"Do you wish to continue\")\n x = re.search(r'y|Yes|YES|yes', prompted)\n if x:\n print line,\n print line,\n counter += 1",
"def num_lines(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return( i + 1 )",
"def printEvenOdd(num):\n if num %2 ==0:\n print(\"Even\")\n elif num %2 == 1:\n print(\"Odd\")\n else:\n print(\"Unknown\")",
"def every_second_line(report):\n f = report\n \n result = []\n for line in f:\n result.append(line.strip())\n f.readline()"
]
| [
"0.61693466",
"0.6092263",
"0.60504764",
"0.60123736",
"0.59696233",
"0.59063226",
"0.58038336",
"0.5767786",
"0.5724785",
"0.56689507",
"0.5647623",
"0.5638165",
"0.5592854",
"0.5588552",
"0.5547456",
"0.553177",
"0.5497751",
"0.54832536",
"0.5455734",
"0.5450205",
"0.5421187",
"0.5413994",
"0.5404999",
"0.53830075",
"0.5364616",
"0.53418833",
"0.5326476",
"0.53042364",
"0.5268845",
"0.5256177"
]
| 0.85165477 | 0 |
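The print_even document above tracks its own counter; enumerate(infile, start=1) expresses the same even-line test without manual bookkeeping. A sketch only, with a hypothetical file name.

def print_even_sketch(file_name):
    with open('../test_files/' + file_name, 'r') as infile:
        # start=1 numbers lines from 1, so lines 2, 4, 6, ... are the even ones
        for lineno, line in enumerate(infile, start=1):
            if lineno % 2 == 0:
                print(line, end='')

# print_even_sketch('example.txt')  # hypothetical file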
Reads in a CSV file and returns a list of values belonging to the column specified | def get_csv_column(file_name, column):
    values = []
    with open('../test_files/' + file_name, 'r') as infile:
        for x in infile.readlines():
            x = x.replace('\n', '')
            # split on ',' since fields in a CSV file are comma-separated
            # column - 1 because list indexing starts at 0: the user's 1st column is index 0
            values.append(x.split(',')[column - 1])
    return values | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def values_of_col(csvf,col_name,sepchar=' '):\n\tcol_values=[]\n\twith open(csvf,'rb') as f:\n\t\treader=csv.reader(f,delimiter=sepchar)\n\t\tcsv_list=list(reader)\n\t\theader_row=csv_list[0]\t\t\n\t\tcol_name_index=header_row.index(col_name)\n\t\tfor row in csv_list:\n\t\t\tcol_values.append(row[col_name_index])\n\tdel col_values[0] # remove the column name\t\t\n\treturn col_values",
"def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)",
"def read_csv():",
"def _read_csv_col(colNum: int, filename: str) -> List[str]:\n col = []\n with open(filename, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n col.append(str(row[colNum]))\n\n return col[1::] # Ignore the csv header",
"def csvtolist(csvfile, column_header='Organism'):\n\n file = pd.read_csv(csvfile)\n # Create a list name/variable and use list()\n listfromcolumn = list(file[column_header])\n\n return listfromcolumn",
"def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data",
"def read_data(filepath):\n data = []\n column_names = []\n\n with open(filepath, 'rt') as csvfile:\n data_reader = csv.reader(csvfile, delimiter=',')\n flag = False\n for row in data_reader:\n if not flag:\n column_names = row\n flag = True\n else:\n data.append(row)\n\n return column_names, np.array(data)",
"def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:",
"def read_csv(file, header=False):\n data = []\n with open(file, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if header:\n continue\n line = [v_parse(val) for val in row]\n data.append(line)\n\n return data",
"def read_csv(filename):\n # Implement this function\n file = open(filename)\n wrapper = csv.reader(file)\n result = []\n for rpos in wrapper: \n result = result + [rpos]\n file.close() \n return result",
"def get_column_value_list(\n filenames: List[str],\n column_name: str,\n debug: Optional[bool] = False\n) -> List[Any]:\n result = []\n for file_name in filenames:\n\n # Open the file for reading\n file_in = codecs.open(file_name, 'rU')\n dialect = csv.Sniffer().sniff(file_in.read(1024))\n file_in.seek(0)\n data_in = csv.reader(file_in, dialect=dialect, delimiter=str(','))\n\n if debug:\n print('Parsing file ' + file_name)\n\n line_number = 0\n header_detected = False\n col_idx = -1\n for data in data_in:\n # Count the line number to flag anomalies\n line_number += 1\n\n # If mark has not been detected yet\n if not header_detected:\n if column_name not in data:\n # Line does not match, skip it\n continue\n\n # At this point the column has been detected\n header_detected = True\n\n # Get index of the column\n col_idx = data.index(column_name)\n\n # Proceed with the following lines\n continue\n\n # Safety check. If something went wrong when the CSV file was\n # exported, it is very likely that the string #REF! is present. If\n # so, notify and stop.\n if '#REF!' in data or '#VALUE' in data:\n print('Line', line_number, 'contains incorrect data',\n file=sys.stderr)\n sys.exit(1)\n\n # At this point we are processing a data line\n\n # If the number of fields doesn't match number of columns, flag!\n if col_idx >= len(data):\n print('Mismatched line', line_number, 'skipping',\n file=sys.stderr)\n continue\n\n # append the string\n result.append(data[col_idx])\n\n return result",
"def get_all_rows_for_column (file, columnNumber, has_header, delimiter):\n with open(file, 'r', encoding='ISO-8859-1') as csvfile:\n jobsReader = csv.reader(csvfile, delimiter=delimiter)\n\n if (has_header):\n next(jobsReader, None)\n\n allrows = []\n for row in jobsReader:\n allrows.append(row[columnNumber])\n\n return allrows",
"def _load_column(filename, col=0):\n with open(filename) as f:\n col = list(zip(*csv.reader(f)))[0]\n return list(col)",
"def get_all_columns_from_csv(csvFile):\n dmlst = []\n with open(csvFile, 'r', encoding='iso-8859-3') as fh:\n for dm in fh.readline().split(';'):\n dmlst.append(dm.strip())\n return dmlst",
"def load_data_from_csv(csv_file):\n list=[]\n\n with open(csv_file) as csv_1:\n csv_out = csv.reader(csv_1) \n next(csv_out)\n for rows in csv_out: \n if len(rows) != 0:\n list.append([rows[0],int(rows[1]),int(rows[2])])\n \n return (list)",
"def read_csv(filename):\n with open(filename) as csv:\n return [csv_line.strip().split(',') for csv_line in csv]",
"def load_csv(filepath):\n log.debug('Loading csv')\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n return reader.fieldnames, list(reader)",
"def read_sample_csv(self):\n f = open('sample.csv')\n lines = f.readline()\n fields = lines.split(',')\n fieldnames_lst = [i.strip() for i in fields]\n f.close()\n return fieldnames_lst",
"def _load_column(filename, col=0):\n with open(filename) as f:\n col = sorted(list(zip(*csv.reader(f)))[0])\n return list(col)",
"def read_csv(file_path, delimiter=\",\", quotechar='\"'):\n # Opening file\n with open(file_path, newline='') as csvfile:\n # Will be used to store content\n lsts = []\n\n # Loading and reading csv\n csv_data = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n # Adding data to container\n for row in csv_data:\n lsts.append(row)\n\n return lsts",
"def read_csv(file_name):\n final_list = []\n reader = csv.reader(open(file_name, 'rb'), delimiter=',')\n for x in reader:\n final_list.append(x)\n return final_list",
"def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)",
"def readCSV(self, csvFileName):\n\tdata = []\n\twith open(csvFileName) as csvFile:\n\t\treader = csv.reader(csvFile)\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data",
"def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table",
"def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data",
"def read_csv_to_list(csv_path):\n\n with open(csv_path, newline=\"\") as f:\n reader = csv.reader(f)\n data = list(reader)\n\n return data",
"def readFile(filename):\n df = pd.read_csv(filename, header=0) # read the file\n return df.iloc[:,:].values",
"def read_csv(csvfilename):\n\trows = []\n\n\twith open(csvfilename, \"rU\") as csvfile:\n\t\tfile_reader = csv.reader(csvfile)\n\t\tfor row in file_reader:\n\t\t\trows.append(row)\n\treturn rows",
"def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data",
"def read_csv(csvfilename):\n rows = []\n with open(csvfilename, encoding='utf-8') as csvfile:\n file_reader = csv.reader(csvfile)\n for row in file_reader:\n rows.append(row)\n return rows"
]
| [
"0.75514627",
"0.74750394",
"0.7196764",
"0.7163925",
"0.7137294",
"0.70621157",
"0.7006086",
"0.70048636",
"0.69896185",
"0.6975001",
"0.6971096",
"0.690734",
"0.6904262",
"0.6882441",
"0.68709296",
"0.6865765",
"0.6844638",
"0.6837288",
"0.68288726",
"0.6812737",
"0.6811516",
"0.68110925",
"0.68053275",
"0.6760293",
"0.6742512",
"0.6741626",
"0.67388684",
"0.6738457",
"0.6731657",
"0.6728793"
]
| 0.775621 | 0 |
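The get_csv_column document above splits rows on raw commas, which misreads quoted fields such as "Doe, Jane"; the standard csv module handles that case. The sketch keeps the record's 1-based column convention; the function name, file name, and column number are hypothetical.

import csv

def get_csv_column_sketch(file_name, column):
    # csv.reader copes with quoted fields, unlike a bare str.split(',')
    with open('../test_files/' + file_name, newline='') as infile:
        return [row[column - 1] for row in csv.reader(infile)]

# get_csv_column_sketch('data.csv', 2)  # hypothetical file and column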
Reads in a FASTA file and returns a list of only the sequences | def fasta_seqs(file_name):
    seq_list = []
    with open('../test_files/' + file_name, 'r') as infile:
        text = infile.read()
        seqs = text.split('>')
        for seq in seqs:
            try:
                x = seq.split('\n', 1)
                # x[1] holds the sequence; strip the newline characters inside it
                seq_list.append(x[1].replace('\n', ''))
            except IndexError:
                # text before the first '>' has no sequence part
                pass
    return seq_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences",
"def ReadFASTA(fastafile):\n lines = open(fastafile).readlines()\n headers_seqs = []\n header = None\n seq = []\n for line in lines:\n if line[0] == '>':\n if (not header) and (not seq):\n pass # first sequence in file\n elif header and not seq:\n raise ValueError, \"Empty sequence for %s\" % header\n elif seq and not header:\n raise ValueError, \"File does not begin with header.\"\n else:\n seq = ''.join(seq)\n seq = seq.replace(' ', '')\n headers_seqs.append((header, seq))\n header = line.strip()[1 : ]\n seq = []\n else:\n seq.append(line.strip())\n if (not header) and (not seq):\n pass # first sequence in file\n elif header and not seq:\n raise ValueError, \"Empty sequence for %s\" % header\n elif seq and not header:\n raise ValueError, \"File does not begin with header.\"\n else:\n seq = ''.join(seq)\n seq = seq.replace(' ', '')\n headers_seqs.append((header, seq))\n return headers_seqs",
"def readFasta(self, fastaFile):\t\n\t\tname, seq = None, []\n\t\tfor line in fastaFile:\n\t\t\tline = line.rstrip()\n\t\t\tif (line.startswith(\">\")):\n\t\t\t\tif name: yield (name, ''.join(seq))\n\t\t\t\tname, seq = line, []\n\t\t\telse:\n\t\t\t\tseq.append(line)\n\t\tif name: yield (name, ''.join(seq))",
"def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)",
"def get_sequence_from_fasta(fasta_file):\n\n if not fasta_file:\n raise ValueError(\"Path cannot be empty.\")\n\n handle = open(fasta_file, 'r')\n try:\n sequence = handle.readlines()\n finally:\n handle.close()\n\n sequence = [line.strip() for line in sequence if not '>' in line]\n sequence = ''.join(sequence)\n\n return sequence",
"def readFASTA(filename, alpha = None, string_only = False):\n seqlist = []\n seqname = None\n seqinfo = None\n seqdata = []\n fh = open(filename)\n thisline = fh.readline()\n while (thisline):\n if (thisline[0] == '>'): # new sequence\n if (seqname): # take care of the data that is already in the buffer before processing the new sequence\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \"+seqname+\" is invalid (ignored): \", e, file=sys.stderr)\n seqinfo = thisline[1:-1] # everything on the defline is \"info\"\n seqname = seqinfo.split()[0] # up to first space\n seqdata = []\n else: # pull out the sequence data\n cleanline = thisline.split()\n for line in cleanline:\n seqdata.extend(tuple(line.strip('*'))) # sometimes a line ends with an asterisk in FASTA files\n thisline = fh.readline()\n\n if (seqname):\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \" + seqname + \" is invalid (ignored): \", e, file=sys.stderr)\n else:\n raise RuntimeError(\"No sequences on FASTA format found in this file\")\n fh.close()\n return seqlist",
"def read_file(path):\n with open(path, \"r\") as IN:\n file_seqs = [line.strip() for line in IN]\n return file_seqs",
"def read_fasta(amplicon_file, minseqlen):\n with gzip.open(amplicon_file) as file:\n sequences = file.readlines()\n seqs = \"\"\n for sequence in sequences:\n #print(\"sequence\")\n seq = sequence.replace(b\"\\n\", b\"\")\n seq = seq.decode('utf8')\n #print(seq)\n for character in seq:\n if character not in \"TGAC\":\n if len(seqs)>=minseqlen:\n yield seqs\n #print(seqs)\n seq = \"\"\n seqs = \"\"\n break\n seqs += seq\n #print(seqs)\n yield seqs",
"def read_fasta(name):\n assert os.path.exists(name), name + ' does not exist'\n sequence_lst = []\n header_lst = []\n header = \"\"\n sequence = \"\"\n with open(name, \"rt\") as f_in:\n for line in f_in:\n data = line.strip()\n # jump empty lines\n if not data:\n continue\n # store header and sequence when a new header\n # (i.e. sequence) is found\n if sequence and header and data.startswith(\">\"):\n header_lst.append(header)\n sequence_lst.append(sequence)\n # reset header and sequence\n header = \"\"\n sequence = \"\"\n # save header of sequence\n if data.startswith(\">\"):\n header = data[1:]\n # save sequence\n if \">\" not in data:\n sequence += data\n # save last sequence\n if header and sequence:\n header_lst.append(header)\n sequence_lst.append(sequence)\n # outputs\n assert len(header_lst) == len(sequence_lst), \\\n \"cannot read same number of headers and sequences\"\n print(\"read %d sequences in %s\" % (len(sequence_lst), name))\n if len(sequence_lst) == 0:\n print(\"WARNING: {} seems empty of sequence\".format(name))\n return header_lst, sequence_lst",
"def read_fasta(file_path=\"\"):\n\n line = \"\"\n\n try:\n fasta_handle = open(file_path,\"r\")\n except:\n raise IOError(\"Your input FASTA file is not right!\")\n\n # make sure the file is not empty\n while True:\n line = fasta_handle.readline()\n if line == \"\":\n return\n if line[0] == \">\":\n break\n\n # when the file is not empty, we try to load FASTA file\n while True:\n if line[0] != \">\":\n raise ValueError(\"Records in Fasta files should start with '>' character\")\n title = line[1:].rstrip()\n lines = []\n line = fasta_handle.readline()\n while True:\n if not line:\n break\n if line[0] == \">\":\n break\n lines.append(line.rstrip())\n line = fasta_handle.readline()\n\n yield title,\"\".join(lines).replace(\" \",\"\").replace(\"\\r\",\"\")\n\n if not line:\n return\n\n fasta_handle.close()\n assert False, \"Your input FASTA file have format problem.\"",
"def read_fasta(fp):\n name, seq = None, []\n for line in fp:\n line = line.rstrip()\n if line.startswith(\">\"):\n if name: yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name: yield (name, ''.join(seq))",
"def load_fasta_sequences(fasta_file, return_keys=False):\n fasta = Fasta(fasta_file, as_raw=True, sequence_always_upper=True)\n seqs = [seq[:] for seq in fasta]\n if return_keys:\n keys = list(fasta.keys())\n fasta.close()\n if return_keys:\n return seqs, keys\n return seqs",
"def read_fasta_file(fasta):\n\n ptn_list = []\n fasta_content = open(fasta, \"r\")\n new_ptn = None\n for line in fasta_content:\n if \">sp\" in line or \">tr\" in line:\n if new_ptn != None:\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n tokens = line.split()\n new_ptn = {\"id\": tokens[0] }\n sequence = \"\"\n else:\n sequence += line[:-1]\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n\n return ptn_list",
"def read_fasta(filename):\n with open(filename, \"r\") as f:\n s = \"\"\n for l in f.readlines()[1:]:\n s += l.strip()\n return s",
"def FASTA_iterator(filename):\n fasta_file=open(filename, \"r\")\n id_fasta=\"\"\n seq_fasta=\"\"\n\n for line in fasta_file:\n if line.startswith(\">\"):\n if id_fasta == \"\":\n id_fasta=line.strip()\n continue\n fasta = id_fasta , seq_fasta\n yield fasta\n seq_fasta=\"\"\n id_fasta=line.strip()\n\n else:\n seq_fasta += line.strip()\n\n if seq_fasta != \"\":\n yield id_fasta, seq_fasta",
"def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))",
"def read_fasta_sequences_to_str(filename):\n with open(filename) as f:\n lines = [line.strip() for line in f.readlines()]\n sequences = []\n text = ''\n\n for line in lines:\n if line[0] == '>':\n if len(text) > 0:\n sequences.append(text)\n text = ''\n else:\n if len(line):\n text += line\n if len(text) > 0:\n sequences.append(text)\n\n return sequences",
"def get_sequences_from_fasta_file(file, include_reverse_complement= True):\n if os.path.isfile(file):\n try:\n record_dict = SeqIO.to_dict(SeqIO.parse(file, \"fasta\"))\n logger.info(datetime.datetime.now().strftime(\"%H:%M:%S\") +\n \" Read the input file: \" + file)\n if len(record_dict) == 0:\n logger.error(clock_now() +\n \"Read : no sequence input in file: \" + file)\n raise RuntimeError(\"Read : no sequence input in file: \" + file)\n logger.info(datetime.datetime.now().strftime(\"%H:%M:%S\") +\n \" A number of \" + str(len(record_dict.keys())) +\n \" records read in.\")\n return record_dict\n except IOError as err:\n print(err.args)\n print(\"Reading fasta file failed. Please check file :\" + file)\n logger.error(clock_now() + \" FileRead: \" + err.message)\n raise err\n\n else:\n raise RuntimeError(\"The provided file not exist :\" + file)",
"def read_fasta(fasta_name):\n \n \"\"\"first open the file outside \"\"\"\n file_handler = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n fasta_iter = (x[1] for x in groupby(file_handler, lambda line: line[0] == \">\"))\n\n for header in fasta_iter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in fasta_iter.__next__())\n\n # yield (headerStr, seq)\n result_record = {'header':headerStr,'seqRecord':seq}\n return result_record",
"def read(fin, alphabet=None): \n seqs = [ s for s in iterseq(fin, alphabet)]\n return SeqList(seqs)",
"def parse_FASTA(file):\r\n\tstate = 0\r\n\tdna_list = []\r\n\tfor line in file:\r\n\t\tline = line.strip()\r\n\t\tif state == 0:\r\n\t\t\tif line[0] == '>':\r\n\t\t\t\tadd_new_DNA(dna_list, line)\r\n\t\t\t\tstate = 1\r\n\t\t\telif line == '':\r\n\t\t\t\tcontinue\r\n\t\t\telse:\r\n\t\t\t\traise Exception()\r\n\t\telif state == 1:\r\n\t\t\tadd_line_to_DNA(dna_list[-1], line)\r\n\t\t\tstate = 2\r\n\t\telif state == 2:\r\n\t\t\tif line[0] == '>':\r\n\t\t\t\tadd_new_DNA(dna_list, line)\r\n\t\t\t\tstate = 1\r\n\t\t\telse:\r\n\t\t\t\tadd_line_to_DNA(dna_list[-1], line)\r\n\t\telse:\r\n\t\t\traise Exception()\r\n\tfile.seek(0)\r\n\treturn dna_list",
"def fasta_path_to_seqs(fasta_path, codon_table=False, codon_alphabet=False):\n seqs = []\n for record in SeqIO.parse(fasta_path, \"fasta\"):\n seqs.append(record)\n return seqs",
"def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences",
"def read_fasta(src, remove_gaps=False):\n file_obj = None\n if isinstance(src, str):\n try:\n file_obj = open(src, \"r\")\n except IOError:\n print((\"The file `%s` does not exist, exiting gracefully\" % src))\n elif isinstance(src, filetypes):\n file_obj = src\n else:\n raise TypeError('FASTA reader cannot recognize the source of %s, %s, %s' % (src,type(src),isinstance(src, filetypes)))\n name = None\n seq_list = list()\n for line_number, i in enumerate(file_obj):\n if i.startswith('>'):\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n seq_list = list()\n name = i[1:].strip()\n else:\n #seq = ''.join(i.strip().upper().split())\n seq = ''.join(i.strip().split())\n #if not is_sequence_legal(seq):\n # raise Exception(\"Error: illegal characeters in sequence at line %d\" % line_number)\n seq_list.append(seq)\n if name:\n if remove_gaps:\n yield name, ''.join(seq_list).replace('-', '')\n else:\n yield name, ''.join(seq_list)\n if isinstance(src, str):\n file_obj.close()",
"def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences",
"def get_fasta_sequence_ids(fasta):\n if not os.path.exists(fasta) or (not os.path.isfile(fasta)):\n raise FileNotFoundError(fasta)\n seq_ids = set()\n with open(fasta, \"r\") as f:\n # 'fasta' is https://biopython.org/wiki/SeqIO file type.\n for record in SeqIO.parse(f, \"fasta\"):\n seq_ids.add(record.id)\n return seq_ids",
"def parse_fasta_use_bio(file_name):\n pro_id_list = []\n for seq_record in SeqIO.parse(file_name, \"fasta\"):\n tmp_list = seq_record.id.strip('\\n').split('|')\n pro_id_list.append(tmp_list[1])\n # break\n\n return pro_id_list",
"def fasta_reader(fasta):\n # ditch the boolean (x[0]) and just keep the header/seq grouping\n fa_iter = (x[1] for x in itertools.groupby(fasta, lambda line: line[0] == \">\"))\n for header in fa_iter:\n # drop the \">\"\n name = next(header)[1:].strip()\n # join all sequence lines to one by iterating until the next group.\n read = \"\".join(s.strip() for s in next(fa_iter))\n yield name, read",
"def fasta(path):\n label = None\n sequence = None\n with open(path, 'r') as data:\n for line in data:\n line = line.strip()\n if line.startswith('>'):\n if label and sequence:\n yield (label, sequence)\n label = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if label and sequence:\n yield (label, sequence)",
"def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence"
]
| [
"0.78360283",
"0.77743053",
"0.7728829",
"0.7471323",
"0.74618864",
"0.7347454",
"0.73288727",
"0.73279846",
"0.73026425",
"0.72431064",
"0.7187846",
"0.7174564",
"0.71720695",
"0.71589667",
"0.714582",
"0.7135067",
"0.71167755",
"0.7116155",
"0.710813",
"0.7086274",
"0.70719475",
"0.7064045",
"0.706056",
"0.7044668",
"0.70296645",
"0.7013325",
"0.6990762",
"0.6985268",
"0.69847053",
"0.69846165"
]
| 0.7978697 | 0 |
Reads in a FASTA file and returns a list of only the headers (lines that start with ">") | def fasta_headers(file_name):
    headers = []
    with open('../test_files/' + file_name, 'r') as infile:
        text = infile.read()
    seqs = text.split('>')
    for seq in seqs:
        # everything before the first newline of a record is its header
        header = seq.split('\n', 1)[0]
        if header != '':
            headers.append(header)
    return headers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ReadFASTA(fastafile):\n lines = open(fastafile).readlines()\n headers_seqs = []\n header = None\n seq = []\n for line in lines:\n if line[0] == '>':\n if (not header) and (not seq):\n pass # first sequence in file\n elif header and not seq:\n raise ValueError, \"Empty sequence for %s\" % header\n elif seq and not header:\n raise ValueError, \"File does not begin with header.\"\n else:\n seq = ''.join(seq)\n seq = seq.replace(' ', '')\n headers_seqs.append((header, seq))\n header = line.strip()[1 : ]\n seq = []\n else:\n seq.append(line.strip())\n if (not header) and (not seq):\n pass # first sequence in file\n elif header and not seq:\n raise ValueError, \"Empty sequence for %s\" % header\n elif seq and not header:\n raise ValueError, \"File does not begin with header.\"\n else:\n seq = ''.join(seq)\n seq = seq.replace(' ', '')\n headers_seqs.append((header, seq))\n return headers_seqs",
"def test_fasta_get_headers(self):\r\n\r\n header_records = mfau.get_record_headers(full_file_name)\r\n\r\n if debug:\r\n for header_record in header_records:\r\n print header_record.strip()\r\n\r\n self.assertGreaterEqual(len(header_records), 0)",
"def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences",
"def entrez_fasta_parser(handleFasta):\n fullList = handleFasta.read().split(\"\\n\") \n resL = []\n seqFlag = False\n for fullLine in fullList:\n if fullLine == \"\":\n seqFlag = False\n continue\n elif fullLine[0] == \">\":\n resL.append(fullLine + \"\\n\")\n seqFlag = True\n elif seqFlag:\n resL[-1] += fullLine \n return resL",
"def fasta_reader(fasta):\n # ditch the boolean (x[0]) and just keep the header/seq grouping\n fa_iter = (x[1] for x in itertools.groupby(fasta, lambda line: line[0] == \">\"))\n for header in fa_iter:\n # drop the \">\"\n name = next(header)[1:].strip()\n # join all sequence lines to one by iterating until the next group.\n read = \"\".join(s.strip() for s in next(fa_iter))\n yield name, read",
"def read_fasta(fp):\n name, seq = None, []\n for line in fp:\n line = line.rstrip()\n if line.startswith(\">\"):\n if name: yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name: yield (name, ''.join(seq))",
"def readFastaFile(filename):\n info={}\n fhr=open(filename,\"r\")\n while(True):\n line=fhr.readline()\n if not line: break\n if(\">\" in line):\n try:\n info[line.strip()[1:].split()[0]]=fhr.readline().strip()\n except ValueError:\n pass\n return info",
"def read_fasta(fasta_name):\n \n \"\"\"first open the file outside \"\"\"\n file_handler = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n fasta_iter = (x[1] for x in groupby(file_handler, lambda line: line[0] == \">\"))\n\n for header in fasta_iter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in fasta_iter.__next__())\n\n # yield (headerStr, seq)\n result_record = {'header':headerStr,'seqRecord':seq}\n return result_record",
"def read_fasta(name):\n assert os.path.exists(name), name + ' does not exist'\n sequence_lst = []\n header_lst = []\n header = \"\"\n sequence = \"\"\n with open(name, \"rt\") as f_in:\n for line in f_in:\n data = line.strip()\n # jump empty lines\n if not data:\n continue\n # store header and sequence when a new header\n # (i.e. sequence) is found\n if sequence and header and data.startswith(\">\"):\n header_lst.append(header)\n sequence_lst.append(sequence)\n # reset header and sequence\n header = \"\"\n sequence = \"\"\n # save header of sequence\n if data.startswith(\">\"):\n header = data[1:]\n # save sequence\n if \">\" not in data:\n sequence += data\n # save last sequence\n if header and sequence:\n header_lst.append(header)\n sequence_lst.append(sequence)\n # outputs\n assert len(header_lst) == len(sequence_lst), \\\n \"cannot read same number of headers and sequences\"\n print(\"read %d sequences in %s\" % (len(sequence_lst), name))\n if len(sequence_lst) == 0:\n print(\"WARNING: {} seems empty of sequence\".format(name))\n return header_lst, sequence_lst",
"def readFasta(self, fastaFile):\t\n\t\tname, seq = None, []\n\t\tfor line in fastaFile:\n\t\t\tline = line.rstrip()\n\t\t\tif (line.startswith(\">\")):\n\t\t\t\tif name: yield (name, ''.join(seq))\n\t\t\t\tname, seq = line, []\n\t\t\telse:\n\t\t\t\tseq.append(line)\n\t\tif name: yield (name, ''.join(seq))",
"def _readheaderlines(f):\n hdrlines = []\n for i in range(0,26):\n hdrlines.append(f.readline())\n return hdrlines",
"def readFasta(self, fp):\n\t\t\n\t\tfor head, seq in self.parseFasta(fp):\n\t\t\t#analyzing the sequence\n\t\t\tself.analyzeSequence(seq)\n\t\t\t#saving the header\n\t\t\tif head == '':\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tself.header.append(head)",
"def read_fasta(file_path=\"\"):\n\n line = \"\"\n\n try:\n fasta_handle = open(file_path,\"r\")\n except:\n raise IOError(\"Your input FASTA file is not right!\")\n\n # make sure the file is not empty\n while True:\n line = fasta_handle.readline()\n if line == \"\":\n return\n if line[0] == \">\":\n break\n\n # when the file is not empty, we try to load FASTA file\n while True:\n if line[0] != \">\":\n raise ValueError(\"Records in Fasta files should start with '>' character\")\n title = line[1:].rstrip()\n lines = []\n line = fasta_handle.readline()\n while True:\n if not line:\n break\n if line[0] == \">\":\n break\n lines.append(line.rstrip())\n line = fasta_handle.readline()\n\n yield title,\"\".join(lines).replace(\" \",\"\").replace(\"\\r\",\"\")\n\n if not line:\n return\n\n fasta_handle.close()\n assert False, \"Your input FASTA file have format problem.\"",
"def find_headers(file):\n headers = []\n with open(file, 'r') as f:\n for line in f:\n if line[0] == '#':\n if line[-1] == '\\n':\n headers.append(line[1:-1].strip())\n else:\n headers.append(line[1:].strip())\n return headers",
"def read_fasta(filename, keep_formatting=True):\n\n with open(filename) as fasta:\n line = fasta.readline().rstrip()\n if not line.startswith(\">\"):\n raise IOError(\"Not FASTA format? First line didn't start with '>'\")\n if keep_formatting:\n sep = \"\\n\"\n else:\n sep = \"\"\n first = True\n seq = []\n header = \"\"\n while fasta:\n if line == \"\": #EOF\n yield header, sep.join(seq)\n break\n elif line.startswith(\">\") and not first:\n yield header, sep.join(seq)\n header = line.rstrip()[1:]\n seq = []\n elif line.startswith(\">\") and first:\n header = line.rstrip()[1:]\n first = False\n else:\n seq.append(line.rstrip())\n line = fasta.readline()",
"def read_fasta(filename, keep_formatting=True):\n\n with open(filename) as fasta:\n line = fasta.readline().rstrip()\n if not line.startswith(\">\"):\n raise IOError(\"Not FASTA format? First line didn't start with '>'\")\n if keep_formatting:\n sep = \"\\n\"\n else:\n sep = \"\"\n first = True\n seq = []\n header = \"\"\n while fasta:\n if line == \"\": #EOF\n yield header, sep.join(seq)\n break\n elif line.startswith(\">\") and not first:\n yield header, sep.join(seq)\n header = line.rstrip()[1:]\n seq = []\n elif line.startswith(\">\") and first:\n header = line.rstrip()[1:]\n first = False\n else:\n seq.append(line.rstrip())\n line = fasta.readline()",
"def get_header_and_sequence_lists(fh_in):\n temp = ''\n isSeq = isSecStr = False\n sequence_header = secstr_header = sequence = secstr = []\n for line in fh_in:\n if (isSeq | isSecStr) & (line[0] != \">\"):\n temp += line.strip()\n elif line[0] == \">\" & line.strip().endswith(\"sequence\"):\n sequence_header.append(line.strip())\n isSeq = True\n isSecStr = False\n if temp != '':\n secstr.append(temp)\n temp = ''\n elif line[0] == \">\" & line.strip().endswith(\"secstr\"):\n secstr_header.append(element.strip())\n isSeq = False\n isSecStr = True\n if temp != '':\n secstr.append(temp)\n data = ''\n if isSeq & temp != '':\n sequence.append(temp)\n else:\n secstr.append(temp)\n _check_size_of_lists(sequence_header, secstr_header)\n return sequence_header, sequence, secstr_header, secstr",
"def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)",
"def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head",
"def read_fasta(self, handle):\n read = \"\"\n for line in handle:\n if line[0] == \">\":\n if len(read):\n self.add_read(read)\n read = \"\"\n else:\n read += line.strip()\n self.add_read(read)",
"def fasta_seqs(file_name):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n text = infile.read()\n seqs = text.split('>')\n for seq in seqs:\n try:\n x = seq.split('\\n', 1)\n # sequence will be stored in x[1], and i am removing nextline '\\n' characters that comes with it.\n list.append(x[1].replace('\\n', ''))\n except:\n pass\n return list",
"def parse_fasta(infile, upper=False):\n try:\n fp = must_open(infile)\n except:\n fp = infile\n # keep header\n fa_iter = (x[1] for x in groupby(fp, lambda row: row[0] == \">\"))\n for header in fa_iter:\n header = next(header)\n if header[0] != \">\":\n continue\n # drop '>'\n header = header.strip()[1:]\n # stitch the sequence lines together and make into upper case\n seq = \"\".join(s.strip() for s in next(fa_iter))\n if upper:\n seq = seq.upper()\n yield header, seq",
"def parse_pdb_header(infile):\n header = []\n with File.as_handle(infile) as f:\n for l in f:\n record_type = l[0:6]\n if record_type in (\"ATOM \", \"HETATM\", \"MODEL \"):\n break\n else:\n header.append(l)\n return _parse_pdb_header_list(header)",
"def test_is_fasta_header(self):\r\n\r\n is_fasta_header = False\r\n\r\n with open(full_file_name, \"r\") as in_file:\r\n for line in in_file:\r\n is_fasta_header = mfau.is_header_line(line)\r\n\r\n # only testing the first line\r\n break\r\n\r\n self.assertEqual(is_fasta_header, True)",
"def BuildHeadList(all_file_contents):\n head_list = []\n list_all_file_contents = (all_file_contents)\n for line in list_all_file_contents: \n if line[0:4] != 'ATOM':\n head_list.append(line)\n else:\n break\n\n return head_list",
"def parseFasta(fh):\n\n record_seq = []\n record_id = None\n\n for line in fh:\n line = line.strip(\"\\n\")\n\n if line.startswith(\">\"):\n\n if record_seq:\n yield Record(record_id, \"\".join(record_seq))\n\n record_id = line[1:].split()[0]\n record_seq = []\n else:\n record_seq.append(line.replace(\"*\", \"-\"))\n\n if record_seq:\n yield Record(record_id, \"\".join(record_seq))",
"def _parse_fastq(f):\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()",
"def read_fasta(filename):\n with open(filename, \"r\") as f:\n s = \"\"\n for l in f.readlines()[1:]:\n s += l.strip()\n return s",
"def read_header(tgp_vcf):\n reader = pd.read_csv(tgp_vcf, compression=\"gzip\", iterator=True, header=None)\n loop = True\n while loop:\n header = reader.get_chunk(1).ix[0, 0]\n if header.startswith(\"#CHROM\"):\n loop = False\n return(header.lstrip(\"#\").split(\"\\t\"))",
"def read_file(file_name):\n with open(file_name) as f:\n content = f.readlines()\n names = []\n dnas = []\n dna = \"\"\n name = \"\"\n for line in content:\n line = line.strip()\n if line[0] == \">\":\n names.append(name)\n dnas.append(dna)\n name = line[1:]\n dna = \"\"\n else:\n dna += line\n names.append(name)\n dnas.append(dna)\n\n return (names[1:], dnas[1:])"
]
| [
"0.76121736",
"0.6860627",
"0.67152005",
"0.6706671",
"0.6701969",
"0.6697289",
"0.6667222",
"0.6633265",
"0.65563095",
"0.65458727",
"0.65280664",
"0.6475872",
"0.64492995",
"0.64413077",
"0.6364521",
"0.6364521",
"0.63030875",
"0.6302384",
"0.62837064",
"0.61853427",
"0.61853105",
"0.6154881",
"0.61546695",
"0.6151387",
"0.61373997",
"0.61348134",
"0.6124583",
"0.61158216",
"0.60825235",
"0.60795367"
]
| 0.8069983 | 0 |
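To make the header-extraction logic in the fasta_headers document above easier to follow, here is a small self-contained sketch that applies the same split-on-'>' idea to an inline FASTA string; the sequence names and bases are invented for the example:

# hypothetical FASTA content with two records
fasta_text = '>seq1 example header\nACGTACGT\n>seq2\nTTGACCA\n'
# the text before the first '>' is empty, so falsy records are skipped
headers = [record.split('\n', 1)[0] for record in fasta_text.split('>') if record]
print(headers)  # ['seq1 example header', 'seq2']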
Reads in a FASTQ file and writes it to a new FASTA file. This function should also keep the same file name and change the extension from .fastq to .fasta if new_name is not specified. | def fastq_to_fasta(file_name, new_name=None):
    if file_name.endswith('.fastq'):
        with open('../test_files/' + file_name, 'r') as infile:
            lines = infile.read().splitlines()
        if new_name is None:
            new_path = '../test_files/' + file_name.split('.')[0] + '.fasta'
        else:
            new_path = '../test_files/' + new_name + '.fasta'
        with open(new_path, 'w+') as f:
            # each FASTQ record spans four lines: @header, sequence, '+', quality;
            # only the header (rewritten with '>') and the sequence go into the FASTA file
            for i in range(0, len(lines) - 3, 4):
                f.write('>' + lines[i][1:] + '\n')
                f.write(lines[i + 1] + '\n')
        print('New file created : ' + new_path)
    return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fastq_to_fasta(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_filtered.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fastq\"):\n ID = str(record.id)\n SEQ = str(record.seq)\n if ID in wanted_set:\n out.write(\">\" + ID + \"\\n\" + SEQ + \"\\n\")",
"def __return_new_file_name(self, file_name: str, file_path: str):\n\n fastq_runid = re.split('[_.]', file_name) # split on `_` or `.`\n barcode_number = file_path.split(\"/\")[-1] # get the barcode number\n fastq_or_fasta = fastq_runid[-1] # get the .fastq/.fasta file extension\n\n # create the new file name\n new_file_name = \"_\".join(fastq_runid[:3]) # join first three elements\n new_file_name += \"_%s.%s\" % (barcode_number, fastq_or_fasta) # append the barcode number and file extension\n\n return new_file_name",
"def control_fastq_filename(demux_folder):\n pattern=re.compile(\"^(P[0-9]+)-([0-9]{3,4}).+fastq.*$\")\n for root, dirs, files in os.walk(demux_folder):\n for f in files:\n matches=pattern.search(f)\n if matches:\n new_name=f.replace(\"{}-{}\".format(matches.group(1), matches.group(2)), \"{}_{}\".format(matches.group(1), matches.group(2)))\n os.rename(os.path.join(root, f), os.path.join(root, new_name))",
"def fastaRenameRemoveNewlines(filename, usebaseEncode = False, minlength = 0):\n fo = open(filename,\"r\")\n fout = open(filename+\"_out\", \"w\")\n aline = fo.readline()\n faSeq = fo.readline()[:-1]\n readnum = 0\n if usebaseEncode == False:\n base = 10\n else:\n base = 36\n while aline:\n if aline[0] == \">\":\n if faSeq != \"\":\n readnum += 1\n if len(faSeq) >= minlength:\n fout.write(\">\"+baseEncode(readnum, base)+\"\\n\"+faSeq+\"\\n\")\n faSeq = \"\"\n else:\n faSeq = faSeq+aline[:-1]\n aline = fo.readline()\n fout.write(\">\"+baseEncode(readnum, base)+\"\\n\"+faSeq+\"\\n\")\n fout.close()\n fo.close()",
"def test_full_fasta_full_fastq(self):\r\n convert_fastq(self.fasta_file_path, self.qual_file_path,\r\n full_fasta_headers=True, full_fastq=True,\r\n output_directory=self.output_dir)\r\n\r\n actual_output_file_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '.fastq',\r\n self.output_dir)\r\n\r\n actual_output_file = open(actual_output_file_path)\r\n actual_output = actual_output_file.read()\r\n actual_output_file.close()\r\n self._files_to_remove.append(actual_output_file_path)\r\n\r\n self.assertEquals(actual_output, expected_fastq_full_fasta_full_fastq)",
"def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name",
"def convertFastqToFasta(inputFastq, outputFasta):\n out = open(outputFasta, \"w\")\n for (titleStr, seqStr, qualityStr) in FastqIterator(inputFastq):\n out.write(\">%s\\n%s\\n\" % (titleStr, seqStr))",
"def test_write_seqs_to_fasta(self):\r\n fd, output_fp = mkstemp(\r\n prefix=\"qiime_util_write_seqs_to_fasta_test\",\r\n suffix='.fasta')\r\n close(fd)\r\n self.files_to_remove.append(output_fp)\r\n seqs = [('s1', 'ACCGGTTGG'), ('s2', 'CCTTGG'),\r\n ('S4 some comment string', 'A')]\r\n exp = \">s1\\nACCGGTTGG\\n>s2\\nCCTTGG\\n>S4 some comment string\\nA\\n\"\r\n # works in write mode\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # calling again in write mode overwrites original file\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # works in append mode\r\n exp2 = exp + exp\r\n write_seqs_to_fasta(output_fp, seqs, 'a')\r\n self.assertEqual(open(output_fp).read(), exp2)",
"def fasta_header(path, new_path):\n with open(path, 'r') as f_in:\n with open(new_path, 'w+') as f_out:\n records = SeqIO.parse(f_in, 'fasta')\n for record in records:\n record.id = record.id.split(\" \")[0]\n record.description = record.id.split(\" \")[0]\n SeqIO.write(record, f_out, 'fasta')\n return new_path",
"def rename_records(f, fh, i):\n from Bio import SeqIO\n import gzip as gz\n for record in SeqIO.parse(gz.open(f, 'rt'), 'fastq'):\n record.id = \"{}_{}\".format(i, record.id)\n SeqIO.write(record, fh, \"fastq\")\n return fh",
"def RenameFile(self, oldname: str, newname: str) -> None:\n ...",
"def make_fastq_single(in_fasta, quals, out_fp,\r\n label_transform=split_lib_transform):\r\n outfile = open(out_fp, 'w')\r\n for rec, seq_id in iter_fastq(in_fasta, quals, label_transform):\r\n outfile.write(rec + '\\n')\r\n outfile.close()",
"def rename_sequences(self, new_fasta, mapping):\n assert isinstance(new_fasta, FASTA)\n new_fasta.create()\n for seq in self:\n new_name = mapping[seq.id]\n nucleotides = str(seq.seq)\n new_fasta.add_str(nucleotides, new_name)\n new_fasta.close()",
"def rename_file(fname):\n x,y = load_file(fname)\n date=y[0].split(\".\")\n if len(y[2])<20:\n title=y[2]\n else:\n title=y[2][0:20]\n title=title.replace(\" \",\"_\")\n \n new_name=\"{}{}{}{}.csv\".format(date[2],date[1],date[0],title)\n new_appendix=rename_appendix(y[10],new_name)\n os.rename(fname,new_name)\n replace_line(new_name,10,'Anhang;\"{}\"'.format(new_appendix))\n return new_name",
"def concatenate_fastq(path, isfastq, sample_name):\n \n r1 = []\n r2 = []\n filenames = get_filesnames_in_dir(path)\n \n for i in filenames:\n if \"fake_genome\" in i:\n continue\n elif \"R1\" in i:\n r1.append(i)\n elif \"R2\" in i:\n r2.append(i)\n if isfastq:\n nameR1 = sample_name + \"-R1.fastq\"\n nameR2 = sample_name + \"-R2.fastq\"\n else:\n nameR1 = sample_name + \"-R1.fasta\"\n nameR2 = sample_name + \"-R2.fasta\"\n\n #concatinate R1\n with open(path + nameR1, 'w') as outfile:\n for fname in sorted(r1):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n #concatinate R2\n with open(path + nameR2, 'w') as outfile:\n for fname in sorted(r2):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n \n for i in r1 + r2:\n os.remove(path + i)",
"def fastq(args):\n from jcvi.formats.fastq import FastqLite\n\n p = OptionParser(fastq.__doc__)\n p.add_option(\"--qv\", type=\"int\", help=\"Use generic qv value\")\n\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n fastqfile = fastafile.rsplit(\".\", 1)[0] + \".fastq\"\n fastqhandle = open(fastqfile, \"w\")\n num_records = 0\n\n if opts.qv is not None:\n qv = chr(ord(\"!\") + opts.qv)\n logging.debug(\"QV char '{0}' ({1})\".format(qv, opts.qv))\n else:\n qv = None\n\n if qv:\n f = Fasta(fastafile, lazy=True)\n for name, rec in f.iteritems_ordered():\n r = FastqLite(\"@\" + name, str(rec.seq).upper(), qv * len(rec.seq))\n print(r, file=fastqhandle)\n num_records += 1\n\n else:\n qualfile = get_qual(fastafile)\n for rec in iter_fasta_qual(fastafile, qualfile):\n SeqIO.write([rec], fastqhandle, \"fastq\")\n num_records += 1\n\n fastqhandle.close()\n logging.debug(\"A total of %d records written to `%s`\" % (num_records, fastqfile))",
"def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)",
"def base_rename(self, new_name):\n\n new_path = join(dirname(self.fspath), new_name)\n\n return self.rename(new_path)",
"def newfile(filename):\n # Open the new file for writing\n with open(filename, \"w\") as file:\n pass",
"def genSamName(fastq):\n return os.path.join(samFolder, os.path.splitext(fastq)[0] + \".sam\")\n # return os.path.join(samFolder, ntpath.split(fastq)[1].replace(\".fastq\", \".sam\"))",
"def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')",
"def quality_matcher(fasta, full_fastq, filt_fastq, trunclen):\n with open(fasta, \"r\") as fasta, open(full_fastq, \"r\") as fastq, open(filt_fastq, \"w\") as new_fastq:\n #make lists of the fasta and fastq files, where every successive value is a successive line\n #purpose of -1: to avoid the \"\\n\" newline character at the end of the lines\n fastq_list = [line[:-1] for line in fastq]\n fasta_list = [line[:-1] for line in fasta]\n #iterate through the sequence ids in the fasta file\n for fasta_index, fasta_id in enumerate(fasta_list):\n if fasta_id[0] == \">\":\n #get the list index of the matching sequence id in the metagenomic fastq file\n fastq_index = fastq_list.index(\"@{}\".format(fasta_id[1:]))\n #print and write a new fastq entry with the quality scores string truncated to the same length as the sequence from the fasta file\n print(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))\n new_fastq.write(str(\"@{}\".format(fasta_id[1:])) + \"\\n\" + str(fasta_list[fasta_index+1]) + \"\\n\" + str(\"+{}\".format(fasta_id[1:])) + \"\\n\" + str(fastq_list[fastq_index+3][:int(trunclen)]))",
"def make_fasta(pair, filename, id):\n \n fname = filename + \"-R1.fasta\"\n with open(fname,\"w\") as r1:\n r1.write(\">\" + id + \"\\n\")\n r1.write(pair[0])\n r1.write(\"\\n\")\n \n fname = filename + \"-R2.fasta\"\n with open(fname,\"w\") as r2:\n r2.write(\">\" + id + \"\\n\")\n r2.write(pair[1])\n r2.write(\"\\n\")",
"def fastq_filename(fastq_base):\n return fastq_base+\"_1.fastq\", fastq_base+\"_2.fastq\"",
"def fix_fasta(database_names):\n for file in database_names:\n file_mod = file.replace(\".fasta\", \"_mod.fasta\")\n with open(file, 'r') as f:\n lines = f.readlines()\n new_lines = []\n for line in lines:\n if '|' in line and \">\" not in line:\n # we replace spaces in header line with \"__\"\n # so I can manipulate that later as biopython doesn't\n # like \"__\"\n new_line = \">\"+line.replace(\" \", \"__\")\n new_lines.append(new_line)\n else:\n new_lines.append(line)\n with open(file_mod, 'w') as f:\n for line in new_lines:\n f.write(line)",
"def save(self, filename, format_='fasta'):\n format_ = format_.lower()\n\n if isinstance(filename, str):\n try:\n with open(filename, 'w') as fp:\n for read in self:\n fp.write(read.toString(format_))\n except ValueError:\n unlink(filename)\n raise\n else:\n # We have a file-like object.\n for read in self:\n filename.write(read.toString(format_))\n return self",
"def info_to_fasta(infofile, fastafile, append, infoobj=None):\n if infoobj is None:\n infoobj = read_tsv_or_parquet(infofile)\n if append:\n filemode = \"a+\"\n else:\n filemode = \"w\"\n with Path(fastafile).open(filemode) as file_handle:\n fcntl.flock(file_handle, fcntl.LOCK_EX)\n logger.debug(f\"Writing to {fastafile} with mode {filemode}.\")\n seqs = infoobj[\"prot.seq\"].copy()\n del infoobj[\"prot.seq\"]\n for gene_id, row in infoobj.iterrows():\n file_handle.write(f\">{gene_id} {row.to_json()}\\n\")\n file_handle.write(f\"{seqs[gene_id]}\\n\")\n fcntl.flock(file_handle, fcntl.LOCK_UN)",
"def writeFasta(self):\n utils.log(\"writing {} bases to {} ...\".format(\n self.numBases, self.fastaFileName))\n with open(self.fastaFileName, 'w') as fastaFile:\n firstLine = \">{} Generated by generate_fasta.py\".format(\n self.referenceId)\n print(firstLine, file=fastaFile)\n basesPerLine = 70\n numLines = int(math.ceil(self.numBases / basesPerLine))\n baseChoices = ['A', 'G', 'C', 'T']\n basesRemaining = self.numBases\n for i in range(numLines):\n if basesRemaining < basesPerLine:\n basesToWrite = basesRemaining\n else:\n basesToWrite = basesPerLine\n bases = ''.join(\n [random.choice(baseChoices) for _ in range(basesToWrite)])\n line = \"{}\".format(bases)\n self.bases += line\n print(line, file=fastaFile)\n basesRemaining -= basesToWrite\n assert basesRemaining == 0",
"def test_fasta_naming(self):\n aln2fasta = hhsuite.AlignmentToFasta(q_name=\"foo\", t_name=\"{name}\")\n self.assertEqual(\n aln2fasta.fasta(self.hit, \"A-E----\"),\n \">foo\\nJKLMNOP\\n>templatename\\nA-E----\\n\")",
"def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)"
]
| [
"0.5908012",
"0.57850957",
"0.5716593",
"0.56831974",
"0.56598663",
"0.56053895",
"0.5586926",
"0.5581481",
"0.5579382",
"0.5540243",
"0.55108505",
"0.55101264",
"0.54717064",
"0.5441375",
"0.54151505",
"0.5385739",
"0.53766024",
"0.5341403",
"0.5340518",
"0.5337158",
"0.5326581",
"0.5316529",
"0.5292253",
"0.529073",
"0.5232827",
"0.52192456",
"0.5196458",
"0.51867914",
"0.51760817",
"0.51707214"
]
| 0.848807 | 0 |
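For clarity on the FASTQ-to-FASTA conversion above, this is a self-contained sketch of the per-record mapping (a FASTQ record is four lines; only the header and the sequence survive in FASTA); the read name and bases are made up for the example:

# hypothetical single FASTQ record
fastq_record = '@read1\nACGTACGT\n+\nIIIIIIII\n'
lines = fastq_record.splitlines()
# the '@' header becomes a '>' header; the '+' separator and quality line are dropped
fasta_record = '>' + lines[0][1:] + '\n' + lines[1] + '\n'
print(fasta_record)  # prints '>read1' and 'ACGTACGT' on separate lines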
Generates a list of all 6 possible reading frames for a given strand of DNA | def reading_frames(dna):
    # The six frames are the forward strand at offsets 0, 1 and 2,
    # interleaved with the reverse complement at offsets 0, 1 and 2.
    frames = []
    # reverse complement of the strand, reusing the existing fast_complement() helper
    rev_comp = fast_complement(dna[::-1])
    frames.append(dna)
    frames.append(rev_comp)
    frames.append(dna[1:])
    frames.append(rev_comp[1:])
    frames.append(dna[2:])
    frames.append(rev_comp[2:])
    return frames | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_all_ORFs(dna):\n\n readingFrame0 = ''\n readingFrame1 = ''\n readingFrame2 = ''\n output = ''\n for k in range(0,3): \n if k ==0: \n readingFrame0 = find_all_ORFs_oneframe(dna[k:])\n elif k ==1: \n readingFrame1 = find_all_ORFs_oneframe(dna[k:])\n elif k ==2:\n readingFrame2 = find_all_ORFs_oneframe(dna[k:])\n print 'Reading Frame 1: ' + str(readingFrame0) + ', Reading Frame 2: ' + str(readingFrame1) + ', Reading Frame 3: ' + str(readingFrame2)\n output = readingFrame0 + readingFrame1 + readingFrame2\n return output",
"def frame_strand(strand):\n logging.info(\"Framing strand: \" + strand)\n framed_strand = []\n rolling_frame = ['','','']\n frame_begin = -1\n for i in range(0, len(strand)):\n rolling_frame[0] = rolling_frame[1]\n rolling_frame[1] = rolling_frame[2]\n rolling_frame[2] = strand[i]\n if rolling_frame[0]+rolling_frame[1]+rolling_frame[2] == start_codon:\n # We have the frame at this point. Prune the beginning of the string:\n # We are at a pos+2 (c ccc atg ccc ccc c) so cut back 2 and then break\n # ^\n frame_begin = i - 2\n logging.debug(\"Found start codon, strand is now framed at \" + str(frame_begin) + \".\")\n break\n if frame_begin == -1:\n # In this case, there is no valid frame in the strand. Return an empty list.\n logging.info(\"No valid frame in strand.\")\n return [], 0\n pruned_strand = strand[frame_begin:]\n framed_strand = [pruned_strand[i:i+3] for i in range(0, len(pruned_strand), 3)] # Make triples\n logging.debug(\"Framed strand is: \" + str(framed_strand))\n if len(framed_strand[-1]) < 3:\n logging.info(\"Newly framed sequence terminates with non-codon: \" + framed_strand[-1])\n return framed_strand, frame_begin",
"def find_all_ORFs(dna):\n first_frame = find_all_ORFs_oneframe(dna[0:])\n second_frame = find_all_ORFs_oneframe(dna[1:])\n third_frame = find_all_ORFs_oneframe(dna[2:])\n list_of_frames = first_frame + second_frame + third_frame\n return list_of_frames",
"def find_all_ORFs_oneframe(dna):\n strand = []\n startCodon = [\"ATG\"]\n for i in range(0,len(dna),3):\n triple = dna[i:i+3]\n if triple in startCodon: \n strand.append(rest_of_ORF(dna[i:]))\n print strand\n return strand",
"def find_all_ORFs_oneframe(dna):",
"def find_all_ORFs(dna):\n #inital conditions\n i = 0\n start_codon = ('ATG')\n end_index = len(dna)\n return_variable = []\n\n #for statement tells the code to run three times at three inital index values\n #0,1,and 2 which allows for each reading frame to be read\n for i in range (3):\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n if codon in start_codon: #check if the codon the code is looking at is a start codon\n orf = rest_of_ORF(dna[i:]) #then determine that this is an orf, make it a string given the index of the start codon\n return_variable.append(orf) #and append said string to a list of ORFs\n i += len(orf) \n i += 3\n #return a list of strings of DNA from both strands\n return return_variable",
"def rest_of_ORF(dna):\n readingFrame = ''\n endCodons = [\"TAG\", \"TAA\", \"TGA\"]\n for i in range(0,len(dna),3):\n triple = dna[i:i+3]\n if triple in endCodons:\n readingFrame = readingFrame\n break\n else: \n readingFrame+=triple \n return readingFrame",
"def find_all_ORFs_oneframe(dna):\n x = 3\n list_of_dna = []\n while (x < len(dna)):\n if dna[x-3:x] == 'ATG':\n current_dna = rest_of_ORF(dna[x-3:])\n list_of_dna.append(current_dna)\n x += len(current_dna)\n x += 3\n return list_of_dna",
"def find_all_ORFs(dna):\n res = []\n for i in range(0, 3):\n s = dna[i:]\n# print i, s,\n# l = len(s)\n# for j in range(0, l, 3):\n# if s[j: j + 3] == start_codon:\n# r = find_all_ORFs_oneframe(s[j:])\n# print r\n# if len(r) > 0:\n# for item in r:\n# res.append(item)\n# break # careful for indentation here!!\n r = find_all_ORFs_oneframe(s)\n if len(r) > 0:\n# for item in r:\n# res.append(item)\n res += r # [WOW] I love python!\n return res",
"def find_all_ORFs(dna):\n all_ORFs=[]\n for i in range (0,3):\n \tframe=dna[i:] \n all_ORFs+=find_all_ORFs_oneframe(frame)\n return all_ORFs",
"def find_all_ORFs_oneframe(dna): \n i=0\n multiple_list=[]\n while i < len(dna):\n part = dna[i:i+3]\n if part == 'ATG': #if the first indecied are ATG then it runs the code that creates the string of DNA\n ORF=rest_of_ORF(dna[i:]) \n multiple_list.append(ORF)\n i+=len(ORF)\n else:\n i+=3\n # print multiple_list\n return multiple_list\n \n #runs fuinction to mmake list of function\n #need to save to list\n #need to continue to next ATG start \n #need to add that to list\n #need to output new list as commas between lists",
"def find_all_ORFs_oneframe(dna):\n \n\n ORFs=[]\n i=0\n while i<len(dna):\n if dna[i:i+3]==\"ATG\":\n j=i\n dna2=rest_of_ORF(dna[j:])\n \n if dna2==None:\n \tbreak\n ORFs.append(dna2)\n i=i+len(dna2)\n i=i+3\n return ORFs",
"def find_all_ORFs_oneframe(dna):\n i=0 \n ORF_list = []\n while i<len(dna):\n if dna[i : i+3] == \"ATG\":\n start_place = i \n current_ORF = rest_of_ORF(dna[start_place:])\n i += 3 + len(current_ORF)\n ORF_list.append(current_ORF) \n return ORF_list",
"def find_all_ORFs_oneframe(dna):\n dna_split = split_into_codons(dna)\n i = 0\n all_ORFs_oneframe = []\n length_of_orf = 0\n while i < len(dna_split):\n if dna_split[i] == \"ATG\":\n orf = rest_of_ORF(dna[int(i)*3:])\n all_ORFs_oneframe += [orf]\n length_of_orf = math.ceil(len(orf)/3)\n i += length_of_orf\n else:\n i += 1\n return all_ORFs_oneframe",
"def find_all_ORFs_oneframe(dna):\n condition = 1\n dna2 = dna\n list_of_ORFs = []\n while condition == 1:\n num_codons = int(len(dna2)/3)\n num = 0\n list_codons = []\n start_index = -1\n while num < num_codons:\n num_start = int(num*3)\n num_end = int(num*3 + 3)\n list_codons.append(dna2[num_start:num_end])\n num = num + 1\n for element in list_codons:\n if element == 'ATG':\n start_index = list_codons.index(element)\n break\n if start_index != -1:\n this_orf = rest_of_ORF(dna2[start_index*3:])\n list_of_ORFs.append(this_orf)\n dna2 = dna2[start_index*3+len(this_orf)+3:]\n else:\n condition = 2\n\n return list_of_ORFs",
"def find_all_ORFs_oneframe(dna):\n\n ORFlist = []\n i = 0\n lend = len(dna)\n while i < lend:\n codon = dna[i:i+3]\n if codon == 'ATG':\n orf = rest_of_ORF(dna[i:])\n ORFlist.append(orf)\n i = i + len(orf)\n else:\n i = i+3\n return ORFlist",
"def find_all_ORFs_oneframe(dna):\n #inital conditions\n i = 0\n start_codon = ('ATG')\n end_index = len(dna)\n return_variable = []\n\n #for each codon in the set of DNA, check if it is a start codon\n #if it is a start codon, add the string until the stop codon to a list\n #then continue running\n while i < len(dna)-2:\n codon = dna[i:i+3] \n if codon in start_codon:\n orf = rest_of_ORF(dna[i:])\n return_variable.append(orf)\n i += len(orf)\n i += 3\n\n\n #return a list of strings of DNA\n return return_variable",
"def find_all_ORFs_oneframe(dna):\n\tindex = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# unitiate index at 0\n\tsegment = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitializes empty list\n\twhile index < len(dna):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# given that index is smaller than length of string\n\t\tcodon = dna[index:index + 3]\t\t\t\t\t\t\t\t\t\t\t\t\t\t# creates codons from nucleotides starting with first 3 letters\n\t\tif codon == \"ATG\":\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if the codon is ATG\n\t\t\tsegment.append(rest_of_ORF(dna[index:]))\t\t\t\t\t\t\t\t\t\t# run rest_of_ORF starting at index\n\t\t\tindex = index + len(rest_of_ORF(dna[index:]))\t\t\t\t\t\t\t\t\t# new index starting at old + length of ORF output string\n\t\telse:\n\t\t\tindex += 3\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# if ATG isn't found, move to next codon and check again\n\n\treturn segment\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns list of ORFs",
"def find_all_ORFs(dna):\n #these functions offset to analyze each each open frame reference\n zero_offset=find_all_ORFs_oneframe(dna[0:])\n \n one_offset=find_all_ORFs_oneframe(dna[1:])\n \n two_offset=find_all_ORFs_oneframe(dna[2:])\n \n return zero_offset+one_offset+two_offset\n \n # YOUR IMPLEMENTATION HERE",
"def find_all_ORFs_oneframe(dna):\n i = 0\n list_ORF = []\n\n def search_ORF(dna,i):\n aa = dna[3*i:(3*i)+3]\n if aa == 'ATG':\n list_ORF.append(rest_of_ORF(dna,aa,i))\n i = i + 1 + len(rest_of_ORF(dna,aa,i))/3\n else:\n i = i + 1\n if len(aa) ==3:\n search_ORF(dna,i)\n \n return list_ORF\n\n return search_ORF(dna,i)\n # i = i + ((len(list_ORF[-1]))/3)\n # search_ORF(dna,i)",
"def find_all_ORFs(dna):\n all_ORFs = []\n for i in range(0, 3):\n dna_new = dna[i:]\n all_ORFs += find_all_ORFs_oneframe(dna_new)\n return all_ORFs",
"def gene_finder(dna):\n #use longest_ORF_noncoding to determine what the longest strand of DNA could be in a random sequence\n threshold = longest_ORF_noncoding(dna,1500)\n\n #find all of the orfs on both strand and the generate an empty list\n all_orfs = find_all_ORFs_both_strands(dna)\n all_orfs_long = []\n\n #this makes a list of strings of dna that are longer than the threshold\n\n for i in range (0, len(all_orfs)):\n if len(all_orfs[i]) > threshold:\n all_orfs_long.append(all_orfs[i])\n\n #this takes each dna string in a list and converts it to an amino acid sequence\n\n aa_list = []\n\n for i in range (0, len(all_orfs_long)):\n aa_strand = coding_strand_to_AA(all_orfs_long[i])\n aa_list.append(aa_strand)\n\n #returns strings of amino acids\n return aa_list",
"def find_all_ORFs(dna):\n index = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# initialize index\n all_ORFs = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitalize empty list\n while index < 3:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# switches throught the 3 frames\n \tall_ORFs = all_ORFs + find_all_ORFs_oneframe(dna[index:])\t# adds all ORFs oneframe to the list\n \tindex += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# indexes +1\n return all_ORFs \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns list of all ORFs in all 3 frames",
"def find_all_ORFs(dna):\n all_ORFs = []\n i = 0\n\n for i in range (0,3): \n all_ORFs.extend(find_all_ORFs_oneframe(dna))\n dna = 'X' + dna\n\n return all_ORFs",
"def gene_finder(dna):\n threshold = longest_ORF_noncoding(dna, 1500)\n l = []\n for i in find_all_ORFs_both_strands(dna):\n \tif len(i)>=threshold:\n \t\tl.append(coding_strand_to_AA(i))\n print l\n return l",
"def gene_finder(dna):\n orfs = find_all_ORFs_both_strands(dna)\n print(orfs)\n threshold = longest_ORF_noncoding(dna, 1000)\n print('threshold is', threshold)\n print('number of orfs:', len(orfs))\n aa_sequences = []\n i = 0\n while i < len(orfs):\n print(len(orfs[i]))\n if len(orfs[i]) > threshold:\n print('if')\n aa_sequences += [coding_strand_to_AA(orfs[i])]\n i += 1\n print(aa_sequences)",
"def find_all_ORFs_oneframe(dna):\n res = []\n l = len(dna)\n next_pos = 0\n while True:\n arg = dna[next_pos:]\n# orf = rest_of_ORF(arg)\n# res.append(orf)\n# if arg == orf: # if: no stop codon\n# break\n# next_pos = dna.index(orf, next_pos) + len(orf) + 3 # point next position\n# if next_pos >= l: # if: meet the end of dna\n# break\n if arg[0:3] == start_codon:\n orf = rest_of_ORF(arg)\n res.append(orf)\n if arg == orf: # if: no stop codon\n break\n next_pos = dna.index(orf, next_pos) + len(orf) + 3 # point next position\n else: # if: not start codon, see next\n next_pos += 3\n if next_pos >= l: # if: meet or go beyond the end of dna\n break\n return res",
"def gene_finder(dna):\n all_orfs_both_strands = find_all_ORFs_both_strands(dna)\n longest_fake_orf = longest_ORF_noncoding(dna, 20)\n for element in all_orfs_both_strands:\n if len(element) > longest_fake_orf:\n a_a_string = coding_strand_to_AA(element)\n else:\n a_a_string = 'not longer than shuffle'\n print(a_a_string)",
"def parse_dataframes(genome_gtf, sralist):\n\n def gather_strand_by_geneID_dict(genome_gtf):\n \"\"\"\n Returns dictionary with strand orientation as values and geneIDs as Keys/\n e.g.: {'YAL012W': '+',\n 'YAL069W': '+',\n 'YAL068W-A': '+',\n \"\"\"\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict\n\n\n def import_scikit_data(sralist):\n \"\"\"\n Import results from scikit pipeline for all datasets contained in datsets_names.\n \"\"\"\n scikit_data_dict = {}\n for dataset in sralist:\n with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:\n scikit_data_dict[dataset] = [json.load(scikit_data)]\n return scikit_data_dict\n\n\n def build_mat_scikit_strandOriented(sralist, scikit_data):\n \"\"\"\n Building of scikit_df based on the output of plot_ribo_density_dict.py script.\n\n C/-/reverse/complementary strand are taken into account and the profile values\n (\"codon_density_profile\", \"codon_triplet\", \"codon_AA\") are reversed. This is\n performed by adding [::-1] to C strands profile ends.\n\n Same profile values are also have their extremities trimmed out of 8 codons.\n (This is because the scikit-ribo pipeline considers 8 extra codons on each end,\n but here we are only interested in the coding sequence). This is performed by\n adding [8:-8] to profile lists ends.\n \"\"\"\n\n scikit_mat = {}\n seq_codons = {}\n seq_aa = {}\n\n for geneID in scikit_data[sralist[0]][0].keys():\n for ix, dataset in enumerate(sralist):\n\n if geneID in scikit_data[dataset][0].keys():\n current_profile = scikit_data[dataset][0].get(geneID, np.nan)\n current_ribo = current_profile[0]\n current_ribo = current_ribo[8:-8]\n N = len(sralist)\n M = len(current_ribo)\n print(geneID, M)\n\n if ix == 0:\n current_matrix = np.zeros((N,M)) * np.nan\n\n current_seq_codons = current_profile[1]\n current_seq_codons = current_seq_codons[8:-8]\n\n current_seq_aa = current_profile[2]\n current_seq_aa = current_seq_aa[8:-8]\n\n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n seq_codons[geneID] = current_seq_codons\n seq_aa[geneID] = current_seq_aa\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n seq_codons[geneID] = current_seq_codons[::-1]\n seq_aa[geneID] = current_seq_aa[::-1]\n \n \n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n current_matrix[ix,:] = current_ribo\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n current_matrix[ix,:] = current_ribo[::-1]\n \n if np.sum(current_matrix) > 0: \n scikit_mat[geneID] = current_matrix\n\n# scikit_df = pd.DataFrame(values_list, columns=columns_list)\n\n return scikit_mat, seq_codons, seq_aa\n\n\n def mean_norm(row):\n codon_dens_prof = row.codon_density_profile\n profile_average = np.average(codon_dens_prof)\n\n return [x/profile_average for x in codon_dens_prof]\n \n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df.apply(mean_norm, axis=1)\n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df['mean_norm_codon_density_profile'].apply(lambda x: x[8:-8])\n\n strand_by_geneID_dict = gather_strand_by_geneID_dict(genome_gtf)\n scikit_data_dict = import_scikit_data(sralist)\n scikit_data_mat, seq_codons_dict, seq_aa_dict = build_mat_scikit_strandOriented(sralist, 
scikit_data_dict)\n\n with open('../data/processed/scikit_mat.pkl', 'wb') as f:\n \tpickle.dump(scikit_data_mat, f)\n\n with open('../data/processed/scikit_codonseq.pkl', 'wb') as f_seq:\n pickle.dump(seq_codons_dict, f_seq)\n \n\n return scikit_data_mat",
"def find_all_ORFs(dna):\n \n # YOUR IMPLEMENTATION HERE"
]
| [
"0.65610415",
"0.654116",
"0.6465748",
"0.6425101",
"0.6400191",
"0.6245368",
"0.61965746",
"0.6174377",
"0.6132045",
"0.6130481",
"0.60865504",
"0.6047133",
"0.6040552",
"0.60298204",
"0.60196996",
"0.5954839",
"0.591576",
"0.5874647",
"0.5840224",
"0.58145535",
"0.58045846",
"0.578043",
"0.5757494",
"0.56416506",
"0.5623227",
"0.55466676",
"0.55402666",
"0.54792386",
"0.54627997",
"0.54313123"
]
| 0.7209891 | 0 |
fetches instruction from ROM, increases IP and returns instruction | def fetch_instruction(self) -> dict:
instruction = self.__ROM.read(self.regs["ip"].read())
self.regs["ip"].inc()
return self.disassembler.decode_instruction(instruction) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_execute(self):\n\n op_code = self.mem.read(self.reg.ip)\n self.reg.ip_inc()\n addr = self.mem.read(self.reg.ip)\n self.reg.ip_inc()\n\n # Execute the instruction on addr.\n self.op_codes[op_code.num](addr)",
"def fetch(self):\n line = self.code[self.ip]\n self.ip += 1\n return line",
"def getInstructionAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def instr(addr=None):\n global simulator\n\n if simulator is None:\n print \"Program is not started.\"\n return\n try:\n if addr is None:\n addr = pc()\n\n instr = simulator.get_instruction(addr)\n if instr is None:\n for i in program.disas(addr, 1):\n instr = i[1]\n print instr\n else:\n print instr\n except:\n simulation_error()",
"def INT():\n\tglobal pointer, memory\n\tprint(memory[memory[pointer + 0x01]], end='')\n\tpointer += 0x02",
"def getInstructionAfter(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def request_new_ip(self, mac):\n self.execute_script('new_ip', mac)",
"def advance(self):\n if self.instr is not None:\n self.simulator.registers[int(self.instr.binary[20:25], 2)][1] = self.instr.result",
"def fetch(self, memory_ram):\n # empty no sirve para nada pero no la borren xd\n empty = [0 for _ in range(8)]\n self.IR.copy_from_int8(memory_ram.get_celds()[self.PC.cast_hex()])\n new_r = self.alu.increment(self.PC.cast_int8(), empty)\n self.PC.copy_from_int8(new_r)",
"def getInstructionBefore(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def run(self):\n\n self.running = True\n\n # while self.running:\n\n # ir = self.ram_read(self.pc) # Instruction Register, contains a copy of the currently executing instruction\n \n # operand_1 = self.ram_read(self.pc + 1)\n # operand_2 = self.ram_read(self.pc + 2)\n \n # if ir == 0b10000010: # LDI\n # self.registers[operand_1] = operand_2\n # self.pc += 3\n\n # elif ir == 0b01000111: # PRN\n # print(self.registers[operand_1])\n # self.pc += 2\n\n # elif ir == 0b00000001: # HLT\n # self.running = False\n # self.pc += 1\n\n # else:\n # print(f\"Unknown instruction\")\n\n while self.running:\n ir = self.ram_read(self.pc)\n self.opcodes[ir]()\n number_of_operands = (ir & 0b11000000) >> 6\n how_far_to_move_pc = number_of_operands + 1\n self.pc += how_far_to_move_pc",
"def runIntcode(program):\n\n pc = 0\n\n while program[pc] != 99:\n command = program[pc]\n reg1 = program[program[pc + 1]]\n reg2 = program[program[pc + 2]]\n dest = program[pc + 3]\n\n if command == 1:\n print (pc, \" (add) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 + reg2\n\n if command == 2:\n print (pc, \" (mul) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 * reg2\n\n pc = pc + 4\n\n return program",
"def sendInstruction(self,instruction):\n\t\tstartMessage = \"start\"\n\t\tendMessage = \"end\"\n\t\t\n\t\tNanotecSharedMemoryClient.writeMemory(self.dataMemory,instruction)\n\t\tNanotecSharedMemoryClient.writeMemory(self.statusMemory,startMessage)\n\t\t\n\t\t# wait for instruction to be executed\n\t\tcurrentStatus = NanotecSharedMemoryClient.readMemory(self.statusMemory)\n\t\twhile ( currentStatus != endMessage):\n\t\t\tcurrentStatus = NanotecSharedMemoryClient.readMemory(self.statusMemory)\n\t\t\t#time.sleep(1)\n\t\t\t#print(\"currentStatus: \" + currentStatus)\n\t\t\t#print(\"currentData: \" + NanotecSharedMemoryClient.readMemory(self.dataMemory))\n\t\t\t#print(\"\")\n\t\t\t\n\t\t\t \n\t\t\n\t\treturnString = NanotecSharedMemoryClient.readMemory(self.dataMemory)\n\t\treturn returnString",
"def advance(self):\n # if the current program counter is still smaller than the last instruction\n if self.simulator.programCounter < (len(self.simulator.instrCollection) * 4 + 0x1000):\n # increment the instruction counter\n self.simulator.instrCount += 1\n # get another instruction\n self.instr = PipelineInstruction(self.simulator.mainmemory[self.simulator.programCounter])\n else:\n self.instr = None\n # increment program counter to the next byte\n self.simulator.programCounter += 4",
"def read_opcode(self) -> int:\n\n offset = self.current_address - self.STARTING_ADDRESS\n\n if self.insight:\n self.insight.opcode(self.rom_data, offset)\n\n return self.rom_data[offset] << 8 | self.rom_data[offset + 1]",
"def increment_instr(self):\n self.instruction_count += 1",
"def advance(self):\n\n if self.instr is not None:\n #calculate the offset of the lw and sw instructions\n if opcode_decode[self.instr.opcode] == 'Load':\n self.instr.source1RegValue = self.instr.source1RegValue + int(self.instr.imm)\n else:\n self.instr.result = eval(\n \"%d %s %d\" % (\n self.instr.source1RegValue, self.simulator.operations[self.instr.operation],\n self.instr.source2RegValue))",
"def getInstructionAfter(self, instruction: ghidra.program.model.listing.Instruction) -> ghidra.program.model.listing.Instruction:\n ...",
"def test_add_to_i(self, cpu):\n cpu.V_register = bytearray([1, 5, 8, 12, 15, 18, 29, 53,\n 78, 102, 158, 183, 202, 234, 255, 0])\n for x in range(0x0, 0xF):\n cpu.opcode = 0xF01E | (x << 8)\n for i in range(cpu.memory_start, cpu.memory_size):\n cpu.I = i\n cpu.add_to_i()\n assert(cpu.I == (i+cpu.V_register[x]) & 0xFFF)",
"def instruction_call(self, address):\n next_instruction_offset = self.exec_ptr + 2 # the value of the next instruction's memory\n\n self.stack_push(next_instruction_offset)\n\n if Vm.is_register(address):\n address = self.get_register(address)\n\n self.exec_ptr = Vm.filter_mem_address(address)",
"def execute_program(self, emulator, callback=None, ip=0):\n while ip >=0 and ip < len(self.program):\n # Write IP value to IP reg\n emulator.registers[self.ip_reg] = ip\n\n # Execute instruction\n instruction = self.program[ip]\n emulator.dispatch_table[instruction[0]](*instruction[1:])\n\n # Read IP back from IP reg\n ip = emulator.registers[self.ip_reg]\n\n # Increment IP as per requirements\n ip += 1\n\n if callback:\n callback(ip)",
"def decode(self, address: int = None) -> None:\n\n self.current_address = address if address else self.STARTING_ADDRESS\n context_change = False\n\n while not context_change:\n if self.current_address - self.STARTING_ADDRESS + 1 > len(self.rom_data):\n self.context_change = True\n break\n\n print(\n f\"Current Address: {self.current_address} ({hex(self.current_address)})\"\n )\n\n opcode = self.read_opcode()\n assert isinstance(opcode, int)\n\n operation = self.read_operation(opcode)\n assert isinstance(operation, int)\n\n if self.insight:\n self.insight.execution_context(opcode, operation)\n\n if operation == 0x1000:\n # 1NNN: Jumps to address NNN.\n # This jump doesn't remember its origin, so no stack interaction\n # is required. However, it is worth having this recognized as a\n # context change with a label.\n context_change = True\n\n address = self.read_address(opcode)\n assert isinstance(address, int)\n\n self.add_to_disassembly(operation, address)\n self.add_label(address)\n self.add_context(address)\n\n elif operation == 0x3000:\n # 3XNN: Skips the next instruction if VX equals NN.\n\n vx = self.read_vx(opcode)\n byte = self.read_byte(opcode)\n self.add_to_disassembly(operation, vx, byte)\n\n next_address = self.current_address + 4\n self.add_context(next_address)\n\n elif operation == 0x6000:\n # 6XNN: Sets VX to NN.\n vx = self.read_vx(opcode)\n byte = self.read_byte(opcode)\n self.add_to_disassembly(operation, vx, byte)\n\n elif operation == 0xA000:\n # ANNN: Sets I to the address NNN.\n address = self.read_address(opcode)\n assert isinstance(address, int)\n\n self.add_to_disassembly(operation, address)\n self.add_label(address)\n\n elif operation == 0xD000:\n # DXYN: Draws a sprite at coordinate (VX, VY).\n vx = self.read_vx(opcode)\n vy = self.read_vy(opcode)\n nibble = opcode & 0xF # 15\n self.add_to_disassembly(operation, vx, vy, nibble)\n else:\n print(\"Unknown opcode: 0x{:04x}\".format(opcode))\n context_change = True\n\n self.current_address += 2\n\n print(f\"\\nAll Contexts: {self.all_contexts}\")\n print(f\"Current Contexts: {self.current_contexts}\")\n print(f\"Labels: {self.labels}\\n\")\n\n if len(self.current_contexts) > 0:\n self.decode(self.current_contexts.pop())",
"def exec(self, cpu, memory):\n\n def _stall():\n pass\n\n addr_low = cpu.exec_in_cycle(memory.stack_pop, cpu)\n addr_high = cpu.exec_in_cycle(memory.stack_pop, cpu)\n addr_high = addr_high << 8\n addr = AddressMode.get_16_bits_addr_from_high_low(addr_high, addr_low)\n cpu.pc = addr + 1\n cpu.exec_in_cycle(_stall)\n cpu.exec_in_cycle(_stall)\n cpu.exec_in_cycle(_stall)",
"def ip_command():\n # 1. Get input host from Demisto\n ip = demisto.args().get('ip')\n if not is_ip_valid(ip):\n return_error('Invalid IP address, Please retry with a valid IP address')\n # 2. Get the host reputation from SlashNext API\n response = ip_lookup(ip=ip)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n dbot_score_cont, ip_cont = get_dbot_std_context(\n ip, 'IP', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(ip, 'IP', response.get('threatData'))\n\n ec = {\n 'SlashNext.IP(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'IP': ip_cont\n }\n\n title = 'SlashNext Phishing Incident Response - IP Lookup\\n' \\\n '##### ip = {}'.format(ip)\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)",
"def RET(self):\n\t\tself.SP -= 1\n\t\tself.IP = self.stack[self.SP]",
"def instruction_ret(self):\n if len(self.stack) == 0:\n self.halt = True\n return\n\n address = self.stack_pop()\n\n if Vm.is_register(address):\n address = self.get_register(address)\n\n self.exec_ptr = Vm.filter_mem_address(address)",
"def read_address(self, opcode: int) -> int:\n\n if self.insight:\n self.insight.address(opcode)\n\n return opcode & 0xFFF",
"def RET(self):\n self.pc = self.ram[self.sp]\n self.pc += 2\n self.sp -= 1\n\n pass",
"def getInstructionBefore(self, instruction: ghidra.program.model.listing.Instruction) -> ghidra.program.model.listing.Instruction:\n ...",
"def getInstructionContaining(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ..."
]
| [
"0.677617",
"0.63121897",
"0.57688415",
"0.57639194",
"0.56438655",
"0.56249213",
"0.5590774",
"0.5558426",
"0.5549947",
"0.55308354",
"0.5505338",
"0.54756784",
"0.5458533",
"0.5436289",
"0.53643537",
"0.53420275",
"0.533024",
"0.53019565",
"0.5229638",
"0.51992863",
"0.51968795",
"0.5181154",
"0.51629895",
"0.5093271",
"0.5038856",
"0.50213784",
"0.5002345",
"0.49817422",
"0.49530223",
"0.4944622"
]
| 0.7375822 | 0 |
Gets book statuses from the dim_book_statuses table. | def get_book_statuses() -> list:
return data.get_book_statuses() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_statuses(self):\n return self.statuses",
"def statuses(self):\n return self._get_paged(\"statuses\")",
"async def getstatuses(self, ctx):\n final_list = \"\"\n statuses = await ex.get_bot_statuses()\n if statuses is not None:\n for status in await ex.get_bot_statuses():\n final_list += f\"{status[0]}\\n\"\n else:\n final_list = \"None\"\n embed = discord.Embed(title=\"Statuses\", description=final_list)\n await ctx.send(embed=embed)",
"def _get_loadbalancer_statuses(self, lb_id):\n resource_path = \"%s/%s/%s/statuses\" % (RESOURCE_PREFIX,\n LBS_RESOURCE,\n lb_id)\n try:\n statuses = self.client.retrieve_resource(\n \"GLOBAL\", resource_path)[1]['dict']\n except ncc_client.NCCException as e:\n if e.is_not_found_exception():\n return {\"lb_statuses\": None}\n else:\n return None\n statuses = statuses[\"statuses\"]\n return {\"lb_statuses\": statuses}",
"def statuses(self):\n big = BigCommerceAPI()\n response = big.get('orderstatuses')\n return response.text",
"def get_post_statuses(self):\n statuses = self.session.query(PostStatus).all()\n return statuses",
"def get_author_statuses(self):\n author_statuses = self.session.query(AuthorStatus).all()\n return author_statuses",
"def status_get(): # noqa: E501\n db = get_db()\n return [{'id': sample, 'status': db['samples'][sample]['status']} for sample in db['samples'].keys()]",
"def available_statuses(self):\n return self.pipeline.get(self.status, ())",
"def available_statuses(self):\n return self.pipeline.get(self.status, ())",
"def get_statuses():\n statuses = list()\n\n for status in tweepy.Cursor(api.user_timeline, id=836104384366936066).items():\n if is_prediction(status):\n statuses.append(status)\n else:\n continue\n\n return statuses",
"def get_status(self, ids):\n return [self.tasks[id].status for id in ids]",
"def get_statuses(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_statuses\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result",
"def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)",
"def get_status(self, rows):\n\n\t\taccount_status = {}\n\t\tfor row in rows:\n\t\t\t(account_number, status) = (int(row[0]), row[2])\n\t\t\tif account_status.has_key(account_number):\n\t\t\t\taccount_status[account_number].append(status)\n\t\t\t\t# Log account information if account has more than 1 current active status\n\t\t\t\tself.log.debug(\"Multiple Current Statuses for Account Number:\" + account_number)\n\t\t\telse:\n\t\t\t\taccount_status[account_number] = [status]\n\n\t\treturn account_status",
"def select_by_status(status):\n sql = 'checkStatus'\n val = [status]\n rows = DBconnector.call_procedure(sql, val)\n for r in rows:\n return _wrap_in_parcel_list(r.fetchall())",
"def get_statuses() -> Union[dict, None]:\n with lock:\n if running:\n return statuses\n return None",
"def get_post_status_list(self, blogid=1):\n return self.execute('wp.getPostStatusList', blogid, self.username, self.password)",
"def GetStatuses(self, limit = -1, since = -1, offset = -1):\n\n if (limit < 1):\n limit = self.limit\n\n url = self.__BuildGetUrl(\"statuses\", self.userName, limit, since, offset)\n return self.__GetJson(url, True)",
"def getAllStatus(self) -> DataFrame:\n return self.writer.getAllStatus()",
"def GetStatusesForAll(self, limit = -1, since = -1, offset = -1):\n\n if (limit < 1):\n limit = self.limit\n\n url = self.__BuildGetUrl(\"statuses\", \"\", limit, since, offset)\n return self.__GetJson(url, True)",
"def get_status_cols(self, key):\n colours = dict()\n for s_name, status in self.fastqc_statuses[key].items():\n colours[s_name] = self.status_colours.get(status, self.status_colours['default'])\n return colours",
"def get_page_status_list(self, blogid=1):\n \n return self.execute('wp.getPageStatusList', blogid, self.username, self.password)",
"def get_approval_statuses(self):\n approval_statuses = self.session.query(Approval).all()\n return approval_statuses",
"def load_status_table():",
"def get_latest_statuses(self):\n self.status_lock.acquire()\n status = copy.deepcopy(self.cached_status)\n self.status_lock.release()\n return status",
"def get_comment_statuses(self):\n comment_statuses = self.session.query(CommentStatus).all()\n return comment_statuses",
"def get_trackback_statuses(self):\n statuses = self.session.query(TrackbackStatus).all()\n return statuses",
"def get_statuses(self, sha):\n return self.client.request(\n self.base_path + \"/commits/\" + sha + \"/statuses\")",
"def service_statuses(self) -> Optional[pulumi.Input['ServiceStatusesArgs']]:\n return pulumi.get(self, \"service_statuses\")"
]
| [
"0.63397527",
"0.62338936",
"0.6056262",
"0.5881798",
"0.58743775",
"0.5870557",
"0.5784424",
"0.5598575",
"0.53600687",
"0.53600687",
"0.5335189",
"0.52850866",
"0.5255495",
"0.5245465",
"0.5239241",
"0.52107036",
"0.52041435",
"0.51484483",
"0.51168203",
"0.5098181",
"0.5082854",
"0.50684375",
"0.5051957",
"0.5042838",
"0.5040666",
"0.504017",
"0.50365627",
"0.50331825",
"0.5027568",
"0.5027367"
]
| 0.7752342 | 0 |
compute the LR1 closure of items | def closure(items, ruleSet, terminals):
I = copy.deepcopy(items)
added = 1
while added:
added = 0
#for each item [A -> alpha . B Beta, a] in I (result)
for item in I:
if item.pointAtEnd(): continue
A = item.lhs
alpha = item.rhs[:item.dotPos]
B = item.rhs[item.dotPos]
Beta = item.rhs[item.dotPos + 1:]
a = item.lookaheads
#for each production B -> gamma in G'
for prod in getProductions(ruleSet, B):
#and each terminal b in FIRST(Beta a)
b = FIRSTS(Beta, a.items(), ruleSet, terminals)
newItem = prod.getItem()
newItem.lookaheads.addList(b)
#such that [B -> . gamma, b] not in I
if newItem not in I:
#add [B -> . gamma, b] to I
I.append(newItem)
added = 1
else: #newItem is in I, but are the lookaheads all there?
i = I.index(newItem)
#if they aren't add them and say we've added something
if (newItem.lookaheads != I[i].lookaheads
and not I[i].lookaheads.contains(newItem.lookaheads)):
added = 1
I[i].lookaheads.addSet(newItem.lookaheads)
return I | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_item_closure(itemset, productionset):\r\n #For every item inside current itemset, if we have the following rule:\r\n # xxx <cursor><nonterminalSymbol> xxx append every rule from self._productionruleset that begins with that NonTerminalSymbol\r\n if not isinstance(itemset, LR0ItemSet):\r\n raise TypeError\r\n import copy\r\n resultset = copy.copy(itemset)\r\n changed = True\r\n while changed:\r\n changed = False\r\n for currentitem in resultset.itemlist:\r\n nextsymbol = currentitem.next_symbol()\r\n if nextsymbol is None:\r\n break\r\n for rule in productionset.productions:\r\n newitem = LR0Item(rule)\r\n if rule.leftside[0] == nextsymbol and newitem not in resultset.itemlist:\r\n resultset.append_item(newitem)\r\n changed = True\r\n return resultset",
"def items(ruleSet, terminals, nonTerminals):\n symbols = nonTerminals + terminals\n #start with closure of [ [S' -> S, $] ]\n C = [closure([startItem], ruleSet, terminals)]\n added = 1\n while added:\n added = 0\n for I in C:\n for X in symbols:\n g = goto(I, X, ruleSet, terminals)\n if g and not fullIn(C, g):# not in C:\n C.append(g)\n added = 1\n return C",
"def reduce_run():",
"def run_memoized(items):\n global counter\n counter = 0\n print\n print \"Memoized version\"\n print \"Position with\", items, \"items is\", evaluate_memo_position(items, {0 : \"lost\"})\n print \"Evaluated in\", counter, \"calls\"",
"def calculate_parameters(self, item):",
"def make_accumulator():\n pass # replace with your solution",
"def A(lr):\n pass",
"def __call__(self, items: List[Item]) -> List[Item]:",
"def FuncItems(start):\n return ida_funcs.func_item_iterator_t(ida_funcs.get_func(start))",
"def items_generated_by_next(self):\n def lookup(rule):\n return self.grammar.rules[rule.content] if rule.is_symbol_name() else rule\n if self.the_items_generated_by_next is None:\n self.the_items_generated_by_next = []\n rhs = lookup(self.grammar.rules[self.the_next.content])\n rhs = [rhs] if rhs.is_terminal() else rhs\n # iterate over the alternatives of a Choice\n for production in rhs:\n if production.is_empty():\n # Avoid creating useless productions that have no right-hand-side\n # They can only lead to redundant reductions, and sometimes useless\n # conflicts.\n continue\n new_item = self.grammar.MakeItem(self.the_next,production,0)\n self.the_items_generated_by_next.append(new_item)\n return self.the_items_generated_by_next",
"def _step1_optimization_closure(self, iteration, step):\n if iteration == self.num_iter_first_step - 1:\n reg_noise_std = 0\n else:\n reg_noise_std = (1 / 1000.) * (iteration // 300) # TODO: make it dependant in the max number of iterations\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_first_step - 1:\n aug = 0\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_net_input = self.clean_net_inputs[aug] + (self.clean_net_inputs[aug].clone().normal_() * reg_noise_std)\n # watermark_net_input = self.watermark_net_inputs[aug] # + (self.watermark_net_input.clone().normal_())\n # mask_net_input = self.mask_net_inputs[aug]\n # applies the nets\n self.clean_net_output = self.clean_net(clean_net_input)\n self.total_loss = 0\n self.blur = 0\n self.total_loss += self.extended_l1_loss(self.clean_net_output,\n self.image_torchs[aug],\n (1 - self.watermark_hint_torchs[aug]))\n self.total_loss.backward(retain_graph=True)",
"def PreOpL(op, items):\r\n k = len(items)\r\n logk = int(ceil(log(k,2)))\r\n kmax = 2**logk\r\n output = list(items)\r\n for i in range(logk):\r\n for j in range(kmax//(2**(i+1))):\r\n y = two_power(i) + j*two_power(i+1) - 1\r\n for z in range(1, 2**i+1):\r\n if y+z < k:\r\n output[y+z] = op(output[y], output[y+z], j != 0)\r\n return output",
"def prodri(items: Iterator[float]) -> float:\n try:\n head = next(items)\n except StopIteration:\n return 1\n return head*prodri(items)",
"def multiple_eval_for_loops_v1():",
"def run(self, xs):\n \n def f(x, h):\n if not h:\n return nn.Linear(x, self.w)\n return nn.Linear(nn.ReLU(nn.Add(nn.Linear(x, self.w), nn.Linear(h, self.w_h1))), self.w_h2)\n\n h = None\n for x in xs:\n h = f(x, h)\n return nn.Linear(h, self.w_f)",
"def double_trace(l):\n return Primary(mono_sum(Monomial(\n (-1)**m * factorial(l+2)**2/(\n factorial(m) * factorial(m+2) * factorial(l-m) * factorial(l-m+2)\n ), m+1, l-m+1) for m in range(l+1)))",
"def __call__(self, x_init):\n x1 = x_init\n x2 = x_init\n for f in self.path1_blocks:\n x1 = f(x1)\n for f in self.path2_blocks:\n x2 = f(x2)\n return x1 + x2",
"def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg",
"def setup_RL(self):\n for n in range(self.L):\n self.update_RL(n)\n return self.R[-1]",
"def pre_compute(self, e_list):\n\t\tpass",
"def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc",
"def general_poly (L):\r\n\r\n def secondFunc(x):\r\n total = 0\r\n listLength = len(L)-1\r\n for i in L:\r\n total += i * x**listLength\r\n listLength -= 1\r\n return(total)\r\n return secondFunc",
"def multiple_eval_for_loops_v2():",
"def body(i, *args):\n del args\n fn_result = fn(ctx, iterator.get_next())\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs",
"def step1(ball_list, step,borders,obstacle=None):\n\n index_list = range(len(ball_list))\n for i in index_list:\n ball_list[i].compute_refl(step,borders,obstacle)\n for j in index_list:\n if i!=j:\n ball_list[i].compute_coll(ball_list[j],step)\n return ball_list",
"def _fold_rec(cls, f, agg, next):\n\n if next is None:\n return agg\n (val, next) = next\n return cls._fold_rec(f, f(val, agg), next)",
"def foldr(link, fn, z):\n \"*** YOUR CODE HERE ***\"",
"def lambda_func(self):\n air = self.air_alias.val\n fuel = self.fuel_alias.val\n\n m_air = 0\n m_fuel = 0\n\n for i in self.inl:\n m_air += (i.m.val_SI * i.fluid.val[air])\n m_fuel += (i.m.val_SI * i.fluid.val[fuel])\n\n return self.lamb.val - m_air / (m_fuel * self.air_min)",
"def reduce_my(self, func: Callable[[int, int], int], key: keyType, initial_state: int) -> int:\n iterable = self.get_by_key(key)\n tmp = [] # type: List\n if not isinstance(iterable, list):\n tmp = list([iterable])\n else:\n tmp = iterable\n it = iter(tmp)\n value = initial_state\n for element in it:\n # Support the element with one-dimension list\n if isinstance(element, list):\n for e in element:\n value = func(value, e)\n else:\n value = func(value, element)\n return value",
"def apply(self):"
]
| [
"0.5816267",
"0.57219464",
"0.5648127",
"0.54384965",
"0.54112405",
"0.54015505",
"0.533691",
"0.52744246",
"0.52543527",
"0.5183656",
"0.5157742",
"0.514377",
"0.5122546",
"0.51188684",
"0.51163983",
"0.51161325",
"0.51054764",
"0.5103773",
"0.5095413",
"0.50749654",
"0.50741065",
"0.50720584",
"0.50582725",
"0.5033634",
"0.5025138",
"0.4974081",
"0.49678728",
"0.49523106",
"0.49434313",
"0.49429548"
]
| 0.61120766 | 0 |
find targetSet in C | def fullIndex(C, targetSet):
for i in range(len(C)):
if not fullCmpSets(C[i], targetSet):
return i
raise 'OhShit', "couldn't find %s\n%s" % (targetSet, C[4]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSets():",
"def test_find_sets(self):\n cards = numpy.array([[1,1,1,2,0],\n [0,1,2,2,2],\n [0,1,2,2,2],\n [0,1,2,2,2]])\n\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)",
"def get_from_set(set_):\n for e in set_: return e",
"def get_targets(self, group_set, target_set=[]):\r\n result_id = []\r\n if not isinstance(group_set, list):\r\n for target in self._db(self._db.target).select():\r\n result_id.append(target.id)\r\n else:\r\n rows = self._db(self._db.targetgroup).select()\r\n for row in rows:\r\n if row.id in group_set:\r\n targets = json.loads(row.targets)\r\n for t_id in targets:\r\n result_id.append(self._db(self._db.target.id==t_id\r\n ).select().first().id)\r\n result_id += target_set\r\n\r\n result = []\r\n for target_id in set(result_id):\r\n result.append(self.get_target(target_id))\r\n\r\n return result",
"def search_node_names(nodeset, target_name):\n # return the filtered set of nodes\n return \\\n {\n node for node in set(nodeset)\n if node.get_name().__eq__(target_name)\n }",
"def getSet(unique_name):",
"def getSet(unique_name):",
"def get_for_targets(self, targets):\n products = OrderedSet()\n for target in targets:\n products.update(self._products_by_target[target])\n return products",
"def getSets(unique_name=None):",
"def get_targets_in_sif_file(sif_file, targets):\n targets_in_network = set()\n str_tar = [str(x) for x in targets]\n with open(sif_file, 'r') as sif_fd:\n for line in sif_fd:\n node1, score, node2 = line.strip().split('\\t')\n if node1 in str_tar:\n targets_in_network.add(node1)\n if node2 in str_tar:\n targets_in_network.add(node2)\n return list(targets_in_network)",
"def get_targets():\n # Use a list comp because querying MODM with Guid.find(Q('referent', 'eq', None))\n # only catches the first case.\n return [each for each in Guid.find() if each.referent is None]",
"def _random_subset(self, pa_nodes, seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n # if x in pa_nodes:\n if pa_nodes.get(x, False):\n targets.add(x)\n else:\n pass\n return targets",
"def make_set_cover_pos(gRNA_hits, num_sets = 1, target_ids = [], algorithm = \"LAR\",\n id_key = lambda x: x, tie_breaker = tie_break_first, suppress_warning = False):\n # exclude_seqs = set(str(s).upper() for s in exclude_seqs)\n # gRNA_coverage = {seq: hits for seq, hits in gRNA_hits.hits.items()\n # if str(seq).upper() not in exclude_seqs}\n gRNA_coverage = gRNA_hits.hits\n eliminated_gRNA = {}\n ## prepare target ids\n if not target_ids:\n target_ids = set(hit.target_id for hit in gRNA_hits.flatten_hits())\n else:\n target_ids = set(target_ids)\n ## selected set cover algorithm\n if algorithm in (\"LAR\", \"greedy\"):\n set_cover_algo = set_cover_LAR if algorithm == \"LAR\" else set_cover_greedy\n else:\n raise Exception(f\"Invalid algorithm name: '{algorithm}'\")\n def coverage_possible():\n return set(id_key(hit) for hits in gRNA_coverage.values() for hit in hits) >= set(target_ids)\n ## function to generate set covers\n def make_set_cover(restore = []):\n for grna in restore:\n gRNA_coverage[grna.seq] = eliminated_gRNA[grna.seq]\n if not coverage_possible():\n if not suppress_warning:\n print((\"\\nError: The provided gRNA sequences cannot cover all target sequences\"\n \" at least once.\\n\"))\n return []\n selected_grnas = set_cover_algo(gRNA_coverage, target_ids, id_key = id_key, tie_breaker = tie_breaker)\n ## remove selected gRNA from candidates, and covert to gRNA object\n output = []\n for grna_seq in selected_grnas:\n ## remove\n eliminated_gRNA[grna_seq] = gRNA_coverage[grna_seq]\n del gRNA_coverage[grna_seq]\n ## convert gRNA sequences to gRNA object\n grna_seq_obj = gRNA_hits.get_gRNAseq_by_seq(grna_seq)\n output.append(gRNA(grna_seq_obj.id, grna_seq_obj))\n return output\n return make_set_cover",
"def _resolve_TaskSet(self, desired_metadata):\n desired_keys = []\n for node in self.tasksets._v_groups:\n attrs = getnode(self.tasksets,node)._v_attrs\n if metadata_matches(attrs, desired_metadata):\n desired_keys.append(node)\n return desired_keys",
"def targets(cls, spec):\r\n return set(target for target, _ in SpecParser(cls.build_root).parse(spec) if target)",
"def find_set(self):\n return self._set_set(self._find_set())",
"def _get_targets(input_: Optional[Collection],\n all_: Collection[Any],\n type_: str,\n ) -> Collection[Any]:\n if input_ is None:\n return all_\n target = []\n for item in input_:\n if item not in all_:\n logger.warning(f'unknown target {type_}: {item}')\n continue\n target.append(item)\n return target",
"def find_one_independent_choose(all_set_variables):\n task_list = []\n for key in all_set_variables:\n value = all_set_variables[key]\n choose_keywords = list(value)\n for choose_keyword in choose_keywords:\n set_vars = value[choose_keyword]\n task_list.append((key, choose_keyword))\n task_list = add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def basic_find_one_independent_choose(all_set_variables):\n task_list = []\n for choose_keyword in list(all_set_variables):\n # for choose_keyword, set_vars in six.iteritems(value):\n task_list.append(choose_keyword)\n task_list = basic_add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def FindQualifiedTargets(target, qualified_list):\n return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]",
"def keys(targets):",
"def target_roots(self):\r\n return self._target_roots",
"def targetids(obj, reftype):",
"def identify(cls, targets):\r\n return cls.combine_ids(target.id for target in targets)",
"def select_for_target(self, target):\n\n return [x for x in self.objects if x.target == target]",
"def getResult(targets, i=None):",
"def identify_member_sets(index):\n\n if index is None:\n return []\n queue = [index]\n ans = []\n while queue:\n s = queue.pop(0)\n if not isinstance(s, _SetProduct):\n ans.append(s)\n else:\n queue.extend(s.set_tuple)\n return ans",
"def nn_set2set_match(descs1, descs2):\n idxs = nn_set2set_match_cuda(descs1.unsqueeze(0).cuda(), descs2.unsqueeze(0).cuda()).detach().cpu().long()\n return idxs[0]",
"def neighbors(node, test_set):\r\n result = set()\r\n for neighbor in node.neighbors:\r\n if neighbor in test_set:\r\n result.add(neighbor)\r\n return result",
"def test_general_subset_dset():\n pass"
]
| [
"0.6221172",
"0.6146109",
"0.6115618",
"0.6065418",
"0.60626835",
"0.6042033",
"0.6042033",
"0.59801114",
"0.59222025",
"0.5874613",
"0.58715564",
"0.58662826",
"0.58362824",
"0.5833347",
"0.58118105",
"0.5797853",
"0.57861584",
"0.57830733",
"0.5778525",
"0.57779676",
"0.5772928",
"0.5732466",
"0.57080424",
"0.56954914",
"0.56451",
"0.56358933",
"0.5617511",
"0.5547733",
"0.5484968",
"0.5482738"
]
| 0.6192314 | 1 |
Generate an LR(1) state table | def generateStateTable(C, ruleSet, terminals, indexFunc):
#initialize the state dictionary
stateDict = {}
for i in range(len(C)):
stateDict[i] = {}
gotoDict = {}
for i in range(len(C)):
gotoDict[i] = {}
#compute the states
for state in range(len(C)):
for item in C[state]:
exp = item.expects()
targetSet = goto(C[state], exp, ruleSet, terminals)
#check for conflicts
#if there is a goto, shift
if targetSet:
#targetState = C.index(targetSet)
#targetState = fullIndex(C, targetSet)
targetState = indexFunc(targetSet)
if stateDict[state].has_key(exp):
x = stateDict[state][exp]
if x[0] == SHIFT and x[1] != targetState:
print 'shift/shift conflict! for %s' % item
print 'favoring this on %s' % exp
if x[0] == REDUCE:
print ('shift/reduce conflict for %s, was reducing '\
'by %s, now shifting on %s') % (
item, str(ruleSet[x[1]]), exp)
stateDict[state][exp] = (SHIFT, targetState)
#else if point is at the end and lhs isn't S', reduce
elif item.pointAtEnd() and item.lhs != "S'":
for i in item.lookaheads:
if stateDict[state].has_key(i):
x = stateDict[state][i]
if x[0] == SHIFT:
print ('shift/reduce conflict for %s, was '
'shifting, will not reduce') % item
if x[0] == REDUCE:
print 'reduce/reduce conflict for %s' % item
thisRule = ruleSet[item.ruleNumber]
thatRule = ruleSet[x[1]]
if len(thisRule.rhs) > len(thatRule.rhs):
print 'favoring redux by %s over %s' % (
thisRule, thatRule)
stateDict[state][i] = (REDUCE,
item.ruleNumber)
else:
print 'favoring redux by %s over %s' % (
thatRule, thisRule)
print
else:
stateDict[state][i] = (REDUCE, item.ruleNumber)
#else if point is at the and and lhs is S', accept
elif item.pointAtEnd() and item.lhs == "S'":
for i in item.lookaheads:
stateDict[state][i] = (ACCEPT, item.ruleNumber)
#else, panic
else:
raise RuntimeError, 'Waaaaaaah!!! Aieeee!'
#compute goto table
## ATC -- this and the LR0 version are identical, move to common
for state in range(len(C)):
for item in C[state]:
targetSet = goto(C[state], item.lhs, ruleSet, terminals)
if targetSet:
targetState = C.index(targetSet)
gotoDict[state][item.lhs] = targetState
return stateDict, gotoDict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table",
"def table_walk(self):\n power = self.k ** (2 * self.r + 1)\n rule_set = [0] * power\n changes = np.random.choice(power - 1, int(self.lambda_param * power),\n replace=False)\n for i in range(len(changes)):\n rule_set[changes[i]] = np.random.randint(1, self.k)\n return rule_set",
"def createStateTable(self):\r\n tableName = self._names['state']\r\n con = None\r\n try:\r\n con = sql.connect(self._filename, timeout=10, \r\n isolation_level=\"IMMEDIATE\")\r\n with con:\r\n c = con.cursor()\r\n c.execute(\"CREATE TABLE IF NOT EXISTS \"\r\n \"{}(id INTEGER PRIMARY KEY, manager TEXT, \"\r\n \"module TEXT, state TEXT)\".format(tableName))\r\n finally:\r\n _closeConnection(con)\r\n return tableName",
"def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)",
"def rl():\n q_table = build_q_table(N_STATES, ACTIONS)\n for episode in range(MAX_EPISODES):\n step_counter = 0\n # initial\n S = 0\n is_terminated = False\n update_env(S, episode, step_counter)\n while not is_terminated:\n A = choose_action(S, q_table)\n S_, R = get_env_feedback(S, A)\n q_predict = q_table.ix[S, A]\n if S_ != 'terminated':\n q_target = R + LAMBDA * q_table.iloc[S_,:].max() # iloc: chose the specific columns based on integer\n else:\n q_target = R\n is_terminated = True\n q_table.ix[S, A] += ALPHA * (q_target - q_predict)\n # next_state <- old_state\n S = S_\n update_env(S, episode, step_counter + 1)\n step_counter += 1\n return q_table",
"def readState(f: TextIOWrapper) -> StateNode:\n table = []\n line = f.readline().strip()\n while len(line) > 0:\n table.append(line)\n line = f.readline().strip()\n line_lengths = [len(x) for x in table]\n\n # print(\"Table: \", table)\n # print(\"Lengths of table: \", line_lengths)\n\n if len(table) == 0:\n raise ValueError(\"State is missing first line of data!\")\n if min(line_lengths) != max(line_lengths):\n raise ValueError(\"State doesn't have all lines of equal size!\")\n return StateNode(\n table, \n (list(range(len(table))), list(range(len(table[0])))), \n ([], []), \n 0, \n None\n )",
"def test_case_generate(self):\n\n # initialization\n state = np.random.choice(self.init_states)\n model = rm.randint(0, self.model_num - 1)\n duration = np.random.choice(self.step_values)\n temp = rm.randint(self.min_temp, self.max_temp)\n\n self.states = [[model, duration, temp]]\n self.time = duration\n\n while self.time < self.max_time:\n if state == \"inc_tmp\":\n change = np.random.choice(\n self.transitionName[0], p=self.transitionMatrix[0]\n ) # choose the next state\n if change == \"S1S1\": # stay in the same state\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n diff = (\n self.max_time - self.time\n ) # this is for ensuring the maximum duration is not exceeded\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S1S2\": # change from increase to decrease\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"dec_tmp\"\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n else:\n print(\"Error\")\n\n elif state == \"dec_tmp\":\n change = np.random.choice(\n self.transitionName[1], p=self.transitionMatrix[1]\n )\n if change == \"S2S1\":\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"inc_tmp\"\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S2S2\":\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n else:\n print(\"Error\")\n pass\n else:\n print(\"Error\")\n\n return self.states_to_dict()",
"def fill_map(self):\n\n sim = Pong(max_steps=None)\n s = sim.empty_state()\n s[DEFAULT_DIMS] = DEFUALT_VALUES\n\n # Optimization issues:\n next_state = self.next_state\n next_reward = self.next_reward\n d = self.d\n\n # Make the terminal state a self-loop\n next_state[self.n] = self.n\n\n t0 = clock()\n for i in range(0, self.n, 1000000):\n for j in range(i, min(i + 1000000, self.n)):\n s[TRAIN_DIMS] = d.index_to_state(j)\n for a in c.ACTIONS:\n sim.fast_set_and_step(s, c.A_STAY, a)\n if sim.hit == \"r\":\n next_reward[j, a] = 1\n next_state[j, a] = -1\n elif sim.miss == \"r\":\n next_reward[j, a] = -1\n next_state[j, a] = -1\n else:\n next_state[j, a] = d.state_to_index(sim.s[TRAIN_DIMS])\n print(i, clock() - t0)",
"def initialize_rnn_state(state):\n if isinstance(state, tf.nn.rnn_cell.LSTMStateTuple):\n # when state_is_tuple=True for LSTM\n # print(state)\n # print(state.c)\n # print(state.h)\n # print(state.c.eval())\n # print(state.h.eval())\n # exit()\n c = state.c.eval()\n h = state.h.eval()\n return (c, h)\n # # print(state)\n # # print(state[0])\n # new_state = state\n # new_state[0].assign(state[0].eval())\n # new_state[1].assign(state[1].eval())\n # # state[0] = state[0].eval()\n # # state[1] = state[1].eval()\n # # state.c = state.c.eval()\n # # state.h = state.h.eval()\n # return new_state\n else:\n # when state_is_tuple=False for LSTM\n # or other RNNs\n new_state = state.eval()\n return new_state",
"def create_train_state(\n config, rng, learning_rate_fn, example_batch\n):\n model, variables, metric_collector = create_model(config, rng, example_batch)\n params = variables['params']\n parameter_overview.log_parameter_overview(params)\n tx = train_utils.create_optimizer(config, learning_rate_fn)\n\n state = train_state.TrainState.create(\n apply_fn=model.apply,\n params=variables['params'],\n tx=tx,\n )\n return model, state, metric_collector",
"def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])",
"def generate_all_states(self):\n self.clingo = ClingoBridge() # reset clingo\n\n base = ('base', '')\n self.clingo.add_file('initial-states.lp')\n self.clingo.run([base])\n output = self.clingo.output\n\n num_states = int(len(output) / 2)\n\n states = np.full(num_states, object)\n for i in range(0, num_states):\n state_atoms = []\n for atom in output[i]:\n if atom.name == 'state':\n state_atoms.append(atom)\n states[i] = self.parse_state(state_atoms)\n return states",
"def compute_lookuptable(self):\n\n if self.uselookuptable:\n # Evaluation lookup tables \n self.action_isok = np.zeros( ( self.nodes_n , self.actions_n ) , dtype = bool )\n self.x_next = np.zeros( ( self.nodes_n , self.actions_n , self.DS.n ) , dtype = float ) # lookup table for dynamic\n \n # For all state nodes \n for node in range( self.nodes_n ): \n \n x = self.nodes_state[ node , : ]\n \n # For all control actions\n for action in range( self.actions_n ):\n \n u = self.actions_input[ action , : ]\n \n # Compute next state for all inputs\n x_next = self.DS.fc( x , u ) * self.dt + x\n \n # validity of the options\n x_ok = self.DS.isavalidstate(x_next)\n u_ok = self.DS.isavalidinput(x,u)\n \n self.x_next[ node, action, : ] = x_next\n self.action_isok[ node, action] = ( u_ok & x_ok )",
"def generate_states(esncell, xs, h0):\n (map_ih, (Whh, shape), bh) = esncell\n def _step(h, x):\n #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x) + bh)\n h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x))\n return (h, h)\n (h, hs) = lax.scan(_step, h0, xs)\n return (h, hs)",
"def RoutingTable(M):\n G = StateTransitionGraph(M)\n current = initialState = next(iter(M))\n\n # find list of tokens that lead to the initial state\n activeTokens = set()\n for LG in breadth_first_levels(G, initialState):\n for v in LG:\n for w in LG[v]:\n activeTokens.add(G[w][v])\n for t in activeTokens:\n if M.reverse(t) in activeTokens:\n raise MediumError(\"shortest path to initial state is not concise\")\n activeTokens = list(activeTokens)\n inactivated = object() # flag object to mark inactive tokens\n\n # rest of data structure: point from states to list and list to states\n activeForState = {S: -1 for S in M}\n statesForPos = [[] for i in activeTokens]\n\n def scan(S):\n \"\"\"Find the next token that is effective for s.\"\"\"\n i = activeForState[S]\n while True:\n i += 1\n if i >= len(activeTokens):\n raise MediumError(\n \"no active token from %s to %s\" % (S, current))\n if activeTokens[i] != inactivated and M(S, activeTokens[i]) != S:\n activeForState[S] = i\n statesForPos[i].append(S)\n return\n\n # set initial active states\n for S in M:\n if S != current:\n scan(S)\n\n # traverse the graph, maintaining active tokens\n visited = set()\n routes = {}\n for prev, current, edgetype in dfs_search(G, initialState):\n if prev != current and edgetype != NONTREE:\n if edgetype == REVERSE:\n prev, current = current, prev\n\n # add token to end of list, point to it from old state\n activeTokens.append(G[prev][current])\n activeForState[prev] = len(activeTokens) - 1\n statesForPos.append([prev])\n\n # inactivate reverse token, find new token for its states\n activeTokens[activeForState[current]] = inactivated\n for S in statesForPos[activeForState[current]]:\n if S != current:\n scan(S)\n\n # remember routing table as part of returned results\n if current not in visited:\n for S in M:\n if S != current:\n routes[S, current] = activeTokens[activeForState[S]]\n\n return routes",
"def calculate_new_state(state, rules):\n closed_line = f'{state[-1]}{state}{state[0]}'\n listed = list(window(closed_line))\n new_state = ''.join(rules[stride] for stride in listed)\n return new_state",
"def stateOccupationProbabilityGeneration(self):\n self.L = zeros((self.noOfEmmittingStates, self.T))\n\n for j in range(self.noOfEmmittingStates):\n for t in range(self.T):\n self.L[j,t] = (self.alpha[j+1, t+1] * self.beta[j+1, t+1]) / self.observationLikelihood",
"def train(esncell, states, labels):\n Who = lstsq_stable(states, labels)\n return esncell + (Who,)",
"def _build_sparse_table(self):\n self._table = {}\n\n for p in self._jump_nodes:\n self._table[p.index()] = [self._tree.parent(p)] # table[p][0] = parent(p)\n\n l = 0\n while l < self._logsize:\n u = self._table[p.index()][l]\n\n if u is None:\n break\n\n if self._ind[u.index()] < self._pow[l]: # incomplete ladder\n break\n\n i = self._path[u.index()] # u belongs to path_i\n j = self._ind[u.index()] # path_i[j] = u\n w = self._ladders[i][j - self._pow[l]]\n self._table[p.index()].append(w)\n l += 1",
"def generate_next_state(self, action) :\n raise NotImplementedError",
"def create_result_states(self):\n\n # Dictionary of Final TP States (== the winners)\n self.final_TPStates = dict()\n for stimulus in self.inputNames:\n for i in range(self.settings['epochs']):\n key = stimulus + \"/\" + str(i)\n self.final_TPStates[key] = 0",
"def lfads_decode_prior_one_step_scan(params, lfads_hps, state, key_n_ii):\n key, ii = key_n_ii\n _, ib, g, f = state\n state_and_returns = lfads_decode_prior_one_step(params, lfads_hps, key,\n ii, ib, f, g)\n g, f, ii, ib, lograte = state_and_returns\n state = (ii, ib, g, f)\n return state, state_and_returns",
"def forwardVariableGeneration(self):\n self.alpha = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialistation\n self.alpha[0,0] = 1.0\n self.alpha[1:,0] = 0.0\n self.alpha[0,1:] = 0.0\n\n # main recursion\n for t in range(1, self.T+1):\n for j in range(1, self.noOfEmmittingStates+1):\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k, t-1] * self.transitionMatrix[k, j-1])\n self.alpha[j, t] = self.b[j-1, t-1] * partialSum\n # since must end in final state, last alpha for states with zero transition\n # prob to last state must be zero?\n for row in range(self.transitionMatrix.shape[0]):\n if self.transitionMatrix[row,-1] == 0.0:\n self.alpha[row,-1] = 0.0\n # fwd prob variable for final state at 'last' timestep gets bumped into the\n # final column to save having a needless column\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k,-1] * self.transitionMatrix[k,-1])\n self.alpha[-1,-1] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]",
"def step(state_ind, transmat, atol=1e-7):\n pvals = np.round(transmat[state_ind] / atol) * atol\n pvals = pvals / np.sum(pvals)\n next_state_onehot = np.random.multinomial(1, pvals=pvals)\n next_state = np.where(next_state_onehot)[0][0]\n return next_state",
"def init_states(batch_size, num_lstm_layer, num_hidden):\n init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n return init_c + init_h",
"def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, cell_state",
"def next_state_generation(state, target, operators):\n current_swap_state = ''\n current_swap_res = sys.maxsize\n current_change_state = ''\n current_change_res = sys.maxsize\n # for swapping\n for i in range(0, len(state), 2):\n for j in range(2, len(state), 2):\n new_swap_state, swap_res = swap(i, j, state, target)\n if current_swap_res > swap_res:\n current_swap_res = swap_res\n current_swap_state = new_swap_state\n # for changing\n for i in range(1, len(state) - 1, 2):\n new_change_state, change_res = change(i, random.choice(operators), state, target)\n if current_change_res > change_res:\n current_change_res = change_res\n current_change_state = new_change_state\n # return the lowest of the 2 value and state amongst swapping and changing operations\n print(\"Swap res:\", current_swap_res)\n print(\"Change res:\", current_change_res)\n if current_swap_res < current_change_res:\n print(\"Best State \", current_swap_state)\n print(\"Distance from target: \", current_swap_res)\n print()\n return current_swap_state, current_swap_res\n else:\n print(\"Best State \", current_change_state)\n print(\"Distance from target: \", current_change_res)\n print()\n return current_change_state, current_change_res",
"def generate_state():\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state",
"def initialize_state(self):\n # Initialize everything to zero\n self.stateC = self.initializer((self.nSym, 1))\n self.stateC_prev = self.initializer((self.nSym, 1))\n self.state = self.toNeural(self.stateC)\n self.state_prev = self.toNeural(matrix=self.stateC_prev)\n self.inpC = self.initializer((self.nSym, 1))\n self.inpS = self.toNeural(self.inpC)\n\n # Create full traces\n self.create_full_traces()\n\n # Initialize Lotka Volterra\n self.LV_Matrices()\n\n # Allocate Temperature and Lambda\n self.vars['T'] = 0\n self.vars['lambda'] = 0",
"def make_tables(self):\n r = np.zeros((self.size*self.size, 4))\n p = np.zeros((self.size*self.size, 4, self.size*self.size))\n directions = np.array([[1, -1, 0, 0], [0, 0, -1, 1]])\n for x in range(self.size):\n for y in range(self.size):\n for a in range(4):\n i = x*self.size + y\n r[i, a] = self.reward((x, y))\n if (x, y) == (self.size-1, self.size-1) or \\\n (x, y) == (self.mid, self.mid):\n p[i, a, 0] = 1\n else:\n for d in range(4):\n dx, dy = directions[:, d]\n x_ = max(0, min(self.size-1, x+dx))\n y_ = max(0, min(self.size-1, y+dy))\n j = x_*self.size + y_\n if self.noise is not None:\n p[i, a, j] += 0.3 * self.noise[x, y, a, d] + 0.7 * int(a == d)\n else:\n p[i, a, j] += int(a == d)\n return r, p"
]
| [
"0.6823201",
"0.59635055",
"0.59432924",
"0.59324837",
"0.5919664",
"0.5785841",
"0.5745793",
"0.5742737",
"0.5727405",
"0.5704459",
"0.5688638",
"0.5662519",
"0.5636774",
"0.56181955",
"0.561092",
"0.5608363",
"0.56077516",
"0.5607666",
"0.5600288",
"0.559829",
"0.55503726",
"0.550761",
"0.55040604",
"0.5480969",
"0.54757535",
"0.5468933",
"0.5466987",
"0.5459068",
"0.54578876",
"0.54546016"
]
| 0.6284223 | 1 |
Print the invoice and mark it as sent, so that we can see more easily the next step of the workflow This Method overrides the one in the original invoice class | def invoice_print(self):
self.ensure_one()
self.sent = True
return self.env['report'].get_action(self, 'ferrua_report.report_invoice') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def action_invoice_dian_resend(self):\n self.ensure_one()\n template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)\n compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)\n ctx = dict(\n default_model='account.invoice',\n default_res_id=self.id,\n default_use_template=bool(template),\n default_template_id=template and template.id or False,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n )\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }",
"def UpdateInvoice(self):\n self.builder.get_object('GuiInvProd').get_buffer().set_text(\"Product:\\n\")\n self.builder.get_object('GuiInvPrice').get_buffer().set_text(\"Price:\\n\")\n self.amount = 0\n for items in self.prod_list:\n self.builder.get_object('GuiInvProd').get_buffer().insert_at_cursor(\n u\"%s\\n\" % items['name'])\n if self.is_member:\n self.builder.get_object('GuiInvPrice').get_buffer().insert_at_cursor(\n config.CURRENCY_SYMBOL + u\"%.2f\\n\" % items[2])\n self.amount = self.amount + items[2]\n else:\n self.builder.get_object('GuiInvPrice').get_buffer().insert_at_cursor(\n config.CURRENCY_SYMBOL + u\"%.2f\\n\" % items[3])\n self.amount = self.amount + items[3]\n if self.is_member:\n self.builder.get_object('GuiInvProd').get_buffer().insert_at_cursor(\n u\"\\nYou are a member.\")\n self.builder.get_object('GuiTotal').set_text(config.CURRENCY_SYMBOL + u\"%.2f\" % self.amount)\n self.builder.get_object('GuiInput').set_text(\"\")",
"def save_print(request):\n if request.method == \"POST\":\n initial_data, data = process_request(request)\n tax_data = {\n \"s_gst\": request.POST[\"s_gst\"],\n \"c_gst\": request.POST[\"c_gst\"],\n \"other_charges\": request.POST[\"other_charges\"],\n \"additional_notes\": request.POST[\"additional_notes\"]\n }\n\n if request.POST[\"invoice_number\"] == \"\":\n inv_num = Invoice.objects.order_by(\"-number\").first()\n\n if inv_num is None:\n inv_num = 1\n else:\n inv_num = inv_num.number + 1\n\n elif request.POST[\"invoice_number\"] != \"\" \\\n and \\\n Invoice.objects.filter(number=request.POST[\"invoice_number\"]).exists():\n error = \"Error, Invoice number Exists\"\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": InvoiceForm,\n \"stage\": \"3\",\n \"prev_data\": data,\n \"initial_data\": initial_data,\n \"error\": error\n })\n else:\n inv_num = request.POST[\"invoice_number\"]\n\n sub_total = sum([float(a.get(\"total_cost\")) for a in data])\n grand_total = sub_total + float(tax_data.get(\"other_charges\")) + \\\n ((float(request.POST[\"s_gst\"]) +\n float(request.POST[\"c_gst\"])) / 100) * sub_total\n\n return render(request,\n \"invoice/invoice_preview.html\",\n {\n \"invoice_number\": inv_num,\n \"initial_data\": initial_data,\n \"prev_data\": data,\n \"sub_total\": sub_total,\n \"tax_data\": tax_data,\n \"grand_total\": grand_total\n })",
"def abc_confirm_invoice(self, lines, packages, data, params, res):\n invoice = params.get('invoice')\n if invoice and invoice.state == 'draft':\n self.env.cr.commit()\n env = None\n try:\n # Ne cursor doesn't time out when requesting lock.\n # Could be bad I guess? Works for now.\n # TODO: Look into setting a more reasonable lock wait time.\n new_cr = Registry(self.env.cr.dbname).cursor()\n new_cr.autocommit(True)\n env = api.Environment(new_cr, self.env.uid, self.env.context)\n # Validate invoice\n invoice.signal_workflow('invoice_open')\n res['invoice']['name'] = invoice.number\n res['messages'].append(u\"Created and confirmed invoice %s.\" % invoice.number)\n res['results']['invoice'] = 'confirmed'\n # Commit to unlock the invoice sequence\n env.cr.commit()\n except Exception as e:\n res['warnings'].append((\n _(u\"Failed to confirm invoice %s!\") % (invoice and (invoice.number or invoice.name) or 'Unknown'),\n '%s\\n\\nTraceback:\\n%s' % (e.message or 'Unknown Error', traceback.format_exc())))\n finally:\n if env:\n env.cr.close()",
"def invoice():\n name = raw_input(\"What is your name? \")\n\n subtotal = sub_total()\n discount = discount_card(subtotal)\n iva = tax(subtotal, discount)\n total = total_final(subtotal, discount, iva)\n\n reset()\n print \"---------------INVOICE---------------\"\n print \"\"\n print \" DESPENSA FAMILIAR \\n\"\n print \"%s\" % name\n print \"\"\n count_products(SAVE_EXISTENT)\n print \"\\nThe subtotal is:----------- Q%.2f\" % subtotal\n print \"The discount is:----------- Q%.2f\" % discount\n print \"The tax is:---------------- Q%.2f\" % iva\n print \"The total to pay is:------- Q%.2f\" % total\n print \"-------------------------------------\"\n print \"\\n\\n---Thank you for shopping with us---\"",
"def print_invoice(request, invoice_number):\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n s_gst_val = float(sub_total) * (float(data.s_gst) / 100)\n c_gst_val = float(sub_total) * (float(data.c_gst) / 100)\n\n data.addressed_to = data.addressed_to.replace(\"\\n\", \"<br>\")\n\n return render(request,\n \"invoice/invoice_print.html\",\n {\n \"data\": data,\n \"sub_total\": sub_total,\n \"s_gst_value\": s_gst_val,\n \"c_gst_value\": c_gst_val\n })",
"def print_quotation(self):\n self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})\n return self.env['report'].get_action(self, 'ferrua_report.sale_order')",
"def display(self):\n print(f'{self.first_name} {self.last_name}, Customer#: '\n f'{self.customer_id}\\n{self.address}\\n{self.phone_number}\\n'\n f'{self.create_invoice()}')",
"def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True",
"def action_send_email(self):\n self.ensure_one()\n if self.communication_channel != \"email\":\n return False # Maybe we should raise an error here.\n\n if self.state in (\"sent\", \"done\"):\n raise ValidationError(_(\"This communication is already sent.\"))\n\n lines_2be_processed = self.credit_control_line_ids.filtered(\n lambda line: line.state != \"sent\"\n )\n\n if not lines_2be_processed:\n raise ValidationError(_(\"There is no draft lines to send.\"))\n\n self = self.with_context(lang=self.partner_id.lang)\n\n partner = self.partner_id\n mail_template = self.policy_level_id.email_template_id\n # Send the email\n partner.with_context(credit_control_mail=True).message_post_with_template(\n template_id=mail_template.id,\n model=self._name,\n res_id=self.id,\n )\n # Set the state of the credit control lines to \"queued\"\n lines_2be_processed.write({\"state\": \"sent\"})\n self.state = \"sent\"",
"def button_fac_ent(self):\n invoice = self._fac_ent()\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }",
"def create_order_amended_invoice(sender, instance, using, **kwargs):\n\n sender_name = sender._meta.model.__name__\n\n if sender_name == \"WillOrder\":\n order = instance\n elif sender_name == \"Allocation\":\n order = instance.asset_store.order\n else:\n order = instance.order\n\n if Invoice.objects.filter(\n order=order, been_paid=True, parent_invoice=None\n ).exists():\n amended_invoice_required = False\n latest_paid_invoice = order.invoice.latest_paid()\n print(\"latest_paid_invoice\", latest_paid_invoice)\n if latest_paid_invoice:\n order_details = InvoiceService(order).limit_details\n\n for order_detail, order_numbers in order_details.items():\n try:\n willorder_limit = OrderLimit.objects.get(\n invoice=latest_paid_invoice, detail=order_detail\n )\n if order_numbers > willorder_limit.limit:\n amended_invoice_required = True\n except OrderLimit.DoesNotExist:\n amended_invoice_required = True\n\n parent_invoice = Invoice.objects.get(order=order, parent_invoice=None)\n\n if amended_invoice_required:\n if Invoice.objects.filter(\n order=order, been_paid=False, parent_invoice=parent_invoice\n ).exists():\n print(\"UPDATE AMENDED INVOICE\")\n order.invoice.latest().update_invoice()\n else:\n Invoice.objects.create(\n order=order, parent_invoice=parent_invoice)\n else:\n print(\"DELETE AMENDED INVOICE\")\n if Invoice.objects.filter(\n order=order, been_paid=False, parent_invoice=parent_invoice\n ).exists():\n Invoice.objects.get(\n order=order, parent_invoice=parent_invoice, been_paid=False\n ).delete()",
"def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n income_account = product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 3\n invoiced_records = self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result",
"def onchange_invoice(self):\n self.product_id = False\n self.date = self.invoice.date_invoice\n self.name = (self.invoice and self.invoice.reference) or ''\n self.analytic_account_id = False\n self.unit_amount = self.invoice.residual\n self.quantity = 1\n self.total_amount = self.unit_amount",
"def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals",
"def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals",
"def printPayment(self):\n print self.output()",
"def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]",
"def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]",
"def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id",
"def payment_completed(order_id):\n order = Order.objects.get(id=order_id)\n\n #create invoice email\n subject = f'My Shop - EE Invoice no. {order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject, message, '[email protected]', [order.email])\n\n #generate PDF\n html = render_to_string('admin/orders/order/pdf.html', {'order': order})\n out =BytesIO()\n stylesheets = [weasyprint.CSS(settings.STATIC_ROOT + 'pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out, stylesheets=stylesheets)\n\n #attach PDf file\n email.attach(f'order_{order.id}.pdf', out.getvalue(), 'application/pdf')\n\n #send email\n email.send()",
"def create_invoice(self, order): # noqa:max-complexity=18\n\n if len(order['order_lines']) == 0:\n raise RuntimeError(\n \"Expected 1 order_lines in order {}, got: {}\".format(\n order['order_id'],\n order['order_lines']\n )\n )\n\n order_id = order['order_id']\n\n refund = False\n if order['state'] == 'REFUND':\n refund = True\n self.stdout.write(self.style.WARNING(\"Refunded order: {}\".format(order_id)))\n elif order['state'] == 'PAID':\n pass\n else:\n self.stdout.write(self.style.WARNING(\"Not processing unknown order state {} for: {}\".format(order['state'], order_id)))\n return\n\n if self.only_known and order_id not in billy.TICKETBUTLER_IGNORE_LIST:\n self.stdout.write(self.style.WARNING(\"Only processing known invoices, skipping {}\".format(order_id)))\n return\n\n # Object containing all created tickets, to have an invoice relation\n # appended later\n ticketbutler_tickets = []\n\n for ticket in order['tickets']:\n\n sprints = list(filter(\n lambda q: q['question'] == 148,\n ticket['answers']\n ))[0]\n\n if any(filter(lambda c: c['choice_heading'].lower() == 'no', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_NO\n elif any(filter(lambda c: c['choice_heading'].lower() == 'maybe', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_MAYBE\n elif any(filter(lambda c: c['choice_heading'].lower() == 'yes', sprints['answered_choices'])):\n sprints = models.TicketbutlerTicket.SPRINTS_YES\n\n ticketbutler_ticket = models.TicketbutlerTicket.get_or_create(\n ticket['email'],\n ticket['full_name'],\n order_id,\n sprints,\n ticket['ticket_type_name'],\n )\n if refund:\n self.stdout.write(self.style.WARNING(\"This ticket was marked refunded: {}\".format(order_id)))\n ticketbutler_ticket.refunded = True\n ticketbutler_ticket.save()\n else:\n ticketbutler_ticket.refunded = False\n ticketbutler_ticket.save()\n\n ticketbutler_tickets.append(ticketbutler_ticket)\n\n if refund:\n self.stdout.write(self.style.WARNING(\"Skipping refunded order: {}\".format(order_id)))\n return\n\n # If an email is changed on a TicketButler ticket and an old user exists without any other tickets,\n # then disable this user's account and delete the ticket from the system\n all_order_tickets = models.TicketbutlerTicket.objects.filter(ticketbutler_orderid=order_id)\n\n for ticket in order['tickets']:\n\n for verify_ticket in all_order_tickets:\n # Check if the ticket is active in the current order, if it is\n # then skip it.\n if any(active.id == verify_ticket.id for active in ticketbutler_tickets):\n continue\n # Yeah, it's not active anymore, so delete it and potentially\n # disable the user account\n inactive_ticket = verify_ticket\n self.stdout.write(self.style.WARNING(\"Going to remove ticket for {}, order_id: {}\".format(inactive_ticket.user.email, order_id)))\n if inactive_ticket.user.tickets.all().exclude(id=inactive_ticket.id).exists():\n # Just remove the ticket\n self.stdout.write(self.style.WARNING(\"Found another ticket for user {} and deleted the inactive ticket in question but not the user\".format(inactive_ticket.user.email)))\n if inactive_ticket.pk:\n inactive_ticket.delete()\n continue\n else:\n # Remove the user account too if there are no submissions and it's not a superuser\n if not inactive_ticket.user.is_superuser and not inactive_ticket.user.submissions.all().exists():\n if inactive_ticket.user.is_active:\n self.stdout.write(self.style.WARNING(\"Also disabling user account for: {}\".format(inactive_ticket.user.email)))\n 
inactive_ticket.user.is_active = False\n inactive_ticket.user.save()\n else:\n self.stdout.write(self.style.WARNING(\"User was already inactive: {}\".format(inactive_ticket.user.email)))\n # In case the user had several tickets, and one of them was already deleted\n if inactive_ticket.pk:\n inactive_ticket.delete()\n\n if 'discount' in order:\n if order['discount']['amount'] == 100:\n\n for ticket in ticketbutler_tickets:\n ticket.free_ticket = True\n ticket.save()\n\n self.stdout.write(self.style.SUCCESS(\"Skipping invoice for free ticket for order id: {}\".format(order_id)))\n return\n else:\n self.stdout.write(self.style.ERROR(\"!!! Order id {} will have an invoice generated with missing information, Ticketbutler said the discount was: {}\".format(order_id, order['discount']['amount'])))\n\n for ticketbutler_order_line_no, order_line in enumerate(order['order_lines']):\n\n self.process_order_line(order, order_line, ticketbutler_tickets, ticketbutler_order_line_no=ticketbutler_order_line_no)",
"def test_invoice(self):\n invoice = self._create_invoice()\n self.assertEquals(invoice.total_amount, Decimal('2.38'))\n self.assertEquals(invoice.is_paid, False)\n\n # then cancel the created invoice\n cancelled_invoice = cancel_invoice(invoice)\n self.assertEquals(cancelled_invoice.total_amount, Decimal('-2.38'))",
"def action_view_invoice_salon(self):\n return {\n 'name': 'Invoices',\n 'domain': [('invoice_origin', '=', self.name)],\n 'res_model': 'account.move',\n 'view_id': False,\n 'view_mode': 'tree,form',\n 'type': 'ir.actions.act_window',\n }",
"def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }",
"def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)",
"def alert_for_pending_invoices_1(request):\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>Beginning of alert_for_pending_invoices_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tThread(target=alert_for_pending_invoices_1_woker).start()\n\n\tprint(\">>>>>>>>>>>>>>>>>>>>>End of alert_for_pending_invoices_1<<<<<<<<<<<<<<<<<<<<\")\n\n\tresponse = {}\n\n\tresponse[\"info_to_contact\"] = \"Ok\"\n\n\treturn response",
"def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,",
"def payment_completed(order_id):\n order = Order.objects.get(id=order_id)\n # create invoice e-mail\n subject = f'My Shop - EE Invoice no. {order.id}'\n message = 'Please, find attached the invoice for your recent purchase.'\n email = EmailMessage(subject,\n message,\n '[email protected]',\n [order.user.email])\n # generate PDF\n html = render_to_string('orders/order/pdf.html', {'order': order})\n out = BytesIO()\n stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out,\n stylesheets=stylesheets)\n # attach PDF file\n email.attach(f'order_{order.id}.pdf',\n out.getvalue(),\n 'application/pdf')\n # send e-mail\n email.send()",
"def invoice(self, start, end):\n\n if self.invoice_type is None:\n invoice_type = self.conn.config[\"main\"][\"invoice:object\"]\n if \":\" not in invoice_type:\n raise AttributeError(\"Invoice configuration incorrect! %s\" % invoice_type)\n module, call = invoice_type.split(\":\")\n _package = __import__(module, globals(), locals(), [ call ])\n\n funct = getattr(_package, call)\n self.invoice_type = funct\n config = self.conn.config[\"invoice_object\"]\n invoice = self.invoice_type(self, config)\n return invoice"
]
| [
"0.6223257",
"0.60884035",
"0.6055741",
"0.60072035",
"0.5954929",
"0.5904435",
"0.5853541",
"0.5839624",
"0.58209735",
"0.57507336",
"0.57295054",
"0.5693066",
"0.56464297",
"0.5634363",
"0.5624102",
"0.56167454",
"0.5604158",
"0.56001776",
"0.5599313",
"0.55901307",
"0.558336",
"0.55749077",
"0.557123",
"0.5566733",
"0.55538106",
"0.55240995",
"0.55018824",
"0.5501824",
"0.5489125",
"0.5480496"
]
| 0.8144645 | 0 |
Test the retrieval system. For each input in image_ids, generate a list of IDs returned for that image. | def test(label_dict, image_ids, args):
log_header('Starting image retrieval preparations')
queries = []
if args.test_image is not None:
queries.append(args.test_image)
else:
logging.info('Generating random test queries')
number_of_images = len(image_ids)
# Generate random queries to use in the test procedure
for i in range(args.k):
queries.append(image_ids[random.randint(0, number_of_images - 1)])
# Calculate score for the retrieved images
calculate_score(
label_dict, queries, retrieve_similar_images(queries, args)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_multi_images_ids(self, num_images=0): \n availability_images = imageInstance()\n images = availability_images.get_images()\n images_ids = []\n for image in images:\n if image.type == 'machine':\n images_ids.append( image.id.encode(\"latin-1\") )\n if num_images>1:\n random.shuffle(images_ids)\n return images_ids[:num_images]\n return images_ids",
"def get_image_ids(params: DownloadCommandParameters) -> List[str]:\n if params.retry:\n logger.info(f\"Attempting to download previously failed images.\")\n with open(recovery_file_name()) as fh:\n image_ids = json.load(fh)\n else:\n df = pd.read_csv(params.metadata_file)\n image_ids = df[df[\"dataset\"] == params.dataset][\"isic_id\"]\n\n return list(image_ids)",
"def test_test_ids_request(get_interface_params):\n from sail_on_client.protocol.localinterface import LocalInterface\n\n config_directory, config_name = get_interface_params\n data_dir = f\"{os.path.dirname(__file__)}/data\"\n local_interface = LocalInterface(config_name, config_directory)\n\n assumptions_path = os.path.join(os.path.dirname(__file__), \"assumptions.json\")\n filename = local_interface.test_ids_request(\n \"OND\", \"image_classification\", \"5678\", assumptions_path\n )\n expected = os.path.join(data_dir, \"OND\", \"image_classification\", \"test_ids.csv\")\n assert os.stat(expected).st_size > 5\n assert expected == filename",
"def get_ids_detection(self, split):\n if split == 'test': # test set has no json file. Scrape ids from directory.\n file_names = tf.io.gfile.listdir(\n os.path.dirname(self._image_path_100k.format(split, '')))\n image_names = [f[:-4] for f in file_names if f.endswith('.jpg')]\n return set(image_names)\n\n if split not in self._data:\n self.process_json(split)\n return self._data[split].keys()",
"def fetch_images(client, images):\n return [fetch_image(client, image) for image in images]",
"def test_get_imagelist_inmutable(self):\n images1 = self.mock_master.get_imagelist(self.region1)\n images2 = self.mock_master.get_imagelist(self.region1)\n r2dict = dict((i.id, i) for i in images2)\n self.assertEquals(images1, images2)\n self.assertNotEquals(id(images1), id(images2))\n for image in images1:\n self.assertIn(image.id, r2dict)\n image2 = r2dict[image.id]\n self.assertEquals(image, image2)\n self.assertNotEquals(id(image), id(image2))\n self.assertNotEquals(id(image.user_properties),\n id(image2.user_properties))",
"def cmd_account_image_ids(client, args):\n account_image_ids = client.get_account_image_ids(args.username, args.page)\n generate_output({'account_image_ids': account_image_ids})",
"def get_legacy_image_ids(self, content_retriever):\n pass",
"def get_legacy_image_ids(self, content_retriever):\n pass",
"def get_my_image_ids(self) -> Union[List[int], None]:\n if self.imported is not True:\n logging.error(f'File {self.file_path} has not been imported')\n return None\n else:\n q = self.conn.getQueryService()\n params = Parameters()\n path_query = self.make_substitutions()\n path_query = path_query.strip('/')\n params.map = {\"cpath\": rstring(path_query)}\n results = q.projection(\n \"SELECT i.id FROM Image i\"\n \" JOIN i.fileset fs\"\n \" JOIN fs.usedFiles u\"\n \" WHERE u.clientPath=:cpath\",\n params,\n self.conn.SERVICE_OPTS\n )\n self.image_ids = [r[0].val for r in results]\n return self.image_ids",
"def get_expected_chip_ids():\n paths = INPUT_IMAGES_DIR.glob(\"*.tif\")\n # images are named something like abc12.tif, we only want the abc12 part\n ids = list(sorted(set(path.stem.split(\"_\")[0] for path in paths)))\n return ids",
"def __get_image_id(self):\n return self.__get_multi_images_ids(1)",
"def get_image_features_from_full_ids(full_ids):\n submissions = Util.reddit_api.get_submissions(full_ids)\n images = [get_image_from_submission(sub) for sub in submissions]",
"def test_get_ids(civic, main_data, updated_data):\n assert len(civic._get_ids(main_data['assertions'])) == 0\n assert len(civic._get_ids(main_data['variants'])) == 1\n assert len(civic._get_ids(main_data['genes'])) == 2\n assert len(civic._get_ids(main_data['evidence'])) == 1\n\n assert len(civic._get_ids(updated_data['assertions'])) == 1\n assert len(civic._get_ids(updated_data['variants'])) == 1\n assert len(civic._get_ids(updated_data['genes'])) == 1\n assert len(civic._get_ids(updated_data['evidence'])) == 1",
"def getImgList(id_classe):\n f=open('csv_files/test-annotations-bbox.csv',\"r\",encoding='utf8')\n img_list0 = []\n img_list = []\n a = 0\n nb_img = parser_arguments().limit\n for l in f:\n if id_classe in l:\n mots = l.split(\",\")\n id_img = mots[0]\n img_list0.append(id_img)\n # lists all the identifiers of the images present in the requested class\n\n while a < nb_img:\n img_list.append(random.choice(img_list0))\n a = a+1\n return img_list\n # select randomly in the previous list the required number (requested in the terminal) of image identifiers and store them in a new list",
"def find_images(\n ami_name=None,\n executable_by=None,\n owners=None,\n image_ids=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n):\n retries = 30\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n while retries:\n try:\n filter_parameters = {\"filters\": {}}\n if image_ids:\n filter_parameters[\"image_ids\"] = [image_ids]\n if executable_by:\n filter_parameters[\"executable_by\"] = [executable_by]\n if owners:\n filter_parameters[\"owners\"] = [owners]\n if ami_name:\n filter_parameters[\"filters\"][\"name\"] = ami_name\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n images = conn.get_all_images(**filter_parameters)\n log.debug(\n \"The filters criteria %s matched the following images:%s\",\n filter_parameters,\n images,\n )\n if images:\n if return_objs:\n return images\n return [image.id for image in images]\n else:\n return False\n except boto.exception.BotoServerError as exc:\n if exc.error_code == \"Throttling\":\n log.debug(\"Throttled by AWS API, will retry in 5 seconds...\")\n time.sleep(5)\n retries -= 1\n continue\n log.error(\"Failed to convert AMI name `%s` to an AMI ID: %s\", ami_name, exc)\n return False\n return False",
"def get_image_ids(self):\n train_ids = os.listdir(self.tr_img_dir)\n test_ids = os.listdir(self.te_img_dir)\n\n ids = {\n 'train':train_ids,\n 'test':test_ids\n }\n\n return ids",
"def cmd_image_id(client, args):\n image = client.get_image(args.image_id)\n data = image.__dict__\n generate_output({'image': data})",
"def get_ids(voc_path):\n ids = []\n print(\"voc\")\n\n files_images = glob.iglob(os.path.join(voc_path, \"*.JPEG\"))\n for x in files_images:\n name = os.path.splitext(os.path.basename(x))[0]\n ids.append(name)\n print(\"names: \", ids)\n return ids",
"def getIDs():",
"def make_request(api: IsicApi, image_set: list, params: DownloadCommandParameters) -> Union[List[dict], None]:\n # Convert to a json array\n url_image_ids = json.dumps(str(image_set))\n\n # Replace and switch quote notation for the API\n url_image_ids = url_image_ids.replace('\"', \"\")\n url_image_ids = url_image_ids.replace(\"'\", '\"')\n # Quote all url strings.\n url_image_ids = urllib.parse.quote(url_image_ids)\n # Create the endpoint URL\n endpoint = f\"image/download?include={params.include}&imageIds={url_image_ids}\"\n\n # Request the images and return the response\n return api.get(endpoint=endpoint, timeout=params.timeout)",
"def make_image_data_list(image_filenames):\n img_requests = []\n for imgname in image_filenames:\n try:\n with open(imgname, 'rb') as f:\n ctxt = b64encode(f.read()).decode()\n img_requests.append({\n 'image': {'content': ctxt},\n 'features': [{\n 'type': 'TEXT_DETECTION',\n 'maxResults': 1\n }]\n })\n\n except:\n print(\"Image not found\")\n\n\n return img_requests",
"def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array",
"def test_get_image_id(self):\n img_id = str(uuid.uuid4())\n img_name = 'myfakeimage'\n self.my_image.id = img_id\n self.my_image.name = img_name\n self.sahara_client.images.get.return_value = self.my_image\n self.sahara_client.images.find.side_effect = [[self.my_image], []]\n\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_id))\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))\n self.assertRaises(exception.EntityNotFound,\n self.sahara_plugin.get_image_id, 'noimage')\n\n calls = [mock.call(name=img_name),\n mock.call(name='noimage')]\n self.sahara_client.images.get.assert_called_once_with(img_id)\n self.sahara_client.images.find.assert_has_calls(calls)",
"def modify_ids(self, images, annotations):\n print(\"Reinitialicing images and annotation IDs ...\")\n ### Images\n old_new_imgs_ids = {} # necessary for the annotations!\n for n,im in enumerate(images):\n old_new_imgs_ids[images[n]['id']] = n+1 # dicto with old im_ids and new im_ids\n images[n]['id'] = n+1 # reorganize the ids\n ### Annotations\n for n,ann in enumerate(annotations):\n annotations[n]['id'] = n+1\n old_image_id = annotations[n]['image_id']\n annotations[n]['image_id'] = old_new_imgs_ids[old_image_id] # replace im_ids in the annotations as well\n return images, annotations",
"def loadImgs(self, ids=[]):\r\n if isinstance(ids, tuple) or isinstance(ids, list):\r\n return [self.imgs[id] for id in ids]\r\n elif type(ids) == int:\r\n return [self.imgs[ids]]",
"def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images",
"def _get_ids_from_name_private(self, name):\r\n results = self.list_private_images(name=name)\r\n return [result['id'] for result in results]",
"def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def _read_image_ids(image_ids_path):\n return list(map(str.strip, open(image_ids_path, \"r\").readlines()))"
]
| [
"0.7080062",
"0.6665811",
"0.6469198",
"0.643074",
"0.639978",
"0.6186044",
"0.61784416",
"0.6166096",
"0.6166096",
"0.6113377",
"0.6014358",
"0.5991673",
"0.5991526",
"0.5941047",
"0.5906495",
"0.5866268",
"0.5865587",
"0.5854042",
"0.58208966",
"0.5806179",
"0.5782617",
"0.5768938",
"0.5732924",
"0.5731361",
"0.5716794",
"0.5710282",
"0.5696545",
"0.56892633",
"0.5684027",
"0.5682692"
]
| 0.6839561 | 1 |
Square number n**2, for 32 < n < 99 | def square(n: int) -> int:
return int(n ** 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_squares(n):\n\n return sum([i * i for i in range(n)])",
"def square(n):\r\n squared = n ** 2\r\n print (\"%d squared is %d.\" % (n, squared)) ## გიო: შეცდომას აგდებდა სანამ ფრჩხილებში არ ჩავსვი\r\n return squared",
"def make_magic_square(N): # part a\n if N % 2 == 0:\n print('N must be odd.')\n my_magic_square = np.zeros((N, N))\n i = 0\n j = np.ceil(N / 2.).astype(int)\n n = 1\n while n <= N**2:\n my_magic_square[i, j] = n\n n += 1\n i_next =\n j_next =\n if my_magic_square[i_next, j_next] > 0:\n i =\n else:\n i =\n j =\n return my_magic_square",
"def square(n):\n\n result = [num * num for num in range(n)]\n\n return result[1:]",
"def is_square(n):\r\n m = int(sqrt(n))\r\n return m * m == n",
"def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result",
"def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6",
"def square(n):\r\n try:\r\n assert(type(n) is int)\r\n if n == 1:\r\n return 1\r\n s = square(n - 1) + 2*(n - 1) + 1\r\n return s\r\n except:\r\n return None",
"def square_nums(number_list):",
"def square(n):\n squared = n ** 2\n print \"%d squared is %d.\" % (n, squared)\n return squared",
"def gensquares(n):\n for number in my_range.my_range(n): # note that we are NOT calling range(N), we implemented our own my_range() generator\n yield number**2",
"def is_hilbert_square(n):\n return ((-1 + math.sqrt(n)) / 4).is_integer()",
"def generate_square_number(square_limit):\n for i in range(0,square_limit):\n yield i**2",
"def sum_of_squares(n):\n sum = 0\n\n for i in range(0,n):\n sum += i*i\n\n return sum",
"def is_square(N):\n return N == round(N**(0.5))**2",
"def snt(n):\r\n f = True\r\n for j in range(2, n):\r\n if n % j == 0:\r\n f = False\r\n break\r\n return f",
"def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared)\n return squared",
"def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared)\n return squared",
"def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared)\n return squared",
"def solution1(n):\n res = []\n while n > 0:\n m = int(math.floor(math.sqrt(n))**2)\n res.append(m)\n n -= m\n return res",
"def square(n):\n squared = n**2\n print \"%d squared is %d.\" % (n, squared) # %d is used for decimals instead of %s for strings\n return squared",
"def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False",
"def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None",
"def square(num):\n return num * num",
"def square_number(number: int) -> int:\n return number * number",
"def d(n):\n rt = math.sqrt(n)\n i = 2\n result = 1\n while i < rt:\n if n % i == 0:\n result += i\n result += n // i\n i += 1\n\n # i == rt implies that n is a square number\n if i == rt and n % i == 0:\n result += i\n return result",
"def solve(n=1000):\r\n return str(sum(x**x for x in range(1, n + 1)))[-10:]",
"def squares():\n return [i for i in xrange(11, 89) if 1 <= (i % 10) <= 8]",
"def solution3(n):\n res = []\n while n > 0:\n m = int(math.sqrt(n))**2\n res.append(m)\n n -= m\n return res",
"def sum_of_three_squares(n):\n special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),\n 85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),\n 526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),\n 2986: (21, 32, 39), 9634: (56, 57, 57)}\n\n v = 0\n\n if n == 0:\n return (0, 0, 0)\n\n v = multiplicity(4, n)\n n //= 4**v\n\n if n % 8 == 7:\n return\n\n if n in special.keys():\n x, y, z = special[n]\n return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)\n\n s, _exact = integer_nthroot(n, 2)\n\n if _exact:\n return (2**v*s, 0, 0)\n\n x = None\n\n if n % 8 == 3:\n s = s if _odd(s) else s - 1\n\n for x in range(s, -1, -2):\n N = (n - x**2) // 2\n if isprime(N):\n y, z = prime_as_sum_of_two_squares(N)\n return _sorted_tuple(2**v*x, 2**v*(y + z), 2**v*abs(y - z))\n return\n\n if n % 8 in (2, 6):\n s = s if _odd(s) else s - 1\n else:\n s = s - 1 if _odd(s) else s\n\n for x in range(s, -1, -2):\n N = n - x**2\n if isprime(N):\n y, z = prime_as_sum_of_two_squares(N)\n return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)"
]
| [
"0.73049027",
"0.7087057",
"0.7078491",
"0.70378524",
"0.6993337",
"0.6979318",
"0.6968695",
"0.69222224",
"0.68997127",
"0.684079",
"0.6803975",
"0.6786549",
"0.67735994",
"0.67726743",
"0.6746669",
"0.67392933",
"0.67251664",
"0.67251664",
"0.67251664",
"0.6713144",
"0.6673458",
"0.66582537",
"0.6649719",
"0.663099",
"0.6623224",
"0.65774274",
"0.65601593",
"0.6540699",
"0.65331286",
"0.6531006"
]
| 0.74875534 | 0 |
Pentagonal number n*(3*n - 1)/2, for 26 < n < 81 | def pentagonal(n: int) -> int:
return int(n * (3 * n - 1) / 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2",
"def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False",
"def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2",
"def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)",
"def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False",
"def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0",
"def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)",
"def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6",
"def is_pentagonal(P):\n return sqrt(1 + 24 * P) % 6 == 5",
"def McNuggets(n):\n # Your Code Here\n\n high = n//6+1\n\n if n != 0:\n for i in range(high):\n for j in range(high):\n for k in range(high):\n if 6*k + 9*j + 20*i == n:\n return True\n\n return False\n\n else:\n return False",
"def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False",
"def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None",
"def solution(n,p):\n \n a=pow(n, (p - 1) // 2, p)\n if(a==1):\n return True\n else :\n return False",
"def nw(n):\n return 4*n*n + 1",
"def McNuggets(n):\n\t# Your Code Here\n\tfor a in range(n/6):\n\t\tfor b in range(n/9):\n\t\t\tc = n - 6a - 9b\n\t\t\tif c >= 0 and c % 20 == 0:\n\t\t\t\treturn True\n\treturn False",
"def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def cd2p(s, N):\n letter = s[0].upper()\n number = s[1:]\n col = letter_coord.index(letter) + 1\n row = (N + 1) - int(number)\n # print('row:{} col:{}'.format(row,col))\n return col + (N + 1) * row",
"def McNuggets(n):\n \n '''if n == 0:\n return True\n for i in (6, 9, 20):\n if n >= i and McNuggets(n - i):\n return True\n return False\n '''\n \n for a in range(0,n):\n for b in range(0,n):\n for c in range(0,n):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def pentakis(self):\n return self.nlegomena(5)",
"def McNuggets(n):\n a=0\n b=0\n c=0\n while 6*a + 9*b + 20*c < n:\n for a in range((n//6)+1):\n for b in range((n//9)+1):\n for c in range ((n//20)+1):\n if 6*a + 9*b + 20*c == n:\n return print(True)\n if 6*a + 9*b + 20*c != n:\n return print(False)",
"def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P",
"def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj",
"def vxc_PW92(n):\n eps = 1E-9*n # ???????\n SMALL = 1E-90\n if n < SMALL:\n return 0.0\n else:\n return exc_PW92(n) + n*exc_PW92(n,der=1)",
"def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1",
"def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2",
"def N_out(K,P,S,N_in):\n return (int((N_in+2*P-K)/S)+1)",
"def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult",
"def hexagonal(n: int) -> int:\n return int(n * (2 * n - 1))",
"def collatz(n):\n iterCount = 0\n while(n != 1):\n if(n & 1):\n n = 3 * n + 1\n else:\n n //= 2\n iterCount += 1\n return iterCount",
"def solve(n):\r\n c=0\r\n Ans=0\r\n while c<=n:\r\n if isLychrel(c):\r\n Ans+=1\r\n c+=1\r\n return Ans"
]
| [
"0.7227314",
"0.6879778",
"0.68287385",
"0.6813105",
"0.68031704",
"0.6631665",
"0.6621119",
"0.6595571",
"0.64292324",
"0.6410007",
"0.6353537",
"0.63163114",
"0.62923586",
"0.6150827",
"0.61487466",
"0.614311",
"0.6083966",
"0.6078726",
"0.6017164",
"0.6017101",
"0.6011597",
"0.6008871",
"0.5988706",
"0.5942934",
"0.59357804",
"0.59237385",
"0.5923413",
"0.5918078",
"0.5901902",
"0.58891404"
]
| 0.7106296 | 1 |
Hexagonal number n*(2*n - 1), for 23 < n < 70 | def hexagonal(n: int) -> int:
return int(n * (2 * n - 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hexagonal_number(n):\n return n * (2 * n - 1)",
"def is_hexagonal_number(n):\n _, x = quadratic.solve(2, -1, -n)\n return is_number(x) and x.is_integer()",
"def is_hex(n):\n hex_test = (1 + sqrt(1 + 8*n))/4\n if hex_test == int(hex_test):\n return True\n return False",
"def McNuggets(n):\n # Your Code Here\n\n high = n//6+1\n\n if n != 0:\n for i in range(high):\n for j in range(high):\n for k in range(high):\n if 6*k + 9*j + 20*i == n:\n return True\n\n return False\n\n else:\n return False",
"def fn(c):\n ans = 0\n for k in range(1, 16): \n ans = min(ans, k*16+k, key=lambda x: abs(x - int(c, 16)))\n return hex(ans)[2:].zfill(2)",
"def hit_bin(self, n):\n # TODO: fix this monkey code!\n\n if n < 4:\n return n\n elif n << 3 == 0:\n return 4\n elif n << 4 == 0:\n return 5\n elif n << 5 == 0:\n return 6\n elif n >= 32 and n <= 127:\n return 7\n else:\n return 8",
"def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)",
"def McNuggets(n):\n\t# Your Code Here\n\tfor a in range(n/6):\n\t\tfor b in range(n/9):\n\t\t\tc = n - 6a - 9b\n\t\t\tif c >= 0 and c % 20 == 0:\n\t\t\t\treturn True\n\treturn False",
"def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False",
"def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)",
"def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False",
"def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False",
"def calc_plate_HEX(NTU, cr):\n eff = 1 - scipy.exp((1 / cr) * (NTU ** 0.22) * (scipy.exp(-cr * (NTU) ** 0.78) - 1))\n return eff",
"def sat(n: int):\n i = n ** 17 + 9\n j = (n + 1) ** 17 + 9\n\n while i != 0: # compute gcd using Euclid's algorithm\n (i, j) = (j % i, i)\n\n return n >= 0 and j != 1",
"def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))",
"def sat(i: int, n=62710561):\n return 1 < i < n and n % i == 0",
"def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def sat(n: int):\n return pow(2, n, n) == 3",
"def McNuggets(n):\n a=0\n b=0\n c=0\n while 6*a + 9*b + 20*c < n:\n for a in range((n//6)+1):\n for b in range((n//9)+1):\n for c in range ((n//20)+1):\n if 6*a + 9*b + 20*c == n:\n return print(True)\n if 6*a + 9*b + 20*c != n:\n return print(False)",
"def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)",
"def footprint_corner_indices():",
"def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result",
"def Hexagon(image):\n return x, y",
"def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0",
"def McNuggets(n):\n \n '''if n == 0:\n return True\n for i in (6, 9, 20):\n if n >= i and McNuggets(n - i):\n return True\n return False\n '''\n \n for a in range(0,n):\n for b in range(0,n):\n for c in range(0,n):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def grayCode(self, n):\n res = [0]\n for i in range(0, n):\n res += [(1 << i) + x for x in reversed(res)]\n return res",
"def get_nth_digit_hex(n: int) -> int:\n res = (\n 4 * get_partial_sum(n, 1)\n - 2 * get_partial_sum(n, 4)\n - get_partial_sum(n, 5)\n - get_partial_sum(n, 6)\n )\n res = res - math.floor(res)\n\n return math.floor(res * 16)",
"def draw_raw_hexagons(x, y, n, color1, color2):\n w = x\n e = y\n side_hexagon = math.floor(500 / (2 * n))\n for i in range(math.ceil(n / 2)):\n draw_hexagon(x, y, side_hexagon, color1)\n #Получить координаты для следуующего шестиугольника\n x = turtle.xcor() + 2 * (side_hexagon * math.sqrt(3))\n y = turtle.ycor()\n\n\n turtle.up()\n turtle.goto(w - side_hexagon * math.sqrt(3), e)\n\n for q in range(math.floor(n / 2)):\n x = turtle.xcor() + 2 * (side_hexagon * math.sqrt(3))\n y = turtle.ycor()\n draw_hexagon(x, y, side_hexagon, color2)",
"def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2",
"def draw_raw_hexagons1(x, y, n, color1, color2):\n w = x\n e = y\n side_hexagon = math.floor(500 / (2 * n))\n for i in range(math.ceil(n / 2)):\n draw_hexagon(x, y, side_hexagon, color2)\n #Получить координаты для следуующего шестиугольника\n x = turtle.xcor() + 2 * (side_hexagon * math.sqrt(3))\n y = turtle.ycor()\n\n turtle.up()\n turtle.goto(w - side_hexagon * math.sqrt(3), e)\n\n for q in range(math.floor(n / 2)):\n x = turtle.xcor() + 2 * (side_hexagon * math.sqrt(3))\n y = turtle.ycor()\n draw_hexagon(x, y, side_hexagon, color1)"
]
| [
"0.73256433",
"0.649887",
"0.64827543",
"0.63866496",
"0.62783104",
"0.6215221",
"0.61989176",
"0.6133955",
"0.61159766",
"0.61101925",
"0.60988736",
"0.60578424",
"0.6041439",
"0.60011524",
"0.5965139",
"0.59388924",
"0.59315044",
"0.59012663",
"0.5898888",
"0.58850104",
"0.58798254",
"0.58780813",
"0.58609056",
"0.58515024",
"0.58248615",
"0.5820934",
"0.5801188",
"0.5799328",
"0.57924783",
"0.5761709"
]
| 0.72794765 | 1 |
Heptagonal number n*(5*n - 3)/2, for 21 < n < 63 | def heptagonal(n: int) -> int:
return int(n * (5 * n - 3) / 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hexagonal(n: int) -> int:\n return int(n * (2 * n - 1))",
"def H(n,x):\n if(n == 0):\n hn = 1\n elif(n == 1):\n hn = 2*x\n else:\n Hnm1 = 1; Hn = 2*x\n for i in range(1,n):\n H = 2*x*Hn - 2*i*Hnm1\n Hnm1 = Hn\n Hn = H\n hn = H\n return hn",
"def formHn(n,min,max,N):\n pas = abs((max-min)/N)\n x = min\n result = np.array([])\n \n for i in range(0,N):\n result = np.append(result,H(n,x))\n x += pas \n return result",
"def hexagonal_number(n):\n return n * (2 * n - 1)",
"def make_hamming(N):\n return map(lambda x: 0.54 - 0.46 * math.cos(2*math.pi*x/(N-1)),\n range(N))",
"def hn(x, n):\n\n data = x\n for _ in xrange(n):\n data = h(data)\n return data",
"def H(n,x):\r\n H_values = [] #a list of sequential H values for different n's up to n=n.\r\n H_values.append(1) #appends H_0.\r\n H_values.append(2*x) #appends H_1.\r\n if n>1:\r\n for i in range(1,n):\r\n H_values.append((2*x*H_values[-1])-(2*i*H_values[-2]))\r\n return H_values[-1]\r\n elif n == 0:\r\n return H_values[0]\r\n else:\r\n return H_values[1]",
"def eval_hankel_function(pt, n=MAX_N):\n j_0 = 0\n for i in range(n):\n j_0 += (-1)**i * (1 / 4 * e**2)**i / factorial(i)**2\n\n g = 0.57721566490153286\n y_0 = (ln(e / 2) + g) * j_0\n h_n = 0\n for i in range(n):\n h_n += 1 / (i + 1)\n y_0 += (-1)**(i) * h_n * (e**2 / 4)**(i+1) / (factorial(i+1))**2\n y_0 *= 2 / pi\n\n imag_unit = (np.zeros(1, dtype=np.complept128) + 1j)[0]\n h_0 = j_0 + imag_unit * y_0\n return h_0",
"def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2",
"def H(self) -> BaseMatrix:",
"def H(self) -> BaseMatrix:",
"def makeHadamard(n, d):\n return [[1 if d[\"r%dc%d\" % (i, j)] else 0 for j in range(n)] for i in range(n)]",
"def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)",
"def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)",
"def calc_H(J, N):\n\n H = np.array([[0.0] * 2 ** N for i in range(2 ** N)]) # np.zeros([ 2**N, 2**N]) macht das gleiche\n max_bin_len = len(bin(2 ** N - 1)[2:]) # need this for 2 -> 010 instead of 10\n\n #hier machst du dir viel mühe um die Binären zustände richtig darzustellen....\n for n in create_n(N):\n # ... Aber hier sagst du dem interpreter dass er das ganze wieder zu einer zahl machen soll. du kannst den\n # gleichen Effekt also mit einem einfachen `for n_dec in range(N+1)` erreichen\n n_dec = int(n, 2) # converts binary to integer\n\n for link in ['x', 'y', 'z']:\n for i in range(N): # loop over the combinations 12, 23 and 31 (repr. as 01, 12, 20)\n j = i + 1\n if j == N:\n j = 0\n\n # calculating l\n new_i = (max_bin_len - 1) - i # need to preprocess i, j and k because the most left site in |01 ... 1>\n new_j = (max_bin_len - 1) - j # is the most right digit in its binary representation\n l = n_dec + (1 - 2*int(n[new_i])) * 2 ** i + (1 - 2 * int(n[new_j])) * 2 ** j # no -1 in power because indexing is zero based\n\n # bin mir nicht 100% sicher hier, aber ich glaube du brauchst hier gar keine Fallunterscheidung für\n # x, y und z. weil du ja immer l gleich berechnest kannst du direkt alle Fälle damit füllen, falls die\n # jeweiligen (vielleicht dann abgeänderten) bedingungen erfüllt sind\n\n # inserting matrix elements\n if link == 'x':\n H[l, n_dec] += -J[0, i, j] / 4\n elif link == 'y':\n if n[i] == n[j]: # check for correct sign in y link\n H[l, n_dec] += J[1, i, j] / 4\n else:\n H[l, n_dec] += -J[1, i, j] / 4\n else: # z link\n if n[i] == n[j]: # check for sign\n H[n_dec, n_dec] += -J[2, i, j] * h / 4\n else:\n H[n_dec, n_dec] += J[2, i, j] * h / 4\n # bei der berechnung oder wertzuweisung von x23 und y23 macht der glaub ich komische sachen, konnte das nicht genau\n # erkennen. Der rest scheint auf den ersten blick zu funktionieren. bin mir wie gesagt nicht sicher.\n return H",
"def hadamard(n):\n def recurse(x):\n if x == 1: # 1x1 square, we return 1 (black)\n return np.array([1])\n else:\n prev = recurse(x // 2) # previous hadamard matrix\n # create current matrix according to the grid\n # prev | prev\n # ------------\n # prev | -prev\n # hstack and vstack are horizontal and vertical stacks, respectively\n return np.vstack((np.hstack((prev, prev)), np.hstack((prev, -prev))))\n\n mat = recurse(n) # call with n as initial value, store result in mat\n fig, ax = plt.subplots(figsize=(10, 10))\n cmap = colors.ListedColormap(['white', 'black']) # create colormap\n ax.matshow(mat, cmap=cmap)\n plt.xticks([]) # remove axis labels\n plt.yticks([])\n plt.show()",
"def horne1986_Ni(N):\n return -6.362 + 1.193*N + 0.00098*N**2",
"def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False",
"def hart(N):\n m = 2\n i = 1\n while not is_square(m):\n s = isqrt(N * i) + 1\n m = pow(s, 2, N)\n i += 1\n t = isqrt(m)\n g = gcd(s - t, N)\n return g, N // g",
"def McNuggets(n):\n # Your Code Here\n\n high = n//6+1\n\n if n != 0:\n for i in range(high):\n for j in range(high):\n for k in range(high):\n if 6*k + 9*j + 20*i == n:\n return True\n\n return False\n\n else:\n return False",
"def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False",
"def Hanning(data):\r\n N=float(data.shape[0])\r\n temp=np.zeros(data.shape[0])\r\n for u, i in enumerate(data):\r\n temp[u]=(0.5-0.5*np.cos(2*np.pi*(u/N)))*i\r\n return temp",
"def hill( m, n, pH ):\n # the 6 is actually pKa which we are setting to 6 for this question\n X = float(m) * ( (10**(float(n)*(pH - 6))) / (1 + (10**(float(n)*(pH - 6 )))) )\n\n return X",
"def lehmer(n):\n a = np.outer(np.ones(n), np.arange(1, n + 1))\n a = a / a.T\n a = np.tril(a) + np.tril(a, -1).T\n\n return a",
"def h(n, x, orthonormal=True):\n h = polynomial.polyval(x, h_coefs[n])\n return h",
"def Eho(ve, n=0):\r\n\r\n return h*ve*(n+1/2)",
"def plot_h_static(n: int = 1):\n # Negative E implies bound state; positive scattering.\n # ψ_p0 should be 0 for continuity across the origin.\n # E should be a whittaker energy, ie -1/2, -2/9, -1/8, -.08 etc\n # Only odd states (n = 1, 3, 5 etc) correspond to 3d H atom.\n E = -2 / (n + 1) ** 2\n x, ψ = h_static(E)\n ψ = ψ**2\n\n fig, ax = plt.subplots()\n ax.plot(x, ψ)\n\n ax.grid(True)\n plt.xlim(0, 20)\n plt.show()",
"def footprint_corner_indices():",
"def create_boundary_hyp_space(n_features):\n hyp_space = []\n for i in range(n_features + 1):\n hyp = [1 for _ in range(n_features)]\n hyp[n_features-i:n_features] = [0 for _ in range(i)]\n hyp_space.append(hyp)\n hyp_space = np.array(hyp_space)\n return hyp_space",
"def solve_for_edge_dimensionality(n):\n return int(round(np.sqrt(2 * n + 2.25) - 1.5))"
]
| [
"0.6687355",
"0.65955836",
"0.6554876",
"0.652879",
"0.64516014",
"0.64367026",
"0.6418375",
"0.63759947",
"0.6347117",
"0.6294453",
"0.6294453",
"0.62828547",
"0.6234204",
"0.61670166",
"0.6138718",
"0.6136461",
"0.6133237",
"0.60687983",
"0.60186327",
"0.6014623",
"0.6013419",
"0.59747934",
"0.5947933",
"0.5929356",
"0.5929207",
"0.59281015",
"0.5924424",
"0.5902296",
"0.5899035",
"0.58794636"
]
| 0.81946015 | 0 |
Octagonal 19 < n < 58 | def octagonal(n: int) -> int:
return int(n * (3 * n - 2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hexagonal_number(n):\n return n * (2 * n - 1)",
"def horne1986_Ni(N):\n return -6.362 + 1.193*N + 0.00098*N**2",
"def hexagonal(n: int) -> int:\n return int(n * (2 * n - 1))",
"def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)",
"def McNuggets(n):\n # Your Code Here\n\n high = n//6+1\n\n if n != 0:\n for i in range(high):\n for j in range(high):\n for k in range(high):\n if 6*k + 9*j + 20*i == n:\n return True\n\n return False\n\n else:\n return False",
"def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)",
"def I (self, n):",
"def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False",
"def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False",
"def make_b_array(n):\n array = np.linspace(-3, 3, n)\n for i, x in enumerate(array[1:-1], start=1):\n if abs(x) < 1:\n array[i] = 2\n else:\n array[i] = 0\n array[0] = 0\n array[n-1] = 0\n\n return array",
"def nw(n):\n return 4*n*n + 1",
"def J (self, n):",
"def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False",
"def bin_states(n, sym=False):\n\n if n<0:\n raise Exception(\"n cannot be <0\")\n if n>30:\n raise Exception(\"n is too large to enumerate all states.\")\n \n v = np.array([list(np.binary_repr(i,width=n)) for i in range(2**n)]).astype(int)\n\n if sym is False:\n return v\n return v*2-1",
"def McNuggets(n):\n\t# Your Code Here\n\tfor a in range(n/6):\n\t\tfor b in range(n/9):\n\t\t\tc = n - 6a - 9b\n\t\t\tif c >= 0 and c % 20 == 0:\n\t\t\t\treturn True\n\treturn False",
"def b_n(n):\n if n <= 0.36: # MCH03\n ei = np.array([0, 1, 2, 3, 4])\n ai = np.array([0.01945, -0.8902, 10.95, -19.67, 13.43])\n else: # CB99\n ei = np.array([1, 0, -1, -2])\n ai = np.array([2, -1./3, 4./405, 46./25515])\n return np.sum(ai * np.power(float(n), ei))",
"def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2",
"def binary_compositions(n):\n return productrange(*[2]*(n-1))",
"def vxc_PW92(n):\n eps = 1E-9*n # ???????\n SMALL = 1E-90\n if n < SMALL:\n return 0.0\n else:\n return exc_PW92(n) + n*exc_PW92(n,der=1)",
"def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0",
"def ne(n):\n return 4*n*n - 2*n + 1",
"def hit_bin(self, n):\n # TODO: fix this monkey code!\n\n if n < 4:\n return n\n elif n << 3 == 0:\n return 4\n elif n << 4 == 0:\n return 5\n elif n << 5 == 0:\n return 6\n elif n >= 32 and n <= 127:\n return 7\n else:\n return 8",
"def sat(i: int, n=62710561):\n return 1 < i < n and n % i == 0",
"def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)",
"def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2",
"def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2",
"def grayCode(self, n):\n res = [0]\n for i in range(0, n):\n res += [(1 << i) + x for x in reversed(res)]\n return res",
"def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret",
"def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False",
"def diff21b(n):\n return 2 * (n - 21) if n > 21 else 21-n"
]
| [
"0.63575953",
"0.63354844",
"0.6244672",
"0.61414593",
"0.61299133",
"0.61168927",
"0.60856116",
"0.6066138",
"0.60464036",
"0.6032416",
"0.6026403",
"0.6007042",
"0.5979031",
"0.5948708",
"0.59418005",
"0.59248644",
"0.5901751",
"0.5899809",
"0.58847505",
"0.5876129",
"0.58586544",
"0.5848911",
"0.5822018",
"0.58219504",
"0.5815204",
"0.5742304",
"0.5741261",
"0.5732448",
"0.56997037",
"0.56874645"
]
| 0.6601916 | 0 |
Computes the number of Zernike coefficients given the radial polynomial order/level n. | def zernike_num_coeff(n):
if not (n>=0):
print('Input parameter must be >= 0')
raise AssertionError()
return sum(xrange(n+1)) + n+1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def N_z(self) -> int:\n return self.params.N_z",
"def formula_n(self, n: int, x: np.ndarray) -> np.ndarray:\n\n # express x as z = x/(x-1)\n z = x / (x - 1)\n\n # special case @n=0\n if n == 0:\n kn = 1 - self._vlerchphi(1 / z, n + 1)\n else:\n kn = 1 / n - self._vzlerchphi(1 / z, n + 1)\n\n # return\n return kn",
"def _getZc(n):\n # An extra trial is required for low counts, due to the fact\n # that there is higher variance in the calculated deviation.\n extra = 1\n\n vFree = n - 1\n zc = 1.96\n if vFree > 15:\n # Normal distribution, and enough that we do not need to\n # have an extra trial.\n extra = 0\n elif vFree >= 10:\n # Here and below is a t-distribution; note that this comes\n # from the 97.5% column in Table 3 of Driels et al., since\n # those coefficients don't include the tail\n zc = 2.23\n elif vFree >= 5:\n zc = 2.57\n elif vFree >= 4:\n zc = 2.78\n elif vFree >= 3:\n zc = 3.18\n elif vFree >= 2:\n zc = 4.30\n elif vFree >= 1:\n zc = 12.71\n return zc, extra",
"def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64",
"def Z(n):\n count5 = 0\n i = 1\n while 1:\n a = pow(5, i)\n if a > n:\n return count5\n else:\n count5 += n/a\n i += 1",
"def NumCoefficients(self):\n return nchoosek(self.degree + self.dimension, self.degree, exact=True)",
"def test_zernike_coeffs(self):\n basis = FourierZernikeBasis(L=40, M=40, N=0, spectral_indexing=\"ansi\")\n l, m = basis.modes[:, :2].T\n coeffs = zernike_radial_coeffs(l, m, exact=False)\n assert coeffs.dtype == np.int64\n basis = FourierZernikeBasis(L=60, M=30, N=0, spectral_indexing=\"fringe\")\n l, m = basis.modes[:, :2].T\n coeffs = zernike_radial_coeffs(l, m, exact=False)\n assert coeffs.dtype == np.float64",
"def test_zernike_radial(self): # noqa: C901\n # https://en.wikipedia.org/wiki/Zernike_polynomials#Radial_polynomials\n\n def Z3_1(x, dx=0):\n if dx == 0:\n return 3 * x**3 - 2 * x\n if dx == 1:\n return 9 * x**2 - 2\n if dx == 2:\n return 18 * x\n if dx == 3:\n return np.full_like(x, 18)\n if dx >= 4:\n return np.zeros_like(x)\n\n def Z4_2(x, dx=0):\n if dx == 0:\n return 4 * x**4 - 3 * x**2\n if dx == 1:\n return 16 * x**3 - 6 * x\n if dx == 2:\n return 48 * x**2 - 6\n if dx == 3:\n return 96 * x\n if dx == 4:\n return np.full_like(x, 96)\n if dx >= 5:\n return np.zeros_like(x)\n\n def Z6_2(x, dx=0):\n if dx == 0:\n return 15 * x**6 - 20 * x**4 + 6 * x**2\n if dx == 1:\n return 90 * x**5 - 80 * x**3 + 12 * x\n if dx == 2:\n return 450 * x**4 - 240 * x**2 + 12\n if dx == 3:\n return 1800 * x**3 - 480 * x\n if dx == 4:\n return 5400 * x**2 - 480\n if dx == 5:\n return 10800 * x\n if dx == 6:\n return np.full_like(x, 10800)\n if dx >= 7:\n return np.zeros_like(x)\n\n l = np.array([3, 4, 6])\n m = np.array([1, 2, 2])\n r = np.linspace(0, 1, 11) # rho coordinates\n max_dr = 4\n desired = {\n dr: np.array([Z3_1(r, dr), Z4_2(r, dr), Z6_2(r, dr)]).T\n for dr in range(max_dr + 1)\n }\n radial = {\n dr: zernike_radial(r[:, np.newaxis], l, m, dr) for dr in range(max_dr + 1)\n }\n radial_poly = {\n dr: zernike_radial_poly(r[:, np.newaxis], l, m, dr)\n for dr in range(max_dr + 1)\n }\n for dr in range(max_dr + 1):\n np.testing.assert_allclose(radial[dr], desired[dr], err_msg=dr)\n np.testing.assert_allclose(radial_poly[dr], desired[dr], err_msg=dr)",
"def getZernikeCoeffsOLS(x, y, z, nZern, xOffset=0, yOffset=0, xMax=None, yMax=None, weights=None):\n \n if xMax is None:\n xMax = np.nanmax(x)\n if yMax is None:\n yMax = np.nanmax(y)\n if weights is None:\n w = np.ma.ones(x.shape, dtype=np.int)\n else:\n w = weights\n \n # Remove NaN values.\n z = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, z)\n x = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, x)\n y = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, y)\n w = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, w)\n x = x.compressed()\n y = y.compressed()\n w = w.compressed()\n z = z.compressed()\n \n # Transform the coordinates to the unit circle.\n xcn = (x - xOffset)/xMax\n ycn = (y - yOffset)/yMax\n\n # We defined the Zernike polynomials in polar coordinates.\n rcn = np.sqrt(xcn**2. + ycn**2.)\n ucn = np.arctan2(ycn, xcn)\n \n # Build the matrix with the Zernike polynomials.\n zMat = np.zeros((np.prod(rcn.shape), nZern), dtype=np.float)\n for i in range(0,nZern):\n zMat[:,i] = zernikes[i+1](rcn.flatten(), ucn.flatten())\n\n zMat = np.matrix(zMat)\n sMat = np.matrix(z)\n wMat = diags(w)\n coefs = np.linalg.lstsq(wMat*zMat, wMat*sMat.T, rcond=None)\n \n return np.hstack((0, np.asarray(coefs[0])[:,0]))",
"def _qsd_l2_cx_count(self, n):\n return 9 / 16 * 4**n - 3 / 2 * 2**n",
"def n_wyraz(a1,nr_wyrazu,r):\n return a1+(nr_wyrazu-1)*r",
"def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val",
"def nCr():\n return math.factorial(self.nn) / (math.factorial(self.rr) * math.factorial(self.nn - self.rr))",
"def zn_pow(x, y, n):\n if y < 0:\n y = abs(y)\n x = inverse_in_zn(x, n)\n product = 1\n for i in range(y):\n product = (product * x) % n\n vprint(\"{}^{}={}\".format(x, i, product))\n return product",
"def number_of_trees_of_order(n):\n if n < 2:\n return n\n result = 0\n for k in range(1, n):\n result += k * number_of_trees_of_order(k) * _s(n-1, k)\n return result // (n - 1)",
"def zzx_degree(f):\n return len(f) - 1",
"def combinations_count(n, r):\n # TODO: How should I do when n - r is negative?\n if n < 0 or r < 0:\n raise Exception('combinations_count(n, r) not defined when n or r is negative')\n if n - r < r: r = n - r\n if r < 0: return 0\n if r == 0: return 1\n if r == 1: return n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n return result",
"def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1",
"def get_n(self):\n return np.append([self.n_init],[s.n for s in self.surfaces])",
"def nN(self):\n return int((self._n+1).prod())",
"def totient_chain_length(n, arr):\n if arr[n]:\n return arr[n]\n else:\n i = 1 + totient_chain_length(sympy.totient(n), arr)\n arr[n] = i\n return i",
"def getNumAssembliesWithAllRingsFilledOut(self, nRings):\n if self.powerMultiplier == 1:\n return 3 * nRings * (nRings - 1) + 1\n else:\n return nRings * (nRings - 1) + (nRings + 1) // 2",
"def count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n else:\n total = 0\n for i in range(1, min(n, 3) + 1):\n total += count_ways(n - i)\n return total",
"def basis_fns(n=0):\n return lambda x: np.sum(x ** (n+1), axis=1)",
"def nCr(n, k):\n if n < k:\n return 0\n f = math.factorial\n return f(n) / f(k) / f(n - k)",
"def nu3(self):\n n = self.level()\n if (n % 9 == 0):\n return ZZ(0)\n return prod([ 1 + kronecker_symbol(-3, p) for p, _ in n.factor()])",
"def required_nb_data_func(list_nb_coeff):\n return max(list_nb_coeff / (1+np.arange(1, N+1)//2))",
"def nw(n):\n return 4*n*n + 1",
"def f(z):\n if abs(z) > 2:\n return 1\n else:\n n = 1\n while abs(z) < 2:\n n += 1\n if n > 100:\n return 0\n else:\n z = z**2 + c\n return n",
"def no_math_solution(n: int):\n lookup = {1: 1}\n # Calculate the chain's length of all Collatz sequences started below n\n for i in range(2, n):\n cal_chain_length(i, lookup)\n # Find the longest chain\n longestChain = 1\n for i in range(2, n):\n if (lookup[i] > lookup[longestChain]):\n longestChain = i\n\n return longestChain"
]
| [
"0.69166356",
"0.6618227",
"0.65092444",
"0.63854516",
"0.63752306",
"0.6365634",
"0.634402",
"0.6255035",
"0.6190102",
"0.61696285",
"0.6107539",
"0.60845035",
"0.60397416",
"0.593207",
"0.5926248",
"0.5918641",
"0.5896914",
"0.5892694",
"0.58867955",
"0.58669955",
"0.5858061",
"0.58540034",
"0.58443296",
"0.5804682",
"0.5799623",
"0.5797946",
"0.5786025",
"0.57825816",
"0.5777632",
"0.5774764"
]
| 0.74826425 | 0 |
Function to compute the Zernike polynomial (n, m) given a grid of radial coordinates rho and azimuthal coordinates phi. >>> zernike(3,5, 0.12345, 1.0) 0.0073082282475042991 | def zernike_poly(m, n, rho, phi):
if (m > 0): return zernike_rad(m, n, rho) * np.cos(m * phi)
if (m < 0): return zernike_rad(-m, n, rho) * np.sin(-m * phi)
return zernike_rad(0, n, rho) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def zernike(m, n, rho, phi):\n if (m > 0): return zernike_rad(m, n, rho) * np.cos(m * phi)\n if (m < 0): return zernike_rad(-m, n, rho) * np.sin(-m * phi)\n return zernike_rad(0, n, rho)",
"def zernikel(j, rho, phi):\n n = 0\n while (j > n):\n n += 1\n j -= n\n m = -n+2*j\n return zernike(m, n, rho, phi)",
"def test_zernike_radial(self): # noqa: C901\n # https://en.wikipedia.org/wiki/Zernike_polynomials#Radial_polynomials\n\n def Z3_1(x, dx=0):\n if dx == 0:\n return 3 * x**3 - 2 * x\n if dx == 1:\n return 9 * x**2 - 2\n if dx == 2:\n return 18 * x\n if dx == 3:\n return np.full_like(x, 18)\n if dx >= 4:\n return np.zeros_like(x)\n\n def Z4_2(x, dx=0):\n if dx == 0:\n return 4 * x**4 - 3 * x**2\n if dx == 1:\n return 16 * x**3 - 6 * x\n if dx == 2:\n return 48 * x**2 - 6\n if dx == 3:\n return 96 * x\n if dx == 4:\n return np.full_like(x, 96)\n if dx >= 5:\n return np.zeros_like(x)\n\n def Z6_2(x, dx=0):\n if dx == 0:\n return 15 * x**6 - 20 * x**4 + 6 * x**2\n if dx == 1:\n return 90 * x**5 - 80 * x**3 + 12 * x\n if dx == 2:\n return 450 * x**4 - 240 * x**2 + 12\n if dx == 3:\n return 1800 * x**3 - 480 * x\n if dx == 4:\n return 5400 * x**2 - 480\n if dx == 5:\n return 10800 * x\n if dx == 6:\n return np.full_like(x, 10800)\n if dx >= 7:\n return np.zeros_like(x)\n\n l = np.array([3, 4, 6])\n m = np.array([1, 2, 2])\n r = np.linspace(0, 1, 11) # rho coordinates\n max_dr = 4\n desired = {\n dr: np.array([Z3_1(r, dr), Z4_2(r, dr), Z6_2(r, dr)]).T\n for dr in range(max_dr + 1)\n }\n radial = {\n dr: zernike_radial(r[:, np.newaxis], l, m, dr) for dr in range(max_dr + 1)\n }\n radial_poly = {\n dr: zernike_radial_poly(r[:, np.newaxis], l, m, dr)\n for dr in range(max_dr + 1)\n }\n for dr in range(max_dr + 1):\n np.testing.assert_allclose(radial[dr], desired[dr], err_msg=dr)\n np.testing.assert_allclose(radial_poly[dr], desired[dr], err_msg=dr)",
"def getZernikeCoeffsOLS(x, y, z, nZern, xOffset=0, yOffset=0, xMax=None, yMax=None, weights=None):\n \n if xMax is None:\n xMax = np.nanmax(x)\n if yMax is None:\n yMax = np.nanmax(y)\n if weights is None:\n w = np.ma.ones(x.shape, dtype=np.int)\n else:\n w = weights\n \n # Remove NaN values.\n z = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, z)\n x = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, x)\n y = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, y)\n w = np.ma.masked_where(x.mask | y.mask | z.mask | w.mask, w)\n x = x.compressed()\n y = y.compressed()\n w = w.compressed()\n z = z.compressed()\n \n # Transform the coordinates to the unit circle.\n xcn = (x - xOffset)/xMax\n ycn = (y - yOffset)/yMax\n\n # We defined the Zernike polynomials in polar coordinates.\n rcn = np.sqrt(xcn**2. + ycn**2.)\n ucn = np.arctan2(ycn, xcn)\n \n # Build the matrix with the Zernike polynomials.\n zMat = np.zeros((np.prod(rcn.shape), nZern), dtype=np.float)\n for i in range(0,nZern):\n zMat[:,i] = zernikes[i+1](rcn.flatten(), ucn.flatten())\n\n zMat = np.matrix(zMat)\n sMat = np.matrix(z)\n wMat = diags(w)\n coefs = np.linalg.lstsq(wMat*zMat, wMat*sMat.T, rcond=None)\n \n return np.hstack((0, np.asarray(coefs[0])[:,0]))",
"def showZernike(beta=None,betaErr=None,gridsize = 1, max_rad = 1,significance=False):\n x,y = np.meshgrid(np.arange(-gridsize,gridsize,0.01),np.arange(-gridsize,gridsize,0.01))\n rho = np.sqrt(x**2+y**2)\n phi = np.arctan2(y,x)\n ok = rho < max_rad\n if significance != False:\n sigIdx = np.abs(beta)/betaErr >= significance\n beta = beta[sigIdx]\n nn = len(beta)\n znk=0\n for j in range(nn):\n znk = znk + beta[j]*zernikel(j,rho,phi)*ok\n pl.imshow(znk)\n return znk",
"def test_zernike_coeffs(self):\n basis = FourierZernikeBasis(L=40, M=40, N=0, spectral_indexing=\"ansi\")\n l, m = basis.modes[:, :2].T\n coeffs = zernike_radial_coeffs(l, m, exact=False)\n assert coeffs.dtype == np.int64\n basis = FourierZernikeBasis(L=60, M=30, N=0, spectral_indexing=\"fringe\")\n l, m = basis.modes[:, :2].T\n coeffs = zernike_radial_coeffs(l, m, exact=False)\n assert coeffs.dtype == np.float64",
"def zernike_rad(m, n, rho):\n if (n < 0 or m < 0 or abs(m) > n):\n raise ValueError\n if ((n-m) % 2):\n return rho*0.0\n pre_fac = lambda k: (-1.0)**k * fac(n-k) / ( fac(k) * fac( (n+m)/2.0 - k ) * fac( (n-m)/2.0 - k ) )\n return sum(pre_fac(k) * rho**(n-2.0*k) for k in xrange((n-m)/2+1))",
"def zernike_Vnm(rho,theta,n,m):\n\tRnm = 0\n\tfact = lambda x: np.math.factorial(x)\n\tam = abs(m)\n\tfor s in range(0,(n-am)/2):\n\t\tRnm+= (-1)**s*fact(n-s)*rho**(n-2*s)/(\n\t\t\tfact(s)*fact((n+am)/2-s)*fact((n-am)/2-s))\n\tVnm = Rnm*np.exp(1j*m*theta)",
"def getZernikeCoeffs(surface, order, printReport=False, norm='sqrt', radius=1):\n\n if order > nMax:\n raise ValueError('order must be less than {}.'.format(nMax+1))\n\n coeffs = np.zeros(order+1, dtype=np.float)\n\n # Make the support to evaluate the Zernike polynomials.\n ny = surface.shape[0]\n nx = surface.shape[1]\n x = np.linspace(-1., 1., nx)\n y = np.linspace(-1., 1., ny)\n [xx,yy] = np.meshgrid(x, y)\n r = np.sqrt(xx**2. + yy**2.)\n u = np.arctan2(yy, xx)\n\n # Loop over Zernike polynomials and use their orthogonality to determine their coefficients.\n # The active surface starts counting from Z1.\n for i in range(1,order+1):\n func = zernikes[i]\n zf = func(r, u)\n mask = (r > radius)\n zf[mask] = 0\n\n # Define the normalization factor.\n if norm == 'sqrt':\n zn = np.sqrt(zernikeNorm[i])\n elif norm == 'active-surface':\n zn = zernikeNorm[i]\n elif norm == 'one':\n zn = 1.\n\n # Get the coefficients like in a Fourier series.\n a = np.sum(surface*zf)*2.*2./nx/ny/np.pi*zn\n coeffs[i] = a\n\n # Print a table with the coefficients.\n if printReport:\n zernikePrint(coeffs)\n\n return coeffs",
"def chernNum(self, kx_Bz=np.array([0,4*np.pi/3]), \n ky_Bz=np.array([0,2*np.pi/np.sqrt(3)]), \n N_res=30):\n x_eps = 0.3 # shift from Dirac point\n x_res = 20\n kx_int = 0 + x_eps # -np.pi\n kx_fin = 4*np.pi/3 + x_eps\n Dx = (kx_fin - kx_int)/x_res\n\n y_res = 20\n ky_int = 0 # -np.pi\n ky_fin = 2*np.pi/np.sqrt(3)\n Dy = (ky_fin - ky_int)/y_res\n\n Nd = self.Nd # dimension of the Hamiltonian\n Dk = np.array([Dx,Dy], float)\n\n LF = np.zeros((Nd), dtype=complex)\n LF_arr = np.zeros((Nd,x_res, y_res), dtype=float)\n E_arr = np.zeros((Nd,x_res, y_res), dtype=float)\n sumN = np.zeros((Nd), dtype=complex)\n E_k = np.zeros((Nd), dtype=complex)\n chernN = np.zeros((Nd), dtype=complex)\n\n # Loop over kx\n for ix in range(x_res):\n kx = kx_int + ix*Dx\n\n # Loop over ky\n for iy in range(y_res):\n ky = ky_int + iy*Dy\n\n k_vec = np.array([kx,ky], float)\n\n LF, E_k = self.latF(k_vec, Dk, self.delta)\n\n sumN += LF\n\n # # save data for plotting\n LF_arr[:,ix,iy] = LF.imag\n\n E_arr[:,ix,iy] = np.sort(E_k.real)\n\n # End of ky Loop\n # End of kx Loop\n\n chernN = sumN.imag/(2*np.pi)\n print(\"Chern number bands are (%.3f, %.3f) \" \n %(chernN[0], chernN[1]))\n print(\"Sum of all bands Chern Number is %.2f \" %(sum(chernN)))\n return chernN, E_arr\n #################### ",
"def Z(phi = None):\n if phi == None:\n return sz\n else:\n return scipy.linalg.expm(-1j * phi / 2 * sz)",
"def zernikeFit(x, y, z,max_rad=225.,cm=[0,0],max_order=20):\n x = x - cm[0]\n y = y - cm[1]\n n = len(x)\n p = max_order\n rho = np.sqrt(x**2+y**2)/max_rad #normalize to unit circle.\n phi = np.arctan2(y,x)\n dataX = []\n ok = rho <= 1.\n for j in range(max_order):\n dataX.append(zernikel(j,rho[ok],phi[ok]))\n dataX=np.array(dataX).T\n beta,SSE,rank,sing = np.linalg.lstsq(dataX,z[ok])# SSE is the residual sum square\n sigma = np.sqrt(SSE/(n-p))\n betaErr = sigma/np.dot(dataX.T,dataX).diagonal()\n SST = np.var(z[ok])*(len(z[ok])-1)# SST is the sum((z_i - mean(z))^2)\n R2 = 1 - SSE/SST\n R2adj = 1-(1-R2)*(len(z[ok])-1)/(len(z[ok])-max_order)# adjusted R2 for quality of fit. \n fitted = np.dot(dataX,beta) # fitted value\n return beta,betaErr,R2adj,fitted",
"def _vzlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._zlerchphi(z_, a) for z_ in z])",
"def cplxgrid(m):\n m = m\n r = np.arange(0, m).reshape(m, 1) / m\n theta = np.pi * np.arange(-m, m) / m\n z = r * np.exp(1j * theta)\n\n return z",
"def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)",
"def zernikePoly(x, y, xOffset, yOffset, coefficients, xMax=-1e22, yMax=-1e22, verbose=False):\n\n if len(coefficients) > nMax + 1:\n raise ValueError('coefficients must have less than {} items.'.format(zernikies.nMax+1))\n\n if xMax == -1e22:\n xMax = np.nanmax(x - xOffset)\n if yMax == -1e22:\n yMax = np.nanmax(y - yOffset)\n\n xcn = (x - xOffset)/xMax\n ycn = (y - yOffset)/yMax\n\n rcn = np.sqrt(xcn**2. + ycn**2.)\n ucn = np.arctan2(ycn, xcn)\n\n z = zernikePolar(coefficients, rcn, ucn)\n\n if verbose:\n print(\"Zernike polynomials with coefficients\", coefficients)\n print(\"Their linear combination has mean: {0:.2e}, min: {1:.2e}, max: {2:.2e}\".format(np.mean(z), np.nanmin(z), np.nanmax(z)))\n\n return z",
"def _gridSampInMnInZemax(self, zfInMm, xfInMm, yfInMm, innerRinMm,\n outerRinMm, nx, ny, resFile=None):\n\n # Radial basis function approximation/interpolation of surface\n Ff = Rbf(xfInMm, yfInMm, zfInMm)\n\n # Number of grid points on x-, y-axis.\n # Alway extend 2 points on each side\n # Do not want to cover the edge? change 4->2 on both lines\n NUM_X_PIXELS = nx+4\n NUM_Y_PIXELS = ny+4\n\n # This is spatial extension factor, which is calculated by the slope\n # at edge\n extFx = (NUM_X_PIXELS-1) / (nx-1)\n extFy = (NUM_Y_PIXELS-1) / (ny-1)\n extFr = np.sqrt(extFx * extFy)\n\n # Delta x and y\n delx = outerRinMm*2*extFx / (NUM_X_PIXELS-1)\n dely = outerRinMm*2*extFy / (NUM_Y_PIXELS-1)\n\n # Minimum x and y\n minx = -0.5*(NUM_X_PIXELS-1)*delx\n miny = -0.5*(NUM_Y_PIXELS-1)*dely\n\n # Calculate the epsilon\n epsilon = 1e-4*min(delx, dely)\n\n # Write four numbers for the header line\n content = \"%d %d %.9E %.9E\\n\" % (NUM_X_PIXELS, NUM_Y_PIXELS, delx,\n dely)\n\n # Write the rows and columns\n for jj in range(1, NUM_X_PIXELS + 1):\n for ii in range(1, NUM_Y_PIXELS + 1):\n\n # x and y positions\n x = minx + (ii - 1) * delx\n y = miny + (jj - 1) * dely\n\n # Invert top to bottom, because Zemax reads (-x,-y) first\n y = -y\n\n # Calculate the radius\n r = np.sqrt(x**2 + y**2)\n\n # Set the value as zero when the radius is not between the\n # inner and outer radius.\n if (r < innerRinMm/extFr) or (r > outerRinMm*extFr):\n\n z = 0\n dx = 0\n dy = 0\n dxdy = 0\n\n # Get the value by the fitting\n else:\n\n # Get the z\n z = Ff(x, y)\n\n # Compute the dx\n tem1 = Ff((x+epsilon), y)\n tem2 = Ff((x-epsilon), y)\n dx = (tem1 - tem2)/(2.0*epsilon)\n\n # Compute the dy\n tem1 = Ff(x, (y+epsilon))\n tem2 = Ff(x, (y-epsilon))\n dy = (tem1 - tem2)/(2.0*epsilon)\n\n # Compute the dxdy\n tem1 = Ff((x+epsilon), (y+epsilon))\n tem2 = Ff((x-epsilon), (y+epsilon))\n tem3 = (tem1 - tem2)/(2.0*epsilon)\n\n tem1 = Ff((x+epsilon), (y-epsilon))\n tem2 = Ff((x-epsilon), (y-epsilon))\n tem4 = (tem1 - tem2)/(2.0*epsilon)\n\n dxdy = (tem3 - tem4)/(2.0*epsilon)\n\n content += \"%.9E %.9E %.9E %.9E\\n\" % (z, dx, dy, dxdy)\n\n # Write the surface residue data into the file\n if (resFile is not None):\n outid = open(resFile, \"w\")\n outid.write(content)\n outid.close()\n\n return content",
"def zn_pow(x, y, n):\n if y < 0:\n y = abs(y)\n x = inverse_in_zn(x, n)\n product = 1\n for i in range(y):\n product = (product * x) % n\n vprint(\"{}^{}={}\".format(x, i, product))\n return product",
"def Hill_Surf_Cj_xy(n, Miu):\n def func(x, y):\n r1 = ((x + Miu)**2 + y**2)**0.5\n r2 = ((x + Miu - 1)**2 + y**2)**0.5\n return n**2 * (x**2 + y**2) + 2 * (1 - Miu) / r1 + 2 * Miu / r2\n\n return func",
"def zernikeWLS(x, y, z, nZern, weights=None):\n\n # Use WLS to determine the Zernike coefficients.\n if weights is None:\n weights = np.ones(z.shape)\n\n weights = np.ma.masked_invalid(weights)\n \n x_ = np.ma.masked_invalid(x)\n x_ -= midPoint(x)\n y_ = np.ma.masked_invalid(y)\n y_ -= midPoint(y)\n \n fl_wls = getZernikeCoeffsOLS(x_, y_, z, nZern, weights=weights)\n \n return fl_wls",
"def zernike_radial(r, l, m, dr=0):\n m = jnp.abs(m)\n alpha = m\n beta = 0\n n = (l - m) // 2\n s = (-1) ** n\n jacobi_arg = 1 - 2 * r**2\n if dr == 0:\n out = r**m * _jacobi(n, alpha, beta, jacobi_arg, 0)\n elif dr == 1:\n f = _jacobi(n, alpha, beta, jacobi_arg, 0)\n df = _jacobi(n, alpha, beta, jacobi_arg, 1)\n out = m * r ** jnp.maximum(m - 1, 0) * f - 4 * r ** (m + 1) * df\n elif dr == 2:\n f = _jacobi(n, alpha, beta, jacobi_arg, 0)\n df = _jacobi(n, alpha, beta, jacobi_arg, 1)\n d2f = _jacobi(n, alpha, beta, jacobi_arg, 2)\n out = (\n (m - 1) * m * r ** jnp.maximum(m - 2, 0) * f\n - 4 * (2 * m + 1) * r**m * df\n + 16 * r ** (m + 2) * d2f\n )\n elif dr == 3:\n f = _jacobi(n, alpha, beta, jacobi_arg, 0)\n df = _jacobi(n, alpha, beta, jacobi_arg, 1)\n d2f = _jacobi(n, alpha, beta, jacobi_arg, 2)\n d3f = _jacobi(n, alpha, beta, jacobi_arg, 3)\n out = (\n (m - 2) * (m - 1) * m * r ** jnp.maximum(m - 3, 0) * f\n - 12 * m**2 * r ** jnp.maximum(m - 1, 0) * df\n + 48 * (m + 1) * r ** (m + 1) * d2f\n - 64 * r ** (m + 3) * d3f\n )\n elif dr == 4:\n f = _jacobi(n, alpha, beta, jacobi_arg, 0)\n df = _jacobi(n, alpha, beta, jacobi_arg, 1)\n d2f = _jacobi(n, alpha, beta, jacobi_arg, 2)\n d3f = _jacobi(n, alpha, beta, jacobi_arg, 3)\n d4f = _jacobi(n, alpha, beta, jacobi_arg, 4)\n out = (\n (m - 3) * (m - 2) * (m - 1) * m * r ** jnp.maximum(m - 4, 0) * f\n - 8 * m * (2 * m**2 - 3 * m + 1) * r ** jnp.maximum(m - 2, 0) * df\n + 48 * (2 * m**2 + 2 * m + 1) * r**m * d2f\n - 128 * (2 * m + 3) * r ** (m + 2) * d3f\n + 256 * r ** (m + 4) * d4f\n )\n else:\n raise NotImplementedError(\n \"Analytic radial derivatives of Zernike polynomials for order>4 \"\n + \"have not been implemented.\"\n )\n return s * jnp.where((l - m) % 2 == 0, out, 0)",
"def zgrid(zetas=None, wns=None, ax=None):\n\n fig = plt.gcf()\n if ax is None:\n ax = fig.gca()\n\n # Constant damping lines\n if zetas is None:\n zetas = linspace(0, 0.9, 10)\n for zeta in zetas:\n # Calculate in polar coordinates\n factor = zeta/sqrt(1-zeta**2)\n x = linspace(0, sqrt(1-zeta**2), 200)\n ang = pi*x\n mag = exp(-pi*factor*x)\n # Draw upper part in retangular coordinates\n xret = mag*cos(ang)\n yret = mag*sin(ang)\n ax.plot(xret, yret, ':', color='grey', lw=0.75)\n # Draw lower part in retangular coordinates\n xret = mag*cos(-ang)\n yret = mag*sin(-ang)\n ax.plot(xret, yret, ':', color='grey', lw=0.75)\n # Annotation\n an_i = int(len(xret)/2.5)\n an_x = xret[an_i]\n an_y = yret[an_i]\n ax.annotate(str(round(zeta, 2)), xy=(an_x, an_y),\n xytext=(an_x, an_y), size=7)\n\n # Constant natural frequency lines\n if wns is None:\n wns = linspace(0, 1, 10)\n for a in wns:\n # Calculate in polar coordinates\n x = linspace(-pi/2, pi/2, 200)\n ang = pi*a*sin(x)\n mag = exp(-pi*a*cos(x))\n # Draw in retangular coordinates\n xret = mag*cos(ang)\n yret = mag*sin(ang)\n ax.plot(xret, yret, ':', color='grey', lw=0.75)\n # Annotation\n an_i = -1\n an_x = xret[an_i]\n an_y = yret[an_i]\n num = '{:1.1f}'.format(a)\n ax.annotate(r\"$\\frac{\"+num+r\"\\pi}{T}$\", xy=(an_x, an_y),\n xytext=(an_x, an_y), size=9)\n\n _final_setup(ax)\n return ax, fig",
"def strain_distribution(self,na_z,phi):\r\n\t\treturn (self.mesh_center - na_z)*phi",
"def zernike_coeff(filename=None,zernike_max_order=20):\n hdu = pf.open(filename)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n sigma = 1.08/0.27\n for hdui in hdu[1:]:\n img = hdui.data[0][4:].reshape(npix,npix)\n img = rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,M20,M22,M31,M33])\n data=np.array(data)\n betaAll=[]\n #betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n #betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n x=hdu[0].header['x']\n y=hdu[0].header['y']\n z=hdu[0].header['z']\n phi = hdu[0].header['phi']\n theta=hdu[0].header['theta']\n s_fwhm=hdu[0].header['s_fwhm']\n e1=hdu[0].header['e1']\n e2=hdu[0].header['e2']\n return x,y,z,theta,phi,s_fwhm,e1,e2,betaAll,R2adjAll",
"def eulerphi(n):\r\n\treturn euler_phi(n)",
"def buildZGrid(self, plot=False):\r\n\r\n print(\"Constructing Z corners\")\r\n\r\n # self.zcorn = np.array(self.zcorn, dtype=float)\r\n # temp = np.zeros( ((self.ne+1)*(self.nn+1)*self.nz) )\r\n temp = []\r\n count = 0\r\n for item in self.zcorn:\r\n\r\n if \"*\" in item:\r\n ct = (int)(item.split(\"*\")[0])\r\n vl = (float)(item.split(\"*\")[1])\r\n temp += np.tile(vl, ct).tolist()\r\n count += ct\r\n else:\r\n temp += [(float)(item)]\r\n count += 1\r\n\r\n # layers = np.resize(temp, (8, self.ne*self.nn*self.nz ))\r\n layers = np.resize(temp, (self.nz * 2, self.ne * self.nn * 4))\r\n \"\"\"\r\n plt.plot(newtemp[0,:]) # TOP 0 0\r\n plt.plot(newtemp[1,:]) # SAME -- # BOTTOM 0 1\r\n #plt.plot(newtemp[2,:]) # SAME -- # TOP 1 2\r\n\r\n plt.plot(newtemp[3,:]) # SAME -- # BOTTOM 1 3\r\n #plt.plot(newtemp[4,:]) # SAME -- # TOP 2 4\r\n\r\n plt.plot(newtemp[5,:]) # SAME -- # BOTTOM 2 5\r\n #plt.plot(newtemp[6,:]) # SAME -- # TOP 3 6\r\n plt.plot(newtemp[7,:]) # BOTTOM 3 7\r\n \"\"\"\r\n self.ZZT = {} # zztop ha ha...two year's later this is still funny -TI\r\n self.ZZB = {}\r\n for ilay in range(self.nz):\r\n self.ZZT[ilay] = np.zeros((self.ndx, self.ndy))\r\n self.ZZB[ilay] = np.zeros((self.ndx, self.ndy))\r\n iis = 0\r\n # plt.plot(layers[ilay*2])\r\n for iin in range(self.nn):\r\n nears = {}\r\n fars = {}\r\n bnears = {}\r\n bfars = {}\r\n for iif in range(2):\r\n # top\r\n nears[iif] = layers[ilay * 2][iis:iis + 2 * self.ne][0::2].tolist()\r\n fars[iif] = layers[ilay * 2][iis:iis + 2 * self.ne][1::2].tolist()\r\n layers[ilay * 2][iis:iis + 2 * self.ne][0::2] *= 0. # check\r\n layers[ilay * 2][iis:iis + 2 * self.ne][1::2] *= 0.\r\n nears[iif].append(fars[iif][-1])\r\n fars[iif] = [nears[iif][0]] + fars[iif]\r\n # bottom\r\n bnears[iif] = layers[ilay * 2 + 1][iis:iis + 2 * self.ne][0::2].tolist()\r\n bfars[iif] = layers[ilay * 2 + 1][iis:iis + 2 * self.ne][1::2].tolist()\r\n layers[ilay * 2 + 1][iis:iis + 2 * self.ne][0::2] *= 0.\r\n layers[ilay * 2 + 1][iis:iis + 2 * self.ne][1::2] *= 0.\r\n bnears[iif].append(bfars[iif][-1])\r\n bfars[iif] = [bnears[iif][0]] + bfars[iif]\r\n #\r\n iis += 2 * self.ne\r\n\r\n self.ZZT[ilay][:, iin] = nears[0]\r\n self.ZZB[ilay][:, iin] = bnears[0]\r\n # NaN mask for visualizing, but can be sort of a pain to deal with\r\n # imask = np.nonzero( 1-self.ActiveCells[:,iin,ilay] )\r\n # self.ZZT[ilay][:,iin][1::][imask] = np.nan\r\n # self.ZZB[ilay][:,iin][1::][imask] = np.nan\r\n # if self.ActiveCells[0,iin,ilay] == 0:\r\n # self.ZZT[ilay][:,iin][0] = np.nan\r\n # self.ZZB[ilay][:,iin][0] = np.nan\r\n if iin == self.nn - 1:\r\n self.ZZT[ilay][:, iin + 1] = fars[1]\r\n self.ZZB[ilay][:, iin + 1] = bfars[1]\r\n # NaN mask\r\n # self.ZZT[ilay][:,iin+1][1::][imask] = np.nan\r\n # self.ZZB[ilay][:,iin+1][1::][imask] = np.nan\r\n # if self.ActiveCells[0,iin,ilay] == 0:\r\n # self.ZZT[ilay][:,iin+1][0] = np.nan\r\n # self.ZZB[ilay][:,iin+1][0] = np.nan\r\n\r\n print(\"Layers ||\", np.linalg.norm(layers), \"||\")\r\n # exit()\r\n\r\n # visualize\r\n if plot:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe( self.X0, self.Y0, self.Z0, rstride=1, cstride=1)\r\n\r\n ax.plot_wireframe(self.X0, self.Y0, self.ZZT[0], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[1], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[2], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[3], rstride=1, cstride=1, 
color=\"blue\")\r\n\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZB[3], rstride=1, cstride=1, color=\"green\")\r\n\r\n plt.gca().set_xlim(np.min(self.X0), np.max(self.X0))\r\n plt.gca().set_ylim(np.max(self.Y0), np.min(self.Y0))\r\n # plt.gca().set_zlim( np.max(self.ZZB[3]), np.min(self.ZZT[0]) )\r\n plt.gca().set_zlim(5000, 4000)\r\n plt.savefig(\"mesh.png\")\r\n plt.show()",
"def correlationZernike():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n bb = b[:,7:68] #choose only those corresponding to M202\n #idx = np.concatenate((np.arange(9,28),np.arange(29,48),np.arange(49,68)))\n #bb = b[:,idx]\n evalue,evector,pca = getPCA(bb)\n coeff = np.corrcoef(bb.T)\n ok = coeff >= 0.65\n pl.matshow(coeff*ok)\n ind = np.arange(0,60)\n pl.xticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90,color='black')\n pl.yticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'))\n pl.grid(color='yellow')",
"def eulerphi(n):\n\treturn euler_phi(n)",
"def formula_n(self, n: int, x: np.ndarray) -> np.ndarray:\n\n # express x as z = x/(x-1)\n z = x / (x - 1)\n\n # special case @n=0\n if n == 0:\n kn = 1 - self._vlerchphi(1 / z, n + 1)\n else:\n kn = 1 / n - self._vzlerchphi(1 / z, n + 1)\n\n # return\n return kn",
"def Rpz(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,1] = -S\n M[1,0] = +S\n M[1,1] = +C\n\n return M"
]
| [
"0.72611326",
"0.67792964",
"0.63562363",
"0.6140943",
"0.60044694",
"0.5997871",
"0.5987661",
"0.58109844",
"0.5753353",
"0.56706387",
"0.5536199",
"0.5432994",
"0.5377874",
"0.5369935",
"0.5335055",
"0.52910936",
"0.5261898",
"0.52562475",
"0.5235264",
"0.5219978",
"0.52159154",
"0.5198074",
"0.5109904",
"0.5106375",
"0.5092731",
"0.5092123",
"0.50751984",
"0.5061484",
"0.5055018",
"0.50404686"
]
| 0.72030765 | 1 |
Create a unit disk, convert to rho and phi, and mask the grid. | def unit_disk(imgSize):
# src = np.nan_to_num(imgsrc)
if not (imgSize > 0):
print('Nside must be > 0')
raise AssertionError()
nx = imgSize #, ny = src.shape
grid = (np.indices((nx, nx), dtype=np.float) - nx/2) / (nx*1./2) # create unit grid [-1,1]
grid_rho, grid_phi = cart2pol(x=grid[0], y=grid[-1])
# grid_rho = (grid**2.0).sum(0)**0.5 # rho = sqrt(x^2+y^2)
# grid_phi = np.arctan2(grid[0], grid[-1]) # phi = itan(x/y)
grid_mask = grid_rho <= 1
return grid_rho, grid_phi, grid_mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tt72_disk(m=10.e11|units.MSun,\n r_min=25.|units.kpc,\n n_rings=[12,15,18,21,24,27,30,33,36,39,42,45],\n r_rings_rel=[0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75],\n disk_id='a',\n eps=0.|units.m):\n disk = Particles()\n \n for i,ri in enumerate(r_rings_rel):\n \n disk_rad_i = Particles(n_rings[i])\n \n a = ri*r_min\n phi_i = numpy.linspace(0., pipi, num=n_rings[i], endpoint=False)\n \n disk_rad_i.x = a * numpy.cos(phi_i)\n disk_rad_i.y = a * numpy.sin(phi_i)\n disk_rad_i.z = 0. * a\n \n x_r = disk_rad_i.x/a\n y_r = disk_rad_i.y/a\n \n #vc = (constants.G*m/a)**0.5\n vc = ( constants.G*m*a**2/(a**2 + eps**2)**1.5 )**0.5\n disk_rad_i.vx = -vc * y_r\n disk_rad_i.vy = vc * x_r\n disk_rad_i.vz = 0.0 * vc\n \n disk.add_particles(disk_rad_i)\n \n # test particles\n disk.mass = 0.|units.MSun\n \n # identification of the disk\n disk.id = disk_id\n \n return disk",
"def _disk(radius):\n\n coords = np.arange(-round(radius,0), round(radius,0)+1)\n X, Y = np.meshgrid(coords, coords)\n disk_out = 1*np.array((X**2 + Y**2) < (radius+0.5)**2)\n # round improves behavior with irrational radii\n return(disk_out)",
"def run(self):\n config = self.config\n logger = self.logger\n\n timeStart = time.time()\n\n section = config['horizontal_grid']\n nx = section.getint('nx')\n ny = section.getint('ny')\n dc = section.getfloat('dc')\n\n dsMesh = make_planar_hex_mesh(nx=nx, ny=ny, dc=dc, nonperiodic_x=True,\n nonperiodic_y=False)\n write_netcdf(dsMesh, 'base_mesh.nc')\n\n dsMesh = cull(dsMesh, logger=logger)\n dsMesh = convert(dsMesh, graphInfoFileName='culled_graph.info',\n logger=logger)\n write_netcdf(dsMesh, 'culled_mesh.nc')\n\n section = config['vertical_grid']\n maxDepth = section.getfloat('bottom_depth')\n nVertLevels = section.getint('vert_levels')\n\n section = config['solitary_wave']\n config_eos_linear_alpha = section.getfloat('eos_linear_alpha')\n config_eos_linear_Tref = section.getfloat('eos_linear_Tref')\n config_eos_linear_Sref = section.getfloat('eos_linear_Sref')\n config_eos_linear_densityref = section.getfloat(\n 'eos_linear_densityref')\n h1 = section.getfloat('h1')\n deltaRho = section.getfloat('deltaRho')\n interfaceThick = section.getfloat('interfaceThick')\n amplitude = section.getfloat('amplitude')\n wavelenght = section.getfloat('wavelenght')\n\n # comment('obtain dimensions and mesh variables')\n # vertical_coordinate = 'uniform'\n\n ds = dsMesh.copy()\n nCells = ds.nCells.size\n nEdges = ds.nEdges.size\n nVertices = ds.nVertices.size\n\n xCell = ds.xCell\n angleEdge = ds.angleEdge\n\n # initialize velocity field\n u = np.zeros([1, nEdges, nVertLevels])\n\n # comment('create and initialize variables')\n time1 = time.time()\n\n surfaceStress = np.nan * np.ones(nCells)\n atmosphericPressure = np.nan * np.ones(nCells)\n boundaryLayerDepth = np.nan * np.ones(nCells)\n\n ds['bottomDepth'] = maxDepth * xarray.ones_like(xCell)\n ds['ssh'] = xarray.zeros_like(xCell)\n\n init_vertical_coord(config, ds)\n\n # initial salinity, density, temperature\n ds['salinity'] = (config_eos_linear_Sref *\n xarray.ones_like(ds.zMid)).where(ds.cellMask)\n ds['density'] = \\\n (config_eos_linear_densityref -\n (0.5*deltaRho)*(np.tanh(\n (2/interfaceThick)*np.arctanh(0.99) *\n (ds.zMid + amplitude*np.exp(\n -(ds.xCell/wavelenght)*(ds.xCell/wavelenght)) + h1))))\n # T = Tref - (rho - rhoRef)/alpha\n ds['temperature'] = \\\n (config_eos_linear_Tref\n - (ds.density - config_eos_linear_densityref) /\n config_eos_linear_alpha)\n\n # initial velocity on edges\n ds['normalVelocity'] = (('Time', 'nEdges', 'nVertLevels',),\n np.zeros([1, nEdges, nVertLevels]))\n normalVelocity = ds['normalVelocity']\n for iEdge in range(0, nEdges):\n normalVelocity[0, iEdge, :] = u[0, iEdge, :] * \\\n math.cos(angleEdge[iEdge])\n\n # Coriolis parameter\n ds['fCell'] = (('nCells', 'nVertLevels',),\n np.zeros([nCells, nVertLevels]))\n ds['fEdge'] = (('nEdges', 'nVertLevels',),\n np.zeros([nEdges, nVertLevels]))\n ds['fVertex'] = (('nVertices', 'nVertLevels',),\n np.zeros([nVertices, nVertLevels]))\n\n # surface fields\n surfaceStress[:] = 0.0\n atmosphericPressure[:] = 0.0\n boundaryLayerDepth[:] = 0.0\n print(f' time: {time.time() - time1}')\n\n # comment('finalize and write file')\n time1 = time.time()\n\n # If you prefer not to have NaN as the fill value, you should consider\n # using mpas_tools.io.write_netcdf() instead\n write_netcdf(ds, 'initial_state.nc')\n print(f' time: {time.time() - time1}')\n print(f'Total time: {time.time() - timeStart}')",
"def rand_disk(d0):\n phi = np.random.rand(d0) * (2 * np.pi)\n rad = pow(np.random.rand(d0), 1.0 / 2.0)\n return (np.array([np.cos(phi), np.sin(phi)]) * rad).T",
"def disk_r(N=10,h=1.0,z0=1.0,p0=1.0,Mtot=1.0):\n r = np.linspace(0,100*h,N)\n z = np.array([random.uniform(0,z0) for _ in range(N)])\n #z = np.random.uniform(0,100*z0,N)\n #q_r = np.random.uniform(0,1,N)\n q_r = np.array([random.uniform(0,1) for _ in range(N)]) \n\n LHS = np.exp(-r/h)*(1+ r/h)\n RHS = 1 - ( (2*np.pi*q_r*p0*h**2)/(Mtot*np.tanh(z/z0)) )\n \n w = np.interp(r,LHS,RHS)\n u = np.array([random.uniform(0,1) for _ in range(N)])\n #u = np.random.uniform(0,1,N)\n theta = 2*np.pi*u\n\n x = w*np.cos(theta)\n y = w*np.sin(theta)\n\n p = np.stack((x,y,z),axis=-1)\n\n return p",
"def get_planetesimals_disk(n_disk, r_in=20.0|units.AU, r_out=50.0|units.AU, m_star=1.0|units.MSun, \r\n alpha=None, m_disk=1.0e-15|units.MSun, seed = 42, disk_num = 1):\r\n numpy.random.seed(seed) # Mess with random seed\r\n\r\n for i in xrange(int(disk_num) + 4):\r\n planetesimals = Particles(n_disk)\r\n print \"Seed:\", seed\r\n print planetesimals.key[:10]\r\n planetesimals.mass = 0.0|units.MJupiter\r\n planetesimals.radius = 100.0|units.km\r\n planetesimals.collection_attributes.timestamp = 0.0 | units.yr\r\n \r\n if alpha is not None:\r\n converter = nbody_system.nbody_to_si(m_disk, r_in)\r\n power_disk = ProtoPlanetaryDisk(n_disk, convert_nbody=converter, densitypower=alpha, \r\n Rmin=1.0, Rmax=1.0*r_out/r_in, q_out=0.0, discfraction=1.0).result\r\n x = power_disk.x\r\n y = power_disk.y\r\n z = power_disk.z # <--- Mystery error?\r\n\r\n print \"X\"\r\n print x.value_in(units.AU)\r\n print \"Y\"\r\n print y.value_in(units.AU)\r\n print \"Z\"\r\n print z.value_in(units.AU)\r\n #z = 0\r\n\r\n print \"MASS\"\r\n print power_disk.mass\r\n\r\n #power_disk.mass = 0.0 * power_disk.mass ###### THIS WORKS!!!! (if you want to switch to this later)\r\n\r\n print power_disk.mass\r\n \r\n a = (x**2 + y**2)**0.5\r\n print \"SM-AXIS\"\r\n print a.value_in(units.AU)\r\n \r\n phi = numpy.arctan2(y.value_in(units.AU), x.value_in(units.AU))\r\n vc = (constants.G*m_star/a)**0.5\r\n vx = - vc * numpy.sin(phi)\r\n vy = vc * numpy.cos(phi)\r\n vz = 0.0 * vc\r\n # vz = - vc * numpy.sin(phi) # ???????????????????????????????????????????????????????????????? #\r\n\r\n print \"VX\"\r\n print vx.value_in(units.km / units.s)\r\n print \"VY\"\r\n print vy.value_in(units.km / units.s)\r\n print \"VZ\"\r\n print vz.value_in(units.km / units.s)\r\n\r\n print \"PLANAR VELOCITY VECTOR\"\r\n print ((vx**2 + vy**2)**(0.5)).value_in(units.km / units.s)\r\n\r\n #vx = power_disk.vx\r\n #vy = power_disk.vy\r\n #vz = power_disk.vz\r\n\r\n #print \"POWER DISK VX\"\r\n #print vx.value_in(units.km / units.s)\r\n #print \"POWER DISK VY\"\r\n #print vy.value_in(units.km / units.s)\r\n #print \"POWER DISK VZ\"\r\n #print vz.value_in(units.km / units.s)\r\n \r\n else:\r\n a = r_in + (r_out-r_in)*numpy.random.rand(n_disk)\r\n phi_rand = 2.0 * numpy.pi * numpy.random.rand(n_disk)\r\n \r\n x = a * numpy.cos(phi_rand)\r\n y = a * numpy.sin(phi_rand)\r\n z = 0.0 * a\r\n \r\n vc = (constants.G*m_star/a)**0.5\r\n vx = - vc * numpy.sin(phi_rand)\r\n vy = vc * numpy.cos(phi_rand)\r\n vz = 0.0 * vc\r\n \r\n planetesimals.x = x\r\n planetesimals.y = y\r\n planetesimals.z = z\r\n \r\n planetesimals.vx = vx\r\n planetesimals.vy = vy\r\n planetesimals.vz = vz\r\n \r\n return planetesimals",
"def diskgrid(n, radius=1, alpha=2):\n # Golden ratio.\n phi = 0.5 * (np.sqrt(5) + 1)\n # Calculate coordinates of each point to uniformly fill the unit disk.\n k = np.arange(1, n + 1)\n theta = 2 * np.pi * k / phi ** 2\n r = np.sqrt((k - 0.5) / (n - 0.5))\n # Transform r to increase the density towards the center.\n if alpha > 0:\n r = (np.exp(alpha * r) - 1) / (np.exp(alpha) - 1)\n r *= radius\n return r * np.cos(theta), r * np.sin(theta)",
"def create(self, directory):\n\n if not self.preallocated:\n if directory:\n self.filename = '%s/%s' % (directory, self.filename)\n logging.info('Creating disk image: %s' % self.filename)\n qemu_img_output = run_cmd(qemu_img_path(), 'create', '-f', 'raw', self.filename, '%dM' % self.size)\n if not os.path.exists(self.filename): \n logging.info(\"Problem while creating raw image: %s\" % qemu_img_output)\n raise Exception(\"Problem while creating raw image: %s\" % qemu_img_output)\n\n # From here, we assume that self.filename refers to whatever holds the disk image,\n # be it a file, a partition, logical volume, actual disk..\n\n logging.info('Adding partition table to disk image: %s' % self.filename)\n run_cmd('parted', '--script', self.filename, 'mklabel', 'msdos')\n\n # Partition the disk \n for part in self.partitions:\n part.create(self)\n\n logging.info('Creating loop devices corresponding to the created partitions')\n self.vm.add_clean_cb(lambda : self.unmap(ignore_fail=True))\n kpartx_output = run_cmd('kpartx', '-av', self.filename)\n parts = []\n for line in kpartx_output.split('\\n'):\n if line == \"\" or line.startswith(\"gpt:\") or line.startswith(\"dos:\"):\n continue\n if line.startswith(\"add\"):\n parts.append(line)\n continue\n logging.error('Skipping unknown line in kpartx output (%s)' % line)\n mapdevs = []\n for line in parts:\n mapdevs.append(line.split(' ')[2])\n for (part, mapdev) in zip(self.partitions, mapdevs):\n part.mapdev = '/dev/mapper/%s' % mapdev\n\n # At this point, all partitions are created and their mapping device has been\n # created and set as .mapdev\n\n # Adds a filesystem to the partition\n logging.info(\"Creating file systems\")\n for part in self.partitions:\n part.mkfs()",
"def run(self):\n initial_condition = self.initial_condition\n logger = self.logger\n section = self.config['hydro_radial']\n\n nx = section.getint('nx')\n ny = section.getint('ny')\n dc = section.getfloat('dc')\n\n dsMesh = make_planar_hex_mesh(nx=nx, ny=ny, dc=dc, nonperiodic_x=True,\n nonperiodic_y=True)\n\n write_netcdf(dsMesh, 'grid.nc')\n\n dsMesh = cull(dsMesh, logger=logger)\n dsMesh = convert(dsMesh, logger=logger)\n write_netcdf(dsMesh, 'mpas_grid.nc')\n\n levels = section.get('levels')\n args = ['create_landice_grid_from_generic_MPAS_grid.py',\n '-i', 'mpas_grid.nc',\n '-o', 'landice_grid.nc',\n '-l', levels,\n '--hydro',\n '--diri']\n\n check_call(args, logger)\n\n make_graph_file(mesh_filename='landice_grid.nc',\n graph_filename='graph.info')\n\n _setup_hydro_radial_initial_conditions(\n logger, filename='landice_grid.nc',\n initial_condition=initial_condition)",
"def init_disk(self):\n self.put(self.SIZE/2-1, self.SIZE/2-1,\n Disk(self.SIZE/2-1, self.SIZE/2-1, 255, self, self.DISKSIZE))\n self.put(self.SIZE/2-1, self.SIZE/2,\n Disk(self.SIZE/2-1, self.SIZE/2, 0, self, self.DISKSIZE))\n self.put(self.SIZE/2, self.SIZE/2-1,\n Disk(self.SIZE/2, self.SIZE/2-1, 0, self, self.DISKSIZE))\n self.put(self.SIZE/2, self.SIZE/2,\n Disk(self.SIZE/2, self.SIZE/2, 255, self, self.DISKSIZE))",
"def createDisk(self , name):\n return",
"def flat_disk(radius):\n n = 2*radius + 1\n return np.stack([\n np.zeros((n, n), bool), \n morphology.disk(radius).astype(bool), \n np.zeros((n, n), bool)\n ])",
"def deposit_rho_gpu(x, y, z, w,\r\n invdz, zmin, Nz,\r\n invdr, rmin, Nr,\r\n rho0, rho1,\r\n rho2, rho3,\r\n cell_idx, prefix_sum):\r\n # Get the 1D CUDA grid\r\n i = cuda.grid(1)\r\n # Deposit the field per cell in parallel (for threads < number of cells)\r\n if i < prefix_sum.shape[0]:\r\n # Calculate the cell index in 2D from the 1D threadIdx\r\n iz = int(i / Nr)\r\n ir = int(i - iz * Nr)\r\n # Calculate the inclusive offset for the current cell\r\n # It represents the number of particles contained in all other cells\r\n # with an index smaller than i + the total number of particles in the\r\n # current cell (inclusive).\r\n incl_offset = np.int32(prefix_sum[i])\r\n # Calculate the frequency per cell from the offset and the previous\r\n # offset (prefix_sum[i-1]).\r\n if i > 0:\r\n frequency_per_cell = np.int32(incl_offset - prefix_sum[i - 1])\r\n if i == 0:\r\n frequency_per_cell = np.int32(incl_offset)\r\n # Initialize the local field value for\r\n # all four possible deposition directions\r\n # Mode 0, 1 for r, t, z\r\n # 1 : lower in r, lower in z\r\n # 2 : lower in r, upper in z\r\n # 3 : upper in r, lower in z\r\n # 4 : upper in r, upper in z\r\n R1_m0 = 0. + 0.j\r\n R2_m0 = 0. + 0.j\r\n R3_m0 = 0. + 0.j\r\n R4_m0 = 0. + 0.j\r\n # ------------\r\n R1_m1 = 0. + 0.j\r\n R2_m1 = 0. + 0.j\r\n R3_m1 = 0. + 0.j\r\n R4_m1 = 0. + 0.j\r\n # Loop over the number of particles per cell\r\n for j in range(frequency_per_cell):\r\n # Get the particle index before the sorting\r\n # --------------------------------------------\r\n # (Since incl_offset is a cumulative sum of particle number,\r\n # and since python index starts at 0, one has to add -1)\r\n ptcl_idx = incl_offset - 1 - j\r\n\r\n # Preliminary arrays for the cylindrical conversion\r\n # --------------------------------------------\r\n # Position\r\n xj = x[ptcl_idx]\r\n yj = y[ptcl_idx]\r\n zj = z[ptcl_idx]\r\n # Weights\r\n wj = w[ptcl_idx]\r\n\r\n # Cylindrical conversion\r\n rj = math.sqrt(xj**2 + yj**2)\r\n # Avoid division by 0.\r\n if (rj != 0.):\r\n invr = 1. 
/ rj\r\n cos = xj * invr # Cosine\r\n sin = yj * invr # Sine\r\n else:\r\n cos = 1.\r\n sin = 0.\r\n exptheta_m0 = 1.\r\n exptheta_m1 = cos + 1.j * sin\r\n\r\n # Get linear weights for the deposition\r\n # --------------------------------------------\r\n # Positions of the particles, in the cell unit\r\n r_cell = invdr * (rj - rmin) - 0.5\r\n z_cell = invdz * (zj - zmin) - 0.5\r\n # Original index of the uppper and lower cell\r\n ir_lower = int(math.floor(r_cell))\r\n ir_upper = ir_lower + 1\r\n iz_lower = int(math.floor(z_cell))\r\n iz_upper = iz_lower + 1\r\n # Linear weight\r\n Sr_lower = ir_upper - r_cell\r\n Sr_upper = r_cell - ir_lower\r\n Sz_lower = iz_upper - z_cell\r\n Sz_upper = z_cell - iz_lower\r\n # Set guard weights to zero\r\n Sr_guard = 0.\r\n\r\n # Treat the boundary conditions\r\n # --------------------------------------------\r\n # guard cells in lower r\r\n if ir_lower < 0:\r\n Sr_guard = Sr_lower\r\n Sr_lower = 0.\r\n ir_lower = 0\r\n # absorbing in upper r\r\n if ir_lower > Nr - 1:\r\n ir_lower = Nr - 1\r\n if ir_upper > Nr - 1:\r\n ir_upper = Nr - 1\r\n # periodic boundaries in z\r\n # lower z boundaries\r\n if iz_lower < 0:\r\n iz_lower += Nz\r\n if iz_upper < 0:\r\n iz_upper += Nz\r\n # upper z boundaries\r\n if iz_lower > Nz - 1:\r\n iz_lower -= Nz\r\n if iz_upper > Nz - 1:\r\n iz_upper -= Nz\r\n\r\n # Calculate rho\r\n # --------------------------------------------\r\n # Mode 0\r\n R_m0 = wj * exptheta_m0\r\n # Mode 1\r\n R_m1 = wj * exptheta_m1\r\n\r\n # Caculate the weighted currents for each\r\n # of the four possible direction\r\n # --------------------------------------------\r\n if ir_lower == ir_upper:\r\n # In the case that ir_lower and ir_upper are equal,\r\n # the current is added only to the array corresponding\r\n # to ir_lower.\r\n # (This is the case for the boundaries in r)\r\n R1_m0 += Sz_lower * Sr_lower * R_m0\r\n R1_m0 += Sz_lower * Sr_upper * R_m0\r\n R3_m0 += Sz_upper * Sr_lower * R_m0\r\n R3_m0 += Sz_upper * Sr_upper * R_m0\r\n # -----------------------------\r\n R1_m1 += Sz_lower * Sr_lower * R_m1\r\n R1_m1 += Sz_lower * Sr_upper * R_m1\r\n R3_m1 += Sz_upper * Sr_lower * R_m1\r\n R3_m1 += Sz_upper * Sr_upper * R_m1\r\n # -----------------------------\r\n if ir_lower != ir_upper:\r\n # In the case that ir_lower and ir_upper are different,\r\n # add the current to the four arrays according to\r\n # the direction.\r\n R1_m0 += Sz_lower * Sr_lower * R_m0\r\n R2_m0 += Sz_lower * Sr_upper * R_m0\r\n R3_m0 += Sz_upper * Sr_lower * R_m0\r\n R4_m0 += Sz_upper * Sr_upper * R_m0\r\n # -----------------------------\r\n R1_m1 += Sz_lower * Sr_lower * R_m1\r\n R2_m1 += Sz_lower * Sr_upper * R_m1\r\n R3_m1 += Sz_upper * Sr_lower * R_m1\r\n R4_m1 += Sz_upper * Sr_upper * R_m1\r\n # -----------------------------\r\n if ir_lower == ir_upper == 0:\r\n # Treat the guard cells.\r\n # Add the current to the guard cells\r\n # for particles that had an original\r\n # cell index < 0.\r\n R1_m0 += -1. * Sz_lower * Sr_guard * R_m0\r\n R3_m0 += -1. * Sz_upper * Sr_guard * R_m0\r\n # ---------------------------------\r\n R1_m1 += -1. * Sz_lower * Sr_guard * R_m1\r\n R3_m1 += -1. * Sz_upper * Sr_guard * R_m1\r\n # Write the calculated field values to\r\n # the field arrays defined on the interpolation grid\r\n rho0[iz, ir, 0] = R1_m0\r\n rho0[iz, ir, 1] = R1_m1\r\n rho1[iz, ir, 0] = R2_m0\r\n rho1[iz, ir, 1] = R2_m1\r\n rho2[iz, ir, 0] = R3_m0\r\n rho2[iz, ir, 1] = R3_m1\r\n rho3[iz, ir, 0] = R4_m0\r\n rho3[iz, ir, 1] = R4_m1",
"def _prepareDiskObject(**kwargs):\n storage_domain_name = kwargs.pop('storagedomain', None)\n\n # Tuple (lun_address, lun_target, lun_id, lun_port)\n lun = (kwargs.pop('lun_address', None), kwargs.pop('lun_target', None),\n kwargs.pop('lun_id', None), kwargs.pop('lun_port', 3260))\n # Tuple (username, password)\n lun_creds = (kwargs.pop('lun_username', None),\n kwargs.pop('lun_password', None))\n type_ = kwargs.pop('type_', None)\n\n storage_connection = kwargs.pop('storage_connection', None)\n\n if lun != (None, None, None, 3260) and storage_connection:\n logger.error(\n \"You cannot set storage connection id and LUN params in one call!\")\n return None\n kwargs.pop('active', None)\n\n disk = kwargs.pop('update', None)\n if disk is None:\n disk = data_st.Disk(**kwargs)\n\n if storage_connection is not None:\n storage = data_st.HostStorage()\n storage.id = storage_connection\n disk.set_lun_storage(storage)\n\n if storage_domain_name is not None:\n storage_domain = STORAGE_DOMAIN_API.find(storage_domain_name,\n NAME_ATTR)\n storage_domains = data_st.StorageDomains()\n storage_domains.add_storage_domain(storage_domain)\n disk.storage_domains = storage_domains\n\n # quota\n quota_id = kwargs.pop('quota', None)\n if quota_id == '':\n disk.set_quota(data_st.Quota())\n elif quota_id:\n disk.set_quota(data_st.Quota(id=quota_id))\n\n if lun != (None, None, None, 3260):\n direct_lun = data_st.LogicalUnit(address=lun[0], target=lun[1],\n id=lun[2], port=lun[3])\n if lun_creds != (None, None):\n direct_lun.set_username(lun_creds[0])\n direct_lun.set_password(lun_creds[1])\n\n logical_units = data_st.LogicalUnits(logical_unit=[direct_lun])\n disk.set_lun_storage(\n data_st.HostStorage(logical_units=logical_units, type_=type_)\n )\n\n # id\n disk_id = kwargs.pop('id', None)\n if disk_id:\n disk.set_id(disk_id)\n\n # read_only\n read_only = kwargs.pop('read_only', None)\n if read_only is not None:\n disk.set_read_only(read_only)\n\n # snapshot\n snapshot = kwargs.pop('snapshot', None)\n if snapshot:\n disk.set_snapshot(snapshot)\n\n # description\n description = kwargs.pop('description', None)\n if description is not None:\n disk.set_description(description)\n\n # qcow_version\n qcow_version = kwargs.pop('qcow_version', None)\n if qcow_version:\n disk.set_qcow_version(qcow_version)\n\n return disk",
"def write_ROMS_grid(grd, visc_factor, diff_factor, filename='roms_grd.nc'):\n\n Mm, Lm = grd.hgrid.x_rho.shape\n\n \n # Write ROMS grid to file\n nc = netCDF.Dataset(filename, 'w', format='NETCDF4')\n nc.Description = 'ROMS grid'\n nc.Author = 'Trond Kristiansen'\n nc.Created = datetime.now().isoformat()\n nc.type = 'ROMS grid file'\n\n nc.createDimension('xi_rho', Lm)\n nc.createDimension('xi_u', Lm-1)\n nc.createDimension('xi_v', Lm)\n nc.createDimension('xi_psi', Lm-1)\n \n nc.createDimension('eta_rho', Mm)\n nc.createDimension('eta_u', Mm)\n nc.createDimension('eta_v', Mm-1)\n nc.createDimension('eta_psi', Mm-1)\n\n nc.createDimension('xi_vert', Lm+1)\n nc.createDimension('eta_vert', Mm+1)\n\n nc.createDimension('bath', None)\n\n if hasattr(grd.vgrid, 's_rho') is True and grd.vgrid.s_rho is not None:\n N, = grd.vgrid.s_rho.shape\n nc.createDimension('s_rho', N)\n nc.createDimension('s_w', N+1)\n\n def write_nc_var(var, name, dimensions, long_name=None, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if long_name is not None:\n nc.variables[name].long_name = long_name\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n print ' ... wrote ', name\n\n if hasattr(grd.vgrid, 's_rho') is True and grd.vgrid.s_rho is not None:\n write_nc_var(grd.vgrid.theta_s, 'theta_s', (), 'S-coordinate surface control parameter')\n write_nc_var(grd.vgrid.theta_b, 'theta_b', (), 'S-coordinate bottom control parameter')\n write_nc_var(grd.vgrid.Tcline, 'Tcline', (), 'S-coordinate surface/bottom layer width', 'meter')\n write_nc_var(grd.vgrid.hc, 'hc', (), 'S-coordinate parameter, critical depth', 'meter')\n write_nc_var(grd.vgrid.s_rho, 's_rho', ('s_rho'), 'S-coordinate at RHO-points')\n write_nc_var(grd.vgrid.s_w, 's_w', ('s_w'), 'S-coordinate at W-points')\n write_nc_var(grd.vgrid.Cs_r, 'Cs_r', ('s_rho'), 'S-coordinate stretching curves at RHO-points')\n write_nc_var(grd.vgrid.Cs_w, 'Cs_w', ('s_w'), 'S-coordinate stretching curves at W-points')\n\n write_nc_var(grd.vgrid.h, 'h', ('eta_rho', 'xi_rho'), 'bathymetry at RHO-points', 'meter')\n #ensure that we have a bath dependancy for hraw\n if len(grd.vgrid.hraw.shape) == 2:\n hraw = np.zeros((1, grd.vgrid.hraw.shape[0], grd.vgrid.hraw.shape[1]))\n hraw[0,:] = grd.vgrid.hraw\n else:\n hraw = grd.vgrid.hraw\n write_nc_var(hraw, 'hraw', ('bath', 'eta_rho', 'xi_rho'), 'raw bathymetry at RHO-points', 'meter')\n write_nc_var(grd.hgrid.f, 'f', ('eta_rho', 'xi_rho'), 'Coriolis parameter at RHO-points', 'second-1')\n write_nc_var(1./grd.hgrid.dx, 'pm', ('eta_rho', 'xi_rho'), 'curvilinear coordinate metric in XI', 'meter-1')\n write_nc_var(1./grd.hgrid.dy, 'pn', ('eta_rho', 'xi_rho'), 'curvilinear coordinate metric in ETA', 'meter-1')\n write_nc_var(grd.hgrid.dmde, 'dmde', ('eta_rho', 'xi_rho'), 'XI derivative of inverse metric factor pn', 'meter')\n write_nc_var(grd.hgrid.dndx, 'dndx', ('eta_rho', 'xi_rho'), 'ETA derivative of inverse metric factor pm', 'meter')\n write_nc_var(grd.hgrid.xl, 'xl', (), 'domain length in the XI-direction', 'meter')\n write_nc_var(grd.hgrid.el, 'el', (), 'domain length in the ETA-direction', 'meter')\n\n write_nc_var(grd.hgrid.x_rho, 'x_rho', ('eta_rho', 'xi_rho'), 'x location of RHO-points', 'meter')\n write_nc_var(grd.hgrid.y_rho, 'y_rho', ('eta_rho', 'xi_rho'), 'y location of RHO-points', 'meter')\n write_nc_var(grd.hgrid.x_u, 'x_u', ('eta_u', 'xi_u'), 'x location of U-points', 'meter')\n write_nc_var(grd.hgrid.y_u, 'y_u', ('eta_u', 'xi_u'), 'y location of U-points', 'meter')\n 
write_nc_var(grd.hgrid.x_v, 'x_v', ('eta_v', 'xi_v'), 'x location of V-points', 'meter')\n write_nc_var(grd.hgrid.y_v, 'y_v', ('eta_v', 'xi_v'), 'y location of V-points', 'meter')\n write_nc_var(grd.hgrid.x_psi, 'x_psi', ('eta_psi', 'xi_psi'), 'x location of PSI-points', 'meter')\n write_nc_var(grd.hgrid.y_psi, 'y_psi', ('eta_psi', 'xi_psi'), 'y location of PSI-points', 'meter')\n write_nc_var(grd.hgrid.x_vert, 'x_vert', ('eta_vert', 'xi_vert'), 'x location of cell verticies', 'meter')\n write_nc_var(grd.hgrid.y_vert, 'y_vert', ('eta_vert', 'xi_vert'), 'y location of cell verticies', 'meter')\n\n if hasattr(grd.hgrid, 'lon_rho'):\n write_nc_var(grd.hgrid.lon_rho, 'lon_rho', ('eta_rho', 'xi_rho'), 'longitude of RHO-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_rho, 'lat_rho', ('eta_rho', 'xi_rho'), 'latitude of RHO-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_u, 'lon_u', ('eta_u', 'xi_u'), 'longitude of U-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_u, 'lat_u', ('eta_u', 'xi_u'), 'latitude of U-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_v, 'lon_v', ('eta_v', 'xi_v'), 'longitude of V-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_v, 'lat_v', ('eta_v', 'xi_v'), 'latitude of V-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_psi, 'lon_psi', ('eta_psi', 'xi_psi'), 'longitude of PSI-points', 'degree_east')\n write_nc_var(grd.hgrid.lat_psi, 'lat_psi', ('eta_psi', 'xi_psi'), 'latitude of PSI-points', 'degree_north')\n write_nc_var(grd.hgrid.lon_vert, 'lon_vert', ('eta_vert', 'xi_vert'), 'longitude of cell verticies', 'degree_east')\n write_nc_var(grd.hgrid.lat_vert, 'lat_vert', ('eta_vert', 'xi_vert'), 'latitude of cell verticies', 'degree_north')\n\n nc.createVariable('spherical', 'c')\n nc.variables['spherical'].long_name = 'Grid type logical switch'\n nc.variables['spherical'][:] = grd.hgrid.spherical\n print ' ... wrote ', 'spherical'\n\n write_nc_var(grd.hgrid.angle_rho, 'angle', ('eta_rho', 'xi_rho'), 'angle between XI-axis and EAST', 'radians')\n\n write_nc_var(grd.hgrid.mask_rho, 'mask_rho', ('eta_rho', 'xi_rho'), 'mask on RHO-points')\n write_nc_var(grd.hgrid.mask_u, 'mask_u', ('eta_u', 'xi_u'), 'mask on U-points')\n write_nc_var(grd.hgrid.mask_v, 'mask_v', ('eta_v', 'xi_v'), 'mask on V-points')\n write_nc_var(grd.hgrid.mask_psi, 'mask_psi', ('eta_psi', 'xi_psi'), 'mask on psi-points')\n\n if visc_factor != None:\n write_nc_var(visc_factor, 'visc_factor', ('eta_rho', 'xi_rho'), 'horizontal viscosity sponge factor')\n if diff_factor != None:\n write_nc_var(diff_factor, 'diff_factor', ('eta_rho', 'xi_rho'), 'horizontal diffusivity sponge factor')\n \n nc.close()",
"def setup_disks(self):\n disk_cols = self.disk_cols\n img = self.img\n img_shape = img.shape[0:2]\n img_shape = img_shape[::-1] #reverse so img_shape[0]= x dim, [1] = y dim\n cur_arm_pos = [self.x, self.y]\n img_centre = tuple(ij/2 for ij in img_shape)\n offset = self.offset\n des_img_pos = list(map(operator.add, img_centre, offset))\n img_thres = self.img_thres\n peg_pos = self.peg_pos\n move_z = self.move_to_height\n init_x = self.init_x\n init_y = self.init_y\n init_z = self.init_z\n \n print('Setting up disks:')\n for disk_col in disk_cols:\n #find disk in image\n disk_img_pos, img_bin = self.find_colours(img, disk_col)\n print(' Disk found at: '+str(disk_img_pos))\n \n #move arm to disk\n print(' Moving arm...')\n cur_arm_pos = self.move_to_object(disk_img_pos, img_shape, \n disk_col, des_img_pos, img_thres)\n \n #pickup disk\n print(' Picking up disk...')\n self.pick(0)\n \n #move to peg 0\n print(' Moving to peg 0...')\n self.move_to(peg_pos[0][0], peg_pos[0][1], move_z)\n \n #drop disk\n print(' Dropping disk...')\n self.drop()\n \n #return to neutral position\n print(' Moving to initial position...')\n self.move_to(init_x, init_y, init_z)\n print('Finished setting up disks.')",
"def build_spherical_mesh(cellWidth, lon, lat, earth_radius,\n out_filename='base_mesh.nc', plot_cellWidth=True,\n dir='./', logger=None):\n\n with LoggingContext(__name__, logger=logger) as logger:\n\n da = xarray.DataArray(cellWidth,\n dims=['lat', 'lon'],\n coords={'lat': lat, 'lon': lon},\n name='cellWidth')\n cw_filename = 'cellWidthVsLatLon.nc'\n da.to_netcdf(cw_filename)\n if plot_cellWidth:\n register_sci_viz_colormaps()\n fig = plt.figure(figsize=[16.0, 8.0])\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.set_global()\n im = ax.imshow(cellWidth, origin='lower',\n transform=ccrs.PlateCarree(),\n extent=[-180, 180, -90, 90], cmap='3Wbgy5',\n zorder=0)\n ax.add_feature(cartopy.feature.LAND, edgecolor='black', zorder=1)\n gl = ax.gridlines(\n crs=ccrs.PlateCarree(),\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='-', zorder=2)\n gl.top_labels = False\n gl.right_labels = False\n plt.title(\n 'Grid cell size, km, min: {:.1f} max: {:.1f}'.format(\n cellWidth.min(),cellWidth.max()))\n plt.colorbar(im, shrink=.60)\n fig.canvas.draw()\n plt.tight_layout()\n plt.savefig('cellWidthGlobal.png', bbox_inches='tight')\n plt.close()\n\n logger.info('Step 1. Generate mesh with JIGSAW')\n jigsaw_driver(cellWidth, lon, lat, on_sphere=True,\n earth_radius=earth_radius, logger=logger)\n\n logger.info('Step 2. Convert triangles from jigsaw format to netcdf')\n jigsaw_to_netcdf(msh_filename='mesh-MESH.msh',\n output_name='mesh_triangles.nc', on_sphere=True,\n sphere_radius=earth_radius)\n\n logger.info('Step 3. Convert from triangles to MPAS mesh')\n args = ['MpasMeshConverter.x',\n 'mesh_triangles.nc',\n out_filename]\n check_call(args=args, logger=logger)",
"def Disc(center=(0.0, 0.0, 0.0), inner=0.25, outer=0.5, normal=(0.0, 0.0, 1.0), r_res=1, c_res=6):\n src = _vtk.vtkDiskSource()\n src.SetInnerRadius(inner)\n src.SetOuterRadius(outer)\n src.SetRadialResolution(r_res)\n src.SetCircumferentialResolution(c_res)\n src.Update()\n normal = np.array(normal)\n center = np.array(center)\n surf = wrap(src.GetOutput())\n surf.rotate_y(90, inplace=True)\n translate(surf, center, normal)\n return surf",
"def density_grid(self):\n # Create a grid that contains the area in m2 per grid cell.\n if self.crs.is_geographic:\n area = self.area_grid()\n\n elif self.crs.is_projected:\n ucf = rasterio.crs.CRS.from_user_input(self.crs).linear_units_factor[1]\n area = abs(self.res[0] * self.res[0]) * ucf**2\n\n # Create a grid that contains the density in unit/m2 per grid cell.\n unit = self._obj.attrs.get(\"unit\", \"\")\n ds_out = self._obj / area\n ds_out.attrs.update(unit=f\"{unit}.m-2\")\n return ds_out",
"def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)",
"def __init__(self,\r\n ox=0.0, oy=0.0, oz=0.0,\r\n dx=1.0, dy=1.0, dz=1.0,\r\n nx=200, ny=200, nz=10,\r\n gtype='points', gname='image',\r\n periodicity=False):\r\n\r\n self.ox = ox\r\n self.oy = oy\r\n self.oz = oz\r\n self.dx = dx\r\n self.dy = dy\r\n self.dz = dz\r\n self.nx = nx\r\n self.ny = ny\r\n self.nz = nz\r\n self.gtype = gtype\r\n self.gname = gname\r\n self.periodicity = periodicity\r\n\r\n if self.gtype == 'points':\r\n self.points = self.nx*self.ny*self.nz\r\n elif self.gtype == 'cells':\r\n self.cells = (self.nx-1 if self.nx> 1 else 1)*(self.ny-1 if self.ny> 1 else 1)*(self.nz-1 if self.nz> 1 else 1)\r\n\r\n # Compute the size of the grid\r\n self._lx = None #self.dx * self.nx - self.ox\r\n self._ly = None #self.dy * self.ny - self.oy\r\n self._lz = None #self.dz * self.nz - self.oz\r",
"def in_disk():\n sample = np.random.random_sample(2)\n a, b = sample.sort()\n return np.array([b * np.cos(2 * np.pi * a / b),\n b * np.sin(2 * np.pi * a / b)])",
"def write_grid(self):\n \n self.fout = self.create_savename()\n ncout = Dataset(self.fout, 'w')\n print('Writing: %s' % self.fout)\n \n # Create dimensions\n lon = ncout.createDimension(self.xvar, self.nx)\n lat = ncout.createDimension(self.yvar, self.ny)\n depth = ncout.createDimension(self.zvar, self.nz)\n tdim = ncout.createDimension('time', None)\n bndsDim = ncout.createDimension('bnds', 2)\n\n # Create variables\n varx = ncout.createVariable(self.xvar, 'float64', (self.xvar,))\n vary = ncout.createVariable(self.yvar, 'float64', (self.yvar,))\n varz = ncout.createVariable(self.zvar, 'float64', (self.zvar,))\n\n varx.standard_name = 'longitude'\n varx.units = 'degrees'\n ncout.variables['LONGITUDE'].bounds = 'lon_bnds'\n lonBndsVar = ncout.createVariable('lon_bnds', 'float64', (self.xvar, 'bnds'))\n xboundaries = np.concatenate([self.xminbounds, np.reshape(self.xmaxbounds[-1],(1,1))[0]])\n lonBndsVar[:,:] = np.array([xboundaries[:-1], xboundaries[1:]]).T\n\n vary.standard_name = 'latitude'\n vary.units = 'degrees'\n ncout.variables['LATITUDE'].bounds = 'lat_bnds'\n latBndsVar = ncout.createVariable('lat_bnds', 'float64', (self.yvar, 'bnds'))\n yboundaries = np.concatenate([self.yminbounds, np.reshape(self.ymaxbounds[-1],(1,1))[0]])\n latBndsVar[:,:] = np.array([yboundaries[:-1], yboundaries[1:]]).T\n \n varz.standard_name = 'depth'\n varz.units = 'metres'\n ncout.variables['DEPH_CORRECTED'].bounds = 'depth_bnds'\n depthBndsVar = ncout.createVariable('depth_bnds', 'float64', (self.zvar, 'bnds'))\n zboundaries = np.concatenate([self.zminbounds, np.reshape(self.zmaxbounds[-1],(1,1))[0]])\n depthBndsVar[:,:] = np.array([zboundaries[:-1], zboundaries[1:]]).T\n\n vartmean = ncout.createVariable('tmean', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmtmean = ncout.createVariable(self.datavar, 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varsum = ncout.createVariable('sum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmsum = ncout.createVariable('meansum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varcount = ncout.createVariable('count', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n# varmax = ncout.createVariable('gmax', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmin = ncout.createVariable('gmin', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmed = ncout.createVariable('median', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n varpcount = ncout.createVariable('pcount', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n vartime = ncout.createVariable('time', 'float64', ('time',))\n vartime.units = 'hours since 0001-01-01 00:00:00'\n vartime.calendar = 'gregorian'\n\n # Write to variables\n varx[:] = self.xgrid\n vary[:] = self.ygrid\n varz[:] = self.zgrid\n vartmean[:] = self.grid_tmean[np.newaxis]\n varmtmean[:] = self.grid_meantmean[np.newaxis]\n varsum[:] = self.grid_sum[np.newaxis]\n varmsum[:] = self.grid_meansum[np.newaxis]\n varcount[:] = self.grid_count[np.newaxis]\n varpcount[:] = self.grid_pcount[np.newaxis]\n# varmax[:] = self.grid_max[np.newaxis]\n# varmin[:] = self.grid_min[np.newaxis]\n# varmed[:] = self.grid_med[np.newaxis]\n vartime[:] = date2num(self.dt, units=vartime.units, calendar=vartime.calendar)\n \n # Add global attributes\n ncout.history = 'Created ' + time.ctime(time.time())\n \n # Save\n ncout.close()",
"def to_spherical(d, r_grid, theta_grid, phi_grid, items):\n import numpy as np\n nr, nt, nphi = len(r_grid), len(theta_grid), len(phi_grid)\n files = {}\n\n for key in items:\n files.update({key: open(items[key]['filename'], 'w')})\n\n state = query_state()\n\n for i in range(nphi-1):\n phi = 0.5 * (phi_grid[i] + phi_grid[i+1])\n for j in range(nt-1):\n theta = 0.5 * (theta_grid[j] + theta_grid[j+1])\n for k in range(nr-1):\n r = 0.5 * (r_grid[k] + r_grid[k+1])\n rho = r * np.sin(theta)\n z = r * np.cos(theta)\n for key in items:\n val = state.query(d, rho, z, key)\n files[key].write('{0:.6e}\\n'.format(val))\n\n for key in items:\n files[key].close()",
"def __init__(self, rows=8, cols=8):\n self.rows = rows\n self.cols = cols\n self.matrix = [[Disk.NONE for x in range(rows)] for y in range(cols)]\n init_array = (('d','4',Disk.LIGHT), ('e','5',Disk.LIGHT), ('d','5',Disk.DARK), ('e','4',Disk.DARK))\n # init_array = (('a', '2', Disk.LIGHT), ('b', '5', Disk.LIGHT), ('d', '5', Disk.DARK), ('e', '4', Disk.DARK))\n for item in init_array:\n self.place_disk(*self.coordinates_to_matrix(item[0], item[1]), item[2])",
"def grid_volume(self):\n\n if not hasattr(self.data, 'zeta'):\n surface_elevation = np.zeros((self.dims.node, self.dims.time))\n else:\n surface_elevation = self.data.zeta\n\n self.depth_volume, self.volume = unstructured_grid_volume(self.grid.art1, self.grid.h, surface_elevation, self.grid.siglev, depth_integrated=True)",
"def createncfile(dz_id,t,x,z):\n db = labdb.LabDB()\n #create the directory in which to store the nc file\n sql = \"\"\"INSERT into dn2t (dz_id) VALUES (%d)\"\"\" % (dz_id) \n db.execute(sql)\n sql = \"\"\"SELECT LAST_INSERT_ID()\"\"\" \n rows = db.execute(sql)\n dn2t_id = rows[0][0]\n dn2t_path = \"/Volumes/HD4/dn2t/%d\" % dn2t_id \n os.mkdir(dn2t_path)\n\n dn2t_filename = os.path.join(dn2t_path,\"dn2t.nc\")\n print(\"d(N2)/dt filename : \",dn2t_filename)\n\n\n # Declare the nc file for the first time\n nc = netCDF4.Dataset(dn2t_filename,'w',format = 'NETCDF4')\n row_dim = nc.createDimension('row',964)\n col_dim = nc.createDimension('column',1292)\n lenT=t.shape[0] #lenT is the length of the dn2t file.Its 1 element shorter in time axis than deltaN2\n print(\"time axis length\",lenT) # debug info\n t_dim = nc.createDimension('time',lenT)\n\n # Dimensions are also variable\n ROW = nc.createVariable('row',numpy.float32,('row'))\n print(list(nc.dimensions.keys()), ROW.shape,ROW.dtype)\n COLUMN = nc.createVariable('column',numpy.float32,('column'))\n print(list(nc.dimensions.keys()) , COLUMN.shape, COLUMN.dtype)\n TIME = nc.createVariable('time',numpy.float32,('time'))\n print(list(nc.dimensions.keys()) ,TIME.shape, TIME.dtype)\n\n # declare the 3D data variable \n dn2t = nc.createVariable('dn2t_array',numpy.float32,('time','row','column'))\n print(list(nc.dimensions.keys()) ,dn2t.shape,dn2t.dtype)\n\n # assign the values\n TIME[:] = t\n ROW[:] = z\n COLUMN[:] = x\n\n nc.close()\n db.commit()\n return dn2t_id,dn2t_filename",
"def create_grid(self):\n # Domain definition\n network = pp.FractureNetwork2d(self.frac_pts.T, self.frac_edges.T, domain=self.box)\n gb = network.mesh(self.mesh_args) \n pp.contact_conditions.set_projections(gb)\n\n self.gb = gb\n self.Nd = self.gb.dim_max()\n self._Nd = self.gb.dim_max()\n g2d = self.gb.grids_of_dimension(2)[0]\n self.min_face = np.copy(self.mesh_size) #np.min(g2d.face_areas)\n self.min_cell = np.min(g2d.cell_volumes)\n self.p, self.t = analysis.adjustmesh(g2d, self.tips, self.GAP)\n self.displacement = self.p*0\n self.fa_no = g2d.face_nodes.indices.reshape((2, g2d.num_faces), order='f').T \n return gb",
"def __init__(self, fno, wavelength, extent=None, samples=None):\n if samples is not None:\n x = np.linspace(-extent, extent, samples)\n y = np.linspace(-extent, extent, samples)\n xx, yy = np.meshgrid(x, y)\n rho, phi = cart_to_polar(xx, yy)\n data = airydisk(rho, fno, wavelength)\n else:\n x, y, data = None, None, None\n\n super().__init__(data=data, x=x, y=y)\n self.fno = fno\n self.wavelength = wavelength\n self.has_analytic_ft = True",
"def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n \n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid"
]
| [
"0.62119544",
"0.59816885",
"0.5803847",
"0.5753763",
"0.5676403",
"0.5614738",
"0.55297625",
"0.5526763",
"0.55237687",
"0.5414932",
"0.5387417",
"0.5368581",
"0.52788943",
"0.5258257",
"0.52431476",
"0.5216729",
"0.52134144",
"0.5209183",
"0.52089286",
"0.518013",
"0.51642305",
"0.51619977",
"0.51293945",
"0.5125582",
"0.5104309",
"0.5095949",
"0.50856155",
"0.50783396",
"0.5061669",
"0.50591505"
]
| 0.70307744 | 0 |
Truncate the coefficients up to the given threshold | def truncate(coeffs, threshold=99):
sortedindex = np.argsort(np.abs(coeffs))[::-1]
Ncoeff = coeffs.shape[-1]
cutoff = int(np.round(Ncoeff * threshold / 100.))  # np.int was removed from NumPy; plain int is equivalent here
# print("Keeping %2.0f %% (N=%s) of the biggest coefficients" % (threshold, cutoff))
coeffs_trunc = coeffs.copy() # copy of all coefficients
coeffs_trunc[sortedindex[cutoff:]] = 0 # zero out the coefficients below the cutoff
return coeffs_trunc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def truncate(self, precision) :\n precision = self.parent().action().filter(precision)\n nprec = min(self.precision(), precision)\n\n ncoefficients = dict( (ch, copy(self.__coefficients[ch]))\n for ch in self.__coefficients )\n return EquivariantMonoidPowerSeries( self.parent(),\n ncoefficients, nprec, cleanup_coefficients = True )",
"def threshold_wcoeffs(coeffs,threshold,mode='soft'):\n from copy import deepcopy\n coeffs_new=deepcopy(coeffs)\n\n # threshold all the detialed coeffs [1:], but not the approximate [1]\n coeffs_new[1:] = map (lambda x: pywt.threshold(x,threshold,mode=mode),\n coeffs[1:])\n\n return coeffs_new",
"def threshold_wcoeffs(coeffs,threshold,mode='soft'):\n from copy import deepcopy\n coeffs_new=deepcopy(coeffs)\n\n if mode == 'none': \n return coeffs_new\n else:\n # threshold all the detialed coeffs [1:], but not the approximate [1]\n coeffs_new[1:] = map (lambda x: pywt.threshold(x,threshold,mode=mode),\n coeffs[1:])\n\n return coeffs_new",
"def _truncate_in_place(self, precision) :\n precision = self.parent().action().filter(precision)\n nprec = min(self.precision(), precision)\n\n if nprec != self.precision() :\n for c in self.__coefficients :\n d = self.__coefficients[c]\n for k in d.keys() :\n if not k in nprec :\n del d[k]\n \n self._set_precision(nprec)",
"def _truncate_in_place(self, precision) :\n precision = self.parent().monoid().filter(precision)\n nprec = min(self.precision(), precision)\n\n if nprec != self.precision() :\n coefficients = self.__coefficients\n for k in coefficients.keys() :\n if not k in nprec :\n del coefficients[k]\n \n self._set_precision(nprec)",
"def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)",
"def truncate(self, precision) :\n precision = self.parent().monoid().filter(precision)\n nprec = min(self.precision(), precision)\n\n ncoefficients = copy(self.__coefficients)\n return MonoidPowerSeries( self.parent(), ncoefficients, nprec, cleanup_coefficients = True )",
"def threshold_coefficients(coefficients, threshold_value):\n return np.array([0.0 if abs(x) <= threshold_value else x for x in coefficients])",
"def truncate(self, precision) :\n raise NotImplementedError",
"def truncate(self, precision) :\n raise NotImplementedError",
"def trimCompo(self, threshold):\n newCompo = {}\n for key,value in self.m_compo.items():\n if value > threshold:\n newCompo[ key ] = value\n self.m_compo = newCompo",
"def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask",
"def sparsify_weights(self, threshold = 1e-6):\n weights = self.list_cnn[-1].get_weights()\n sparsified_weights = []\n for w in weights:\n bool_mask = (abs(w) > threshold).astype(int)\n sparsified_weights.append(w * bool_mask)\n self.list_cnn[-1].set_weights(sparsified_weights)",
"def _standardize_cutoff(cutoff):\n cutoff = np.asarray(cutoff)\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff",
"def rm_lin_depend(self, basis, threshold=1e-10):\n Sb = basis.T@basis\n l, T = np.linalg.eig(Sb)\n b_norm = np.sqrt(Sb.diagonal())\n mask = l > threshold*b_norm\n return T[:, mask]",
"def Truncate(self, f, fcut, below=True):\n fout = copy.copy(f)\n ind = thresh(f,fcut)\n if below:\n fout = fout[0:ind]\n else:\n fout = fout[ind:]\n \n keys=['Gxx','Gyy','Gxy']\n\n for curkey in keys:\n curitem = colwise(getattr(self,curkey))\n\n if below:\n curitem = curitem[0:ind,:]\n else:\n curitem = curitem[ind:,:]\n \n setattr(self,curkey,squeeze(curitem))\n return fout",
"def ramp_kernel_real(cutoff, length):\n pos = np.arange(-length, length, 1)\n return cutoff ** 2.0 * (2.0 * np.sinc(2 * pos * cutoff) - np.sinc(pos * cutoff) ** 2.0)",
"def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0",
"def reduce(self, threshold):\n def percentage_change(old, new):\n return (old - new) / old\n real_reduction_iterations = 0\n padic_reduction_iterations = 0\n cont_reduction_iterations = 0\n factor = len(self.constants.primes) + 1\n \n print('initial bound',max(self.coefficients['n1_bound'],max(self.coefficients['Z_bounds'])))\n\n # First, go through the real reduction loop.\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = None\n while True:\n real_reduction_iterations += 1\n logging.info(\"Real Reduction - Iteration %d\" % real_reduction_iterations)\n\n large_constant = self.calculate_large_constant(current_n1_bound, factor)\n logging.info(\"Large constant contains %d digits \" % large_constant.ndigits())\n\n # Find a new bound on n_1 - n_k\n new_diff_bound = self.real_reduce(current_n1_bound, large_constant)\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n self.update_real_constants(new_diff_bound)\n logging.info(\"new diff bound: \" + str(new_diff_bound))\n logging.info(\"New bound on n1: \" + str(self.coefficients[\"n1_bound\"]))\n logging.info(\"New bound on zi: \" + str(self.coefficients['Z_bounds']))\n \n if percentage_change(current_n1_bound, self.coefficients[\"n1_bound\"]) < self.threshold:\n logging.info(\"New bound did not improve in the real step; real reduction process is done.\")\n factor = factor + 5\n break\n\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = new_diff_bound\n\n # Second, go through the p-adic reduction loop.\n current_Z_bounds = self.coefficients['Z_bounds']\n while True:\n padic_reduction_iterations += 1\n logging.info(\"p-adic Reduction - Iteration %d\" % padic_reduction_iterations)\n\n new_Z_bounds = self.padic_reduce(math.ceil(current_diff_bound))\n logging.info(\"New bound on zi: \" + str(new_Z_bounds))\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n new_n1_bound = self.update_padic_constants(new_Z_bounds)\n logging.info(\"New bound on n1: \" + str(new_n1_bound))\n if percentage_change(current_n1_bound, new_n1_bound) < self.threshold:\n logging.info(\"New bound did not improve in the p-adic step; p-adic reduction process is done.\")\n break\n\n current_n1_bound = new_n1_bound\n\n print(current_n1_bound)\n\n return self.constants",
"def remove_low_variance(X, threshold=0.0):\n selector = VarianceThreshold(threshold=threshold)\n return selector.fit_transform(X)",
"def cut_noise(a, tol=1E-10):\n a[abs(a) < tol] = 0\n return a",
"def truncated_normal(size, threshold=1):\n return truncnorm.rvs(-threshold, threshold, size=size)",
"def test_truncate2():\n X = rand(5,5,5)\n T = hosvd(X)\n k = 3\n Tk = T.truncate(k)\n E = X - Tk.asarray()\n Cdk = T.X\n Cdk[:k,:k,:k] = 0\n assert np.allclose(fro_norm(E), fro_norm(Cdk))",
"def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)",
"def zero_lower_range(x, lower_threshold):\r\n x = np.asarray(x, dtype=complex)\r\n count = 0\r\n for i in range(0, x.shape[0], 1):\r\n if x[i].real < lower_threshold:\r\n x[i] = complex(0, 0j)\r\n count+=1\r\n print(\"zeroed samples: \", count)\r\n return x;",
"def FoldChangeFilterToControl(X, data_headers, FCto, cutoff=0.4):\n XX = LinearFoldChange(X.copy(), data_headers, FCto)\n Xidx = np.any(XX[data_headers].values <= 1 - cutoff, axis=1) | np.any(XX[data_headers].values >= 1 + cutoff, axis=1)\n return X.iloc[Xidx, :]",
"def cut(self, max_lenght):\n self.V_estimates = self.V_estimates[:max_lenght]\n super().cut(max_lenght)",
"def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]",
"def shrink(x: np.ndarray, t: float) -> np.ndarray:\n return np.sign(x) * np.maximum(np.abs(x) - t, 0)",
"def shrink_soft_threshold(r,rvar,theta):\n if len(theta.get_shape())>0 and theta.get_shape() != (1,):\n lam = theta[0] * tf.sqrt(rvar)\n scale=theta[1]\n else:\n lam = theta * tf.sqrt(rvar)\n scale = None\n lam = tf.maximum(lam,0)\n arml = tf.abs(r) - lam\n xhat = tf.sign(r) * tf.maximum(arml,0)\n dxdr = tf.reduce_mean(tf.to_float(arml>0),0)\n if scale is not None:\n xhat = xhat*scale\n dxdr = dxdr*scale\n return (xhat,dxdr)"
]
| [
"0.64514995",
"0.6273887",
"0.6269911",
"0.62587625",
"0.61372244",
"0.6091238",
"0.6072903",
"0.5973585",
"0.5852989",
"0.5852989",
"0.5834595",
"0.5436423",
"0.540269",
"0.5387345",
"0.5347651",
"0.5327133",
"0.53194124",
"0.5311541",
"0.53010476",
"0.52519625",
"0.524019",
"0.5232685",
"0.5229131",
"0.52134556",
"0.52112406",
"0.5206534",
"0.52026147",
"0.51986563",
"0.5198421",
"0.517298"
]
| 0.82398957 | 0 |
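A minimal, self-contained usage sketch for the `truncate` document in the row above — it restates the function with NumPy imported and applies it to an illustrative coefficient vector; the sample values and the 50% threshold are made up for demonstration only:

```python
import numpy as np

def truncate(coeffs, threshold=99):
    # Keep the `threshold` percent largest-magnitude coefficients, zero the rest.
    sortedindex = np.argsort(np.abs(coeffs))[::-1]
    Ncoeff = coeffs.shape[-1]
    cutoff = int(np.round(Ncoeff * threshold / 100.))
    coeffs_trunc = coeffs.copy()
    coeffs_trunc[sortedindex[cutoff:]] = 0
    return coeffs_trunc

coeffs = np.array([0.01, -2.5, 0.3, 1.7, -0.02, 0.9])
print(truncate(coeffs, threshold=50))  # -> [ 0.  -2.5  0.   1.7  0.   0.9]
```

Note that `threshold` here is a percentage of coefficients to keep, not an amplitude cutoff — which is what distinguishes this document from the soft/hard-thresholding negatives listed with it.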
Reconstruct a model image from the coefficients | def reconstruct_image(imgsrc, nlevels, trunc_threshold=None):
coeffs = zernike_basis_n_coeffs(imgsrc, nlevels, return_basis=False)
if trunc_threshold is not None:
coeffs = truncate(coeffs, threshold=trunc_threshold)
# Generate (rho, phi) grids and masking grid
grid_rho, grid_phi, grid_mask = unit_disk(imgsrc.shape[0])
# Compute the two indices (n, m) that specify each Zernike function
n, m = zernike_Double_Index(nlevels)
reconstr_im = sum(val * zernike_poly(m[i], n[i], grid_rho, grid_phi) * grid_mask for (i, val) in enumerate(coeffs))
return reconstr_im | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sweep_image_model():\n for c1 in [4, 8, 16]:\n for c2 in [2, 4]:\n for c3 in [2, 4]:\n for c4 in [1, 2]:\n flags = flag_reader.read_flag()\n print(c1)\n flags.channel_list = c1 * np.array([1, c2, c2*c3, c2*c3*c4])\n print('channel list = ', flags.channel_list)\n flags.last_dim = flags.channel_list[-1]\n flags.model_name = flags.data_set + '_channel_' + str(flags.channel_list).replace('[','').replace(']','').replace(' ','_') + \\\n '_dim_last_' + str(flags.last_dim) + '_ind_' + str(flags.comp_ind) + \\\n '_lr_{}_decay_{}_reg_{}_bs_{}'.format(flags.lr, flags.lr_decay_rate, flags.reg_scale, flags.batch_size)\n print(flags.model_name)\n training_from_flag(flags)",
"def reconstruct(self, my_data, my_suff_stat, model_params):\n\n my_x = my_data[\"x\"]\n my_x_infr = my_data[\"x_infr\"]\n lpj = my_suff_stat[\"lpj\"]\n ss = my_suff_stat[\"ss\"] # is (my_N x S x H)\n S_perm = my_suff_stat[\"S_perm\"]\n\n my_N, D = my_x.shape\n B = np.minimum(self.B_max - lpj.max(axis=1), self.B_max_shft) # is: (my_N,)\n pjc = np.exp(lpj + B[:, None]) # is: (my_N, S+H+1)\n\n modelmean = self.modelmean\n\n this_suff_stat = {}\n if \"storage\" in my_suff_stat.keys():\n this_suff_stat[\"storage\"] = my_suff_stat[\"storage\"]\n\n my_data[\"y_reconstructed\"] = my_data[\"y\"].copy()\n my_y = my_data[\"y_reconstructed\"]\n\n for n in range(my_N):\n this_x_infr = my_x_infr[n, :] # is (D,)\n if np.logical_not(this_x_infr).all():\n continue\n\n this_y = my_y[n, :] # is (D,)\n this_x = my_x[n, :] # is (D,)\n this_pjc = pjc[n, :] # is (S,)\n this_ss = ss[n, :, :] # is (S, H)\n\n this_data = {\"y\": this_y, \"x\": this_x, \"x_infr\": this_x_infr}\n this_suff_stat[\"ss\"] = this_ss\n this_mu = modelmean(model_params, this_data, this_suff_stat) # is (D_miss, S)\n\n this_pjc_sum = this_pjc.sum()\n\n this_estimate = (this_mu * this_pjc[None, S_perm:]).sum(\n axis=1\n ) / this_pjc_sum # is (D_miss,)\n this_y[np.logical_not(this_x)] = this_estimate",
"def preprocess(img):\n # standard mean and std for the model\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n # resize\n img = img.resize(size = (224, 224))\n # transforms to numpy\n img = np.array(img, dtype = np.float64)\n # Mean and Std\n img = (img - mean)/std\n # transpose [channels first]\n img = img.transpose((2, 0, 1))\n # conver to Tensor\n img = torch.from_numpy(img)\n return img",
"def preprocess_image(image, model_image_size):\n #resized_image = cv2.resize(image, tuple(reversed(model_image_size)), cv2.INTER_AREA)\n resized_image = letterbox_resize(image, tuple(reversed(model_image_size)))\n image_data = np.asarray(resized_image).astype('float32')\n image_data = normalize_image(image_data)\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n return image_data",
"def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2",
"def predict_from_model(patch, model):\n\n prediction = model.predict(patch.reshape(1, 256, 256, 3))\n prediction = prediction[:, :, :, 1].reshape(256, 256)\n return prediction",
"def pixModel(meas, param):\n res = []\n for i,m in enumerate(meas):\n tmp = []\n for p in ['A','B','C','D']:\n for c in ['1','2','3','4','5']:\n tmp.append(param['flux_'+str(i)]*param['T'+p+c]*\n (1+param['vis_'+str(i)]*\n np.cos(2*np.pi/param['wl'+p+c]*\n param['opd_'+str(i)]+\n param['phi'+p+c])))\n res.append(tmp)\n return res",
"def _init_model(self):\n self.A_inv = np.zeros(shape=(self.numUsers, self.d, self.d))\n self.b = np.zeros(shape=(self.numUsers, self.d))\n self.w = np.zeros(shape=(self.numUsers, self.d))\n for i, mat in enumerate(self.A_inv):\n self.A_inv[i] = np.eye(self.d)",
"def get_final_reconstruction(self):",
"def preprocess_img_inv(img):\n img = img.data.numpy().copy()\n\n img[0] = img[0] * TORCH_IMG_STD[0] + TORCH_IMG_MEAN[0]\n img[1] = img[1] * TORCH_IMG_STD[1] + TORCH_IMG_MEAN[1]\n img[2] = img[2] * TORCH_IMG_STD[2] + TORCH_IMG_MEAN[2]\n img = img.transpose(1, 2, 0) * 255.0\n\n return img.round().astype('uint8')",
"def extract_features(img):\n # load models\n model = FeatureExtractor(CFG)\n model.load_model()\n feature_extractor = model.feature_extractor()\n\n # extract features \n print(type(img))\n extracted_features = feature_extractor.predict([img])\n\n # reduce dimension\n pca_model = joblib.load(PCA_MODEL_DIRECTORY)\n reduced_img = pca_model.transform(extracted_features)\n return reduced_img",
"def predict_mat(self):\n mat = self.covs_mat.dot(self.alpha)\n return mat.reshape(self.shape)",
"def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")",
"def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))",
"def reconstruct(self, coefs, *args):\n return self._waverec(\n wt.unravel_coeffs(coefs, *args, output_format=self._of), self.wname\n )",
"def _prep_attributes(self):\n self.coeff_ = self._coeff_to_df()\n self.results_ = self._results()\n self.cv_results_ = self._cv_results()\n self.intercept_ = self.model_.intercept_\n self.params_ = self.model_.get_params()\n\n return None",
"def model_maggie(self):\n if self._model_maggie is not None:\n return self._model_maggie\n self._model_maggie = reconstruct_maggie(self.coeffs, self.redshift,\n self.ptable[self.filter_list])\n return self._model_maggie",
"def model_transfer_StoW(img_path):\n A_real = Image.open(img_path)\n A_real = img_to_array(A_real)\n A_real = (A_real ) / 255\n #A_real = (A_real + 1) / 2.0\n A_real = A_real[np.newaxis, :]\n B_generated = model_AtoB.predict(np.array(A_real))\n A_reconstructed = model_BtoA.predict(B_generated)\n A_real = A_real[0, :, : , :]\n B_generated = B_generated[0, :, :, :]\n A_reconstructed = A_reconstructed[0 , :, :,:]\n A_real = Image.fromarray((A_real * 255).astype(np.uint8)).convert('RGB')\n B_generated = Image.fromarray((B_generated*255).astype(np.uint8)).convert('RGB')\n A_reconstructed = Image.fromarray((A_reconstructed * 255).astype(np.uint8)).convert('RGB')\n fileName1 = \"static/uploads/\" + str(random.random()) + \".jpg\"\n fileName2 = \"static/uploads/\" + str(random.random()) + \".jpg\"\n B_generated.save(fileName1)\n A_reconstructed.save(fileName2)\n return fileName1 , fileName2",
"def model_transfer_WtoS(img_path):\n B_real = Image.open(img_path)\n B_real = img_to_array(B_real)\n B_real = (B_real ) / 255\n #B_real = (B_real + 1) / 2.0\n B_real = B_real[np.newaxis, :]\n A_generated = model_BtoA.predict(np.array(B_real))\n B_reconstructed = model_AtoB.predict(A_generated)\n A_generated = A_generated[0, :, :, :]\n B_reconstructed = B_reconstructed[0 , :, :,:]\n A_generated = Image.fromarray((A_generated*255).astype(np.uint8)).convert('RGB')\n B_reconstructed = Image.fromarray((B_reconstructed * 255).astype(np.uint8)).convert('RGB')\n fileName1 = \"static/uploads/\" + str(random.random()) + \".jpg\"\n fileName2 = \"static/uploads/\" + str(random.random()) + \".jpg\"\n A_generated.save(fileName1)\n B_reconstructed.save(fileName2)\n return fileName1, fileName2",
"def get_model(summary=False):\n\timage_input=Input(shape=(220,220,5),name='image_input')\n\tbranch1_conv1=Conv2D(64, kernel_size=(3, 3), border_mode='same', input_shape=(220,220,5), activation='relu')(image_input)\n\tbranch1_conv2=Conv2D(64, kernel_size=(1, 1), border_mode='same', activation='relu')(branch1_conv1)\t\n\tbranch1_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch1_conv1)\n\tbranch2_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch1_pool1)\n\tbranch2_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch2_conv1)\t\n\tbranch2_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch2_conv2)\n\tbranch3_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch2_pool1)\n\tbranch3_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch3_conv1)\t\n\tbranch3_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch3_conv2)\n\tbranch4_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch3_pool1)\n\tbranch4_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch4_conv1)\t\n\tbranch4_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch4_conv2)\n\tbranch5_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch4_pool1)\n\tbranch5_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch5_conv1)\t\n\tbranch5_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch5_conv2)\n\tbranch6_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch5_pool1)\n\tbranch6_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch6_conv1)\t\n\tbranch6_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch6_conv2)\n\tbranch1_flat=Flatten()(branch6_pool1)\n\tdrop=Dropout(.3)(branch1_flat)\n\t# FC layers group\n\tdense1=Dense(512, activation='relu', name='fc1')(drop)\n\tdrop1=Dropout(.3)(dense1)\n\tdense2=Dense(256, activation='relu', name='fc2')(drop1)\n\tdrop3=Dropout(.3)(dense2)\n\tout=Dense(2, activation='softmax', name='fc4')(drop3)\n\tmodel=Model(inputs=image_input,outputs=out)\n\treturn model",
"def reconstruct_input_ext(self, model_in):",
"def get_pred_mask(test_image, model):\n\n test_image= test_image=transforms.ToPILImage()(test_image)\n #test_image=Image.fromarray(test_image)\n new_mask = model(transforms.ToTensor()(test_image).unsqueeze(1).cuda())[1].transpose(1,2).transpose(2,3).cpu().detach().numpy().squeeze()\n return new_mask",
"def prepare(img):\n return sobel(rgb2gray(img))",
"def emulator(pca, gp_model, params):\n # Weights prediction\n pred_weights = gp_predict(gp_model, params)\n\n # Inverse PCA (pred_weights * basis + mean)\n reconstructed = pca.inverse_transform(pred_weights)\n return reconstructed",
"def calculateAndAddComponents(img: ee.Image) -> ee.Image:\n img = img.select(coeffs[\"bands\"])\n components = [\n img.multiply(coeffs[comp]).reduce(ee.Reducer.sum()).rename(comp)\n for comp in [\"TCB\", \"TCG\", \"TCW\"]\n ]\n return img.addBands(components)",
"def prepare_texture_matrix(self):\n\t\ttexture_matrix = self.normalize_data()\n\t\tlabels = MatrixCreation().independent_variable_labels()\n\t\t\n\t\tcolumns_to_be_deleted = []\n\n\t\tfor key, label in labels.items():\n\t\t\tif label not in self.principal_components:\n\t\t\t\tcolumns_to_be_deleted.append(key)\n\n\t\ttexture_matrix = numpy.delete(texture_matrix,columns_to_be_deleted,axis=1)\n\n\t\treturn texture_matrix",
"def normalise(image):",
"def prep_input(im, acc=4):\n mask = cs.cartesian_mask(im.shape, acc, sample_n=8)\n im_und, k_und = cs.undersample(im, mask, centred=False, norm='ortho')\n im_gnd_l = to_lasagne_format(im)\n im_und_l = to_lasagne_format(im_und)\n k_und_l = to_lasagne_format(k_und)\n mask_l = to_lasagne_format(mask, mask=True)\n\n return im_und_l, k_und_l, mask_l, im_gnd_l",
"def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0 # 第几个原始图像的像素点\n for i in range(w):\n for j in range(h):\n # 获取原始数据像素点对应的类别中心点坐标\n # 再根据中心点坐标获取对应的像素值\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image",
"def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)"
]
| [
"0.58549684",
"0.5555387",
"0.55510896",
"0.55296147",
"0.55021566",
"0.5373655",
"0.5363906",
"0.53565735",
"0.5355798",
"0.5353091",
"0.53364193",
"0.5315074",
"0.5299631",
"0.5290952",
"0.5287503",
"0.52676666",
"0.5245175",
"0.5205611",
"0.5202884",
"0.5200446",
"0.51982826",
"0.5196933",
"0.51917154",
"0.51881826",
"0.518438",
"0.51700777",
"0.5160844",
"0.5157912",
"0.51550806",
"0.5151559"
]
| 0.5812254 | 1 |
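A minimal sketch of the reconstruction pattern used by the `reconstruct_image` document in the row above — a coefficient-weighted sum of basis functions over a masked unit disk. The helpers `zernike_basis_n_coeffs`, `zernike_Double_Index` and `zernike_poly` are not part of this record, so the first three Zernike terms (piston and the two tilts) are hard-coded here as stand-ins and the coefficients are invented:

```python
import numpy as np

def unit_disk(npix):
    # Polar (rho, phi) grids over [-1, 1] x [-1, 1] plus a mask selecting the unit disk.
    y, x = np.mgrid[-1:1:npix * 1j, -1:1:npix * 1j]
    rho = np.hypot(x, y)
    phi = np.arctan2(y, x)
    return rho, phi, (rho <= 1.0).astype(float)

# Stand-ins for zernike_poly: piston Z(0,0), x-tilt Z(1,1), y-tilt Z(1,-1).
basis = [
    lambda rho, phi: np.ones_like(rho),
    lambda rho, phi: rho * np.cos(phi),
    lambda rho, phi: rho * np.sin(phi),
]

coeffs = np.array([0.5, 1.0, -0.3])  # illustrative coefficients
rho, phi, mask = unit_disk(128)
reconstr_im = sum(c * Z(rho, phi) * mask for c, Z in zip(coeffs, basis))
print(reconstr_im.shape)  # (128, 128)
```

Truncating the coefficient vector first (as the document does when `trunc_threshold` is given) simply zeroes some terms of this sum before it is evaluated.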
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k'). 2D polynomials: computes two indices (n, m) to specify Zernike functions, starting from the top, shifting to the left and then right. | def zernike_visuo__pyramid(zbasis, n, m, nlevels, figsize=(12, 12), cmap='jet', fontsize=20, colorbar_labelsize=10):
cmap = plt.get_cmap('%s' %cmap)
index = 0
if not (nlevels>=0):
print('Input parameter must be >= 0')
raise AssertionError()
axlist = []
if (nlevels == 0):
fig = plt.figure(num = 1, figsize=figsize)
ax = fig.add_subplot(1,1,1)
axlist.append(ax)
im = ax.imshow(zbasis, cmap=cmap, interpolation='lanczos')
ax.set_title(r'$Z_{%d}^{%d}$' %(n,m), fontsize=fontsize)
ax.axis('off')
else:
# ++++ Defining layout for row number n and colunmn number m ++++++++
fig = plt.figure(1, figsize=figsize)
row_n = nlevels + 1
col_m = 2*nlevels + 1
top = (col_m + 1)//2  # integer division so the subplot index stays an int under Python 3
leftside = row_n*col_m - col_m + 1
rightside = row_n*col_m
k1 = 0; k2 = 0
for i in range(top, row_n*col_m + 1, 2*col_m):
ax = fig.add_subplot(row_n,col_m,i)
axlist.append(ax)
im=ax.imshow(zbasis[index], cmap=cmap, interpolation='lanczos', alpha=None)
ax.set_title(r'$Z_{%d}^{%d}$' %(n[index],m[index]), fontsize=fontsize)
ax.axis('off')
index += 1
s1 = i + col_m + 1
s2 = i + col_m - 1
jj1 = k1
jj2 = k2
while (s2 <= leftside):
ax = fig.add_subplot(row_n,col_m,s2)
axlist.append(ax)
im=ax.imshow(zbasis[index], cmap=cmap, interpolation='lanczos')
ax.set_title(r'$Z_{%d}^{%d}$' %(n[index],m[index]), fontsize=fontsize)
ax.axis('off')
index += 1
s2 +=col_m - 1
jj1 += 1
jj2 -= 1
leftside +=2
jj1 = k1
jj2 = k2
while (s1 <= rightside):
ax = fig.add_subplot(row_n,col_m,s1)
axlist.append(ax)
im=ax.imshow(zbasis[index], cmap=cmap, interpolation='lanczos')
ax.set_title(r'$Z_{%d}^{%d}$' %(n[index],m[index]), fontsize=fontsize)
ax.axis('off')
index += 1
s1 +=col_m + 1
jj1 += 1
jj2 += 1
rightside -=2
k1 = 0; k2 += 2
cbar = fig.colorbar(im, ax=axlist,fraction=0.05, orientation='horizontal')
cbar.ax.tick_params(labelsize=colorbar_labelsize)
fig.subplots_adjust(wspace=0,hspace=0, right=0.72, bottom=0.2)
fig.savefig('zernike_orders.png', dpi=300)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_poly(t, n, sz):\r\n\r\n\tfor i in range(n):\r\n\t\tt.forward(sz)\r\n\t\tt.left(360/n)",
"def draw_poly(t, n, sz):\n\tfor side in range(n):\n\t\tangle = (360/n)\n\t\tt.pendown()\n\t\tt.forward(sz)\n\t\tt.right(angle)",
"def draw_poly(t, n, sz):\r\n angle = 180 - (n - 2) * 180 / n\r\n for i in range(n):\r\n t.forward(sz)\r\n t.left(angle)",
"def display0(*args):\n #----------*----------* # unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- Bahnkoordinate (z)\n z = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)]\n sgx = [twiss_func(i,'sigx') for i in range(twiss_func.nbpoints)]\n sgy = [twiss_func(i,'sigy') for i in range(twiss_func.nbpoints)]\n # zero = [0. for i in range(sigma_fun.nbpoints)]\n #-------------------- trajectories (tz)\n tz= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cx= [cos_like(i,'cx') for i in range(cos_like.nbpoints)]\n # cxp= [cos_like(i,'cxp') for i in range(cos_like.nbpoints)]\n cy= [cos_like(i,'cy') for i in range(cos_like.nbpoints)]\n # cyp= [cos_like(i,'cyp') for i in range(cos_like.nbpoints)]\n # cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n # cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n sx= [sin_like(i,'sx') for i in range(sin_like.nbpoints)]\n # sxp= [sin_like(i,'sxp') for i in range(sin_like.nbpoints)]\n sy= [sin_like(i,'sy') for i in range(sin_like.nbpoints)]\n # syp= [sin_like(i,'syp') for i in range(sin_like.nbpoints)]\n # sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n # sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n stop_viseox = 5 # stop viseo plot after so many [m]\n stop_viseoy = 5 # stop viseo plot after so many [m]\n vzero = [0. for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n plt.figure(num=0,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- transverse X\n splot211=plt.subplot(211)\n splot211.set_title('transverse x')\n plt.plot(z,sgx ,label=r'$\\sigma$ [m]',color='green')\n plt.plot(tz,cx ,label='Cx[m]', color='blue',linestyle='-')\n # plt.plot(tz,cxp,label=\"Cx'[m]\",color='blue',linestyle=':')\n plt.plot(tz,sx, label='Sx[m]', color='red' ,linestyle='-')\n # plt.plot(tz,sxp,label=\"Sx'[m]\",color='red' ,linestyle=':')\n # vscale=plt.axis()[3]*0.1\n # viseox = [x*vscale for x in vis_ordinate]\n # for i,s in enumerate(vis_abszisse):\n # if s > stop_viseox:\n # viseox[i] = 0.\n # plt.plot(vis_abszisse,viseox,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='black')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- transverse Y\n splot212=plt.subplot(212)\n splot212.set_title('transverse y')\n plt.plot(z,sgy ,label=r'$\\sigma$ [m]',color='green')\n plt.plot(tz,cy, label='Cy[m]', color='blue',linestyle='-')\n # plt.plot(tz,cyp,label=\"Cy'[m]\",color='blue',linestyle=':')\n plt.plot(tz,sy, label='Sy[m]', color='red' ,linestyle='-')\n # plt.plot(tz,syp,label=\"Sy'[m]\",color='red' ,linestyle=':')\n vscale=plt.axis()[3]*0.1\n viseoy = [x*vscale for x in vis_ordinate]\n # for i,s in enumerate(vis_abszisse):\n # if s > stop_viseoy:\n # viseoy[i] = 0.\n plt.plot(vis_abszisse,viseoy,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='black')\n plt.legend(loc='lower right',fontsize='x-small')",
"def main2(factor, n):\n pos = get_orbit_points(n)\n draw_orbit(pos, fast=True)\n draw_lines_insta(pos, factor)",
"def create_fig_2d(self, data_array_2d, output_fn='', xlabel='', ylabel='', title=''):",
"def n27_and_sidebands():\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5, 4))\n # n=26 through n=29\n folder = os.path.join(\"..\", \"..\", \"2018-09-06\")\n fname = \"1_dye_fscan.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n ax.axhline(0, color='grey')\n data.plot(x='fpoly', y='sig', label=\"MW Off\", c='k', ax=ax)\n # sidebands\n folder = os.path.join(\"..\", \"..\", \"2018-09-09\")\n fname = \"1_freq_dye.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['asig'] = data['sig'] - 0.3\n ax.axhline(-0.3, color='grey')\n data.plot(x='fpoly', y='asig', label=\"MW On\", c='k', ax=ax)\n # pretty figure\n ax.legend().remove()\n ax.set_ylabel(r\"$e^-$ Signal\")\n ax.set_yticks([])\n ax.set_xlabel(\"Frequency (GHz from Limit)\")\n ax.set_xticks([-4863, -4511, -4195, -3908])\n ax.text(-4400, -0.15, \"MW On\")\n ax.text(-4400, 0.3, \"MW Off\")\n # save\n fig.tight_layout()\n fig.savefig(\"n27_and_sidebands.pdf\")\n return",
"def display3(*args):\n #-------------------- unpack\n twiss_fun = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n ape_plot = args[4]\n #-------------------- sigma functions\n # zero = [0. for i in range(sigma_fun.nbpoints)] # zero line\n z = [twiss_fun(i,'s') for i in range(twiss_fun.nbpoints)] # Abszisse\n sgx = [twiss_fun(i,'sigx')*1.e3 for i in range(twiss_fun.nbpoints)] # envelope (sigma-x)\n sgy = [twiss_fun(i,'sigy')*1.e3 for i in range(twiss_fun.nbpoints)] # envelope (sigma-y)\n #-------------------- trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cx= [cos_like(i,'cx')*1.e3 for i in range(cos_like.nbpoints)]\n # cxp= [cos_like(i,'cxp')*1.e3 for i in range(cos_like.nbpoints)]\n cy= [cos_like(i,'cy')*1.e3 for i in range(cos_like.nbpoints)]\n # cyp= [cos_like(i,'cyp')*1.e3 for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sx= [sin_like(i,'sx')*1.e3 for i in range(sin_like.nbpoints)]\n # sxp= [sin_like(i,'sxp')*1.e3 for i in range(sin_like.nbpoints)]\n sy= [sin_like(i,'sy')*1.e3 for i in range(sin_like.nbpoints)]\n # syp= [sin_like(i,'syp')*1.e3 for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n ape_abszisse = [ape_plot(i,'s') for i in range(ape_plot.nbpoints)]\n ape_ordinate = [ape_plot(i,'aperture')*1.e3 for i in range(ape_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- transverse X tracks\n splot311=plt.subplot(311)\n # splot311=plt.subplot(10,1,(1,3))\n splot311.set_title('transverse x')\n # mapping box\n splot311.text(0.01, 1.1,UTIL.FLAGS.get('mapping'),transform=splot311.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n if UTIL.FLAGS['envelope']:\n plt.plot(z,sgx ,label=r'$\\sigma$ [mm]',color='green')\n plt.plot(z1,cx, label=\"C [mm]\",color='blue',linestyle='-')\n # plt.plot(z1,cxp,label=\"C' [mr]\",color='blue',linestyle=':')\n plt.plot(z2,sx, label=\"S [mm]\",color='red' ,linestyle='-')\n # plt.plot(z2,sxp,label=\"S' [mr]\",color='red' ,linestyle=':')\n vscale=splot311.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # apertures\n if UTIL.FLAGS['useaper']:\n plt.plot(ape_abszisse,ape_ordinate,linestyle='-.')\n N = UTIL.PARAMS['nbsigma']\n sgx = [i*N for i in sgx]\n #label = F'{N:1}$\\sigma$ [mm]'\n label = '{:1}$\\sigma$ [mm]'.format(N)\n plt.plot(z,sgx ,label=label,color='green',linestyle=':')\n # zero line\n splot311.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- transverse Y tracks\n splot312=plt.subplot(312)\n # splot312=plt.subplot(10,1,(4,6))\n splot312.set_title('transverse y')\n if UTIL.FLAGS['envelope']:\n 
plt.plot(z,sgy ,label=r'$\\sigma$ [mm]',color='green')\n plt.plot(z1,cy, label=\"C [mm]\",color='blue',linestyle='-')\n # plt.plot(z1,cyp,label=\"C' [mr]\",color='blue',linestyle=':')\n plt.plot(z2,sy, label=\"S [mm]\",color='red' ,linestyle='-')\n vscale=splot312.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # apertures\n if UTIL.FLAGS['useaper']:\n plt.plot(ape_abszisse,ape_ordinate,linestyle='-.')\n N = UTIL.PARAMS['nbsigma']\n sgy = [i*N for i in sgy]\n plt.plot(z,sgy ,label=label,color='green',linestyle=':')\n # zero line\n splot312.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(313)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('longitudinal')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z1,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')",
"def printPolyCoeffs(lam) :\n ell = len(lam)\n useFormat = \"2.6e\"\n count = 0\n def printLine(s, count) :\n if lam[count] < 0 :\n s = s + 3 * \" \"\n else :\n s = s + 4 * \" \"\n s = s + \"{0:\" + useFormat + \"}\"\n print(s . format(lam[count]))\n count = count + 1\n return count\n if ell >= 1 :\n count = printLine(\"x0y0\", count)\n if ell >= 3 :\n count = printLine(\"x1y0\", count)\n count = printLine(\"x0y1\", count)\n if ell >= 6 :\n count = printLine(\"x2y0\", count)\n count = printLine(\"x1y1\", count)\n count = printLine(\"x0y2\", count)\n if ell >= 10 :\n count = printLine(\"x3y0\", count)\n count = printLine(\"x2y1\", count)\n count = printLine(\"x1y2\", count)\n count = printLine(\"x0y3\", count)\n if ell >= 15 :\n count = printLine(\"x4y0\", count)\n count = printLine(\"x3y1\", count)\n count = printLine(\"x2y2\", count)\n count = printLine(\"x1y3\", count)\n count = printLine(\"x0y4\", count)\n if ell >= 21 :\n count = printLine(\"x5y0\", count)\n count = printLine(\"x4y1\", count)\n count = printLine(\"x3y2\", count)\n count = printLine(\"x2y3\", count)\n count = printLine(\"x1y4\", count)\n count = printLine(\"x0y5\", count)\n if ell >= 28 :\n count = printLine(\"x6y0\", count)\n count = printLine(\"x5y1\", count)\n count = printLine(\"x4y2\", count)\n count = printLine(\"x3y3\", count)\n count = printLine(\"x2y4\", count)\n count = printLine(\"x1y5\", count)\n count = printLine(\"x0y6\", count)\n if ell >= 36 :\n count = printLine(\"x7y0\", count)\n count = printLine(\"x6y1\", count)\n count = printLine(\"x5y2\", count)\n count = printLine(\"x4y3\", count)\n count = printLine(\"x3y4\", count)\n count = printLine(\"x2y5\", count)\n count = printLine(\"x1y6\", count)\n count = printLine(\"x0y7\", count)\n if (ell > 36) or (ell < 1) :\n raise ValueError(\"Polynomial degree less than or equal to 7, please.\")",
"def plot_solution(self, identlist, aperture_lst, plot_ax1=False, **kwargs):\n coeff = kwargs.pop('coeff')\n k = kwargs.pop('k')\n offset = kwargs.pop('offset')\n npixel = kwargs.pop('npixel')\n std = kwargs.pop('std')\n nuse = kwargs.pop('nuse')\n ntot = kwargs.pop('ntot')\n xorder = kwargs.pop('xorder')\n yorder = kwargs.pop('yorder')\n clipping = kwargs.pop('clipping')\n maxiter = kwargs.pop('maxiter')\n\n label_size = 13 # fontsize for x, y labels\n tick_size = 12 # fontsize for x, y ticks\n\n #wave_scale = 'linear'\n wave_scale = 'reciprocal'\n\n #colors = 'rgbcmyk'\n\n self._ax2.cla()\n self._ax3.cla()\n\n if plot_ax1:\n self._ax1.cla()\n x = np.linspace(0, npixel-1, 100, dtype=np.float64)\n\n # find the maximum and minimum wavelength\n wl_min, wl_max = 1e9,0\n allwave_lst = {}\n for aperture in aperture_lst:\n order = k*aperture + offset\n wave = get_wavelength(coeff, npixel, x, np.repeat(order, x.size))\n allwave_lst[aperture] = wave\n wl_max = max(wl_max, wave.max())\n wl_min = min(wl_min, wave.min())\n # plot maximum and minimum wavelength, to determine the display\n # range of this axes, and the tick positions\n self._ax1.plot([0, 0],[wl_min, wl_max], color='none')\n yticks = self._ax1.get_yticks()\n self._ax1.cla()\n\n\n for aperture in aperture_lst:\n order = k*aperture + offset\n color = 'C{}'.format(order%10)\n\n # plot pixel vs. wavelength\n if plot_ax1:\n wave = allwave_lst[aperture]\n if wave_scale=='reciprocal':\n self._ax1.plot(x, 1/wave,\n color=color, ls='-', alpha=0.8, lw=0.8)\n else:\n self._ax1.plot(x, wave,\n color=color, ls='-', alpha=0.8, lw=0.8)\n\n # plot identified lines\n if aperture in identlist:\n list1 = identlist[aperture]\n pix_lst = list1['pixel']\n wav_lst = list1['wavelength']\n mask = list1['mask'].astype(bool)\n res_lst = list1['residual']\n\n if plot_ax1:\n if wave_scale=='reciprocal':\n self._ax1.scatter(pix_lst[mask], 1/wav_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax1.scatter(pix_lst[~mask], 1/wav_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8,\n edgecolor=color)\n else:\n self._ax1.scatter(pix_lst[mask], wav_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax1.scatter(pix_lst[~mask], wav_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8,\n edgecolor=color)\n\n repeat_aper_lst = np.repeat(aperture, pix_lst.size)\n self._ax2.scatter(repeat_aper_lst[mask], res_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax2.scatter(repeat_aper_lst[~mask], res_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8, ec=color)\n self._ax3.scatter(pix_lst[mask], res_lst[mask],\n c=color, s=20, lw=0, alpha=0.8)\n self._ax3.scatter(pix_lst[~mask], res_lst[~mask],\n c='w', s=16, lw=0.7, alpha=0.8, ec=color)\n\n # refresh texts in the residual panels\n text = 'R.M.S. 
= {:.5f}, N = {}/{}'.format(std, nuse, ntot)\n self._ax3._residual_text.set_text(text)\n text = u'Xorder = {}, Yorder = {}, clipping = \\xb1{:g}, Niter = {}'.format(\n xorder, yorder, clipping, maxiter)\n self._ax2._fitpar_text.set_text(text)\n\n # adjust layout for ax1\n if plot_ax1:\n self._ax1.set_xlim(0, npixel-1)\n if wave_scale == 'reciprocal':\n _y11, _y22 = self._ax1.get_ylim()\n newtick_lst, newticklabel_lst = [], []\n for tick in yticks:\n if _y11 < 1/tick < _y22:\n newtick_lst.append(1/tick)\n newticklabel_lst.append(tick)\n self._ax1.set_yticks(newtick_lst)\n self._ax1.set_yticklabels(newticklabel_lst)\n self._ax1.set_ylim(_y22, _y11)\n self._ax1.set_xlabel('Pixel', fontsize=label_size)\n self._ax1.set_ylabel(u'\\u03bb (\\xc5)', fontsize=label_size)\n self._ax1.grid(True, ls=':', color='gray', alpha=1, lw=0.5)\n self._ax1.set_axisbelow(True)\n self._ax1._aperture_text.set_text('')\n for tick in self._ax1.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax1.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n\n # adjust axis layout for ax2 (residual on aperture space)\n self._ax2.axhline(y=0, color='k', ls='--', lw=0.5)\n for i in np.arange(-3,3+0.1):\n self._ax2.axhline(y=i*std, color='k', ls=':', lw=0.5)\n x1, x2 = self._ax2.get_xlim()\n x1 = max(x1,aperture_lst.min())\n x2 = min(x2,aperture_lst.max())\n self._ax2.set_xlim(x1, x2)\n self._ax2.set_ylim(-6*std, 6*std)\n self._ax2.set_xlabel('Aperture', fontsize=label_size)\n self._ax2.set_ylabel(u'Residual on \\u03bb (\\xc5)', fontsize=label_size)\n for tick in self._ax2.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax2.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n\n ## adjust axis layout for ax3 (residual on pixel space)\n self._ax3.axhline(y=0, color='k', ls='--', lw=0.5)\n for i in np.arange(-3,3+0.1):\n self._ax3.axhline(y=i*std, color='k', ls=':', lw=0.5)\n self._ax3.set_xlim(0, npixel-1)\n self._ax3.set_ylim(-6*std, 6*std)\n self._ax3.set_xlabel('Pixel', fontsize=label_size)\n self._ax3.set_ylabel(u'Residual on \\u03bb (\\xc5)', fontsize=label_size)\n for tick in self._ax3.xaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)\n for tick in self._ax3.yaxis.get_major_ticks():\n tick.label1.set_fontsize(tick_size)",
"def SH_surface_plots(n_max=6,figsize=(15,15),fs=15,saveA=True,show=False,dpi=400,vis_type='real'):\n\n N = 100j\n\n for n in range(n_max+1):\n for m in range(n+1):\n plt.close('all')\n print(\"working on Y_%s^%s\" % (n,m) )\n\n PHI,THETA = np.mgrid[0:2*np.pi:N*2, 0:np.pi:N]\n if vis_type == 'real':\n R = sp.sph_harm(m,n,PHI,THETA).real\n if vis_type == 'modulus':\n r = sp.sph_harm(m,n,PHI,THETA)\n R = r * r.conjugate()\n if vis_type == 'unit':\n R = sp.sph_harm(m,n,PHI,THETA).real + 1\n\n X = np.abs(R) * np.sin(THETA) * np.cos(PHI)\n Y = np.abs(R) * np.sin(THETA) * np.sin(PHI)\n Z = np.abs(R) * np.cos(THETA)\n\n norm = colors.Normalize()\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(14,10))\n sm = cm.ScalarMappable(cmap=cm.seismic)\n ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=cm.seismic(norm(R)))\n ax.set_title('real$(Y^%s_%s)$' % (m,n), fontsize=fs)\n ax.set_aspect(1)\n sm.set_array(R)\n fig.colorbar(sm, shrink=0.8)\n\n if saveA:\n fig.savefig('images/%s/%s_%s.png' % (vis_type,n,m), dpi=dpi)\n if show:\n plt.show()\n\n # print(\"\\n only +m values are used.\")\n # for n in range(n_max+1):\n # for m in range(n+1):\n # plt.close('all')\n # print(\"\\n n,m = %s,%s\" % (n,m) )\n #\n # R,X,Y,Z = harmonics(m,n)\n #\n # fig = plt.figure(figsize=figsize)\n # ax = plt.subplot(projection='3d')\n # ax.set_aspect(1)\n # ax.set_title(\"n: %s m: %s\" % (n,m), fontsize=fs+2)\n # ax.plot_surface(X,Y,Z,\\\n # cmap = cm.seismic,\n # norm = colors.Normalize( vmin=np.min(R),vmax=np.max(R) )\\\n # )\n #\n # if saveA:\n # fig.savefig('images/%s_%s.png' % (n,m), dpi=dpi)\n # if show:\n # plt.show()",
"def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')",
"def Birkhoff_polytope(self, n):\n perms = permutations(range(1,n+1))\n verts = []\n for p in perms:\n verts += [ [Polytopes._pfunc(i,j,p) for j in range(1,n+1) \n for i in range(1,n+1) ] ]\n return Polyhedron(vertices = verts)",
"def printfunc(self):\n zero1=self.Newton(True)\n print \"Using initial porition %0.2f ,%0.2f\" %(self.x_init,self.y_0)\n print \"extremum calculated witn Newton-Rapson: %0.2f ,%0.2f.\"%(zero1[0],zero1[1])\n zero2=self.Newton(False)\n print \"extremum calculated witn Secant: %0.2f ,%0.2f.\" %(zero2[0],zero2[1])\n xlist=np.arange(self.x_0-10,self.x_0+10,0.01)\n ylist=np.arange(self.y_0-10,self.y_0+10,0.01)\n X,Y=np.meshgrid(xlist,ylist)\n Z=self.sfunc(X,Y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.plot(xlist, ylist, self.sfunc(xlist,ylist), 'g-',label='function $e^{(-(x-%0.2f)^2-(y-%0.2f)^2)}$' %(self.x_0,self.y_0))\n ax.contour(X, Y, Z)# colors = 'k', linestyles = 'solid')\n ax.plot([zero1[0]], [zero1[0]], self.sfunc(zero1[0],zero1[1]),'bo',label='extrema using Newton-Rapson (%0.2f; %0.2f)'%(zero1[0],zero1[1]))\n ax.plot([zero2[0]], [zero2[0]], self.sfunc(zero2[0],zero2[1]),'ro',label='extrema using Seacent (%0.2f; %0.2f)'%(zero2[0],zero2[1]))\n ax.legend()\n plt.show()",
"def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()",
"def draw_poly(t, n, sz):\n\n s_intern_angles = (n - 2) * 180\n for i in range(n):\n t.forward(sz)\n t.left(180 - s_intern_angles / n)\n time.sleep(5)\n turtle.Screen().clear()",
"def plot_fb8(fb8, npts):\n xs = fb8.spherical_coordinates_to_nu(*grid(npts))\n pdfs = fb8.pdf(xs)\n z,x,y = xs.T\n\n fig = plt.figure(figsize=plt.figaspect(1.))\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(x.reshape(npts, npts),\n y.reshape(npts, npts),\n z.reshape(npts, npts),\n alpha=0.5,\n rstride=1, cstride=1,\n facecolors=cm.gray(pdfs.reshape(npts, npts)/pdfs.max()))\n # ax.set_xticks([])\n # ax.set_yticks([])\n # ax.set_zticks([])\n ax.set_axis_off()\n ax.set_title(make_title(fb8), fontsize=12, y=0.18)\n plt.tight_layout(pad=-5)",
"def plot_multiLyapunov(systems, mode=2, savefig=True, figname=None):\n if mode == 2:\n print(systems)\n# divnorm = colors.DivergingNorm(vmin=max([np.nanmin(np.nanmax(np.nanmax(system.lyapunov_2, axis=0), axis=0)) for system in systems]), vcenter=0, vmax=max[np.nanmax(system.lyapunov_2) for system in systems])\n if figname == None:\n figname = 'sum_of_first_2_lyapunov'\n \n fig, ax = plt.subplots()\n for system in systems:\n\n lyapunov_2 = system.lyapunov_2\n x = system.x\n y = system.y\n l = system.l\n a = system.a\n\n\n\n plt.contourf(a[0,0,:,:],l[0,0,:,:],np.nanmax(np.nanmax(lyapunov_2, axis=0), axis=0), levels = 100, cmap = 'RdBu_r')\n# , norm=divnorm)\n for i in range(lyapunov_2.shape[0]):\n for j in range(lyapunov_2.shape[1]):\n plt.contour(a[0,0,:,:],l[0,0,:,:],lyapunov_2[i,j], levels = [0,], colors=('k',),alpha=0.1)\n lyap_sum = plt.contour(a[0,0,:,:],l[0,0,:,:],lyapunov_2.max(axis=0).max(axis=0), levels = [0,], colors=('blue',),alpha=1)\n\n# cbar = plt.colorbar()\n plt.plot(wild_chaos[:,0],wild_chaos[:,1],'--r',lw=3)\n plt.title('Sum of the first 2 Lyapunov exponents ')\n plt.ylabel('$\\lambda$')\n plt.xlabel('a')\n# cbar.ax.set_ylabel('Sum of the first 2 Lyapunov exponents')\n\n ax.set_ylim([l.min(),l.max()])\n ax.set_xlim([a.min(),a.max()])\n if savefig:\n plt.savefig(f'images/{figname}.pdf')\n plt.show()",
"def plot_2D_edp(self, xmin=-100, xmax=100, zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n #Y = rho_xz[:,1]\n #Z = rho_xz[:,2]\n X.shape = (N, N)\n Y.shape = (N, N)\n Z.shape = (N, N)\n plt.figure()\n plt.contourf(X, Y, Z)",
"def create_four_subplots():\n pass",
"def draw_equitriangle(t,sz):\r\n\r\n\tdraw_poly(t, 3, sz)",
"def generate_2D_N_plots(x, y, labels_dict, file_title, plot_title, no_start):\n\n fig = plt.figure()\n\n for sub_y in y:\n sub_y = sub_y[no_start]\n plt.plot(x, sub_y)\n\n if labels_dict:\n plt.xlabel(labels_dict[\"x\"])\n plt.ylabel(labels_dict[\"y\"])\n if plot_title:\n plt.title(plot_title)\n\n plt.savefig(file_title)",
"def map_plot(self, iter_no):\n \n m = self._m\n n = self._n\n plt.figure()\n label=np.zeros(m*n)\n self._trained = True\n mapped = self.map_vects(datanorm)\n mapped=tuple(map(tuple, mapped))\n c=Counter(mapped)\n \n c= sorted(c.items(), key=itemgetter(1))\n a=[m*n]\n for i in range(0,len(c)):\n x=(((c[i])[0])[0])\n y=(((c[i])[0])[1])\n z=((c[i])[1])\n plt.plot(x, y, 'ro', markersize= z/(2*m*n)) \n plt.savefig('exoplanet{}.png'.format(iter_no))\n p=plt.imread('exoplanet{}.png'.format(iter_no))\n imgs.append(p)\n plt.show()\n plt.close()\n print(c)\n self._trained = False",
"def num_to_subplots_axes(num):\n cols = int(math.ceil(math.sqrt(num)))\n rows = int(math.ceil(float(num) / cols))\n return rows, cols",
"def SwissRollWithConstrain(nei = [5,25,50]):\n n_samples = 4000\n n_neighbor = 60\n noise = 0\n X, _ = make_swiss_roll(n_samples, noise=noise, random_state=42)\n X = X*2 #scaling ths Swiss\n\n neigh = NearestNeighbors(n_neighbors=n_neighbor).fit(X)\n _, indxes = neigh.kneighbors(X)\n\n SwissConstrain = np.delete(X,indxes[1500,:], axis=0)\n SwissConstrainNoisy = SwissConstrain + np.random.normal(0,1,[n_samples-n_neighbor,3])\n\n elevation = 10\n azimoth = 60\n fig = plt.figure(figsize=(21,7))\n ax1 = fig.add_subplot(131, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(X[:, 0], X[:, 1], X[:, 2], c=np.linalg.norm((X[:, 0], X[:, 1]), axis=0))\n ax1.set_title('Swiss Roll')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(132, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrain[:, 0], SwissConstrain[:, 1], SwissConstrain[:, 2],\n c=np.linalg.norm((SwissConstrain[:, 0], SwissConstrain[:, 1]), axis=0))\n ax1.set_title('Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n ax1 = fig.add_subplot(133, projection='3d')\n ax1.set_zlim(-30, 30)\n ax1.scatter(SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1], SwissConstrainNoisy[:, 2],\n c=np.linalg.norm((SwissConstrainNoisy[:, 0], SwissConstrainNoisy[:, 1]), axis=0))\n ax1.set_title('Noisy Swiss Roll with constrain')\n ax1.view_init(elev=elevation, azim=azimoth)\n plt.savefig('Swiss Roll with different petubations')\n\n DataToPlot = [X,SwissConstrain,SwissConstrainNoisy]\n DataName = ['Swiss ISOMAP','Swiss with constrain ISOMAP', 'Swiss with constrain and noise ISOMAP']\n\n # Ploting Swiss Isomapping\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_isomap = Isomap(j, 2, neighbors)\n method = DataName[i]\n ax = fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_isomap[:, 0], Swiss_isomap[:, 1],\n c=np.linalg.norm((Swiss_isomap[:, 0], Swiss_isomap[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_isomap, pallete=Swiss_isomap[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss ISOMAP embbeding for {} neighbour'.format(neighbors))\n\n DataName = ['Swiss LLE', 'Swiss with constrain LLE', 'Swiss with constrain and noise LLE']\n # Ploting Swiss LLE\n for neighbors in nei:\n fig = plt.figure(figsize=(30, 10))\n for i, j in enumerate(DataToPlot):\n Swiss_LLE = LLE(j, 2, neighbors)\n method = DataName[i]\n ax = fig.add_subplot(1, len(DataToPlot), i + 1)\n ax.scatter(Swiss_LLE[:, 0], Swiss_LLE[:, 1],\n c=np.linalg.norm((Swiss_LLE[:, 0], Swiss_LLE[:, 1]), axis=0), cmap=plt.cm.Spectral)\n ax.set_title('{} with {} Neighbours'.format(method, neighbors))\n # making_plot(Swiss_LLE, pallete=Swiss_LLE[:, 0:1], neighbors=neighbors, method=method) #An option to plot single graphs\n plt.savefig('Swiss LLE embbeding for {} neighbour'.format(neighbors))\n return",
"def plot(n, p, psi):\n # plt.plot(psi_inf(n) ** 2, label=\"analytic\")\n c1 = \"black\"\n fig, ax1 = plt.subplots()\n ax1.plot(psi[n - 1] ** 2, label=r\"$n$ = %d\" % n, color=c1)\n ax1.set_xlabel(r\"$i$\")\n ax1.set_ylabel(r\"$|\\psi(x)|^2$\", color=c1)\n for t in ax1.get_yticklabels():\n t.set_color(c1)\n\n ax2 = ax1.twinx()\n c2 = \"#5b07ed\"\n pot = np.array([potential(i, p) for i in range(N)])\n ax2.plot(pot, label=\"potential\", color=c2, linewidth=4)\n ax2.set_ylabel(\"potential\", color=c2)\n for t in ax2.get_yticklabels():\n t.set_color(c2)\n\n ncols = 1 if n > 2 else 2\n # ask matplotlib for the plotted objects and their labels, from http://stackoverflow.com/a/10129461\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc=\"upper center\", ncol=ncols)\n\n ylim = {1: 0.037, 2: 0.027}\n if n in ylim:\n ax1.set_ylim([0, ylim[n]])\n\n plt.title(r\"Time-independent Schrödinger: $n = %d$\" % n)\n plt.show()\n # plt.savefig(\"%s_%d\" % (p, n))\n plt.close()",
"def plot_sieve(n, x, poly={}, lin={}, label=True, shade=True):\n v = list(range(x + 1)) # integers 0, 1, ..., x\n if n == 0:\n v = prime_range(x)\n else:\n for p in prime_divisors(n):\n v = [k for k in v if k % p != 0 or k == p]\n # eliminate non-prime multiples of p\n v = set(v)\n j = 0\n w = [(0, j)]\n for i in range(1, x + 1):\n w.append((i, j))\n if i in v:\n j += 1\n w.append((i, j))\n w.append((i, 0))\n w.append((0, 0))\n if n == 0:\n t = \"Primes\"\n pos = x, .7 * j\n elif n == 1:\n t = \"All Numbers\"\n pos = x, 1.03 * j\n else:\n P = prime_divisors(n)\n if len(P) == 1:\n t = \"Sieve by %s\" % P[0]\n else:\n t = \"Sieve by %s\" % (', '.join([str(_) for _ in P]))\n pos = x, 1.05 * j\n F = line(w[:-2], **lin)\n if shade:\n F += polygon(w, **poly)\n if label:\n F += text(t, pos, horizontal_alignment=\"right\", rgbcolor='black')\n return F",
"def plot_2nd(self, mod = 'F'):\n if not mpl: raise \"Problem with matplotib: Plotting not possible.\"\n f = plt.figure(figsize=(5,4), dpi=100)\n \n A2 = []\n \n strainList= self.__structures.items()[0][1].strainList\n \n if len(strainList)<=5:\n kk=1\n ll=len(strainList)\n grid=[ll]\n elif len(strainList)%5 == 0:\n kk=len(strainList)/5\n ll=5\n grid=[5 for i in range(kk)]\n else:\n kk=len(strainList)/5+1\n ll=5\n grid=[5 for i in range(kk)]\n grid[-1]=len(strainList)%5\n \n \n n=1\n m=1\n for stype in strainList:\n atoms = self.get_atomsByStraintype(stype)\n self.__V0 = atoms[0].V0\n strainList = atoms[0].strainList\n if self.__thermodyn and mod == 'F':\n energy = [i.gsenergy+i.phenergy[-1] for i in atoms]\n elif self.__thermodyn and mod=='E0':\n energy = [i.gsenergy for i in atoms]\n elif self.__thermodyn and mod=='Fvib':\n energy = [i.phenergy[-1] for i in atoms]\n else:\n energy = [i.gsenergy for i in atoms]\n \n strain = [i.eta for i in atoms]\n \n spl = '1'+str(len(strainList))+str(n)\n #plt.subplot(int(spl))\n #a = f.add_subplot(int(spl))\n if (n-1)%5==0: m=0\n \n \n a = plt.subplot2grid((kk,ll), ((n-1)/5,m), colspan=1)\n #print (kk,ll), ((n-1)/5,m)\n j = 0\n for i in [2,4,6]:\n ans = Energy()\n ans.energy = energy\n ans.strain = strain\n ans.V0 = self.__V0\n \n fitorder = i\n ans.set_2nd(fitorder)\n A2.append(ans.get_2nd())\n \n strains = sorted(map(float,A2[j+3*(n-1)].keys()))\n \n try:\n dE = [A2[j+3*(n-1)][str(s)] for s in strains]\n except:\n continue\n a.plot(strains, dE, label=str(fitorder))\n a.set_title(stype)\n a.set_xlabel('strain')\n a.set_ylabel(r'$\\frac{d^2E}{d\\epsilon^2}$ in eV')\n \n j+=1\n \n n+=1\n m+=1\n \n a.legend(title='Order of fit')\n return f",
"def mapFeaturePlot(x1, x2, degree):\n out = np.ones(1)\n for i in range(1, degree+1):\n for j in range(i+1):\n terms = (x1**(i-j)) * (x2**j)\n out = np.hstack((out, terms))\n\n return out",
"def xz_combined(Feaff, Fe, Fi, muVn, length, x, z):\n\n fig, ax = plt.subplots(figsize=(8, 6))\n\n plt.title(\n '$\\\\nu_e^{aff}$, $\\\\nu_e$, $\\\\nu_i$ and $\\\\mu_V^{N}$ for pixel (' + str(x) + ', ' + str(z) + ')')\n axes = [ax, ax.twinx(), ax.twinx(), ax.twinx()]\n\n fig.subplots_adjust(right=0.75)\n axes[-2].spines['right'].set_position(('axes', 1.3))\n axes[-2].set_frame_on(True)\n axes[-2].patch.set_visible(False)\n axes[-1].spines['right'].set_position(('axes', 1.6))\n axes[-1].set_frame_on(True)\n axes[-1].patch.set_visible(False)\n\n axes[0].plot(Feaff[:length, x, z], color='Blue')\n axes[0].set_ylabel('$\\\\nu_e^{aff}(x, t)$', color='Blue')\n axes[0].set_xlabel('t (ms)')\n axes[0].tick_params(axis='y', colors='Blue')\n\n axes[1].plot(Fe[:length, x, z], color='Orange')\n axes[1].set_ylabel('$\\\\nu_e(x, t)$', color='Orange')\n axes[1].set_xlabel('t (ms)')\n axes[1].tick_params(axis='y', colors='Orange')\n\n axes[2].plot(Fi[:length, x, z], color='Green')\n axes[2].set_ylabel('$\\\\nu_i(x, t)$', color='Green')\n axes[2].set_xlabel('t (ms)')\n axes[2].tick_params(axis='y', colors='Green')\n\n axes[3].plot(muVn[:length, x, z], color='Red')\n axes[3].set_ylabel('$\\\\mu_V^{N}(x, t)$', color='Red')\n axes[3].set_xlabel('t (ms)')\n axes[3].tick_params(axis='y', colors='Red')\n\n plt.setp(\n ax,\n xticks=np.linspace(0, length, 5),\n xticklabels=np.linspace(0, length/2, 5, dtype=int)\n )\n fig.tight_layout()\n plt.show()\n\n return"
]
| [
"0.5828909",
"0.5704472",
"0.56556016",
"0.550975",
"0.5487325",
"0.5477178",
"0.5431211",
"0.540554",
"0.52885914",
"0.5253599",
"0.5251076",
"0.52264196",
"0.5221086",
"0.5194837",
"0.5173083",
"0.5154963",
"0.5153161",
"0.515265",
"0.5121226",
"0.51142865",
"0.5109068",
"0.50938195",
"0.5062479",
"0.50581336",
"0.504263",
"0.5025334",
"0.5013392",
"0.4991504",
"0.4984121",
"0.49757865"
]
| 0.60281724 | 0 |
Create a dummy Supervisor structure and start a global patch. | def setUp(self):
from supvisors.plugin import expand_faults
self.supervisor = DummySupervisor()
# add a global patch
self.supvisors_patcher = patch('supvisors.rpcinterface.Supvisors')
self.mocked_supvisors = self.supvisors_patcher.start()
self.mocked_supvisors.return_value = self.supervisor.supvisors
# add fault codes to Supervisor
expand_faults() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def supervisor_start():\n log('start supervisor', green)\n sudo('/etc/init.d/supervisor start')",
"def start():\n if env.latest:\n if env.python3:\n sudo('/bin/systemctl start demo-latest-py3', shell=False)\n else:\n sudo('/bin/systemctl start demo-latest.service', shell=False)\n else:\n with cd(env.directory):\n sudo('./bin/supervisorctl start zeoserver', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient1', user=env.deploy_user)\n sudo(\"sleep 2\")\n sudo('./bin/supervisorctl start zeoclient2', user=env.deploy_user)",
"def start_stubs(_scenario):\r\n for name, service in SERVICES.iteritems():\r\n fake_server = service['class'](port_num=service['port'])\r\n setattr(world, name, fake_server)",
"def startFactory(self):\n self.watchdog.start()\n super().startFactory()",
"def setUp(self):\n self.supvisors = DummySupvisors()",
"def startFluidinfo():\n sudo('start fluidinfo-api')\n sudo('/etc/init.d/haproxy start')\n sudo('/etc/init.d/nginx start')",
"def setUp(self):\r\n self.reactor = DummyProcessReactor()\r\n self.pm = ProcessMonitor(reactor=self.reactor)\r\n self.pm.minRestartDelay = 2\r\n self.pm.maxRestartDelay = 10\r\n self.pm.threshold = 10",
"def init():\n\n banner(\"init\")\n with show(\"output\"):\n if not env.get('no_apt_update'):\n sudo('apt-get update')\n\n require.directory(env.path, mode=\"777\", use_sudo=True)\n require.directory('/var/run/%s' % env.project_name, owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/log/%s' % env.project_name, owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/log/supervisord/', owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/run/supervisord/', owner='www-data', group='www-data', mode='770', use_sudo=True)\n\n require.deb.packages([\n 'gcc', 'python-all-dev', 'libpq-dev', 'libjpeg-dev', 'libxml2-dev', 'libxslt1-dev', 'libmysqlclient-dev',\n 'libfreetype6-dev', 'libevent-dev', 'supervisor'\n ])\n require.python.pip(version=\"1.0\")\n\n new_virtualenv()\n\n me = run('whoami')\n sudo('adduser %s www-data' % me)\n\n install_nginx()\n\n if env.mysql:\n require.mysql.server(password=env.mysql_password)\n with settings(mysql_user='root', mysql_password=env.mysql_password):\n require.mysql.user(env.mysql_username, env.mysql_password)\n require.mysql.database(env.mysql_dbname, owner=env.mysql_username)",
"def test_base(self):\n self.render_config_template(\n )\n\n proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"mockbeat start running.\"))\n proc.check_kill_and_wait()\n assert self.log_contains(\"mockbeat stopped.\")",
"def init():\n\n @click.group(cls=cli.make_commands(__name__))\n def run():\n \"\"\"Cross-cell supervision tools.\"\"\"\n cli.init_logger('daemon.conf')\n\n return run",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)",
"def _startup():\n from octoprint_dashboard.model import User, Config\n if Config.query.scalar() is None:\n print(\"No config, add config via command 'python -m flask config'\")\n shutdown_server()\n if User.query.filter_by(superadmin=True).count() == 0:\n print(\"No superadmin, add superadmin via command 'python -m flask add_superadmin <username>'\")\n shutdown_server()\n\n scheduler.start() # starts background task scheduler\n zeroconf_browser.start() # starts MDNS service discovery",
"def handle(self):\n spawner = Spawner()\n\n self.handle_daemon('spawner', spawner)",
"def _start_dummy_server(self):\r\n dummy_executable = os.path.join(__here__, 'tests', 'dummy_xsct.tcl')\r\n start_command = 'tclsh {}'.format(dummy_executable)\r\n logger.info('Starting xsct server: %s', start_command)\r\n stdout = None\r\n self._xsct_server = subprocess.Popen(start_command, stdout=stdout)\r\n logger.info('xsct started with PID: %d', self._xsct_server.pid)",
"def test_creation(self):\n from supvisors.rpcinterface import RPCInterface\n rpc = RPCInterface(self.supervisor)\n self.assertListEqual([call(self.supervisor)], self.mocked_supvisors.call_args_list)\n self.assertIsInstance(rpc.supvisors, MockedSupvisors)",
"def start_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"start %s\" % item.strip())",
"def main():\n run_nutanix_vm_creation_module()",
"def setUp(self):\n self.wes_server_process = subprocess.Popen(\n 'python {}'.format(os.path.abspath('wes_service/wes_service_main.py')),\n shell=True)\n time.sleep(5)",
"def test_make_server_spawn(self):\n for backend in ['gevent', 'fastgevent', 'geventwebsocket', 'socketio']:\n self.tt = minimock.TraceTracker()\n self._check_make_server_spawn(backend)\n minimock.restore()",
"def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, 
mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def start():\n with cd(env.directory):\n sudo('./bin/supervisorctl start all', user=env.deploy_user)",
"def start_fixture(self):\n pass",
"def initialize(self, create_new=True, sysid=\"\"):",
"def __init__(self, name=None, start=True, *args, **kwargs):\n name = \"VM_TEMPL_1\" if name is None else name\n super(MgmtVM, self).__init__(name=name, start=start, *args, **kwargs)\n\n #self.add_proc(rift.vcs.MsgBrokerTasklet())\n self.add_proc(rift.vcs.DtsRouterTasklet())\n self.add_proc(rift.vcs.DtsPerfTasklet())\n #self.add_proc(rift.vcs.LogdTasklet())\n\n self.add_proc(rift.vcs.procs.RiftCli());\n\n #Confd would need RestConf present\n self.add_proc(rift.vcs.uAgentTasklet())\n #self.add_proc(rift.vcs.Confd())\n self.add_proc(rift.vcs.RestconfTasklet())\n self.add_proc(rift.vcs.Watchdog())\n self.add_proc(RedisServer())\n\n #self.add_proc(rift.vcs.Webserver())\n #self.add_proc(rift.vcs.RedisCluster())",
"def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()",
"def startService(self):\n super(SpawnerService, self).startService()\n for spawn in self.pendingSpawns:\n self.spawn(*spawn)\n self.pendingSpawns = []",
"def start():\n\n start_server()",
"def __init__(self, name=None, start=True, *args, **kwargs):\n name = \"VM_TEMPL_2\" if name is None else name\n super(CliVM, self).__init__(name=name, start=start, *args, **kwargs)\n\n self.add_proc(rift.vcs.DtsPerfTasklet(), mode_active=False)\n self.add_proc(RedisServer(), mode_active=False) \n if not start:\n self.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)",
"def main():\r\n service_name, port_num, config_dict = get_args()\r\n print \"Starting stub service '{0}' on port {1}...\".format(service_name, port_num)\r\n\r\n server = SERVICES[service_name](port_num=port_num)\r\n server.config.update(config_dict)\r\n\r\n try:\r\n while True:\r\n time.sleep(1)\r\n\r\n except KeyboardInterrupt:\r\n print \"Stopping stub service...\"\r\n\r\n finally:\r\n server.shutdown()",
"def _start(self):\n pass"
]
| [
"0.60909",
"0.60832024",
"0.59310883",
"0.5775345",
"0.57694423",
"0.5722168",
"0.5652203",
"0.5642636",
"0.56172496",
"0.56158453",
"0.55636376",
"0.555367",
"0.55494773",
"0.5527663",
"0.55231017",
"0.55197024",
"0.551933",
"0.55121124",
"0.55111945",
"0.5498793",
"0.54974705",
"0.5483706",
"0.5482825",
"0.5455995",
"0.54538774",
"0.5448355",
"0.544105",
"0.5428299",
"0.5426142",
"0.54090124"
]
| 0.69613576 | 0 |
Test the get_api_version RPC. | def test_api_version(self):
from supvisors.rpcinterface import API_VERSION, RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
self.assertEqual(API_VERSION, rpc.get_api_version()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_oapi_version(self):\n pass",
"def test_GetVersion(self):\n ret = wrap_xmlrpc_call(\n self.am_client.GetVersion, [], {}, settings.TIMEOUT)\n self.assertEqual(ret['geni_api'], 1)",
"def test_get_version(self):\n pass",
"def test_api_versioning(self):\n response = self.request_knox(\n self.url,\n media_type=views_api.CORE_API_MEDIA_TYPE,\n version=views_api.CORE_API_DEFAULT_VERSION,\n )\n self.assertEqual(response.status_code, 200)",
"def test_server_details_ok(self):\n response = self.call_api('server_details', {}, 200).json\n self.assertEqual(utils.get_app_version(), response['server_version'])",
"def test_get_version(mocker):\n client = wsgi.application.test_client(mocker)\n\n url = '/api/v0/version'\n\n response = client.get(url)\n\n output = {\n \"message\": f\"AIOPS Publisher Version {wsgi.VERSION}\",\n \"status\": \"OK\",\n \"version\": wsgi.VERSION\n }\n assert response.get_json() == output\n assert response.status_code == 200",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def _get_api_version(self):\n with self.nb_session.get(\n self.nb_api_url, timeout=10,\n verify=(not settings.NB_INSECURE_TLS)) as resp:\n result = float(resp.headers[\"API-Version\"])\n log.info(\"Detected NetBox API v%s.\", result)\n return result",
"def test_api_version(self, method):\n self.client = trovebox.Trovebox(host=self.test_host, **self.test_oauth)\n self.client.configure(api_version=1)\n self._register_uri(method,\n uri=\"http://%s/v1/%s\" % (self.test_host,\n self.test_endpoint))\n GetOrPost(self.client, method).call(self.test_endpoint)",
"def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version",
"def testGetVersion(self):\n helper = pylint.PylintHelper()\n\n helper._GetVersion()",
"def test_api_version(\n decoy: Decoy, subject: ProtocolCore, api_version: APIVersion\n) -> None:\n assert subject.api_version == api_version",
"def test_gets_to_version_page(self):\n\n response = self.client.get('/version')\n\n self.assertEqual(response.status_code, 200)",
"def get_version(self):\n return self.__make_api_call('get/version')",
"def test_check_version(mock_send_message):\n A1sim.check_version(BASE_URL)\n mock_send_message.assert_called_once_with('GET',\n 'Get ric version',\n (f\"{BASE_URL}/counter/interface\"))",
"def test_show_version():\n result = runner.invoke(app, [\"--version\"])\n assert result.exit_code == 0\n assert \"Confluence poster version\" in result.stdout",
"def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])",
"def test_set_api_version_valid(self):\n args = {'major': '20', 'minor': 1}\n\n expected_call_args_list = [mock.call('20'), mock.call(1)]\n\n mock_invoke = self.mock_object(six, 'text_type', return_value='str')\n self.root.set_api_version(**args)\n\n self.assertEqual(expected_call_args_list, mock_invoke.call_args_list)",
"def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')",
"def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')",
"def test_main_version(\n app_tester: ApplicationTester, valiant_app_title: str, valiant_version: str\n) -> None:\n app_tester.execute(\"--version\")\n expected = f\"{valiant_app_title} version {valiant_version}\\n\"\n assert expected == app_tester.io.fetch_output()",
"def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200",
"def testSimpleEchoMethodReturnsVersion(self):\n body = dumps({'id': 100, 'jsonrpc': '2.0', 'method': 'pass',\n 'params': [39, 'steps']})\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual('2.0', response['jsonrpc'])",
"def test_version_auto_ok(self, m_get, k8sconfig):\n\n # This is a genuine K8s response from Minikube.\n response = {\n 'major': '1', 'minor': '10',\n 'gitVersion': 'v1.10.0',\n 'gitCommit': 'fc32d2f3698e36b93322a3465f63a14e9f0eaead',\n 'gitTreeState': 'clean',\n 'buildDate': '2018-03-26T16:44:10Z',\n 'goVersion': 'go1.9.3',\n 'compiler': 'gc', 'platform': 'linux/amd64'\n }\n m_get.return_value = (response, None)\n\n # Create vanilla `Config` instance.\n m_client = mock.MagicMock()\n k8sconfig = k8sconfig._replace(client=m_client)\n\n # Test function must contact the K8s API and return a `Config` tuple\n # with the correct version number.\n config2, err = k8s.version(k8sconfig)\n assert err is False\n assert isinstance(config2, K8sConfig)\n assert config2.version == \"1.10\"\n\n # Test function must have called out to `get` to retrieve the\n # version. Here we ensure it called the correct URL.\n m_get.assert_called_once_with(m_client, f\"{k8sconfig.url}/version\")\n assert not m_client.called\n\n # The return `Config` tuple must be identical to the input except for\n # the version number because \"k8s.version\" will have overwritten it.\n assert k8sconfig._replace(version=None) == config2._replace(version=None)\n del config2, err\n\n # Repeat the test for a Google idiosyncracy which likes to report the\n # minor version as eg \"11+\".\n response[\"minor\"] = \"11+\"\n m_get.return_value = (response, None)\n config, err = k8s.version(k8sconfig)\n assert config.version == \"1.11\"",
"def test_api_versioning_invalid_version(self):\n response = self.request_knox(\n self.url,\n media_type=views_api.CORE_API_MEDIA_TYPE,\n version=CORE_API_VERSION_INVALID,\n )\n self.assertEqual(response.status_code, 406)",
"def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION",
"def get_api_version(self):\n major, minor, patch = self.client.config['api_version']\n return '%s.%s.%s' % (major, minor, patch)",
"def test_version(self):\n pass",
"def get_api_version(session: \"Session\") -> str:\n component_versions = get_component_versions(session)\n return str(component_versions.get(CoordConsts.KEY_API_VERSION, \"2.0.0\"))",
"def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)"
]
| [
"0.8000525",
"0.7959162",
"0.7700109",
"0.7642845",
"0.7372035",
"0.7357288",
"0.7071142",
"0.702428",
"0.6985507",
"0.69261676",
"0.6923056",
"0.6875212",
"0.68481594",
"0.68181723",
"0.680943",
"0.6766287",
"0.6759909",
"0.6751502",
"0.6746091",
"0.6720698",
"0.6720222",
"0.67056",
"0.6691171",
"0.6685896",
"0.6675565",
"0.6648025",
"0.66277725",
"0.6600183",
"0.65816003",
"0.65615916"
]
| 0.7964401 | 1 |
Test the get_supvisors_state RPC. | def test_supvisors_state(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.fsm.serial.return_value = 'RUNNING'
# create RPC instance
rpc = RPCInterface(self.supervisor)
self.assertEqual('RUNNING', rpc.get_supvisors_state()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_state(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.fsm.state = 1\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test there is no exception when internal state is in list\n rpc._check_state([0, 1, 2])\n # test there is an exception when internal state is not in list\n with self.assertRaises(RPCError) as exc:\n rpc._check_state([0, 2])\n self.assertEqual(Faults.BAD_SUPVISORS_STATE, exc.exception.code)\n self.assertEqual(\"BAD_SUPVISORS_STATE: Supvisors (state=DEPLOYMENT) \"\n \"not in state ['INITIALIZATION', 'OPERATION'] to perform request\",\n exc.exception.text)",
"def test_check_operating(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating()\n self.assertListEqual([call([2])], mocked_check.call_args_list)",
"def test_creation(self):\n from supvisors.rpcinterface import RPCInterface\n rpc = RPCInterface(self.supervisor)\n self.assertListEqual([call(self.supervisor)], self.mocked_supvisors.call_args_list)\n self.assertIsInstance(rpc.supvisors, MockedSupvisors)",
"def test_check_from_deployment(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_from_deployment()\n self.assertListEqual([call([1, 2, 3, 4, 5])], mocked_check.call_args_list)",
"def test_get_instance_state(self):\r\n self.peer_grading.get_instance_state()",
"def test_check_operating_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating_conciliation()\n self.assertListEqual([call([2, 3])], mocked_check.call_args_list)",
"def test_check_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_conciliation()\n self.assertListEqual([call([3])], mocked_check.call_args_list)",
"def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()",
"def testStateRequest(self):\n pkt = struct.pack('<')\n self.mgr.sendState = Mock()\n self.mgr.handlePacket(app_packet.GOPRO_REQUEST_STATE, pkt)\n self.mgr.sendState.assert_called_with()",
"def test_get_state_list(self):\n\n mdp = MDP(5)\n state_list = mdp.get_state_list()\n\n self.assertEqual(len(state_list), 5)\n self.assertIn(mdp.get_state(0), state_list)",
"async def test_voip_sensor_states(hass):\n\n await setup_platform(hass, [SENSOR_DOMAIN], usage=MOCK_VOIP_USAGE)\n\n assert hass.states.get(\"sensor.mobile_national_calls\").state == \"1\"\n assert hass.states.get(\"sensor.mobile_sms_sent\").state == STATE_UNKNOWN\n assert hass.states.get(\"sensor.mobile_data_used\").state == STATE_UNKNOWN",
"def test_shutdown(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.shutdown())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_shutdown.call_args_list)",
"def test_api_version(self):\n from supvisors.rpcinterface import API_VERSION, RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertEqual(API_VERSION, rpc.get_api_version())",
"def test_device_states_get(self):\n pass",
"def test_get_status(self):\n pass",
"def test_get_status(self):\n pass",
"def test_getbinarystate(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = b'Soapaction: \"urn:Belkin:service:basicevent:1#GetBinaryState\"'\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert resp.status_code == 200\n\n root = ET.fromstring(resp.text)\n val = root.find(\".//BinaryState\").text\n assert val in [\"0\", \"1\"]",
"def test_get_state(self):\n\n # test that you can get a state by numerical id\n mdp = MDP()\n mdp.add_state(0)\n self.assertEquals(type(mdp.get_state(0)), State)\n self.assertIn(mdp.get_state(0), mdp.get_state_list())",
"def get_status(self, state):\n raise NotImplementedError",
"def test_get_node_state(self):\n pass",
"def test_get_page_state(self):\n response = self.client.open(\n '/rui_support/page-state/{tempIdentifier}'.format(tempIdentifier='tempIdentifier_example'),\n method='GET',\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"async def test_states_view_filters(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.mock_policy({\"entities\": {\"entity_ids\": {\"test.entity\": True}}})\n hass.states.async_set(\"test.entity\", \"hello\")\n hass.states.async_set(\"test.not_visible_entity\", \"invisible\")\n resp = await mock_api_client.get(const.URL_API_STATES)\n assert resp.status == HTTPStatus.OK\n json = await resp.json()\n assert len(json) == 1\n assert json[0][\"entity_id\"] == \"test.entity\"",
"def the_user_should_be_able_to_get_the_state_of_the_connected_device():\n assert web_app.get_state()",
"def test_case16(self):\n\n result = self.graph1.supervisorExists(\"supervisor1\")\n\n self.assertTrue(result)",
"def _get_state(self):",
"def test_verify_state_of_a_device():",
"def test_get_state(self):\n self.pump.get_state = MagicMock(return_value=1)\n self.assertEqual(self.pump.get_state(), 1)",
"def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)",
"def testGetPortState(self):\n self.ports.get_port_state(file_name = 'get_port_state.xml', port_ids = portsDict['port_ids'], port_states = portsDict['port_state'])",
"def test_get_node_state_servicelight(self):\n pass"
]
| [
"0.73319936",
"0.5963998",
"0.5954794",
"0.5952734",
"0.583573",
"0.56402594",
"0.5603491",
"0.55639404",
"0.55524933",
"0.5551539",
"0.5479721",
"0.542013",
"0.5410942",
"0.5388947",
"0.53072816",
"0.53072816",
"0.52877516",
"0.52841717",
"0.52824414",
"0.5274595",
"0.52452505",
"0.5243143",
"0.52300966",
"0.5190945",
"0.5177767",
"0.51750857",
"0.5168143",
"0.51599497",
"0.5155098",
"0.5147857"
]
| 0.8239171 | 0 |
Test the get_master_address RPC. | def test_master_address(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.master_address = '10.0.0.1'
# create RPC instance
rpc = RPCInterface(self.supervisor)
self.assertEqual('10.0.0.1', rpc.get_master_address()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_client_address_retrieve(self):\n pass",
"def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)",
"def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")",
"def test_get_address():\n\n # Wait for workspace to be initialized\n time.sleep(40)\n bambi = create_test_bambi()\n address = bambi.get_address_to_workspace()\n assert address",
"def test_client_address_create(self):\n pass",
"def test_client_address_update(self):\n pass",
"def test_get_order_address(self):\n pass",
"def get_master_address(self):\n if self.master_address:\n return self.master_address\n return super(CelerySentinelConnectionPool, self).get_master_address()",
"def test_11_individual_1_address(self):\n with mock_api(individual_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999254')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999254'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Name of the billing address\n self.assertEqual(partner.name, 'Ferreira Margaux')\n self.assertEqual(partner.type, 'default')\n # billing address merged with the partner\n self.assertEqual(len(partner.child_ids), 0)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 1)\n address_bind = partner.magento_address_bind_ids[0]\n self.assertEqual(address_bind.magento_id, '9999253',\n msg=\"The merged address should be the \"\n \"billing address\")",
"def test_get_address(self):\r\n note_data = self.tape.dataframe.iloc[0]\r\n note = self.tape._get_note_object(note_data)\r\n eq_(note.get_address(), '8 Brown St, Methuen, MA 01844')",
"def test_set_address(self):\n s1 = System()\n s1.set_address(\"101 St James Rd\")\n self.assertEqual(s1.get_address(), \"101 St James Rd\")",
"def _get_address(self):\n return self.__address",
"def test_client_address_partial_update(self):\n pass",
"def test_return_to_assigned_master(\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n run_salt_cmds,\n):\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_1_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns",
"def _getAddress(self, result):\r\n ((serverReady, _), (clientReady, _)) = result\r\n\r\n if not (serverReady and clientReady):\r\n # There was a problem in making the server/client ready for the\r\n # connection attempt\r\n # TODO: What should we do here?\r\n return Failure(InternalError('Server/Client could not be prepared '\r\n 'for connection attempt.'))\r\n\r\n return self._serverEndpoint.getAddress()",
"def test_client_addresses_list(self):\n pass",
"def test_13_company_1_address(self):\n with mock_api(company_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999256')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999256'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Company of the billing address\n self.assertEqual(partner.name, 'Marechal')\n self.assertEqual(partner.type, 'default')\n # all addresses as contacts\n self.assertEqual(len(partner.child_ids), 1)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 0)\n self.assertEqual(partner.child_ids[0].type, 'invoice',\n msg=\"The billing address should be of \"\n \"type 'invoice'\")",
"def nomad_address():\n\n print(nomad.get_address())",
"def test_lookupAddress(self):\n servers = {\n ('1.1.2.3', 53): {\n (b'foo.example.com', A): {\n 'authority': [(b'foo.example.com', Record_NS(b'ns1.example.com'))],\n 'additional': [(b'ns1.example.com', Record_A('34.55.89.144'))],\n },\n },\n ('34.55.89.144', 53): {\n (b'foo.example.com', A): {\n 'answers': [(b'foo.example.com', Record_A('10.0.0.1'))],\n }\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress(b'foo.example.com')\n d.addCallback(getOneAddress)\n d.addCallback(self.assertEqual, '10.0.0.1')\n return d",
"def getAddress(self) -> int:\n ...",
"def test_client_address_delete(self):\n pass",
"def test_address_page(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type = \"html_text\")\n self.assertTrue(b'Address Locator' in response.data)",
"def test_master(busname):\n check_master_not_running()\n\n process = subprocess.Popen(['rebus_master', busname],\n stderr=subprocess.PIPE, bufsize=0)\n # wait for master bus to be ready\n # TODO look into race condition. Another SIGINT handler?\n time.sleep(2)\n output = process.stderr.read(1)\n process.send_signal(signal.SIGINT)\n process.wait()\n assert process.returncode == 0, output + process.stderr.read()",
"def test_all_addresses_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info_1'}),\n '10.0.0.2': Mock(**{'serial.return_value': 'address_info_2'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertItemsEqual(['address_info_1', 'address_info_2'],\n rpc.get_all_addresses_info())",
"async def test_get_organization_address(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/addresses/{address_id}'.format(address_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test82_GenNewAddress(self):\n payload = {\n 'id': 0,\n 'params': {'amount': 100.0, 'qr_code': False, 'gen_new': False},\n 'jsonrpc': '2.0',\n 'method': 'create_order'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(res['receiving_address'], 'mjPS9N4T6cjcWLvdkv4jtCrzNA6C6qm8uv')\n self.assertEqual(res['amount'], '0.2860001')\n self.assertTrue(res['exact_amount'])\n order_id = res['order_id']\n payload = {\n 'id': 0, 'params': {'bindings':{'receiving_address': 'mjPS9N4T6cjcWLvdkv4jtCrzNA6C6qm8uv'}},\n 'jsonrpc': '2.0',\n 'method': 'get_address'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result'][0]\n self.assertEqual(res['keypath'], '0/0/4')\n self.assertEqual(res['max_tx'], config.MAX_LEAF_TX)\n self.assertTrue(res['special_digits'] > 0)",
"def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?",
"async def test_update_address(hass):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n assert device.api.config.host == \"1.2.3.4\"\n\n with patch(\n \"homeassistant.components.axis.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, respx.mock:\n mock_default_vapix_requests(respx, \"2.3.4.5\")\n await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n \"host\": \"2.3.4.5\",\n \"port\": 80,\n \"name\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": SOURCE_ZEROCONF},\n )\n await hass.async_block_till_done()\n\n assert device.api.config.host == \"2.3.4.5\"\n assert len(mock_setup_entry.mock_calls) == 1",
"def test_address(self):\n result = irc.dccDescribe(\"CHAT arg 3232235522 6666\")\n self.assertEqual(result, \"CHAT for host 192.168.0.2, port 6666\")",
"def test81_GenNewAddress(self):\n payload = {\n 'id': 0,\n 'params': {'amount': 100.0, 'qr_code': False, 'gen_new': True, 'order_id': 'DUMMY_ORD_3'},\n 'jsonrpc': '2.0',\n 'method': 'create_order'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(res['amount'], '0.286')\n self.assertEqual(res['exact_amount'], False)\n self.assertEqual(res['receiving_address'], 'miXzTXvkEsfVmkwMjLCHfXAjboodrgQQ9Z')"
]
| [
"0.7373237",
"0.7007187",
"0.6640062",
"0.6608692",
"0.65847445",
"0.6574519",
"0.6425423",
"0.635024",
"0.6174339",
"0.61202514",
"0.6092526",
"0.60028934",
"0.5985169",
"0.59719956",
"0.5930091",
"0.59041667",
"0.58911395",
"0.5854046",
"0.58010584",
"0.57993996",
"0.57926863",
"0.5777211",
"0.577333",
"0.57697433",
"0.57680714",
"0.5712355",
"0.5701122",
"0.56787264",
"0.56646955",
"0.5605202"
]
| 0.808054 | 0 |
Test the get_strategies RPC. | def test_strategies(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.options.auto_fence = True
self.supervisor.supvisors.options.conciliation_strategy = 1
self.supervisor.supvisors.options.starting_strategy = 2
# create RPC instance
rpc = RPCInterface(self.supervisor)
self.assertDictEqual({'auto-fencing': True, 'starting': 'MOST_LOADED',
'conciliation': 'INFANTICIDE'}, rpc.get_strategies()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_scenario(self):\n pass",
"def list_active_strategies():\n response = houston.get(\"/zipline/trade\")\n\n houston.raise_for_status_with_json(response)\n return response.json()",
"def test_get_scenarios(self):\n pass",
"def test_load_strategies_help_nc_params(self) -> None:\n result = load_help_nc_params(\"strategies\")\n self.assertIs(type(result), dict)\n self.assertIsNot(result, {})",
"def get_leaderboard_strategies_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_leaderboard_strategies\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']\n\n return self.api_client.call_api('/leaderboards/strategies', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[str]',\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def select_strategies(self) -> NoReturn:\n strategy_map = self._get_strategies()\n show_data(strategy_map, header=SPLITTING_HEADER, key2row=True)\n strategy_id = self.input_handler.expect_input(PRINTOUTS[0])\n\n self.input_handler.check_input(strategy_id, strategy_map.keys())\n self.strategy = strategy_map[strategy_id]",
"def final_strategy_test():\r\n print('-- Testing final_strategy --')\r\n print('Win rate:', compare_strategies(final_strategy))",
"def test_wallets_get(self):\n pass",
"def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}",
"def test_prefectures_get(self):\n pass",
"def test_strategy(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C])\n self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])\n self.responses_test([C] * 11, [C] * 10 + [D], [C])",
"def test_get(self):\n pass",
"def test_strategy(self):\n self.responses_test([], [], [C], random_seed=1)\n self.responses_test([], [], [D], random_seed=2)",
"def getstrategy(stname, visited=None, retval=None):\n dprint(2, 'getstrategy', stname)\n dprint(3, 'getstrategy', stname, visited, retval)\n if visited is None:\n visited = []\n if retval is None:\n retval = []\n if stname in visited:\n dprint(0, 'loop detected', stname, visited)\n # resilient but dangerous; keeps going\n return retval\n visited.append(stname)\n\n sto, sts = stname.split('/')\n surl = os.path.join(args.trserver, sto, 'strategy', sts)\n outp = trgeturl(surl, 'lines')\n if not outp:\n dprint(0, 'failed strategy', stname)\n # resilient but dangerous; keeps going\n return retval\n for ln in outp:\n cmdln = ln.split()\n cmd = cmdln.pop(0)\n if cmd == 'test':\n pri = 50\n if cmdln[0].startswith('pri='):\n pric = cmdln.pop(0)\n pri = int(pric.split('=')[1])\n test = cmdln.pop(0)\n retval.append((pri, test, cmdln))\n dprint(3, 'added test', pri, test, cmdln)\n elif cmd == 'include' or cmd == 'strategy':\n dprint(3, 'including strategy', cmdln)\n retval = getstrategy(cmdln.pop(0), visited, retval)\n else:\n dprint(0, 'error unknown cmd', cmd)\n\n return retval",
"def test_get_player_battles(self):\n pass",
"def test_get_run(self):\n pass",
"def get_test_strategy(context, **kw):\n obj_cls = objects.Strategy\n db_data = db_utils.get_test_strategy(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)",
"def test_normal(self):\n get_response = lambda: self.client.get(self.url)\n\n self.assert_authentication_required(get_response)\n\n # Regular users cannot see someone else's list of communities:\n self.login_as(\"alice\")\n with self.assertNumQueries(2):\n self.assert_not_authorized(get_response())\n\n # User can see his list of communities:\n self.login_as(\"bob\")\n with self.assertNumQueries(3):\n response = get_response()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), self.num_bobs_communities)\n self.assertListEqual(list(response.data[0].keys()), self.expected_keys)\n self.assertListEqual(list(response.data[0][\"community\"].keys()), self.community_expected_keys)",
"def test_get_token_supply_all_using_get(self):\n pass",
"def test_get_waivers(league):\n pass",
"def test_get2(self):\n pass",
"def test_cyclingleagues_get(self):\n pass",
"def test_get_goals(self):\n pass",
"def test_get_distribution_no_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 200)\r\n res_json = json.loads(response.content)\r\n self.assertEqual(type(res_json['available_features']), list)\r\n\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url + u'?feature=')\r\n self.assertEqual(response.status_code, 200)\r\n res_json = json.loads(response.content)\r\n self.assertEqual(type(res_json['available_features']), list)",
"def test_api_predictors_get(self):\n pass",
"def test_get_orders(self):\n pass",
"def test_intercommunalitys_get(self):\n pass",
"def test_get_results(self):\n pass",
"async def test_user_command_helper_method_get_requests(self):\n test_values = (\n {\n \"helper_method\": self.cog.basic_user_infraction_counts,\n \"expected_args\": (\"bot/infractions\", {\"hidden\": \"False\", \"user__id\": str(self.member.id)}),\n },\n {\n \"helper_method\": self.cog.expanded_user_infraction_counts,\n \"expected_args\": (\"bot/infractions\", {\"user__id\": str(self.member.id)}),\n },\n {\n \"helper_method\": self.cog.user_nomination_counts,\n \"expected_args\": (\"bot/nominations\", {\"user__id\": str(self.member.id)}),\n },\n )\n\n for test_value in test_values:\n helper_method = test_value[\"helper_method\"]\n endpoint, params = test_value[\"expected_args\"]\n\n with self.subTest(method=helper_method, endpoint=endpoint, params=params):\n await helper_method(self.member)\n self.bot.api_client.get.assert_called_once_with(endpoint, params=params)\n self.bot.api_client.get.reset_mock()",
"def test_gridironfootballplayers_get(self):\n pass"
]
| [
"0.6222215",
"0.6173919",
"0.609311",
"0.5985206",
"0.5939424",
"0.5787142",
"0.5773158",
"0.5661657",
"0.5650904",
"0.56022173",
"0.558637",
"0.5583461",
"0.5525984",
"0.550063",
"0.54794484",
"0.54728687",
"0.5461746",
"0.54616755",
"0.54547167",
"0.5448073",
"0.5440383",
"0.5406041",
"0.53974617",
"0.539493",
"0.538941",
"0.5381306",
"0.53737026",
"0.5357759",
"0.5351418",
"0.534606"
]
| 0.7040536 | 0 |
Test the get_address_info RPC. | def test_address_info(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.addresses = {
'10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test with known address
self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))
# test with unknown address
with self.assertRaises(RPCError) as exc:
rpc.get_address_info('10.0.0.0')
self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)
self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',
exc.exception.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_client_address_retrieve(self):\n pass",
"def rpc_getaddressinfo(self, address: str) -> dict:\n return self._call_command([\"getaddressinfo\", address])",
"def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")",
"def test_get_order_address(self):\n pass",
"def test_all_addresses_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info_1'}),\n '10.0.0.2': Mock(**{'serial.return_value': 'address_info_2'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertItemsEqual(['address_info_1', 'address_info_2'],\n rpc.get_all_addresses_info())",
"def test_client_address_create(self):\n pass",
"def test_address_page(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type = \"html_text\")\n self.assertTrue(b'Address Locator' in response.data)",
"def test_client_address_update(self):\n pass",
"def test_get_address():\n\n # Wait for workspace to be initialized\n time.sleep(40)\n bambi = create_test_bambi()\n address = bambi.get_address_to_workspace()\n assert address",
"def do_getaddress(self,args):\n ppdict(bitstamp.get_depositaddress())",
"def getAddress(user):",
"def test_get_address(self):\r\n note_data = self.tape.dataframe.iloc[0]\r\n note = self.tape._get_note_object(note_data)\r\n eq_(note.get_address(), '8 Brown St, Methuen, MA 01844')",
"def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?",
"def test_list_address(self):\n\n data = [\n dict(\n id=self.address.id,\n address_line1='random address 1',\n address_line2='',\n postal_code='RAN DOM',\n city='random city',\n state_province=dict(\n iso_code=self.random_state_province.iso_code,\n name=self.random_state_province.name,\n ),\n country=dict(\n iso_code=self.random_country.iso_code,\n name=self.random_country.name,\n ),\n ),\n ]\n\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('location:addresses'))\n\n self.assertEqual(json.loads(response.content)['results'], data)\n self.assertEqual(json.loads(response.content)['count'], 1)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def address_info(self, address: str) -> dict:\n \n address_info_url = self.network + bf_address_url + address\n \n response = query_blockfrost(address_info_url, self.api_key, self.proxies)\n \n return response",
"def getAddress(self) -> int:\n ...",
"def test_lookupAddress(self):\n servers = {\n ('1.1.2.3', 53): {\n (b'foo.example.com', A): {\n 'authority': [(b'foo.example.com', Record_NS(b'ns1.example.com'))],\n 'additional': [(b'ns1.example.com', Record_A('34.55.89.144'))],\n },\n },\n ('34.55.89.144', 53): {\n (b'foo.example.com', A): {\n 'answers': [(b'foo.example.com', Record_A('10.0.0.1'))],\n }\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress(b'foo.example.com')\n d.addCallback(getOneAddress)\n d.addCallback(self.assertEqual, '10.0.0.1')\n return d",
"def _get_address(self):\n return self.__address",
"def test_get_shipping_address(self):\n self.cim.get_shipping_address(\n customer_profile_id=u\"900\",\n customer_address_id=u\"344\"\n )",
"def test_address(self):\n result = irc.dccDescribe(\"CHAT arg 3232235522 6666\")\n self.assertEqual(result, \"CHAT for host 192.168.0.2, port 6666\")",
"async def test_get_organization_address(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/addresses/{address_id}'.format(address_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def test_address_incorrect(self):\n tester = app.test_client(self)\n response = tester.post(\"/result\",\n data = dict(location = \"@1i451p4i1u3\"),\n follow_redirects = True)\n self.assertIn(b\"Null address\", response.data)",
"def address_details(self) -> 'outputs.AddressDetailsResponse':\n return pulumi.get(self, \"address_details\")",
"def get_balance_response(address):\n call = Address(address=address)\n response = call.get_address_info()\n if response:\n return response\n else:\n return None",
"def test_address_other_parameters():\n address = lob.Address.create(name='Siddharth Saha', address_line1='104, Printing Boulevard',\n address_line2='Sunset Town', email='[email protected]',\n address_city='Boston', address_state='MA', address_country='US',\n address_zip='12345')\n print address.to_dict()",
"def test_client_addresses_list(self):\n pass",
"def check_address(data):\n\n firebase_uid = data['session'].split('/')[-1]\n db = firebase.database()\n pincode = db.child(\"user_data\").child(firebase_uid).child(\"Address\").child(\"Pincode\").get().val()\n if pincode == \"0\" or pincode is None:\n print(\"Address not found.\")\n response = {\n \"followupEventInput\": {\n \"name\": \"request_address\",\n \"languageCode\": \"en-US\"\n }}\n else:\n response = check_mobile(data)\n return response",
"def test_set_address(self):\n s1 = System()\n s1.set_address(\"101 St James Rd\")\n self.assertEqual(s1.get_address(), \"101 St James Rd\")",
"def test_address_other_parameters():\n address = lob.Address.create(name = 'Siddharth Saha', address_line1 = '104, Printing Boulevard',\n address_line2 = 'Sunset Town', email = '[email protected]', \n address_city = 'Boston', address_state = 'MA', address_country = 'US',\n address_zip = '12345')\n print address.to_dict()",
"def test_get_info(self):\n pass"
]
| [
"0.7554395",
"0.74039465",
"0.73748916",
"0.71039975",
"0.6900985",
"0.66765416",
"0.66494095",
"0.6630977",
"0.65611506",
"0.6506627",
"0.64812136",
"0.6447592",
"0.6382673",
"0.6365876",
"0.63463646",
"0.6344738",
"0.62960494",
"0.6278117",
"0.6209523",
"0.6179435",
"0.61574554",
"0.6126363",
"0.611869",
"0.6101238",
"0.60604036",
"0.60479414",
"0.6023855",
"0.60204923",
"0.5979906",
"0.5969719"
]
| 0.855229 | 0 |
Test the get_all_addresses_info RPC. | def test_all_addresses_info(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.addresses = {
'10.0.0.1': Mock(**{'serial.return_value': 'address_info_1'}),
'10.0.0.2': Mock(**{'serial.return_value': 'address_info_2'})}
# create RPC instance
rpc = RPCInterface(self.supervisor)
self.assertItemsEqual(['address_info_1', 'address_info_2'],
rpc.get_all_addresses_info()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)",
"def test_client_addresses_list(self):\n pass",
"async def test_list_entity_addresses(self):\n await test_service.list_entity_addresses(self)",
"def test_list_address(self):\n\n data = [\n dict(\n id=self.address.id,\n address_line1='random address 1',\n address_line2='',\n postal_code='RAN DOM',\n city='random city',\n state_province=dict(\n iso_code=self.random_state_province.iso_code,\n name=self.random_state_province.name,\n ),\n country=dict(\n iso_code=self.random_country.iso_code,\n name=self.random_country.name,\n ),\n ),\n ]\n\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('location:addresses'))\n\n self.assertEqual(json.loads(response.content)['results'], data)\n self.assertEqual(json.loads(response.content)['count'], 1)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def get_all_addresses():\n try:\n addresses = address_service.get_all_addresses()\n current_app.logger.info(\"get all addresses\")\n return jsonify({\n \"data\": {\n \"count\": len(addresses),\n \"addresses\": addresses\n }}), 200\n except SQLCustomError as error:\n current_app.logger.error(\"fail to get all addresses: %s\", error)\n return jsonify({\n \"errors\": {\n \"error\": error.__dict__\n }\n }), 400",
"def do_addresses(self, args):\n pprint(self.wallet.addresses)",
"def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?",
"def test_client_address_retrieve(self):\n pass",
"def test_ipam_ip_addresses_list(self):\n pass",
"def test_get_contracts_addresses_empty():\n addresses = ContractHandler.get_contracts_addresses(_NETWORK, address_file=None)\n assert addresses is None",
"def ListAddresses(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_get_all_email_address(self):\n email_addr = 'test_get_email_addr' + '@' + self.email_dom\n email_addr2 = 'test_get_all_email_addr' + '@' + self.email_dom\n org = 'o=%s' % (self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.smtp_address: [email_addr, email_addr2]}\n expected_result = [(dn, dn_info)] \n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n addr.create(email_addr2)\n result = addr.get()['data']\n self.assertEqual(result, expected_result)",
"def test_get_order_address(self):\n pass",
"async def test_get_organization_addresses(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/addresses',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')",
"def scan_addresses(self, root=None):",
"def test_ipam_ip_addresses_read(self):\n pass",
"def test_12_individual_2_addresses(self):\n with mock_api(individual_2_addresses):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999255')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999255'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Name of the billing address\n self.assertEqual(partner.name, u'Mace Sébastien')\n self.assertEqual(partner.type, 'default')\n # billing address merged with the partner,\n # second address as a contact\n self.assertEqual(len(partner.child_ids), 1)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 1)\n address_bind = partner.magento_address_bind_ids[0]\n self.assertEqual(address_bind.magento_id, '9999254',\n msg=\"The merged address should be the \"\n \"billing address\")\n self.assertEqual(partner.child_ids[0].type, 'delivery',\n msg=\"The shipping address should be of \"\n \"type 'delivery'\")",
"def rpc_getaddressinfo(self, address: str) -> dict:\n return self._call_command([\"getaddressinfo\", address])",
"def test_addresses_list_for_user_one(self):\n\n card_holder_address_model = FundingSources.get_card_holder_address_model()\n\n card_holder_address_model[\"user_token\"] = self.user.token\n\n self.client.funding_sources.addresses.create(card_holder_address_model)\n\n self.create_card(self.create_card_product(), self.user)\n\n addresses = self.client.funding_sources.addresses.list_for_user(\n self.user.token)\n\n self.assertEqual(len(addresses), 1,\n 'Unexpected number of addresses retrieved')\n\n verify_card_holder_address_response(\n self, addresses[0], card_holder_address_model)\n\n with self.subTest('Address defined is not the default'):\n self.assertTrue(addresses[0].is_default_address)",
"def test_addresses_list_for_user_two(self):\n\n card_holder_address_one = FundingSources.get_card_holder_address_model()\n\n card_holder_address_one[\"user_token\"] = self.user.token\n\n card_holder_address_two = {\n \"user_token\": self.user.token,\n \"first_name\": \"O\",\n \"last_name\": \"PD\",\n \"address_1\": \"455 7th St.\",\n \"city\": \"Oakland\",\n \"state\": \"CA\",\n \"zip\": \"94612\",\n \"country\": \"USA\"\n }\n\n self.client.funding_sources.addresses.create(card_holder_address_one)\n self.client.funding_sources.addresses.create(card_holder_address_two)\n\n addresses = self.client.funding_sources.addresses.list_for_user(\n self.user.token)\n\n self.assertEqual(len(addresses), 2,\n 'Unexpected number of addresses retrieved')\n\n if addresses[0].first_name == card_holder_address_one['first_name']:\n verify_card_holder_address_response(\n self, addresses[0], card_holder_address_one)\n verify_card_holder_address_response(\n self, addresses[1], card_holder_address_two)\n else:\n verify_card_holder_address_response(\n self, addresses[1], card_holder_address_one)\n verify_card_holder_address_response(\n self, addresses[0], card_holder_address_two)",
"def unpack_addresses(self, addresses_to_test):\n if len(addresses_to_test) == 0:\n raise ValueError(\n \"There were no arguments passed to the function. That is wrong. Closing\"\n )\n\n return_addresses = []\n for address in addresses_to_test:\n if \"/\" in address:\n try:\n six_or_four = ipaddress.ip_network(address)\n except ValueError:\n print(f\"{address} is not a valid subnet. Skipping.\")\n continue\n for address_host in six_or_four.hosts():\n return_addresses.append(str(address_host))\n else:\n try:\n ipaddress.ip_address(address)\n except ValueError:\n print(f\"{address} is not a valid address. Skipping.\")\n continue\n return_addresses.append(str(address))\n for address in return_addresses:\n try:\n ipaddress.ip_address(address)\n except ValueError:\n raise ValueError(f\"{address} is not an IPv4/v6 address. Shutting Down\")\n if len(return_addresses) > 0:\n return return_addresses\n else:\n raise ValueError(\"No usable addresses to scan\")",
"def test_client_address_update(self):\n pass",
"def test_14_company_2_addresses(self):\n with mock_api(company_2_addresses):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999257')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999257'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Company of the billing address\n self.assertEqual(partner.name, 'Bertin')\n self.assertEqual(partner.type, 'default')\n # all addresses as contacts\n self.assertEqual(len(partner.child_ids), 2)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 0)\n def get_address(magento_id):\n address_ids = self.address_model.search(\n cr, uid,\n [('magento_id', '=', magento_id),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(address_ids), 1)\n return self.address_model.browse(cr, uid, address_ids[0])\n # billing address\n address = get_address('9999257')\n self.assertEqual(address.type, 'invoice',\n msg=\"The billing address should be of \"\n \"type 'invoice'\")\n # shipping address\n address = get_address('9999258')\n self.assertEqual(address.type, 'delivery',\n msg=\"The shipping address should be of \"\n \"type 'delivery'\")",
"def get_all_addresses(self, addresses=None, filters=None, allocation_ids=None):\r\n params = {}\r\n if addresses:\r\n self.build_list_params(params, addresses, 'PublicIp')\r\n if allocation_ids:\r\n self.build_list_params(params, allocation_ids, 'AllocationId')\r\n if filters:\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST')",
"def test_address_page(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type = \"html_text\")\n self.assertTrue(b'Address Locator' in response.data)",
"def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")",
"async def test_list_address_tags_by_entity(self):\n await test_service.list_address_tags_by_entity(self)",
"def get_addresses_by_account(account):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"getaddressesbyaccount\", account])\n addresses = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return addresses",
"def test_11_individual_1_address(self):\n with mock_api(individual_1_address):\n import_record(self.session, 'magento.res.partner',\n self.backend_id, '9999254')\n cr, uid = self.cr, self.uid\n partner_ids = self.model.search(cr, uid,\n [('magento_id', '=', '9999254'),\n ('backend_id', '=', self.backend_id)])\n self.assertEqual(len(partner_ids), 1)\n partner = self.model.browse(cr, uid, partner_ids[0])\n # Name of the billing address\n self.assertEqual(partner.name, 'Ferreira Margaux')\n self.assertEqual(partner.type, 'default')\n # billing address merged with the partner\n self.assertEqual(len(partner.child_ids), 0)\n self.assertEqual(len(partner.magento_bind_ids), 1)\n self.assertEqual(len(partner.magento_address_bind_ids), 1)\n address_bind = partner.magento_address_bind_ids[0]\n self.assertEqual(address_bind.magento_id, '9999253',\n msg=\"The merged address should be the \"\n \"billing address\")",
"def get_addrs(self) -> List[Multiaddr]:"
]
| [
"0.73864514",
"0.725401",
"0.7201281",
"0.70590824",
"0.69797766",
"0.69144094",
"0.67823094",
"0.66993743",
"0.6611087",
"0.65053475",
"0.64855546",
"0.64352447",
"0.6397789",
"0.6297383",
"0.6290662",
"0.6125736",
"0.6102892",
"0.607289",
"0.60336214",
"0.60237765",
"0.5996575",
"0.59942114",
"0.59783334",
"0.59484595",
"0.59469897",
"0.5941499",
"0.59239525",
"0.59182477",
"0.5917898",
"0.5892071"
]
| 0.84211046 | 0 |
Test the get_application_info RPC. | def test_application_info(self, mocked_serial, mocked_check):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call
self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('dummy')], mocked_serial.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), call('dummy_2')],\n mocked_get.call_args_list)",
"def test_duo_application_get(self):\n pass",
"def process_app_info(self):\n pass",
"def test_48_update_app_info(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n err_msg = \"Task Presenter should be empty\"\r\n assert not app.info.get('task_presenter'), err_msg\r\n\r\n res = self.app.post('/app/sampleapp/tasks/taskpresentereditor',\r\n data={'editor': 'Some HTML code!'},\r\n follow_redirects=True)\r\n assert \"Sample App\" in res.data, \"Does not return to app details\"\r\n app = db.session.query(App).first()\r\n for i in range(10):\r\n key = \"key_%s\" % i\r\n app.info[key] = i\r\n db.session.add(app)\r\n db.session.commit()\r\n _info = app.info\r\n\r\n self.update_application()\r\n app = db.session.query(App).first()\r\n for key in _info:\r\n assert key in app.info.keys(), \\\r\n \"The key %s is lost and it should be here\" % key\r\n assert app.name == \"Sample App\", \"The app has not been updated\"\r\n error_msg = \"The app description has not been updated\"\r\n assert app.description == \"Description\", error_msg\r\n error_msg = \"The app long description has not been updated\"\r\n assert app.long_description == \"Long desc\", error_msg",
"def test_get_current_app():\n\n assert isinstance(application_services.get_current_app(), PyrinUnitTestApplication)",
"def test_get_info(self):\n pass",
"def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))",
"def test_server_details_ok(self):\n response = self.call_api('server_details', {}, 200).json\n self.assertEqual(utils.get_app_version(), response['server_version'])",
"def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data",
"def get_application_info( tree ):\n application_name = None\n # most machines store the machine name string in the tag 'ApplicationName'\n for application_name in tree.getroot().iter( 'ApplicationName' ):\n application_name = application_name.text\n break\n # NovaSeq stores the machine name string in the tag 'Application'\n if( application_name == None ):\n for application_name in tree.getroot().iter( 'Application' ):\n application_name = application_name.text\n break\n if( application_name == None ):\n raise ValueError( 'Unable to find Application* element in BCL RunParameters.xml' )\n\n application_version = None\n for application_version in tree.getroot().iter( 'ApplicationVersion' ):\n application_version = application_version.text\n break\n if( application_version == None ):\n raise ValueError( 'ApplicationVersion element missing in BCL RunParameters.xml' )\n\n re_models = '|'.join( application_name_dict.keys() )\n re_pattern = '(%s)' % re_models\n mobj = re.match( re_pattern, application_name )\n if( mobj == None ):\n raise ValueError( 'unrecognized ApplicationName in RunParameters.xml file' )\n instrument_model = application_name_dict[mobj.group( 1 )]\n\n # Distinguish between HiSeq models 3000 and 4000 using Andrew's(?) method.\n # Note: the p5 index orientations differ between these two models.\n if( instrument_model == 'HiSeq' ):\n application_major_version = int(application_version.split('.')[0])\n if application_major_version > 2:\n instrument_model = 'HiSeq4000'\n else:\n instrument_model = 'HiSeq3000'\n\n return( instrument_model, application_version )",
"def test_app_query(self):\r\n AppFactory.create(info={'total': 150})\r\n res = self.app.get('/api/app')\r\n data = json.loads(res.data)\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # Test a non-existant ID\r\n res = self.app.get('/api/app/3434209')\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'app', err\r\n assert err['exception_cls'] == 'NotFound', err\r\n assert err['action'] == 'GET', err",
"def _fetch_app_info(app_id):\n try:\n assert len(app_id), \"Empty string\"\n lookup_url = \"https://itunes.apple.com/lookup?id=\"\n target_url = lookup_url + app_id\n if sys.version_info < (3, 5):\n response = urllib2.urlopen(target_url)\n else:\n response = urllib.request.urlopen(target_url)\n data = response.read() # a `bytes` object\n text = data.decode('utf-8')\n app_info = json.loads(text)\n return app_info\n except AssertionError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib2.URLError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib.error.URLError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib2.HTTPError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n\n except:\n e = sys.exc_info()[0]\n print(\"Error: %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)",
"def _get_app_param_info(app_info, resultCount=1, resultKey='primaryGenreId'):\n try:\n assert app_info['results'][resultCount - 1][resultKey] is not None, \"Null item\"\n return app_info['results'][resultCount - 1][resultKey]\n except AssertionError as e:\n print(\"get_app_param_info\", e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"get_app_param_info\", e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: get_app_param_info %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def AppGetApp(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def test_get_app(self):\n settings = SparkSettings()\n self.device.app_name = 'test_app'\n app = self.device.get_app()\n self.assertIsInstance(app, settings.APPS[self.device.app_name])",
"def GetApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def android_app_info(self) -> 'outputs.AndroidAppInfoResponse':\n return pulumi.get(self, \"android_app_info\")",
"def testGetPublicInfo(self):\n jso = self.getPublicInfoResponse()\n appInfo = jso.cast()\n Assert.assertEquals(u\"aebf2e22b6bcb3bbd95c180bb68b6df4\", appInfo.getApiKey())\n Assert.assertEquals(2, appInfo.getDevelopers().__len__())",
"def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None",
"def AppGetApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def get_app_info(app_list, info_list):\n\n app_names = [app.__name__ for app in app_list]\n for app in info_list:\n if app in app_names:\n class_obj = next(i for i in app_list if i.__name__ == app)\n print(app)\n print(' {}'.format(class_obj.__doc__))\n print(' setup args: {}'.format(ARGS.get(app)))\n print(' setup kwargs: {}'.format(KWARGS.get(app)))\n print('')\n\n else:\n print('App {} does not exist'.format(app.__name__))",
"def test_10_get_application(self, Mock, mock2):\r\n # Sign in and create an application\r\n with self.flask_app.app_context():\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n self.register()\r\n res = self.new_application()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n msg = \"Application: Sample App\"\r\n assert self.html_title(msg) in res.data, res\r\n err_msg = \"There should be a contribute button\"\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status == '200 OK', res.status\r\n self.signout()\r\n\r\n # Now as an anonymous user\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert self.html_title(\"Application: Sample App\") in res.data, res\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status == '200 OK', res.status\r\n err_msg = \"Anonymous user should be redirected to sign in page\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Now with a different user\r\n self.register(fullname=\"Perico Palotes\", name=\"perico\")\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert self.html_title(\"Application: Sample App\") in res.data, res\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n res = self.app.get('/app/sampleapp/settings')\r\n assert res.status == '403 FORBIDDEN', res.status",
"def test_query_app(self):\r\n AppFactory.create(short_name='test-app', name='My New App')\r\n # Test for real field\r\n res = self.app.get(\"/api/app?short_name=test-app\")\r\n data = json.loads(res.data)\r\n # Should return one result\r\n assert len(data) == 1, data\r\n # Correct result\r\n assert data[0]['short_name'] == 'test-app', data\r\n\r\n # Valid field but wrong value\r\n res = self.app.get(\"/api/app?short_name=wrongvalue\")\r\n data = json.loads(res.data)\r\n assert len(data) == 0, data\r\n\r\n # Multiple fields\r\n res = self.app.get('/api/app?short_name=test-app&name=My New App')\r\n data = json.loads(res.data)\r\n # One result\r\n assert len(data) == 1, data\r\n # Correct result\r\n assert data[0]['short_name'] == 'test-app', data\r\n assert data[0]['name'] == 'My New App', data",
"def test_application_rules(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with aplpication name\n self.assertDictEqual(rpc.get_application_rules('appli'), \n {'application_name': 'appli','start': 1, 'stop': 2, 'required': True})\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli')], mocked_get.call_args_list)",
"def test_query_application_credentials(self):\n self.create_application_credential()\n app_cred_two = self.create_application_credential()\n app_cred_two_name = app_cred_two['name']\n\n app_creds = self._list_app_creds(name=app_cred_two_name)\n self.assertEqual(1, len(app_creds))\n self.assertEqual(app_cred_two_name, app_creds[0]['name'])",
"def test_00_app_get(self):\r\n # GET as Anonymous\r\n url = '/api/app'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')",
"def rpc_info():"
]
| [
"0.7368261",
"0.73541594",
"0.6974",
"0.6691222",
"0.6589215",
"0.65312827",
"0.6484086",
"0.6422234",
"0.6345299",
"0.63110584",
"0.6306084",
"0.62044394",
"0.6204126",
"0.62031215",
"0.61840945",
"0.6136629",
"0.6117112",
"0.6070039",
"0.6049411",
"0.604861",
"0.60403824",
"0.60244155",
"0.6017124",
"0.6009762",
"0.598085",
"0.59656113",
"0.5924774",
"0.59221256",
"0.5921014",
"0.5918663"
]
| 0.78726745 | 0 |
Test the get_all_applications_info RPC. | def test_all_applications_info(self, mocked_get, mocked_check):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.applications = {
'dummy_1': None, 'dummy_2': None}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call
self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],
rpc.get_all_applications_info())
self.assertEqual([call()], mocked_check.call_args_list)
self.assertItemsEqual([call('dummy_1'), call('dummy_2')],
mocked_get.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)",
"def get_applications(self):\n status_code_dict = {\n codes.ok: ApplicationListResponse,\n codes.bad_request: ErrorResponse,\n }\n return self.get_request(APPLICATION_URL,\n status_code_response_class_dict=status_code_dict,\n )",
"def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)",
"def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def get_applications(status):\n return status['applications']",
"def test_root_api(self):\n\n # GIVEN API\n\n # WHEN fetching available applications and models\n response = self.api.root_api()\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n for item in response.data:\n self.assertEqual(len(item.keys()), 3)\n self.assertEqual(set(item.keys()), set(['model', 'actions', 'app_label']))\n\n # AND it contains also UI application models\n self.assertTrue(any('test' in d['app_label'] for d in response.data))\n\n # AND public applications are also available\n data = [item for item in response.data if item['app_label'] == 'admin']\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0]['model'], None)\n self.assertEqual(len(data[0]['actions'].keys()), 2)",
"def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return",
"def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_duo_application_list(self):\n pass",
"def test_get_hyperflex_app_catalog_list(self):\n pass",
"def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data",
"def test_duo_application_get(self):\n pass",
"def test_profile_applications(self, mock):\r\n with self.flask_app.app_context():\r\n self.create()\r\n self.signin(email=Fixtures.email_addr, password=Fixtures.password)\r\n self.new_application()\r\n url = '/account/%s/applications' % Fixtures.name\r\n res = self.app.get(url)\r\n assert \"Applications\" in res.data, res.data\r\n assert \"Published\" in res.data, res.data\r\n assert \"Draft\" in res.data, res.data\r\n assert Fixtures.app_name in res.data, res.data\r\n\r\n url = '/account/fakename/applications'\r\n res = self.app.get(url)\r\n assert res.status_code == 404, res.status_code\r\n\r\n url = '/account/%s/applications' % Fixtures.name2\r\n res = self.app.get(url)\r\n assert res.status_code == 403, res.status_code",
"def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)",
"async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)",
"def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))",
"def get_applications(rest, sessionsArg, option):\n applications = []\n if option == 'heartbeat':\n appsString = rest.get_environment_applications(sessionsArg).strip();\n else:\n appsString = rest.get_all_applications().strip();\n rawList = appsString.split('\\n<\\n')\n for raw in rawList:\n if printtrace: print '_' * 20\n if applicationdataok(raw):\n attributes = [a.split(': ')[1] for a in raw.split('\\n')]\n if printtrace: print attributes\n\n a = Application()\n a.sessionId = attributes[0]\n a.nameInEnvironmentView = attributes[1]\n a.fileName = attributes[2]\n a.processString = attributes[3]\n a.discoveryChecks = attributes[4:]\n a.isgeneric = a.nameInEnvironmentView == 'generic application' or a.fileName.find('generic-application') > 0\n if not a.isgeneric:\n applications.append(a)\n return applications",
"def get_all_applications():\n cursor.execute(\n f'SELECT * FROM public.applications where status = %s', (\"pending\",))\n rows = cursor.fetchall()\n application_dicts = []\n\n for item in rows:\n application = Application(id=item[0], party_name=item[1], office_name=item[2], user_id=item[3],\n date_created=item[4],status=item[5])\n application = application.json_dumps()\n application_dicts.append(application)\n return application_dicts",
"def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()",
"def get(self):\n apps = Application.objects()\n\n # TODO return more information\n apps_clean = []\n for app in apps:\n # don't include invalid apps\n if app[\"validated\"] is True:\n apps_clean.append(\n {\"name\": app[\"name\"]}\n )\n\n return apps_clean, 200",
"def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )",
"def test_app_query(self):\r\n AppFactory.create(info={'total': 150})\r\n res = self.app.get('/api/app')\r\n data = json.loads(res.data)\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # Test a non-existant ID\r\n res = self.app.get('/api/app/3434209')\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'app', err\r\n assert err['exception_cls'] == 'NotFound', err\r\n assert err['action'] == 'GET', err",
"def describe_apps(StackId=None, AppIds=None):\n pass",
"def app_list(self, third_only=False):\n return self.adb.app_list(third_only)",
"def get_all_apps(self):\n return list(self.apps.values())",
"def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')",
"def get():\n try:\n request_schema = ApplicationListReqSchema()\n dict_data = request_schema.load(request.args)\n page_no = dict_data['page_no']\n limit = dict_data['limit']\n return jsonify({\n 'applications': FormProcessMapperService.get_all_mappers(page_no, limit),\n 'totalCount': FormProcessMapperService.get_mapper_count(),\n 'pageNo': page_no,\n 'limit': limit\n }), HTTPStatus.OK\n except ValidationError as form_err:\n return {'systemErrors': form_err.messages}, HTTPStatus.BAD_REQUEST"
]
| [
"0.7007239",
"0.68767536",
"0.68270475",
"0.67084014",
"0.660541",
"0.66011083",
"0.65988123",
"0.65746206",
"0.6562028",
"0.6561185",
"0.6498757",
"0.6494644",
"0.64858603",
"0.64830405",
"0.6474421",
"0.6452741",
"0.64078385",
"0.6402389",
"0.635788",
"0.63436806",
"0.632355",
"0.63193214",
"0.6283238",
"0.6282834",
"0.62652504",
"0.62298423",
"0.62291163",
"0.6224876",
"0.62234837",
"0.6219674"
]
| 0.8159693 | 0 |
Test the get_process_info RPC. | def test_process_info(self, mocked_get, mocked_check):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test first RPC call with process namespec
self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli:proc')], mocked_get.call_args_list)
# reset patches
mocked_check.reset_mock()
mocked_get.reset_mock()
# test second RPC call with group namespec
self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],
rpc.get_process_info('appli:*'))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli:*')], mocked_get.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)",
"def process_info(process):\n\thelp(process)",
"def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret",
"def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)",
"def getProcessInfo(self, name):\r\n self._update('getProcessInfo')\r\n\r\n group, process = self._getGroupAndProcess(name)\r\n\r\n if process is None:\r\n raise RPCError(Faults.BAD_NAME, name)\r\n\r\n start = int(process.laststart)\r\n stop = int(process.laststop)\r\n now = int(time.time())\r\n\r\n state = process.get_state()\r\n spawnerr = process.spawnerr or ''\r\n exitstatus = process.exitstatus or 0\r\n stdout_logfile = process.config.stdout_logfile or ''\r\n stderr_logfile = process.config.stderr_logfile or ''\r\n\r\n info = {\r\n 'name':process.config.name,\r\n 'group':group.config.name,\r\n 'start':start,\r\n 'stop':stop,\r\n 'now':now,\r\n 'state':state,\r\n 'statename':getProcessStateDescription(state),\r\n 'spawnerr':spawnerr,\r\n 'exitstatus':exitstatus,\r\n 'logfile':stdout_logfile, # b/c alias\r\n 'stdout_logfile':stdout_logfile,\r\n 'stderr_logfile':stderr_logfile,\r\n 'pid':process.pid,\r\n }\r\n\r\n description = self._interpretProcessInfo(info)\r\n info['description'] = description\r\n return info",
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def rpc_info():",
"def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))",
"def _QueryProcessStatus(self, process):\n process_is_alive = process.is_alive()\n if not process_is_alive:\n return None\n\n rpc_client = self._rpc_clients_per_pid.get(process.pid, None)\n return rpc_client.CallFunction()",
"def test_get_info(self):\n pass",
"def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)",
"def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0",
"def get_process():\n data = _get_process_detail_expanded_data()[\"process\"]\n return data",
"def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")",
"def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)",
"async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")",
"def test_server_info(self):\n pass",
"def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n return",
"def check_meminfo_response(response):\n\n try:\n data = json.loads(response.strip())\n except ValueError, msg:\n\traise AssertionError(\"Invalid JSON object. Received: \" + response)\n\n for line in open(\"/proc/meminfo\"):\n entry = re.split(\":?\\s+\", line)\n assert data.has_key(entry[0]), entry[0] + \" key is missing\"\n\n try:\n int(data[entry[0]])\n except (TypeError, ValueError):\n raise AssertionError(\"a non-integer was passed to meminfo\")\n\n return True",
"def test_instant_process_statistics(self):\n import os\n from supvisors.statistics import instant_process_statistics\n stats = instant_process_statistics(os.getpid())\n # test that a pair is returned with values in [0;100]\n self.assertEqual(2, len(stats))\n # test cpu value\n self.assertIs(float, type(stats[0]))\n self.assertGreaterEqual(stats[0], 0)\n self.assertLessEqual(stats[0], 100)\n # test mem value\n self.assertIs(float, type(stats[1]))\n self.assertGreaterEqual(stats[1], 0)\n self.assertLessEqual(stats[1], 100)",
"def test_get_info(self):\n self.addCleanup(self.sdkapi.guest_delete, self.userid)\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid, self.image_name)\n\n # get info in shutdown state\n info_off = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_off['power_state'], 'off')\n self.assertEquals(info_off['mem_kb'], 0)\n self.assertEquals(info_off['cpu_time_us'], 0)\n\n # get info in active state\n self.sdkapi.guest_start(self.userid)\n self.assertTrue(self.sdkutils.wait_until_guest_in_power_state(\n self.userid, 'on'))\n time.sleep(1)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)\n\n # get info in paused state\n self.sdkapi.guest_pause(self.userid)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)",
"def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})",
"def pr_info(self):\n process = self.backend.get_process(str(self.processBox.currentText()))\n\n if not process:\n return\n\n self.infoWindow2 = QDialog(parent=self)\n hbox2 = QHBoxLayout()\n info_box = QTextEdit()\n\n if process.returns:\n info_box.setText(\n str(str(process.id) + ': ' + str(process.description) + \"\\n\\n Returns: \\n\" +\n str(process.get_return_type()) + \"\\n\" + process.returns[\"description\"]))\n else:\n info_box.setText(\n str(str(process.id) + ': ' + str(process.description)))\n\n info_box.setReadOnly(True)\n info_box.setMinimumWidth(500)\n info_box.setMinimumHeight(500)\n hbox2.addWidget(info_box)\n self.infoWindow2.setLayout(hbox2)\n self.infoWindow2.setWindowTitle('Process Information')\n self.infoWindow2.show()",
"def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]",
"def retrieve_execution_info(self, process):\n self._start = process.start\n self._end = process.end\n self._success = process.success\n self.log_stdout = process.log_stdout\n self.log_stderr = process.log_stderr\n self._reserved_path = process._reserved_path",
"def rpc_getinfo(client, rpc_server, rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n rpc_server = get_ip_by_unknown(client, rpc_server)\n # Test connection by sendinf a getinfo command\n rpc_connection = AuthServiceProxy(\"http://%s:%s@%s:%s\" % (rpc_user, rpc_password, rpc_server, rpc_port))\n get_info = rpc_connection.getinfo()\n return get_info\n except JSONRPCException as err:\n return False",
"def test_info(manager):\n manager.test_window(\"one\")\n manager.c.sync()\n info = manager.c.window.info()\n assert info[\"name\"] == \"one\"\n assert info[\"group\"] == \"a\"\n assert info[\"wm_class\"][0] == \"TestWindow\"\n assert \"x\" in info\n assert \"y\" in info\n assert \"width\" in info\n assert \"height\" in info\n assert \"id\" in info",
"def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')",
"def check_process(self, instance, process):\n\n instance = self.get_instance(instance)\n output = \"\"\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n output = subprocess.check_output([\"ssh\", key, username, 'ps', 'aux', '|', 'grep', process]).decode(\n \"utf-8\")\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n output = subprocess.check_output(\n [\"ssh\", '-i', key, username, 'ps', 'aux', '|', 'grep', process]).decode(\"utf-8\")\n return output\n except:\n return \"Faile to access the instance\""
]
| [
"0.729027",
"0.70583725",
"0.69484574",
"0.6644473",
"0.652423",
"0.63419306",
"0.63237894",
"0.62368655",
"0.6222356",
"0.61415535",
"0.59827346",
"0.59538347",
"0.592685",
"0.5902934",
"0.5892789",
"0.58857656",
"0.58735555",
"0.58320194",
"0.582809",
"0.5811989",
"0.5790525",
"0.57286364",
"0.57009006",
"0.5691166",
"0.5666546",
"0.56575274",
"0.5647627",
"0.56334966",
"0.5633237",
"0.56159556"
]
| 0.7721014 | 0 |
Test the get_all_process_info RPC. | def test_all_process_info(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.processes = {
'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),
'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call
self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],
rpc.get_all_process_info())
self.assertEqual([call()], mocked_check.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)",
"def test_process_list_with_all_users(self):\n\n class MyResponder(MockGDBServerResponder):\n def qfProcessInfo(self, packet):\n if \"all_users:1\" in packet:\n return \"pid:10;ppid:1;uid:1;gid:1;euid:1;egid:1;name:\" + binascii.hexlify(\"/a/test_process\".encode()).decode() + \";\"\n else:\n return \"E04\"\n\n self.server.responder = MyResponder()\n\n self.runCmd(\"platform select remote-linux\")\n\n try:\n self.runCmd(\"platform connect connect://localhost:%d\" %\n self.server.port)\n self.assertTrue(self.dbg.GetSelectedPlatform().IsConnected())\n self.expect(\"platform process list -x\",\n substrs=[\"1 matching process was found\", \"test_process\"])\n self.expect(\"platform process list\",\n error=True,\n substrs=[\"error: no processes were found on the \\\"remote-linux\\\" platform\"])\n finally:\n self.dbg.GetSelectedPlatform().DisconnectRemote()",
"def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), call('dummy_2')],\n mocked_get.call_args_list)",
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def process_info(process):\n\thelp(process)",
"def getAllProcessInfo(self):\r\n self._update('getAllProcessInfo')\r\n\r\n all_processes = self._getAllProcesses(lexical=True)\r\n\r\n output = []\r\n for group, process in all_processes:\r\n name = make_namespec(group.config.name, process.config.name)\r\n output.append(self.getProcessInfo(name))\r\n return output",
"def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret",
"def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]",
"def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)",
"def rpc_info():",
"def processes():\n if not check_params(\n request.args.get(\"host\"), request.args.get(\"username\")\n ):\n abort(400)\n\n return get_processes(\n request.args.get(\"host\"),\n request.args.get(\"username\"),\n request.args.get(\"port\"),\n )",
"def test_all_addresses_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info_1'}),\n '10.0.0.2': Mock(**{'serial.return_value': 'address_info_2'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertItemsEqual(['address_info_1', 'address_info_2'],\n rpc.get_all_addresses_info())",
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def GetPublishedProcesses():\r\n pass",
"def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))",
"def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})",
"def getProcessInfo(self, name):\r\n self._update('getProcessInfo')\r\n\r\n group, process = self._getGroupAndProcess(name)\r\n\r\n if process is None:\r\n raise RPCError(Faults.BAD_NAME, name)\r\n\r\n start = int(process.laststart)\r\n stop = int(process.laststop)\r\n now = int(time.time())\r\n\r\n state = process.get_state()\r\n spawnerr = process.spawnerr or ''\r\n exitstatus = process.exitstatus or 0\r\n stdout_logfile = process.config.stdout_logfile or ''\r\n stderr_logfile = process.config.stderr_logfile or ''\r\n\r\n info = {\r\n 'name':process.config.name,\r\n 'group':group.config.name,\r\n 'start':start,\r\n 'stop':stop,\r\n 'now':now,\r\n 'state':state,\r\n 'statename':getProcessStateDescription(state),\r\n 'spawnerr':spawnerr,\r\n 'exitstatus':exitstatus,\r\n 'logfile':stdout_logfile, # b/c alias\r\n 'stdout_logfile':stdout_logfile,\r\n 'stderr_logfile':stderr_logfile,\r\n 'pid':process.pid,\r\n }\r\n\r\n description = self._interpretProcessInfo(info)\r\n info['description'] = description\r\n return info",
"def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]",
"def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst",
"def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}",
"def list_programs():\n return list(INFO)",
"def get_host_info(self, args, get_all=False):\n return None",
"def get_processes():\n yield from psutil.process_iter()",
"def test_instant_process_statistics(self):\n import os\n from supvisors.statistics import instant_process_statistics\n stats = instant_process_statistics(os.getpid())\n # test that a pair is returned with values in [0;100]\n self.assertEqual(2, len(stats))\n # test cpu value\n self.assertIs(float, type(stats[0]))\n self.assertGreaterEqual(stats[0], 0)\n self.assertLessEqual(stats[0], 100)\n # test mem value\n self.assertIs(float, type(stats[1]))\n self.assertGreaterEqual(stats[1], 0)\n self.assertLessEqual(stats[1], 100)",
"def test_block_builtin_processes_from_api(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\"],\n \"inputs\": {\n \"stringInput\": \"string\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n },\n \"outputs\": [],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"type\": PROCESS_BUILTIN,\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n resp = mocked_sub_requests(self.app, \"post_json\", \"/processes\", data=body, timeout=5,\n headers=self.json_headers, only_local=True, expect_errors=True)\n # With Weaver<=4.1.x, the 'type' was explicitly checked to block it since Deploy payload was kept as is\n # This field was allowed to trickle all they way down to the instantiation of Process object\n # assert resp.status_code == 200\n\n # With Weaver>4.1.x, the deserialized result from Deploy payload is employed, which drops unknown 'type'\n # Ensure that deploy now succeeds, but the obtained Process is not 'builtin' (just a regular application)\n assert resp.status_code == 201\n assert PROCESS_BUILTIN not in resp.json[\"processSummary\"][\"keywords\"]\n process = self.process_store.fetch_by_id(self._testMethodName)\n assert process.type == PROCESS_APPLICATION",
"def ShowAllIPC(cmd_args=None):\n for t in kern.tasks:\n print GetTaskSummary.header + \" \" + GetProcSummary.header\n pval = Cast(t.bsd_info, 'proc *')\n print GetTaskSummary(t) + \" \" + GetProcSummary(pval)\n print PrintIPCInformation.header\n PrintIPCInformation(t.itk_space, False, False) + \"\\n\\n\"",
"def test_reapAllProcesses(self):\n self.assertIdentical(_main._reapAllProcesses, reapAllProcesses)",
"def handle_task_processes(self, request):\n \"\"\"\n @api {get} /task/:id/processes List running processes for a task\n @apiName ListTaskProcesses\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccessExample {json} Example response:\n {\n \"021b2092ef4111e481a852540064e600\" : {\n \"node\": \"node1\",\n \"start_time\": \"2018-03-29T15:01:13.465183+00:00\",\n \"task\": \"e4d07482e44711e49e76c81f66cd0cca\"\n },\n \"253a96e29868135d746989a6123f521e\" : {\n \"node\": \"node2\",\n \"start_time\": \"2018-03-29T14:01:13.352067+00:00\",\n \"task\": \"508b4b72e44611e49e76c81f66cd0cca\"\n },\n ...\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/processes', request.uri_path)\n task = match.group(1)\n\n processes = self.cluster.list_task_processes(task)\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps(processes)\n\n return HTTPReply(code = 200, body = body, headers = headers)",
"def test_block_unknown_processes(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\"],\n \"inputs\": {\n \"stringInput\": \"string\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\"dockerPull\": \"python:3.7-alpine\"},\n \"InlineJavascriptRequirement\": {},\n \"ResourceRequirement\": {\"ramMin\": 10240, \"coresMin\": 3}\n\n },\n \"outputs\": [],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n resp = mocked_sub_requests(self.app, \"post_json\", \"/processes\", data=body, timeout=5,\n headers=self.json_headers, only_local=True, expect_errors=True)\n assert resp.status_code == 422",
"async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")"
]
| [
"0.7329342",
"0.6957903",
"0.65616316",
"0.6420114",
"0.6354122",
"0.6345824",
"0.63279754",
"0.61812145",
"0.6033765",
"0.5974393",
"0.5931024",
"0.5903762",
"0.58925045",
"0.5880775",
"0.5865628",
"0.57525575",
"0.57450235",
"0.5720751",
"0.5692252",
"0.56884235",
"0.56654066",
"0.565985",
"0.5620404",
"0.561618",
"0.5570062",
"0.55282754",
"0.55244106",
"0.54811347",
"0.54557323",
"0.54556286"
]
| 0.79585636 | 0 |
Test the get_application_rules RPC. | def test_application_rules(self, mocked_get, mocked_check):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call with aplpication name
self.assertDictEqual(rpc.get_application_rules('appli'),
{'application_name': 'appli','start': 1, 'stop': 2, 'required': True})
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli')], mocked_get.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_rules(app):\n rules = [\n Rule('/', endpoint='home', handler='apps.busstopped.handlers.MainPage'),\n Rule('/ajax/busstopped/<line>/<direction>', endpoint='ajax-busstopped', handler='apps.busstopped.handlers.AjaxGetBusStopped'),\n Rule('/ajax/point', endpoint='ajax-point', handler='apps.busstopped.handlers.AjaxGetBusStopTimes'),\n Rule('/ajax/getbuspaths', endpoint='ajax-getbuspath', handler='apps.busstopped.handlers.AjaxGetBusPath'),\n Rule('/faq', endpoint='faq', handler='apps.busstopped.handlers.FAQPage'),\n Rule('/changelog', endpoint='change-log', handler='apps.busstopped.handlers.ChangeLogPage'),\n Rule('/info', endpoint='info', handler='apps.busstopped.handlers.InfoPage'),\n Rule('/addpoint', endpoint='add_point', handler='apps.busstopped.handlers.AddPointDocPage'),\n Rule('/news', endpoint='news', handler='apps.busstopped.handlers.NewsPage'),\n Rule('/parse', endpoint='parse', handler='apps.busstopped.handlers.ParseTimesPage'),\n ]\n\n return rules",
"def test_process_rules(self, mocked_rules, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'start': 1}], rpc.get_process_rules('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n self.assertEqual([call('1')], mocked_rules.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n mocked_rules.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'stop': 2}, {'required': True}],\n rpc.get_process_rules('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)\n self.assertEqual([call('1'), call('2')], mocked_rules.call_args_list)",
"def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), call('dummy_2')],\n mocked_get.call_args_list)",
"def test_get_internal_process_rules(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n process = Mock(application_name='appli', process_name='proc',\n **{'rules.serial.return_value': {'start': 0, 'stop': 1}})\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertDictEqual({'application_name': 'appli',\n 'process_name': 'proc', 'start': 0, 'stop': 1},\n rpc._get_internal_process_rules(process))",
"def test_get_inbox_rulesets(self):\n pass",
"def test_rules():",
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def test():\n\n zkclient = context.GLOBAL.zk.conn\n cell_name = context.GLOBAL.cell\n admin_cell = admin.Cell(context.GLOBAL.ldap.conn)\n\n # get cell attribute from ldap object\n cell = admin_cell.get(cell_name)\n sysproid = cell['username']\n\n running = zkclient.get_children(z.RUNNING)\n # prefilter treadmill apps to improve efficiency\n running_set = set([name.split('#')[0] for name in running])\n\n class SystemAppTest(unittest.TestCase):\n \"\"\"System apps checkout.\"\"\"\n\n for appname in ['app-dns', 'cellapi', 'adminapi', 'stateapi', 'wsapi']:\n\n @chk.T(SystemAppTest, running_set=running_set, sysproid=sysproid,\n cell=cell_name, appname=appname)\n def _test_app_running(self, running_set, sysproid, cell, appname):\n \"\"\"Check {sysproid}.{appname}.{cell} is running.\"\"\"\n full_app_name = '%s.%s.%s' % (sysproid, appname, cell)\n self.assertIn(full_app_name, running_set)\n\n return SystemAppTest",
"def test_00_app_get(self):\r\n # GET as Anonymous\r\n url = '/api/app'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')",
"def test_duo_application_get(self):\n pass",
"def test_get_feed_names(self):\n response = self.client.open(\n '/v2/rules/list',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def create_rules(app_names, error):\n # If no applications is given on the command line, generate the rules\n # for all the registered applications\n if not app_names:\n app_names = [entry.name for entry in pkg_resources.iter_entry_points('nagare.applications')]\n\n package = pkg_resources.Requirement.parse('nagare')\n static = pkg_resources.resource_filename(package, 'static')\n\n apps = [('nagare', static)] # Initialize the result tuple with the static contents of the framework\n\n for app_name in app_names:\n (cfgfile, app, dist, aconf) = util.read_application(app_name, error)\n\n static = aconf['application'].get('static', os.path.join(dist.location, 'static') if dist else None)\n\n if static and os.path.isdir(static):\n apps.append((aconf['application']['name'], static))\n\n return sorted(apps, key=lambda x: len(x[0]))",
"def test_list_rules(self):\n pass",
"def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules",
"def get_list_of_rules(app_stack_name):\n\n cloudformation = boto3.client('cloudformation')\n response = cloudformation.describe_stack_resources(\n StackName=app_stack_name,\n LogicalResourceId='ALBListenerSSL'\n )\n alb_listener = response['StackResources'][0]['PhysicalResourceId']\n\n client = boto3.client('elbv2')\n response = client.describe_rules(ListenerArn=alb_listener)\n return response['Rules']",
"def test_get_rule_details(self):\n pass",
"def test_get_rule_settings(self):\n # Basic passing test\n rule_settings_params = {'agency_code': '097', 'file': 'B'}\n response = self.app.get('/v1/rule_settings/', rule_settings_params, headers={'x-session-id': self.session_id})\n\n self.assertEqual(response.status_code, 200)\n assert {'errors', 'warnings'} <= set(response.json.keys())",
"def run(rulesfile, verbose):\n kwargs = {}\n if rulesfile:\n kwargs['rules_file'] = rulesfile\n if verbose:\n kwargs['verbosity'] = verbose\n App.run(**kwargs)",
"def test_get_inbox_ruleset(self):\n pass",
"def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res",
"def test_duo_application_list(self):\n pass",
"def test_add_url_rule():\n\n application_services.add_url_rule('/tests/application/rule', view_func=mock_view_function,\n methods=HTTPMethodEnum.GET)",
"def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)",
"def test_apply_device_rules(self):\n pass",
"def rules(self):\n return self._alert_rules_client",
"def test_root_api(self):\n\n # GIVEN API\n\n # WHEN fetching available applications and models\n response = self.api.root_api()\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n for item in response.data:\n self.assertEqual(len(item.keys()), 3)\n self.assertEqual(set(item.keys()), set(['model', 'actions', 'app_label']))\n\n # AND it contains also UI application models\n self.assertTrue(any('test' in d['app_label'] for d in response.data))\n\n # AND public applications are also available\n data = [item for item in response.data if item['app_label'] == 'admin']\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0]['model'], None)\n self.assertEqual(len(data[0]['actions'].keys()), 2)",
"def check_sysapps():\n return sysapps.test",
"def test_06_applications_without_apps(self):\r\n # Check first without apps\r\n with self.flask_app.app_context():\r\n self.create_categories()\r\n res = self.app.get('/app', follow_redirects=True)\r\n assert \"Applications\" in res.data, res.data\r\n assert Fixtures.cat_1 in res.data, res.data",
"def test_app_query(self):\r\n AppFactory.create(info={'total': 150})\r\n res = self.app.get('/api/app')\r\n data = json.loads(res.data)\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # Test a non-existant ID\r\n res = self.app.get('/api/app/3434209')\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'app', err\r\n assert err['exception_cls'] == 'NotFound', err\r\n assert err['action'] == 'GET', err",
"def test_get_configs():\n\n configs = application_services.get_configs()\n\n assert isinstance(configs, dict)\n assert len(configs) > 0"
]
| [
"0.61743987",
"0.6174281",
"0.60503745",
"0.5967797",
"0.58577245",
"0.5849165",
"0.5733583",
"0.5689103",
"0.5606323",
"0.5581847",
"0.5567735",
"0.55466115",
"0.5459906",
"0.54573005",
"0.5432083",
"0.5393312",
"0.53748804",
"0.5352753",
"0.5312163",
"0.5278006",
"0.5268371",
"0.52590406",
"0.5257305",
"0.52285874",
"0.51971674",
"0.5160017",
"0.51176333",
"0.5089438",
"0.508857",
"0.5053116"
]
| 0.79674536 | 0 |
Test the get_process_rules RPC. | def test_process_rules(self, mocked_rules, mocked_get, mocked_check):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test first RPC call with process namespec
self.assertEqual([{'start': 1}], rpc.get_process_rules('appli:proc'))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli:proc')], mocked_get.call_args_list)
self.assertEqual([call('1')], mocked_rules.call_args_list)
# reset patches
mocked_check.reset_mock()
mocked_get.reset_mock()
mocked_rules.reset_mock()
# test second RPC call with group namespec
self.assertEqual([{'stop': 2}, {'required': True}],
rpc.get_process_rules('appli:*'))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli:*')], mocked_get.call_args_list)
self.assertEqual([call('1'), call('2')], mocked_rules.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_internal_process_rules(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n process = Mock(application_name='appli', process_name='proc',\n **{'rules.serial.return_value': {'start': 0, 'stop': 1}})\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertDictEqual({'application_name': 'appli',\n 'process_name': 'proc', 'start': 0, 'stop': 1},\n rpc._get_internal_process_rules(process))",
"def test_application_rules(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with aplpication name\n self.assertDictEqual(rpc.get_application_rules('appli'), \n {'application_name': 'appli','start': 1, 'stop': 2, 'required': True})\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli')], mocked_get.call_args_list)",
"def test_rules():",
"def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)",
"def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)",
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def test_get_rule_details(self):\n pass",
"def test_list_rules(self):\n pass",
"def test_get_inbox_rulesets(self):\n pass",
"def test_can_process(self):\n self.assertTrue(self.adapter.can_process(''))",
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def test_logic(self):\n cli = Cli()\n cli.submod = mock.MagicMock()\n cli.args = mock.MagicMock()\n cli.args.service = \"group\"\n cli.logic()\n\n self.validate_test(len(cli.submod.mock_calls) == 2)\n self.validate_test(\"call.__getitem__('group')\" in\n str(cli.submod.mock_calls[0]))\n self.validate_test(\"call.__getitem__().run\" in\n str(cli.submod.mock_calls[1]))",
"def test_rpcCall(self):\n pass",
"def process_testvalidate(clients, server_data, mode, single_worker):\n\n mode = [-2] if mode == \"test\" else [2]\n return Server.dispatch_clients(clients, server_data, COMMAND_TESTVAL, mode, single_worker=single_worker)",
"def test_cron_workflow_service_get_cron_workflow(self):\n pass",
"def test_get_run(self):\n pass",
"def test_get_inbox_ruleset(self):\n pass",
"def test_cron_workflow_service_list_cron_workflows2(self):\n pass",
"def process(self,rules):\n for rule in rules:\n r,arg = rule.split('(')\n args = arg[:-1].split(',')\n self.commands.append((r,args))",
"def test_rss_queue_rule(self):\n self.pmdout.start_testpmd(\"%s\" % self.cores, \"--rxq=8 --txq=8 --port-topology=chained\")\n self.dut.send_expect(\"set fwd rxonly\", \"testpmd> \", 120)\n self.dut.send_expect(\"set verbose 1\", \"testpmd> \", 120)\n self.dut.send_expect(\"start\", \"testpmd> \", 120)\n time.sleep(2)\n\n # Create a rss queue rule\n self.dut.send_expect(\n \"flow create 0 ingress pattern end actions rss queues 1 4 7 end / end\", \"created\")\n # send the packets and verify the results\n # ipv4-other and ipv6-other is enabled by default.\n # i40e\n if (self.nic in [\"fortville_eagle\", \"fortville_spirit\",\n \"fortville_spirit_single\", \"fortpark_TLV\", \"fortville_25g\"]):\n rss_queue = [\"1\", \"4\", \"7\"]\n self.send_and_check(self.pkt4, rss_queue)\n self.send_and_check(self.pkt8, rss_queue)\n rss_queue = [\"0\"]\n self.send_and_check(self.pkt1, rss_queue)\n self.send_and_check(self.pkt2, rss_queue)\n self.send_and_check(self.pkt3, rss_queue)\n self.send_and_check(self.pkt5, rss_queue)\n self.send_and_check(self.pkt6, rss_queue)\n self.send_and_check(self.pkt7, rss_queue)\n else:\n rss_queue = [\"1\", \"4\", \"7\"]\n self.send_and_check(self.pkt1, rss_queue)\n self.send_and_check(self.pkt2, rss_queue)\n self.send_and_check(self.pkt3, rss_queue)\n self.send_and_check(self.pkt4, rss_queue)\n self.send_and_check(self.pkt5, rss_queue)\n self.send_and_check(self.pkt6, rss_queue)\n self.send_and_check(self.pkt7, rss_queue)\n self.send_and_check(self.pkt8, rss_queue)\n\n # There can't be more than one RSS queue rule existing.\n self.dut.send_expect(\n \"flow create 0 ingress pattern end actions rss queues 3 end / end\", \"error\")\n self.dut.send_expect(\n \"flow create 0 ingress pattern end actions rss types ipv4-udp end queues 3 end / end\", \"error\")\n # Flush the rules and create a new RSS queue rule.\n self.dut.send_expect(\"flow flush 0\", \"testpmd> \")\n self.dut.send_expect(\n \"flow create 0 ingress pattern end actions rss queues 3 end / end\", \"created\")\n # Send the packets and verify the results\n if (self.nic in [\"fortville_eagle\", \"fortville_spirit\",\n \"fortville_spirit_single\", \"fortpark_TLV\", \"fortville_25g\"]):\n rss_queue = [\"3\"]\n self.send_and_check(self.pkt4, rss_queue)\n self.send_and_check(self.pkt8, rss_queue)\n rss_queue = [\"0\"]\n self.send_and_check(self.pkt1, rss_queue)\n self.send_and_check(self.pkt2, rss_queue)\n self.send_and_check(self.pkt3, rss_queue)\n self.send_and_check(self.pkt5, rss_queue)\n self.send_and_check(self.pkt6, rss_queue)\n self.send_and_check(self.pkt7, rss_queue)\n else:\n rss_queue = [\"3\"]\n self.send_and_check(self.pkt1, rss_queue)\n self.send_and_check(self.pkt2, rss_queue)\n self.send_and_check(self.pkt3, rss_queue)\n self.send_and_check(self.pkt4, rss_queue)\n self.send_and_check(self.pkt5, rss_queue)\n self.send_and_check(self.pkt6, rss_queue)\n self.send_and_check(self.pkt7, rss_queue)\n self.send_and_check(self.pkt8, rss_queue)\n self.dut.send_expect(\"flow flush 0\", \"testpmd> \")\n\n # Set a wrong parameter: queue ID is 16\n self.dut.send_expect(\n \"flow create 0 ingress pattern end actions rss queues 8 end / end\", \"error\")\n # Set all the queues to the rule\n self.dut.send_expect(\n \"flow create 0 ingress pattern end actions rss queues 0 1 2 3 4 5 6 7 end / end\", \"created\")",
"def get_rules(cls):\n raise NotImplementedError()",
"def test_priority_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def process(self):\n return self.check()",
"def test_cron_workflow_service_list_cron_workflows(self):\n pass",
"def match_rules(rules, wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res",
"def rpc_match():",
"def get(self, *args):\n return _libsbml.ListOfRules_get(self, *args)",
"def test_conflicts(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'conflicting.return_value': True,\n 'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'conflicting.return_value': False,\n 'serial.return_value': {'name': 'proc_2'}}),\n 'proc_3': Mock(**{'conflicting.return_value': True,\n 'serial.return_value': {'name': 'proc_3'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_3'}],\n rpc.get_conflicts())\n self.assertEqual([call()], mocked_check.call_args_list)",
"def list_rulesets(command):\n namespace = app.main(command)\n assert namespace.command == 'lr' or namespace.command == \"listrulesets\"",
"def test_list_firewall_rules_sort(self):\r\n resources = \"firewall_rules\"\r\n cmd = firewallrule.ListFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])"
]
| [
"0.7312982",
"0.68353486",
"0.5950732",
"0.5772875",
"0.56735706",
"0.56297094",
"0.550906",
"0.5429404",
"0.5396469",
"0.52568597",
"0.5201953",
"0.50758487",
"0.50694406",
"0.5058028",
"0.50381976",
"0.5031389",
"0.50032556",
"0.49975392",
"0.49968067",
"0.4989191",
"0.4978525",
"0.49742168",
"0.4963057",
"0.49458036",
"0.49423987",
"0.49384475",
"0.49361435",
"0.49284413",
"0.4923687",
"0.4921319"
]
| 0.7942605 | 0 |
Test the get_conflicts RPC. | def test_conflicts(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.processes = {
'proc_1': Mock(**{'conflicting.return_value': True,
'serial.return_value': {'name': 'proc_1'}}),
'proc_2': Mock(**{'conflicting.return_value': False,
'serial.return_value': {'name': 'proc_2'}}),
'proc_3': Mock(**{'conflicting.return_value': True,
'serial.return_value': {'name': 'proc_3'}})}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call
self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_3'}],
rpc.get_conflicts())
self.assertEqual([call()], mocked_check.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_conflicts(self):\n query = read_query('content exploration/all_conflicts')\n response = self._submit_query(query)\n return response",
"def get_conflicts(self):\n return []",
"def conflicts(self):\r\n params = {\r\n 'f' : 'json',\r\n 'sessionID' : self._guid\r\n }\r\n url = \"%s/conflicts\" % self._url\r\n return self._con.post(url, params)",
"def checkConflicts(self):\n\t\tapDisplay.printError(\"you did not create a 'checkConflicts' function in your script\")\n\t\traise NotImplementedError()",
"def resolve_conflicts(self, commit=True):\n pass # pragma: no cover",
"def test_conciliate(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # set context and patches\n self.supervisor.supvisors.fsm.state = 3\n self.supervisor.supvisors.context.conflicts.return_value = [1, 2, 4]\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n with patch('supvisors.rpcinterface.conciliate_conflicts') as mocked_conciliate:\n # test RPC call with wrong strategy\n with self.assertRaises(RPCError) as exc:\n self.assertTrue(rpc.conciliate('a strategy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: a strategy', exc.exception.text)\n mocked_check.reset_mock()\n # test RPC call with USER strategy\n self.assertFalse(rpc.conciliate(2))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_conciliate.call_count)\n mocked_check.reset_mock()\n # test RPC call with another strategy \n self.assertTrue(rpc.conciliate(1))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(self.supervisor.supvisors, 1, [1, 2, 4])],\n mocked_conciliate.call_args_list)",
"def test_conflict(self):\n self._error_test(fitbit_exceptions.HTTPConflict)",
"def get_conflicts_by(self, actor_label):\n query = read_query('trust/conflicts_by') % (actor_label, actor_label)\n response = self._submit_query(query)\n return response",
"def test_budget_function_autocomplete_failure(client):\n\n resp = client.post(\n '/api/v2/autocomplete/budget_function/',\n content_type='application/json',\n data=json.dumps({}))\n assert resp.status_code == status.HTTP_400_BAD_REQUEST",
"def test_resolve(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/resolve\" + get_request_args)\n assert \"Thanks for resolving the issue!\" in res.data.decode(\"utf-8\")",
"def checkConflicts(self):\n\t\treturn",
"def conflicts(self, conflicts):\n\n self._conflicts = conflicts",
"def test_get_operations_list_with_correct_data(self):\n ops = self.client.get_operations_list(self.agent_id)\n self.assertIsInstance(ops, list)",
"def test_request_redis_unavailable(self):\n expected_response_as_json = []\n response = self.client.get(url_for('filmlocations_auto_complete') + '?term=mar')\n received_response_as_json = json.loads(response.get_data())\n received_response_as_json = [ r_r_as_json.encode('ascii').strip(' \\t\\n\\r') for r_r_as_json in received_response_as_json]\n self.assertEquals(set(expected_response_as_json), set(received_response_as_json))",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_intercommunalitys_get(self):\n pass",
"def test_handle_edit_lookup_error(self):\n self.db.query.return_value = []\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team \"\n \"edit brs\", user),\n (self.testcommand.lookup_error, 200))\n self.db.store.assert_not_called()",
"async def test_rpc_error(bus: lightbus.BusNode, dummy_api):\n\n async def co_call_rpc():\n await asyncio.sleep(0.1)\n return await bus.my.dummy.general_error.call_async()\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n\n consume_task.cancel()\n call_task.cancel()\n\n with pytest.raises(LightbusServerError):\n await call_task.result()",
"def test_handle_response_all_shortlist_contacted_value_not_found(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n lookup._lookup = mock.MagicMock()\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n # Only one item in pending_requests\n for i in range(1, len(uuids)):\n del lookup.pending_requests[uuids[i]]\n self.assertEqual(1, len(lookup.pending_requests))\n # Add K items from shortlist to the contacted set.\n for contact in lookup.shortlist:\n lookup.contacted.add(contact)\n # Cause the lookup to fire.\n msg = Nodes(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal,\n self.contacts)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n # The _lookup method should not be called.\n self.assertEqual(lookup._lookup.call_count, 0)\n # The lookup task has fired.\n self.assertTrue(lookup.done())\n with self.assertRaises(ValueNotFound) as result:\n lookup.result()\n self.assertIsInstance(result.exception, ValueNotFound)\n self.assertEqual(result.exception.args[0],\n \"Unable to find value for key: {}\"\n .format(self.target))",
"def test_get_issues(client, test_db):\n g.user = \"testuser\"\n with mock.patch(\"comet_core.api_v0.get_db\", return_value=test_db):\n g.test_authorized_for = [\"[email protected]\"]\n\n res = client.get(\"/v0/issues\")\n assert res.status == \"200 OK\"\n assert not res.json\n\n g.test_authorized_for = [\"[email protected]\"]\n\n res = client.get(\"/v0/issues\")\n assert res.status == \"200 OK\"\n assert res.json, res.json\n\n g.test_authorized_for = Response(status=401)\n\n res = client.get(\"/v0/issues\")\n assert res.status == \"401 UNAUTHORIZED\"\n assert not res.json",
"def test_list_concepts_get(self):\n response = self.client.open(\n '/list_concepts',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"async def test_rpc_error(bus: lightbus.BusNode, dummy_api):\n\n async def co_call_rpc():\n asyncio.sleep(0.1)\n return await bus.my.dummy.general_error.call_async()\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n\n consume_task.cancel()\n call_task.cancel()\n\n with pytest.raises(LightbusServerError):\n assert call_task.result()",
"def test_ALLOWED_RPC_FAILS(self):\n self.assertIsInstance(constants.ALLOWED_RPC_FAILS, int,\n \"constants.ALLOWED_RPC_FAILS must be an \" +\n \"integer.\")",
"def test_handle_refresh_lookup_error(self):\n test_user = User(user)\n team = Team(\"BRS\", \"brs\", \"brS\")\n test_user.permissions_level = Permissions.admin\n self.db.retrieve.return_value = None\n self.db.retrieve.side_effect = LookupError\n self.gh.org_get_teams.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team refresh\",\n user),\n (self.testcommand.lookup_error, 200))\n self.db.store.assert_not_called()",
"def svn_client_ctx_t_conflict_baton_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n pass",
"def test_client_get_organizations(mocker, client_all_orgs_input):\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mocker.MagicMock(return_value=client_all_orgs_input)\n\n org_list = test_client.get_organizations()\n\n test_client.execute_query.assert_called_once_with(\n queries.GET_ALL_ORGS, {\"after\": \"abc\", \"search\": \"\"}\n )\n assert org_list[0].acronym == \"FOO\"\n assert org_list[1].name == \"Fizz Bang\"\n assert org_list[0].domain_count == 10\n assert org_list[1].verified",
"def svn_client_ctx_t_conflict_func_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n pass",
"def test_multiple(self):\n\n with self.assertRaises(MultipleObjectsReturned):\n RST_FBO().get()",
"def test_list(self):\n response = self.client.get('/routines/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n self.assertEqual(len(response.data['results']), 2)\n self.assertEqual(response.data['results'][0]['id'], self.rout1.id)",
"def inspect(self, conflicts, inspect_all=False, set_inspected=False):\r\n url = \"%s/inspectConflicts\" % self._url\r\n params = {\r\n 'f' : 'json',\r\n 'sessionId' : self._guid,\r\n 'inspectAll' : inspect_all,\r\n 'conflicts' : conflicts,\r\n 'setInspected' : set_inspected\r\n }\r\n res = self._con.post(url, params)\r\n return res['success']"
]
| [
"0.6577668",
"0.64147437",
"0.5933698",
"0.58578426",
"0.56442785",
"0.5582066",
"0.55808526",
"0.55581826",
"0.5516254",
"0.54974395",
"0.541847",
"0.5407259",
"0.53910816",
"0.53509426",
"0.53324175",
"0.532675",
"0.5297989",
"0.52902627",
"0.5282945",
"0.5280912",
"0.5280317",
"0.52776116",
"0.52633274",
"0.5256938",
"0.5254578",
"0.5254224",
"0.5243957",
"0.52419645",
"0.5232378",
"0.5229319"
]
| 0.6872376 | 0 |
Test the start_application RPC. | def test_start_application(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
from supvisors.ttypes import ApplicationStates
# prepare context
self.supervisor.supvisors.context.applications = {'appli_1': Mock()}
# get patches
mocked_start = self.supervisor.supvisors.starter.start_application
mocked_progress = self.supervisor.supvisors.starter.in_progress
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call with unknown strategy
with self.assertRaises(RPCError) as exc:
rpc.start_application('strategy', 'appli')
self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)
self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with unknown application
with self.assertRaises(RPCError) as exc:
rpc.start_application(0, 'appli')
self.assertEqual(Faults.BAD_NAME, exc.exception.code)
self.assertEqual('BAD_NAME: appli', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with running application
application = self.supervisor.supvisors.context.applications['appli_1']
for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,
ApplicationStates.STARTING]:
application.state = appli_state
with self.assertRaises(RPCError) as exc:
rpc.start_application(0, 'appli_1')
self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)
self.assertEqual('ALREADY_STARTED: appli_1', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with stopped application
# test no wait and not done
application.state = ApplicationStates.STOPPED
mocked_start.return_value = False
result = rpc.start_application(0, 'appli_1', False)
self.assertTrue(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(0, application)], mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_start.reset_mock()
# test no wait and done
application.state = ApplicationStates.STOPPED
mocked_start.return_value = True
result = rpc.start_application(0, 'appli_1', False)
self.assertFalse(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(0, application)], mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_start.reset_mock()
# test wait and done
mocked_start.return_value = True
result = rpc.start_application(0, 'appli_1')
self.assertFalse(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(0, application)], mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_start.reset_mock()
# test wait and not done
mocked_start.return_value = False
deferred = rpc.start_application(0, 'appli_1')
# result is a function for deferred result
self.assertTrue(callable(deferred))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(0, application)], mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
# test returned function: return True when job in progress
mocked_progress.return_value = True
self.assertEqual(NOT_DONE_YET, deferred())
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: raise exception if job not in progress anymore
# and application not running
mocked_progress.return_value = False
for appli_state in [ApplicationStates.STOPPING, ApplicationStates.STOPPED,
ApplicationStates.STARTING]:
with self.assertRaises(RPCError) as exc:
deferred()
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)
self.assertEqual('ABNORMAL_TERMINATION: appli_1', exc.exception.text)
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: return True if job not in progress anymore
# and application running
application.state = ApplicationStates.RUNNING
self.assertTrue(deferred())
self.assertEqual([call()], mocked_progress.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_application_start():\n\n process = subprocess.Popen(['python', 'runserver.py'],\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE)\n\n assert process.pid\n debug_logging = process.stdout.read(100)\n process.kill()\n assert 'Starting application' in debug_logging",
"def startapp():",
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def application():\n yield create_test_application()",
"def test_application_is_singleton():\n\n app = PyrinUnitTestApplication()\n assert app == application_services.get_current_app()",
"def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)",
"def test_main_succeeds(app_tester: ApplicationTester) -> None:\n assert app_tester.execute(\"\") == 0",
"def startApplication(self, application):\n process = service.IProcess(application)\n if not self.config['originalname']:\n launchWithName(process.processName)\n self.setupEnvironment(\n self.config['chroot'], self.config['rundir'],\n self.config['nodaemon'], self.config['umask'],\n self.config['pidfile'])\n\n service.IService(application).privilegedStartService()\n\n uid, gid = self.config['uid'], self.config['gid']\n if uid is None:\n uid = process.uid\n if gid is None:\n gid = process.gid\n if uid is not None and gid is None:\n gid = pwd.getpwuid(uid).pw_gid\n\n self.shedPrivileges(self.config['euid'], uid, gid)\n app.startApplication(application, not self.config['no_save'])",
"def test_launch_deployment(self):\n pass",
"def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))",
"def cmd_start(self, app_name=None):\n rc = self.socket_command_with_project('start', app_name)\n return rc",
"def test_success_start(self, put, get, auth, circuits_app, fn_cloud_foundry_action, fn_cloud_foundry_applications):\n auth.return_value = AuthenticationMock()\n put.return_value = give_response(201, GUIDS_MOCK[\"resources\"][0])\n get.return_value = give_response(200, GUIDS_MOCK)\n\n function_params = {\n \"fn_cloud_foundry_action\": fn_cloud_foundry_action,\n \"fn_cloud_foundry_applications\": fn_cloud_foundry_applications\n }\n results = call_fn_cloud_foundry_manage_applications_function(circuits_app, function_params)\n assert results[\"test1\"][\"success\"] == True\n assert results[\"test1\"][\"current_state\"] == \"STARTED\"",
"def test_11_create_application(self, mock):\r\n # Create an app as an anonymous user\r\n with self.flask_app.app_context():\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.new_application()\r\n assert self.html_title(\"Sign in\") in res.data, res.data\r\n assert \"Please sign in to access this page.\" in res.data, res.data\r\n\r\n # Sign in and create an application\r\n res = self.register()\r\n\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Create an Application\") in res.data, res\r\n assert \"Create the application\" in res.data, res\r\n\r\n res = self.new_application(long_description='My Description')\r\n assert \"<strong>Sample App</strong>: Update the application\" in res.data\r\n assert \"Application created!\" in res.data, res\r\n\r\n app = db.session.query(App).first()\r\n assert app.name == 'Sample App', 'Different names %s' % app.name\r\n assert app.short_name == 'sampleapp', \\\r\n 'Different names %s' % app.short_name\r\n\r\n assert app.long_description == 'My Description', \\\r\n \"Long desc should be the same: %s\" % app.long_description",
"def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)",
"def test_stage_create_app(self, mock_stage_create_app):\n app = MDFakeFSTestSite(\n \"MDWeb\",\n app_options={}\n )\n app.start()\n\n self.assertTrue(mock_stage_create_app.called)",
"def run(**kwargs) -> None:\n\n # update the path to ensure the App has access to required modules\n app_lib = AppLib()\n app_lib.update_path()\n\n # import modules after path has been updated\n\n # third-party\n from tcex import TcEx # pylint: disable=import-outside-toplevel\n\n # first-party\n from app import App # pylint: disable=import-outside-toplevel\n\n tcex = TcEx()\n\n try:\n # load App class\n app = App(tcex)\n\n # set app property in testing framework\n if callable(kwargs.get('set_app')):\n kwargs.get('set_app')(app)\n\n # configure custom trigger message handler\n tcex.service.create_config_callback = app.create_config_callback\n tcex.service.delete_config_callback = app.delete_config_callback\n tcex.service.shutdown_callback = app.shutdown_callback\n tcex.service.webhook_event_callback = app.webhook_event_callback\n\n # perform prep/setup operations\n app.setup(**{})\n\n # listen on channel/topic\n tcex.service.listen()\n\n # start heartbeat threads\n tcex.service.heartbeat()\n\n # inform TC that micro-service is Ready\n tcex.service.ready = True\n\n # loop until exit\n if hasattr(app, 'loop_forever'):\n app.loop_forever() # pylint: disable=no-member\n else:\n tcex.log.info('Looping until shutdown')\n while tcex.service.loop_forever(sleep=1):\n pass\n\n # perform cleanup/teardown operations\n app.teardown(**{})\n\n # explicitly call the exit method\n tcex.playbook.exit(msg=app.exit_message)\n\n except Exception as e:\n main_err = f'Generic Error. See logs for more details ({e}).'\n tcex.log.error(traceback.format_exc())\n tcex.playbook.exit(1, main_err)",
"def test_stop_application(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n from supvisors.ttypes import ApplicationStates\n # prepare context\n self.supervisor.supvisors.context.applications = {'appli_1': Mock()}\n # get patches\n mocked_stop = self.supervisor.supvisors.stopper.stop_application\n mocked_progress = self.supervisor.supvisors.stopper.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc.stop_application('appli')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: appli', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped application\n application = self.supervisor.supvisors.context.applications['appli_1']\n application.state = ApplicationStates.STOPPED\n with self.assertRaises(RPCError) as exc:\n rpc.stop_application('appli_1')\n self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)\n self.assertEqual('NOT_RUNNING: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running application\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,\n ApplicationStates.STARTING]:\n application.state = appli_state\n # test no wait and done\n mocked_stop.return_value = True\n result = rpc.stop_application('appli_1', False)\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(application)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test wait and done\n mocked_stop.return_value = True\n result = rpc.stop_application('appli_1')\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(application)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test wait and not done\n mocked_stop.return_value = False\n result = rpc.stop_application('appli_1')\n # result is a function\n self.assertTrue(callable(result))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(application)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, result())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and application not running\n mocked_progress.return_value = False\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,\n ApplicationStates.STARTING]:\n with self.assertRaises(RPCError) as exc:\n result()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and application running\n application.state 
= ApplicationStates.STOPPED\n self.assertTrue(result())\n self.assertEqual([call()], mocked_progress.call_args_list)\n # reset patches for next loop\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n mocked_progress.reset_mock()",
"def main():\n print(\"def main\")\n return APP.run()",
"def start():\n app.run()",
"def run_starter(self, expect_to_fail=False):",
"def test_running_main_error_in_app_startup(exopy_qtbot, monkeypatch):\n from exopy.app.app_plugin import AppPlugin\n\n def false_run_startup(self, args):\n raise Exception('Fail to run start up')\n\n monkeypatch.setattr(AppPlugin, 'run_app_startup', false_run_startup)\n\n def check_dialog(qtbot, dial):\n assert 'starting' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main([])",
"def setup_application(self):\n pass",
"def test_running_main(exopy_qtbot, app_dir, monkeypatch):\n from enaml.workbench.ui.ui_plugin import UIPlugin\n\n def wait_for_window(self):\n pass\n\n # Do not release the application\n def no_release(self):\n pass\n\n monkeypatch.setattr(UIPlugin, '_release_application', no_release)\n monkeypatch.setattr(UIPlugin, 'start_application', wait_for_window)\n\n import sys\n old = sys.excepthook\n try:\n main([])\n finally:\n sys.excepthook = old",
"def pytest_configure() -> None: # pragma: no cover\n print(\"Starting server app\")\n PROC.start()\n time.sleep(1)\n if PROC.exitcode is not None:\n pytest.exit(\"Failed to start the server, exit code {}\\nLogs are in logs/server.log\".format(PROC.exitcode))\n return\n\n create_generated_client()",
"def start_app(self, app: str, **kwargs) -> None:\n kwargs[\"app\"] = app\n kwargs[\"namespace\"] = \"admin\"\n kwargs[\"__name\"] = self.name\n self.call_service(\"app/start\", **kwargs)\n return None",
"def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs",
"def test_application_running(self):\n response = self.client.get('/login', content_type='html/text')\n self.assertEqual(response.status_code, 200)\n response = self.client.get('/signup', content_type='html/text')\n self.assertEqual(response.status_code, 200)",
"def test_10_get_application(self, Mock, mock2):\r\n # Sign in and create an application\r\n with self.flask_app.app_context():\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n self.register()\r\n res = self.new_application()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n msg = \"Application: Sample App\"\r\n assert self.html_title(msg) in res.data, res\r\n err_msg = \"There should be a contribute button\"\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status == '200 OK', res.status\r\n self.signout()\r\n\r\n # Now as an anonymous user\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert self.html_title(\"Application: Sample App\") in res.data, res\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status == '200 OK', res.status\r\n err_msg = \"Anonymous user should be redirected to sign in page\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Now with a different user\r\n self.register(fullname=\"Perico Palotes\", name=\"perico\")\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert self.html_title(\"Application: Sample App\") in res.data, res\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n res = self.app.get('/app/sampleapp/settings')\r\n assert res.status == '403 FORBIDDEN', res.status",
"def run_app(self):\n # Update system arguments\n sys.argv[0] = sys.executable\n sys.argv[1] = '{}.py'.format(sys.argv[1])\n\n # Make sure to exit with the return value from the subprocess call\n self._app_process = subprocess.Popen(sys.argv)\n return self._app_process.wait() # returns exit code",
"def test_app():\n pass"
]
| [
"0.7454864",
"0.7355862",
"0.71761405",
"0.69260675",
"0.6739427",
"0.66530526",
"0.6608374",
"0.65118265",
"0.65054536",
"0.6480569",
"0.64436203",
"0.64220744",
"0.63778853",
"0.63504833",
"0.63466454",
"0.63317406",
"0.63304865",
"0.6306708",
"0.6297485",
"0.627524",
"0.62587684",
"0.6250944",
"0.6240315",
"0.62384385",
"0.62313324",
"0.62284404",
"0.62196195",
"0.62190694",
"0.62134725",
"0.6188944"
]
| 0.77363515 | 0 |
Test the stop_application RPC. | def test_stop_application(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
from supvisors.ttypes import ApplicationStates
# prepare context
self.supervisor.supvisors.context.applications = {'appli_1': Mock()}
# get patches
mocked_stop = self.supervisor.supvisors.stopper.stop_application
mocked_progress = self.supervisor.supvisors.stopper.in_progress
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call with unknown application
with self.assertRaises(RPCError) as exc:
rpc.stop_application('appli')
self.assertEqual(Faults.BAD_NAME, exc.exception.code)
self.assertEqual('BAD_NAME: appli', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with stopped application
application = self.supervisor.supvisors.context.applications['appli_1']
application.state = ApplicationStates.STOPPED
with self.assertRaises(RPCError) as exc:
rpc.stop_application('appli_1')
self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)
self.assertEqual('NOT_RUNNING: appli_1', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with running application
for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,
ApplicationStates.STARTING]:
application.state = appli_state
# test no wait and done
mocked_stop.return_value = True
result = rpc.stop_application('appli_1', False)
self.assertFalse(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(application)], mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_stop.reset_mock()
# test wait and done
mocked_stop.return_value = True
result = rpc.stop_application('appli_1')
self.assertFalse(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(application)], mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_stop.reset_mock()
# test wait and not done
mocked_stop.return_value = False
result = rpc.stop_application('appli_1')
# result is a function
self.assertTrue(callable(result))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(application)], mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
# test returned function: return True when job in progress
mocked_progress.return_value = True
self.assertEqual(NOT_DONE_YET, result())
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: raise exception if job not in progress anymore
# and application not running
mocked_progress.return_value = False
for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,
ApplicationStates.STARTING]:
with self.assertRaises(RPCError) as exc:
result()
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)
self.assertEqual('ABNORMAL_TERMINATION: appli_1', exc.exception.text)
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: return True if job not in progress anymore
# and application running
application.state = ApplicationStates.STOPPED
self.assertTrue(result())
self.assertEqual([call()], mocked_progress.call_args_list)
# reset patches for next loop
mocked_check.reset_mock()
mocked_stop.reset_mock()
mocked_progress.reset_mock() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cmd_stop(self, app_name=None):\n rc = self.socket_command_with_project('stop', app_name)\n return rc",
"def stop_app(package):\n G.DEVICE.stop_app(package)",
"def stop_app(self, app: str, **kwargs) -> None:\n kwargs[\"app\"] = app\n kwargs[\"namespace\"] = \"admin\"\n kwargs[\"__name\"] = self.name\n self.call_service(\"app/stop\", **kwargs)\n return None",
"def stop_app(self, name, stateless):\n raise NotImplementedError",
"def stop_framework (driver):\n status = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1\n driver.stop();\n sys.exit(status)",
"def teardown_module():\n with suppress(Exception):\n mock_ad_xapp.stop()",
"def InterfaceClientStop(self, exitCode=200): \n pass",
"def stopTestRun(self):",
"def test_terminate_run(self):\n pass",
"def stop_wasabi(rpc_user, rpc_pwd, wasabi_proc):\n data = '{\"jsonrpc\":\"2.0\", \"method\":\"stop\"}'\n print('Stopping Wasabi')\n call_rpc(rpc_user, rpc_pwd, data)\n # Checks the Wasabi process indeed quit.\n index = wasabi_proc.expect_exact(['Daemon stopped',\n EOF,\n ], timeout=None)\n if index == 0:\n wasabi_proc.kill(SIGTERM)\n wasabi_proc.wait()\n print('Stopped')\n return\n elif index == 1:\n raise EOFError",
"def shutdown(self):\n\n self.log.debug(\"Shutting down %s application\" % self.app_name)\n cmd_output = admin_tasks.manage_service(self.app_name, 'stop')\n if cmd_output:\n self.log.debug('Getting application process data')\n self.log.info('Application service has been shutdown')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.error('Application service shutdown failed')\n sys.exit(1)",
"def teardown_application(self):\n pass",
"def stop_application_mode(self) -> None:\n # Nothing to do",
"def test_stop_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_stop = self.supervisor.supvisors.stopper.stop_process\n mocked_progress = self.supervisor.supvisors.stopper.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'stopped.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.stop_process('appli_1')\n self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)\n self.assertEqual('NOT_RUNNING: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'stopped.return_value': False}),\n Mock(**{'stopped.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.stop_process('appli_1')\n self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)\n self.assertEqual('NOT_RUNNING: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': True,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_stop.return_value = False\n result = rpc.stop_process('appli:*', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(proc_1), call(proc_2)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test RPC call no wait and done\n mocked_stop.return_value = True\n result = rpc.stop_process('appli:*', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(proc_1), call(proc_2)],\n mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test RPC call with wait and done\n result = rpc.stop_process('appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(proc_1), call(proc_2)],\n mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test RPC call with wait and not done\n mocked_stop.return_value = False\n deferred = rpc.stop_process('appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(proc_1), call(proc_2)],\n mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, 
deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still running\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process stopped\n proc_1.running.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def test_stop(self):\n\n message = {\"method\": \"stop\",\n \"params\": {\"elem\": self.container_running}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"stop\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_running\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_stop_status = containers[container_name].lower().find(\"exited\")\n\n self.assertEqual(find_stop_status, 0, \"Container has not stopped\")",
"def stopRPC(time): #Status: WIP\r\n pass",
"def stopTest(self, test):",
"def stop_app():\n try:\n get_gui_controller(create_if_missing=False).stop()\n except threadprop.NoControllerThreadError:\n stop_all_controllers(sync=False)",
"def force_stop_app(self,param,ignore_error_handle = False):\n message = {}\n package = str(param.get('package',None));\n step = 'force stop app ' + package;\n try:\n self.driver.force_stop_app(package);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;",
"def stop():\n app = get_vistrails_application()\n app.finishSession()\n app.save_configuration()\n app.destroy()",
"async def stop(self):\n debug(\"stop() called\")\n\n if self.n2vc and self._running and not self._stopping:\n self._running = False\n self._stopping = True\n\n # Destroy the network service\n try:\n await self.n2vc.DestroyNetworkService(self.ns_name)\n except Exception as e:\n debug(\n \"Error Destroying Network Service \\\"{}\\\": {}\".format(\n self.ns_name,\n e,\n )\n )\n\n # Wait for the applications to be removed and delete the containers\n for application in self.charms:\n try:\n\n while True:\n # Wait for the application to be removed\n await asyncio.sleep(10)\n if not await self.n2vc.HasApplication(\n self.ns_name,\n application,\n ):\n break\n\n # Need to wait for the charm to finish, because native charms\n if self.state[application]['container']:\n debug(\"Deleting LXD container...\")\n destroy_lxd_container(\n self.state[application]['container']\n )\n self.state[application]['container'] = None\n debug(\"Deleting LXD container...done.\")\n else:\n debug(\"No container found for {}\".format(application))\n except Exception as e:\n debug(\"Error while deleting container: {}\".format(e))\n\n # Logout of N2VC\n try:\n debug(\"stop(): Logging out of N2VC...\")\n await self.n2vc.logout()\n self.n2vc = None\n debug(\"stop(): Logging out of N2VC...Done.\")\n except Exception as ex:\n debug(ex)\n\n # Let the test know we're finished.\n debug(\"Marking test as finished.\")\n # self._running = False\n else:\n debug(\"Skipping stop()\")",
"def stop_app(self, package: str) -> None:\n self.shell(['am', 'force-stop', package])",
"def quit_app(self):\n self._socket_client.receiver_controller.stop_app()",
"def test_stop_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('stop_machine', {}).get(\n 'machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'stopped', 'actions': {'resize': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')",
"def quit_app(self):\n self._socket_client.receiver_controller.stop_app()",
"def stop(params) -> None:\n check_root()\n stop_streamer(params)\n unload_kernel_module(params)\n stop_microservice(params)",
"def stop(self) -> str:\n return self.rpc_call(\"stop\")",
"def test_stopService(self):\n port = self.port(description=u'foo')\n port.startService()\n port.stopService()\n self.assertTrue(self._service.stopped)",
"def stop():\n\n tidyUp()\n shutdown_server()\n return \"Stopping server\"",
"def stop():\n\n tidyUp()\n shutdown_server()\n return \"Stopping server\""
]
| [
"0.7295855",
"0.6790891",
"0.67763025",
"0.6693041",
"0.6658353",
"0.6607735",
"0.65045136",
"0.6495884",
"0.64950496",
"0.64893675",
"0.64876133",
"0.6467326",
"0.64614654",
"0.6457347",
"0.64467245",
"0.64464545",
"0.6444125",
"0.6443598",
"0.64290714",
"0.6423376",
"0.63990396",
"0.63970923",
"0.63907796",
"0.638369",
"0.6371355",
"0.6310167",
"0.629442",
"0.62854075",
"0.6279309",
"0.6279309"
]
| 0.8004846 | 0 |
Test the start_args RPC. | def test_start_args(self, mocked_check, mocked_proc):
from supvisors.rpcinterface import RPCInterface
# prepare context
info_source = self.supervisor.supvisors.info_source
info_source.update_extra_args.side_effect = KeyError
info_source.supervisor_rpc_interface.startProcess.side_effect = [
RPCError(Faults.NO_FILE, 'no file'),
RPCError(Faults.NOT_EXECUTABLE),
RPCError(Faults.ABNORMAL_TERMINATION),
'done']
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call with extra arguments and a process that is not compliant
with self.assertRaises(RPCError) as exc:
rpc.start_args('appli:proc', 'dummy arguments')
self.assertEqual(Faults.BAD_EXTRA_ARGUMENTS, exc.exception.code)
self.assertEqual("BAD_EXTRA_ARGUMENTS: rules for namespec appli:proc"
" are not compatible with extra arguments in command line",
exc.exception.text)
self.assertEqual(0, mocked_check.call_count)
self.assertEqual(0, info_source.update_extra_args.call_count)
self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)
# test RPC call with extra arguments and a process that is compliant
# but unknown in Supervisor
with self.assertRaises(RPCError) as exc:
rpc.start_args('appli:proc', 'dummy arguments')
self.assertEqual(Faults.BAD_NAME, exc.exception.code)
self.assertEqual("BAD_NAME: namespec appli:proc unknown in this Supervisor instance",
exc.exception.text)
self.assertEqual([call('appli:proc', 'dummy arguments')],
info_source.update_extra_args.call_args_list)
self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)
info_source.update_extra_args.reset_mock()
info_source.update_extra_args.side_effect = None
# test RPC call with start exceptions
mocked_proc.side_effect = None
mocked_proc.return_value = None, None
# NO_FILE exception triggers an update of the process state
with self.assertRaises(RPCError) as exc:
rpc.start_args('appli:proc')
self.assertEqual(Faults.NO_FILE, exc.exception.code)
self.assertEqual("NO_FILE: no file", exc.exception.text)
self.assertEqual([call('appli:proc', '')],
info_source.update_extra_args.call_args_list)
self.assertEqual([call('appli:proc', True)],
info_source.supervisor_rpc_interface.startProcess.call_args_list)
self.assertEqual([call('appli:proc', 'NO_FILE: no file')],
info_source.force_process_fatal.call_args_list)
# reset patches
info_source.update_extra_args.reset_mock()
info_source.force_process_fatal.reset_mock()
info_source.supervisor_rpc_interface.startProcess.reset_mock()
# NOT_EXECUTABLE exception triggers an update of the process state
with self.assertRaises(RPCError) as exc:
rpc.start_args('appli:proc', wait=False)
self.assertEqual(Faults.NOT_EXECUTABLE, exc.exception.code)
self.assertEqual("NOT_EXECUTABLE", exc.exception.text)
self.assertEqual([call('appli:proc', '')],
info_source.update_extra_args.call_args_list)
self.assertEqual([call('appli:proc', False)],
info_source.supervisor_rpc_interface.startProcess.call_args_list)
self.assertEqual([call('appli:proc', 'NOT_EXECUTABLE')],
info_source.force_process_fatal.call_args_list)
# reset patches
info_source.update_extra_args.reset_mock()
info_source.force_process_fatal.reset_mock()
info_source.supervisor_rpc_interface.startProcess.reset_mock()
# other exception doesn't trigger an update of the process state
with self.assertRaises(RPCError) as exc:
rpc.start_args('appli:proc', wait=False)
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)
self.assertEqual("ABNORMAL_TERMINATION", exc.exception.text)
self.assertEqual([call('appli:proc', '')],
info_source.update_extra_args.call_args_list)
self.assertEqual([call('appli:proc', False)],
info_source.supervisor_rpc_interface.startProcess.call_args_list)
self.assertEqual(0, info_source.force_process_fatal.call_count)
# reset patches
info_source.update_extra_args.reset_mock()
info_source.supervisor_rpc_interface.startProcess.reset_mock()
# finally, normal behaviour
self.assertEqual('done', rpc.start_args('appli:proc')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start( *args, **kwargs ):",
"def test_arguments(self):\n args = []\n def main(reactor, x, y, z):\n args.extend((x, y, z))\n return defer.succeed(None)\n r = _FakeReactor()\n exitError = self.assertRaises(\n SystemExit, task.react, main, [1, 2, 3], _reactor=r)\n self.assertEqual(0, exitError.code)\n self.assertEqual(args, [1, 2, 3])",
"def test_args(self):\n parser = argparse.ArgumentParser(\n prog=\"sysbottle\", description=\"sysbottle is parsed\"\n )\n subparsers = parser.add_subparsers()\n sysbottle.build(subparsers)\n args = parser.parse_args(\n [\n \"sysbottle\",\n \"abc.txt\",\n \"-c\",\n \"90\",\n \"-q\",\n \"1\",\n \"-d\",\n \"sda\",\n \"-i\",\n \"5\",\n \"-t\",\n \"3\",\n ]\n )\n self.assertTrue(hasattr(args, \"file\"))\n self.assertTrue(hasattr(args, \"cpu\"))\n self.assertTrue(hasattr(args, \"diskQ\"))\n self.assertTrue(hasattr(args, \"disks\"))\n self.assertTrue(hasattr(args, \"iowait\"))\n self.assertTrue(hasattr(args, \"throughput\"))",
"def startTestRun(self):",
"def _start(args=None):\n options = _parse_args(args)\n main(**options)",
"def Start(self, *args, **kwargs):\r\n\t\tpayload = { \"Arg1\": self }\r\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\r\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\r\n\t\treturn self._execute('start', payload=payload, response_object=None)",
"def test_main_arguments():\n args = argparse.Namespace(url=RANDOM_URL,\n username=RANDOM_USERNAME,\n password=RANDOM_PASSWORD,\n tenantcode=RANDOM_TENANTCODE)\n result = Config(\"wso_args.json\").main(args)\n\n assert result is True",
"def Start(self, *args, **kwargs):\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('start', payload=payload, response_object=None)",
"def init(self, args):\n return True",
"def test_0_args(library_db):\n import argparse\n\n namespace = app.main([])\n assert namespace.__class__ == argparse.Namespace",
"def initTest(self, myargs):\n return",
"def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"start\", payload=payload, response_object=None)",
"def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('start', payload=payload, response_object=None)",
"def test_run_started(self):",
"def test_args(self):\n args = forge.args\n assert isinstance(args, forge._signature.VarPositional)\n assert args.name == 'args'\n assert args.converter is None\n assert args.validator is None",
"def startTestHook(self):",
"def main_parse_args(args):\n # Must return so that check command return value is passed back to calling routine\n # otherwise py.test will fail\n return main(parse_args(args))",
"def main_parse_args(args):\n # Must return so that check command return value is passed back to calling routine\n # otherwise py.test will fail\n return main(parse_args(args))",
"def run(self, args: argparse.Namespace) -> None:\n pass",
"def get_start_cmd_args(self):\r\n return self.get_args(OSPL.start)",
"def _run_args(cls, args: Optional[List[str]] = None):\n parser = cls.setup_args()\n opt = parser.parse_args(args=args)\n return cls._run_from_parser_and_opt(opt, parser)",
"def _test(self, args, **extra_args):\n if not isinstance(args, argparse.Namespace):\n raise Exception(\"args should of an instance of argparse.Namespace\")\n\n # create new freight forwarder object\n # config_override=manifest_override\n freight_forwarder = FreightForwarder()\n\n # create commercial invoice this is the contact given to freight forwarder dispatch containers and images\n commercial_invoice = freight_forwarder.commercial_invoice(\n 'test',\n args.data_center,\n args.environment,\n args.service\n )\n\n # run test container.\n bill_of_lading = freight_forwarder.test(commercial_invoice, args.configs)\n\n # pretty lame... Need to work on return values through to app to make them consistent.\n exit_code = 0 if bill_of_lading else 1\n\n if exit_code != 0:\n exit(exit_code)",
"def start(self):\n (self.options, self.arguments) = self.__opt_parser.parse_args()\n\n for opt_name in self.__req_option:\n if not getattr(self.options, opt_name):\n self.__opt_parser.error(\"Required option '%s' not set!\" %\n opt_name)\n\n # Capture warning and critical thresholds if there were any.\n # These can be overridden by API calls.\n\n if hasattr(self.options, 'warning'):\n self.__warning = getattr(self.options, 'warning')\n\n if hasattr(self.options, 'critical'):\n self.__critical = getattr(self.options, 'critical')\n\n self.__started = True\n self.__exit_status = 'OK'",
"def setup(self, args={}):\n\n return Status.RUN",
"def test_validargs(clickrunner):\n for args in maincli.valid_args:\n result = clickrunner.invoke(maincli.entrypoint, args)\n assert result.exit_code == 2\n assert \"Missing command\" in result.output",
"def run_starter(self, expect_to_fail=False):",
"def test_args(self):\n self.assertEqual(self.parser.N_particles, 500)\n self.assertEqual(self.parser.reduced_T, 0.9)\n self.assertEqual(self.parser.reduced_rho, 0.9)\n self.assertEqual(self.parser.n_steps, 1000000)\n self.assertEqual(self.parser.freq_ener, 1000)\n self.assertEqual(self.parser.freq_traj, 1000)\n self.assertEqual(self.parser.max_d, 0.1)\n self.assertEqual(self.parser.energy, 'UnitlessLJ')",
"def test_start_application(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n from supvisors.ttypes import ApplicationStates\n # prepare context\n self.supervisor.supvisors.context.applications = {'appli_1': Mock()}\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_application\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_application('strategy', 'appli')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc.start_application(0, 'appli')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: appli', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running application\n application = self.supervisor.supvisors.context.applications['appli_1']\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,\n ApplicationStates.STARTING]:\n application.state = appli_state\n with self.assertRaises(RPCError) as exc:\n rpc.start_application(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped application\n # test no wait and not done\n application.state = ApplicationStates.STOPPED\n mocked_start.return_value = False\n result = rpc.start_application(0, 'appli_1', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test no wait and done\n application.state = ApplicationStates.STOPPED\n mocked_start.return_value = True\n result = rpc.start_application(0, 'appli_1', False)\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test wait and done\n mocked_start.return_value = True\n result = rpc.start_application(0, 'appli_1')\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_application(0, 'appli_1')\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n 
self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and application not running\n mocked_progress.return_value = False\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.STOPPED,\n ApplicationStates.STARTING]:\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and application running\n application.state = ApplicationStates.RUNNING\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def test_010_args(self):\n with self.assertRaisesRegex(RuntimeError, \"Task .* contains an unsupported parameter \\\"[*]args\\\"\"):\n self.get_caller([ArgsTaskOverride])",
"def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, 
mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)"
]
| [
"0.6905139",
"0.65304023",
"0.64246875",
"0.63796157",
"0.6378576",
"0.6330152",
"0.63238597",
"0.62722135",
"0.62404764",
"0.6227943",
"0.6223856",
"0.61745465",
"0.612772",
"0.61003673",
"0.60885257",
"0.6064845",
"0.60415494",
"0.60415494",
"0.60408163",
"0.6035867",
"0.6031239",
"0.6020441",
"0.60191697",
"0.5971051",
"0.5945823",
"0.59391665",
"0.5936998",
"0.59153956",
"0.5909411",
"0.58879364"
]
| 0.73286694 | 0 |
Test the start_process RPC. | def test_start_process(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
# get patches
mocked_start = self.supervisor.supvisors.starter.start_process
mocked_progress = self.supervisor.supvisors.starter.in_progress
# create RPC instance
rpc = RPCInterface(self.supervisor)
# patch the instance
rpc._get_application_process = Mock()
# test RPC call with unknown strategy
with self.assertRaises(RPCError) as exc:
rpc.start_process('strategy', 'appli:proc')
self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)
self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with running process
rpc._get_application_process.return_value = (
None, Mock(**{'running.return_value': True,
'namespec.return_value': 'proc1'}))
with self.assertRaises(RPCError) as exc:
rpc.start_process(0, 'appli_1')
self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)
self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with running processes
rpc._get_application_process.return_value = (
Mock(**{'processes.values.return_value': [
Mock(**{'running.return_value': False}),
Mock(**{'running.return_value': True,
'namespec.return_value': 'proc2'})]}), None)
with self.assertRaises(RPCError) as exc:
rpc.start_process(0, 'appli_1')
self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)
self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with stopped processes
proc_1 = Mock(**{'running.return_value': False,
'stopped.return_value': True,
'namespec.return_value': 'proc1'})
proc_2 = Mock(**{'running.return_value': False,
'stopped.return_value': False,
'namespec.return_value': 'proc2'})
rpc._get_application_process.return_value = (
Mock(**{'processes.values.return_value': [
proc_1, proc_2]}), None)
# test RPC call with no wait and not done
mocked_start.return_value = False
result = rpc.start_process(1, 'appli:*', 'argument list', False)
self.assertTrue(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(1, proc_1, 'argument list'),
call(1, proc_2, 'argument list')], mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_start.reset_mock()
# test RPC call with no wait and done
mocked_start.return_value = True
result = rpc.start_process(1, 'appli:*', 'argument list', False)
self.assertTrue(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(1, proc_1, 'argument list'),
call(1, proc_2, 'argument list')], mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_start.reset_mock()
# test RPC call with wait and done
result = rpc.start_process(2, 'appli:*', wait=True)
self.assertTrue(result)
self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],
mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_start.reset_mock()
# test RPC call with wait and not done
mocked_start.return_value = False
deferred = rpc.start_process(2, 'appli:*', wait=True)
# result is a function for deferred result
self.assertTrue(callable(deferred))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],
mocked_start.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
# test returned function: return True when job in progress
mocked_progress.return_value = True
self.assertEqual(NOT_DONE_YET, deferred())
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: raise exception if job not in progress anymore
# and process still stopped
mocked_progress.return_value = False
with self.assertRaises(RPCError) as exc:
deferred()
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)
self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: return True if job not in progress anymore
# and process running
proc_1.stopped.return_value = False
self.assertTrue(deferred())
self.assertEqual([call()], mocked_progress.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())",
"def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))",
"def test_startService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the process\r\n self.reactor.advance(0)\r\n self.assertTrue(\"foo\" in self.pm.protocols)",
"def test_application_start():\n\n process = subprocess.Popen(['python', 'runserver.py'],\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE)\n\n assert process.pid\n debug_logging = process.stdout.read(100)\n process.kill()\n assert 'Starting application' in debug_logging",
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)",
"def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])",
"def start(self, process_id=None):\n try:\n self.process = psutil.Process(process_id)\n logging.debug(self.process.connections())\n logging.debug(self.process.ppid())\n return \"Process Started\"\n except Exception as e:\n logging.exception(e)\n return \"Process doesnt exists\"",
"def _StartMonitoringProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n pid = process.pid\n\n if pid in self._process_information_per_pid:\n raise KeyError(\n 'Already monitoring process (PID: {0:d}).'.format(pid))\n\n if pid in self._rpc_clients_per_pid:\n raise KeyError(\n 'RPC client (PID: {0:d}) already exists'.format(pid))\n\n rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()\n\n # Make sure that a worker process has started its RPC server.\n # The RPC port will be 0 if no server is available.\n rpc_port = process.rpc_port.value\n time_waited_for_process = 0.0\n while not rpc_port:\n time.sleep(0.1)\n rpc_port = process.rpc_port.value\n time_waited_for_process += 0.1\n\n if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:\n raise IOError(\n 'RPC client unable to determine server (PID: {0:d}) port.'.format(\n pid))\n\n hostname = 'localhost'\n\n if not rpc_client.Open(hostname, rpc_port):\n raise IOError((\n 'RPC client unable to connect to server (PID: {0:d}) '\n 'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))\n\n self._rpc_clients_per_pid[pid] = rpc_client\n self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)",
"def test_basic(self):\n portpicker.PickUnusedPort().AndReturn(2345)\n # As the lock is mocked out, this provides a mox expectation.\n with self.proxy._process_lock:\n safe_subprocess.start_process_file(\n args=['/runtime'],\n input_string=self.runtime_config.SerializeToString(),\n env={'foo': 'bar',\n 'PORT': '2345'},\n cwd=self.tmpdir,\n stderr=subprocess.PIPE).AndReturn(self.process)\n self.proxy._stderr_tee = FakeTee('')\n\n self.mox.ReplayAll()\n self.proxy.start()\n self.assertEquals(2345, self.proxy._proxy._port)\n self.mox.VerifyAll()",
"def test_start_args(self, mocked_check, mocked_proc):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n info_source = self.supervisor.supvisors.info_source\n info_source.update_extra_args.side_effect = KeyError\n info_source.supervisor_rpc_interface.startProcess.side_effect = [\n RPCError(Faults.NO_FILE, 'no file'),\n RPCError(Faults.NOT_EXECUTABLE),\n RPCError(Faults.ABNORMAL_TERMINATION),\n 'done']\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with extra arguments and a process that is not compliant\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_EXTRA_ARGUMENTS, exc.exception.code)\n self.assertEqual(\"BAD_EXTRA_ARGUMENTS: rules for namespec appli:proc\"\n \" are not compatible with extra arguments in command line\",\n exc.exception.text)\n self.assertEqual(0, mocked_check.call_count)\n self.assertEqual(0, info_source.update_extra_args.call_count)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n # test RPC call with extra arguments and a process that is compliant\n # but unknown in Supervisor\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual(\"BAD_NAME: namespec appli:proc unknown in this Supervisor instance\",\n exc.exception.text)\n self.assertEqual([call('appli:proc', 'dummy arguments')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n info_source.update_extra_args.reset_mock()\n info_source.update_extra_args.side_effect = None\n # test RPC call with start exceptions\n mocked_proc.side_effect = None\n mocked_proc.return_value = None, None\n # NO_FILE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc')\n self.assertEqual(Faults.NO_FILE, exc.exception.code)\n self.assertEqual(\"NO_FILE: no file\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', True)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NO_FILE: no file')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # NOT_EXECUTABLE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.NOT_EXECUTABLE, exc.exception.code)\n self.assertEqual(\"NOT_EXECUTABLE\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NOT_EXECUTABLE')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # other exception doesn't trigger an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n 
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual(\"ABNORMAL_TERMINATION\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual(0, info_source.force_process_fatal.call_count)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # finally, normal behaviour\n self.assertEqual('done', rpc.start_args('appli:proc'))",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)",
"def start(self):\r\n return self.start_subprocess()",
"def test_startService(self):\n port = self.port(description=u'foo')\n port.privilegedStartService()\n self.assertTrue(self._service.privilegedStarted)\n port.startService()\n self.assertTrue(self._service.started)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def setUp(self):\n self.p = Process(target = start_server)\n self.p.start()\n time.sleep(0.5)\n #init_fakeDB()\n time.sleep(0.5)",
"def reallyStartProcess(self, name):\n if name in self.protocols:\n return\n p = self.protocols[name] = DelayedStartupLoggingProtocol()\n p.service = self\n p.name = name\n procObj, env, uid, gid = self.processes[name]\n self.timeStarted[name] = time.time()\n\n childFDs = {0: \"w\", 1: \"r\", 2: \"r\"}\n\n childFDs.update(procObj.getFileDescriptors())\n\n procObj.starting()\n\n args = procObj.getCommandLine()\n\n self._reactor.spawnProcess(\n p, args[0], args, uid=uid, gid=gid, env=env,\n childFDs=childFDs\n )",
"def test_basic(self):\n # start()\n # As the lock is mocked out, this provides a mox expectation.\n with self.proxy._process_lock:\n safe_subprocess.start_process_file(\n args=['/runtime'],\n input_string=self.runtime_config.SerializeToString(),\n env={'foo': 'bar'},\n cwd=self.tmpdir,\n stderr=subprocess.PIPE).AndReturn(self.process)\n self.process.poll().AndReturn(None)\n self.process.child_out.seek(0).AndReturn(None)\n self.process.child_out.read().AndReturn('1234\\n')\n self.process.child_out.close().AndReturn(None)\n self.process.child_out.name = '/tmp/c-out.ABC'\n os.remove('/tmp/c-out.ABC').AndReturn(None)\n self.proxy._stderr_tee = FakeTee('')\n\n self.mox.ReplayAll()\n self.proxy.start()\n self.assertEquals(1234, self.proxy._proxy._port)\n self.mox.VerifyAll()",
"def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None",
"def wait_process_running(process):\n assert process.is_running()",
"def test(self):\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. \"\"\"\n \n logger.info('**Starting test**')\n q = Queue()\n\n p = Process(target=self.client_process1, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n logger.debug(\"Restarting dataClay\")\n self.mock.mock.restartDataClay()\n p = Process(target=self.client_process2, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n\n logger.info(\"** Test OK!\")",
"def _check_started(f):\n def inner(self, *args, **kwargs):\n if self._proc is None:\n raise ProcessIsNotStartedError('Call start() first to run the process.')\n return f(self, *args, **kwargs)\n\n return inner",
"def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)",
"def Start(self):\n self.CallClient(standard.ReadBuffer, next_state=\"WrongProcess\")",
"def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)",
"def _StartWorkerProcess(self, process_name):",
"def start_process(self, connection):\n\n self.handle_process(connection)",
"def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))",
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')"
]
| [
"0.76212543",
"0.72994727",
"0.7251439",
"0.71272534",
"0.68497163",
"0.683988",
"0.6688136",
"0.66484785",
"0.6599558",
"0.65992403",
"0.65560764",
"0.6489021",
"0.6480702",
"0.64585495",
"0.64510113",
"0.64510113",
"0.64510113",
"0.64309233",
"0.63594514",
"0.6302075",
"0.62976116",
"0.6294695",
"0.6261241",
"0.62426186",
"0.6237144",
"0.6207636",
"0.61718434",
"0.6146581",
"0.6136882",
"0.6134265"
]
| 0.73775786 | 1 |
Test the stop_process RPC. | def test_stop_process(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
# get patches
mocked_stop = self.supervisor.supvisors.stopper.stop_process
mocked_progress = self.supervisor.supvisors.stopper.in_progress
# create RPC instance
rpc = RPCInterface(self.supervisor)
# patch the instance
rpc._get_application_process = Mock()
# test RPC call with stopped process
rpc._get_application_process.return_value = (
None, Mock(**{'stopped.return_value': True,
'namespec.return_value': 'proc1'}))
with self.assertRaises(RPCError) as exc:
rpc.stop_process('appli_1')
self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)
self.assertEqual('NOT_RUNNING: proc1', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with stopped processes
rpc._get_application_process.return_value = (
Mock(**{'processes.values.return_value': [
Mock(**{'stopped.return_value': False}),
Mock(**{'stopped.return_value': True,
'namespec.return_value': 'proc2'})]}), None)
with self.assertRaises(RPCError) as exc:
rpc.stop_process('appli_1')
self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)
self.assertEqual('NOT_RUNNING: proc2', exc.exception.text)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
# test RPC call with running processes
proc_1 = Mock(**{'running.return_value': True,
'stopped.return_value': False,
'namespec.return_value': 'proc1'})
proc_2 = Mock(**{'running.return_value': False,
'stopped.return_value': False,
'namespec.return_value': 'proc2'})
rpc._get_application_process.return_value = (
Mock(**{'processes.values.return_value': [
proc_1, proc_2]}), None)
# test RPC call with no wait and not done
mocked_stop.return_value = False
result = rpc.stop_process('appli:*', False)
self.assertTrue(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(proc_1), call(proc_2)], mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_stop.reset_mock()
# test RPC call with no wait and done
mocked_stop.return_value = True
result = rpc.stop_process('appli:*', False)
self.assertTrue(result)
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(proc_1), call(proc_2)],
mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_stop.reset_mock()
# test RPC call with wait and done
result = rpc.stop_process('appli:*', wait=True)
self.assertTrue(result)
self.assertEqual([call(proc_1), call(proc_2)],
mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
mocked_check.reset_mock()
mocked_stop.reset_mock()
# test RPC call with wait and not done
mocked_stop.return_value = False
deferred = rpc.stop_process('appli:*', wait=True)
# result is a function for deferred result
self.assertTrue(callable(deferred))
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call(proc_1), call(proc_2)],
mocked_stop.call_args_list)
self.assertEqual(0, mocked_progress.call_count)
# test returned function: return True when job in progress
mocked_progress.return_value = True
self.assertEqual(NOT_DONE_YET, deferred())
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: raise exception if job not in progress anymore
# and process still running
mocked_progress.return_value = False
with self.assertRaises(RPCError) as exc:
deferred()
self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)
self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)
self.assertEqual([call()], mocked_progress.call_args_list)
mocked_progress.reset_mock()
# test returned function: return True if job not in progress anymore
# and process stopped
proc_1.running.return_value = False
self.assertTrue(deferred())
self.assertEqual([call()], mocked_progress.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_stopProcessAlreadyStopped(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIdentical(None, self.pm.stopProcess(\"foo\"))",
"def stop(self):\n if not self.process_pid:\n raise Exception('why is this being called? %s' % self.server_name)\n\n if self.stop_kill:\n os.kill(self.process_pid, signal.SIGTERM)\n rc = wait_for_fork(self.process_pid, raise_error=False)\n return (rc, '', '')",
"def test_stopService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.addProcess(\"bar\", [\"bar\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the processes\r\n self.reactor.advance(self.pm.threshold)\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertIn(\"bar\", self.pm.protocols)\r\n\r\n self.reactor.advance(1)\r\n\r\n self.pm.stopService()\r\n # Advance to beyond the killTime - all monitored processes\r\n # should have exited\r\n self.reactor.advance(self.pm.killTime + 1)\r\n # The processes shouldn't be restarted\r\n self.assertEqual({}, self.pm.protocols)",
"def test_stopProcessForcedKill(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.reactor.advance(self.pm.threshold)\r\n proc = self.pm.protocols[\"foo\"].transport\r\n # Arrange for the fake process to live longer than the killTime\r\n proc._terminationDelay = self.pm.killTime + 1\r\n self.pm.stopProcess(\"foo\")\r\n # If process doesn't die before the killTime, procmon should\r\n # terminate it\r\n self.reactor.advance(self.pm.killTime - 1)\r\n self.assertEqual(0.0, self.pm.timeStarted[\"foo\"])\r\n\r\n self.reactor.advance(1)\r\n # We expect it to be immediately restarted\r\n self.assertEqual(self.reactor.seconds(), self.pm.timeStarted[\"foo\"])",
"def stopProcesses(*args):\n _stopProcessSet(_running)",
"def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)",
"def xmlrpc_stop(self):\n\n if self._proc is None:\n defer.returnValue(Tcpdump.NOT_RUNNING)\n rc = yield self._proc.kill()\n self._proc = None\n if rc:\n info(\"Tcpdump stopped.\")\n defer.returnValue(Tcpdump.STOPPED)\n else:\n warning(\"Stopping of tcpdump failed RC=%s\" %rc)\n defer.returnValue(Tcpdump.STOPPING_FAILED)",
"def stop_wasabi(rpc_user, rpc_pwd, wasabi_proc):\n data = '{\"jsonrpc\":\"2.0\", \"method\":\"stop\"}'\n print('Stopping Wasabi')\n call_rpc(rpc_user, rpc_pwd, data)\n # Checks the Wasabi process indeed quit.\n index = wasabi_proc.expect_exact(['Daemon stopped',\n EOF,\n ], timeout=None)\n if index == 0:\n wasabi_proc.kill(SIGTERM)\n wasabi_proc.wait()\n print('Stopped')\n return\n elif index == 1:\n raise EOFError",
"def test_stopService(self):\n port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)\n port._listen = self.listen\n port.startService()\n stopped = port.stopService()\n stopping = self.ports[0].stopping\n self.failIfIdentical(stopping, None)\n self.assertIdentical(stopped, stopping)",
"def stop_procedure(self):\n pass",
"def test_stopService(self):\n port = self.port(description=u'foo')\n port.startService()\n port.stopService()\n self.assertTrue(self._service.stopped)",
"def stopRPC(time): #Status: WIP\r\n pass",
"def test_stop(self):\n\n message = {\"method\": \"stop\",\n \"params\": {\"elem\": self.container_running}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"stop\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_running\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_stop_status = containers[container_name].lower().find(\"exited\")\n\n self.assertEqual(find_stop_status, 0, \"Container has not stopped\")",
"def stop_subprocesses():\n global message_interface\n global c_library_interface\n if message_interface:\n message_interface.stop()\n if c_library_interface:\n c_library_interface.stop()",
"def _stop_process(self):\n self.stdin_queue.put_nowait(\"quit\")\n ExternalProcess._stop_process(self)",
"def stop(self):\n # print \"process shutdown complete\"",
"def test_removeProcess(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertEqual(len(self.pm.processes), 1)\r\n self.pm.removeProcess(\"foo\")\r\n self.assertEqual(len(self.pm.processes), 0)",
"def stopTest(self, test):",
"def InterfaceClientStop(self, exitCode=200): \n pass",
"def test_stopServiceCancelRestarts(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the processes\r\n self.reactor.advance(self.pm.threshold)\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n\r\n self.reactor.advance(1)\r\n # Kill the process early\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertTrue(self.pm.restart['foo'].active())\r\n self.pm.stopService()\r\n # Scheduled restart should have been cancelled\r\n self.assertFalse(self.pm.restart['foo'].active())",
"def test_stop_application(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n from supvisors.ttypes import ApplicationStates\n # prepare context\n self.supervisor.supvisors.context.applications = {'appli_1': Mock()}\n # get patches\n mocked_stop = self.supervisor.supvisors.stopper.stop_application\n mocked_progress = self.supervisor.supvisors.stopper.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc.stop_application('appli')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: appli', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped application\n application = self.supervisor.supvisors.context.applications['appli_1']\n application.state = ApplicationStates.STOPPED\n with self.assertRaises(RPCError) as exc:\n rpc.stop_application('appli_1')\n self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)\n self.assertEqual('NOT_RUNNING: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running application\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,\n ApplicationStates.STARTING]:\n application.state = appli_state\n # test no wait and done\n mocked_stop.return_value = True\n result = rpc.stop_application('appli_1', False)\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(application)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test wait and done\n mocked_stop.return_value = True\n result = rpc.stop_application('appli_1')\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(application)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test wait and not done\n mocked_stop.return_value = False\n result = rpc.stop_application('appli_1')\n # result is a function\n self.assertTrue(callable(result))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(application)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, result())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and application not running\n mocked_progress.return_value = False\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,\n ApplicationStates.STARTING]:\n with self.assertRaises(RPCError) as exc:\n result()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and application running\n application.state 
= ApplicationStates.STOPPED\n self.assertTrue(result())\n self.assertEqual([call()], mocked_progress.call_args_list)\n # reset patches for next loop\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n mocked_progress.reset_mock()",
"def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False",
"def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:",
"def _StopMonitoringProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n pid = process.pid\n\n self._RaiseIfNotMonitored(pid)\n\n del self._process_information_per_pid[pid]\n\n rpc_client = self._rpc_clients_per_pid.get(pid, None)\n if rpc_client:\n rpc_client.Close()\n del self._rpc_clients_per_pid[pid]\n\n if pid in self._rpc_errors_per_pid:\n del self._rpc_errors_per_pid[pid]\n\n logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(\n process.name, pid))",
"def remote_kill():",
"def stop_process(check_id, storage, processes):\n\n processes[storage[check_id]['pid']].terminate()\n os.wait()\n del processes[storage[check_id]['pid']]",
"def process_test_stop(self, config, results, result_id, db):\n pass",
"def test_terminate_run(self):\n pass",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def stop_test(self, request):\n request.worker.stop_test(request.message.test_id)\n\n return SuccessReply()"
]
| [
"0.7195323",
"0.7141915",
"0.71212816",
"0.7064413",
"0.6944247",
"0.69071764",
"0.6824049",
"0.6727711",
"0.66971034",
"0.6683262",
"0.6661832",
"0.6655495",
"0.6623781",
"0.65811753",
"0.65610534",
"0.6559678",
"0.65566313",
"0.65525216",
"0.65419906",
"0.65386456",
"0.64917463",
"0.64850456",
"0.6479115",
"0.6473213",
"0.6430862",
"0.64193416",
"0.6418493",
"0.64087135",
"0.6404914",
"0.6399083"
]
| 0.7584913 | 0 |
Test the restart_process RPC. | def test_restart_process(self, mocked_check, mocked_stop, mocked_start):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call with sub-RPC calls return a direct result
mocked_stop.return_value = True
mocked_start.return_value = False
deferred = rpc.restart_process(0, 'appli:*', 'arg list', 'wait')
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)
self.assertEqual(0, mocked_start.call_count)
mocked_stop.reset_mock()
mocked_check.reset_mock()
# result is a function
self.assertTrue(callable(deferred))
self.assertTrue(deferred.waitstop)
# test this function
self.assertFalse(deferred())
self.assertFalse(deferred.waitstop)
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual([call(0, 'appli:*', 'arg list', 'wait')], mocked_start.call_args_list)
mocked_start.reset_mock()
# test RPC call with sub_RPC calls returning jobs
# test with mocking functions telling that the jobs are not completed
mocked_stop_job = Mock(return_value=False)
mocked_start_job = Mock(return_value=False)
mocked_stop.return_value = mocked_stop_job
mocked_start.return_value = mocked_start_job
deferred = rpc.restart_process(0, 'appli:*', '', 'wait')
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)
self.assertEqual(0, mocked_start.call_count)
mocked_stop.reset_mock()
# result is a function for deferred result
self.assertTrue(callable(deferred))
self.assertTrue(deferred.waitstop)
# test this function
self.assertEqual(0, mocked_stop_job.call_count)
self.assertEqual(0, mocked_start_job.call_count)
self.assertEqual(NOT_DONE_YET, deferred())
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual(0, mocked_start.call_count)
self.assertEqual([call()], mocked_stop_job.call_args_list)
self.assertEqual(0, mocked_start_job.call_count)
mocked_stop_job.reset_mock()
# replace the stop job with a function telling that the job is completed
mocked_stop_job.return_value = True
self.assertEqual(NOT_DONE_YET, deferred())
self.assertFalse(deferred.waitstop)
self.assertEqual(0, mocked_stop.call_count)
self.assertEqual([call(0, 'appli:*', '', 'wait')], mocked_start.call_args_list)
self.assertEqual([call()], mocked_stop_job.call_args_list)
self.assertEqual(0, mocked_start_job.call_count)
mocked_stop_job.reset_mock()
# call the deferred function again to check that the start is engaged
self.assertFalse(deferred())
self.assertEqual([call()], mocked_start_job.call_args_list)
self.assertEqual(0, mocked_stop_job.call_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)",
"def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:",
"def test_restart_with_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/restart_test/3')\n test = Test.query.filter(Test.id == 3).first()\n self.assertEqual(test.finished, False)",
"def test_workflows_restart(self):\n pass",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)",
"def restart(reason, *args, **kwargs):\n logging.info(\"Restarting: %s\" % reason)\n os.execv(sys.argv[0], sys.argv)",
"def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, 
mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def test_stopServiceCancelRestarts(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the processes\r\n self.reactor.advance(self.pm.threshold)\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n\r\n self.reactor.advance(1)\r\n # Kill the process early\r\n self.pm.protocols[\"foo\"].processEnded(Failure(ProcessDone(0)))\r\n self.assertTrue(self.pm.restart['foo'].active())\r\n self.pm.stopService()\r\n # Scheduled restart should have been cancelled\r\n self.assertFalse(self.pm.restart['foo'].active())",
"def rstrtmgr_RmRestart(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\", \"dwRestartFlags\", \"fnStatus\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def test_install_restart():\n mock_out = {\n \"Success\": True,\n \"RestartNeeded\": 1,\n \"FeatureResult\": [\n {\n \"Id\": 338,\n \"Name\": \"XPS-Viewer\",\n \"DisplayName\": \"XPS Viewer\",\n \"Success\": True,\n \"RestartNeeded\": True,\n \"Message\": \"\",\n \"SkipReason\": 0,\n }\n ],\n \"ExitCode\": 0,\n }\n expected = {\n \"ExitCode\": 0,\n \"RestartNeeded\": True,\n \"Restarted\": True,\n \"Features\": {\n \"XPS-Viewer\": {\n \"DisplayName\": \"XPS Viewer\",\n \"Message\": \"\",\n \"RestartNeeded\": True,\n \"SkipReason\": 0,\n \"Success\": True,\n }\n },\n \"Success\": True,\n }\n\n mock_reboot = MagicMock(return_value=True)\n with patch(\"salt.utils.win_pwsh.run_dict\", return_value=mock_out), patch.dict(\n win_servermanager.__salt__, {\"system.reboot\": mock_reboot}\n ):\n result = win_servermanager.install(\"XPS-Viewer\", restart=True)\n mock_reboot.assert_called_once()\n assert result == expected",
"def reload_test(test_name):\n sudo(\"restart %s\" % test_name)",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"def repl_restart(restart: bool = True) -> None:",
"def restart(service):\n # TODO: replace this with your relevant restart logic\n assert service.isalpha()\n run(\"service\", service, \"restart\")",
"def reboot(self,request):\n\t\tresult = True\n\t\tPopen(['/sbin/reboot']) # that's all\n\t\tself.finished(request.id,result)",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def test_hostmgr_restart_job_succeeds(self, failure_tester):\n job = failure_tester.job(job_file=\"test_job_no_container.yaml\")\n job.create()\n\n # Restart immediately, so that tasks will be in various\n # stages of launch\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n job.wait_for_state()",
"def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret",
"def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)",
"def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])",
"def _restart(self):\n pass",
"def cluster_restart(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.restart(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster restart failed\")",
"def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])",
"def restart():\n stop()\n start()",
"def test_stop_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_stop = self.supervisor.supvisors.stopper.stop_process\n mocked_progress = self.supervisor.supvisors.stopper.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'stopped.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.stop_process('appli_1')\n self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)\n self.assertEqual('NOT_RUNNING: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'stopped.return_value': False}),\n Mock(**{'stopped.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.stop_process('appli_1')\n self.assertEqual(Faults.NOT_RUNNING, exc.exception.code)\n self.assertEqual('NOT_RUNNING: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': True,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_stop.return_value = False\n result = rpc.stop_process('appli:*', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(proc_1), call(proc_2)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test RPC call no wait and done\n mocked_stop.return_value = True\n result = rpc.stop_process('appli:*', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(proc_1), call(proc_2)],\n mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test RPC call with wait and done\n result = rpc.stop_process('appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(proc_1), call(proc_2)],\n mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_stop.reset_mock()\n # test RPC call with wait and not done\n mocked_stop.return_value = False\n deferred = rpc.stop_process('appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(proc_1), call(proc_2)],\n mocked_stop.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, 
deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still running\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process stopped\n proc_1.running.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)",
"def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))"
]
| [
"0.74959534",
"0.66252",
"0.6596089",
"0.65471625",
"0.6435779",
"0.64335114",
"0.6431384",
"0.6397377",
"0.6390887",
"0.638588",
"0.63406944",
"0.63202333",
"0.63175344",
"0.63125736",
"0.62787807",
"0.62722135",
"0.62677217",
"0.6243573",
"0.6206397",
"0.62012064",
"0.6199434",
"0.61900556",
"0.61833805",
"0.6170403",
"0.6141506",
"0.6128153",
"0.610607",
"0.60990494",
"0.6056413",
"0.6031177"
]
| 0.69359463 | 1 |
Test the restart RPC. | def test_restart(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call
self.assertTrue(rpc.restart())
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call()],
self.supervisor.supvisors.fsm.on_restart.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def test_workflows_restart(self):\n pass",
"def request_shutdown(self, restart=False):",
"def test_restart_with_permission(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n with self.app.test_client() as c:\n response = c.post(\n '/account/login', data=self.create_login_form_data(self.user.email, self.user.password))\n response = c.get('/test/restart_test/3')\n test = Test.query.filter(Test.id == 3).first()\n self.assertEqual(test.finished, False)",
"def restart(self) -> None:",
"def _restart(self):\n pass",
"def restart():\n stop()\n start()",
"def test_for_restart(self):\n #print(\"entering test_for_restart()\")\n self._compile_result(\"UserName: \" + self.user_name)\n #if self.first_song == True:\n # msg = \"Welcome_str\"\n # self.first_song = False\n if self._feedback_plat == \"RIVA\":\n self._RIVA_message_num += 1\n text_to_RIVA(\"Welcome_str\")\n else:# self._feedback_plat == \"Text\":\n self._RIVA_message_num += 1\n #to_no_voice_log(\"NewData:{};TTS:{}\".format(self._RIVA_message_num, msg))\n ###else:\n ### text_to_ispeech(ispeech_formatter(msg))\n while True:\n self._check_completion()\n if self._song_over is False:\n print(\"Song Started\")\n if self._feedback_plat == \"RIVA\":\n reset_RIVA_log()\n self._RIVA_message_num += 1\n #text_to_RIVA(self.response_welcome())\n elif self._feedback_plat == \"Text\":\n to_no_voice_log((emo_less_feedback(0, 0, 0)))\n self.execute_song()\n print(\"Re-entered Test_for_restart()\")\n if self._song_over is True:\n interface_info = gather_info(\n parse_csv(read_csv(CSV_functions.MUSICGLOVE)))\n #print(interface_info)\n #print(\"Song_over min/max = \", min_max)\n self.user_stats.set_grips(interface_info)\n self._compile_result(grip_avg_summary_str(interface_info))\n evaluated_info = [evaluate_worst_grip(interface_info, self._last_worst_grip),\n evaluate_best_grip(interface_info)]\n summary = summary_generator(evaluated_info[0], evaluated_info[1])\n if self._feedback_plat == \"RIVA\":\n self._RIVA_message_num += 1\n #print(\"message_num={} summary={}\".format(self._message_num, summary))\n text_to_RIVA(summary)\n else:# self._feedback_plat == \"Text\":\n to_no_voice_log(emo_less_feedback(self._RIVA_message_num, evaluated_info[0], evaluated_info[1]))\n ###else:\n ### text_to_ispeech(ispeech_formatter(summary))\n self._last_30_sec = []\n self._compile_result(summary)\n self._csv_result.extend(read_csv(CSV_functions.MUSICGLOVE))\n make_csv(self._csv_result, CSV_functions.M_GLOVE_SUMMARIES, what_song(self._grip_count))\n self.__init__(self.user_name, restart=True)\n self._RIVA_message_num = 1\n else:\n pass\n return",
"def repl_restart(restart: bool = True) -> None:",
"def restart(self):",
"def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret",
"def rstrtmgr_RmRestart(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\", \"dwRestartFlags\", \"fnStatus\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)",
"def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def test_install_restart():\n mock_out = {\n \"Success\": True,\n \"RestartNeeded\": 1,\n \"FeatureResult\": [\n {\n \"Id\": 338,\n \"Name\": \"XPS-Viewer\",\n \"DisplayName\": \"XPS Viewer\",\n \"Success\": True,\n \"RestartNeeded\": True,\n \"Message\": \"\",\n \"SkipReason\": 0,\n }\n ],\n \"ExitCode\": 0,\n }\n expected = {\n \"ExitCode\": 0,\n \"RestartNeeded\": True,\n \"Restarted\": True,\n \"Features\": {\n \"XPS-Viewer\": {\n \"DisplayName\": \"XPS Viewer\",\n \"Message\": \"\",\n \"RestartNeeded\": True,\n \"SkipReason\": 0,\n \"Success\": True,\n }\n },\n \"Success\": True,\n }\n\n mock_reboot = MagicMock(return_value=True)\n with patch(\"salt.utils.win_pwsh.run_dict\", return_value=mock_out), patch.dict(\n win_servermanager.__salt__, {\"system.reboot\": mock_reboot}\n ):\n result = win_servermanager.install(\"XPS-Viewer\", restart=True)\n mock_reboot.assert_called_once()\n assert result == expected",
"def IntrumentFailHook(self):\n #Restart iserver\n #If failed to restart\n #\treturn fail\n pass",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def test_restart(self):\n\n first_session_id = self._open_session()\n\n self.restart(\"iml-http-agent\")\n\n # If we try to continue our session, it will tell us to terminate\n response = self._get()\n self.assertResponseOk(response)\n forwarded_messages = response.json()[\"messages\"]\n self.assertEqual(len(forwarded_messages), 1)\n self.assertDictEqual(\n forwarded_messages[0],\n {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"SESSION_TERMINATE_ALL\",\n \"plugin\": None,\n \"session_seq\": None,\n \"session_id\": None,\n \"body\": None,\n },\n )\n\n # And we can open a new session which will get a new ID\n second_session_id = self._open_session(expect_initial=False)\n self.assertNotEqual(first_session_id, second_session_id)",
"def reload_test(test_name):\n sudo(\"restart %s\" % test_name)",
"def restart(self):\n\t\treturn self.reset().start()",
"def test_restart_process(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_process(0, 'appli:*', 'arg list', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', 'arg list','wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_process(0, 'appli:*', '', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', '', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)",
"def restart(self):\r\n pass",
"def restart(self):\n pass",
"def restart(config):\n shutdown(config)\n startup(config)\n return",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')",
"def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)"
]
| [
"0.7226226",
"0.7071774",
"0.7053413",
"0.70081186",
"0.7006562",
"0.698987",
"0.69765115",
"0.69687736",
"0.6965393",
"0.69375616",
"0.68979055",
"0.6863509",
"0.68230516",
"0.6762587",
"0.67550564",
"0.67488146",
"0.67346156",
"0.66920686",
"0.6683994",
"0.6678042",
"0.6675017",
"0.6668436",
"0.66528183",
"0.66411626",
"0.6630546",
"0.66252285",
"0.6617043",
"0.66000015",
"0.65768623",
"0.6567235"
]
| 0.8111426 | 0 |
Test the shutdown RPC. | def test_shutdown(self, mocked_check):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test RPC call
self.assertTrue(rpc.shutdown())
self.assertEqual([call()], mocked_check.call_args_list)
self.assertEqual([call()],
self.supervisor.supvisors.fsm.on_shutdown.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_shutdown(self):\n server, client = loopback()\n assert not server.shutdown()\n assert server.get_shutdown() == SENT_SHUTDOWN\n with pytest.raises(ZeroReturnError):\n client.recv(1024)\n assert client.get_shutdown() == RECEIVED_SHUTDOWN\n client.shutdown()\n assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)\n with pytest.raises(ZeroReturnError):\n server.recv(1024)\n assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)",
"def testServerShutdown(self):\n d = self.testSimpleRequest()\n d.addCallback(lambda _: self.factory.shutdown())\n d.addCallback(lambda _: self.listeningPort.stopListening())\n d.addCallback(lambda _: self.client.check_rate_limit())\n d.addTimeout(0.01, reactor)\n return self.assertFailure(d, ConnectionClosed)",
"def shutdown():\n\n cmd = dict()\n cmd[\"type_\"] = \"shutdown\"\n cmd[\"name_\"] = \"all\"\n\n ## In case of the shutdown there will be no returned message to\n ## check the success.\n s = comm.send_and_receive_socket(cmd)\n\n s.close()",
"def _shutdown(self):",
"def shutdown(self) -> None:",
"def shutdown(self) -> None:",
"def shutdown(self):\n ...",
"def test_shutdown_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as exc:\n server.shutdown()\n if platform == \"win32\":\n assert exc.value.args[0] == ESHUTDOWN\n else:\n assert exc.value.args[0] == EPIPE",
"def shutdown(self):\n\n raise NotImplementedError",
"def shutdown(self):",
"async def shutdown(self) -> int:",
"def initiate_shutdown(self) -> None:",
"def request_shutdown(self, restart=False):",
"def test_shutdown(self):\n server = self._server(None)\n server.bio_shutdown()\n with pytest.raises(Error) as err:\n server.recv(1024)\n # We don't want WantReadError or ZeroReturnError or anything - it's a\n # handshake failure.\n assert type(err.value) in [Error, SysCallError]",
"def rpc_shutdown(self):\n\t\tshutdown_thread = threading.Thread(target=self.server.shutdown)\n\t\tshutdown_thread.start()\n\t\treturn",
"def shutdown_server(self):\n try:\n ans = self.xmlproxy.shutdown()\n except socket_error as err:\n self.class_logger.info(\"xmlrpc shutdown complete. (DEBUG: {0})\".format(err))\n except XmlrpcProtocolError as err:\n self.class_logger.info(\"xmlrpc shutdown complete. (DEBUG: {0})\".format(err))\n except Exception as err:\n self.class_logger.info(\"xmlrpc shutdown expected error: {0} - {1}\".format(type(err), err))\n else:\n self.class_logger.info(\"xmlrpc shutdown query answer: %s\" % (ans, ))\n # except socket.error, err:\n # if err[0] == 111:\n # print \"!\"*100\n # print \"ERR '{0}' handled\".format(err)\n # else:\n # raise",
"def shutdown():\n shutdown_server()\n return \"Shutting down server\"",
"def shutdown():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><shutdown><system></system></shutdown></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"async def shutdown(self):",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def shutdown(self):\n pass",
"def server_shutdown():\n if not current_app.testing:\n abort(404)\n shutdown = request.environ.get('werkzeug.server.shutdown')\n if not shutdown:\n abort(500)\n shutdown()\n return 'Shutting down...'",
"def shutdown_callback():\n pass",
"def shutdown(self):\n self._shutdown_requested_event.set()\n SimpleJSONRPCServer.SimpleJSONRPCServer.shutdown(self)\n logging.info('Server shutdown complete')",
"def shutdown(self):\n\n pass",
"def shutdown(self):\n self.action('shutdown')",
"def test_shutdown(self):\n hook = SignalHook(self.test_extension, self.signal,\n self._on_signal_fired)\n hook.disable_hook()\n\n self.assertEqual(len(self._on_signal_fired.calls), 0)\n self.signal.send(self)\n self.assertEqual(len(self._on_signal_fired.calls), 0)",
"def Shutdown(self):\n pass",
"def Shutdown(self):\n pass"
]
| [
"0.783223",
"0.77352977",
"0.77224475",
"0.7679559",
"0.76429117",
"0.76429117",
"0.7637141",
"0.7588095",
"0.7582137",
"0.7575391",
"0.75700086",
"0.75486475",
"0.7517524",
"0.7471619",
"0.74646",
"0.7327999",
"0.73250216",
"0.73169494",
"0.7295251",
"0.72056586",
"0.72056586",
"0.72056586",
"0.72001547",
"0.71922207",
"0.71743387",
"0.7123922",
"0.711921",
"0.7107932",
"0.7103995",
"0.7103995"
]
| 0.80968934 | 0 |
Test the _check_state utility. | def test_check_state(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.fsm.state = 1
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test there is no exception when internal state is in list
rpc._check_state([0, 1, 2])
# test there is an exception when internal state is not in list
with self.assertRaises(RPCError) as exc:
rpc._check_state([0, 2])
self.assertEqual(Faults.BAD_SUPVISORS_STATE, exc.exception.code)
self.assertEqual("BAD_SUPVISORS_STATE: Supvisors (state=DEPLOYMENT) "
"not in state ['INITIALIZATION', 'OPERATION'] to perform request",
exc.exception.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_state(self):\n pass",
"def testCheck(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n # Run through all good state transitions and assert that they work\n for state in self.transitions:\n for dest in self.transitions[state]:\n change.check(dest, state)\n dummystates = ['dummy1', 'dummy2', 'dummy3', 'dummy4']\n\n # Then run through some bad state transistions and assertRaises(AssertionError)\n for state in self.transitions:\n for dest in dummystates:\n self.assertRaises(AssertionError, change.check, dest, state)\n return",
"def check(s):\n s.checkState()",
"def test_verify_state_of_a_device():",
"def _verify(\n hass,\n expected_state,\n expected_percentage,\n expected_oscillating,\n expected_direction,\n expected_preset_mode,\n):\n state = hass.states.get(_TEST_FAN)\n attributes = state.attributes\n assert state.state == str(expected_state)\n assert attributes.get(ATTR_PERCENTAGE) == expected_percentage\n assert attributes.get(ATTR_OSCILLATING) == expected_oscillating\n assert attributes.get(ATTR_DIRECTION) == expected_direction\n assert attributes.get(ATTR_PRESET_MODE) == expected_preset_mode",
"def state_failsafe_validate(cfg, app, win, events):",
"def test_update_state(self):\n pass",
"def been_there(state, check_dict, check):\r\n \r\n key = str(state)\r\n if key in check_dict:\r\n return True\r\n else:\r\n if check:\r\n check_dict[key] = True\r\n return False",
"def test_update_state2(self):\n pass",
"def check_state(self):\n import params\n Logger.info(\"--- Check state of HAWQ cluster ---\")\n try:\n command = \"source {0} && hawq state -d {1}\".format(hawq_constants.hawq_greenplum_path_file, params.hawq_master_dir)\n Logger.info(\"Executing hawq status check...\")\n (retcode, out, err) = exec_ssh_cmd(self.active_master_host, command)\n if retcode:\n Logger.error(\"SERVICE CHECK FAILED: hawq state command returned non-zero result: {0}. Out: {1} Error: {2}\".format(retcode, out, err))\n raise Fail(\"Unexpected result of hawq state command.\")\n Logger.info(\"Output of command:\\n{0}\".format(str(out) + \"\\n\"))\n except:\n self.checks_failed += 1",
"def test_check_failed_highstate(self):\n self.assertEqual(self.checkredis.check_failed_highstate(\"aw1-php70-qa\", \"01\"), False)",
"def test_get_state(self):\n\n # test that you can get a state by numerical id\n mdp = MDP()\n mdp.add_state(0)\n self.assertEquals(type(mdp.get_state(0)), State)\n self.assertIn(mdp.get_state(0), mdp.get_state_list())",
"def check_device_state(self):",
"def test_state_after_failure(self):\n pass",
"def test_pep8_conformance_test_state(self):\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['tests/test_models/test_state.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors in state.py.\")",
"def check():",
"def _assert_state(self, state_dict):\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 1)\n\n if 'vm_state' in state_dict:\n self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])\n if 'task_state' in state_dict:\n self.assertEqual(state_dict['task_state'],\n instances[0]['task_state'])\n if 'power_state' in state_dict:\n self.assertEqual(state_dict['power_state'],\n instances[0]['power_state'])",
"def check_state(self, username, descriptor, expected_score, expected_max_score, expected_attempts):\r\n module = self.get_student_module(username, descriptor)\r\n self.assertEqual(module.grade, expected_score)\r\n self.assertEqual(module.max_grade, expected_max_score)\r\n state = json.loads(module.state)\r\n attempts = state['attempts']\r\n self.assertEqual(attempts, expected_attempts)\r\n if attempts > 0:\r\n self.assertTrue('correct_map' in state)\r\n self.assertTrue('student_answers' in state)\r\n self.assertGreater(len(state['correct_map']), 0)\r\n self.assertGreater(len(state['student_answers']), 0)",
"def test_update_state1(self):\n pass",
"def state_wait_validate(cfg, app, win, events):",
"def _check_random_state(seed):\n return check_random_state(seed)",
"def verify_winning_state(self, state):\n return self.game.verify_winning_state(state)",
"def test_update_state3(self):\n pass",
"def test_block_bad_state(self):\n pass",
"def test_update_state4(self):\n pass",
"def test_get_node_state(self):\n pass",
"def test_check(self):\n return self._testCheck()",
"def test_create_state(self):\n state = State()\n self.assertTrue(isinstance(state, State))",
"def test_get_node_state_smartfail(self):\n pass",
"def testStateRequest(self):\n pkt = struct.pack('<')\n self.mgr.sendState = Mock()\n self.mgr.handlePacket(app_packet.GOPRO_REQUEST_STATE, pkt)\n self.mgr.sendState.assert_called_with()"
]
| [
"0.79675",
"0.71266323",
"0.70088804",
"0.6942538",
"0.6725998",
"0.6645437",
"0.6645181",
"0.65727943",
"0.6550575",
"0.6542334",
"0.6542207",
"0.6537113",
"0.64828724",
"0.6481464",
"0.6468",
"0.6467975",
"0.64562637",
"0.6441143",
"0.64320195",
"0.64166635",
"0.6395076",
"0.63837904",
"0.6374331",
"0.6303349",
"0.62937933",
"0.6270527",
"0.62213945",
"0.62104917",
"0.6206217",
"0.61988205"
]
| 0.73738015 | 1 |
Test the _check_operating_conciliation utility. | def test_check_operating_conciliation(self):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test the call to _check_state
with patch.object(rpc, '_check_state') as mocked_check:
rpc._check_operating_conciliation()
self.assertListEqual([call([2, 3])], mocked_check.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_check_operating(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating()\n self.assertListEqual([call([2])], mocked_check.call_args_list)",
"def test_check_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_conciliation()\n self.assertListEqual([call([3])], mocked_check.call_args_list)",
"def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)",
"def test_calculate_contract_fee(a, b, expected):\n assert calculate_contract_fee(a, b) == expected",
"def test_check_cost():",
"def testConsistency(self):",
"def test_apply_endorsements(self):",
"def test_calculate_all_operations(self):\n result = self.calcuate.calcuate('11-2+4x3-5')\n expected_result = \"16\"\n self.assertEqual(expected_result, result)",
"def test_device_creation(self):\n try:\n test_adc = ads1115_differential(standard_params)\n except AssertionError:\n self.fail(\"The standard parameters threw an assertion error.\")\n\n try:\n test_adc = ads1115_differential(assume_defaults)\n except AssertionError:\n self.fail(\"Assuming defaults threw an error.\")\n\n try:\n test_adc = ads1115_differential(check_two_thirds)\n except AssertionError:\n self.fail(\"Using a gain of 2/3 threw an error.\")\n\n try:\n test_adc = ads1115_differential(unfinished_list)\n except AssertionError:\n # An assertion error is expected. Catch it and move on.\n pass\n\n try:\n test_adc = ads1115_differential(invalid_channel)\n except AssertionError:\n # An assertion error is expected. Catch it and move on.\n pass\n\n try:\n test_adc = ads1115_differential(invalid_gain)\n except AssertionError:\n # An assertion error is expected. Catch it and move on.\n pass\n\n try:\n test_adc = ads1115_differential(invalid_address)\n except AssertionError:\n # An assertion error is expected. Catch it and move on.\n pass\n\n try:\n test_adc = ads1115_differential(invalid_busnum)\n except AssertionError:\n # An assertion error is expected. Catch it and move on.\n pass",
"def test_change_provisioned_throughput_usual_case():",
"def test_calculate_working_days():\n assert (\n calculate_working_days(parse('2020-01-01'), parse('2020-03-31')) == 64\n )",
"def test_conditions(self):\n if not CalculatorUtils.clear_calc(self.device):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()",
"def test_hotshot_check_date(self):\n date_first = check_date('2015-11-03 13:21:02.071381', '03.11.2015', '20.11.2015')\n date_second = check_date('2015-11-03 13:21:02.071381', '01.11.2015', '02.11.2015')\n\n self.assertTrue(date_first)\n self.assertFalse(date_second)",
"def test_pm_Completeness(self):\n pass",
"def test_callPrice(self):\n call_price1 = calculator.BlackScholes.call_price(**self.params_1)\n call_price2 = calculator.BlackScholes.call_price(**self.params_2)\n self.assertAlmostEqual(call_price1,10.45,delta=0.01)\n self.assertAlmostEqual(call_price2,7.965,delta=0.01)",
"def test_check_sun_above_horizon():\n pass",
"def test_conciliate(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # set context and patches\n self.supervisor.supvisors.fsm.state = 3\n self.supervisor.supvisors.context.conflicts.return_value = [1, 2, 4]\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n with patch('supvisors.rpcinterface.conciliate_conflicts') as mocked_conciliate:\n # test RPC call with wrong strategy\n with self.assertRaises(RPCError) as exc:\n self.assertTrue(rpc.conciliate('a strategy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: a strategy', exc.exception.text)\n mocked_check.reset_mock()\n # test RPC call with USER strategy\n self.assertFalse(rpc.conciliate(2))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_conciliate.call_count)\n mocked_check.reset_mock()\n # test RPC call with another strategy \n self.assertTrue(rpc.conciliate(1))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(self.supervisor.supvisors, 1, [1, 2, 4])],\n mocked_conciliate.call_args_list)",
"def test_out_of_date(self):\n self.assertTrue(update_available(0.0))",
"def test_e2e_general_mode(self):\n\n cli = \"--balance 1 offline --test\"\n deal = self._run_bot_offine(cli)\n\n self.assertEqual(float(deal.data_row[\"balance\"]) * float(deal.data_row[\"_config_share_balance_to_bid\"]),\n float(deal.data_row[\"start-qty\"]))\n\n self.assertEqual(0.8, float(deal.data_row[\"start-qty\"]))\n self.assertEqual(0.03883667000000002, float(deal.data_row[\"result-fact-diff\"]))",
"def test_operate_cyclic_storage(self, on):\n if on is True:\n override = {} # cyclic storage is True by default\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is True\n elif on is False:\n override = {\"run.cyclic_storage\": False}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is False\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n check_warn = check_error_or_warning(\n warning, \"Storage cannot be cyclic in operate run mode\"\n )\n if on is True:\n assert check_warn\n elif on is True:\n assert not check_warn\n assert (\n AttrDict.from_yaml_string(m._model_data.attrs[\"run_config\"]).cyclic_storage\n is False\n )",
"def test_01_base(self):\n # Create/validate PO\n order = self.create_and_validate_po()\n\n # Validate picking\n picking = order.picking_ids[0]\n picking.do_transfer()\n self.assertEqual(picking.state, 'done')",
"def test_validate_wc3(self):\r\n assert self.wc2_tree != 0",
"def test_live_migration_common_check_checking_cpuinfo_fail(self):\n\n dest = 'dummydest'\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n\n # compute service for destination\n s_ref = self._create_compute_service(host=i_ref['host'])\n # compute service for original host\n s_ref2 = self._create_compute_service(host=dest)\n\n # mocks\n driver = self.scheduler.driver\n self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')\n driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)\n self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)\n rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),\n {\"method\": 'compare_cpu',\n \"args\": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\\\n AndRaise(rpc.RemoteError(\"doesn't have compatibility to\", \"\", \"\"))\n\n self.mox.ReplayAll()\n try:\n self.scheduler.driver._live_migration_common_check(self.context,\n i_ref,\n dest,\n False)\n except rpc.RemoteError, e:\n c = (e.message.find(_(\"doesn't have compatibility to\")) >= 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def test_is_payday_positive2(self):\n date_to_check = date_class(2019,11,1)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2019,11,29)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2019,12,13)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True",
"def _PerformCompare(self, component):\n\n updater_commands = ['/usr/sbin/chromeos-firmwareupdate', '-V']\n content = subprocess.Popen(updater_commands,\n stdout=subprocess.PIPE).stdout.read()\n system_version = self._GetSystemVersion(component, content)\n whitelist_version = self._GetWhitelistVersion(component)\n self.assertEqual(system_version, whitelist_version, msg='%s does not match'\n ' what is in the whitelist.\\n\\tSystem: %s\\n\\tWhitelist: '\n '%s' % (component, system_version, whitelist_version))",
"def test_holidays():\n\n assert not datetime.datetime(2003, 12, 25) in TRADING_DATES\n assert not datetime.datetime(2003, 5, 26) in TRADING_DATES # memorial day",
"def test_is_payday_positive3(self):\n date_to_check = date_class(2020,1,10)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2020,1,24)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True",
"def test_is_payday_positive4(self):\n date_to_check = date_class(2020,10,2)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2020,10,16)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True\n\n date_to_check = date_class(2020,10,30)\n is_payday = self.pay_cycle.is_payday(date_to_check)\n assert is_payday == True",
"def tests_truth():\n circ_m = ccxtest(4)\n print(circ_m)\n circ_m = crootnxtest(4)\n print(circ_m)\n circ_m = oracletest(4)\n print(circ_m)\n circ_m = ccx_otest(4)\n print(circ_m)",
"def check_consistency(self):\n raise NotImplementedError()"
]
| [
"0.6519928",
"0.6440953",
"0.61492294",
"0.590797",
"0.5904521",
"0.5858469",
"0.56378543",
"0.55682796",
"0.5562345",
"0.5540245",
"0.5521831",
"0.5508402",
"0.5452533",
"0.5452016",
"0.54487675",
"0.5446748",
"0.54436237",
"0.54253554",
"0.53988814",
"0.5389293",
"0.5381409",
"0.53739536",
"0.5364806",
"0.5339244",
"0.53060687",
"0.530169",
"0.52980065",
"0.5290991",
"0.5285205",
"0.52721375"
]
| 0.7427168 | 0 |
Test the _check_operating utility. | def test_check_operating(self):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test the call to _check_state
with patch.object(rpc, '_check_state') as mocked_check:
rpc._check_operating()
self.assertListEqual([call([2])], mocked_check.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_check_system_python_api(self):\n\n errors, successes = check_system.check_system()\n self.assertTrue(len(errors) + len(successes) >= 4)",
"def test_check_module(self) -> None:\n check_module(\"os\")",
"def test_check_operating_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating_conciliation()\n self.assertListEqual([call([2, 3])], mocked_check.call_args_list)",
"def test_os_system(self):\n self.assertEqual(self.settings.OS_SYSTEM, platform.system())",
"def pre_checks_os(operating_system, current_directory):\n if operating_system == \"linux\":\n if os.path.exists(os.path.join(current_directory, \"geckodriver\")) is False:\n shutil.move(\n os.path.join(current_directory, \"webdrivers\", \"geckodriver_linux64\"),\n os.path.join(current_directory, \"geckodriver\"),\n )\n time.sleep(5)\n # Same as above, just for a Windows System\n elif operating_system == \"win32\":\n if os.path.exists(os.path.join(current_directory, \"geckodriver.exe\")) is False:\n shutil.move(\n os.path.join(current_directory, \"webdrivers\", \"geckodriver_win64.exe\"),\n os.path.join(current_directory, \"geckodriver.exe\"),\n )\n time.sleep(5)\n # Checks whether CSV file already exists, otherwise creates one\n if os.path.exists(os.path.join(current_directory, \"prices.csv\")) is False:\n csv_file = open(\"prices.csv\", \"a\")\n csv_file.write(\"Time;Device;Low Price;High Price\\n\")\n csv_file.close()\n # Checks whether url txt file already exists, otherwise gives warning\n if os.path.exists(os.path.join(current_directory, \"url.txt\")) is False:\n url_file = open(\"url.txt\", \"w\")\n print(\"\\nWARNING: No URL provided for program, please edit file 'url.txt' first!\\n\")\n url_file.close()\n sys.exit()",
"def test_conditions(self):\n if not CalculatorUtils.clear_calc(self.device):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def test_verify_state_of_a_device():",
"def check_os():\n\n if platform.system() != \"Darwin\":\n print \"This script only works on macos system\"\n exit(1)",
"def _check_requirements(self): # pylint: disable=too-many-branches, too-many-statements\n self._dev_emu = False\n self._is_root = False\n self._is_su = False\n self._alternate_su_binary = False\n\n if not self._device_serial:\n return\n\n if self._adb_available:\n # try some command\n date_res = self._do_adb_command('shell date')\n # adb not authorized\n if date_res and 'device unauthorized' in date_res:\n # kill adb daemon\n utils.do_shell_command('adb kill-server')\n utils.show_message_box(\n 'device not authorized! allow access from this computer on the device'\n )\n\n if date_res and 'no devices/emulators' in date_res:\n self._dev_emu = False\n return\n elif date_res and 'device not found' in date_res:\n self._dev_emu = False\n return\n else:\n self._dev_emu = True\n\n if self._dev_emu and date_res:\n try:\n # if date was fine it should end with year\n # Thu Feb 8 16:47:32 MST 2001\n date_res = date_res.split(' ')\n res_year = int(date_res[len(date_res) - 1])\n except ValueError:\n return # TODO: raise exceptions\n\n # try some su command to check for su binary\n res = self._do_adb_command('shell su -c date')\n if res and 'invalid' in res:\n res = self._do_adb_command('shell su 0 date')\n if res:\n self._alternate_su_binary = True\n\n if res:\n try:\n # if su date was fine it should end with year\n # Thu Feb 8 16:47:32 MST 2001\n su_res = res.split(' ')\n res_year = int(su_res[len(su_res) - 1])\n if res_year:\n # su cmd is available\n self._is_su = True\n\n # check if both date results matches otherwise its no valid result\n res_len = len(su_res)\n date_len = len(date_res)\n if su_res[res_len - 1] == date_res[date_len -\n 1]: # year\n if su_res[res_len - 2] == date_res[date_len -\n 2]: # timezone\n if su_res[res_len - 4] == date_res[date_len -\n 4]: # day\n if su_res[res_len - 5] == date_res[\n date_len - 5]: # month\n self._is_root = True\n\n except ValueError:\n pass\n\n res = self._do_adb_command('shell mount | grep system')\n if '/sbin/.magisk/block/system /' in res:\n self._syspart_name = '/sbin/.magisk/mirror/system'\n if '/system_root' in res:\n self._syspart_name = '/system_root'\n if '/sbin/.magisk/block/system_root /' in res:\n self._syspart_name = '/sbin/.magisk/mirror/system_root'\n\n # check status of selinux\n res = self._do_adb_command('shell getenforce')\n if res:\n res = res.join(res.split())\n if res != 'Permissive' and res != 'Disabled':\n self._do_adb_command('shell setenforce 0')\n\n # nox fix\n res = self.su_cmd('mount -o ro,remount ' + self._syspart_name)\n if res and 'invalid' in res:\n self._alternate_su_binary = True\n\n # no su -> try if the user is already root\n # on some emulators user is root\n if not self._is_su and self._dev_emu:\n res = self._do_adb_command('shell mount -o ro,remount ' + self._syspart_name)\n if res or res == '':\n if res and 'not user mountable' in res:\n # no root user\n self._is_root = False\n elif res == '':\n # cmd executed fine\n self._is_root = True\n else:\n # dont know some other output\n self._is_root = False\n # check for uid 0\n res = self._do_adb_command('shell id')\n # root should be 0\n # https://superuser.com/questions/626843/does-the-root-account-always-have-uid-gid-0/626845#626845\n self._is_root = 'uid=0' in res\n\n if self._dev_emu:\n # get some infos about the device and keep for later\n self._sdk_version = self._do_adb_command(\n 'shell getprop ro.build.version.sdk')\n if self._sdk_version:\n self._sdk_version = self._sdk_version.join(\n self._sdk_version.split()) # cleans '\\r\\n'\n self._android_version = 
self._do_adb_command(\n 'shell getprop ro.build.version.release')\n if self._android_version:\n self._android_version = self._android_version.join(\n self._android_version.split())\n\n try:\n self._oreo_plus = (int(\n self._android_version.split('.')[0]) >= 8)\n except ValueError:\n try:\n self._oreo_plus = (int(self._sdk_version) > 25)\n except ValueError:\n pass\n\n # fix some frida server problems\n # frida default port: 27042\n utils.do_shell_command('adb forward tcp:27042 tcp:27042')\n\n # check if we have pidof\n res = self._do_adb_command('shell pidof -s pidof')\n self._have_pidof = 'not found' not in res\n res = self._do_adb_command('shell killall')\n self._have_killall = 'not found' not in res\n\n # check for correct userid\n if self._is_root:\n res = self.su_cmd('id')\n # root should be 0\n # https://superuser.com/questions/626843/does-the-root-account-always-have-uid-gid-0/626845#626845\n self._is_root = 'uid=0' in res",
"def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values",
"def check_platform():\n if os.getcwd() != os.path.dirname(os.path.abspath(__file__)):\n error = \"must be ran in the directory it's located at\"\n if os.path.sep != '/':\n error = \"a unix-like operating system is required\"\n elif not shutil.which('dpkg-deb'):\n error = \"cannot find dpkg-deb\"\n elif os.getuid() != 0:\n error = \"must be ran as root (or with fakeroot)\"\n else:\n return\n sys.exit(\"{}: error: {}\".format(sys.argv[0], error))",
"def check():",
"def test_device_info_guess_os(properties, expected_os):\n assert DeviceInfo(properties).operating_system == expected_os"
]
| [
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.68196166",
"0.67601526",
"0.6475166",
"0.643593",
"0.6278641",
"0.61664164",
"0.61343205",
"0.6079802",
"0.6052942",
"0.6024452",
"0.6004492",
"0.5957063",
"0.5943251",
"0.59384024",
"0.590067"
]
| 0.6844397 | 0 |
Test the _check_conciliation utility. | def test_check_conciliation(self):
from supvisors.rpcinterface import RPCInterface
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test the call to _check_state
with patch.object(rpc, '_check_state') as mocked_check:
rpc._check_conciliation()
self.assertListEqual([call([3])], mocked_check.call_args_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)",
"def testConsistency(self):",
"def test_check_operating_conciliation(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating_conciliation()\n self.assertListEqual([call([2, 3])], mocked_check.call_args_list)",
"def test_calculate_contract_fee(a, b, expected):\n assert calculate_contract_fee(a, b) == expected",
"def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)",
"def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertFalse(v2 <= v1)",
"def test_compare(self):",
"def testCheck(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n # Run through all good state transitions and assert that they work\n for state in self.transitions:\n for dest in self.transitions[state]:\n change.check(dest, state)\n dummystates = ['dummy1', 'dummy2', 'dummy3', 'dummy4']\n\n # Then run through some bad state transistions and assertRaises(AssertionError)\n for state in self.transitions:\n for dest in dummystates:\n self.assertRaises(AssertionError, change.check, dest, state)\n return",
"def check_consistency(self):\n raise NotImplementedError()",
"def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2.3', name='bar')\n\n self.assertFalse(v1 != v2)\n self.assertFalse(v2 != v1)",
"def test_validate_wc3(self):\r\n assert self.wc2_tree != 0",
"def test_c(self):\n v1 = versions.Version(version='1.2.0', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 < v2)\n self.assertTrue(v2 < v1)",
"def test_check_cost():",
"def test_c(self):\n v1 = versions.Version(version='1.2.0', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 > v2)\n self.assertFalse(v2 > v1)",
"def testEquality(self):\n pass",
"def test_verify_balance_behaviour(self, cred):\n # check the initial balance\n resp = requests.get(balance_url.format(cred[0], cred[1]))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json;charset=UTF-8'\n start_balance = resp.json()['value']\n # now init the verification process\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n resp = requests.get(balance_url.format(cred[0], cred[1]))\n assert resp.status_code == 200\n assert start_balance == resp.json()['value']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']\n resp = requests.get(balance_url.format(cred[0], cred[1]))\n assert resp.status_code == 200\n assert start_balance == resp.json()['value']",
"def check():",
"def generateBroConsistencyCheck(self):\n pass",
"def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)",
"def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='2.2.3', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)",
"def final_check(self, test_collection):\n assert True",
"def test_check_cds_10(self):\n self.cds1.translation_table = 1\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def test_check(self):\n\n self.assertTrue(Naive().check(self.file_gitignore))\n self.assertTrue(Naive().check(self.file_tests))\n self.assertTrue(Naive().check(self.file_bin))\n self.assertTrue(Naive().check(self.file_py))\n self.assertTrue(Naive().check(self.file_authors))",
"def test_eq_true(self):\n self.assertTrue(self.instance == Commit(self.instance.sha))",
"def test_equality(self):\n self.assertEqual(self._version1, self._version1)\n self.assertNotEqual(self._version2, self._version1)\n self.assertEqual(self._version1, PrcsVersion(self._version1))",
"def test_erc20_wrapper__balance_of(\n accounts,\n erc20_wrapper, # pylint: disable=redefined-outer-name\n weth_instance, # pylint: disable=redefined-outer-name\n):\n acc1_original_weth_balance = erc20_wrapper.balance_of.call(accounts[0])\n acc2_original_weth_balance = erc20_wrapper.balance_of.call(accounts[1])\n\n expected_difference = 1 * 10 ** 18\n\n weth_instance.functions.deposit().transact(\n {\"from\": accounts[0], \"value\": expected_difference}\n )\n weth_instance.functions.deposit().transact(\n {\"from\": accounts[1], \"value\": expected_difference}\n )\n acc1_weth_balance = erc20_wrapper.balance_of.call(accounts[0])\n acc2_weth_balance = erc20_wrapper.balance_of.call(accounts[1])\n\n assert (\n acc1_weth_balance - acc1_original_weth_balance == expected_difference\n )\n assert (\n acc2_weth_balance - acc2_original_weth_balance == expected_difference\n )",
"def test_b(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 >= v2)\n self.assertTrue(v2 >= v1)",
"def test_consolidate_differing(self):\n payloads = [\n {\"val\": 123, \"validity\": {\"from\": \"2010-01-01\", \"to\": \"2010-12-31\"}},\n {\n \"val\": 123,\n \"val2\": 456,\n \"validity\": {\"from\": \"2011-01-01\", \"to\": \"2011-12-31\"},\n },\n ]\n\n expected = payloads\n\n actual = los_org.OrgUnitImporter.consolidate_payloads(payloads)\n\n assert expected == actual",
"def check_all(c):",
"def check_validity(self):"
]
| [
"0.68510354",
"0.6846493",
"0.6562092",
"0.63179034",
"0.61420643",
"0.6091703",
"0.60424334",
"0.59893084",
"0.5966891",
"0.59391546",
"0.5938386",
"0.5933276",
"0.59315526",
"0.5929876",
"0.589915",
"0.58848256",
"0.58695066",
"0.581776",
"0.5805792",
"0.57975614",
"0.578763",
"0.5778389",
"0.57598424",
"0.57436746",
"0.5705738",
"0.5656132",
"0.5647213",
"0.5646758",
"0.56377155",
"0.5633174"
]
| 0.7145031 | 0 |
Test the _get_application utility. | def test_get_application(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.applications = {
'appli_1': 'first application'}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test with known application
self.assertEqual('first application', rpc._get_application('appli_1'))
# test with unknown application
with self.assertRaises(RPCError) as exc:
rpc._get_application('app')
self.assertEqual(Faults.BAD_NAME, exc.exception.code)
self.assertEqual('BAD_NAME: application app unknown in Supvisors',
exc.exception.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_current_app():\n\n assert isinstance(application_services.get_current_app(), PyrinUnitTestApplication)",
"def test_duo_application_get(self):\n pass",
"def test_get_app(self):\n settings = SparkSettings()\n self.device.app_name = 'test_app'\n app = self.device.get_app()\n self.assertIsInstance(app, settings.APPS[self.device.app_name])",
"def test_app_exists(self):\n self.assertFalse(current_app is None)",
"def getApp(self):\n raise Exception('Not implemented.')",
"def application():\n yield create_test_application()",
"def test_application_is_singleton():\n\n app = PyrinUnitTestApplication()\n assert app == application_services.get_current_app()",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def test_get_wsgi_application(self):\n application = wsgi.get_wsgi_application()\n\n environ = RequestFactory()._base_environ(\n PATH_INFO=\"/taric_books/\",\n CONTENT_TYPE=\"text/html; charset=utf-8\",\n REQUEST_METHOD=\"GET\"\n )\n\n response_data = {}\n\n def start_response(status, headers):\n response_data[\"status\"] = status\n response_data[\"headers\"] = headers\n\n response = application(environ, start_response)\n\n self.assertEqual(response_data[\"status\"], \"200 OK\")",
"def test_06_applications_2(self):\r\n with self.flask_app.app_context():\r\n self.create()\r\n\r\n res = self.app.get('/app', follow_redirects=True)\r\n assert self.html_title(\"Applications\") in res.data, res.data\r\n assert \"Applications\" in res.data, res.data\r\n assert Fixtures.app_short_name in res.data, res.data",
"def setup_application(self):\n pass",
"def test_app_is_created(app):\n assert app.name == \"myapp.app\"",
"def check_sysapps():\n return sysapps.test",
"def test_valid_app(self):\n # Initialize the app using os.getcwd()\n os.chdir(self.test_app_dir)\n kbase_sdk.init_context.cache_clear()\n context = kbase_sdk.init_context()\n self.assertEqual(context, {\n 'paths': {\n 'root': self.test_app_dir,\n 'config': os.path.join(self.test_app_dir, 'kbase.yaml'),\n 'main_module': os.path.join(self.test_app_dir, 'src', 'main.py'),\n 'src_dir': os.path.join(self.test_app_dir, 'src'),\n 'test_dir': os.path.join(self.test_app_dir, 'test'),\n 'test_main_module': os.path.join(self.test_app_dir, 'test', 'test_main.py')\n },\n 'config': {\n 'module': {\n 'name': 'test_module',\n 'description': 'xyz',\n 'version': '0.0.1',\n 'authors': ['xyz']\n },\n 'narrative_methods': {\n 'my_method': {\n 'input': {\n 'x': {\n 'label': 'label',\n 'type': 'integer'\n },\n 'y': {\n 'label': 'label',\n 'type': 'string',\n 'optional': True\n }\n }\n }\n },\n },\n 'docker_image_name': 'kbase-apps/test_module',\n 'username': 'jayrbolton',\n 'token': 'xyz'\n })",
"def test_11_create_application(self, mock):\r\n # Create an app as an anonymous user\r\n with self.flask_app.app_context():\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.new_application()\r\n assert self.html_title(\"Sign in\") in res.data, res.data\r\n assert \"Please sign in to access this page.\" in res.data, res.data\r\n\r\n # Sign in and create an application\r\n res = self.register()\r\n\r\n res = self.new_application(method=\"GET\")\r\n assert self.html_title(\"Create an Application\") in res.data, res\r\n assert \"Create the application\" in res.data, res\r\n\r\n res = self.new_application(long_description='My Description')\r\n assert \"<strong>Sample App</strong>: Update the application\" in res.data\r\n assert \"Application created!\" in res.data, res\r\n\r\n app = db.session.query(App).first()\r\n assert app.name == 'Sample App', 'Different names %s' % app.name\r\n assert app.short_name == 'sampleapp', \\\r\n 'Different names %s' % app.short_name\r\n\r\n assert app.long_description == 'My Description', \\\r\n \"Long desc should be the same: %s\" % app.long_description",
"def test_app():\n pass",
"def test_10_get_application(self, Mock, mock2):\r\n # Sign in and create an application\r\n with self.flask_app.app_context():\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n self.register()\r\n res = self.new_application()\r\n\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n msg = \"Application: Sample App\"\r\n assert self.html_title(msg) in res.data, res\r\n err_msg = \"There should be a contribute button\"\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status == '200 OK', res.status\r\n self.signout()\r\n\r\n # Now as an anonymous user\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert self.html_title(\"Application: Sample App\") in res.data, res\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n res = self.app.get('/app/sampleapp/settings', follow_redirects=True)\r\n assert res.status == '200 OK', res.status\r\n err_msg = \"Anonymous user should be redirected to sign in page\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Now with a different user\r\n self.register(fullname=\"Perico Palotes\", name=\"perico\")\r\n res = self.app.get('/app/sampleapp', follow_redirects=True)\r\n assert self.html_title(\"Application: Sample App\") in res.data, res\r\n assert \"Start Contributing Now\" in res.data, err_msg\r\n res = self.app.get('/app/sampleapp/settings')\r\n assert res.status == '403 FORBIDDEN', res.status",
"def application(self):\n if not ApplicationFixture._test_app:\n app = self.APP_CLASS()\n app.run_tests()\n ApplicationFixture._test_app = app\n return ApplicationFixture._test_app",
"def test_app_is_testing(self):\n self.assertTrue(current_app.config['TESTING'])",
"def test_app_is_testing(self):\n self.assertTrue(current_app.config['TESTING'])",
"def test_duo_application_list(self):\n pass",
"def test_create_app():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing",
"def _get_app(flask_app):\n flask_app.test_client_class = TestClient\n return flask_app.test_client()",
"def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))",
"def test_get_app_default_app(self):\n settings = SparkSettings()\n self.device.app_name = 'not an app'\n app = self.device.get_app()\n self.assertIsInstance(app, settings.DEFAULT_APP)",
"def test_app():\n # setup\n app = main.create_application()\n app.dependency_overrides[get_settings] = get_settings_override\n with TestClient(app) as test_client:\n yield test_client\n # teardown",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()",
"def test_guest_applications(self):\n self.check_guest_applications()"
]
| [
"0.79300493",
"0.7923467",
"0.7482519",
"0.72564995",
"0.7204716",
"0.7126646",
"0.7114369",
"0.70957977",
"0.70542693",
"0.69555336",
"0.68862665",
"0.6877717",
"0.6860193",
"0.6796819",
"0.67926157",
"0.67229486",
"0.67151153",
"0.66720337",
"0.66657794",
"0.66657794",
"0.6582143",
"0.65663594",
"0.65563834",
"0.65546024",
"0.65521705",
"0.653407",
"0.6518404",
"0.6518404",
"0.6518404",
"0.6518404"
]
| 0.8022673 | 0 |
Test the _get_process utility. | def test_get_process(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.processes = {
'proc_1': 'first process'}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test with known application
self.assertEqual('first process', rpc._get_process('proc_1'))
# test with unknown application
with self.assertRaises(RPCError) as exc:
rpc._get_process('proc')
self.assertEqual(Faults.BAD_NAME, exc.exception.code)
self.assertEqual('BAD_NAME: process proc unknown in Supvisors',
exc.exception.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0",
"def available_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(0)\"], **kwargs)",
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)",
"def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True",
"def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)",
"def test_get_system(self):\n pass",
"def mock_managed_process(\n *unused_args: str, **unused_kwargs: str\n) -> ContextManager[scripts_test_utils.PopenStub]:\n return contextlib.nullcontext(\n enter_result=scripts_test_utils.PopenStub(alive=False))",
"def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)",
"def test_popen(self):\n self.executor.command(['grep', 'foo']).popen()",
"def wait_process_running(process):\n assert process.is_running()",
"def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])",
"def test_startProcessUnknownKeyError(self):\r\n self.assertRaises(KeyError, self.pm.startProcess, \"foo\")",
"def test_basic(self):\n portpicker.PickUnusedPort().AndReturn(2345)\n # As the lock is mocked out, this provides a mox expectation.\n with self.proxy._process_lock:\n safe_subprocess.start_process_file(\n args=['/runtime'],\n input_string=self.runtime_config.SerializeToString(),\n env={'foo': 'bar',\n 'PORT': '2345'},\n cwd=self.tmpdir,\n stderr=subprocess.PIPE).AndReturn(self.process)\n self.proxy._stderr_tee = FakeTee('')\n\n self.mox.ReplayAll()\n self.proxy.start()\n self.assertEquals(2345, self.proxy._proxy._port)\n self.mox.VerifyAll()",
"def test_system_command(self):\n process = Popen(['ubus'],stdout=PIPE)\n stdout, _ = process.communicate()\n self.assertEqual(process.returncode,0)\n self.assertIn(\"This isn't the real ubus. It's a simulator\",stdout.__str__())",
"def test_removeProcess(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertEqual(len(self.pm.processes), 1)\r\n self.pm.removeProcess(\"foo\")\r\n self.assertEqual(len(self.pm.processes), 0)",
"def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))",
"def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())",
"def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})",
"def get_my_process():\n return get_process_object(os.getpid())",
"def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")",
"def test_run_process(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo result > result\n \"\"\")\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert path.isfile(\"result\")",
"def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None",
"def test_without_manager_defined():\n try:\n process = subprocess.check_output(['python', 'runserver.py'],\n env={},\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as error:\n assert error.returncode != 0\n assert 'KeyError: None' in process.output\n assert 'JOB_MANAGER_IMPLEMENTATION' in process.output",
"def test_can_process(self):\n self.assertTrue(self.adapter.can_process(''))",
"def test_process_path(path):\n try:\n subprocess.call([path, \"--version\"])\n return True\n except:\n print(\"Cannot find executable on {}\".format(path))\n return False",
"def unavailable_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(1)\"], **kwargs)",
"def test_basic(self):\n # start()\n # As the lock is mocked out, this provides a mox expectation.\n with self.proxy._process_lock:\n safe_subprocess.start_process_file(\n args=['/runtime'],\n input_string=self.runtime_config.SerializeToString(),\n env={'foo': 'bar'},\n cwd=self.tmpdir,\n stderr=subprocess.PIPE).AndReturn(self.process)\n self.process.poll().AndReturn(None)\n self.process.child_out.seek(0).AndReturn(None)\n self.process.child_out.read().AndReturn('1234\\n')\n self.process.child_out.close().AndReturn(None)\n self.process.child_out.name = '/tmp/c-out.ABC'\n os.remove('/tmp/c-out.ABC').AndReturn(None)\n self.proxy._stderr_tee = FakeTee('')\n\n self.mox.ReplayAll()\n self.proxy.start()\n self.assertEquals(1234, self.proxy._proxy._port)\n self.mox.VerifyAll()",
"def testProcess(self):\n knowledge_base_values = {'current_control_set': u'ControlSet001'}\n test_file_entry = self._GetTestFileEntryFromPath([u'SYSTEM'])\n key_path = u'\\\\ControlSet001\\\\Control\\\\Session Manager\\\\AppCompatCache'\n winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)\n\n event_queue_consumer = self._ParseKeyWithPlugin(\n self._plugin, winreg_key,\n knowledge_base_values=knowledge_base_values,\n file_entry=test_file_entry, parser_chain=self._plugin.plugin_name)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n\n self.assertEqual(len(event_objects), 330)\n\n event_object = event_objects[9]\n\n expected_timestamp = timelib.Timestamp.CopyFromString(\n '2012-04-04 01:46:37.932964')\n self.assertEqual(event_object.timestamp, expected_timestamp)\n\n self.assertEqual(event_object.pathspec, test_file_entry.path_spec)\n # This should just be the plugin name, as we're invoking it directly,\n # and not through the parser.\n self.assertEqual(event_object.parser, self._plugin.plugin_name)\n\n self.assertEqual(event_object.keyname, key_path)\n expected_msg = (\n u'[{0:s}] Cached entry: 10 Path: '\n u'\\\\??\\\\C:\\\\Windows\\\\PSEXESVC.EXE'.format(event_object.keyname))\n\n expected_msg_short = (\n u'Path: \\\\??\\\\C:\\\\Windows\\\\PSEXESVC.EXE')\n\n self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)",
"def test_pick_a_process_to_run(self):\n workflow = self.get_workflow(\n \"\"\"file://C <- file://B\n echo C > C\n echo B creates C\n\nfile://B <- file://A\n echo B > B\n echo A creates B\n \"\"\")\n p = workflow.pick_a_process_to_run()\n assert p.id.find(\"_5\") >= 0, p.id"
]
| [
"0.6831519",
"0.6654932",
"0.6633849",
"0.649761",
"0.6460552",
"0.636034",
"0.6298995",
"0.62798244",
"0.6242188",
"0.6223125",
"0.6124697",
"0.61064655",
"0.61037904",
"0.606647",
"0.6062281",
"0.6037169",
"0.603703",
"0.60273314",
"0.6011603",
"0.59744817",
"0.5972552",
"0.59435296",
"0.5937618",
"0.5934733",
"0.5932792",
"0.5929377",
"0.59185725",
"0.59064966",
"0.58983845",
"0.58859456"
]
| 0.7586149 | 0 |
Test the _get_application_process utility. | def test_get_application_process(self):
from supvisors.rpcinterface import RPCInterface
# prepare context
self.supervisor.supvisors.context.applications = {
'appli_1': 'first application'}
self.supervisor.supvisors.context.processes = {
'appli_1:proc_1': 'first process'}
# create RPC instance
rpc = RPCInterface(self.supervisor)
# test with full namespec
self.assertTupleEqual(('first application', 'first process'),
rpc._get_application_process('appli_1:proc_1'))
# test with applicative namespec
self.assertTupleEqual(('first application', None),
rpc._get_application_process('appli_1:*')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)",
"def test_get_current_app():\n\n assert isinstance(application_services.get_current_app(), PyrinUnitTestApplication)",
"def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')",
"def test_get_application(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first application', rpc._get_application('appli_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_application('app')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: application app unknown in Supvisors',\n exc.exception.text)",
"def test_block_builtin_processes_from_api(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\"],\n \"inputs\": {\n \"stringInput\": \"string\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n },\n \"outputs\": [],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"type\": PROCESS_BUILTIN,\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n resp = mocked_sub_requests(self.app, \"post_json\", \"/processes\", data=body, timeout=5,\n headers=self.json_headers, only_local=True, expect_errors=True)\n # With Weaver<=4.1.x, the 'type' was explicitly checked to block it since Deploy payload was kept as is\n # This field was allowed to trickle all they way down to the instantiation of Process object\n # assert resp.status_code == 200\n\n # With Weaver>4.1.x, the deserialized result from Deploy payload is employed, which drops unknown 'type'\n # Ensure that deploy now succeeds, but the obtained Process is not 'builtin' (just a regular application)\n assert resp.status_code == 201\n assert PROCESS_BUILTIN not in resp.json[\"processSummary\"][\"keywords\"]\n process = self.process_store.fetch_by_id(self._testMethodName)\n assert process.type == PROCESS_APPLICATION",
"def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])",
"def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0",
"def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)",
"def mock_managed_process(\n *unused_args: str, **unused_kwargs: str\n) -> ContextManager[scripts_test_utils.PopenStub]:\n return contextlib.nullcontext(\n enter_result=scripts_test_utils.PopenStub(alive=False))",
"def test_addProcessEnv(self):\r\n fakeEnv = {\"KEY\": \"value\"}\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"], uid=1, gid=2, env=fakeEnv)\r\n self.reactor.advance(0)\r\n self.assertEqual(\r\n self.reactor.spawnedProcesses[0]._environment, fakeEnv)",
"def available_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(0)\"], **kwargs)",
"def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))",
"def test_get_system(self):\n pass",
"def test_application_start():\n\n process = subprocess.Popen(['python', 'runserver.py'],\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE)\n\n assert process.pid\n debug_logging = process.stdout.read(100)\n process.kill()\n assert 'Starting application' in debug_logging",
"def test_without_manager_defined():\n try:\n process = subprocess.check_output(['python', 'runserver.py'],\n env={},\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as error:\n assert error.returncode != 0\n assert 'KeyError: None' in process.output\n assert 'JOB_MANAGER_IMPLEMENTATION' in process.output",
"def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)",
"def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})",
"def test_duo_application_get(self):\n pass",
"def test_startProcessUnknownKeyError(self):\r\n self.assertRaises(KeyError, self.pm.startProcess, \"foo\")",
"def test_removeProcess(self):\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.assertEqual(len(self.pm.processes), 1)\r\n self.pm.removeProcess(\"foo\")\r\n self.assertEqual(len(self.pm.processes), 0)",
"def wait_process_running(process):\n assert process.is_running()",
"def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())",
"def get_my_process():\n return get_process_object(os.getpid())",
"def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')",
"def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def test_application_is_singleton():\n\n app = PyrinUnitTestApplication()\n assert app == application_services.get_current_app()",
"def run_app(self):\n # Update system arguments\n sys.argv[0] = sys.executable\n sys.argv[1] = '{}.py'.format(sys.argv[1])\n\n # Make sure to exit with the return value from the subprocess call\n self._app_process = subprocess.Popen(sys.argv)\n return self._app_process.wait() # returns exit code",
"def run(self, test_no):\n with subprocess.Popen(self._app, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc:\n out, err = proc.communicate(\n input=self._input[test_no].encode(\"utf-8\"))\n \n try:\n return proc.returncode, out.decode(\"utf-8\") if out else None\n except UnicodeDecodeError as e:\n return proc.returncode, \"Serious execution badness:\" + format(e)",
"def application():\n yield create_test_application()"
]
| [
"0.7337078",
"0.6793422",
"0.6699983",
"0.6477558",
"0.62719095",
"0.6189609",
"0.6161677",
"0.6108943",
"0.61022514",
"0.6090383",
"0.6088105",
"0.6086325",
"0.60370046",
"0.6022044",
"0.6007411",
"0.59575355",
"0.5954603",
"0.59493035",
"0.59328574",
"0.59290457",
"0.5911457",
"0.5893581",
"0.58790636",
"0.58781874",
"0.5865822",
"0.58390194",
"0.5804239",
"0.5747853",
"0.573447",
"0.5683098"
]
| 0.75706846 | 0 |