Dataset columns:

    query            string    (9 to 9.05k characters)
    document         string    (10 to 222k characters)
    metadata         dict
    negatives        sequence  (30 items per row)
    negative_scores  sequence  (30 items per row)
    document_score   string    (4 to 10 characters)
    document_rank    string    (2 distinct values)
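For readers who want to poke at rows like the ones below, here is a minimal sketch of loading a local JSON-lines dump of this data with the Hugging Face datasets library; the file name and split are placeholders, not part of this card:

from datasets import load_dataset

# Hypothetical local dump of the rows shown below; point this at your own copy.
ds = load_dataset("json", data_files={"train": "train.jsonl"})["train"]

row = ds[0]
print(row["query"])              # natural-language description of the code
print(len(row["negatives"]))     # 30 hard negatives per row
print(row["document_score"], row["document_rank"])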
Parse special keywords in commits to determine further postcommit actions.
def determine_keywords(self):
    split = dict()
    split['email_cc'] = re.compile("^\s*CC[-_]?MAIL[:=]\s*(.*)")
    split['email_cc2'] = re.compile("^\s*C[Cc][:=]\s*(.*)")
    split['fixed_in'] = re.compile("^\s*FIXED[-_]?IN[:=]\s*(.*)")

    numeric = dict()
    numeric['bug_fixed'] = re.compile("^\s*(?:BUGS?|FEATURE)[:=]\s*(.+)")
    numeric['bug_cc'] = re.compile("^\s*CCBUGS?[:=]\s*(.+)")

    presence = dict()
    presence['email_gui'] = re.compile("^\s*GUI:")
    presence['silent'] = re.compile("(?:CVS|SVN|GIT|SCM).?SILENT")
    presence['notes'] = re.compile("(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')")

    results = defaultdict(list)
    for line in self.commit.message.split("\n"):
        # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off
        # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them
        line = re.sub("^Summary: (.+)", "\g<1>", line)

        # Start processing our keywords...
        for (name, regex) in split.iteritems():
            match = re.match( regex, line )
            if match:
                results[name] += [result.strip() for result in match.group(1).split(",")]
        for (name, regex) in numeric.iteritems():
            match = re.match( regex, line )
            if match:
                results[name] += re.findall("(\d{1,10})", match.group(1))
        for (name, regex) in presence.iteritems():
            if re.match( regex, line ):
                results[name] = True

    self.keywords = results
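For illustration only, a stripped-down sketch of the same keyword-extraction idea applied to a plain message string; the Commit object and hook plumbing from the function above are omitted, and only two of the patterns are kept (Python 3 syntax):

import re
from collections import defaultdict

def extract_keywords(message):
    # Same style of patterns as above, reduced to two for brevity.
    split = {'fixed_in': re.compile(r"^\s*FIXED[-_]?IN[:=]\s*(.*)")}
    numeric = {'bug_fixed': re.compile(r"^\s*(?:BUGS?|FEATURE)[:=]\s*(.+)")}

    results = defaultdict(list)
    for line in message.split("\n"):
        for name, regex in split.items():
            match = regex.match(line)
            if match:
                results[name] += [part.strip() for part in match.group(1).split(",")]
        for name, regex in numeric.items():
            match = regex.match(line)
            if match:
                results[name] += re.findall(r"(\d{1,10})", match.group(1))
    return dict(results)

print(extract_keywords("Fix crash on startup\nBUG: 123456\nFIXED-IN: 5.27.2"))
# {'bug_fixed': ['123456'], 'fixed_in': ['5.27.2']}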
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_commit_message(message):\n # ['closes', 'close', 'fix', ...]\n keywords = []\n [keywords.extend(val) for val in KEYWORDS.values()]\n # we need to sort to match longuest command possible\n keywords.sort(lambda x, y: cmp(len(y), len(x)))\n # 'closes|close|fix...'\n keywords_re = '|'.join(keywords)\n\n # [('refs', 'affinitic', '#1'), ('refs', 'affinitic', '#2')]\n refs = re.findall('(%s)[ ]*([a-z]+)[ ]*([# \\d]*)' % keywords_re,\n message,\n re.IGNORECASE)\n\n parseds = []\n for ref in refs:\n if len(ref) != 3:\n # XXX envoi de mail si 1 < ref < 3 ?\n continue\n\n command = _word_to_command(ref[0])\n trac = ref[1].lower()\n tickets = ref[2]\n\n tickets_split = re.findall('\\d+', tickets)\n for ticket in tickets_split:\n parsed = {}\n parsed[\"command\"] = command\n parsed[\"ticket\"] = ticket\n parsed[\"trac\"] = trac\n parseds.append(parsed)\n\n return parseds", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if 
check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = 
_check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def commits_parsing(query):\n logging.info(\"GET request commit parsing is working\")\n results = {}\n list_of_commits = []\n clear_list_message = []\n clear_list_committer = []\n json_commits = {}\n json_all = {}\n for single_query in query:\n list_of_commits += {single_query[:-6]}\n\n try:\n results = requests.get(single_query[:-6])\n except requests.ConnectionError as exception:\n return f'{exception}'\n\n json_all = results.json()[0]\n\n json_commits = json_all['commit']\n clear_list_message += {json_commits['message']}\n clear_list_committer += {json_commits['committer']['name']}\n\n return clear_list_message, clear_list_committer", "def test_unrecognized_actions_rejected(self):\n # Unexpected whitespace.\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\" git bisect skip c123\")\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect skip c123\")\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect skip c123\")\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\" # git bisect skip c123\")\n # Unrecognized action with commit.\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect foo c123\")\n # Unrecognized action without commit.\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect bar\")", "def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()", "def commit_names(self, commit):\n return []", "def process_event(self):\n if self.event['text'][0] == \"!\":\n self.parse_bang_command()\n\n elif self.event['text'][-2:] in self.valid_suffixes:\n self.parse_suffix_command()", "def test_blog_manual_commit():", "def _post_argument_parsing(self):\n pass", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n 
commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def postparsing_postcmd(self, stop):\n return stop", "def preprocess_post(self, post):\n # tokenize, clean, & tag part-of-speech for all words\n if self.document_level == 'postwise':\n\n doc_text = all_comments_from_post(post)\n # leave early if there's nothing there\n if doc_text == '':\n return []\n\n tokens = nltk.word_tokenize(doc_text)\n # TODO: skip this if there's no POS filtering args!\n tagged = nltk.pos_tag(tokens)\n\n # filter out most invalid words with valid_word()\n processed_document = []\n for word, pos_tag in tagged:\n if self.valid_word(word, pos_tag):\n cleaned_word = self.clean_word(word)\n # things like digits and other junk become empty string,\n # so exclude them from final document\n if cleaned_word:\n 
processed_document.append(cleaned_word)\n # finally, update the post\n post['postwise'] = {'tokens': processed_document, 'text': doc_text}\n self.postman.posts_write.update_one({'_id':post['_id']}, {'$set':post}, upsert=True)\n else:\n raise NotImplementedError('document_level: \"%s\"' % self.document_level)\n\n return processed_document", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation ([email protected])\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics ([email protected])\n |/\n f7a5a23d * missed version number in docs ([email protected])\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n self.assertEqual(event['event']['title'], \"Testing git commits\")", "def handle_commits_published(extension=None, **kwargs):\n review_request = kwargs.get('review_request')\n\n if review_request is None:\n return\n\n commit_data = fetch_commit_data(review_request)\n\n if (not is_pushed(review_request, commit_data) or\n not is_parent(review_request, commit_data)):\n return\n\n # Check the change description and only continue if it contains a change\n # to the commit information. Currently change descriptions won't include\n # information about our extra data field, so we'll look for a change to\n # the diff which is mandatory if the commits changed. TODO: Properly use\n # the commit information once we start populating the change description\n # with it.\n #\n # A change description will not exist if this is the first publish of the\n # review request. In that case we know there must be commits since this\n # is a pushed request.\n cd = kwargs.get('changedesc')\n if (cd is not None and ('diff' not in cd.fields_changed or\n 'added' not in cd.fields_changed['diff'])):\n return\n\n # We publish both the review repository url as well as the landing\n # (\"inbound\") repository url. This gives consumers which perform hg\n # operations the option to avoid cloning the review repository, which may\n # be large.\n repo = review_request.repository\n repo_url = repo.path\n landing_repo_url = repo.extra_data.get('landing_repository_url')\n\n child_rrids = []\n commits = []\n ext_commits = json.loads(commit_data.extra_data.get(COMMITS_KEY, '[]'))\n\n for rev, rrid in ext_commits:\n child_rrids.append(int(rrid))\n commits.append({\n 'rev': rev,\n 'review_request_id': int(rrid),\n 'diffset_revision': None\n })\n\n # In order to retrieve the diff revision for each commit we need to fetch\n # their correpsonding child review request.\n review_requests = dict(\n (obj.id, obj) for obj in\n ReviewRequest.objects.filter(pk__in=child_rrids))\n\n for commit_info in commits:\n # TODO: Every call to get_latest_diffset() makes its own query to the\n # database. 
It is probably possible to retrieve the diffsets we care\n # about using a single query through Django's ORM, but it's not trivial.\n commit_info['diffset_revision'] = review_requests[\n commit_info['review_request_id']\n ].get_latest_diffset().revision\n\n msg = base.GenericMessage()\n msg.routing_parts.append('mozreview.commits.published')\n msg.data['parent_review_request_id'] = review_request.id\n msg.data['parent_diffset_revision'] = review_request.get_latest_diffset().revision\n msg.data['commits'] = commits\n msg.data['repository_url'] = repo_url\n msg.data['landing_repository_url'] = landing_repo_url\n\n # TODO: Make work with RB localsites.\n msg.data['review_board_url'] = get_server_url()\n\n publish_message(extension, msg)", "def calc_lang_features(commits, author):\n\tlang_features = ['/\\*\\*', '\\\\\"\\\\\"\\\\\"', '///', # documentation\n\t\t\t'^\\s*@', 'def.+:.+->', 'using\\s+System\\.ComponentModel\\.DataAnnotations', # assertion\n\t\t\t'assert', 'TODO', 'lambda']\n\n\t# delete contents\n\topen('lang_features.csv', 'w').close()\n\t\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 5 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\n\t\t\t# for each blob modified\n\t\tquery = (\"for x in $(echo \" + commit + \" | ssh da4 ~/lookup/cmputeDiff2.perl); do \" +\n\t\t\t\t# get the chold and parent blob\n\t\t\t\t\"diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');\" +\n\t\t\t\t# if a parent blob does not exist, the author authored all of the content of the file\n\t\t\t\t\"if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then \" +\n\t\t\t\t\t\"echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; \" +\n\t\t\t\t# if a parent blob exists, find the diff, in order to search only the modified lines\n\t\t\t\t\"elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then \" +\n\t\t\t\t\t\"vars=( $diff_blobs );\" +\n\t\t\t\t\t# using bash instead of sh in order to use the process substitution,\n\t\t\t\t\t# to get the modified lines\n\t\t\t\t\t\"/bin/bash -c \\\"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)\" +\n\t\t\t\t\t\t\t\t\" <(echo ${vars[1]} | ~/lookup/showCnt blob)\\\";\" +\n\t\t\t\t\"fi;\" +\n\t\t\t# grep the above practices and discard the lines that were deleted from the parent blob\n\t\t\t# (they start with \">\" in diff)\n\t\t\t\"done | egrep \\\"\" + \"|\".join(lang_features) + \"\\\" | grep -v '^>' | wc -l \")\n\t\tcount_uses = int(bash(query).strip())\n\t\tif count_uses > 0: # good practice feature is used\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\tf = open(\"lang_features.csv\", \"a\")\n\t\t\tprint 'lang_f'\n\t\t\tf.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def parse(self, tokens, pred_tags):\n entities = []\n entity = None\n tag = ''\n for idx, st in enumerate(pred_tags):\n if entity is None:\n if st.startswith('B'):\n entity = {}\n entity['start'] = idx\n tag = st[2:]\n else:\n continue\n else:\n if st == 'O':\n entity['end'] = idx\n name = ''.join(tokens[entity['start']: entity['end']])\n entities.append((name, tag))\n entity = None\n tag = ''\n elif st.startswith('B'):\n entity['end'] = idx\n name = ''.join(tokens[entity['start']: 
entity['end']])\n entities.append((name, tag))\n entity = {}\n entity['start'] = idx\n tag = st[2:]\n else:\n continue\n return entities", "def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks", "def on_commit_comment(self, payload):\n pass", "def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "def _parse_tags(self):\n tokens = self.tags_str[1:].split(\";\")\n self._tags = {\n k.strip(): v\n for token in tokens\n for k, v in [token.split(\"=\")]\n }", "def _validate_commits(pull_request):\n commits = github.get_commits(pull_request[\"commits_url\"])\n analyzed = []\n\n for commit_wrapper in commits:\n commit = {\n \"sha\": commit_wrapper[\"sha\"],\n \"message\": commit_wrapper[\"commit\"][\"message\"],\n }\n\n commit[\"standard\"] = _validate_title(commit[\"message\"])\n analyzed.append(commit)\n\n result = all(commit[\"standard\"] for commit in analyzed)\n return analyzed, result", "def parse_bang_command(self):\n valid_commands = {\n 'help': help.HelpPlugin,\n 'karma': karma.KarmaPlugin,\n 'karma_newest': karma.KarmaNewestPlugin,\n 'karma_top': karma.KarmaTopPlugin,\n 'karma_bottom': karma.KarmaBottomPlugin,\n 'roll': roll.RollPlugin,\n 'quest': quest.QuestPlugin,\n 'log': highlights.HighlightPlugin,\n 'attr': attribute.AttrPlugin,\n }\n\n evt_string = self.event['text']\n cmd_string = evt_string[1:]\n\n try:\n command, arg_string = cmd_string.split(' ', 1)\n except ValueError:\n command, arg_string = cmd_string, \"\"\n\n if command in self.valid_commands.keys():\n plugin = self.valid_commands[command](\n self.event,\n arg_string,\n )\n plugin.run()\n\n else:\n message = \"Sorry, '!{}' is not a valid command.\".format(command)\n self.bot.make_post(self.event, message)", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 
'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def _get_postproc_token(self):\n if self.config[\"postprocessing\"] == \"gatk_post_bam\":\n do_realignment = self.config[\"gatk_post_bam\"][\"do_realignment\"]\n do_recalibration = self.config[\"gatk_post_bam\"][\"do_recalibration\"]\n else:\n do_realignment, do_recalibration = False, False\n realigned_infix = self.config[\"gatk_post_bam\"][\"realigned_infix\"]\n recalibrated_infix = self.config[\"gatk_post_bam\"][\"recalibrated_infix\"]\n return {\n (False, False): \"\",\n (False, True): \".\" + recalibrated_infix,\n (True, False): \".\" + realigned_infix,\n (True, True): \".\" + realigned_infix + \".\" + recalibrated_infix,\n }[(do_realignment, do_recalibration)]", "def parse_cmd(cmd):\n begin_pat = re.compile(r'BEGIN\\s*\\{(.+?)\\}\\s*;?', re.X | re.S)\n end_pat = re.compile(r'END\\s*\\{(.+?)\\}\\s*;?', re.X | re.S)\n normal_pat = re.compile(r'([^{]*)(\\{(.+?)\\})?\\s*;?', re.X | re.S)\n\n # get BEGIN part\n begin = u''\n m = begin_pat.search(cmd)\n if m:\n begin = m.group(1).strip()\n cmd = cmd.replace(m.group(0), u'')\n\n # get END part\n end = u''\n m = end_pat.search(cmd)\n if m:\n end = m.group(1).strip()\n cmd = cmd.replace(m.group(0), u'')\n\n # get NORMAL part\n normal = (u'', u'')\n m = normal_pat.search(cmd)\n if m:\n pattern = m.group(1) or u'' # get u'' if \\1 is None\n action = m.group(3) or u'' # get u'' if \\3 is None\n normal = (pattern.strip(), action.strip())\n\n return (begin, normal, end)", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? 
If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))" ]
[ "0.599186", "0.520402", "0.5114426", "0.5038884", "0.5008906", "0.49850717", "0.49810693", "0.48139718", "0.47817907", "0.46998245", "0.4676912", "0.46327248", "0.46078002", "0.46047723", "0.4595017", "0.45620742", "0.45620742", "0.45582268", "0.45472842", "0.45239753", "0.45174563", "0.44828975", "0.44813994", "0.44729525", "0.44670293", "0.44607785", "0.44483322", "0.44404364", "0.44357005", "0.44285548" ]
0.56862926
1
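The metadata above declares a triplet objective over (query, document, negatives). As a rough sketch, assuming only the row layout shown in this record, each row can be expanded into one (anchor, positive, negative) training triplet per hard negative:

def row_to_triplets(row):
    # One (query, document, negative) triplet per hard negative in the row.
    return [(row["query"], row["document"], neg) for neg in row["negatives"]]

# Toy row with the same fields as above (contents shortened for readability).
example = {
    "query": "Parse special keywords in commits to determine further postcommit actions.",
    "document": "def determine_keywords(self): ...",
    "negatives": ["def parse_commit_message(message): ...", "def _parse_commit_log(base_commit, tip_commit): ..."],
}
for anchor, positive, negative in row_to_triplets(example):
    print(anchor[:30], "|", positive[:30], "|", negative[:30])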
Send the commit notification to CIA. The message is created incrementally using lxml's "E" builder.
def notify(self, builder):
    # Build the <files> section for the template...
    commit = builder.commit
    files = E.files()
    commit_msg = commit.message.strip()
    commit_msg = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', commit_msg)
    for filename in commit.files_changed:
        safe_filename = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', filename)
        file_element = E.file(safe_filename)
        files.append(file_element)

    # Build the message
    cia_message = self.MESSAGE()
    cia_message.append(self._generator)

    source = self.SOURCE(E.project("KDE"))
    source.append(E.module(self.repository.path))
    source.append(E.branch(self.repository.ref_name))
    cia_message.append(source)

    cia_message.append(self.TIMESTAMP(commit.date))

    body = self.BODY()
    commit_data = self.COMMIT()
    commit_data.append(E.author(commit.author_name))
    commit_data.append(E.revision(commit.description))
    commit_data.append(files)
    commit_data.append(E.log(commit_msg))
    commit_data.append(E.url(commit.url))
    body.append(commit_data)
    cia_message.append(body)

    # Convert to a string
    commit_xml = etree.tostring(cia_message)

    # Craft the email....
    message = MIMEText( commit_xml, 'xml', 'utf-8' )
    message['Subject'] = "DeliverXML"
    message['From'] = "[email protected]"
    message['To'] = "[email protected]"

    # Send email...
    self.smtp.sendmail("[email protected]", ["[email protected]"], message.as_string())
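As an aside, the incremental element construction in the function above relies on lxml's E builder (lxml.builder.ElementMaker). Below is a self-contained sketch of that pattern with made-up project and file names and none of the hook or SMTP plumbing:

from lxml import etree
from lxml.builder import E

files = E.files()
files.append(E.file("src/main.cpp"))

cia_message = E.message(
    E.source(E.project("KDE"), E.module("example-repo"), E.branch("master")),
    E.body(
        E.commit(
            E.author("Jane Doe"),
            E.log("Fix build on Windows"),
            files,
        )
    ),
)
print(etree.tostring(cia_message, pretty_print=True).decode())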
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(guid, message):", "def notify(self, id, command, data = None):\n print \"sending:\", id, command, data\n if command == Code.START: data = [id]\n try:\n msg = Message(command = command, data = data)\n self.contacts[id].send(msg.encode())\n except:\n print \"msg failed\"", "def write(self, notification):", "def sendmessage(self):\n \n self.message.parentItem = self.rxtxcontroller.transmittable.rootItem\n self.message.can_id = self.idInput.toPlainText()\n self.message.dlc = self.lengthInput.value()\n self.message.cycle_time = self.cycleInput.toPlainText()\n self.message.time = int(round(time.time() * 1000))\n self.message.rxtx = \"TX\"\n self.message.count = 1\n self.message.data = self.dataInput.toPlainText()\n self.accept()", "def send_notification(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n m1 = Members(\"Richard\", \"Blackmore\", \"14-04-1945\", \"Weston\")\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), None)\n s1.add_resource(b1)\n s1.lending_process(b1, m1)\n s1.send_notification(\"Please return book\")\n self.assertEqual(m1.get_notifications(), \"-Please return boo- \")", "def action_invoice_dian_resend(self):\n self.ensure_one()\n template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)\n compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)\n ctx = dict(\n default_model='account.invoice',\n default_res_id=self.id,\n default_use_template=bool(template),\n default_template_id=template and template.id or False,\n default_composition_mode='comment',\n mark_invoice_as_sent=True,\n )\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form.id, 'form')],\n 'view_id': compose_form.id,\n 'target': 'new',\n 'context': ctx,\n }", "def send_notification(self, signal):\n self.cl.sendInitPresence() \n self.cl.send(xmpp.protocol.Message(self.recipient, str(signal), typ='chat'))", "def _send_notification() -> None:\n send_notification(\n self,\n \"slack:@aaron\",\n \"New {0} Version: {1}\".format(\n self.properties[CONF_APP_NAME], new_version\n ),\n title=\"New Software 💿\",\n )", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def sendNotifyToAgent(self, data):\n self.parent.sendNotifyToAgent(adapterId=self.getAdapterId(), agentName=self.cfg['agent-name'], agentData=data)", "def sendBack( self , message ) :\n self._connection.populateXmlToClient(message)", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )", "def test_notification_cp_email(self):\n # publish the item\n api.content.transition(obj=self.event, transition='publish')\n mailhost = api.portal.get_tool('MailHost')\n self.assertEqual(len(mailhost.messages), 2)\n msg = message_from_string(mailhost.messages[1])\n\n self.assertEqual(msg['To'], CP_LIST_ADDRESS)\n self.assertEqual(\n msg['From'], 'EESTEC International <[email protected]>')\n self.assertEqual(\n msg['Subject'],\n '=?utf-8?q?=5BCP=5D_=5BEVENTS=5D_T=C3=A9st_event?=',\n )\n self.assertIn('a new Event has been published', msg.get_payload())\n self.assertIn('http://nohost/plone/lc/test-event', msg.get_payload())", "async def 
send_cemi(self, cemi: CEMIFrame) -> None:\n # send L_DATA_IND to network, create L_DATA_CON locally for routing\n cemi.code = CEMIMessageCode.L_DATA_IND\n routing_indication = RoutingIndication(raw_cemi=cemi.to_knx())\n\n async with self._flow_control.throttle():\n self._send_knxipframe(KNXIPFrame.init_from_body(routing_indication))\n\n cemi.code = CEMIMessageCode.L_DATA_CON\n self.cemi_received_callback(cemi.to_knx())", "def send(self, event, message):\n pass", "def _create_msg(self, tr_id, payload, confirm, expire_time, encoding):\n tmp = [\"<SSAP_message><transaction_type>INSERT</transaction_type>\",\n \"<message_type>REQUEST</message_type>\"]\n tmp.extend([\"<transaction_id>\", str(tr_id), \"</transaction_id>\"])\n tmp.extend([\"<node_id>\", str(self.node_id), \"</node_id>\"])\n tmp.extend([\"<space_id>\", str(self.targetSS), \"</space_id>\"])\n tmp.extend(['<parameter name=\"insert_graph\" encoding=\"%s\">' % encoding.upper(),\n str(payload), \"</parameter>\"])\n tmp.extend(['<parameter name = \"confirm\">',\n str(confirm).upper(),\n \"</parameter>\",\n \"</SSAP_message>\"])\n return \"\".join(tmp)", "def send_counterparty(self) -> None:\n object_ = self.objects[0]\n ticket_text = ''\n if 'сб' in object_.counterparty_name.lower() and self.keyword == 'closing':\n # order_id = sberinkas.main(\n # object_.object_SAP_code,\n # object_.object_address,\n # object_.lat,\n # object_.lon\n # )\n # ticket_text = f\"<br>Номер заявки на портале инкассация - {order_id}.\"\n pass\n\n body = '<p>Добрый день!<br><br>' \\\n f'Прошу принять в работу письмо на {self.letter_text}<br>' \\\n f'Скан подписанного письма вышлю позднее.{ticket_text}'\n if 'сб' in object_.counterparty_name.lower():\n self.send_sber_manager_service(body)\n else:\n self.sendmail(\n self.outlook,\n self.to,\n \"\",\n self.letter_name,\n body,\n self.attachment,\n 2\n )", "def notify(cls, self, message):\n pass", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def comsume_msg(self, msg_type):", "def _send_message(self, *args, **kwargs):\n with self.comm_lock:\n return super(FrontendComm, self)._send_message(*args, **kwargs)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending notification in Slack\")", "def notification_cbk(self, notification):\n self._logger.info(\"Received netconf notification %s\", notification)\n if self._encoding == \"json\":\n # Convert XML to Json\n xml_str = bytes(repr(notification), 'utf-8')\n resp_str = self._xml_to_json_translator.convert_notification(xml_str)\n if resp_str is None:\n # Use a schema-less conversion\n resp_str = naive_xml_to_json(xml_str)\n else:\n resp_str = repr(notification)\n self._logger.debug(\"Translated Notification: %s\", resp_str)\n self.on_netconf_message(resp_str)", "def notify_user(self, svno, ops):\n\n self.sr=svno\n self.ops=ops\n try:\n from email.mime.text import MIMEText\n from 
email.mime.multipart import MIMEMultipart\n except Exception, imperr:\n print(\"emailNotify failure - import error %s\" % imperr)\n return(-1)\n nHtml = []\n noHtml = \"\"\n clientEmail = ['[email protected]']\n msg = MIMEMultipart()\n # This is the official email notifier\n rtUser = '[email protected]'\n\n msg['From'] = rtUser\n msg['To'] = \", \".join(clientEmail)\n if self.data['groupid'] == 'Nastran-RG':\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n elif self.data['groupid'] == 'Patran-RG':\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n else:\n msg[\"Cc\"] = \"[email protected],\\\n [email protected],\\\n [email protected]\"\n\n if self.ops == 'ipnw':\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for VMID %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n else:\n msg['Subject'] = '%s regression got impacted due \\\n to vCAC cloud for service request: %s' % \\\n ( pdict[self.data['groupid']], self.sr['requestNumber'])\n\n nHtml.append(\"<html> <head></head> <body> <p>Jenkin's \\\n vCAC cloud client notification<br>\")\n nHtml.append(\"<b>Hi Helpdesk,</b><br><br><br>\")\n nHtml.append(\"Please create a ticket to solve the \\\n following problem and notify infra team.\")\n if self.ops == 'ipnw':\n nHtml.append(\"VM creation readiness from vCAC \\\n cloud is taking long time, \\\n vm creation service request completed, \\\n But network configuration is having an issue \\\n for VMID <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n else:\n nHtml.append(\"Creation of VM through vCAC cloud is taking \\\n longer time than expected, the service \\\n request <b>%s</b> is stuck. \" % self.sr['requestNumber'])\n\n nHtml.append(\"Regression test for product <b>%s</b> \\\n is stuck and impacted.<br><br>\" % \\\n pdict[self.data['groupid']])\n if os.path.isdir(self.data['rundir']):\n jnfilepath=os.path.join(self.data['rundir'], 'hudjobname.dat')\n if os.path.isfile(jnfilepath):\n lines = [line.rstrip() for line in open(jnfilepath)]\n nHtml.append(\"Please follow job link for \\\n SR# related information.<br>\")\n nHtml.append(\"Jenkins Effected Job URL: <a href=%s> \\\n Effected Build Console \\\n </a><br><br><br>\" % (lines[0]))\n\n nHtml.append(\"This needs immediate attention.<br><br>\")\n nHtml.append(\"Regards,<br>\")\n nHtml.append(\"Rtest Administrator.<br>\")\n nHtml.append(\"[Note: This is an automated mail,\\\n Please do not reply to this mail.]<br>\")\n nHtml.append(\"</p> </body></html>\")\n noHtml = ''.join(nHtml)\n noBody = MIMEText(noHtml, 'html')\n msg.attach(noBody)\n s = smtplib.SMTP('postgate01.mscsoftware.com')\n s.sendmail(rtUser, [clientEmail] + msg[\"Cc\"].split(\",\"), msg.as_string())\n s.quit()\n return 0", "def send_message(self,contato,mensagem):\r\n #Open new chat on whatsapp web\r\n new_msg_button = self.driver.find_element_by_xpath(self.NEW_CHAT)\r\n new_msg_button.click()\r\n sleep(1)\r\n #Search the contact\r\n search_field = self.driver.find_element_by_xpath(self.SEARCH_CONTACT)\r\n search_field.click()\r\n search_field.send_keys(contato)\r\n sleep(1)\r\n #Click on the firts contact with the name that I told \r\n first_contact = self.driver.find_element_by_xpath(self.FIRST_CONTACT)\r\n first_contact.click()\r\n sleep(1.5)\r\n type_field = self.driver.find_element_by_xpath(self.TYPE_MSG)\r\n type_field.click()\r\n type_field.send_keys(mensagem)\r\n send_msg= self.driver.find_element_by_xpath(self.SEND_BUTTON)\r\n send_msg.click()\r\n sleep(1)", 
"def _send_notification(self, user_id):\n settings = self.settings_repo.find_one_by_id(user_id)\n if settings.req_noti:\n noti = Notification('New Request', '/topics/request', self.BORROW)\n self.noti_service.send_notification(noti)", "def sendNotificationToClerksOffice(date):\n text = translate('notification_email_to_clerk_question_pending_response',\n target_language='en',\n domain='bungeni.core',\n default=\"Questions pending responses.\")\n ministries = _getAllMinistries(date)\n for ministry in ministries:\n questions = _getQuestionsPendingResponse(date, ministry)\n if questions:\n text = text + '\\n' + ministry.full_name +': \\n'\n for question in questions:\n text = text + question.subject + '\\n'\n \n msg = MIMEText(text)\n \n msg['Subject'] = u'Questions pending response'\n msg['From'] = prefs.getAdministratorsEmail()\n msg['To'] = prefs.getClerksOfficeEmail()\n print msg\n #dispatch(msg)", "def notify(self, **kwargs):\n return self.send(kwargs)", "def notify(self, **kwargs):\n return self.send(kwargs)", "def cmd_notification_id(client, args):\n notification = client.get_notification(args.notification_id)\n notification = notification.__dict__\n if 'comment' in notification['content']:\n notification['content'] = format_comment_tree(notification['content'])\n generate_output({'notification': notification})" ]
[ "0.5887094", "0.5750634", "0.57312334", "0.57009876", "0.5486229", "0.54457545", "0.5437605", "0.53316367", "0.5314361", "0.52677506", "0.52383125", "0.5220476", "0.52009946", "0.5194496", "0.5170836", "0.51647365", "0.51592344", "0.51445454", "0.5126826", "0.5122613", "0.5111196", "0.5108509", "0.50696975", "0.50601727", "0.5041371", "0.50351584", "0.50154984", "0.5008828", "0.5008828", "0.49825588" ]
0.5796216
1
Check for potential problems in a commit.
def check_commit_problems(self, commit, diff):
    # Initialise
    self._license_problem = False
    self._commit_problem = False
    self._commit_notes = defaultdict(list)

    # Unsafe regex checks...
    unsafe_matches = list()
    unsafe_matches.append( r"\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\b\s*[\(\r\n]" )
    unsafe_matches.append( r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]" )
    unsafe_matches.append( r"(scanf)\b\s*[\(\r\n]" )
    valid_filename_regex = r"\.(cpp|cc|cxx|C|c\+\+|c|l|y||h|H|hh|hxx|hpp|h\+\+|qml)$"

    # Retrieve the diff and do the problem checks...
    filename = unicode("")
    filediff = list()
    for line in diff:
        file_change = re.match( "^diff --(cc |git a\/.+ b\/)(.+)$", line )
        if file_change:
            # Are we changing file? If so, we have the full diff, so do a license check....
            if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename):
                self.check_commit_license(filename, ''.join(filediff))

            filediff = list()
            filename = file_change.group(2)
            continue

        # Diff headers are bogus
        if re.match("@@ -\d+,\d+ \+\d+ @@", line):
            filediff = list()
            continue

        # Do an incremental check for *.desktop syntax errors....
        if re.search("\.desktop$", filename) and re.search("[^=]+=.*[ \t]$", line) and line.startswith("+") and not re.match("^\+#", line):
            self._commit_notes[filename].append( "[TRAILING SPACE] **" )
            self._commit_problem = True

        # Check for things which are unsafe...
        for safety_match in unsafe_matches:
            match = re.match(safety_match, line)
            if match:
                note = "[POSSIBLY UNSAFE: {0}] **".format( match.group(1) )
                self._commit_notes[filename].append(note)
                self._commit_problem = True

        # Store the diff....
        filediff.append(line)

    if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename):
        self.check_commit_license(filename, ''.join(filediff))
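A condensed, standalone sketch of the unsafe-pattern scan performed above; the regexes are copied from the function, while the sample lines and everything else are illustrative:

import re

UNSAFE_PATTERNS = [
    r"\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\b\s*[\(\r\n]",
    r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]",
    r"(scanf)\b\s*[\(\r\n]",
]

def scan_lines(lines):
    notes = []
    for line in lines:
        for pattern in UNSAFE_PATTERNS:
            match = re.match(pattern, line)
            if match:
                notes.append("[POSSIBLY UNSAFE: {0}] **".format(match.group(1)))
    return notes

print(scan_lines(['system("rm -rf /tmp/cache");', 'QString name = user.name();']))
# ['[POSSIBLY UNSAFE: system] **']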
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")", "def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews", "def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start 
with\n commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def validate_change(ticket):\n # First ensure topic line mentions tickets, and pull them out.\n topic = COMMIT_MSG.split('\\n', 1)[0]\n fix_tickets = re.findall(\"[A-Z]{2,5}-[0-9]{1,6}\", topic)\n if len(fix_tickets) == 0:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: commit message does not name a ticket!\"\n return False\n\n # Now get list of approved tickets from master ticket, and ensure\n # all \"fixed\" tickets are approved.\n approved_tickets = get_approved_tickets(ticket)\n for tick in fix_tickets:\n if not tick in approved_tickets:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: ticket {} is not approved (see approval ticket {})\".format(\n tick, ticket)\n return False\n return True", "def test_commit(self):\n # TODO: Test errors 
while committing and recovery\n pass", "def validate_commit(ctx, sha, **_):\n\n gh = ctx.obj.github\n ci_provider = ctx.obj.ci_provider\n\n sha = sha or (ci_provider.sha if ci_provider else None)\n\n def _pre_issue():\n log.echo('Commit references an issue...', break_line=False)\n\n def _post_issue():\n log.checkmark()\n\n def _pre_label():\n log.echo('Issue is labeled with a release label...', break_line=False)\n\n def _post_label():\n log.checkmark()\n\n log.echo('Validating commit', add=True)\n\n try:\n gh.validate_commit(sha=sha,\n hooks={\n 'pre_issue': _pre_issue,\n 'pre_label': _pre_label,\n 'post_issue': _post_issue,\n 'post_label': _post_label\n })\n except exceptions.ReleaseValidationFailedException as e:\n log.xmark()\n log.sub()\n tb = sys.exc_info()[2]\n utils.raise_with_traceback(e, tb)\n log.sub()\n\n log.echo('Validation passed')", "def lint_commit_message(commit):\n success = True\n lines = commit.message.splitlines()\n\n # Check length of summary line.\n summary_line_len = len(lines[0])\n if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:\n error(\n \"The summary line in the commit message is %d characters long; \"\n \"only %d characters are allowed.\" %\n (summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)\n success = False\n\n # Check that summary line does not end with a period\n if lines[0].endswith('.'):\n error(\"The summary line must not end with a period.\", commit)\n success = False\n\n # Check that we don't have any fixups.\n if lines[0].startswith('fixup!'):\n error(\"Fixup commits are not allowed. Please resolve by rebasing.\",\n commit)\n success = False\n\n # Try to determine whether we got an area prefix in the commit message:\n summary_line_split = lines[0].split(':')\n summary_line_split_len = len(summary_line_split)\n\n # We didn't get an area prefix, so just make sure the message started with a\n # capital letter.\n if summary_line_split_len == 1:\n if not re.match(r'[A-Z]', lines[0]):\n error(\"The summary line must start with a capital letter.\", commit)\n success = False\n # The user specified an area on which she worked.\n elif summary_line_split_len == 2:\n if not re.match(r'[a-z_A-Z\\-]*(/[a-z_A-Z\\-]+)*', summary_line_split[0]):\n error(\n 'The area specifier is mal-formed. Only letters,'\n 'underscores and hyphens are allowed. 
Different areas must be'\n 'separated by a slash.', commit)\n success = False\n # Check the second part of the commit message.\n if not summary_line_split[1].startswith(' '):\n error(\"The area must be separated by a single space.\", commit)\n success = False\n if not re.match(r'\\s[A-Z]', summary_line_split[1]):\n error(\n \"The summary line after the colon must start with a capital letter.\",\n commit)\n success = False\n # We do not allow more than one area i.e., colon.\n else:\n error(\"Only one colon is allowed to specify the area of changes.\",\n commit)\n success = False\n\n # Check for an empty line separating the summary line from the long\n # description.\n if len(lines) > 1 and lines[1] != \"\":\n error(\n \"The second line of a commit message must be empty, as it \"\n \"separates the summary from the long description.\", commit)\n success = False\n\n return success", "def check_commit_msg(commitish):\n\n hdr = CommitSubHeader()\n line_list = dump_raw_body(commitish)\n\n if COMMIT_MESSAGE_CHECK and line_list[1] != \"\":\n if line_list[1].find('REF: ') == -1:\n add_error(\"Summary field must have just one line in %s\" % commitish)\n else:\n add_error(\"No empty line after Summary field in %s\" % commitish)\n\n if COMMIT_MESSAGE_CHECK and len(line_list[0]) < 5 or len(line_list[0]) > 78:\n add_error(\"Wrong size (%d) of Summary field in %s\" % (len(line_list[0]), commitish))\n\n while len(line_list) != 0:\n line = line_list.pop(0)\n\n if line.find('REF: ') == 0:\n if hdr.ref == None:\n hdr.ref = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'REF:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['REF: '].match(line[len('REF: '):]):\n add_error(\"Wrong field 'REF:' in %s\" % commitish)\n else:\n hdr.ref = line[len('REF: '):]\n\n elif line.find('Signed-off-by: ') == 0:\n if hdr.signed == None:\n hdr.signed = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'Signed-off-by:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['Signed-off-by: '].match(line[len('Signed-off-by: '):]):\n add_error(\"Wrong field 'Signed-off-by:' in %s\" % commitish)\n else:\n hdr.signed = line[len('Signed-off-by: '):]\n\n elif len(line) != 0:\n hdr.desc = 1\n if COMMIT_MESSAGE_CHECK and len(line) > 78:\n add_error(\"Wrong size (%d) of field 'Description' in %s\" % (len(line), commitish))\n\n if COMMIT_MESSAGE_CHECK and hdr.ref == None:\n add_error(\"No field 'REF:' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.desc == None:\n add_error(\"No field 'Description' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.signed == None:\n add_error(\"No field 'Signed-off-by:' in %s\" % commitish)\n\n return hdr", "def _ensure_commit(git_sha1):\n cmd = [\"git\", \"cat-file\", \"-e\", git_sha1 + \"^{commit}\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if p.returncode == 0:\n # we have the commit locally\n return\n # we don't have the commit, must fetch\n cmd = [\"git\", \"fetch\", \"https://github.com/pytorch/pytorch.git\", git_sha1]\n p = subprocess.run(cmd, check=True)", "def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())", "def lint_commit_base(commit):\n success = True\n # Merge commits have two parents, we maintain a linear history.\n if len(commit.parents) > 1:\n error(\n \"Please resolve merges by re-basing. 
Merge commits are not allowed.\",\n commit)\n success = False\n\n return success", "def resolve_conflicts(self, commit=True):\n pass # pragma: no cover", "def _validate_commits(pull_request):\n commits = github.get_commits(pull_request[\"commits_url\"])\n analyzed = []\n\n for commit_wrapper in commits:\n commit = {\n \"sha\": commit_wrapper[\"sha\"],\n \"message\": commit_wrapper[\"commit\"][\"message\"],\n }\n\n commit[\"standard\"] = _validate_title(commit[\"message\"])\n analyzed.append(commit)\n\n result = all(commit[\"standard\"] for commit in analyzed)\n return analyzed, result", "def lint(self, commit):\n LOG.debug(\"Linting commit %s\", commit.sha or \"[SHA UNKNOWN]\")\n LOG.debug(\"Commit Object\\n\" + str(commit))\n\n # Ensure the Deprecation class has a reference to the config currently being used\n Deprecation.config = self.config\n\n # Apply config rules\n for rule in self.configuration_rules:\n rule.apply(self.config, commit)\n\n # Skip linting if this is a special commit type that is configured to be ignored\n ignore_commit_types = [\"merge\", \"squash\", \"fixup\", \"fixup_amend\", \"revert\"]\n for commit_type in ignore_commit_types:\n if getattr(commit, f\"is_{commit_type}_commit\") and getattr(self.config, f\"ignore_{commit_type}_commits\"):\n return []\n\n violations = []\n # determine violations by applying all rules\n violations.extend(self._apply_line_rules([commit.message.title], commit, self.title_line_rules, 1))\n violations.extend(self._apply_line_rules(commit.message.body, commit, self.body_line_rules, 2))\n violations.extend(self._apply_commit_rules(self.commit_rules, commit))\n\n # Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules),\n # we replace None with -1 so that it always get's placed first. Note that we need this to do this to support\n # python 3, as None is not allowed in a list that is being sorted.\n violations.sort(key=lambda v: (-1 if v.line_nr is None else v.line_nr, v.rule_id))\n return violations", "def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. 
Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )", "def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False", "def is_commit_id_valid(commit_id, wit_path):\n\n if not is_branch(wit_path, commit_id):\n if commit_id.isalnum() and len(commit_id) == 40:\n\n if commit_id in _get_all_saves_names(wit_path):\n return True\n\n else:\n logging.error(f'No commit named {commit_id}.')\n\n else:\n logging.error('branch or commit does not exist. commit id must be 40 digits long and hexadecimal.')\n else:\n return True", "def __gitBisectBad(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"bad\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def commit_exists(repo, commit):\n cmd = ['git', 'cat-file', '-t', commit]\n try:\n devnull = open(os.devnull, 'wb')\n output = subprocess.check_output(cmd, cwd=repo,\n stderr=devnull)\n return output.rstrip() == 'commit'\n except subprocess.CalledProcessError:\n return False", "def _check_inputs(self):\n\n if not os.path.isdir(self._parent_repo):\n raise Error('Invalid parent repo path %r' % self._parent_repo)\n\n self._run_git_command(['--help'], error_message='Unable to run git')\n self._run_git_command(['status'],\n error_message='%r is not a valid git repo' %\n os.path.abspath(self._parent_repo))\n self._run_git_command(['fetch', 'origin'],\n error_message='Failed to fetch origin')\n self._run_git_command(\n ['rev-parse', '%s^{commit}' % self._branch_ref],\n error_message='Branch %s not found' % self._branch_ref)\n self._run_git_command(\n ['rev-parse', '%s^{commit}' % self._revision],\n error_message='Revision \"%s\" not found' % self._revision)", "def enforce_clean_option(args, run):\n repos = run.experiment_info[\"repositories\"]\n if not repos:\n raise RuntimeError(\n \"No version control detected. \"\n \"Cannot enforce clean repository.\\n\"\n \"Make sure that your sources under VCS and the \"\n \"corresponding python package is installed.\"\n )\n else:\n for repo in repos:\n if repo[\"dirty\"]:\n raise RuntimeError(\n \"EnforceClean: Uncommited changes in \"\n 'the \"{}\" repository.'.format(repo)\n )", "def verify_rev(rev):\n return not subprocess.call(['git', 'rev-parse', '-q', '--verify', '--no-revs', rev])", "def warn_uncommitted_changes(force):\n output = subprocess.run([\"git\", \"status\"], capture_output=True, text=True,)\n if \"modified\" in output.stdout or \"Untracked\" in output.stdout:\n print(\"Warning: repository has uncommitted changes:\\n\")\n print(\"-----------------------------------------------------------------------\")\n print(f\"{output.stdout}\")\n print(\"-----------------------------------------------------------------------\")\n if not force:\n print(\"\\nRun with -f to override\")\n sys.exit(1)", "def check_pr_details(self, pr_number):\n pr = self.repo.get_pull(pr_number)\n email_pattern = re.compile(r'^.*@suse\\.(com|cz|de)$')\n\n for commit in pr.get_commits():\n sha = commit.sha\n author = commit.author\n title = message = commit.commit.message\n # Not sure why we need to use the nested commit for the email\n email = commit.commit.author.email\n user_id = f'{author.login}({email})'\n body = ''\n\n # This could be probably smarter but commit contains something like the following\n # message=\"$commit_title\\n\\n$long_commit_message\" and as such maybe we can split it and\n # check for the following limits: title max 50 chars, body max 72 chars per line and at\n # least as long as the commit title to avoid commit message bodies full of whitespaces\n try:\n title, body = message.split('\\n\\n', 1)\n except ValueError:\n print('No commit body was detected')\n\n print(f'Checking commit \"{sha}: {title}\"')\n\n if not email_pattern.fullmatch(email):\n print(f'Checking if {user_id} is part of the SUSE organization...')\n\n if self.org.has_in_members(commit.author):\n print(f'{user_id} is part of SUSE organization but a SUSE e-mail address was not used for commit: {sha}')\n sys.exit(1)\n\n # replace case-insensitive \"(bsc#)\" (or []) and surrounding spaces\n # with a single space, then prune leading/trailing spaces\n title = 
re.sub(r'\\s*[([]\\s*(?i:bsc)#\\d+\\s*[)\\]]\\s*', ' ', title).strip()\n if len(title) > 50:\n print('Commit message title should be less than 50 characters (excluding the bsc# reference)')\n sys.exit(1)\n\n # No body detected. Nothing else to do here.\n if not body:\n continue\n\n if len(body) < len(title):\n print('Commit message body is too short')\n sys.exit(1)\n\n # strip multi-line '```code```' blocks & lines starting w\\ `code`\n code_pattern = re.compile(\n r'''\n ((?m:^)\\s*```) # multi-line beginning, 0-more whitespace, ```\n (?s:.*?) # non-greedy, zero or more chars, including \\n\n \\1 # whatever matched at the beginning\n | # or...\n (?m:^)\\s*` # start of line, optional whitespace, backtick\n [^`]+ # oneor more non-backtick chars\n `\\s*(?m:$) # and a backtick at the end of the line\n ''',\n re.VERBOSE\n )\n for body_line in re.sub(code_pattern, '', body).splitlines():\n if len(body_line) > 72:\n print('Each line in the commit body should be less than 72 characters')\n sys.exit(1)\n\n print(f'PR-{pr_number} commits verified.')", "def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)", "def test_noChangeFromTrunk(self):\n runCommand([\"git\", \"checkout\", \"-b\", \"mypatch\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(\n logs[-1], \"On trunk or no diffs from trunk; no need to look at this.\"\n )", "def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False", "def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True" ]
[ "0.7050094", "0.69747615", "0.6810369", "0.6753324", "0.67024326", "0.66566247", "0.6637356", "0.65921104", "0.65602815", "0.65418464", "0.6462754", "0.6436193", "0.6383709", "0.6378951", "0.62852246", "0.6269505", "0.6268365", "0.6259074", "0.6205398", "0.6135695", "0.6093538", "0.60787183", "0.6051862", "0.60444015", "0.6023536", "0.5981489", "0.59676933", "0.59646606", "0.59513015", "0.5943752" ]
0.800173
0
Returns index of the resource to use for making requests to get data if none of the resources are available, then send number of seconds until the resource is not available
def get_resource_index(self): result = -1 max_sleep_time = self.time_window with self._lock: while result == -1: for i in range(0, self.num_keys): curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0) max_sleep_time = min(max_sleep_time, curr_sleep_time) if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time(): self.timers[i][0] = 0 self.timers[i][1] = 0 if self.timers[i][1] < self.window_limit: result = i break if result == -1: # case when all streams are rate limited # logging.warning('sleeping for %d seconds.' % max_sleep_time) # time.sleep(max_sleep_time) return -1 * max_sleep_time if self.timers[result][0] == 0: self.timers[result][0] = time.time() self.timers[result][1] += 1 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_get():\n for resource in resources:\n\n # acquire lock\n res_lock = resource.get(\"lock\")\n res_lock.acquire()\n\n # Get if available\n if resource.get(\"available\") == \"true\":\n # Available - acquire resource and return\n resource.update({\"available\": \"false\"})\n res_lock.release()\n return jsonify(resource.get(\"data\"))\n\n # Not available, release and continue\n res_lock.release()\n\n # All resources are taken\n return app.make_response(('No available resource', 500))", "def ask_for_numbers():\n requests.get(\"http://zero2.local:5000/get_num\", timeout=(20,0.02))\n return 1", "def on_get(self, req, resp):\n try:\n n_reqs = int(req.params.get('n', self.default_reqs))\n except ValueError:\n error_response(resp, 'ERROR: Incorrect number of requests')\n return\n\n urls = self.scheduler.requests(n_reqs)\n resp.data = json.dumps(urls, ensure_ascii=True)\n resp.content_type = \"application/json\"\n resp.status = falcon.HTTP_200", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def do_GET(self):\n global st_point, cur_request\n if time.time() - st_point < 1 and cur_request > args.MAX_REQ:\n self.send_response(429)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n time.sleep(0.2)\n return\n elif time.time() - st_point > 1:\n st_point = time.time()\n cur_request = 1\n self.func_PARSE()\n if self.parsed_url[2] in [\"/ping\", \"/cats\"]:\n self.func_DO()\n else:\n self.send_response(400)\n text=\"<h1 align=center>Bad request</h1>\"\n self.func_PRINT(text)", "def request_more_resources():\n logger.info(\"NEED MORE RESOURCES!!!!\")", "def retrieve_report(resource, url, key):\n # TODO: manage time\n params = {\"apikey\": key, \"resource\": resource}\n res = requests.post(url, data=params)\n\n while res.status_code == 204 or json.loads(res.text)[\"response_code\"] == -2:\n time.sleep(15)\n res = requests.post(url, data=params)\n\n return res", "def get_num_pages(self) -> Optional[int]:\n timeout: float = 5\n num_attempts = 0\n while num_attempts < 10:\n r = hit_api(self.key_manager, self.url, self.logger, timeout=timeout, method=\"HEAD\")\n\n if r:\n break\n\n timeout = timeout * 1.2\n else:\n raise RuntimeError(\"Unable to get the number of pages of data in 10 attempts\")\n\n if 'last' not in r.links.keys():\n return 1\n \n # get the last url from header\n last_page_url = r.links['last']['url']\n\n parsed_url = urlparse(last_page_url)\n try:\n num_pages = int(parse_qs(parsed_url.query)['page'][0])\n except (KeyError, ValueError):\n return None\n\n return num_pages", "def try_query(pid):\n retries = 1\n while True:\n try:\n query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n return query\n except HTTPError as e:\n if retries > 10:\n raise e\n print(e)\n wait = retries * 15\n time.sleep(wait)\n retries += 1", "def http_call(self, request):\n response = self.session.get(request)\n attempts = 0\n while response.status_code == 429:\n if attempts > 5:\n break\n attempts = attempts + 1\n time.sleep(30)\n response = self.session.get(request)\n response.raise_for_status()\n return response", "def wait_for_not_found(name, read_method, resource_type=None, **kwargs):\n sleep_time = CONF.kubernetes.status_poll_interval\n retries_total = CONF.kubernetes.status_total_retries\n\n commonutils.interruptable_sleep(CONF.kubernetes.start_prepoll_delay)\n\n i = 0\n while i < retries_total:\n try:\n resp = 
read_method(name=name, **kwargs)\n resp_id = resp.metadata.uid\n current_status = resp.status.phase\n except rest.ApiException as ex:\n if ex.status == 404:\n return\n else:\n raise\n else:\n commonutils.interruptable_sleep(sleep_time)\n i += 1\n if i == retries_total:\n raise exceptions.TimeoutException(\n desired_status=\"Terminated\",\n resource_name=name,\n resource_type=resource_type,\n resource_id=resp_id or \"<no id>\",\n resource_status=current_status,\n timeout=(retries_total * sleep_time))", "async def async_get_stage(self, attempts=50):\n\n # Query the API until a sensible (> 0) value is received, or the number of attempts is exceeded\n for attempt in range(attempts):\n res = await self.async_query_api(\"/GetStatus\")\n\n # Return the current loadshedding stage by subtracting 1 from the query result\n # Occasionally the Eskom API will return a negative stage, so simply retry if this occurs\n if res and int(res) > 0:\n return int(res) - 1\n\n # If the query does not succeed after the number of attempts has been exceeded, raise an exception\n raise Exception(\n f\"Error, invalid loadshedding stage received from API after {attempts} attempts\"\n )", "def get_from_index(index, type, id): \n response = None\n \n #Try 3 times to read the document from ES, each time picking a random ES node address in case of failure\n for retries in range(3): \n try:\n response = es.get(index=index, doc_type=type, id=id)\n log(\"ES Get Response :: \" + json.dumps(response))\n except ImproperlyConfigured:\n log(\"ES ImproperlyConfigured!\" + traceback.format_exc())\n continue\n except ElasticsearchException:\n log(\"ES ElasticsearchException!\" + traceback.format_exc())\n continue\n except TransportError:\n log(\"ES TransportError!\" + traceback.format_exc())\n continue\n except NotFoundError:\n log(\"ES NotFoundError!\" + traceback.format_exc())\n continue\n except ConflictError:\n log(\"ES ConflictError!\" + traceback.format_exc())\n continue\n except RequestError:\n log(\"ES RequestError!\" + traceback.format_exc())\n continue\n except SerializationError:\n log(\"ES SerializationError!\" + traceback.format_exc())\n continue\n except ConnectionError:\n log(\"ES ConnectionError!\" + traceback.format_exc())\n continue\n except Exception:\n log(\"ES Exception!\" + traceback.format_exc())\n continue\n finally:\n log(\"Total number of ES read attempts: \" + str(retries + 1))\n #Exit for loop if ES transaction is successful otherwise pick another node and continue retrying\n break\n\n if response is None or response == '':\n return ('false', retries + 1)\n else:\n return ('true', retries + 1)", "def initialize_timer():\n try:\n print_debug(\"Initializing the timer by fetching it on the online API\")\n response = WEB_INSTANCE.open(config.API_LOCATION).read()\n response = response.rstrip()\n print_debug(\"Found \"+str(response)+\" on the online API\")\n save_time_left(response)\n return response\n except Exception, e:\n print(e)\n return 'WAITING'", "def winhttp_WinHttpQueryDataAvailable(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpdwNumberOfBytesAvailable\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def __len__(self, context=None):\n if context is not None:\n context = self._repair_context(context)\n uri = self.rest_services[\"size\"]\n payload=dict()\n if context:\n context = context.n3()\n payload[\"context\"] = context\n r = requests.get(uri, params = payload)\n return int(r.text)", "def do_get(self):\n try:\n res = 
requests.get(self.url, timeout=self.timeout)\n response = make_response(res.__dict__)\n self.elapsed_times.append(response.elapsed)\n self.logger.info( str(response) )\n except MyTimeoutException:\n response = make_response({'url':self.url,'elapsed':-1,'status_code':-1})\n self.elapsed_times.append(self.timeout)\n self.logger.info( str(response) )\n self.fail_count += 1", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def task_retry_count():\n retries = flask.request.headers.get(\n 'X-AppEngine-TaskRetryCount')\n if retries is not None:\n return int(retries)\n return None", "def __getitem__(self, index):\n self.wait()\n return self._results.__getitem__(index)", "def _request(payloadString):\n global countRequested\n global lastReqTime\n if lastReqTime is not None and time.time() - lastReqTime < interReqTime:\n timeToSleep = random()*(interReqTime-time.time()+lastReqTime)*2\n logging.info(\"Sleeping for {0} seconds before request.\".format(\n timeToSleep))\n time.sleep(timeToSleep)\n logging.info(\"Issuing request for the following payload: {0}\".format(\n payloadString))\n r = requests.get(\"{0}/{1}\".format(baseUrl, payloadString))\n lastReqTime = time.time()\n countRequested += 1\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n raise Exception(\"Could not process request. \\\n Received status code {0}.\".format(r.status_code))", "def wait(self, timeout=None):\n if self.counter > 0:\n return self.counter\n\n self._wait(timeout) # return value irrelevant, whether we got it or got a timeout\n return self.counter", "def read_blinds_status_from_thingspeak():\n results = 1\n URL='https://api.thingspeak.com/channels/1152832/feeds.json?api_key='\n KEY='4DDGV289MS3GJCBY'\n prev_len_data = 0 #the length of the list of data points collected on the previous loop search\n \n while (1):\n HEADER='&results=%d' % (2**results)\n NEW_URL=URL+KEY+HEADER\n \n try: \n get_data=requests.get(NEW_URL).json()\n \n data = []\n for x in get_data['feeds']:\n print(x['field3'])\n data.append(x['field3']) #get lightstatus\n #End for\n \n index = search_for_nums(data) #searching for most recent lightstatus input\n \n if index != None: #found most recent data\n print(\"data point found...blindsstatus: %s \" % (data[index]))\n return int(data[index])\n else:\n print(\"missing data point\")\n results += 1\n \n if prev_len_data == len(data): #if the list of data previously collected is the same as the current\n print (\"No data points currently exist\") #all current available data has been exhausted. Move on\n return\n else: \n prev_len_data = len(data) #there are more points available. 
try again.\n #END if\n #END if\n except:\n print (\"Error reading blinds_status from ThingSpeak\")\n #END try-except\n #END WHILE", "def getRetryCount():\n return int(webapp2.get_request().headers.get('X-Appengine-TaskRetryCount', 0))", "def request(self):\n xml = urllib2.urlopen(self.url, self.data, self.timeout).read()\n if int(xml.count('name')) >= 0:\n self.items['used_download'] = int(xml.count('name'))/2 - 1\n self.items['download'] = self.items['used_download']\n return self", "def get_next_client_index(self, write=True):\r\n if write or len(self._server) == 1:\r\n return 0\r\n\r\n return random.randint(1, len(self._server) - 1)", "async def fetch_one(session, num):\n\n url = f'https://projecteuler.net/overview={num:03}'\n\n async with async_timeout.timeout(10):\n async with session.get(url) as response:\n if response.status == 200:\n if response.headers['Content-Type'] == 'application/pdf':\n data = await response.read()\n filename = f'{num:03}_overview.pdf'\n\n # Working with files synchronously as async would require a thread pool\n with get_path('doc', filename).open('wb') as pdf_file:\n pdf_file.write(data)\n return num\n else:\n print(f\"Got {response.status} while fetching {url}\")", "def rest_api_status(self):\n with self.resource_lock:\n pass", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)", "def robust_request(twitter, resource, params, max_tries=5):\n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)" ]
[ "0.6093405", "0.5707536", "0.56445384", "0.5552679", "0.55315655", "0.54823583", "0.5422522", "0.5402123", "0.5400669", "0.538704", "0.5379008", "0.5378094", "0.53733474", "0.535927", "0.5357638", "0.53508085", "0.53386307", "0.5338582", "0.5338395", "0.53344756", "0.5322996", "0.5296604", "0.5291609", "0.5290525", "0.52858925", "0.5276881", "0.525561", "0.5244207", "0.52355975", "0.52355975" ]
0.67190135
0
Test Chronos GR Config plugin writes new config when config has changed
def test_chronos_gr_config_changed(self, mock_run_command, mock_safely_write): # Create the plugin plugin = ChronosGRConfigPlugin({}) # Set up the config strings to be tested old_config_string = "Old Chronos GR config" new_config_string = "New Chronos GR config" # Call 'on_config_changed' with file.open mocked out with mock.patch('clearwater_etcd_plugins.chronos.chronos_gr_config_plugin.open', \ mock.mock_open(read_data=old_config_string), create=True) as mock_open: plugin.on_config_changed(new_config_string, None) # Test assertions mock_open.assert_called_once_with(plugin.file(), "r") mock_safely_write.assert_called_once_with(plugin.file(), new_config_string) mock_run_command.assert_called_once_with("/usr/share/clearwater/clearwater-queue-manager/scripts/modify_nodes_in_queue add apply_chronos_gr_config")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def test_update_wait():\n wait = '10 seconds'\n config_info = read_config()\n config_info['wait'] = wait\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['wait'] == wait", "def test_write_config(self):\n config = Config()\n config.config = test_config\n config.config_file = \"./config\"\n config.write_config()\n with open(config.config_file) as config_file:\n data = config_file.read()\n self.assertTrue(data)\n os.remove(config.config_file)", "def test_config_overwrite(self):\n inc = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", False, True)\n ini = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", False, True)\n\n self.assertEquals(inc, ini)", "def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()", "def test_config(setup_debug, tmp_path):\n os.chdir(tmp_path)\n \n ssh_tunnels = SSHTunnels(users=[\"bbeeson\"])\n c0 = (TEST_DATA / \"config\").read_text()\n # run and add 'queen'\n c1 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n # run and do nothing\n c2 = ssh_tunnels.update_config(TEST_DATA / \"config\")\n assert len(c1) > len(c0)\n assert len(c1) == len(c2)\n \n # c_ref = (TEST_DATA / \"test_ssh_config2\").read_text()\n # should have just added queen\n #assert c2 == c_ref", "def test_update_reg_ex_config(self):\n pass", "def test_update_age():\n age = '2 minutes'\n config_info = read_config()\n config_info['age'] = age\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['age'] == age", "def test_config_reload(self):\n server = self.start_server(\"hello world\", 200)\n try:\n self.setup_dynamic()\n\n cfg_file = \"test.yml\"\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.output_has(lines=1))\n\n self.assert_last_status(\"up\")\n\n self.write_dyn_config(\n cfg_file, self.http_cfg(\"myid\", \"http://203.0.113.1:8186\"))\n\n self.wait_until(lambda: self.last_output_line()[\n \"url.full\"] == \"http://203.0.113.1:8186\")\n\n self.assert_last_status(\"down\")\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()", "def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig", "def test_set_config__twice__with_same_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'), \\\n mock.patch('optimizely.optimizely_config.OptimizelyConfigService.get_config') as 
mock_opt_service:\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual(1, mock_opt_service.call_count)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n mock_opt_service.reset_mock()\n\n # Call set config again and confirm that no new log message denoting config update is there\n project_config_manager._set_config(test_datafile)\n self.assertEqual(0, mock_logger.debug.call_count)\n self.assertEqual(0, mock_notification_center.call_count)\n # Assert that mock_opt_service is not called again.\n self.assertEqual(0, mock_opt_service.call_count)", "def test_custom_configuration_updated(self):\n component_protocol_id = ComponentId(\n ComponentType.PROTOCOL, self.new_protocol_id\n )\n component_contract_id = ComponentId(\n ComponentType.CONTRACT, self.new_contract_id\n )\n component_connection_id = ComponentId(\n ComponentType.CONNECTION, self.new_connection_id\n )\n component_skill_id = ComponentId(ComponentType.SKILL, self.new_skill_id)\n\n assert (\n self.agent_config.component_configurations[component_protocol_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_contract_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_connection_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_skill_id]\n == self.expected_custom_component_configuration\n )", "def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")", "def test_config_add(self):\n self.setup_dynamic()\n\n self.wait_until(lambda: self.log_contains(\n \"Starting reload procedure, current runners: 0\"))\n\n server = self.start_server(\"hello world\", 200)\n try:\n self.write_dyn_config(\n \"test.yml\", self.http_cfg(\"myid\", \"http://localhost:{}\".format(server.server_port)))\n\n self.wait_until(lambda: self.log_contains(\n \"Starting reload procedure, current runners: 1\"))\n\n self.wait_until(lambda: 
self.output_has(lines=1))\n\n self.proc.check_kill_and_wait()\n finally:\n server.shutdown()", "def test_set_config__success(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n project_config_manager._set_config(test_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n\n self.assertIsInstance(\n project_config_manager.optimizely_config,\n optimizely_config.OptimizelyConfig\n )", "def conf_update(self):\n pass", "def test_set_config__twice__with_diff_content(self):\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: None. New revision number: 1.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('1', project_config_manager.optimizely_config.revision)\n\n mock_logger.reset_mock()\n mock_notification_center.reset_mock()\n\n # Call set config again\n other_datafile = json.dumps(self.config_dict_with_multiple_experiments)\n project_config_manager._set_config(other_datafile)\n mock_logger.debug.assert_called_with(\n 'Received new datafile and updated config. ' 'Old revision number: 1. 
New revision number: 42.'\n )\n self.assertEqual(1, mock_logger.debug.call_count)\n mock_notification_center.send_notifications.assert_called_once_with('OPTIMIZELY_CONFIG_UPDATE')\n self.assertEqual('42', project_config_manager.optimizely_config.revision)", "def test_config_overwrites():\n basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", '..'))\n temppath = '/tmp/'\n\n conf = core.Config(datapath=temppath)\n\n assert conf.basepath.lower() == basepath.lower()\n assert conf.datapath.lower() == temppath.lower()", "def test_update_configuration(self):\n\n ts_name = 'test-update-1'\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertTrue(configuration.default)\n self.assertEquals(configuration.retentions, granularities.RETENTIONS_GRANULARITY)\n self.assertEquals(configuration.timezone, granularities.DEFAULT_TIMEZONE)\n self.assertEquals(configuration.aggregation_method,\n aggregations.DEFAULT_AGGREGATION)\n\n custom_tz = 'America/New_York'\n custom_agg = aggregations.AGGREGATION_LAST\n custom_ret = granularities.RETENTIONS_GRANULARITY\n custom_ret[granularities.SECOND] = 3 * 365 * 12 * 30 * 24 * 60 * 60\n timeserie_configuration.update_timeserie_configuration(\n self.get_local_dynamo_cli(), ts_name, custom_tz, custom_agg, custom_ret)\n\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertFalse(configuration.default)\n self.assertEquals(configuration.retentions, custom_ret)\n self.assertEquals(configuration.timezone, custom_tz)\n self.assertEquals(configuration.aggregation_method, custom_agg)", "def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)", "def test_config_write(get_config, default_config):\n cfg = get_config(Config, default_config('sys'))\n\n try:\n cfg.write()\n except Exception as e:\n pytest.fail(f'exception raised as\\n{e}')", "def test_update_lastID():\n config_info = read_config()\n config_info['lastID'] = 0\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['lastID'] == 0", "def test_new_config(self, context, permissions, wizard):\n context.config_exists.return_value = False\n permissions.return_value = True\n wizard.return_value = \"/some/file/path\"\n\n runner = CliRunner()\n result = runner.invoke(cli_node_new_configuration, [\n \"--name\", \"some-name\",\n \"--environment\", \"application\"\n ])\n\n # check that info message is produced\n self.assertEqual(result.output[:6], \"[info]\")\n\n # check OK exit code\n self.assertEqual(result.exit_code, 0)", "def test_get_config(default_config, tmp_path):\n abcconfig.write_config(default_config, configpath=tmp_path)\n config = abcconfig.get_config(configpath=tmp_path)\n assert config == default_config", "def test_global_config_persistence():\n gc = GlobalConfig()\n\n # Track old one\n old_analytics_opt_in = 
gc.analytics_opt_in\n\n # Toggle it\n gc.analytics_opt_in = not old_analytics_opt_in\n\n # Initialize new config\n gc = GlobalConfig()\n\n # It still should be equal to the old value, as we have not saved\n assert old_analytics_opt_in == gc.analytics_opt_in\n\n # Get raw config\n raw_config = yaml_utils.read_json(os.path.join(APP_DIR, GLOBAL_CONFIG_NAME))\n assert raw_config[\"analytics_opt_in\"] == old_analytics_opt_in", "def update(self):\n self.save_config_file()", "def _on_config_changed(self, _):\n self._configure_pod()", "def test_config_ok_config(self):\n test_data = (\"[gnupg]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"\\n\"\n \"[data]\\n\"\n \"\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\"\n \"\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data)\n config = Config(\"test_config.conf\")\n self.assertIn(\"gnupg\", config.config.sections())\n self.assertIn(\"amazon-s3\", config.config.sections())\n self.assertEqual(config.config.get(\n \"gnupg\", \"recipients\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"gnupg\", \"signer\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"access_key\"), \"ACCESSKEY\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"secret_access_key\"), \"SECRETACCESSKEY\")\n self.assertEqual(config.config.get(\n \"data\", \"bucket\"), \"DATABUCKET\")\n self.assertEqual(config.config.get(\n \"metadata\", \"bucket\"), \"METADATABUCKET\")\n os.remove(\"test_config.conf\")", "def test_logging_config(self):\n topdir = os.path.dirname(os.path.dirname(__file__))\n # logging config from default\n os.system('rm %s/logging.conf' % topdir)\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)\n # logging config from file\n os.system('cp %s/logging.conf.sample %s/logging.conf' %\n (topdir, topdir))\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)", "def _change_conf_check(mds_config):\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))" ]
[ "0.72591335", "0.72113305", "0.71514034", "0.7107235", "0.7026826", "0.69808435", "0.69801", "0.6910698", "0.6887558", "0.68464094", "0.6824999", "0.68176496", "0.67969394", "0.67067325", "0.6699056", "0.6610438", "0.6609877", "0.6593223", "0.6569542", "0.6516883", "0.65156054", "0.64069116", "0.6402888", "0.63679236", "0.6356955", "0.63513726", "0.63385147", "0.6323897", "0.63080955", "0.6299405" ]
0.75435835
0
Load the draft results.
def test_load_draft(league): draft = league.draft_results() assert(len(draft) == 144) #mcdavid 1st assert(draft[0]['player_key'] == '396.p.6743') # carter hart 67th assert(draft[66]['player_key'] == '396.p.7156') # zadorov last assert(draft[-1]['player_key'] == '396.p.5995')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_enriched_results(self):\n return super().load_results()", "def load(self):\n self.results = pickle_load('results', self.main_dir)", "def run(self):\n results = self.fetch()\n return results", "def load_results_internal(self):\r\n filename = f\"{self.search_internal_path}/results_internal.dill\"\r\n\r\n with open_(filename, \"rb\") as f:\r\n return dill.load(f)", "def stage_draft_rulings(self):\r\n rulings = pd.read_excel(self.draft_ruling_path)\r\n for k in rulings.keys():\r\n rulings[k].fillna(value=\"\", inplace=True)\r\n rulings = rulings.to_dict(\"records\")\r\n id_to_ruling = dict(\r\n map(lambda r: (self.id(r), r), rulings)\r\n )\r\n u.cache_results(id_to_ruling, self.staged_ruling_path)", "def load_raw_results(self):\n if not self.setup.complete():\n raise AttributeError(\"Import setup is not complete\")\n access = DataImport(self.setup)\n if access.data_loaded:\n self.raw_results = access.results\n self.set_start_stop_time()\n return True\n return False", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. This is more effecient than creating copies.\n self._mangle_loaded_res(res)", "def load_rentedout():", "def drafts():\n query = Entry.drafts().order_by(Entry.last_mod_date.desc())\n return object_list('index.html', query)", "def _load(self):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n LOG.debug(\"Loading %s\" % self.branch_id)\n doc = self._client.getjson(path=\"/users/%(username)s/repos/%(reponame)s\"\n \"/branches/%(name)s\" % context)\n LOG.debug(\"doc loaded: %r\" % doc)\n slice_id = \"%(username)s/%(reponame)s/%(slice_id)s\" % {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"slice_id\": doc[\"slice_id\"]\n }\n self._slice = self._client.slice(slice_id)\n self._packages = doc[\"packages\"]", "def load_results(self):\n self.find_benchmark_directories()\n for (benchmark, producer), result in self.results.items():\n print('Reading results for ' + benchmark + ' ' + producer)\n if not result.directory:\n print('No results found for ' + benchmark + ' ' + producer)\n else:\n print('Generating report for: ' + result.directory)\n report = Report(result.directory)\n result.reports = report.generate()", "def nflffdraftresults(self, irc, msg, args, opttype):\n \n validtypes = ['QB','TQB','RB','WR','TE','DT','DE','LB','CB','S','D/ST','K','P','HC','ALL']\n \n if opttype and opttype not in validtypes:\n irc.reply(\"Type must be one of: %s\" % validtypes)\n return\n\n url = self._b64decode('aHR0cDovL2dhbWVzLmVzcG4uZ28uY29tL2ZmbC9saXZlZHJhZnRyZXN1bHRz')\n \n if opttype:\n url += '?position=%s' % opttype\n \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'class':'tableBody'})\n headers = table.findAll('tr')[2]\n rows = 
table.findAll('tr')[3:13]\n\n append_list = []\n\n for row in rows:\n rank = row.find('td')\n player = rank.findNext('td')\n avgpick = player.findNext('td').findNext('td')\n append_list.append(rank.getText() + \". \" + ircutils.bold(player.getText()) + \" (\" + avgpick.getText() + \")\")\n\n descstring = string.join([item for item in append_list], \" | \") # put the list together.\n\n if not opttype:\n opttype = 'ALL'\n\n title = \"Top 10 drafted at: %s\" % opttype\n output = \"{0} :: {1}\".format(ircutils.mircColor(title, 'red'), descstring)\n irc.reply(output)", "def trigger_refresh(self):\n self.get_selected()\n self.manage_loading(loading=True)\n self.current_feed.fetch_content(unread_only=self.show_unread_only)\n self.manage_actions()", "def drafts(self):\n if self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def load(self):\n self._load()", "def load_submission_schedule():\n logger.info('Loading submission window schedule data')\n load_submission_window_schedule()", "def results(self):\n if not self._results:\n self.read_results()\n return self._results", "def get_results(self):\n\n super().get_results()", "def _fetch_data(self):\n pass", "def results(self):\n pass", "def load(self, index):\n selected = self.games[index]\n try:\n with open(path.join(self.saved_games, selected)) as f:\n self.game_data['game_data'] = json.load(f)\n self.game_data['file_name'] = selected\n self.game_data['loaded'] = True\n self.game_data['next'] = False\n super().set_state(TRANSITION_OUT)\n logger.info('Load : %s', selected)\n except EnvironmentError as e:\n logger.exception(e)\n\n try:\n self.load_minimap()\n except EnvironmentError as e:\n logger.exception(e)", "def load_data(self):", "def _get_results(self, res):\n self.async_res = res\n self.full_res = res.wait() # pragma: no cover\n self.trained = True # pragma: no cover\n self.mod_id = self.full_res['model_id'] # pragma: no cover\n self.data_id = self.full_res['data_id'] # pragma: no cover\n self.params_dump = self.full_res['params_dump'] # pragma: no cover\n if self.verbose > 0: # pragma: no cover\n print(\"Result {} | {} ready\".format(\n self.mod_id, self.data_id)) # pragma: no cover", "def __call__(self, results):\n results = super().__call__(results)\n if self.with_bbox_3d:\n results = self._load_bboxes_3d(results)\n if results is None:\n return None\n if self.with_bbox_depth:\n results = self._load_bboxes_depth(results)\n if results is None:\n return None\n\n if self.with_corners_2d:\n results = self._load_corners_2d(results)\n if self.with_label_3d:\n results = self._load_labels_3d(results)\n if self.with_attr_label:\n results = self._load_attr_labels(results)\n if self.with_mask_3d:\n results = self._load_masks_3d(results)\n if self.with_seg_3d:\n results = self._load_semantic_seg_3d(results)\n if self.with_tokens:\n results = self._load_tokens(results)\n\n return results", "def results(self):\r\n pass", "def getResults():", "def run_query(self):\n query_dictionary_file_lines = self.get_dictionary_file_lines_for_keywords()\n result_postings_list = merge_lists([result.postings_list for result in query_dictionary_file_lines])\n self.result = result_postings_list\n print(\"Found {} matching documents\".format(len(result_postings_list)))", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = 
tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "def load(self):\n self.tickets = Ticket.objects.select_related(\n 'input_module'\n ).filter(**self.ticket_data_query()).filter(\n Q(created__gte=self.date_start, created__lte=self.date_end) |\n Q(closed_date__gte=self.date_start, closed_date__lte=self.date_end) |\n Q(assigned_date__gte=self.date_start, assigned_date__lte=self.date_end)\n )\n\n self.closed_tickets = self.tickets.filter(\n closed_date__gte=self.date_start,\n closed_date__lte=self.date_end\n )\n\n self.assigned_tickets = self.tickets.filter(\n assigned_date__gte=self.date_start,\n assigned_date__lte=self.date_end\n )\n\n self.closed = self.closed_tickets.count()\n self.closed_by_users: int = 0\n\n # in minutes (time between open and assignment)\n avg_pre_processing = [0]\n\n # in minutes (time between open and close)\n avg_full_processing = [0]\n\n # how many messages before closing the tickets\n avg_msg_to_close = [0]\n\n # how many minutes between taken by operators and closed\n avg_time_created_taken = [0]\n\n # Time between creating a ticket and operator first message.\n first_time_op_answer = [0]\n\n # in a single loop I have to process\n # whatever, otherwise it will takes too long. Efficiency may be huge, we know.\n tmsgs = TicketReply.objects.filter(\n ticket__pk__in = self.tickets.values_list(\"pk\", flat=True)\n ).values_list(\"ticket__pk\", \"created\", \"owner\")\n\n operators_pks = self.get_operators_pks()\n\n content_type = ContentType.objects.get_for_model(Ticket)\n\n for i in self.tickets:\n ticket_time = timezone.localtime(i.created).strftime(\"%d-%m-%Y %H\")\n ticket_day, ticket_hour = ticket_time.split(\" \")\n ticket_day_eu = timezone.localtime(i.created).strftime(\"%Y-%m-%d\")\n if not self.ticket_per_day_hour.get(ticket_day):\n # {'01-01-2022': {'total': int, 'hours': {0: int, ... 
23: int}}}\n self.ticket_per_day_hour[ticket_day] = {'total': 0, 'hours': {}}\n\n self.ticket_per_day_hour[ticket_day]['total'] += 1\n if not self.ticket_per_day_hour[ticket_day][\"hours\"].get(ticket_hour):\n self.ticket_per_day_hour[ticket_day][\"hours\"][ticket_hour] = 0\n self.ticket_per_day_hour[ticket_day][\"hours\"][ticket_hour] += 1\n\n # if not self.ticket_per_day.get(ticket_day_eu):\n # self.ticket_per_day[ticket_day_eu] = 0\n # self.ticket_per_day[ticket_day_eu] += 1\n\n # put the ticket in a configured time slot\n if i.created >= self.date_start and i.created <= self.date_end:\n for slot, hour_range in STATS_TIME_SLOTS.items():\n if int(ticket_hour) in hour_range:\n self.ticket_per_weekday[\n timezone.localtime(i.created).strftime(calendar.day_name.format)\n ][slot - 1] += 1\n break\n\n if i.is_notification:\n self.notifications += 1\n else:\n _msgs = tmsgs.filter(ticket=i)\n op_msgs = _msgs.filter(\n owner__pk__in = operators_pks\n ).values_list(\"created\", flat=True)\n if _msgs and op_msgs:\n first_time_op_answer.append(\n (op_msgs[0] - i.created).seconds\n )\n\n # Cosa si vuole mostrare?\n # Quanti ticket si trovavano in stato \"assegnato\" in quel giorno?\n # if not i.has_been_taken():\n # O quanti ticket sono stati presi in carico in quel giorno?\n # if not i.assigned_date and not i.is_closed:\n if i.created >= self.date_start and i.created <= self.date_end:\n self.open += 1\n # if not self.open_day_serie.get(ticket_day):\n # self.open_day_serie[ticket_day] = 0\n # self.open_day_serie[ticket_day] += 1\n self.open_day_serie[timezone.localtime(i.created).strftime(\"%d-%m-%Y\")] += 1\n\n # elif not i.is_closed and i.assigned_date >= self.date_start and i.assigned_date <= self.date_end:\n if i.assigned_date and i.assigned_date >= self.date_start and i.assigned_date <= self.date_end:\n self.assigned += 1\n avg_pre_processing.append(\n (i.assigned_date - i.created).seconds\n )\n # if not self.assigned_day_serie.get(ticket_day):\n # self.assigned_day_serie[ticket_day] = 0\n # self.assigned_day_serie[ticket_day] += 1\n self.assigned_day_serie[timezone.localtime(i.assigned_date).strftime(\"%d-%m-%Y\")] += 1\n\n if i.closed_date and not i.is_closed:\n # if not self.reopened_day_serie.get(ticket_day):\n # self.reopened_day_serie[ticket_day] = 0\n # self.reopened_day_serie[ticket_day] += 1\n\n # get reopen time from first log action after closing\n reopen_log_entry = Log.objects.filter(content_type_id=content_type.pk,\n object_id=i.pk,\n action_time__gt=i.closed_date,\n action_time__lte=self.date_end,\n action_time__gte=self.date_start).first()\n if reopen_log_entry:\n self.reopened += 1\n self.reopened_day_serie[timezone.localtime(reopen_log_entry.action_time).strftime(\"%d-%m-%Y\")] += 1\n\n # elif i.closed_date and i in self.closed_tickets:\n if i.closed_date and i in self.closed_tickets:\n # is closed\n # if not self.closed_day_serie.get(ticket_day):\n # self.closed_day_serie[ticket_day] = 0\n # self.closed_day_serie[ticket_day] += 1\n self.closed_day_serie[timezone.localtime(i.closed_date).strftime(\"%d-%m-%Y\")] += 1\n if i.closed_by:\n # otherwise the user closed by himself\n _op_name = i.closed_by.__str__()\n if not self.closed_by_ops.get(_op_name, None):\n self.closed_by_ops[_op_name] = 0\n self.closed_by_ops[_op_name] += 1\n self.closed_by_ops_count += 1\n else:\n self.closed_by_users += 1\n\n avg_full_processing.append(\n (i.closed_date - i.created).seconds\n )\n if i.assigned_date:\n avg_time_created_taken.append(\n (i.closed_date - i.assigned_date).seconds\n )\n\n # get 
how many messages has been taken to close this ticket\n # excluding the closing message\n _mcount = tmsgs.filter(ticket=i).count()\n avg_msg_to_close.append(_mcount)\n\n _user_name = i.created_by.__str__()\n if not self.open_by_user.get(_user_name, None):\n self.open_by_user[_user_name] = 0\n self.open_by_user[_user_name] += 1\n\n # aggregation and details in hours\n self.avg_pre_processing_seconds = statistics.mean(avg_pre_processing)\n self.avg_pre_processing = int(self.avg_pre_processing_seconds / 60)\n\n self.avg_full_processing = int(statistics.mean(avg_full_processing) / 60)\n self.avg_msg_to_close = statistics.mean(avg_msg_to_close)\n self.first_time_op_answer_seconds = statistics.mean(first_time_op_answer)\n self.avg_first_time_op_answer = int(self.first_time_op_answer_seconds / 60)\n self.avg_time_created_taken = int(statistics.mean(avg_time_created_taken) / 60)\n\n # sort descending\n self.open_by_user = {k: v for k, v in sorted(self.open_by_user.items(), key=lambda item: item[1])}\n self.closed_by_ops = {k: v for k, v in sorted(self.closed_by_ops.items(), key=lambda item: item[1], reverse=True)}", "def _setData(self):\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n data_list = []\n results = self.query.all()\n \n # if no current parliament, no data\n try:\n parliament_id = model_utils.get_current_parliament().parliament_id\n except: \n return data_list\n #\n government_id = self.__parent__.government_id\n for result in results:\n data = {}\n data[\"qid\"] = \"g_%s\" % (result.group_id)\n data[\"subject\"] = result.short_name\n data[\"title\"] = \"%s (%s)\" % (result.short_name, result.type)\n data[\"result_item_class\"] = \"workflow-state-%s\" % (result.status)\n _url = \"/archive/browse/parliaments/obj-%s\" % (parliament_id)\n if type(result) == domain.Parliament:\n data[\"url\"] = url.set_url_context(_url)\n continue\n elif type(result) == domain.Committee:\n #data[\"url\"] = url + \"/committees/obj-\" + str(result.group_id) \n data[\"url\"] = url.set_url_context(\"/groups/%s/%s\" % (\n result.parent_group.group_principal_id,\n result.group_principal_id))\n elif type(result) == domain.PoliticalGroup:\n data[\"url\"] = url.set_url_context(\n \"%s/politicalgroups/obj-%s\" % (_url, result.group_id))\n elif type(result) == domain.Ministry:\n data[\"url\"] = url.set_url_context(\n \"%s/governments/obj-%s/ministries/obj-%s\" % (\n _url, government_id, result.group_id))\n else:\n data[\"url\"] = \"#\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list" ]
[ "0.6382324", "0.6246874", "0.5810889", "0.5807111", "0.57492447", "0.56064266", "0.5601259", "0.55607885", "0.54455495", "0.5442246", "0.5427821", "0.5390353", "0.53654075", "0.5347294", "0.53464603", "0.53339255", "0.53226596", "0.53156066", "0.5312531", "0.5308264", "0.52943623", "0.52930695", "0.527965", "0.5263135", "0.52562904", "0.5250532", "0.52485144", "0.5240229", "0.5226236", "0.5226052" ]
0.6256367
1
Return team roster at given date.
def get_team_roster(league): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def roster(\n self, ctx: commands.Context, season: Optional[YearFinder] = None, *, search: HockeyTeams\n ) -> None:\n season_str = None\n season_url = \"\"\n if season:\n if season.group(3):\n if (int(season.group(3)) - int(season.group(1))) > 1:\n return await ctx.send(_(\"Dates must be only 1 year apart.\"))\n if (int(season.group(3)) - int(season.group(1))) <= 0:\n return await ctx.send(_(\"Dates must be only 1 year apart.\"))\n if int(season.group(1)) > datetime.now().year:\n return await ctx.send(_(\"Please select a year prior to now.\"))\n season_str = f\"{season.group(1)}{season.group(3)}\"\n else:\n if int(season.group(1)) > datetime.now().year:\n return await ctx.send(_(\"Please select a year prior to now.\"))\n year = int(season.group(1)) + 1\n season_str = f\"{season.group(1)}{year}\"\n if season:\n season_url = f\"?season={season_str}\"\n if search is None:\n return await ctx.send(_(\"You must provide a valid current team.\"))\n rosters = {}\n players = []\n teams = [team for team in TEAMS if search.lower() in team.lower()]\n if teams != []:\n for team in teams:\n url = f\"{BASE_URL}/api/v1/teams/{TEAMS[team]['id']}/roster{season_url}\"\n async with self.session.get(url) as resp:\n data = await resp.json()\n if \"roster\" in data:\n for player in data[\"roster\"]:\n players.append(player[\"person\"][\"id\"])\n else:\n return await ctx.send(_(\"No team name was provided.\"))\n\n if players:\n await BaseMenu(\n source=PlayerPages(pages=players, season=season_str),\n cog=self,\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)\n else:\n if season:\n year = _(\" in the {season} season\").format(\n season=f\"{season.group(1)}-{season.group(3)}\"\n )\n else:\n year = \"\"\n await ctx.send(\n _(\"I could not find a roster for the {team}{year}.\").format(team=team, year=year)\n )", "def __call__(self, date):\n for game in self._games:\n if game.datetime.year == date.year and \\\n game.datetime.month == date.month and \\\n game.datetime.day == date.day:\n return game\n raise ValueError('No games found for requested date')", "def get_games_by_date(self, date):\n return self._db.Games.find({'date' : date})", "def date_leaderboard(cls, date, limit=False, cutoff=False):\r\n\r\n\t\tdate = DATES.to_ID(date)\r\n\r\n\t\tdate_rankings = []\r\n\r\n\t\tPLAYERS = cls.RESULTDAILY.keys()\r\n\r\n\t\tfor player in PLAYERS:\r\n\t\t\tscore, RM, RD, RP = cls.player_info(player, date, convert=True)\r\n\t\t\t\r\n\t\t\tdate_rankings.append([player, score, RM, RD, RP])\r\n\t\t\r\n\t\tdate_rankings = sorted(date_rankings, key=lambda m: m[1], reverse=True)\r\n\r\n\t\tif cutoff and min([p[3] for p in date_rankings]) < cls.RD_CUTOFF:\r\n\t\t\tdate_rankings = [p for p in date_rankings if p[3] < cls.RD_CUTOFF]\r\n\t\t\r\n\t\tif limit:\r\n\t\t\tdate_rankings = date_rankings[:limit]\r\n\t\t\r\n\t\treturn date_rankings", "def get_team_game_preview(self, team, date):\n abbr = convert_name(team, how='abbr')\n return self._db.Games.find({'date' : date,\n '$or' : [{'home' : abbr},\n {'away' : abbr}]})", "def get_next_game(today_game_date: datetime, team_id: int) -> dict:\n\n game_date = today_game_date.strftime(\"%Y-%m-%d\")\n tomorrow = (today_game_date + timedelta(days=1)).strftime(\"%Y-%m-%d\")\n end_date = (today_game_date + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n\n logging.info(\"Checking the schedule API endpoint for the next game.\")\n url = f\"schedule?teamId={team_id}&startDate={game_date}&endDate={end_date}\"\n\n response = api.nhl_api(url)\n if not 
response:\n return None\n\n next_game_json = response.json()\n next_game = next_game_json.get(\"dates\")[1].get(\"games\")[0]\n\n return next_game", "def for_date(self, date):\n return self.get(start_date__lte=date, end_date__gte=date)", "def starting_date(cls, player):\r\n\r\n\t\treturn cls.RESULTDAILY[player][0]", "async def reschedule(self, ctx, match_id: str, *, date: str):\n tournament = self.get_tournament(ctx.guild.id)\n try:\n new_date = tournament.parse_date(date, prefer_dates_from=\"future\")\n except ValueError:\n raise commands.UserInputError()\n if not new_date:\n raise commands.UserInputError()\n\n for bracket in tournament.brackets:\n if await self.reschedule_for_bracket(\n ctx,\n tournament,\n bracket,\n match_id,\n new_date,\n ):\n return\n raise tosurnament.InvalidMatchId()", "def get_schedule():\n startdate = '02/28/2020'\n enddate = '04/01/2020'\n return statsapi.schedule(start_date=startdate, end_date=enddate, team=134)", "def getMatchDate(self) -> str:\n return self.__getDataField(\"date\")", "def player_rank(cls, player, date):\r\n\r\n\t\ttry:\r\n\t\t\tP_RANKS = cls.RANKS[player]\r\n\t\texcept KeyError:\t# If player does not exist\r\n\t\t\treturn False\r\n\r\n\t\tinit_date = P_RANKS[0]\r\n\r\n\t\t# If player hadn't played yet by the date specified\r\n\t\tif date < init_date:\r\n\t\t\treturn False\r\n\t\t\r\n\t\tdate_ind = DATES.day_diff(date, init_date)\r\n\r\n\t\trank = P_RANKS[date_ind + 1]\r\n\t\t\r\n\t\treturn rank", "def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n 
current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)", "def get_race_date(today, time_range):\r\n max_date = today.replace(year = today.year + time_range)\r\n return get_date(\"When is your marathon?\", \"Race Date\", today, max_date)", "def get_team_roster_and_depth_charts(self, team_name):\n result = self._method_call(\"Players/{team}\", team=team_name)\n return result", "def return_football_season(date=datetime.datetime.today()):\n date_aux = subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def fetch_roster_data(\n round_number: int = None, verbose: int = 1\n) -> List[Dict[str, Any]]:\n if verbose == 1:\n print(f\"Fetching roster data for round {round_number}...\")\n\n data = fetch_afl_data(\"/rosters\", params={\"round_number\": round_number})\n\n if verbose == 1:\n if not any(data):\n print(\n \"No roster data was received. 
It's likely that the team roster page \"\n \"hasn't been updated for the upcoming round.\"\n )\n else:\n print(\"Roster data received!\")\n\n return data", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def get_rosters(year, output_file=True):\n year = str(year)\n\n # Get this year's playoff teams from config file\n current_year_playoff_teams = playoff_teams[year]\n\n # Get team attributes from config file\n playoff_team_attrs = {team: attrs for team, attrs in teams.items() if team in current_year_playoff_teams}\n\n # Positions of interest\n eligible_pos = ['WR', 'TE', 'QB', 'RB', 'FB', 'K', 'RB/WR']\n\n # For each playoff team get roster data from Pro Football Reference\n rosters = []\n for key, val in playoff_team_attrs.items():\n url = f'https://www.pro-football-reference.com/teams/{key}/{year}_roster.htm'\n print(f\"Fetching: {url}\")\n page = requests.get(url)\n df = pd.read_html(page.text.replace('<!--',''))[1] # Replace the value that interrupts HTML parsing\n df = df[df['Pos'].isin(eligible_pos)][['Player', 'Pos', 'G']]\n df['Team'] = val['teamFull'] # Add full team name ex: Buffalo Bills\n df['TeamMascot'] = df['Team'].apply(lambda x: x.split(' ')[-1]) # Mascot only\n df['TeamShort'] = val['teamShort'] # Abbreviated team name\n df['TeamKey'] = key.upper() # Pro football reference abbrev, these are weird\n rosters.append(df)\n \n df_combined = pd.concat(rosters)\n df_combined.columns = [c.lower() for c in df_combined.columns]\n\n if output_file:\n save_dir = prompt_save_location()\n file_path = os.path.join(save_dir, f\"rosters{year}.csv\")\n df_combined.to_csv(file_path, index = False)\n print(\"Output File Successfully Created!\")\n print(f\"Destination: {file_path}\")\n\n return df_combined", "def read_by_date(self, date=None):\n\n if not date:\n date = datetime.today()\n elif isinstance(date, str):\n date = datetime.strptime(date, \"%Y-%m-%d\")\n else:\n pass # Assume datetime object\n\n datestring = date.strftime(\"%Y-%m-%d\")\n filepath = Path(datadir(), 'ClubElo_{}.csv'.format(datestring))\n url = 'http://api.clubelo.com/{}'.format(datestring)\n\n if not filepath.exists():\n self._download_and_save(url, filepath)\n\n df = (pd.read_csv(str(filepath),\n parse_dates=['From', 'To'],\n infer_datetime_format=True,\n dayfirst=False\n )\n .rename(columns={'Club': 'team'})\n .replace({'team': TEAMNAME_REPLACEMENTS})\n .replace('None', np.nan)\n .assign(Rank=lambda x: 
x['Rank'].astype('float'))\n .assign(league=lambda x: x['Country'] + '_' + x['Level'].astype(str))\n .pipe(self._translate_league)\n .reset_index(drop=True)\n .set_index('team')\n )\n return df", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def getStream(self, date):\n\n return getMerraStream(date)", "def get_roster_players_via_api(self, team, season=None):\n # setting up empty list of players\n players = list()\n\n if season is None:\n season = str(retrieve_season())\n\n # creating stats api url with optional season parameter\n url = \"\".join((self.API_TEAM_SITE_PREFIX, str(team.team_id)))\n url_params = {\n 'expand': 'team.roster',\n 'season': \"%s%d\" % (season, int(season) + 1)\n }\n # retrieving data\n r = requests.get(url, params=url_params)\n team_data = r.json()\n\n if 'teams' not in team_data:\n logging.warn(\n \"+ %s not part of the league in %s/%d\" % (\n team, season, int(season) + 1))\n return players\n\n team_data = team_data['teams'][0]\n\n if 'roster' not in team_data:\n logging.warn(\n \"+ No roster found for %s/%d %s\" % (\n season, int(season) + 1, team))\n return players\n\n roster = team_data['roster']['roster']\n\n for plr_src in roster:\n # retrieving player if of current player in roster\n plr_id = plr_src['person']['id']\n # searching and optionally creating player with found player id\n plr = self.search_player_by_id(plr_id)\n players.append(plr)\n\n return players", "def _get_schedule_html_for_date(squadron_url: str, date_state: str) -> str:\n state = date_state.copy()# don't mutate the original\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n state['btnViewSched'] = 'View Schedule'\n html_string = _get_page_html(squadron_url, state, headers=headers)\n return html_string", "def get_games(season, date):\n url = \"http://live.nhl.com/GameData/SeasonSchedule-\" + season + \".json\"\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n games = []\n for game in data:\n if game[\"est\"][:8] == date:\n games.append(game)\n return games", "def dawn(self, date=None, local=True):\n\n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n dawn = self.astral.dawn_utc(date, self.latitude, self.longitude)\n\n if local:\n return dawn.astimezone(self.tz) \n else:\n return dawn", "def get_roster_players_with_data(self, team):\n # TODO: find usage for this function\n # getting html document with team's roster\n doc = self.get_html_document(team, 'roster')\n\n # retrieving player page urls, and player first and last names\n # from roster page\n urls = doc.xpath(\"//td[@class='name-col']/a[@href]/@href\")\n first_names = doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-col__item \" +\n \"name-col__firstName']/text()\")\n # using filter to get rid of empty strings after stripping string\n # elements\n # using replace to get rid of asterisk indicating players on injury\n # reserve\n last_names = filter(\n None, [\n x.replace(\"*\", \"\").strip() if x else None for x in doc.xpath(\n \"//td[@class='name-col']/a/div/span[@class='name-\" +\n \"col__item name-col__lastName']/text()\")])\n\n # retrieving further player data from roster page\n # player jersey numbers\n numbers = doc.xpath(\n \"//td[@class='number-col fixed-width-font']/text()\")\n # player positions\n positions = [x[:1] for x in doc.xpath(\n \"//td[@class='position-col fixed-width-font']/text()\")]\n # shooting hands, unfortunately goaltender's glove hands aren't\n # 
listed any longer\n hands = doc.xpath(\"//td[@class='shoots-col fixed-width-font']/text()\")\n # player heights (in ft. + in.)\n heights = doc.xpath(\n \"//td[@class='height-col fixed-width-font']/span[2]/text()\")\n # player weights (in lbs.)\n weights = [int(x) if x.isdigit() else 0 for x in doc.xpath(\n \"//td[@class='weight-col fixed-width-font']/text()\")]\n # player dates of birth\n dobs = doc.xpath(\"//td[@class='birthdate-col']/span[2]/text()\")\n hometowns = doc.xpath(\"//td[@class='hometown-col']/text()\")\n\n players = list()\n\n for (\n first_name, last_name, url, _, position, _, _, _, _, _\n ) in zip(\n first_names, last_names, urls, numbers, positions,\n hands, weights, heights, dobs, hometowns\n ):\n # retrieving nhl id from player page url\n plr_id = int(url.split(\"-\")[-1])\n\n # trying to find player in database\n plr = Player.find_by_id(plr_id)\n # creating player if not already in database\n if plr is None:\n plr = self.create_player(\n plr_id, last_name, first_name, position)\n print(\"%s created...\" % plr)\n\n players.append(plr)\n\n return players", "def getTeam(self):\n return self.team", "def get_upcoming(self):\n try:\n race = next(\n (\n race\n for race in self.race_weekends\n if race[\"sessions\"][\"race\"] >= self.date\n ),\n self.race_weekends[-1],\n )\n return race\n except Exception:\n logger.exception(f\"Error getting upcoming race for year {self.date.year}\")", "def get_season(\n current_date: date, hemisphere: str, season_tracking_type: str\n) -> str | None:\n\n if hemisphere == \"equator\":\n return None\n\n if season_tracking_type == TYPE_ASTRONOMICAL:\n spring_start = ephem.next_equinox(str(current_date.year)).datetime()\n summer_start = ephem.next_solstice(str(current_date.year)).datetime()\n autumn_start = ephem.next_equinox(spring_start).datetime()\n winter_start = ephem.next_solstice(summer_start).datetime()\n else:\n spring_start = datetime(2017, 3, 1).replace(year=current_date.year)\n summer_start = spring_start.replace(month=6)\n autumn_start = spring_start.replace(month=9)\n winter_start = spring_start.replace(month=12)\n\n if spring_start <= current_date < summer_start:\n season = STATE_SPRING\n elif summer_start <= current_date < autumn_start:\n season = STATE_SUMMER\n elif autumn_start <= current_date < winter_start:\n season = STATE_AUTUMN\n elif winter_start <= current_date or spring_start > current_date:\n season = STATE_WINTER\n\n # If user is located in the southern hemisphere swap the season\n if hemisphere == NORTHERN:\n return season\n return HEMISPHERE_SEASON_SWAP.get(season)" ]
[ "0.6794363", "0.63804543", "0.5813007", "0.5741318", "0.57277983", "0.5647605", "0.5467505", "0.5346135", "0.5336544", "0.53275824", "0.53005236", "0.5272862", "0.5261342", "0.521931", "0.5192084", "0.514297", "0.51121926", "0.50987285", "0.5097064", "0.50702137", "0.50670063", "0.5008724", "0.49876702", "0.4974747", "0.49680772", "0.49635786", "0.49604803", "0.49472487", "0.49153125", "0.49094445" ]
0.65519655
1
Calibrate the chemical shifts of each spin in the peak list.
def calibrate_peaklist(peaklist, calibration, attr='shift'): if len(calibration) != peaklist.dims: raise ValueError('incorrect calibration list length') for peak in peaklist: for spin, cal in zip(peak, calibration): shift = getattr(spin, attr) shift -= cal setattr(spin, attr, shift) return peaklist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def apply_calibration(self, cal):\n\n n_edges = len(self.channels) + 1\n channel_edges = np.linspace(-0.5, self.channels[-1] + 0.5, num=n_edges)\n self.bin_edges_kev = cal.ch2kev(channel_edges)", "def calibration_wheel(self):\n self.spectrum = self.spectrum", "def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)", "def calibrate(self):\n\t\twl = BLi.getWavelength()\n\t\tif abs(self.stokes()) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset2(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset4(yyy)\n\t\t\tself.offset9(self.dettrans())\n\t\telif abs(self.stokes()-90.) <= .5:\n\t\t\txxx=self.sign()*180/pi*asin( wl/(2*self.dspace)) - (self.thp())\n\t\t\tself.offset3(-xxx)\n\t\t\tyyy=self.tthp()-self.sign()*2*180/pi*asin(wl/(2*self.dspace))-self.offset5()\n\t\t\tself.offset8(yyy)\n\t\t\tself.offset10(self.dettrans())\n\t\telse:\n\t\t\tprint \"Can't calibrate at stokes=\",self.stokes()\n\t\treturn [self.sign(),self.offset2(), self.offset3(),self.offset4(),self.offset5(),self.offset8(),self.offset9(),self.offset10()]", "def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n # compute the matrix for the scale and rotation correction\n shift = (np.asarray(shift) - np.dot(self._wcslin.wcs.crpix, matrix) +\n self._wcslin.wcs.crpix)\n\n matrix = inv(matrix).T\n\n cwcs = self._wcs.deepcopy()\n\n # estimate step for numerical differentiation. 
We need a step\n # large enough to avoid rounding errors and small enough to get a\n # better precision for numerical differentiation.\n # TODO: The logic below should be revised at a later time so that it\n # better takes into account the two competing requirements.\n crpix1, crpix2 = self._wcs.wcs.crpix\n hx = max(1.0, min(20.0, (crpix1 - 1.0) / 100.0,\n (self._wcs.pixel_shape[0] - crpix1) / 100.0))\n hy = max(1.0, min(20.0, (crpix2 - 1.0) / 100.0,\n (self._wcs.pixel_shape[1] - crpix2) / 100.0))\n\n # compute new CRVAL for the image WCS:\n crpixinref = self._wcslin.wcs_world2pix(\n self._wcs.wcs_pix2world([self._wcs.wcs.crpix], 1), 1)\n crpixinref = np.dot(crpixinref - shift, matrix.T).astype(np.float64)\n self._wcs.wcs.crval = self._wcslin.wcs_pix2world(crpixinref, 1)[0]\n self._wcs.wcs.set()\n\n # approximation for CD matrix of the image WCS:\n (U, u) = _linearize(cwcs, self._wcs, self._wcslin, self._wcs.wcs.crpix,\n matrix, shift, hx=hx, hy=hy)\n self._wcs.wcs.cd = np.dot(self._wcs.wcs.cd.astype(np.longdouble),\n U).astype(np.float64)\n self._wcs.wcs.set()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)", "def calibrate_file(self, calibration_slope, calibration_offset):\n\n self.data.average_spectrum = (calibration_slope * self.data.average_spectrum \n + calibration_offset)\n\n individual_wavelength = np.zeros(2048)\n individual_slope = np.zeros(2048)\n individual_offset = np.zeros(2048)\n\n for i_wavelength in range(2048):\n individual_wavelength[i_wavelength] = self.data.wavelength[\n i_wavelength * self.header.zero_fill]\n individual_slope[i_wavelength] = calibration_slope[\n i_wavelength * self.header.zero_fill]\n individual_offset[i_wavelength] = calibration_offset[\n i_wavelength * self.header.zero_fill]\n\n index = np.argsort(individual_wavelength)\n individual_wavelength = individual_wavelength[index]\n self.data.individual_wavelength = individual_wavelength\n average_spectrum = self.data.average_spectrum[index]\n\n i_min = np.argmin(abs(individual_wavelength - 8.0))\n i_max = np.argmin(abs(individual_wavelength - 14.0))\n\n for i in range(self.header.number_of_coadds):\n i_center_burst = np.argmax(np.absolute(self.data.interferogram[i]))\n\n size = self.header.interferogram_size\n interferogram_shift = size/2 - i_center_burst\n\n self.data.interferogram[i] = np.roll(self.data.interferogram[i], \n interferogram_shift)\n self.data.interferogram[i] = self.data.interferogram[i][\n size/2-2048:size/2+2048]\n\n window_fn = np.hanning(4096)\n \n spectrum = np.fft.fft(self.data.interferogram[i] * window_fn)\n spectrum = spectrum/3300\n spectrum = individual_slope * np.absolute(spectrum[0:2048]\n ) + individual_offset\n spectrum = spectrum[index]\n\n self.data.spectrum.append(spectrum)", "def calibration(self, cal: int, /) -> None:", "def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT", "def updateAllShifts(shiftList):\n \n for shift in shiftList.measurements:\n averageShiftValue(shift)", "def expandcal(self):\n ind=np.zeros(self.spec.shape[0]).astype(int)\n for k in range(self.nscan):\n ind[self.getscanind(k)]=k\n ind[self.getcalind(k)]=k\n return ind", "def calibrate(self, cal=1.0, pol_eff=1.0):\n \n if self.ncomp == 1:\n self.data *= cal\n else:\n self.data[0] *= cal\n self.data[1] *= cal * pol_eff\n self.data[2] *= cal * pol_eff\n\n return self", "def updatePeakDimShifts(peakDim):\n\n for contrib in 
peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def calibrate(): \n \n # Calibrate of the run using beam data. Creates a folder cal-files/caltag \n # containing all calibration data. \n CalObj = Calibration(steerfiles=steerfiles, name=localcaltag + '-cal') \n\n # Set Beam energy\n CalObj.set_beam_momentum(beamenergy)\n\n # Get gearfile and set air as DUT material\n localgearfile = CalObj.get_filename('gear.xml')\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='radLength', value=304000.0)\n \n # Create list of calibration steps \n calpath = create_calibration_path(CalObj)\n \n # Run the calibration steps \n CalObj.calibrate(path=calpath,ifile=rawfile_air,caltag=localcaltag)", "def calibrate(self):\n if self.iCAL_required:\n logger.info(\"iCAL-sensitive registers were modified, performing calibration...\")\n return self._run_ical()\n else:\n logger.info(\"iCAL-sensitive registers were not modified, skipping calibration...\")\n return 0 # Still success", "def calibrate(self):\n import time\n\n CALIBRATE_SLEEP = 0.75\n\n self.change_power(-self._calpow)\n encprev, encnow = 0, None\n while encprev != encnow:\n encprev = encnow\n time.sleep(CALIBRATE_SLEEP)\n encnow = self._bp.get_motor_encoder(self._port)\n self._pmin = encnow\n self.change_power(0)\n\n self.change_power(self._calpow)\n encprev, encnow = 0, None\n while encprev != encnow:\n encprev = encnow\n time.sleep(CALIBRATE_SLEEP)\n encnow = self._bp.get_motor_encoder(self._port)\n self._pmax = encnow\n self.change_power(0)\n\n if self._pmax == self._pmin:\n raise Exception('motor {} does not move'.format(self._port))\n\n self._pinit = (self._pmax + self._pmin) * 0.5\n time.sleep(0.5)\n self.to_init_position()", "def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)", "def calibrate_sensors(self):\n for j in range(0, 10):\n self.read_sensors()\n for i in range(0, self.NUM_SENSORS):\n if self.calibratedMax[i] < self.sensorValues[i]:\n self.calibratedMax[i] = self.sensorValues[i]\n if self.calibratedMin[i] > self.sensorValues[i] and self.sensorValues[i] > 30:\n self.calibratedMin[i] = self.sensorValues[i]", "def check_calcs_v2(list_mols, data_file=\"\", max_restart=False, depth='medium', frc=False):\n import warnings\n from gausspy.gaussian_job_manager import server_data_unequal\n from gausspy.data_extract_utils import latest_restarts, import_moldata, load_from_server\n from gausspy.data_extract_utils import oniom_components_on_server, export_moldata, clean_local_files\n from gausspy import oniom_utils\n\n if max_restart:\n list_mols = latest_restarts(list_mols)\n\n #if we are forcing we ignore previously saved data\n if not frc:\n current_data = import_moldata(data_file)\n else:\n current_data = []\n\n #check saved data against current list of molecules\n current_labels = [m.calc.label for m in current_data]\n mol_labels = [m.calc.label for m in list_mols]\n mismatched_labels = [label for label in current_labels 
if label not in mol_labels]\n\n if mismatched_labels:\n warnings.warn(\n RuntimeWarning(\n \"Calculations: {m} in data_file do not match molecules passed to check_calcs\".format(\n m=\" \".join(mismatched_labels))\n )\n )\n\n #extract calculation data from the datafile (note at the moment we are not extracting the non calculation part\n # which means that if the calculation modifies the original ase object those changes will be lost\n for saved_mol in current_data:\n try:\n ind = mol_labels.index(saved_mol.calc.label)\n #loading the entire object = list_mols[ind] = saved_mod\n #but because of python's reference passing behaviour this would mean check_calcs_v2 would not act like check_calcs (think restarted incomplete calculations)\n list_mols[ind].calc = saved_mol.calc\n except ValueError:\n pass\n\n if frc:\n update_mask = [True for i in range(len(list_mols))]\n else:\n update_mask =server_data_unequal(list_mols)\n\n mols_to_update = [list_mols[i] for i in range(len(list_mols)) if update_mask[i]]\n for mol in mols_to_update:\n #if log files on home directory and server are different we copy those files to the home directory\n mol = load_from_server(mol, depth)\n\n #if we have an oniom calculation we check to see if the components of the calculation have been run and if so we retrieve them and attach them to the calculation object\n if 'oniom' in mol.calc.method and oniom_components_on_server(mol):\n init_mol = copy.deepcopy(mol)\n init_mol.calc.label += '_init'\n init_mol = load_from_server(init_mol, depth)\n mol.calc.components = oniom_utils.oniom_comp_calcs(init_mol)\n mol.calc.components = check_calcs_v2(mol.calc.components, depth=depth, max_restart=True, frc=frc)\n\n if data_file and any(update_mask):\n export_moldata(data_file, list_mols)\n clean_local_files(mols_to_update)\n\n return list_mols", "def calibrate(msname, parset, skymodel, logname_root, use_timecorr=False,\n time_block=None, ionfactor=0.5, outdir='.', instrument='instrument',\n solint=None, flag_filler=False, ncores=1):\n log = logging.getLogger(\"Calib\")\n\n if not use_timecorr:\n subprocess.call(\"calibrate-stand-alone -f {0} {1} {2} > {3}/logs/\"\n \"{4}_peeling_calibrate.log 2>&1\".format(msname, parset, skymodel,\n outdir, logname_root), shell=True)\n subprocess.call(\"cp -r {0}/instrument {0}/instrument_out\".format(msname),\n shell=True)\n else:\n # Perform a time-correlated solve\n dataset = msname\n blockl = time_block\n anttab = pt.table(dataset + '/ANTENNA', ack=False)\n antlist = anttab.getcol('NAME')\n instrument_orig = msname+'/instrument'\n instrument_out = msname+'/instrument_out'\n if solint < 1:\n solint = 1\n\n\n # Get time per sample and number of times\n t = pt.table(dataset, readonly=True, ack=False)\n for t2 in t.iter([\"ANTENNA1\",\"ANTENNA2\"]):\n if (t2.getcell('ANTENNA1',0)) < (t2.getcell('ANTENNA2',0)):\n timepersample = t2[1]['TIME']-t2[0]['TIME'] # sec\n trows = t2.nrows()\n t.close()\n\n # Calculate various intervals\n fwhm_min, fwhm_max = modify_weights(msname, ionfactor, dryrun=True) # s\n if time_block is None:\n # Set blockl to enclose the max FWHM and be divisible by 2 and by solint\n blockl = int(np.ceil(fwhm_max / timepersample / 2.0 / solint) * 2 * solint)\n tdiff = solint * timepersample / 3600. # difference between solutions in hours\n tlen = timepersample * np.float(blockl) / 3600. 
# length of block in hours\n nsols = int(np.ceil(trows / solint)) # number of solutions\n\n log.info('Performing time-correlated peeling for {0}...\\n'\n ' Time per sample: {1} (s)\\n'\n ' Samples in total: {2}\\n'\n ' Block size: {3} (samples)\\n'\n ' {4} (s)\\n'\n ' Number of solutions: {5}\\n'\n ' Ionfactor: {6}\\n'\n ' FWHM range: {7} - {8} (s)'.format(msname, timepersample,\n trows, blockl, tlen*3600.0, nsols, ionfactor, fwhm_min, fwhm_max))\n\n # Make a copy of the master parmdb to store time-correlated solutions\n # in, resetting and flagging as needed\n os.system('rm -rf ' +instrument_out)\n clean_and_copy_parmdb(instrument_orig, instrument_out, blockl,\n flag_filler=flag_filler, msname=msname, timepersample=timepersample)\n\n # Calibrate the chunks\n chunk_list = []\n tlen_mod = tlen / 2.0 # hours\n chunk_mid_start = blockl / 2 / solint\n chunk_mid_end = nsols - blockl / 2 / solint\n for c in range(nsols):\n chunk_obj = Chunk(dataset)\n chunk_obj.chunk = c\n chunk_obj.outdir = outdir\n if c < chunk_mid_start:\n chunk_obj.trim_start = True\n chunk_obj.t0 = 0.0 # hours\n chunk_obj.t1 = np.float(chunk_obj.t0) + tlen_mod # hours\n tlen_mod += tdiff # add one solution interval (in hours)\n elif c > chunk_mid_end:\n tlen_mod -= tdiff # subtract one solution interval (in hours)\n chunk_obj.trim_start = False\n chunk_obj.t0 = tdiff*float(chunk_obj.chunk - chunk_mid_start) # hours\n chunk_obj.t1 = np.float(chunk_obj.t0) + tlen_mod # hours\n else:\n chunk_obj.trim_start = False\n chunk_obj.t0 = tdiff*float(chunk_obj.chunk - chunk_mid_start) # hours\n chunk_obj.t1 = np.float(chunk_obj.t0) + tlen # hours\n chunk_obj.ionfactor = ionfactor\n chunk_obj.parset = parset\n chunk_obj.skymodel = skymodel\n chunk_obj.logname_root = logname_root + '_part' + str(c)\n chunk_obj.solnum = chunk_obj.chunk\n chunk_obj.output = chunk_obj.outdir + '/part' + str(chunk_obj.chunk) + os.path.basename(chunk_obj.dataset)\n chunk_obj.ntot = nsols\n chunk_list.append(chunk_obj)\n\n # Split the dataset into parts\n for chunk_obj in chunk_list:\n split_ms(chunk_obj.dataset, chunk_obj.output, chunk_obj.t0, chunk_obj.t1)\n\n # Calibrate in parallel\n pool = multiprocessing.Pool(ncores)\n pool.map(calibrate_chunk, chunk_list)\n pool.close()\n pool.join()\n\n # Copy over the solutions to the final parmdb\n pdb = lofar.parmdb.parmdb(instrument_out)\n parms = pdb.getValuesGrid(\"*\")\n for chunk_obj in chunk_list:\n instrument_input = chunk_obj.output + '/instrument'\n pdb_part = lofar.parmdb.parmdb(instrument_input)\n parms_part = pdb_part.getValuesGrid(\"*\")\n keynames = parms_part.keys()\n for key in keynames:\n if 'Phase' in key:\n tmp1=np.copy(parms[key]['values'][:,0])\n tmp1[chunk_obj.solnum] = np.copy(parms_part[key]['values'][0,0])\n parms[key]['values'][:,0] = tmp1\n os.system('rm -rf ' + instrument_out)\n lofar.expion.parmdbmain.store_parms(instrument_out, parms, create_new=True)\n\n # Clean up\n for chunk_obj in chunk_list:\n os.system('rm -rf {0}*'.format(chunk_obj.output))\n os.system('rm calibrate-stand-alone*.log')\n\n # Move the solutions to original parmdb\n subprocess.call('cp -r {0} {1}'.format(instrument_out, instrument_orig),\n shell=True)", "def steering_calibration(self):\n\n # Minimum change of rotary encoder per 100 ms to detect stall\n min_difference = 5\n # Calibration motor power in percentage (absolute)\n calibration_power = 30\n\n print('Calibration of steering axis started')\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n last_pos = 99999999 # any super big number\n # Left 
turn calibration\n while abs(current_pos - last_pos) > min_difference:\n last_pos = current_pos\n self.brick_pi.set_motor_power(self.motor_steer, calibration_power)\n time.sleep(0.1)\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n\n self.brick_pi.set_motor_power(self.motor_steer, 0)\n time.sleep(0.1)\n print('Reset motor encoder after left turn: 0')\n self.brick_pi.reset_motor_encoder(self.motor_steer)\n\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n last_pos = 99999999 # any super big number\n # Right turn calibration\n while abs(current_pos - last_pos) > min_difference:\n last_pos = current_pos\n self.brick_pi.set_motor_power(self.motor_steer, -calibration_power)\n time.sleep(0.1)\n current_pos = self.brick_pi.get_motor_encoder(self.motor_steer)\n\n self.brick_pi.set_motor_power(self.motor_steer, 0)\n time.sleep(0.1)\n self.brick_pi.offset_motor_encoder(self.motor_steer, current_pos / 2)\n self.steering_limit = abs(\n self.brick_pi.get_motor_encoder(self.motor_steer))\n print('Offset motor encoder after right turn: ' +\n str(self.brick_pi.get_motor_encoder(self.motor_steer)))\n\n self.brick_pi.set_motor_position(self.motor_steer, 0)\n print('Calibration of steering axis completed')", "def vec_rolling_rms(decays: jnp.ndarray) -> _InitUpdate:\n return _vmap_accumulator(rolling_rms, decays)", "def applyCal(beam, row, freqs, freqs_cal, cf, T_d_x, T_d_y):\n \n P_sys_xx = beam.cols.xx[row].astype('float')\n xx_on = beam.cols.xx_cal_on[row].astype('float')\n xx_off = beam.cols.xx_cal_off[row].astype('float')\n P_on_xx = np.average(extractMid(xx_on))\n P_off_xx = np.average(extractMid(xx_off))\n \n #P_on_xx = fitLine(freqs_cal, xx_on, len(freqs))\n #P_off_xx = fitLine(freqs_cal, xx_off, len(freqs))\n\n P_sys_yy = beam.cols.yy[row].astype('float')\n yy_on = beam.cols.yy_cal_on[row].astype('float')\n yy_off = beam.cols.yy_cal_off[row].astype('float')\n P_on_yy = np.average(extractMid(yy_on))\n P_off_yy = np.average(extractMid(yy_off))\n \n #P_on_yy = fitLine(freqs_cal, yy_on, len(freqs))\n #P_off_yy = fitLine(freqs_cal, yy_off, len(freqs))\n\n \n T_sys_xx = P_sys_xx / (cf*P_on_xx - cf*P_off_xx) * T_d_x\n T_sys_yy = P_sys_yy / (cf*P_on_yy - cf*P_off_yy) * T_d_y\n \n return T_sys_xx, T_sys_yy", "def update_continuum_mask(self, refresh=False):\n\n ymin, ymax = (-1e8, 1e8)\n kwds = {\n \"xmin\": np.nan,\n \"xmax\": np.nan,\n \"ymin\": ymin,\n \"ymax\": ymax,\n \"facecolor\": \"r\",\n \"edgecolor\": \"none\",\n \"alpha\": 0.25,\n \"zorder\": -1\n }\n\n transform = lambda start, end, v=0: np.array([\n [start * (1 - v/c), ymin],\n [start * (1 - v/c), ymax],\n [end * (1 - v/c), ymax],\n [end * (1 - v/c), ymin],\n [start * (1 - v/c), ymin]\n ])\n\n mask = self._cache[\"masks\"][self.continuum_mask.currentText()]\n\n # Any added regions to mask out? v-stack these\n try:\n self._masked_wavelengths\n except AttributeError:\n self._masked_wavelengths = []\n self._masked_wavelengths_norm = []\n\n # Different kind of masks: rest_wavelength, obs_wavelength, pixels\n # rest_wavelength\n # The obsered spectrum is shifted to be at rest, so the continuum masks\n # will also be in the rest frame. 
So we don't need to shift the\n # 'rest_wavelength' mask, but we do need to shift the 'obs_wavelength'\n # mask\n\n # Get the applied velocity to shift some masks.\n try:\n rv_applied = self.parent.session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n rv_applied = 0\n\n _ =self.parent.session.metadata[\"normalization\"][\"normalization_kwargs\"]\n \n masked_regions = [\n np.array(mask.get(\"rest_wavelength\", [])),\n np.array(mask.get(\"obs_wavelength\", [])) * (1 - rv_applied/c),\n np.array(_[self.current_order_index].get(\"exclude\", []))\n ]\n if \"pixel\" in mask:\n masked_regions.append(\n # MAGIC HACK\n self.current_order.dispersion[np.array(mask[\"pixel\"])] + 1e-3\n )\n\n for each in masked_regions:\n each.shape = (-1, 2)\n\n masked_regions = np.vstack(masked_regions)\n\n # Remove duplicate masked regions.\n _ = np.ascontiguousarray(masked_regions).view(\n np.dtype((\n np.void, \n masked_regions.dtype.itemsize * masked_regions.shape[1])))\n __, idx = np.unique(_, return_index=True)\n masked_regions = masked_regions[idx]\n\n i = 0\n for start, end in masked_regions:\n if i >= len(self._masked_wavelengths):\n # Create a polygon in the main axis.\n self._masked_wavelengths.append(\n self.ax_order.axvspan(**kwds))\n\n # And for the normalization axis.\n self._masked_wavelengths_norm.append(\n self.ax_order_norm.axvspan(**kwds))\n\n polygons = (\n self._masked_wavelengths[i],\n self._masked_wavelengths_norm[i]\n )\n for polygon in polygons:\n polygon.set_xy(transform(start, end))\n\n i += 1\n\n # Any leftover polygons?\n for polygon in self._masked_wavelengths[i:]:\n polygon.set_xy(transform(np.nan, np.nan))\n\n for polygon in self._masked_wavelengths_norm[i:]:\n polygon.set_xy(transform(np.nan, np.nan))\n\n\n if refresh:\n self.norm_plot.draw()\n return True", "def calEachCrossflow2peak():\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n peakCross = crossFlow['Node2']\n crossFlowPeakFactor = peakCross/0.8\n \n peakCross2 = crossFlow['Node6']\n crossFlowPeakFactor2 = peakCross2/0.8\n #original_factor = peakCross/0.8\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for index in gapsToFlipIndex:\n crossFlowPeakFactor[index] = -crossFlowPeakFactor[index] \n crossFlowPeakFactor2[index] = -crossFlowPeakFactor2[index]\n \n return crossFlowPeakFactor, crossFlowPeakFactor2", "def __setSpectrum__(self):\n \n self.Ck = []\n TempCk = []\n TempOneCk = OneCk()\n \n # Process 1st frequency\n Tb = self.freqs[1].totalJ\n \n for b in range(-Tb, Tb, 1):\n TempOneCk.freq = b*self.freqs[1].Vph\n TempOneCk.Amp = self.freqs[1].Cjk(b)\n self.Ck.append(TempOneCk)\n \n # Process additional frequencies\n CkSize = len(self.Ck)\n Added = FALSE\n \n for f in range(2, len(self.freqs), 1):\n # Reset temporary variables\n Tb = self.freqs[f].totalJ\n TempCk = []\n \n # Calculate each Ck coefficient\n for b in range(-Tb, Tb, 1):\n for k in range(CkSize):\n TempOneCk.Amp = Ck[k].Amp * self.freq[f].Cjk(b)\n \n # Check to see if Amp is big enough to keep\n if( abs(TempOneCk.Amp) > self.min_Ck ):\n Added = FALSE\n TempOneCk.freq = self.Ck[k].freq + b*self.freqs.Vph\n \n # If freq is already in Ck, add new value to old,\n # if not, add new value and freq to spectrum\n for c in TempCk:\n if abs(c.freq-TempOneCk.freq < DOUBLE_PRECISION):\n c.Amp += TempOneCk.Amp\n Added = TRUE\n break\n \n if (not Added):\n TempCk.append(TempOneCk)\n \n 
self.Ck = TempCk\n CkSize = len(self.Ck)", "def _doCalibration(self):\n self._cmdCalibration(2)", "def calibration(self, pulse_min: int, pulse_max: int, pulse_centre: int, /) -> None:", "def find_calibrators(master_skymodel, beamMS, flux_cut_Jy=15.0,\n maj_cut_arcsec=None, plot=False):\n log.info('Checking {0}:'.format(beamMS))\n s = lsmtool.load(master_skymodel, beamMS=beamMS)\n if maj_cut_arcsec is not None:\n log.info('Filtering out sources larger than {0} arcsec:'.format(maj_cut_arcsec))\n if s.hasPatches:\n sizes = s.getPatchSizes(units='arcsec', weight=True, applyBeam=True)\n else:\n sizes = s.getColValues('MajorAxis', units='arcsec')\n indices = np.where(sizes <= maj_cut_arcsec)[0]\n s.select(indices, force=True, aggregate=True)\n if len(s) == 0:\n return [], [], []\n\n # Make sure all fluxes are at 60 MHz\n reffreqs = s.getColValues('ReferenceFrequency')\n fluxes = s.getColValues('I')\n alphas = s.getColValues('SpectralIndex')[:, 0] # just use slope\n fluxes_60 = fluxes*(60e6/reffreqs)**alphas\n s.setColValues('I', fluxes_60)\n s.setColValues('ReferenceFrequency', np.array([60e6]*len(reffreqs)))\n\n # Now select only those sources above the given apparent flux cut\n log.info('Filtering out sources with fluxes below {0} Jy:'.format(flux_cut_Jy))\n s.select(['I', '>', flux_cut_Jy, 'Jy'], applyBeam=True, aggregate='sum',\n force=True)\n\n if len(s) > 0:\n if plot:\n print('Showing potential calibrators. Close the plot window to continue.')\n s.plot()\n cal_fluxes = s.getColValues('I', aggregate='sum', applyBeam=True).tolist()\n if s.hasPatches:\n cal_names = s.getPatchNames().tolist()\n cal_sizes = s.getPatchSizes(units='arcsec', weight=True,\n applyBeam=True).tolist()\n else:\n cal_names = s.getColValues('Name').tolist()\n cal_sizes = s.getColValues('MajorAxis', units='arcsec').tolist()\n return cal_names, cal_fluxes, cal_sizes\n else:\n return [], [], []", "def addPeakResonancesToSeqSpinSystems(peak, seqOffsets):\n \n assert len(peak.peakDims) == len(seqOffsets)\n assert None in seqOffsets # otherwise no reference point\n\n spinSystems = []\n resonanceList = []\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n spinSystem = None\n resonances = []\n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n resonances.append(resonance)\n \n if resonance.resonanceGroup:\n if not spinSystem:\n spinSystem = resonance.resonanceGroup\n\n elif spinSystem is not resonance.resonanceGroup:\n msg = 'There are multiple spin systems for peak dimension %d.\\n' % (i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(resonance.resonanceGroup,spinSystem)\n else:\n return\n\n resonanceList.append(resonances)\n spinSystems.append( spinSystem )\n\n ref = None\n I = 0\n for i, spinSystem in enumerate(spinSystems):\n if spinSystem is not None:\n if seqOffsets[i] is None:\n if ref is None:\n ref = spinSystem\n I = i\n \n else:\n if spinSystem is not ref:\n msg = 'Dimensions %d and %d have different spin systems.\\n' % (I+1,i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(spinSystem, ref)\n else:\n return\n \n if ref is not None:\n for i, seqOffset in enumerate(seqOffsets):\n \n if seqOffset:\n spinSystem = findConnectedSpinSystem(ref, seqOffset)\n if spinSystems[i] is ref:\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n showWarning('Failure','Spin system cannot be both i and i%s (dimension %d)' % (deltaText,i+1))\n 
continue\n \n \n if spinSystem and spinSystems[i]:\n if spinSystem is not spinSystems[i]:\n if (not spinSystem.residue) or (not spinSystems[i].residue):\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n \n msg = 'There is an i%s spin system already present (dimension %d).\\n' % (deltaText, i+1)\n msg += 'Merge spin systems together?'\n if showOkCancel('Confirm', msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n elif spinSystem.residue is spinSystems[i].residue:\n name = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n msg = 'There are multiple spin systems for residue %s.\\n?' % name\n msg += 'Merge spin systems together?'\n \n if showOkCancel('Confirm',msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n else:\n txt1 = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n txt2 = '%d%s' % (spinSystems[i].residue.seqCode,spinSystems[i].residue.ccpCode)\n msg = 'Cannot set spin system for F%d dim' % (i+1)\n msg += 'Offset %d causes conflict between %s and %s' % (seqOffset, txt1, txt2)\n showWarning('Failure',msg)\n return\n \n if resonanceList[i]:\n nmrProject = resonanceList[i][0].nmrProject\n if not spinSystem:\n if spinSystems[i]:\n spinSystem = spinSystems[i]\n else:\n spinSystem = nmrProject.newResonanceGroup()\n \n makeSeqSpinSystemLink(ref, spinSystem, seqOffsets[i])\n \n for resonance in resonanceList[i]:\n if resonance.resonanceGroup is not spinSystem:\n addSpinSystemResonance(spinSystem,resonance)" ]
[ "0.61754125", "0.56453633", "0.5504699", "0.5399412", "0.5393503", "0.5272751", "0.5264761", "0.5158035", "0.5157666", "0.51486784", "0.51470107", "0.5123972", "0.51041424", "0.50907195", "0.5079858", "0.5070017", "0.5054352", "0.50522673", "0.5047045", "0.50407064", "0.50199366", "0.5008871", "0.50031376", "0.49525067", "0.49328467", "0.49030977", "0.48967198", "0.48689806", "0.48678187", "0.48554978" ]
0.6538406
0
Map each unique spin link to all of its corresponding peaks. NOESY peak lists represent spin links between Hydrogen atoms. Whether 2D, 3D or 4D, each peak in a NOESY peak list has exactly two Hydrogen spins. Here, a spin link is represented by a frozenset containing the spin.assignment tuples for each Hydrogen atom. This function returns a dictionary mapping each unique spin link to a list of the Peaks in the PeakList that contain those two Hydrogen atoms. Examples >>> spin_link_dict = peaklist.spin_link_dict() >>> spin_link, peaks = spin_link_dict.popitem() >>> spin_link frozenset([Assignment(res_type='Q', res_num=21, atom='HN'), Assignment( res_type='G', res_num=17, atom='HN')]) >>> print(peaks[0]) Peak(spins=[ Spin(res_type=G, res_num=17, atom=HN), Spin(res_type=G, res_num=17, atom=N), Spin(res_type=Q, res_num=21, atom=HN)]) >>> print(peaks[1]) Peak(spins=[ Spin(res_type=Q, res_num=21, atom=HN), Spin(res_type=Q, res_num=21, atom=N), Spin(res_type=G, res_num=17, atom=HN)]) Returns
def get_spin_link_dict(peaklist): spin_link_dict = {} for peak in peaklist: spins = [spin for spin in peak if spin.atom is not None and spin.atom[0] == 'H'] if len(spins) != 2: err = ('expected 2 Hydrogens in each peak, ' 'found %d' % len(spins)) raise ValueError(err) link = frozenset(spin.assignment for spin in spins) spin_link_dict.setdefault(link, []).append(peak) return spin_link_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mod_map(mods, plinkmap):\n modmap = {}\n for chrom in plinkmap:\n if chrom not in modmap:\n modmap[chrom] = []\n markers = plinkmap[chrom]\n modif = mods[chrom]\n for i, m in enumerate(modif):\n if m == 'I':\n p2 = float(markers[i+1][3])\n p1 = float(markers[i-1][3])\n pk = float(markers[i][3])\n g2 = float(markers[i+1][2])\n g1 = float(markers[i-1][2])\n d = (p2 - pk) / (p2 - p1)\n gu = g2 - d*(g2 - g1)\n if g2 == gu:\n gi = str(round((g2 + g1)/2, ndigits=2))\n else:\n gi = str(round(gu, ndigits=2))\n modmar = [markers[i][0], markers[i][1], gi, markers[i][3]]\n elif m == 'J':\n jgpos = marker[i][2] + '1'\n modmar = [markers[i][0], markers[i][1], jgpos, markers[i][3]]\n else:\n modmar = markers[i]\n modmap[chrom].append(modmar)\n return modmap", "def make_links_dict(pairs_dict):\n links_dict = {}\n for end1 in pairs_dict:\n \n if (end1 in pairs_dict) and (len(pairs_dict[end1])) > 0:\n best_pair = max(pairs_dict[end1], key = pairs_dict[end1].get)\n \n if best_pair in pairs_dict and len(pairs_dict[best_pair]) > 0:\n \n if max(pairs_dict[best_pair], key = pairs_dict[best_pair].get) == end1:\n links_dict[end1] = best_pair\n links_dict[best_pair] = end1\n return links_dict", "def anchors(self):\n dims = self.dims\n anchors = []\n for peak in self:\n possible_anchors = []\n for combination in combinations(range(dims), 2):\n spins = [peak[i] for i in combination]\n if any(s.res_num is None or s.atom is None for s in spins):\n continue\n res_nums = [spin.res_num for spin in spins]\n atoms = [spin.atom for spin in spins]\n elements = [atom[0] for atom in atoms]\n positions = [atom[1:] for atom in atoms]\n same_res_num = res_nums[0] == res_nums[1]\n valid_pairs = [set(('H', 'N')), set(('H', 'C'))]\n is_proton_heavy_pair = set(elements) in valid_pairs\n same_position = all(c[0] == c[1] for c in zip(*positions))\n if same_res_num and is_proton_heavy_pair and same_position:\n if '' in positions and set(elements) != set(('H', 'N')):\n # One of the atom names must have been 'H', 'N' or 'C'\n # Of these, only the amide proton anchor is valid\n continue\n if elements[0] == 'H':\n possible_anchors.append(combination)\n else:\n possible_anchors.append(combination[::-1])\n if len(possible_anchors) > 1:\n pa_sets = [set(pa) for pa in possible_anchors]\n overlap = set.intersection(*pa_sets)\n if overlap:\n # Ambiguous, overlapping anchors\n continue\n for poss_anc in possible_anchors:\n if poss_anc not in anchors:\n anchors.append(poss_anc)\n anchors = tuple(anchors)\n return anchors", "def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. 
\\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def getSeqSpinSystemLinks(spinSystem, delta=None):\n\n seqLinks = {}\n for link in spinSystem.findAllResonanceGroupProbs(linkType='sequential',isSelected=True):\n if delta is None:\n seqLinks[link] = None\n \n elif link.sequenceOffset == delta:\n seqLinks[link] = None\n\n for link in spinSystem.findAllFromResonanceGroups(linkType='sequential',isSelected=True):\n if delta is None:\n seqLinks[link] = None\n \n elif link.sequenceOffset == -delta:\n seqLinks[link] = None\n\n return seqLinks.keys()", "def get_link_inr(network_name: str, rx_pair_inr: Dict) -> Dict:\n results: DefaultDict = defaultdict(list)\n for (rx_node, rx_from_node), inr_power in rx_pair_inr.items():\n link_name = Topology.mac_to_link_name.get(network_name, {}).get(\n (rx_node, rx_from_node)\n )\n if link_name is None:\n continue\n\n inr_db = 10 * np.log10(inr_power)\n if inr_db < HardwareConfig.MINIMUM_SNR_DB:\n continue\n\n results[link_name].append(\n {\"rx_node\": rx_node, \"rx_from_node\": rx_from_node, \"inr_curr_power\": inr_db}\n )\n return results", "def parse_map(plinkmap):\n plink = {}\n with open(plinkmap, 'r') as f:\n for line in f:\n tmp = line.strip().split()\n chrom = tmp[0]\n if chrom not in plink:\n plink[chrom] = []\n plink[chrom].append(tmp)\n # Then sort on physical position\n for c in plink:\n plink[c] = sorted(plink[c], key=lambda x: int(x[3]))\n return plink", "def create_links_dict(all_pages):\n links_dict = dict()\n\n n_link = 0\n for j in range(N_PROCESSES):\n for n_site, site in enumerate(all_pages[j]):\n link = site[\"link\"]\n link = reduce_to_domain(link)\n\n if len(link) >= MIN_LINK_LEN and links_dict.get(link, -1) == -1:\n links_dict[link] = n_link\n n_link += 1\n\n if site[\"hyperlinks\"] is None:\n continue\n\n for child_link in site[\"hyperlinks\"]:\n child_link = reduce_to_domain(child_link)\n\n if len(child_link) >= MIN_LINK_LEN and links_dict.get(child_link, -1) == -1:\n links_dict[child_link] = n_link\n n_link += 1\n\n with open(os.path.join(\"..\", \"files\", \"all_links.json\"), \"w\", encoding=\"utf-8\") as f:\n json.dump(links_dict, f, indent=4, ensure_ascii=False)", "def get_all_backup_links(\n network_name: str,\n node_mac_map: DefaultDict,\n link_name_map: Dict[str, Dict],\n conn_list: List,\n) -> DefaultDict:\n backup_links: DefaultDict = defaultdict(dict)\n for conn_list_item in conn_list:\n tx_node_mac = conn_list_item[\"tx_node\"]\n rx_node_mac = conn_list_item[\"rx_node\"]\n backup_link_candidate = {\n \"link_type\": 1,\n \"linkup_attempts\": 0,\n \"is_alive\": False,\n \"name\": \"\",\n \"is_backup_cn_link\": True,\n }\n\n if tx_node_mac not in node_mac_map or rx_node_mac not in node_mac_map:\n logging.debug(f\"One of the mac addresses is not in {network_name}.\")\n continue\n\n # TODO: This part will be used in the later version.\n # No CNs can be tested at this point in the live network.\n # Will come back 
to complete the logic later on.\n tx_node_type = node_mac_map[tx_node_mac][\"type\"]\n rx_node_type = node_mac_map[rx_node_mac][\"type\"]\n if tx_node_type == NodeType.CN or rx_node_type == NodeType.CN:\n backup_link_candidate[\"is_backup_cn_link\"] = True\n\n if node_mac_map[tx_node_mac][\"name\"] < node_mac_map[rx_node_mac][\"name\"]:\n backup_link_candidate[\"a_node_mac\"] = tx_node_mac\n backup_link_candidate[\"z_node_mac\"] = rx_node_mac\n backup_link_candidate[\"a_node_name\"] = node_mac_map[tx_node_mac][\"name\"]\n backup_link_candidate[\"z_node_name\"] = node_mac_map[rx_node_mac][\"name\"]\n else:\n backup_link_candidate[\"a_node_mac\"] = rx_node_mac\n backup_link_candidate[\"z_node_mac\"] = tx_node_mac\n backup_link_candidate[\"a_node_name\"] = node_mac_map[rx_node_mac][\"name\"]\n backup_link_candidate[\"z_node_name\"] = node_mac_map[tx_node_mac][\"name\"]\n\n backup_link_candidate_name = (\n f\"link-{backup_link_candidate['a_node_name']}\"\n f\"-{backup_link_candidate['z_node_name']}\"\n )\n backup_link_candidate[\"name\"] = backup_link_candidate_name\n # Do not process any active links in the topology file\n # TODO: check whether this part is necessary.\n # If it is the case, we need to check node macs instead of link name only.\n if backup_link_candidate_name not in link_name_map:\n backup_links[backup_link_candidate_name][\"link\"] = backup_link_candidate\n if len(conn_list_item[\"routes\"]) != 0:\n (_tx_beam_idx, _rx_beam_idx, snr) = conn_list_item[\"routes\"][0]\n backup_links[backup_link_candidate_name][\"snr\"] = snr\n\n return backup_links", "def compute_pagerank(urls, inlinks, outlinks, b=.85, iters=20):\n ###TODO\n pagerank = defaultdict(lambda: 1.0)\n N = len(urls)\n for url in urls:\n pagerank[url]\n for i in range(0, iters):\n for url in urls:\n result_sum = 0.0\n for link in inlinks[url]:\n if len(outlinks[link]) is not 0:\n result_sum += (pagerank[link] / len(outlinks[link]))\n pagerank[url] = (1/N) * (1-b) + (b * result_sum)\n return pagerank\n pass", "def backlinks(self) -> Dict[str, List[str]]:\n bk_links: Dict[str, List[str]] = {}\n for note in filter(lambda n: n.links_to is not None, self.by_id.values()):\n for fwd in note.links_to:\n if fwd not in bk_links:\n bk_links[fwd] = [note.id]\n else:\n bk_links[fwd].append(note.id)\n\n return bk_links", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = 
dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. 
Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def _make_links(self,\n links: Mapping[str, Union[str, Dict[str, Any]]],\n relationship: Optional[str] = None):\n evaluated_links = {}\n for name, link_payload in links.items():\n evaluated_links[name] = link_payload\n for param, arg in link_payload.items():\n evaluated_links[name][param] = (\n arg(self) if callable(arg) else arg)\n links_factories = self.__links_factories__\n return {\n name: links_factories[self._qualname(name, relationship)](**evaluated_links[name])\n if links_factories.get(self._qualname(name, relationship)) is not None\n else evaluated_links[name]\n for name in evaluated_links\n }", "def get_dictionary_of_peptides_and_isomeric_peak_areas(self, fout_peptides_isomeric_peak_areas):\n \n try:\n fin_handle = open(fout_peptides_isomeric_peak_areas)\n\n except IOError:\n raise(\"Provide a file containing percentage peak_area of isomeric peptides\")\n\n # local list; this appends all lines within a block and is emptied at the end of the block\n L_peptide_isomeric_peak_area = []\n\n block_start = False; pep_NC = \"\"\n \n for line in fin_handle:\n \n line = line.strip()\n \n # skipping the blank line\n if not line.strip():\n continue\n \n # skipping the comment line\n if line[0]==\"#\":\n continue\n \n \n if line==\"PEPSTART\": block_start=True\n\n elif line==\"PEPEND\" :\n block_start=False\n \n #end elif\n \n if block_start and line!=\"PEPSTART\":\n L = line.split(\":\")\n if L[0].strip() == \"peptide\":\n pep_NC = L[1].strip() #e.g, '15-25'\n\n elif L[0].strip()==\"IsomericPeptidesPeakArea\":\n right_side = L[1].strip()\n\n L_modtypes_freq_peak_area = [m.strip() for m in right_side.split(\" \")]\n percentage_peak_area = L_modtypes_freq_peak_area[-1] # last column\n D_modtype_freq = {}\n\n # running the loop so as to skip the last element\n for i, m in enumerate(L_modtypes_freq_peak_area[:-1]):\n mtype = (m.split('=')[0]).strip()\n freq = (m.split('=')[1]).strip()\n D_modtype_freq[mtype] = freq\n\n #end for\n\n L_peptide_isomeric_peak_area.append((D_modtype_freq, percentage_peak_area))\n \n # end if block_start and line!=\"PEPSTART\" \n\n # pushing into the dictionary after end of each block\n\n if line==\"PEPEND\":\n\n # sorting the list based on total frequency of isomeric peptides\n L_sorted = sorted(L_peptide_isomeric_peak_area, key=lambda x: sum([int(f) for f in x[0].values()]))\n \n self._D_peptide_isomeric_peak_areas[pep_NC] = L_sorted\n \n #emptying the list for next block\n L_peptide_isomeric_peak_area = []\n \n # emptying the peptide N_loc, C_loc string at the end of the block\n pep_NC = \"\"", "def get_linked_neighbors(self, directions=None):\n if not directions:\n directions = REVERSE_DIRECTIONS.keys()\n\n xygrid = self.xymap.xygrid\n links = {}\n for direction in directions:\n dx, dy = MAPSCAN[direction]\n end_x, end_y = self.x + dx, self.y + dy\n if end_x in xygrid and end_y in xygrid[end_x]:\n # there is is something there, we need to check if it is either\n # a map node or a link connecting in our direction\n node_or_link = xygrid[end_x][end_y]\n if node_or_link.multilink or node_or_link.get_direction(direction):\n links[direction] = node_or_link\n return links", "def _partition_pairs_by_slot(\n self, mapping: Mapping[AnyKeyT, EncodableT]\n ) -> Dict[int, 
List[EncodableT]]:\n\n slots_to_pairs = {}\n for pair in mapping.items():\n slot = key_slot(self.encoder.encode(pair[0]))\n slots_to_pairs.setdefault(slot, []).extend(pair)\n\n return slots_to_pairs", "def getSlotMap(self):\n slotMap = dict()\n for entry in self.slots:\n slotMap[entry] = self.__getattribute__(\"on_\" + entry)\n return slotMap", "def get_links(user):\n # secure_filename('some.file') strips hacker attempts away from input. \n linksfile = secure_filename('%s.links'%(user))\n\n # Here we should check if file exists with -> os.path.isfile(path)\n\n try:\n with codecs.open(linksfile, 'rb') as userfile: \n links = pickle.loads(userfile.read())\n except IOError:\n links = {}\n return links", "def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" in header:\r\n rev_primer_ix = header.index(\"ReversePrimer\")\r\n else:\r\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\r\n\r\n iupac = {'A': 'A', 'T': 'T', 'G': 'G', 'C': 'C', 'R': '[AG]', 'Y': '[CT]',\r\n 'S': '[GC]', 'W': '[AT]', 'K': '[GT]', 'M': '[AC]', 'B': '[CGT]',\r\n 'D': '[AGT]', 'H': '[ACT]', 'V': '[ACG]', 'N': '[ACGT]'}\r\n\r\n raw_forward_primers = set([])\r\n raw_forward_rc_primers = set([])\r\n raw_reverse_primers = set([])\r\n raw_reverse_rc_primers = set([])\r\n\r\n for line in mapping_data:\r\n # Split on commas to handle pool of primers\r\n raw_forward_primers.update([upper(primer).strip() for\r\n primer in line[primer_ix].split(',')])\r\n raw_forward_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_forward_primers])\r\n raw_reverse_primers.update([upper(primer).strip() for\r\n primer in line[rev_primer_ix].split(',')])\r\n raw_reverse_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_reverse_primers])\r\n\r\n if not raw_forward_primers:\r\n raise ValueError((\"No forward primers detected in mapping file.\"))\r\n if not raw_reverse_primers:\r\n raise ValueError((\"No reverse primers detected in mapping file.\"))\r\n\r\n # Finding the forward primers, or rc of reverse primers indicates forward\r\n # read. 
Finding the reverse primer, or rc of the forward primers, indicates\r\n # the reverse read, so these sets are merged.\r\n raw_forward_primers.update(raw_reverse_rc_primers)\r\n raw_reverse_primers.update(raw_forward_rc_primers)\r\n\r\n forward_primers = []\r\n reverse_primers = []\r\n for curr_primer in raw_forward_primers:\r\n forward_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n for curr_primer in raw_reverse_primers:\r\n reverse_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n\r\n return forward_primers, reverse_primers", "def testDereferenceLinks(self):\n ddict = {\"ext_group\": {\"dataset\": 10}}\n dictdump.dicttonx(ddict, self.h5_ext_fname)\n ddict = {\"links\": {\"group\": {\"dataset\": 10, \">relative_softlink\": \"dataset\"},\n \">relative_softlink\": \"group/dataset\",\n \">absolute_softlink\": \"/links/group/dataset\",\n \">external_link\": \"nx_ext.h5::/ext_group/dataset\"}}\n dictdump.dicttonx(ddict, self.h5_fname)\n\n ddict = dictdump.h5todict(self.h5_fname, dereference_links=True)\n self.assertTrue(ddict[\"links\"][\"absolute_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"relative_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"external_link\"], 10)\n self.assertTrue(ddict[\"links\"][\"group\"][\"relative_softlink\"], 10)", "def dict() -> Dict[str, Pin]:", "def link_residues(self) -> None:\n ...", "def addPeakResonancesToSeqSpinSystems(peak, seqOffsets):\n \n assert len(peak.peakDims) == len(seqOffsets)\n assert None in seqOffsets # otherwise no reference point\n\n spinSystems = []\n resonanceList = []\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n spinSystem = None\n resonances = []\n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n resonances.append(resonance)\n \n if resonance.resonanceGroup:\n if not spinSystem:\n spinSystem = resonance.resonanceGroup\n\n elif spinSystem is not resonance.resonanceGroup:\n msg = 'There are multiple spin systems for peak dimension %d.\\n' % (i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(resonance.resonanceGroup,spinSystem)\n else:\n return\n\n resonanceList.append(resonances)\n spinSystems.append( spinSystem )\n\n ref = None\n I = 0\n for i, spinSystem in enumerate(spinSystems):\n if spinSystem is not None:\n if seqOffsets[i] is None:\n if ref is None:\n ref = spinSystem\n I = i\n \n else:\n if spinSystem is not ref:\n msg = 'Dimensions %d and %d have different spin systems.\\n' % (I+1,i+1)\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm', msg):\n mergeSpinSystems(spinSystem, ref)\n else:\n return\n \n if ref is not None:\n for i, seqOffset in enumerate(seqOffsets):\n \n if seqOffset:\n spinSystem = findConnectedSpinSystem(ref, seqOffset)\n if spinSystems[i] is ref:\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n showWarning('Failure','Spin system cannot be both i and i%s (dimension %d)' % (deltaText,i+1))\n continue\n \n \n if spinSystem and spinSystems[i]:\n if spinSystem is not spinSystems[i]:\n if (not spinSystem.residue) or (not spinSystems[i].residue):\n if seqOffsets[i] < 0:\n deltaText = '%d' % seqOffset\n else:\n deltaText = '+%d' % seqOffset\n \n msg = 'There is an i%s spin system already present (dimension %d).\\n' % (deltaText, i+1)\n msg += 'Merge spin systems together?'\n if showOkCancel('Confirm', msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n 
else:\n spinSystem = None\n\n elif spinSystem.residue is spinSystems[i].residue:\n name = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n msg = 'There are multiple spin systems for residue %s.\\n?' % name\n msg += 'Merge spin systems together?'\n \n if showOkCancel('Confirm',msg):\n spinSystem = mergeSpinSystems(spinSystems[i],spinSystem)\n else:\n spinSystem = None\n\n else:\n txt1 = '%d%s' % (spinSystem.residue.seqCode,spinSystem.residue.ccpCode)\n txt2 = '%d%s' % (spinSystems[i].residue.seqCode,spinSystems[i].residue.ccpCode)\n msg = 'Cannot set spin system for F%d dim' % (i+1)\n msg += 'Offset %d causes conflict between %s and %s' % (seqOffset, txt1, txt2)\n showWarning('Failure',msg)\n return\n \n if resonanceList[i]:\n nmrProject = resonanceList[i][0].nmrProject\n if not spinSystem:\n if spinSystems[i]:\n spinSystem = spinSystems[i]\n else:\n spinSystem = nmrProject.newResonanceGroup()\n \n makeSeqSpinSystemLink(ref, spinSystem, seqOffsets[i])\n \n for resonance in resonanceList[i]:\n if resonance.resonanceGroup is not spinSystem:\n addSpinSystemResonance(spinSystem,resonance)", "def relabel(peak_ids, oldparams, mask):\n spot_data = {}\n peak_num = 1\n for peak in peak_ids:\n #coords = np.where(mask == peak)\n paramsnew = oldparams[peak-1,:] # object 1 will be fitparams row 0\n # Rearrange params from fit function so coordinates lead.\n spot_data[peak_num] = paramsnew[[1,2,3,0,4,5,6]]\n peak_num = peak_num + 1\n return spot_data", "def getLinkEnds(self):\n dataDict = self.__dict__\n result = set(ca.boundLinkEnd for ca in self.chemAtoms if isinstance(ca,LinkAtom))\n if None in result:\n result.remove(None)\n result = frozenset(result)\n return result", "def build_graph(link_data, links):\n graph = {}\n\n # add all data for links\n for l in links:\n #print(\"Adding \"+l)\n #print(link_data.get(l))\n graph[l] = list(link_data.get(l))\n\n # add all links that point to links\n for slink in link_data:\n for l in links:\n # the links is already in graph, skip\n if graph.has_key(slink):\n continue\n\n try:\n dest_links = list(link_data.get(slink))\n # if slink points to l\n _ = dest_links.index(l)\n # add the slink to graph\n graph[slink] = dest_links\n #print(\"Adding \"+slink)\n except Exception as e:\n pass\n\n #print(len(graph))\n #print(graph)\n\n return graph", "def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matter if they are functional\n for i in range(0, len(self._partner_indices)):\n graph_dict_incomplete[i] = set(self._partner_indices[i])\n if self._variant[0] == \"V0_instant\":\n self.graph_dict = graph_dict_incomplete\n else:\n # helper\n link_list = []\n link_list2 = []\n for vertex in graph_dict_incomplete:\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=set())\n for neighbour in graph_dict_incomplete[vertex]:\n # Iterate through all plants and the neighbours\n # If a new pair occurs it will be appended in link_list2\n # If the pair occurs again it wll be appended in link_list\n # This means that the link (or rgf process) is finished\n # for both plants\n if {neighbour, vertex} not in link_list2:\n link_list2.append({vertex, neighbour})\n else:\n # plants are only put in the dict. if they occur more\n # than once, i.e. 
both partners have finished rgf\n link_list.append({vertex, neighbour})\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=neighbour)", "def download_burstlink_mapping(fpath='burstlink.json'):\n\n try:\n url = 'https://raw.githubusercontent.com/soruly/burstlink/master/burstlink.json'\n # downloading\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) \\\n AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15'\n }\n resp = requests.get(url, headers=headers)\n mapping = resp.json()\n for i in range(len(mapping)):\n item = mapping[i]\n item['mal'] = None if 'mal' not in item else item['mal']\n item['anidb'] = None if 'anidb' not in item else item['anidb']\n item['anilist'] = None if 'anilist' not in item else item['anilist']\n mapping[i] = item\n with open(fpath, 'w', encoding='utf-8') as f:\n json.dump(mapping, f, indent=2, ensure_ascii=False)\n return True\n except Exception:\n traceback.print_exc()\n return False", "def addPeakResonancesToSpinSystem(peaks):\n \n # TBD check experiment type of the peak\n \n if not peaks:\n return\n \n resonances = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n resonances.append(contrib.resonance)\n \n spinSystems = []\n for resonance in resonances:\n resonanceGroup = resonance.resonanceGroup\n if resonanceGroup and (resonanceGroup not in spinSystems):\n spinSystems.append(resonanceGroup)\n\n spinSystem = None\n if len(spinSystems) == 1:\n spinSystem = spinSystems[0]\n elif len(spinSystems) > 1:\n msg = 'There are multiple spin systems for these peaks.\\n'\n msg += 'Continue and merge spin systems together?'\n if showOkCancel('Confirm',msg):\n spinSystem = spinSystems[0]\n for spinSystem2 in spinSystems[1:]:\n mergeSpinSystems(spinSystem2,spinSystem)\n else:\n return\n \n if spinSystem is None:\n spinSystem = peaks[0].topObject.newResonanceGroup()\n\n for resonance in resonances:\n addSpinSystemResonance(spinSystem,resonance)\n\n return spinSystem" ]
[ "0.4860738", "0.4834604", "0.48159462", "0.4767646", "0.47199568", "0.46659744", "0.46064976", "0.4587481", "0.4538893", "0.45380697", "0.45124298", "0.44684395", "0.44490376", "0.4414482", "0.4384815", "0.43777874", "0.4372807", "0.43616307", "0.43376932", "0.43176925", "0.43066147", "0.42994502", "0.42880207", "0.42873344", "0.42709255", "0.4265717", "0.42531103", "0.42473865", "0.42406118", "0.42374805" ]
0.7973843
0
Sort peaks by the assignments of their constituent spins. Sort the peaks by the assignments of spins in particular dimensions. The default order sorts the peaks by the dimensions associated with spin anchors first, then by the remaining dimensions in the order they appear in each peak. Optionally place all commented peaks at the end of the peak list.
def sort_by_assignments(peaklist, order=None, commented_at_end=False): anchors = peaklist.anchors anchored = tuple(i for anchor in anchors for i in anchor) unanchored = set(range(peaklist.dims)) - set(anchored) default_order = anchored + tuple(sorted(unanchored)) order = order if order is not None else default_order peaklist.sort(key=lambda peak: tuple(peak[i] for i in order)) if commented_at_end: peaklist.sort(key=lambda peak: peak.commented) return peaklist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the same number \n # of dimensions to be compared.\n if len(cube.coord(self.time_coord).points) == 1:\n cube = iris.util.new_axis(cube, scalar_coord=self.time_coord)\n \n # Chop cubes into individual realizations for relabelling.\n member_slices = get_coordinate_slice_dimensions(\n cube, [self.realization,self.forecast_ref_time],\n ignore_missing_coords=True)\n for member_slice in cube.slices(member_slices):\n \n if self.realization in [coord.name() \n for coord in member_slice.coords()]:\n member_slice.coord(\n self.realization).points = [realization_num]\n else:\n realization_coord = iris.coords.AuxCoord([realization_num],\n self.realization)\n member_slice.add_aux_coord(realization_coord)\n \n member_slice.cell_methods = None\n sorted_cubelist.append(member_slice)\n realization_num += 1\n \n sorted_cubelist = iris.cube.CubeList(sorted_cubelist)\n # Mask missing time steps so merging can be done.\n sorted_cubelist = pad_coords(sorted_cubelist, self.time_coord)\n cube = sorted_cubelist.merge_cube()\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n \n if cube.coord_dims(cube.coord(self.realization)) == \\\n cube.coord_dims(cube.coord(self.forecast_ref_time)):\n # Re order realizations in initialisation date order.\n ordered_inits = sorted(cube.coord('forecast_reference_time').points)\n ordered_mems = range(1, len(cube.coord('realization').points)+1)\n ordered_cubes = []\n for member_slice in cube.slices(member_slices):\n mem_index = ordered_inits.index(\n member_slice.coord(self.forecast_ref_time).points[0])\n member_slice.coord('realization').points = ordered_mems[mem_index]\n del ordered_inits[mem_index]\n del ordered_mems[mem_index]\n ordered_cubes.append(member_slice)\n cube = iris.cube.CubeList(ordered_cubes).merge_cube()\n \n return cube", "def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n 
mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def preference_ordering(self) -> None:\n for i in self._destinations:\n self._destinations[i] = sorted(self._destinations[i])", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j].fitness < self.genepool[0][j-1].fitness:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j].fitness < self.genepool[1][j-1].fitness:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in 
resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 
'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n for cube in year_cubelist.merge():\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, \n self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n sorted_cubelist.append(cube)\n return iris.cube.CubeList(sorted_cubelist)", "def order_pseudotime(self):\n # within segs_tips, order tips according to pseudotime\n if self.iroot is not None:\n for itips, tips in enumerate(self.segs_tips):\n if tips[0] != -1:\n indices = np.argsort(self.pseudotime[tips])\n self.segs_tips[itips] = self.segs_tips[itips][indices]\n else:\n logg.debug(f' group {itips} is very small')\n # sort indices according to segments\n indices = np.argsort(self.segs_names)\n segs_names = self.segs_names[indices]\n # find changepoints of segments\n changepoints = np.arange(indices.size - 1)[np.diff(segs_names) == 1] + 1\n if self.iroot is not None:\n pseudotime = self.pseudotime[indices]\n for iseg, seg in enumerate(self.segs):\n # only consider one segment, it's already ordered by segment\n seg_sorted = seg[indices]\n # consider the pseudotime on this segment and sort them\n seg_indices = np.argsort(pseudotime[seg_sorted])\n # within the segment, order indices according to increasing pseudotime\n indices[seg_sorted] = 
indices[seg_sorted][seg_indices]\n # define class members\n self.indices = indices\n self.changepoints = changepoints", "def changePeaks(self):\n # Change the number of peaks\n if self.minpeaks is not None and self.maxpeaks is not None:\n npeaks = len(self.peaks_function)\n u = self.random.random()\n r = self.maxpeaks - self.minpeaks\n if u < 0.5:\n # Remove n peaks or less depending on the minimum number of peaks\n u = self.random.random()\n n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n idx = self.random.randrange(len(self.peaks_function))\n self.peaks_function.pop(idx)\n self.peaks_position.pop(idx)\n self.peaks_height.pop(idx)\n self.peaks_width.pop(idx)\n self.last_change_vector.pop(idx)\n else:\n # Add n peaks or less depending on the maximum number of peaks\n u = self.random.random()\n n = min(self.maxpeaks - npeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n self.peaks_function.append(self.random.choice(self.pfunc_pool))\n self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])\n self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))\n self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))\n self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])\n\n for i in range(len(self.peaks_function)):\n # Change peak position\n shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]\n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n \n shift = [shift_length * (1.0 - self.lambda_) * s \\\n + self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]\n \n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n\n shift = [s*shift_length for s in shift]\n \n new_position = []\n final_shift = []\n for pp, s in zip(self.peaks_position[i], shift):\n new_coord = pp + s\n if new_coord < self.min_coord:\n new_position.append(2.0 * self.min_coord - pp - s)\n final_shift.append(-1.0 * s)\n elif new_coord > self.max_coord:\n new_position.append(2.0 * self.max_coord - pp - s)\n final_shift.append(-1.0 * s)\n else:\n new_position.append(new_coord)\n final_shift.append(s)\n\n self.peaks_position[i] = new_position\n self.last_change_vector[i] = final_shift\n\n # Change peak height\n change = self.random.gauss(0, 1) * self.height_severity\n new_value = change + self.peaks_height[i]\n if new_value < self.min_height:\n self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change\n elif new_value > self.max_height:\n self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change\n else:\n self.peaks_height[i] = new_value\n\n # Change peak width\n change = self.random.gauss(0, 1) * self.width_severity\n new_value = change + self.peaks_width[i]\n if new_value < self.min_width:\n self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change\n elif new_value > self.max_width:\n self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change\n else:\n self.peaks_width[i] = new_value\n\n self._optimum = None", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, 
xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def panPeakDetect(detection, fs):\n\n min_distance = int(0.25 * fs)\n\n signal_peaks = [0]\n noise_peaks = []\n\n SPKI = 0.0\n NPKI = 0.0\n\n threshold_I1 = 0.0\n threshold_I2 = 0.0\n\n RR_missed = 0\n index = 0\n indexes = []\n\n missed_peaks = []\n peaks = []\n\n for i in range(len(detection)):\n\n if 0 < i < len(detection) - 1:\n if detection[i - 1] < detection[i] and detection[i + 1] < detection[i]:\n peak = i\n peaks.append(i)\n\n if detection[peak] > threshold_I1 and (peak - signal_peaks[-1]) > 0.25 * fs:\n\n signal_peaks.append(peak)\n indexes.append(index)\n SPKI = 0.125 * detection[signal_peaks[-1]] + 0.875 * SPKI\n if RR_missed != 0:\n if signal_peaks[-1] - signal_peaks[-2] > RR_missed:\n missed_section_peaks = peaks[indexes[-2] + 1:indexes[-1]]\n missed_section_peaks2 = []\n for missed_peak in missed_section_peaks:\n if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[\n -1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:\n missed_section_peaks2.append(missed_peak)\n\n if len(missed_section_peaks2) > 0:\n missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]\n missed_peaks.append(missed_peak)\n signal_peaks.append(signal_peaks[-1])\n signal_peaks[-2] = missed_peak\n\n else:\n noise_peaks.append(peak)\n NPKI = 0.125 * detection[noise_peaks[-1]] + 0.875 * NPKI\n\n threshold_I1 = NPKI + 0.25 * (SPKI - NPKI)\n threshold_I2 = 0.5 * threshold_I1\n\n if len(signal_peaks) > 8:\n RR = np.diff(signal_peaks[-9:])\n RR_ave = int(np.mean(RR))\n RR_missed = int(1.66 * RR_ave)\n\n index = index + 1\n # First possible peak detection\n first_possible_peak = np.argmax(detection[0:int(0.25 * fs)])\n if detection[first_possible_peak] > SPKI:\n signal_peaks[0] = first_possible_peak\n else:\n signal_peaks.pop(0)\n signal_peaks = np.array(signal_peaks)\n return signal_peaks", "def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)", "def parks(self):\n point_array = [0, 2, 8, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14]\n park_coords = []\n parks_sorted = []\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'p':\n park_coords.append(tuple([i, j]))\n while len(park_coords) > 0:\n x, y = park_coords.pop(0)\n if len(parks_sorted) == 0:\n parks_sorted.append([(x, y)])\n else:\n borders_bool = []\n for block_no, park_block in enumerate(parks_sorted):\n borders_bool.append(False)\n for i, j in park_block:\n if abs(x - i) + abs(y - j) == 1:\n borders_bool[block_no] = True\n if (num_true := borders_bool.count(True)) == 1:\n parks_sorted[borders_bool.index(True)].append((x, y))\n elif num_true > 1:\n new_parks_sorted = []\n i_mega_park = None\n for block_no, park_block in enumerate(parks_sorted):\n if borders_bool[block_no]: # If it is bordering\n if i_mega_park is None:\n i_mega_park = block_no\n new_parks_sorted.append(park_block)\n else:\n new_parks_sorted[i_mega_park] += park_block\n new_parks_sorted[i_mega_park] += [(x, y)]\n parks_sorted = new_parks_sorted\n else:\n new_parks_sorted.append(park_block)\n parks_sorted = new_parks_sorted\n else:\n parks_sorted.append([(x, y)])\n\n return sum([point_array[len(block)] for block in 
parks_sorted])", "def sortNondominated(fitness, k=None, first_front_only=False):\r\n if k is None:\r\n k = len(fitness)\r\n\r\n # Use objectives as keys to make python dictionary\r\n map_fit_ind = defaultdict(list)\r\n for i, f_value in enumerate(fitness): # fitness = [(1, 2), (2, 2), (3, 1), (1, 4), (1, 1)...]\r\n map_fit_ind[f_value].append(i)\r\n fits = list(map_fit_ind.keys()) # fitness values\r\n\r\n current_front = []\r\n next_front = []\r\n dominating_fits = defaultdict(int) # n (The number of people dominate you)\r\n dominated_fits = defaultdict(list) # Sp (The people you dominate)\r\n\r\n # Rank first Pareto front\r\n # *fits* is a iterable list of chromosomes. Each has multiple objectives.\r\n for i, fit_i in enumerate(fits):\r\n for fit_j in fits[i + 1:]:\r\n # Eventhougn equals or empty list, n & Sp won't be affected\r\n if dominates(fit_i, fit_j):\r\n dominating_fits[fit_j] += 1\r\n dominated_fits[fit_i].append(fit_j)\r\n elif dominates(fit_j, fit_i):\r\n dominating_fits[fit_i] += 1\r\n dominated_fits[fit_j].append(fit_i)\r\n if dominating_fits[fit_i] == 0:\r\n current_front.append(fit_i)\r\n\r\n fronts = [[]] # The first front\r\n for fit in current_front:\r\n fronts[-1].extend(map_fit_ind[fit])\r\n pareto_sorted = len(fronts[-1])\r\n\r\n # Rank the next front until all individuals are sorted or\r\n # the given number of individual are sorted.\r\n # If Sn=0 then the set of objectives belongs to the next front\r\n if not first_front_only: # first front only\r\n N = min(len(fitness), k)\r\n while pareto_sorted < N:\r\n fronts.append([])\r\n for fit_p in current_front:\r\n # Iterate Sn in current fronts\r\n for fit_d in dominated_fits[fit_p]:\r\n dominating_fits[fit_d] -= 1 # Next front -> Sn - 1\r\n if dominating_fits[fit_d] == 0: # Sn=0 -> next front\r\n next_front.append(fit_d)\r\n # Count and append chromosomes with same objectives\r\n pareto_sorted += len(map_fit_ind[fit_d])\r\n fronts[-1].extend(map_fit_ind[fit_d])\r\n current_front = next_front\r\n next_front = []\r\n\r\n return fronts", "def find_peak_locations(data, tol=prominence_tolerance, ranked=False):\n\n prominences = [(i, calculate_peak_prominence(data, i)) for i in range(len(data))]\n\n # normalize to interval [0,1]\n prom_max = max([x[1] for x in prominences])\n if prom_max == 0 or len(prominences) == 0:\n # failure to find any peaks; probably monotonically increasing / decreasing\n return []\n\n prominences[:] = [(x[0], x[1] / prom_max) for x in prominences]\n\n # take only the tallest peaks above given tolerance\n peak_locs = [x for x in prominences if x[1] > tol]\n\n # if a peak has a flat top, then both 'corners' of that peak will have high prominence; this\n # is rather unavoidable. 
just check for adjacent peaks with exactly the same prominence and\n # remove the lower one\n to_remove = [\n peak_locs[i]\n for i in range(len(peak_locs) - 2)\n if peak_locs[i][1] == peak_locs[i + 1][1]\n ]\n for r in to_remove:\n peak_locs.remove(r)\n\n if ranked:\n peak_locs.sort(key=lambda x: x[1] * -1)\n else:\n peak_locs[:] = [x[0] for x in peak_locs]\n\n return peak_locs", "def sort(self):\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[0][j] < self.genepool[0][j-1]:\n self.genepool[0][j], self.genepool[0][j-1] = self.genepool[0][j-1], self.genepool[0][j]\n else:\n break\n for i in range(self.num):\n for j in range(i,0,-1):\n if self.genepool[1][j] < self.genepool[1][j-1]:\n self.genepool[1][j], self.genepool[1][j-1] = self.genepool[1][j-1], self.genepool[1][j]\n else:\n break", "def addPeakResonances(peaks):\n \n contribs = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) < 1:\n contrib = assignResToDim(peakDim)\n contribs.append(contrib)\n \n resonances = [c.resonance for c in contribs]\n \n return resonances", "def _sort_measurements(self):\n if self._unsorted:\n sorted_ndxs = np.argsort(self._angles)\n self._distances = self._distances[sorted_ndxs]\n self._angles = self._angles[sorted_ndxs]\n self._intensities = self._intensities[sorted_ndxs]\n self._error_codes = self._error_codes[sorted_ndxs]\n self._unsorted = False", "def movePeaks(hist, peaks, dist=20):\n peakList = []\n smooth_hist = smooth(hist)\n for pk in peaks:\n p = int(round(pk))\n while True:\n start = int(round(max(0, p - dist)))\n end = int(round(min(len(hist), p + dist)))\n if end < start:\n new_peak = p\n break\n new_peak = start + np.argmax(hist[int(start):int(end)])\n\n # if the local maximum is not far from initital peak, break\n if abs(p - new_peak) <= 5: #\n break\n else:\n left = min(p, new_peak)\n right = max(p, new_peak)\n\n # Check if between initial peak and local maximum has valley\n if all(smooth_hist[left + 1:right] > smooth_hist[p]):\n break\n dist = dist / 2\n peakList.append(new_peak)\n return list(peakList)", "def quick_sort(data, head, tail, draw_data, time_tick):\n if head < tail:\n partition_index = partition(data, head, tail, draw_data, time_tick)\n\n # Left partition\n quick_sort(data, head, partition_index-1, draw_data, time_tick)\n\n # Right partition\n quick_sort(data, partition_index+1, tail, draw_data, time_tick)", "def stitchpeaklist(inpeak_list,mergethreshold):\n peak_list=[]\n prev_peak=['chr0',0,1]\n inpeak_list.sort()\n for curr_peak in inpeak_list:\n if curr_peak[0]==prev_peak[0] and prev_peak[2]+mergethreshold>=curr_peak[1]:\n curr_peak[1]=min(prev_peak[1],curr_peak[1])\n curr_peak[2]=max(prev_peak[2],curr_peak[2])\n else:\n if prev_peak!=['chr0',0,1]:\n peak_list.append(prev_peak)\n prev_peak=curr_peak[:]\n peak_list.append(prev_peak)\n return peak_list", "def standard_sorting(cls, zmat):\n if zmat is None:\n return None\n nats = len(zmat)\n ncoords = 3*nats - 6\n if nats < 4:\n return None\n else:\n r_coords = [0, 1, 3]\n a_coords = [2, 4]\n t_coords = [5]\n if nats > 4:\n extra = np.arange(6, ncoords+1)\n r_coords += extra[::4].tolist()\n a_coords += extra[1::4].tolist()\n t_coords += extra[2::4].tolist()\n return np.argsort(np.concatenate([r_coords, a_coords, t_coords]))", "def parse_peaks(self):\n peaks = []\n if self._mir_root.tag == 'method':\n for peak in self._mir_root[0].findall(\"peak\"):\n p = dict(peak.items())\n peaks.append(Peak(float(p['m_z']), float(p['tolerance'])))\n return sorted(peaks, 
key=lambda x: x[0])", "def expression_peaks(cluster, magnitude, group1 = [ \"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\" ], group2 = [ \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ]):\n if cluster.averaged == False:\n cluster.average_matrix(group1 + group2)\n verbalise(\"G\", cluster.sample_header)\n peaklist = {}\n\n for gene in range(cluster.genenumber):\n # for group 1:\n datalist = list(cluster.data_matrix[:,gene])\n maxexpression = max(datalist[:len(group1)])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient:\n if maxexpression >= magnitude + datalist[0]:\n # check adjacent peaks are not too big:\n # difference of 5.64 corresponds to 2% of the untransformed fpkm value\n # difference of 1.00 corresponds to 50% of the untransformed fpkm value\n if maxposn == len(group1) - 1:\n if (maxexpression - 5.64 < datalist[maxposn - 1] < maxexpression - 1):\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n # for group 2:\n maxexpression = max(datalist[len(group1):])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient for reciprocal swap:\n if maxexpression >= magnitude * datalist[len(group1)]:\n # check adjacent peaks are not too big:\n try:\n if maxposn == len(group1+group2) - 1:\n if (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5):\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n except IndexError as inst:\n verbalise(\"R\", inst)\n verbalise(\"R\", datalist)\n verbalise(\"R\", \"Max is %.3f at position %d\" % (maxexpression, maxposn))\n\n verbalise(\"G\", len(peaklist), \"significant peaks found.\")\n return peaklist", "def get_top_spec(mz_list,intensity_list,min_perc=False,windowed_mode=False,top=10,window_size=100,add_dummy_peak=True):\n\tgr_intensity_list = []\n\tgr_mz_list = []\n\t\n\t#In the case of minimal percentage... calculate perc intensity and filter\n\tif min_perc:\n\t\tfor i,mz in zip(intensity_list,mz_list):\n\t\t\tif i > min_perc:\n\t\t\t\tgr_intensity_list.append(i)\n\t\t\t\tgr_mz_list.append(mz)\n\t\n\t#In the case of windowed mode... 
iterate over the possible windows and intensity values; take the top per window\n\tif windowed_mode:\n\t\tstart_index = 0\n\t\tfor w in range(window_size,int(max(mz_list)),window_size):\n\t\t\ttemp_mz = []\n\t\t\ttemp_intens = []\n\t\t\ttemp_start_index = 0\n\t\t\t\n\t\t\t#Iterate over all m/z values and see if they fall within the window\n\t\t\tfor mz,intens in zip(mz_list[start_index:],intensity_list[start_index:]):\n\t\t\t\tif mz > w and mz <= w+window_size:\n\t\t\t\t\ttemp_start_index += 1\n\t\t\t\t\ttemp_mz.append(mz)\n\t\t\t\t\ttemp_intens.append(intens)\n\t\t\t\tif mz > w+window_size:\n\t\t\t\t\tbreak\n\t\t\t#Next window ignore all these lower values\n\t\t\tstart_index = copy.deepcopy(temp_start_index)\n\t\t\t\n\t\t\t#Use all if there are less peaks than the top number of peaks it should select\n\t\t\tif len(temp_mz) <= top:\n\t\t\t\tgr_mz_list.extend(temp_mz)\n\t\t\t\tgr_intensity_list.extend(temp_intens)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t#Get the indexes of the top peaks\n\t\t\tidxs = np.sort(np.argpartition(np.array(temp_intens), -top)[-top:])\n\t\t\tgr_mz_list.extend([temp_mz[idx] for idx in idxs])\n\t\t\tgr_intensity_list.extend([temp_intens[idx] for idx in idxs])\n\t\n\t#If not windowed or min perc use a simple top peaks\n\tif not windowed_mode and not min_perc:\n\t\tif len(intensity_list) > top:\n\t\t\t#Get the indexes of the top peaks\n\t\t\tidxs = np.sort(np.argpartition(np.array(intensity_list), -top)[-top:])\n\t\t\tgr_mz_list = [mz_list[idx] for idx in idxs]\n\t\t\tgr_intensity_list = [intensity_list[idx] for idx in idxs]\n\t\telse:\n\t\t\t#If there are less peaks than top peaks; return all\n\t\t\tgr_mz_list = mz_list\n\t\t\tgr_intensity_list = intensity_list\n\t\n\t#If needed add a dummy peak; this is important later since I want to take into account immonium ions and small fragments\n\tif add_dummy_peak:\n\t\tgr_mz_list.insert(0,0.0)\n\t\tgr_intensity_list.insert(0,1.0)\n\t\n\treturn(gr_mz_list,gr_intensity_list)", "def sort_holes(self, wall, holes):\n center = wall.matrix_world @ self.center\n holes = [(o, (o.matrix_world.translation - center).length) for o in holes]\n self.quicksort(holes)\n return [o[0] for o in holes]", "def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n 
y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))", "def sort_segment_points(Aps, Bps):\n mid = []\n j = 0\n mid.append(Aps[0])\n for i in range(len(Aps)-1):\n dist = distance_tt_point(Aps[i], Aps[i+1])\n for m in range(j, len(Bps)):\n distm = distance_tt_point(Aps[i], Bps[m])\n if dist > distm:\n direction = dot(normalize(line(Aps[i].gen2arr(), Aps[i+1].gen2arr())), normalize(Bps[m].gen2arr()))\n if direction > 0:\n j = m + 1\n mid.append(Bps[m])\n break\n\n mid.append(Aps[i+1])\n for m in range(j, len(Bps)):\n mid.append(Bps[m])\n return mid", "def getAndSortFiducialPoints(self, center):\r\n # self.__registrationStatus.setText('Registration processing...')\r\n # pNode = self.parameterNode()\r\n # fixedAnnotationList = slicer.mrmlScene.GetNodeByID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if fixedAnnotationList != None:\r\n # fixedAnnotationList.RemoveAllChildrenNodes()\r\n markerCenters = center\r\n nbCenter = len(center)\r\n for k in range(nbCenter):\r\n point = [0]\r\n for i in range(nbCenter):\r\n U,V,W = 0,0,0\r\n for j in range(nbCenter):\r\n d = 0\r\n if i != j and markerCenters[i]!=(0,0,0):\r\n d2 = (markerCenters[i][0]-markerCenters[j][0])**2+(markerCenters[i][1]-markerCenters[j][1])**2+(markerCenters[i][2]-markerCenters[j][2])**2\r\n d = d2**0.5\r\n # print markerCenters[i],markerCenters[j]\r\n #print d\r\n if d >=45 and d<=53:\r\n U += 1\r\n elif d >53 and d<60:\r\n V +=1\r\n elif d >=70 and d<80:\r\n W +=1\r\n #print U,V,W\r\n if U+V+W>=3:\r\n #print markerCenters[i]\r\n point.extend([i])\r\n point.remove(0)\r\n minX = [999,999,999,999]\r\n maxX = [-999,-999,-999,-999]\r\n sorted = [[0,0,0] for l in range(4)]\r\n sortedConverted = [[0,0,0] for l in range(4)]\r\n for i in range(2):\r\n for k in point:\r\n if markerCenters[k][0]<= minX[0]:\r\n minX[0] = markerCenters[k][0]\r\n minX[1] = k\r\n elif markerCenters[k][0]<= minX[2]:\r\n minX[2] = markerCenters[k][0]\r\n minX[3] = k\r\n if markerCenters[k][0]>= maxX[0]:\r\n maxX[0] = markerCenters[k][0]\r\n maxX[1] = k\r\n elif markerCenters[k][0]>= maxX[2]:\r\n maxX[2] = markerCenters[k][0]\r\n maxX[3] = k\r\n if markerCenters[minX[1]][1] < markerCenters[minX[3]][1]:\r\n sorted[0] = minX[1]\r\n sorted[1] = minX[3]\r\n else:\r\n sorted[0] = minX[3]\r\n sorted[1] = minX[1]\r\n if markerCenters[maxX[1]][1]>markerCenters[maxX[3]][1]:\r\n sorted[2] = maxX[1]\r\n sorted[3] = maxX[3]\r\n else:\r\n sorted[2] = maxX[3]\r\n sorted[3] = maxX[1]\r\n sorted2 = [0,0,0,0]\r\n if 1:#self.horizontalTemplate.isChecked():\r\n sorted2[0]=sorted[2]\r\n sorted2[2]=sorted[0]\r\n sorted2[1]=sorted[3]\r\n sorted2[3]=sorted[1]\r\n else:\r\n sorted2[0]=sorted[3]\r\n sorted2[2]=sorted[1]\r\n sorted2[1]=sorted[0]\r\n sorted2[3]=sorted[2]\r\n # logic = slicer.modules.annotations.logic()\r\n # logic.SetActiveHierarchyNodeID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if pNode.GetParameter(\"Template\")=='4points':\r\n # nbPoints=4\r\n # elif pNode.GetParameter(\"Template\")=='3pointsCorners':\r\n # nbPoints=3\r\n l = slicer.modules.annotations.logic()\r\n l.SetActiveHierarchyNodeID(slicer.util.getNode('Fiducial List_fixed').GetID())\r\n for k in range(4) :\r\n fiducial = 
slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n fiducial.SetReferenceCount(fiducial.GetReferenceCount()-1)\r\n fiducial.SetFiducialCoordinates(markerCenters[sorted2[k]])\r\n fiducial.SetName(str(k))\r\n fiducial.Initialize(slicer.mrmlScene)\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed ==None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n # sRed.SetSliceVisible(1)\r\n m= sRed.GetSliceToRAS()\r\n m.SetElement(0,3,sortedConverted[3][0])\r\n m.SetElement(1,3,sortedConverted[3][1])\r\n m.SetElement(2,3,sortedConverted[3][2])\r\n sRed.Modified()\r\n return sorted2", "def sort_slices(slices):\n result = []\n for x in slices:\n sorted = []\n semi_sorted = []\n # Sort by stations\n x.sort(key=lambda y: y[2])\n\n # Sort by channels\n found_channels = []\n current_station = x[0][2]\n for y in x:\n if current_station != y[2]:\n current_station = y[2]\n found_channels = []\n if y[3][-1] in found_channels:\n continue\n if y[3][-1] in config.archive_channels_order:\n found_channels.append(y[3][-1])\n semi_sorted.append(y)\n\n current_station = \"\"\n index = 0\n for y in semi_sorted:\n if y[2] != current_station:\n current_station = y[2]\n for channel in config.archive_channels_order:\n sorting_index = index\n while sorting_index < len(semi_sorted) and semi_sorted[sorting_index][2] == current_station:\n if semi_sorted[sorting_index][3][-1] == channel:\n sorted.append(semi_sorted[sorting_index])\n break\n sorting_index += 1\n index += 1\n\n result.append(sorted)\n\n return result" ]
[ "0.5235512", "0.51029795", "0.5057203", "0.50256133", "0.4997207", "0.49645668", "0.49587327", "0.49490315", "0.4915132", "0.4911893", "0.48918042", "0.4875538", "0.4869072", "0.48368287", "0.4831977", "0.48246452", "0.48140344", "0.47824505", "0.47735858", "0.4757741", "0.47366777", "0.47363457", "0.47206417", "0.47083598", "0.469233", "0.46840373", "0.46707654", "0.46692488", "0.46684074", "0.46657324" ]
0.73416525
0
Return a shell-escaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets to be further interpreted by the shell. The set of characters that retain their special meaning may depend on the
def DoubleQuote(s): if not s: return '""' elif all(c in _SafeShellChars for c in s): return s else: return '"' + s.replace('"', '\\"') + '"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shlex_quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shlex_quote(s):\n if not s:\n return \"''\"\n # PKGW: builtin not available in Python 2\n ###if _find_unsafe(s) is None:\n ### return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shellquote(s):\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'", "def quote(s):\n # Based on shlex.quote. Bun unlike shlex, it quotes every string and\n # not just the ones that contain unsafe characters.\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def shellformat(string):\n return \"'\" + string.replace(\"'\", \"'\\\\''\") + \"'\"", "def _sh_quote(s):\n if not s:\n return b\"\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return b\"'\" + s.replace(b\"'\", b\"'\\\"'\\\"'\") + b\"'\"", "def SingleQuote(s):\n return pipes.quote(s)", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def sh_quote_unsafe(arg):\n return ('\"' + _DQUOTE_RE.sub(r'\\1\\1\\\"', str(arg)) + '\"' )", "def quote(s):\n if not s:\n return \"''\"\n if _find_unsafe(s) is None:\n return \"'\" + s + \"'\"\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"", "def sh_quote_safe(arg):\n return (\"'\" + str(arg).replace(\"'\", r\"'\\''\") + \"'\")", "def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")", "def quot(string):\r\n return string.replace('\"', \"'\")", "def quote(s):\n return unescape(quoteattr(s))", "def QuotedEscaped (s):\n return repr(s)", "def unquote(s, *a, **kw):\n return quote(s, *a, **kw)", "def lisp_string(python_string):\n return '\"%s\"' % python_string.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')", "def shquote(arg):\n for c in '\"', \"'\", \"\\\\\", \"#\":\n if c in arg:\n return repr(arg)\n if arg.split() != [arg]:\n return repr(arg)\n return arg", "def str_wrap_double(s):\n s = str(s)\n return '\"' + s + '\"'", "def shell_escape(s):\n from tempfile import mkstemp\n fd, path = mkstemp()\n try:\n with os.fdopen(fd, 'w') as f:\n f.write(s)\n cmd = r\"\"\"cat %s | sed -e \"s/'/'\\\\\\\\''/g; 1s/^/'/; \\$s/\\$/'/\" \"\"\" % path\n escaped_str = check_output(cmd, shell=True)\n finally:\n os.remove(path)\n\n return escaped_str", "def Quote(s):\n if not nonnormal_char_re.search(s):\n return s # no quoting necessary\n slist = []\n for char in s:\n if nonnormal_char_re.search(char):\n slist.append(\"\\\\x%02x\" % ord(char))\n else:\n slist.append(char)\n return '\"%s\"' % \"\".join(slist)", "def quoteString(s):\n if s is None:\n return None\n quoted = str(s).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n return 
\"'{}'\".format(quoted)", "def quote(s):\n\n\ts = \"'\" + s.replace(\"'\", \"\"\"'\"'\"'\"\"\") + \"'\"\n\n\t#get rid of gratuitous leading and trailing empty strings\n\tif s.startswith(\"''\"): s = s[2:]\n\tif s.endswith(\"''\"): s = s[:-2]\n\n\treturn s", "def c_stringify(cls, st):\n return '\"{0}\"'.format(st.value.replace('\"', '\\\\\"'))", "def special_character(raw_string, force_quote = False):\n if raw_string == \"\":\n return '\"\"'\n\n # Pass through other values, such as None:\n if type(raw_string) not in types.StringTypes:\n return raw_string\n\n # quick bypass if there are no characters to force escapeaping:\n if not force_quote and not _needs_escapeaping_re.search(raw_string):\n return raw_string\n \n if '\"' not in raw_string:\n return '\"%s\"' % (_avert_unallowable(raw_string),)\n\n if \"'\" not in raw_string:\n return \"'%s'\" % (_avert_unallowable(raw_string),)\n\n # If there are both single and double special_characters in the string, we\n # enclose the whole thing in double special_characters and escape double quotes\n # in the original string.\n return '\"%s\"' % (_avert_unallowable(raw_string, True),)", "def quote(value):\n return DoubleQuotedScalarString(value)", "def safe_quoted_string(value):\n validate_safe_string(value)\n return u'\\'{}\\''.format(value)" ]
[ "0.7271046", "0.7253869", "0.72336555", "0.7229503", "0.70458126", "0.6944718", "0.6929941", "0.69232047", "0.69232047", "0.6779495", "0.67524135", "0.6597111", "0.65538913", "0.6536574", "0.65330154", "0.65087634", "0.6507119", "0.64975196", "0.64822", "0.646473", "0.644913", "0.63986534", "0.63792753", "0.6364152", "0.63629556", "0.63309103", "0.6299909", "0.62974316", "0.62783724", "0.6259792" ]
0.7298411
0
Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen.
def ShrinkToSnippet(cmd_parts, var_name, var_value): def shrink(value): parts = (x and SingleQuote(x) for x in value.split(var_value)) with_substitutions = ('"$%s"' % var_name).join(parts) return with_substitutions or "''" return ' '.join(shrink(part) for part in cmd_parts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def substshell(command, path=None, output=os.devnull, mode='w'):\n _compile = SubstCommandCompiler(path)\n _compile.init_command(command)\n return functools.partial(_compile, output, mode)", "def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell", "def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n return shlex.quote(val)\n return shlex.quote(s)", "def _wrap_command_line(s: str) -> str:\n if not _command_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[len(indent):]\n return f\"{indent}pass # {cmd}{_command_escape_comment}\"", "def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE", "def _build_direct_command(self, cmd, arg):\n return \"%s%s\" % (arg, self._newline)", "def wrap_command(command: str) -> str: \n\n wrapper = \"\"\"\n sub callback {\n {{COMMAND}};\n }\n\n import java.io.*; \n import java.util.*; \n $baos = [new ByteArrayOutputStream]; \n $oos = [new ObjectOutputStream: $baos]; \n [$oos writeObject: callback()]; \n [$oos close]; \n $encoder = [Base64 getEncoder]; \n println([$encoder encodeToString: [$baos toByteArray]]);\n \"\"\"\n\n # Replace command in wrapper\n wrapper = wrapper.replace(r\"{{COMMAND}}\", command)\n return convert_to_oneline(wrapper)", "def do_shell(self, line):\n eval(line)", "def _instantiateSecrets(cmd, secrets, hide):\n if secrets:\n for (i, secret) in enumerate(secrets):\n if hide:\n secret = '<hidden>'\n cmd = cmd.replace(f':{i}:', secret)\n return cmd", "def build_sh_cmd(cmd, cwd=None):\n args = cmd.split()\n return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])", "def _preprocess(command):\n for shell_command in DockerProxy.DockerProxy.shell_commands:\n if shell_command in command:\n replace_string = \"/bin/bash -c \\\"\" + shell_command\n command = command.replace(shell_command, replace_string)\n command += \"\\\"\"\n return command", "def _build_simple_command(self, cmd):\n return cmd+SBE37_NEWLINE", "def do_shell(self, line):\n os.system(line)", "def win32_command(command, *args, **kwargs):\n # pylint: disable = redefined-outer-name\n return ' '.join([metasub(\n '\"%s\"' % (slashsub(token).replace('\"', '\\\\\"'),)\n if needq(token) else token\n ) for token in map(_make_formatter(*args, **kwargs),\n split_command(command))])", "def sh_quote_safe(arg):\n return (\"'\" + str(arg).replace(\"'\", r\"'\\''\") + \"'\")", "def sh_quote_unsafe(arg):\n return ('\"' + _DQUOTE_RE.sub(r'\\1\\1\\\"', str(arg)) + '\"' )", "def StringifyCommand(cmd):\n ret = ''\n grouping = 0\n for a in cmd:\n if grouping == 0 and len(ret) > 0:\n ret += \" \\\\\\n \"\n elif grouping > 0:\n ret += \" \"\n if grouping == 0:\n grouping = 1\n if a.startswith('-') and len(a) == 2:\n grouping = 2\n ret += a\n grouping -= 1\n return ret", "def shell_command_strings(self, command):\n return (None, \"$(shell \" + command + \")\", None)", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n 
return '\\n'.join(command)", "def shellcommand(command):\n\n subprocess.call(str(command))", "def _make_posix_command():\n qsearch = _re.compile(r'[^a-zA-Z\\d_./-]').search\n needq = lambda x: not x or qsearch(x)\n\n def posix_command(command, *args, **kwargs):\n \"\"\"\n Return a POSIX shell suitable commandline\n\n Either args or kwargs or neither of them can be set. There cannot be\n set both of them.\n\n :Parameters:\n `command` : ``str``\n Generic commandline, possibly containing substitutions, filled by\n args or kwargs. See `split_command` for generic commandline\n syntax.\n\n `args` : ``tuple``\n Substitution tuple\n\n `kwargs` : ``dict``\n Substitution dict\n\n :Return: Strictly quoted shell commandline for POSIX shells\n :Rtype: ``str``\n \"\"\"\n # pylint: disable = redefined-outer-name\n return ' '.join([\n \"'%s'\" % (token.replace(\"'\", \"'\\\\''\")) if needq(token) else token\n for token in map(_make_formatter(*args, **kwargs),\n split_command(command))\n ])\n return posix_command", "def shell_command(self):\n # TODO: fix this naive version by adding quotes where appropriate\n return \" \".join(self.args)", "def do_shell(self, command):\n os.system(command)", "def shellquote(arg):\n if re.match('^[-_.:/=a-zA-Z0-9]*$', arg):\n return arg\n else:\n return \"'%s'\" % arg.replace(\"'\", r\"'\\''\")", "def expand_var(self, key, val=None):\n if val is None:\n return 'unset {0};'.format(key)\n else:\n return '{0}=\"{1}\";export {0};'.format(key, val)", "def _unwrap_command_line(s: str) -> str:\n if not _command_escape_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[(len(indent) + 8):-len(_command_escape_comment)]\n return indent + cmd", "def rebuild_command(args):\n return \"%s\\n\" % (\" \".join(args)).replace(\"\\\\\", \"\\\\\\\\\")", "async def shell(*command: Strings, prefix: Optional[Strings] = None, **resources: int) -> None:\n current = Invocation.current\n if prefix is None:\n global default_shell_prefix # pylint: disable=invalid-name\n prefix = default_shell_prefix.value\n\n def _run_shell(parts: List[str]) -> Awaitable:\n assert prefix is not None\n global shell_executable # pylint: disable=invalid-name\n return asyncio.create_subprocess_shell(\n \" \".join(flatten(prefix, parts)),\n executable=shell_executable.value,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n await current.done(current.run_action(\"shell\", _run_shell, *command, **resources))", "def get_shell_cmd(self, action, wildcards):\n assert action == \"run\", \"Unsupported action\"\n ins = expand(\n self.base_path_in.format(wildcards=wildcards),\n postproc=[self._get_postproc_token()],\n ext=self.extensions,\n )\n outs = [s.format(**wildcards) for s in expand(self.base_path_out, ext=self.extensions)]\n assert len(ins) == len(outs)\n return \"\\n\".join(\n (\n \"test -L {out} || ln -sr {in_} {out}\".format(in_=in_, out=out)\n for in_, out in zip(ins, outs)\n )\n )", "def get_shell(self, shell):" ]
[ "0.5757063", "0.53830165", "0.53762203", "0.5325956", "0.53113496", "0.5278688", "0.518739", "0.5154804", "0.51409817", "0.5108147", "0.50834435", "0.50418144", "0.5036694", "0.49932173", "0.49862692", "0.4973778", "0.49597052", "0.49114555", "0.49044985", "0.4899245", "0.48928267", "0.48887584", "0.4858884", "0.48539594", "0.4846609", "0.48303756", "0.48161605", "0.4791082", "0.4765783", "0.4755334" ]
0.7213392
0
An fcntl-based implementation of _IterProcessStdout.
def _IterProcessStdoutFcntl(process, iter_timeout=None, timeout=None, buffer_size=4096, poll_interval=1): # pylint: disable=too-many-nested-blocks import fcntl try: # Enable non-blocking reads from the child's stdout. child_fd = process.stdout.fileno() fl = fcntl.fcntl(child_fd, fcntl.F_GETFL) fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) end_time = (time.time() + timeout) if timeout else None iter_end_time = (time.time() + iter_timeout) if iter_timeout else None while True: if end_time and time.time() > end_time: raise TimeoutError() if iter_end_time and time.time() > iter_end_time: yield None iter_end_time = time.time() + iter_timeout if iter_end_time: iter_aware_poll_interval = min(poll_interval, max(0, iter_end_time - time.time())) else: iter_aware_poll_interval = poll_interval read_fds, _, _ = select.select([child_fd], [], [], iter_aware_poll_interval) if child_fd in read_fds: data = _read_and_decode(child_fd, buffer_size) if not data: break yield data if process.poll() is not None: # If process is closed, keep checking for output data (because of timing # issues). while True: read_fds, _, _ = select.select([child_fd], [], [], iter_aware_poll_interval) if child_fd in read_fds: data = _read_and_decode(child_fd, buffer_size) if data: yield data continue break break finally: try: if process.returncode is None: # Make sure the process doesn't stick around if we fail with an # exception. process.kill() except OSError: pass process.wait()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def _IterProcessStdoutQueue(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=unused-argument\n if six.PY3:\n import queue\n else:\n import Queue as queue\n import threading\n\n stdout_queue = queue.Queue()\n\n def read_process_stdout():\n # TODO(jbudorick): Pick an appropriate read size here.\n while True:\n try:\n output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size)\n except IOError:\n break\n stdout_queue.put(output_chunk, True)\n if not output_chunk and process.poll() is not None:\n break\n\n reader_thread = threading.Thread(target=read_process_stdout)\n reader_thread.start()\n\n end_time = (time.time() + timeout) if timeout else None\n\n try:\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n try:\n s = stdout_queue.get(True, iter_timeout)\n if not s:\n break\n yield s\n except queue.Empty:\n yield None\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()\n reader_thread.join()", "def __readStdout(self):\n if self.process is not None:\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n if (\n self.currentChangelist != \"\" and\n self.rx_status.exactMatch(s)\n ):\n file = self.rx_status.cap(5).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif (\n self.currentChangelist != \"\" and\n self.rx_status2.exactMatch(s)\n ):\n file = self.rx_status2.cap(2).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif self.rx_changelist.exactMatch(s):\n self.currentChangelist = self.rx_changelist.cap(1)\n if self.currentChangelist not in self.changeListsDict:\n self.changeListsDict[self.currentChangelist] = []", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or \\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, 
COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "def __iter__(self):\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n yield line", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n print(line.strip('\\n'))\n self.pipeReader.close()", "def _stdout_to_flag(self):\n self._is_running.wait()\n while self._is_running.is_set():\n msg = self.stdout_queue.get()\n if msg is None or len(msg) < 1: # It's time to stop\n break\n if msg[0] == \"#\": # It's a signal from the kxkmcard program\n self.onEvent(msg[1:].split(' '))\n else:\n self._log(\"warning\", \"unknown stdout line {0}\".format(msg))", "def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. 
Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)", "def nostdout():\n\n save_stdout = sys.stdout\n sys.stdout = cStringIO.StringIO()\n yield\n sys.stdout = save_stdout", "def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)", "def redirect_stdout():\n save_stdout = sys.stdout\n sys.stdout = _TQDMFile(sys.stdout)\n yield\n sys.stdout = save_stdout", "def _watch(self):\n # self._popen.wait()\n lines_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in lines_iterator:\n line = line.strip()\n # log.log(\"raw\",self.name.upper()+\" SAYS: \"+line)\n # cmd = line.split(' ')[0]\n # args = line.split(' ')[1:]\n if line[0] == '#':\n self.onEvent(line.split(' '))\n if self.onClose:\n self.onEvent([self.onClose])\n self._running.clear()\n if self.stderr is not None:\n self.stderr.close()", "def piped(self):\n\t\tpass", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()", "def read(self):\n # now read stderr for log messages, we could buffer here but since\n # we're just logging the messages, I don't care to\n try:\n out = self.proc.stderr.read()\n if out:\n LOG.debug('reading %s got %d bytes on stderr', self.name,\n len(out))\n for line in out.splitlines():\n LOG.warning('%s: %s', self.name, line)\n except IOError as err:\n if err.errno != errno.EAGAIN:\n # allowing a caller to handle the exception as well\n raise\n except:\n LOG.exception('uncaught exception in stderr read')\n\n # This read call is non-blocking\n try:\n self.buffer += self.proc.stdout.read()\n if len(self.buffer):\n LOG.debug('reading %s, buffer now %d bytes',\n self.name, len(self.buffer))\n except IOError as err:\n if err.errno != errno.EAGAIN:\n raise\n except:\n # sometimes the process goes away in another thread and we don't\n # have it anymore\n LOG.exception('uncaught exception in stdout read')\n return\n\n # iterate for each line we have\n while self.buffer:\n idx = self.buffer.find('\\n')\n if idx == -1:\n break\n\n line = self.buffer[0:idx].strip()\n if line:\n self.datalines.append(line)\n self.buffer = self.buffer[idx+1:]", "def stdio(self):\n\n if isinstance(self.log_file, TotalLogFile):\n self.stdio_stolen = True\n self.log_file.stdio()", "def _flush_buffer(self):\n self.pexpect_child.logfile = None\n flushedStuff = \"\"\n while self.pexpect_child.expect([pexpect.TIMEOUT, r\".+\"], timeout=1):\n flushedStuff += self.pexpect_child.match.group(0)\n self.pexpect_child.logfile = self.log_file", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n 
# our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n logging.log(self.level, line.strip('\\n'))\n\n self.pipeReader.close()", "def _stdin_writer(self):\n self._is_launched.wait()\n while True:\n message = self.stdin_queue.get()\n if message is None or self._is_stopping or not self._is_running.is_set():\n if message is not None:\n log.debug(\"Ignore {0} on process {1} because it's stopped\".format(message, self.name))\n break\n self._direct_stdin_writer(message)\n self._log(\"raw\", \"write to stdin : {0}\".format(message.encode(\"utf-8\")))", "def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())", "def hook() -> None:\n real_recv = process.recv_raw\n\n def recv(self: process, numb: int) -> bytes:\n data = real_recv(self, numb)\n # Sometimes the returned data is of type str\n # Accept them by converting them to bytes\n if type(data) == str:\n data = data.encode()\n try:\n stdout_all = self.stdout_all\n except Exception: # pylint: disable=broad-except\n stdout_all = b\"\"\n stdout_all += data\n self.stdout_all = stdout_all\n return data\n\n 
process.recv_raw = recv", "def tail(self):\n for line in iter(self.proc.stdout.readline, ''):\n if len(line) == 0:\n break\n if self.log_filter(line.decode('ASCII')):\n continue\n if self.verbose:\n logging.debug(f\"{self.prefix}: {line.decode().rstrip()}\")\n with self.logs_cond:\n self.logs.append(str(line.rstrip()))\n self.logs_cond.notifyAll()\n self.running = False\n self.proc.stdout.close()\n if self.proc.stderr:\n self.proc.stderr.close()", "def process_output(self, stdout=True, final_read=False):\n if stdout:\n pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee\n else:\n pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee\n\n if final_read:\n # read in all the data we can from pipe and then stop\n data = []\n while select.select([pipe], [], [], 0)[0]:\n data.append(os.read(pipe.fileno(), 1024))\n if len(data[-1]) == 0:\n break\n data = \"\".join(data)\n else:\n # perform a single read\n data = os.read(pipe.fileno(), 1024)\n buf.write(data)\n tee.write(data)", "def write_queued_output(self):\n for stream in [\"stdout\", \"stderr\"]:\n while True:\n output, queue_size = getattr(self, stream).readline(timeout=0.1)\n if not (output is None or len(output) == 0):\n self.log(output, self.log_level[stream])\n if queue_size == 0:\n break", "def readOutput(self):\n while True:\n char = os.read(self.pipe_out, 1).decode(self.encoding)\n if not char or self.escape_char in char:\n break\n self.capturedtext += char", "def stdout(self):\n pass", "def print_output(self, final=False):\n encoding = sys.stdout.encoding\n if final and self.process: # ask for process because might be an action\n line = self.process.stdout.read().decode(encoding)\n self.last_run['output'] += line\n sys.stdout.write(line)\n else:\n str_chunk = None\n chunk = bytes()\n while not isinstance(str_chunk, str):\n assert self.process\n chunk += self.process.stdout.read(1)\n try:\n str_chunk = chunk.decode(encoding)\n except:\n str_chunk = None\n self.last_run['output'] += str_chunk\n sys.stdout.write(str_chunk)", "def _ffmpeg_loop(cls, ffmpeg: subprocess.Popen) -> Iterable[Progress]:\n while ffmpeg.poll() is None:\n rlist, _, _ = select((ffmpeg.stderr, ffmpeg.stdout), (), ())\n # Read logs from stdin\n if ffmpeg.stderr in rlist:\n status = cls.process_logs(ffmpeg.stderr.read().splitlines())\n if status:\n yield status\n # ignore stdout\n if ffmpeg.stdout in rlist:\n ffmpeg.stdout.read()" ]
[ "0.6449587", "0.6135046", "0.60871077", "0.595699", "0.594129", "0.58840406", "0.5684317", "0.5682602", "0.5677694", "0.5666196", "0.565126", "0.55647177", "0.5556993", "0.55479926", "0.54802346", "0.5458966", "0.5452489", "0.54261523", "0.5415573", "0.539853", "0.53972363", "0.5392595", "0.5381609", "0.534832", "0.5345636", "0.53397214", "0.533477", "0.52793676", "0.5277436", "0.52761036" ]
0.67283905
0
Create a SparseAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid', out_transfer='identity', reconstruct_loss='squared', c_sparsity=1, sparsity_loss='bern_bern_kl', sparsity_target=0.01, tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(SparseAutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None self.parameters.data[:] = np.random.standard_normal( self.parameters.data.shape).astype(theano.config.floatX) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')", "def make_sparse(data):\n assert data.train_pos_edge_index is not None\n\n (row, col), N = data.train_pos_edge_index, data.num_nodes\n perm = (col * N + row).argsort()\n row, col = row[perm], col[perm]\n\n value = [data.edge_id[(row[i] * N + col[i]).item()].item() for i in perm]\n\n data.adj_t = SparseTensor(\n row=col,\n col=row,\n value=torch.tensor(value, dtype=torch.float32),\n sparse_sizes=(N, N),\n is_sorted=True,\n )\n\n # Pre-process some important attributes.\n data.adj_t.storage.rowptr()\n data.adj_t.storage.csr2csc()\n\n return data", "def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)", "def sparse_constructor(self, indices: 'np.ndarray', values: 'np.ndarray', shape: List[int]) -> 'SparseTensor':\n return SparseTensor(indices, values, shape)", "def sparse_constructor(value, name=None, strict=False, allow_downcast=None,\r\n borrow=False, format=None):\r\n if not isinstance(value, scipy.sparse.spmatrix):\r\n raise TypeError(\"Expected a sparse matrix in the sparse shared variable constructor. Received: \",\r\n value.__class__)\r\n\r\n if format is None:\r\n format = value.format\r\n type = SparseType(format=format, dtype=value.dtype)\r\n if not borrow:\r\n value = copy.deepcopy(value)\r\n return SparseTensorSharedVariable(type=type, value=value, name=name,\r\n strict=strict, allow_downcast=allow_downcast)", "def to_sparse(self):\n from divisi2.sparse import SparseVector\n return SparseVector(self, self.labels)", "def _from_dict_to_sparse(self, adj_dict):\n indices = list(adj_dict.keys())\n values = [1] * len(indices)\n\n edge_index = torch.LongTensor(indices).T.to(self.device)\n edge_attr = torch.FloatTensor(values).to(self.device)\n\n edge_index, edge_attr = utils.to_symmetric(edge_index, edge_attr, self.n)\n\n return SparseTensor.from_edge_index(edge_index=edge_index,\n edge_attr=edge_attr,\n sparse_sizes=torch.Size([self.n, self.n]))", "def SparseEmbedding(data=None, weight=None, input_dim=_Null, output_dim=_Null, dtype=_Null, out=None, name=None, **kwargs):\n return (0,)", "def _make_train(data, smooth_factor):\n train_matrix = data_to_sparse(data).tolil()\n user_counts = np.array(train_matrix.sum(axis=1))[:, 0]\n train_matrix[np.where(user_counts == 0)] = smooth_factor\n train_matrix = normalize(train_matrix, 'l1', axis=1)\n return train_matrix.tocsr()", "def make_sparse(sparse_mx, args):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n\n indices = tensor(np.vstack((sparse_mx.row, sparse_mx.col)), args, torch.long)\n values = tensor(sparse_mx.data, args)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def train_clustermodel_sparse(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse'])\n clusters_df['segmentskey'] = 
clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse']]", "def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type=\"float\"):\n if a_ndarray is None:\n return None\n invalidInputError(isinstance(a_ndarray, np.ndarray),\n f\"input should be a np.ndarray, not ${type(a_ndarray)}\")\n invalidInputError(isinstance(i_ndarray, np.ndarray),\n f\"indices should be a np.ndarray, not ${type(i_ndarray)}\")\n invalidInputError(i_ndarray.size == a_ndarray.size * shape.size,\n f\"size of values ${a_ndarray.size * shape.size} and\"\n f\" indices ${i_ndarray.size} should match\")\n return cls(a_ndarray,\n shape,\n bigdl_type,\n i_ndarray)", "def save_sparse_csr(filename,array, labels, vocab):\n np.savez(filename,data = array.data ,indices=array.indices,\n indptr =array.indptr, shape=array.shape, labels=labels, vocab=vocab)", "def _binary_3d_label_to_sparse(labels):\n return sparse_tensor.SparseTensor.from_value(\n _binary_3d_label_to_sparse_value(labels))", "def sparseFeature(feat_name, feat_num, embed_dim=4):\n return {'feat_name': feat_name, 'feat_num': feat_num, 'embed_dim': embed_dim}", "def sparse_to_dense(example):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n example[key] = val\n\n return example", "def sparse_encode(D, data, callback=None, n_alphas=3):\n D = np.asanyarray(D, dtype=np.double)\n data = np.asanyarray(data, dtype=np.double)\n data = np.atleast_2d(data)\n\n # TODO: use a smart sparse representation instead\n encoded = np.zeros((data.shape[0], D.shape[1]), dtype=np.double)\n\n for i, code in enumerate(data):\n clf = LassoCV(n_alphas=n_alphas).fit(D, code, fit_intercept=False)\n encoded[i][:] = clf.coef_\n\n if callback is not None:\n callback(i)\n return encoded", "def __init__(self, idxbase=0):\n if idxbase not in (0, 1):\n raise ValueError(\"Invalid index base\")\n\n self.api = cuSparse()\n self.idxbase = (CUSPARSE_INDEX_BASE_ZERO,\n CUSPARSE_INDEX_BASE_ONE)[idxbase]", "def set_sparse_signals(self):\n\t\n\t\tparams_dSs = [self.mu_dSs, self.sigma_dSs]\n\t\tparams_Ss0 = [self.mu_Ss0, self.sigma_Ss0]\n\t\tself.dSs, self.idxs = sparse_vector([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\tparams_dSs,\tseed=self.seed_dSs)\n\t\t\n\t\t# Replace components with conflicting background odor \n\t\tif self.Kk_split is not None and self.Kk_split != 0:\n\t\t\tassert 0 <= self.Kk_split <= self.Kk, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires Kk_split\" \\\n\t\t\t\t\" to be non-negative and less than or equal to Kk.\"\n\t\t\tassert self.mu_dSs_2 is not None \\\n\t\t\t\tand self.sigma_dSs_2 is not None, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires that\" \\\n\t\t\t\t\" mu_dSs_2 and sigma_dSs_2 are set.\"\n\n\t\t\tsp.random.seed(self.seed_dSs)\n\t\t\tself.idxs_2 = sp.random.choice(self.idxs[0], self.Kk_split, \n\t\t\t\t\t\t\t\t\t\t\treplace=False)\n\t\t\tfor idx_2 in self.idxs_2:\n\t\t\t\tself.dSs[idx_2] = sp.random.normal(self.mu_dSs_2, \n\t\t\t\t\t\t\t\t\t\t\t\t\tself.sigma_dSs_2)\n\t\telse:\n\t\t\tself.idxs_2 = []\n\t\t\tself.Kk_split = 
0\n\t\t\t\n\t\t# Ss0 is the ideal (learned) background stimulus without noise\n\t\tself.Ss0, self.Ss0_noisy = sparse_vector_bkgrnd([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.idxs, params_Ss0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tseed=self.seed_Ss0)\n\t\t\n\t\tself.Ss = self.dSs + self.Ss0_noisy", "def test_to_sparse(self, fn_name, fn_args, proto_list_key):\n self.run_benchmarks(fn_name, _get_prensor_to_sparse_tensor_fn, fn_args,\n proto_list_key)", "def _binary_2d_label_to_sparse(labels):\n return sparse_tensor.SparseTensor.from_value(\n _binary_2d_label_to_sparse_value(labels))", "def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)", "def _identity_sparse(d, stype=\"csr\", dtype=complex):\n return sp.eye(d, dtype=dtype, format=stype)", "def copy(self):\n return SparseN(self)", "def as_sparse_variable(x, name=None):\r\n\r\n # TODO\r\n # Verify that sp is sufficiently sparse, and raise a\r\n # warning if it is not\r\n\r\n if isinstance(x, gof.Apply):\r\n if len(x.outputs) != 1:\r\n raise ValueError(\"It is ambiguous which output of a \"\r\n \"multi-output Op has to be fetched.\", x)\r\n else:\r\n x = x.outputs[0]\r\n if isinstance(x, gof.Variable):\r\n if not isinstance(x.type, SparseType):\r\n raise TypeError(\"Variable type field must be a SparseType.\", x,\r\n x.type)\r\n return x\r\n try:\r\n return constant(x, name=name)\r\n except TypeError:\r\n raise TypeError(\"Cannot convert %s to SparseType\" % x, type(x))", "def SimpleSparseTensorFrom(x):\n x_ix = []\n x_val = []\n for batch_i, batch in enumerate(x):\n for time, val in enumerate(batch):\n x_ix.append([batch_i, time])\n x_val.append(val)\n x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]\n x_ix = tf.constant(x_ix, tf.int64)\n x_val = tf.constant(x_val, tf.int32)\n x_shape = tf.constant(x_shape, tf.int64)\n\n #return tf.SparseTensor(x_ix, x_val, x_shape)\n return ([x_ix, x_val, x_shape])", "def make_stax_model(self):", "def dense_to_sparse(adj):\n assert adj.dim() >= 2 and adj.dim() <= 3\n assert adj.size(-1) == adj.size(-2)\n\n index = adj.nonzero(as_tuple=True)\n #print(index)\n edge_attr = adj[index]\n\n if len(index) == 3:\n batch = index[0] * adj.size(-1)\n index = (batch + index[1], batch + index[2])\n\n return torch.stack(index, dim=0), edge_attr" ]
[ "0.63621956", "0.62124324", "0.60858965", "0.59326434", "0.5876335", "0.5779511", "0.576974", "0.5758959", "0.5684206", "0.56325567", "0.56308913", "0.5587562", "0.55547976", "0.5466962", "0.5456364", "0.52503127", "0.5241987", "0.5203801", "0.5184267", "0.51695746", "0.51491255", "0.5141162", "0.5115967", "0.51068705", "0.5081264", "0.5074779", "0.5066938", "0.50650316", "0.502198", "0.50199896" ]
0.63101643
1
Create a DenoisingAutoEncoder object.
def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid', out_transfer='identity', reconstruct_loss='squared', noise_type='gauss', c_noise=.2, tied_weights=True, batch_size=None, optimizer='lbfgs', max_iter=1000, verbose=False): super(DenoisingAutoEncoder, self).__init__( n_inpt, n_hidden, hidden_transfer, out_transfer, reconstruct_loss, noise_type, c_noise, tied_weights) self.batch_size = batch_size self.optimizer = optimizer self.f_transform = None self.f_reconstruct = None climin.initialize.randomize_normal(self.parameters.data) self.max_iter = max_iter self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_encoder(self):\n raise NotImplementedError", "def build_autoencoder(input_dim):\r\n input_layer = Input(shape=(input_dim, 1))\r\n enc = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(input_layer)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(enc)\r\n enc = MaxPooling1D(pool_size=2, padding='same')(enc)\r\n enc = Flatten()(enc)\r\n enc = Dense(64)(enc)\r\n\r\n dec = Dense(200704)(enc)\r\n dec = Reshape((3136, 64))(dec)\r\n dec = Conv1D(filters=64, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=32, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=16, kernel_size=2, padding='same', activation='relu')(dec)\r\n dec = UpSampling1D(2)(dec)\r\n dec = Conv1D(filters=1, kernel_size=2, padding='same', activation='relu')(dec)\r\n\r\n autoencoder = Model(input_layer, dec)\r\n autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\r\n autoencoder.summary()\r\n encoder = Model(input_layer, enc)\r\n return autoencoder, encoder", "def SemiAutoencoder(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):\n # compression_factor=20\n print('DS shape: {}'.format(ds.shape))\n in_dims = np.prod(ds.shape[1:])\n encoding_dim = int(in_dims // compression_factor)\n in_shape = ds[0].shape\n print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))\n\n # this is our input placeholder\n input_img = Input(shape=(in_dims,))\n encoded = GaussianNoise(input_noise)(input_img)\n\n encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)\n # encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)\n encoded = BatchNormalization()(encoded)\n encoded = Dropout(dropout_p)(encoded) # batch norm before dropout\n # encoded = Dense(encoding_dim*3, activation=activ)(encoded)\n # encoded = Dropout(dropout_p)(encoded)\n encoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n encoded = Dropout(dropout_p)(encoded)\n\n encoded = Dense(encoding_dim, activation=activ)(encoded)\n # Middle Noise\n encoded = GaussianNoise(0.02)(encoded)\n\n # DECODED LAYER\n # \"decoded\" is the lossy reconstruction of the input\n decoded = Dense(encoding_dim * 2, activation=activ)(encoded)\n # decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(encoding_dim * 4, activation=activ)(decoded)\n decoded = Dropout(dropout_p)(decoded)\n decoded = Dense(in_dims, activation=final_activ)(decoded)\n\n # MODEL\n autoencoder = Model(input=input_img, output=decoded)\n\n # SEPERATE ENCODER MODEL\n encoder = Model(input=input_img, output=encoded)\n\n # create a placeholder for an encoded (32-dimensional) input\n encoded_input = Input(shape=(encoding_dim,))\n\n # retrieve the last layer of the autoencoder model\n decoder_layer0 = autoencoder.layers[-4]\n decoder_layer1 = autoencoder.layers[-3]\n decoder_layer2 = autoencoder.layers[-2]\n decoder_layer3 = autoencoder.layers[-1]\n # todo: make this into a dedicated unrolling function\n\n # create the decoder model - unrolling the model as we go\n decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(\n 
decoder_layer1(decoder_layer0(encoded_input)))))\n\n # model.add(GaussianNoise(0.1), input_shape=(n_input_len,))\n autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n autoencoder.model_name = 'Autoencoder 1'\n return autoencoder, encoder, decoder", "def __init__(self, **kwargs):\n var_defaults = {\n \"bias_init\" : 'zeros',\n \"weight_init\" : [0.0, 0.1],\n \"seed\" : None,\n \"num_hid_nodes\" : 32,\n \"activations\": 'sigmoid',\n \"lr\" : 0.1,\n \"decay\": 0,\n \"momentum\" : 0,\n \"nesterov\" : False,\n \"loss\" : 'mean_squared_error',\n \"epochs\" : 10,\n \"batch_size\" : 256,\n \"verbose\" : 2\n }\n for var, default in var_defaults.items():\n setattr(self, var, kwargs.get(var, default))\n self.autoencoder = Sequential()", "def getLabelEncoder():\n classes = list(string.letters + string.digits)\n classes.append('')\n le = LabelEncoder()\n le.fit(classes)\n\n return le", "def encode(\n cls: Type[\"DataDocument\"], encoding: str, data: D, **kwargs: Any\n ) -> \"DataDocument[D]\":\n # Dispatch encoding\n blob = lookup_serializer(encoding).dumps(data, **kwargs)\n\n inst = cls(blob=blob, encoding=encoding)\n inst._cache_data(data)\n return inst", "def build_label_transform():\n\n return NALabelEncoder()", "def _create_encoder(self):\n logger.debug(\"GumBolt::_create_encoder\")\n return HierarchicalEncoder(\n input_dimension=self._flat_input_size,\n n_latent_hierarchy_lvls=self.n_latent_hierarchy_lvls,\n n_latent_nodes=self.n_latent_nodes,\n n_encoder_layer_nodes=self.n_encoder_layer_nodes,\n n_encoder_layers=self.n_encoder_layers,\n skip_latent_layer=False,\n smoother=\"Gumbel\",\n cfg=self._config)", "def __init__(self, *args, **kwargs):\n super(Encoder, self).__init__(*args, **kwargs)\n self._mask = (1 << self._precision) - 1", "def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)", "def __init__(self, n_feature, n_hidden, n_output):\n super(simpleAE, self).__init__()\n self.Encoder = Encoder(n_feature, n_hidden)\n self.Decoder = Decoder(n_hidden, n_output)\n self.model_name = 'simpleAE'", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def train(data, n_visible=16*16, n_hidden=200, batch_size=20,\n learning_rate=0.1, n_epochs=5, beta=3.0, sparsity=0.0,\n weight_decay=0.0, stop_diff=None, corruption_rate=0.3):\n da = DenoisingAutoencoder(n_visible=n_visible, n_hidden=n_hidden)\n da.train(data, batch_size=batch_size, corruption_rate=corruption_rate,\n learning_rate=learning_rate, n_epochs=n_epochs, stop_diff=stop_diff)\n return da", "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def __init__(self, encoder_base: int = 2, encoder_precision: int = 16) -> None:\n self.encoder_base = encoder_base\n self.encoder_precision = encoder_precision", "def encoder(self, inputs):\n pass", "def _construct_ae(self):\n if self.joint_train:\n self.critic.trainable = False\n autoencoder = Model(self.encoder.input,\n [self.decoder(self.encoder.output),\n self.critic(self.encoder.output)])\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss=['binary_crossentropy',\n 
'binary_crossentropy'],\n loss_weights=[self.reconst_weight,\n self.adv_weight])\n else:\n autoencoder = Model(self.encoder.input,\n self.decoder(self.encoder.output))\n autoencoder.compile(optimizer=self.ae_opt(lr=self.ae_learning_rate),\n loss='mse')\n return autoencoder", "def build_encoder(opt, embeddings, structure_embeddings):\n return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings, structure_embeddings)", "def build_encoder(opt, embeddings):\n enc_type = opt.encoder_type if opt.model_type == \"text\" else opt.model_type\n return str2enc[enc_type].from_opt(opt, embeddings)", "def encoder(self) -> json.JSONEncoder:\n return encoder_from_string(self.doc.get('encoder'))", "def base_encoder(cls, data, init_encoder, downsize_encoder, input_encoder):\n #todo: maybe do positional encoding before passing to init_encoder\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data)", "def build_label_transform():\n\n return RobustLabelEncoder(\n labels=['0'], fill_label_value='1', include_unseen_class=True\n )", "def __init__(self, n_inpt, n_hidden, hidden_transfer='identity',\n out_transfer='identity', loss='squared', tied_weights=True,\n batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(AutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer, loss, tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def __init__(self):\n super(BaseRNNEncoder, self).__init__()", "def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',\n out_transfer='identity', reconstruct_loss='squared',\n c_jacobian=1, tied_weights=True, batch_size=None,\n optimizer='lbfgs', max_iter=1000, verbose=False):\n super(ContractiveAutoEncoder, self).__init__(\n n_inpt, n_hidden, hidden_transfer, out_transfer,\n reconstruct_loss, c_jacobian,\n tied_weights)\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.f_transform = None\n self.f_reconstruct = None\n self.parameters.data[:] = np.random.standard_normal(\n self.parameters.data.shape).astype(theano.config.floatX)\n self.max_iter = max_iter\n self.verbose = verbose", "def create_encoding(df):\n vocab = []\n vocab_df = df[\"company\"] + df[\"address\"] + df[\"date\"] + df[\"total\"]\n [vocab.extend(row) for row in vocab_df]\n enc = LabelEncoder()\n enc.fit(vocab)\n return enc", "def encoder(self) -> IntegerEncoder:\n\n return self._encoder", "def build_autoencoder(self):\n # first build the encoder model\n inputs = Input(shape=(self.state_dim, ), name='state')\n feature_size = 32\n x = Dense(256, activation='relu')(inputs)\n x = Dense(128, activation='relu')(x)\n feature = Dense(feature_size, name='feature_vector')(x)\n\n # instantiate encoder model\n self.encoder = Model(inputs, feature, name='encoder')\n self.encoder.summary()\n plot_model(self.encoder,\n to_file='encoder.png', \n show_shapes=True)\n\n # build the decoder model\n feature_inputs = Input(shape=(feature_size,), \n name='decoder_input')\n x = Dense(128, activation='relu')(feature_inputs)\n x = Dense(256, activation='relu')(x)\n outputs = Dense(self.state_dim, activation='linear')(x)\n\n # instantiate decoder model\n self.decoder = Model(feature_inputs, \n outputs, \n name='decoder')\n self.decoder.summary()\n 
plot_model(self.decoder, \n to_file='decoder.png', \n show_shapes=True)\n\n # autoencoder = encoder + decoder\n # instantiate autoencoder model\n self.autoencoder = Model(inputs, \n self.decoder(self.encoder(inputs)),\n name='autoencoder')\n self.autoencoder.summary()\n plot_model(self.autoencoder, \n to_file='autoencoder.png', \n show_shapes=True)\n\n # Mean Square Error (MSE) loss function, Adam optimizer\n self.autoencoder.compile(loss='mse', optimizer='adam')", "def _create_encoder(self):\n\n def _init_weights(layer):\n \"\"\"Initializes the weights of a layer based on type.\"\"\"\n if isinstance(layer, (nn.Conv2d, nn.Linear)):\n torch.nn.init.xavier_uniform_(layer.weight)\n try:\n # Some layers may not have biases, so catch the exception and pass.\n layer.bias.data.fill_(0.0)\n except AttributeError:\n pass\n\n kernel_size = 5\n pad = 2\n input_channels = 1\n first_conv_channels = 6\n second_conv_channels = 16\n max_pool_kernel = 2\n linear_size = 120\n n_pixels = 7\n\n encoder = nn.Sequential(\n nn.Conv2d(\n input_channels, first_conv_channels, kernel_size, padding=pad),\n nn.BatchNorm2d(first_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n nn.Conv2d(\n first_conv_channels, second_conv_channels, kernel_size,\n padding=pad),\n nn.BatchNorm2d(second_conv_channels),\n nn.ReLU(),\n nn.MaxPool2d(max_pool_kernel),\n utils.Flatten(),\n nn.Linear(n_pixels * n_pixels * self.n_digits * second_conv_channels,\n linear_size),\n nn.BatchNorm1d(linear_size),\n nn.ReLU(),\n nn.Linear(linear_size, self.embedding_dim),\n nn.Linear(self.embedding_dim, self.n_classes, bias=False),\n )\n\n encoder.apply(_init_weights)\n\n # This is the empirical approximation for initialization the vMF\n # distributions for each class in the final layer.\n if self.use_vmf:\n utils.vmf_class_weight_init(encoder[-1].weight, self.kappa_confidence,\n self.embedding_dim)\n\n return encoder", "def __init__(self, autoencoder, latent_space):\r\n self._autoencoder = autoencoder\r\n self._latent_space = latent_space" ]
[ "0.6016746", "0.55889976", "0.545366", "0.54502213", "0.5415948", "0.54003376", "0.54001844", "0.53689516", "0.534991", "0.5316329", "0.5203007", "0.51553774", "0.5107471", "0.5106341", "0.5100852", "0.50989753", "0.5094069", "0.5073897", "0.50694823", "0.5016635", "0.50073105", "0.49909428", "0.49849224", "0.4968867", "0.49330625", "0.4929339", "0.4921299", "0.4915225", "0.49001294", "0.4899458" ]
0.6117516
0
Fixture for setting up configuration parser
def setup_config():
    config = configparser.ConfigParser()
    config.read(CONFIG_PATH)
    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.parser = create_parser()", "def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)", "def setUp(self):\r\n\t\tself._configuration_ = Declare.Configuration.read(\"configuration.json\")", "def setUp(self):\n self.parser = echo.create_parser()", "def setUp(self):\n self.parser = echo.create_parser()", "def setUpConfig(self):\n pass", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def test_everything():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/config.toml\",\n \"ENV\", # Temporarily enabled, needs seperate optional dotenv test\n ]\n )\n\n assert \"root_url\" in str(c._crve_configs)\n assert c.root_url == \"test url\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n )\n assert c.defaults_toml() == default_toml", "def setup_parser_config(subparsers):\r\n parser = subparsers.add_parser('config', help='Freeseer configuration functions')\r\n subparsers = parser.add_subparsers(dest=\"config_service\")\r\n setup_parser_config_reset(subparsers)\r\n setup_parser_config_youtube(subparsers)", "def setUp(self):\n self.parser = command_line.get_args()", "def test_fully_default_configuration(self):\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 1)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(gif|png|jpg|bmp)$')", "def test_normal_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'url_list_file: ./urls\\n'\n 'output_directory: ./output\\n'\n 'max_depth: 6\\n'\n 'crawl_interval: 1\\n'\n 'crawl_timeout: 5\\n'\n 'target_url: .*\\.(gif|png|jpg|bmp)$\\n'\n 'thread_count: 8\\n'\n )\n\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 6)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 5)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(gif|png|jpg|bmp)$')", "def 
configure(self, parser: argparse.ArgumentParser) -> None:\n pass", "def parse_config(self):\n # TODO: parse config file\n pass", "def mock_config():\n from .. import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", updater[1][\"moreparam\"])", "def setUp(self) -> None:\n\n # Call the superclass setup\n super().setUp()\n\n # Read the config file from the settings\n self.config = read_settings(form_abs_path(__file__, \"../settings.cfg\"), \"Space Invaders\")", "def test_parser():\n\n parser = configparser.RawConfigParser()\n version = '1.2.3'\n string = 'string-value'\n bool = 'False'\n literal = \"['a', 'b', 'c']\"\n literal2 = '1.23'\n section = 'dashboard'\n\n parser.add_section(section)\n parser.set(section, 'APP_VERSION', version)\n parser.set(section, 'string', string)\n parser.set(section, 'bool', bool)\n parser.set(section, 'literal', literal)\n parser.set(section, 'literal2', literal2)\n\n assert parse_version(parser, section, 'default') == version\n assert parse_string(parser, section, 'string', 'default') == string\n assert not parse_bool(parser, section, 'bool', 'True')\n assert parse_literal(parser, section, 'literal', 'default') == ['a', 'b', 'c']\n assert parse_literal(parser, 
section, 'literal2', 'default') == 1.23", "def __init__(self, _confFixture, _settings):\n self._conf = _confFixture\n self._settings = _settings", "def test_partly_default_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'max_depth: 10\\n'\n 'crawl_interval: 2\\n'\n 'crawl_timeout: 10\\n'\n 'target_url: .*\\.(com|cn|net)$\\n'\n )\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 10)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 2)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 10)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(com|cn|net)$')", "def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)", "def setup_parser(self, parser):", "def test_config():\n args = Namespace(molecule=\"nucleotide\", verbose=False)\n config = core.Config.from_args(args)\n assert config.verbose is False\n assert config.molecule == 'nucleotide'\n assert config.extended_validation == 'none'\n\n args = Namespace(molecule=\"protein\", verbose=True)\n config = core.Config.from_args(args)\n assert config.verbose is True\n assert config.molecule == 'protein'", "def testconfig(self):\n\n configuration = Parser.getNodeTag(self, self.xmlDoc, \"configuration\")\n metadatadb = Parser.getNodeTag(self, configuration, \"metadatadb\") \n self.user = Parser.getNodeVal(self, metadatadb, \"user\")\n self.host = Parser.getNodeVal(self, metadatadb, \"host\")\n self.port = Parser.getNodeVal(self, metadatadb, \"port\")\n self.database = Parser.getNodeVal(self, metadatadb, \"database\")\n self.metaDBSchema = Parser.getNodeVal(self, metadatadb, \"schema\")\n \n try:\n self.passwd = Parser.getNodeVal(self, self.metadatadb, \"passwd\")\n self.metaDB = self.user + \"/\" + self.passwd + \"@\" + self.host + \":\" + self.port + \"/\" \\\n + self.database + \":\" + self.metaDBSchema\n except Exception:\n self.metaDB = self.user + \"@\" + self.host + \":\" + self.port + \"/\" + self.database + \":\" \\\n + self.metaDBSchema", "def test_read_namespaced_build_config(self):\n pass", "def test_config_class():\n assert config is not None", "def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })", "def config():\n data = \"\"\"[YESSSSMS]\nLOGIN = 03211234567\nPASSWD = MySecr3t\nDEFAULT_TO = +43664123123123\nMVNO = YESSS\n\"\"\"\n with mock.patch(\n 
\"configparser.open\",\n # \"builtins.open\",\n mock.mock_open(read_data=data),\n ):\n yield", "def test_define():\n client = TestClient()\n client.run(\"config set general.fakeos=Linux\")\n conf_file = load(client.cache.conan_conf_path)\n assert \"fakeos = Linux\" in conf_file\n\n client.run('config set general.compiler=\"Other compiler\"')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler = Other compiler' in conf_file\n\n client.run('config set general.compiler.version=123.4.5')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler.version = 123.4.5' in conf_file\n assert \"14\" not in conf_file\n\n client.run('config set general.new_setting=mysetting')\n conf_file = load(client.cache.conan_conf_path)\n assert 'new_setting = mysetting' in conf_file\n\n client.run('config set proxies.https=myurl')\n conf_file = load(client.cache.conan_conf_path)\n assert \"https = myurl\" in conf_file.splitlines()", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')" ]
[ "0.70605105", "0.7017709", "0.7011017", "0.692149", "0.692149", "0.690285", "0.68758297", "0.68144774", "0.6782708", "0.6702887", "0.66660374", "0.6653196", "0.66530657", "0.6643987", "0.6641886", "0.6610034", "0.6574361", "0.65678746", "0.6562894", "0.6540612", "0.6537637", "0.6505559", "0.65037274", "0.65011334", "0.6493991", "0.6430816", "0.6400373", "0.6387626", "0.6365025", "0.6344628" ]
0.7371306
0
Fixture for retrieving mock event
def get_mock_event():
    event = {
        "httpMethod": "GET",
        "//body": "{\"name\": \"Sam\"}",
        "resource": "/{proxy+}",
        "queryStringParameters": {},
        "pathParameters": {
            "proxy": "users"
        },
        "requestContext": {
            "accountId": "222222222",
            "identity": {
                "sourceIp": "2a02:a445:6d36:1:1e3:a188:313c:1d31",
                "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_1_6) "
                             "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2743.116 Safari/537.36",
            },
            "resourcePath": "/{proxy+}",
            "httpMethod": "GET",
            "apiId": "xxxxxxxxxx"
        }
    }
    return event
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_factory_fixture():\n def _factory(device_id, event_type=\"DEVICE_EVENT\", capability='',\n attribute='Updated', value='Value', data=None):\n event = Mock()\n event.event_type = event_type\n event.device_id = device_id\n event.component_id = 'main'\n event.capability = capability\n event.attribute = attribute\n event.value = value\n event.data = data\n event.location_id = str(uuid4())\n return event\n return _factory", "def test_describe_event(self):\n pass", "def test_future_event(self):\n pass", "def test_get_event(self):\n event = Event(self.client, 123, {})\n\n self.assertEqual(event.action, \"ticket_create\")\n self.assertEqual(event.created, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(event.duration, 300.56)\n self.assertIsNotNone(event.entity)\n self.assertEqual(event.id, 123)\n self.assertEqual(event.message, \"None\")\n self.assertIsNone(event.percent_complete)\n self.assertIsNone(event.rate)\n self.assertTrue(event.read)\n self.assertIsNotNone(event.secondary_entity)\n self.assertTrue(event.seen)\n self.assertIsNone(event.status)\n self.assertIsNone(event.time_remaining)\n self.assertEqual(event.username, \"exampleUser\")", "def test_create_event(\n event_manager: EventManager, subscriber: Mock, input: bytes, expected: tuple\n) -> None:\n event_manager.handler(input)\n assert subscriber.call_count == 1\n\n event: Event = subscriber.call_args[0][0]\n assert event.topic == expected[\"topic\"]\n assert event.source == expected[\"source\"]\n assert event.id == expected[\"source_idx\"]\n assert event.group == expected[\"group\"]\n assert event.state == expected[\"state\"]\n assert event.is_tripped is expected[\"tripped\"]", "def test_api_predictor_events_get(self):\n pass", "def test_new_general_event(client, transactional_db, mocker):\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n settings.SNS_ARN = arn\n mock = mocker.patch('coordinator.api.models.boto3.client')\n assert Event.objects.count() == 0\n\n ev = Event(event_type='error', message='test error event')\n ev.save()\n assert Event.objects.count() == 1\n assert mock().publish.call_count == 1\n message = {\n 'default': json.dumps({\n 'event_type': 'error',\n 'message': 'test error event',\n 'task_service': None,\n 'task': None,\n 'release': None\n })\n }\n arn = 'arn:aws:sns:us-east-1:538745987955:kf-coord-api-us-east-1-dev'\n mock().publish.assert_called_with(Message=json.dumps(message),\n MessageStructure='json',\n TopicArn=arn)\n settings.SNS_ARN = None", "async def test_api_fire_event_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\n\n Also test if our data came through.\n \"\"\"\n if \"test\" in event.data:\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_with_data\", listener)\n\n await mock_api_client.post(\"/api/events/test_event_with_data\", json={\"test\": 1})\n\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "def make_event(entity_id):\n domain = split_entity_id(entity_id)[0]\n state = mock.MagicMock(\n state=\"not blank\",\n domain=domain,\n entity_id=entity_id,\n object_id=\"entity\",\n attributes={},\n )\n return mock.MagicMock(data={\"new_state\": state}, time_fired=12345)", "def test_mqtt_event(event_manager: EventManager, subscriber: Mock) -> None:\n mqtt_event = {\n \"topic\": \"tns1:Device/tnsaxis:Sensor/PIR\",\n \"source\": \"sensor\",\n \"source_idx\": \"0\",\n \"type\": \"state\",\n \"value\": 
\"0\",\n }\n event_manager.handler(mqtt_event)\n assert subscriber.call_count == 1\n\n event: Event = subscriber.call_args[0][0]\n assert event.operation == EventOperation.INITIALIZED\n assert event.topic == \"tns1:Device/tnsaxis:Sensor/PIR\"\n assert event.id == \"0\"\n assert event.state == \"0\"\n assert not event.is_tripped\n\n mqtt_event[\"value\"] = \"1\"\n event_manager.handler(mqtt_event)\n assert subscriber.call_count == 2\n\n event: Event = subscriber.call_args[0][0]\n assert event.operation == EventOperation.CHANGED\n assert event.state == \"1\"\n assert event.is_tripped", "def test_event_object():\n data = retrieve_fixture()\n event = Event(event=data)\n\n assert event.event_month == \"09\"\n assert event.event_day == \"12\"\n assert event.event_year == \"2020\"", "def event_request_factory_fixture(event_factory):\n def _factory(device_ids=None, events=None):\n request = Mock()\n request.installed_app_id = uuid4()\n if events is None:\n events = []\n if device_ids:\n events.extend([event_factory(id) for id in device_ids])\n events.append(event_factory(uuid4()))\n events.append(event_factory(device_ids[0], event_type=\"OTHER\"))\n request.events = events\n return request\n return _factory", "def test_past_event(self):\n pass", "def testEventInit(self):\n e1 = Event(5, 'obj', 'message')\n self.assertEqual(e1.timestamp, 5)\n self.assertEqual(e1.eventObject, 'obj')\n self.assertEqual(e1.logMessage, 'message')", "async def test_api_fire_event_with_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test.event_no_data\", listener)\n\n await mock_api_client.post(\"/api/events/test.event_no_data\")\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass", "def sample_document_callback_assert(event):\n assert event.setter is mock_session", "def test_load_response_descriptor_events_event_event_resource(self):\n pass", "def test_run(self, init_event, mocker):\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"enabled\",\n new_callable=mocker.PropertyMock(return_value=True),\n )\n mock_stats = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"stats\",\n new_callable=mocker.PropertyMock,\n )\n mock_item_map = mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent,\n \"item_map\",\n new_callable=mocker.PropertyMock,\n )\n\n mock_stats.return_value = mocker.MagicMock(\n spec=houdini_toolbox.events.stats.HoudiniEventStats\n )\n\n mock_map = {}\n mock_item_map.return_value = mock_map\n\n event = init_event()\n\n mock_item1 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item1.run.side_effect = lambda sa: sa[\"order\"].append(mock_item1)\n\n mock_item2 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item2.run.side_effect = lambda sa: sa[\"order\"].append(mock_item2)\n\n mock_item3 = mocker.MagicMock(spec=houdini_toolbox.events.item.HoudiniEventItem)\n mock_item3.run.side_effect = lambda sa: sa[\"order\"].append(mock_item3)\n\n # Assign objects to event map with priorities.\n mock_map[0] = [mock_item2]\n mock_map[15] = [mock_item3]\n mock_map[5] = [mock_item1]\n\n scriptargs = {\"key\": \"value\", \"order\": []}\n\n 
expected_scriptargs = {\n \"key\": \"value\",\n # We expect events to be run in decreasing priority order\n \"order\": [mock_item3, mock_item1, mock_item2],\n }\n\n # Run the test event.\n event.run(scriptargs)\n\n # Make sure each thing was ran.\n mock_item1.run.assert_called_once()\n mock_item2.run.assert_called_once()\n mock_item3.run.assert_called_once()\n\n assert scriptargs == expected_scriptargs\n\n # Ensure the context manager was called.\n mock_stats.return_value.__enter__.assert_called_once()\n mock_stats.return_value.__exit__.assert_called_once()", "def test_events(self):\n\n response = self.client.get(reverse('events'))\n\n assert response.status_code == 200", "def test_post_add_log_event(self):\n pass", "def test_instantiation(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n self.assertEqual(event.guild.id, 12345)\n self.assertEqual(event.title, 'Some title')\n self.assertEqual(event.date, datetime(2020, 10, 10, 10, 10, tzinfo=utc))\n self.assertEqual(event.description, 'Some description')", "def test_create_event_load(self):\n res = self.client.get('/create-event')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Create Event' in data", "def init_event(mocker):\n mocker.patch.object(\n houdini_toolbox.events.event.HoudiniEvent, \"__init__\", lambda x, y: None\n )\n\n def _create():\n return houdini_toolbox.events.event.HoudiniEvent(None)\n\n return _create", "def test_timestamp_noint(self, mock):\n mock.configure_mock(**(self.config_payload(True, False)))\n self.assertRaises(\n TypeError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def setUp(self):\n self.output = StringIO.StringIO()\n self.formatter = json_out.Json(None, self.output)\n self.event_object = JsonTestEvent()", "async def test_api_fire_event_context(\n hass: HomeAssistant, mock_api_client: TestClient, hass_access_token: str\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(event)\n\n hass.bus.async_listen(\"test.event\", listener)\n\n await mock_api_client.post(\n \"/api/events/test.event\",\n headers={\"authorization\": f\"Bearer {hass_access_token}\"},\n )\n await hass.async_block_till_done()\n\n refresh_token = await hass.auth.async_validate_access_token(hass_access_token)\n\n assert len(test_value) == 1\n assert test_value[0].context.user_id == refresh_token.user.id", "async def test_event(bus: lightbus.BusNode, dummy_api):\n\n received_kwargs = []\n\n async def listener(**kwargs):\n received_kwargs.append(kwargs)\n\n async def co_fire_event():\n await asyncio.sleep(0.01)\n return await bus.my.dummy.my_event.fire_async(field='Hello! 😎')\n\n async def co_listen_for_events():\n await bus.my.dummy.my_event.listen_async(listener)\n # Consume a single event, rather than loop forever using consume_events()\n await bus.bus_client._consume_events_once()\n\n await asyncio.gather(co_fire_event(), co_listen_for_events())\n assert received_kwargs == [{'field': 'Hello! 
😎'}]", "def mock_event(player: dict) -> dict:\n return {\n \"body\": {\n \"Player\": player,\n \"playerId\": \"player_hash\",\n \"action\": \"attack\",\n \"enhanced\": False,\n }\n }", "def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])" ]
[ "0.74541396", "0.724666", "0.72374237", "0.71662945", "0.7073701", "0.7027783", "0.68203604", "0.68195695", "0.680202", "0.6784378", "0.6762648", "0.67611665", "0.66722715", "0.66431737", "0.6619424", "0.65345937", "0.65251654", "0.65242076", "0.65172464", "0.6512973", "0.6508907", "0.64926445", "0.6475576", "0.6469131", "0.6420528", "0.64146644", "0.64124495", "0.6411172", "0.64062", "0.6396349" ]
0.74155563
1
Unit test get_ip_type_by_address method of the Bad Bots class
def test_get_ip_type_by_address(setup_config, get_mock_event):
    # !ARRANGE!
    bad_bots = BadBots(setup_config, get_mock_event)

    ipv4_address_1 = '1.1.1.1'
    ipv4_address_2 = '11.22.33.44'
    ipv4_address_3 = '123.123.123.123'

    ipv6_address_1 = '2a02:a445:6d36:1:1e3:a188:313c:1d31'
    ipv6_address_2 = '3731:54:65fe:2::a7'
    ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463'

    # !ACT!
    # Detect the IP type of provided IP addresses
    ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1)
    ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2)
    ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3)
    ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1)
    ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2)
    ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3)

    # !ASSERT!
    # Assert IP addresses are of type IPv4
    assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value
    assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value
    assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value

    # Assert IP addresses are of type IPv6
    assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value
    assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value
    assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def test_external_ip_get_kind(self):\n assert_equal(self.test_external_ip.get_kind(), 'mpexternalip')", "def test_ipam_ip_addresses_read(self):\n pass", "def test_get_source_ip(self):\n pass", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def test_get_node_internal_ip_address(self):\n pass", "def test_get_geoip():\n assert get_geoip(\"74.125.67.100\") == \"US\"", "def test_ipam_ip_addresses_list(self):\n pass", "def test_ipam_ip_addresses_create(self):\n pass", "def testIP(self):\n self.assertEqual([\"http://234.234.234.234\"], grab('http://234.234.234.234', self.needScheme))", "def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)", "def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")", "def test_add_ip(self):\n ip = '1.1.1.1'\n info = self.api.add_ipadress(ip, tags=['asd'])\n self.assertEqual(info['value'], ip)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def test_get_ip_tags_invalid_ip(client, database):\n\n invalid_ip = \"http://127.0.0.1:5000/ip-tags/10.1.2.3000\"\n response = client.get(invalid_ip)\n response_data = response.get_json()\n\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n assert (\n response_data[\"error\"]\n == \"400 Bad Request: Address 10.1.2.3000 does not have IPv4 format\"\n )", "def test_exclude_ip_ban(self):\n pass", "def test_read_host_subnet(self):\n pass", "def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def _get_address_type(self):\n return self.__address_type", "def test_functional_bad_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n self.assertEqual(response.status_code, BAD_REQUEST)\n self.assertEqual(response.json().get(\"error\"),\n \"No city for ip {}\".format(url))", "def test_get_ip_from_headers(self):\n response = self.client.get(self.voter_location_url, REMOTE_ADDR='69.181.21.132')\n self.assertEqual(response.status_code, 200)\n json_data = json.loads(response.content.decode())\n self.assertEqual(json_data['success'], True)\n 
self.assertEqual(json_data['voter_location_found'], True)", "def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')", "def mock_country_code_by_addr(self, ip_addr):\r\n ip_dict = {\r\n '1.0.0.0': 'CU',\r\n '2.0.0.0': 'IR',\r\n '3.0.0.0': 'SY',\r\n '4.0.0.0': 'SD',\r\n '5.0.0.0': 'AQ', # Antartica\r\n }\r\n return ip_dict.get(ip_addr, 'US')", "def vt_ip_check(ip, vt_api):\n if not is_IPv4Address(ip):\n return None\n\n url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n parameters = {'ip': ip, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def get_ip(self):", "def test_find_agent_ips(self):\n\n with patch(\n \"salt.cloud.clouds.proxmox.query\",\n return_value={\n \"result\": [\n {\n \"name\": \"eth0\",\n \"ip-addresses\": [\n {\"ip-address\": \"1.2.3.4\", \"ip-address-type\": \"ipv4\"},\n {\"ip-address\": \"2001::1:2\", \"ip-address-type\": \"ipv6\"},\n ],\n },\n {\n \"name\": \"eth1\",\n \"ip-addresses\": [\n {\"ip-address\": \"2.3.4.5\", \"ip-address-type\": \"ipv4\"},\n ],\n },\n {\n \"name\": \"dummy\",\n },\n ]\n },\n ) as mock_query:\n vm_ = {\n \"technology\": \"qemu\",\n \"host\": \"myhost\",\n \"driver\": \"proxmox\",\n \"ignore_cidr\": \"1.0.0.0/8\",\n }\n\n # CASE 1: Test ipv4 and ignore_cidr\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2.3.4.5\"\n\n # CASE 2: Test ipv6\n\n vm_[\"protocol\"] = \"ipv6\"\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2001::1:2\"", "def test_ipam_ip_addresses_update(self):\n pass", "def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")" ]
[ "0.67973435", "0.6795078", "0.67154825", "0.66857165", "0.66440994", "0.6609387", "0.65462726", "0.6507168", "0.649237", "0.6404722", "0.63306016", "0.6302462", "0.6274524", "0.6213682", "0.6204644", "0.62016076", "0.61814064", "0.6152368", "0.6097284", "0.60947865", "0.60787016", "0.6026259", "0.6008837", "0.5960325", "0.59585816", "0.5950451", "0.59444684", "0.593575", "0.5914348", "0.5899763" ]
0.78166175
0
Unit test check_bot_confidence method of the Bad Bots class
def test_check_bot_confidence(setup_config, get_mock_event):
    # !ARRANGE!
    bad_bots = BadBots(setup_config, get_mock_event)

    bot_1 = Bot()
    bot_1.source_ip = '1.1.1.1'
    bot_1.http_query_string_parameters = '<script></script>'
    bot_1.http_body = 'EXEC'
    bot_1.geolocation = 'United States'
    bot_1.source_ip_type = BadBots.SourceIPType.IPV4
    bot_1.http_method = "CONNECT"
    bot_1.http_user_agent = "Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)"

    bot_2 = Bot()
    bot_2.source_ip = '77.168.51.231'
    bot_2.http_query_string_parameters = 'hello'
    bot_2.http_body = 'hello!'
    bot_2.geolocation = 'Netherlands'
    bot_2.source_ip_type = BadBots.SourceIPType.IPV4
    bot_2.http_method = "GET"
    bot_2.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"

    bot_3 = Bot()
    bot_3.source_ip = '2a02:a445:6d36:1:1e3:a188:313c:1d33'
    bot_3.http_query_string_parameters = 'param=true'
    bot_3.http_body = 'username=xxx'
    bot_3.geolocation = 'United States'
    bot_3.source_ip_type = BadBots.SourceIPType.IPV6
    bot_3.http_method = "GET"
    bot_3.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"

    # !ACT!
    # Do confidence check on potential bots
    confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1)
    confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2)
    confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3)

    # !ASSERT!
    # Assert IP addresses are of type IPv4
    assert(confidence_score_bot_1 == 25)
    assert(confidence_score_bot_2 == 0)
    assert(confidence_score_bot_3 == 5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_word_confidences(self):\n self._api.SetImageFile(self._image_file)\n words = self._api.AllWords()\n self.assertEqual(words, [])\n self._api.Recognize()\n words = self._api.AllWords()\n confidences = self._api.AllWordConfidences()\n self.assertEqual(len(words), len(confidences))\n mapped_confidences = self._api.MapWordConfidences()\n self.assertEqual([v[0] for v in mapped_confidences], words)\n self.assertEqual([v[1] for v in mapped_confidences], confidences)", "def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))", "def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)", "def test_likelihood(app):\n\n assert False", "def test_confidences(self):\n\n # Add alignments to pipeline\n for hit, aln in zip(self.pipeline[\"templates\"], self.ALIGNMENTS):\n hit[\"alignment\"] = aln\n\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"][\"confidence\"],\n \"---5-4-----\")\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"][\"confidence\"],\n \"----3-----\")", "def test_rb_utils(self):\n\n t1 = 100.\n t2 = 100.\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],\n [t2, t2], gate2Q)\n\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],\n [t2], gate1Q)\n\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n \"Error: 1Q Coherence Limit\")\n\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n \"Error: 2Q Coherence Limit\")\n\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],\n [0, 1, -1],\n [0.001, 0.0015, 0.02])\n\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n \"Error: 2Q EPC Calculation\")", "def test_error_at_995tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.995))", "def compute_confidence_interval(self) -> bool:\n return False", "def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))", "def test_error_at_99tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.99))", "def testIsBiconnected(self):\n self.assertEqual(is_biconnected(self.G1), True)\n self.assertEqual(is_biconnected(self.G2), False)", "def test_dice_coef_loss():\n assert dice_coef_loss() == expected_dice_coef_loss", "def test_verify_fails_expected_metric_kwargs(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm - pm.mean(\"time\").mean(\"init\")\n with pytest.raises(ValueError) as excinfo:\n pm.verify(\n metric=\"threshold_brier_score\", comparison=\"m2c\", dim=[\"init\", \"member\"]\n )\n assert \"Please provide threshold.\" == str(excinfo.value)", "def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n 
self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def check():\n hokusai.check()", "def test_should_contain_badge_classes(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/badge_retrieval.yaml'):\n self.assertTrue(isinstance(badgr.badges[0], Badge))", "def test_score_with_fitted_estimator(self):\n model = GaussianNB().fit(self.binary.X.train, self.binary.y.train)\n\n # NOTE that the wrapper will pass a call down to `classes_`\n oz = ClassificationScoreVisualizer(model)\n assert_not_fitted(oz, [\"class_counts_\", \"score_\"])\n\n msg = \"could not determine class_counts_\"\n with pytest.warns(YellowbrickWarning, match=msg):\n oz.score(self.binary.X.test, self.binary.y.test)\n assert_fitted(oz, [\"classes_\", \"class_counts_\", \"score_\"])", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_validate_metadata_pass(self):\n mock_event = {\n 'gds_volume_name': \"bssh.xxxx\",\n 'gds_folder_path': \"/Runs/cccc.gggg\",\n 'seq_run_id': \"yyy\",\n 'seq_name': \"zzz\",\n }\n\n settings_by_override_cycles = [\n {\n \"batch_name\": \"my-passing-batch\",\n \"samples\": [\n \"PTC_EXPn200908LL_L2000001\",\n \"PTC_EXPn200908LL_L2000002\",\n \"PTC_EXPn200908LL_L2000003\"\n ],\n \"settings\": {\n \"override_cycles\": \"Y100;I8N2;I8N2;Y100\"\n }\n }\n ]\n\n reason = bcl_convert.validate_metadata(mock_event, settings_by_override_cycles)\n\n logger.info(\"-\" * 32)\n logger.info(json.dumps(reason))\n\n self.assertIsNone(reason)\n\n # should not call to slack webhook\n verify(libslack.http.client.HTTPSConnection, times=0).request(...)", "def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))", "def test_cv_warning_messages():\n fr = h2o.import_file(path=pyunit_utils.locate(\"smalldata/admissibleml_test/Bank_Personal_Loan_Modelling.csv\"))\n target = \"Personal Loan\"\n fr[target] = fr[target].asfactor()\n x = [\"Experience\",\"Income\",\"Family\",\"CCAvg\",\"Education\",\"Mortgage\",\n \"Securities Account\",\"CD Account\",\"Online\",\"CreditCard\"]\n splits = fr.split_frame(ratios=[0.80])\n train = splits[0]\n test = splits[1]\n infogram_model_cv_v = H2OInfogram(seed = 12345, protected_columns=[\"Age\",\"ZIP Code\"], nfolds=3) \n infogram_model_cv_v.train(x=x, y=target, training_frame=train, validation_frame=test)\n \n pyunit_utils.checkLogWeightWarning(\"infogram_internal_cv_weights_\", wantWarnMessage=False)", "def test_status(self):\n self.assertEqual('perfect', self.__metric.status())", "def test_life_critical():\n assert chap2.life_critical()", "def test_balance_tracking(self):\n # TODO\n pass", "def test_get_boat(self):\n pass", "def test_heartbeat( self ):\n with self.app.app_context():\n url = '/donation/heartbeat'\n\n # Ensure a GET with no saved caged_donors returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( response.status_code, status.HTTP_200_OK )", "def test_coast():\n dwd = DwdWeatherWarningsAPI(WARNCELL_NAME_COAST)\n assert dwd.data_valid\n assert dwd.warncell_id == WARNCELL_ID_COAST\n assert dwd.warncell_name == WARNCELL_NAME_COAST\n start_time = datetime.datetime.now(\n datetime.timezone.utc\n ) - datetime.timedelta(0, TIME_TOLERANCE)\n stop_time = start_time + datetime.timedelta(0, (2 * 
TIME_TOLERANCE))\n assert start_time < dwd.last_update < stop_time\n assert MIN_WARNING_LEVEL <= dwd.current_warning_level <= MAX_WARNING_LEVEL\n assert MIN_WARNING_LEVEL <= dwd.expected_warning_level <= MAX_WARNING_LEVEL\n assert isinstance(dwd.current_warnings, list)\n assert isinstance(dwd.expected_warnings, list)", "def test_b_grade_exact(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.33)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def test_return_advice_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n text = \"Ton poids actuel est déjà bien bas... je te déconseille \" \\\n \"de perdre plus de poids. \"\n self.assertEqual(advice, text)" ]
[ "0.67971265", "0.6142417", "0.61158496", "0.6029153", "0.57504106", "0.572192", "0.56564856", "0.5608764", "0.5544521", "0.554337", "0.55383706", "0.55253214", "0.54930353", "0.5490882", "0.5488589", "0.54793096", "0.5459387", "0.5425729", "0.540067", "0.5386406", "0.5373596", "0.53599584", "0.53518784", "0.53335965", "0.53318787", "0.5326166", "0.5317281", "0.5312178", "0.53002065", "0.52993965" ]
0.7511095
0
Generates IDL files from a template for user and system marshaling.
def _Main():
    cmd_parser = argparse.ArgumentParser(
        description='Tool to generate IDL from template.')
    cmd_parser.add_argument('--idl_template_file',
                            dest='idl_template_file',
                            type=str,
                            required=True,
                            help='Input IDL template file.')
    cmd_parser.add_argument('--idl_output_file',
                            type=str,
                            required=True,
                            help='Output IDL file.')
    flags = cmd_parser.parse_args()
    _GenerateIDLFile(flags.idl_template_file, flags.idl_output_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildAutogenContents(self):\n if len(self.mTemplates) == 0:\n return None\n \n content = \"/** Autogenerated temporary file for template instantiation. */\\n\"\n for t in self.mTemplates:\n template_type = t.mTemplateType\n typedef_name = t.mTypedefName\n content += \"\"\"\n typedef %(template_type)s %(typedef_name)s;\n inline unsigned __instantiate_%(typedef_name)s()\n { return unsigned(sizeof(%(typedef_name)s)); }\n \"\"\" % vars() \n \n return content", "def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass", "def build_mapping() -> str:\n templates = make_module_docstring(\"Template classes for GBD entities\", __file__)\n templates += make_import(\"typing\", [\"Union\", \"Tuple\"])\n templates += (\n make_import(\n \".id\",\n [\n \"c_id\",\n \"s_id\",\n \"hs_id\",\n \"me_id\",\n \"cov_id\",\n \"rei_id\",\n \"scalar\",\n ],\n )\n + SPACING\n )\n templates += make_gbd_record()\n\n for entity, info in get_base_types().items():\n templates += SPACING\n templates += make_record(entity, **info)\n\n return templates", "def test_pnictogen():\n for template in templates:\n template_prefix, extension = os.path.splitext(template)\n for xyz_file in example_xyz_files:\n input_prefix, xyz_file_extension = os.path.splitext(xyz_file)\n\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(xyz_file, xyz_file_extension[1:])\n )\n written_files = pnictogen(mol, input_prefix, template, extension[1:])\n\n assert_equals(type(written_files), list)\n for written_file in written_files:\n assert_equals(type(written_file), str)\n\n written_files2 = pnictogen(mol, input_prefix, template)\n assert_equals(written_files, written_files2)\n\n # Allow use of template in the parent directory\n with cd(\"pnictogen/repo\"):\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(\"../../data/water-dimer.xyz\", \"xyz\")\n )\n written_files = pnictogen(mol, \"../../data/water-dimer\", \"ADF.in\", \"in\")\n\n assert_equals(written_files, [\"../../data/water-dimer.in\"])\n\n main([\"-g\", \"/tmp/hello.world.ORCA.inp\"])\n mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/co.xyz\", \"xyz\"))\n written_files = pnictogen(mol, \"data/co\", \"/tmp/hello.world.ORCA.inp\", foo=\"bar\")\n\n assert_equals(written_files, [\"data/co.inp\"])", "def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis ([email protected])\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += 
\" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file", "def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n self.create_file_from_template(file_path, template_path, model_attributes)", "def generate_basic_modules(template_dir=TEMPLATE_DIR, out_dir=PKG_DIR):\n print(80 * \"-\")\n print(\"Package:\", out_dir)\n\n basic_modules = [\"_init.py\",\n \"constants.py\",\n \"base_api.py\",\n \"exception.py\"]\n\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n installed = []\n for module in basic_modules:\n in_file = os.path.join(template_dir, module)\n\n if module == \"_init.py\":\n module = \"__init__.py\"\n\n out_file = os.path.join(out_dir, module)\n try:\n shutil.copy(in_file, out_file)\n except (FileNotFoundError, shutil.SameFileError) as err:\n print(err)\n installed.append(\"- \" + out_file)\n\n print(\"Basic modules:\")\n print(\"\\n\".join(installed))", "def generate():", "def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render", "def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()", "def 
generate(env):\r\n if not exists(env):\r\n return 0;\r\n\r\n TLBImpBuilder = env.Builder(\r\n action = SCons.Action.Action(\r\n TLBImpGenerator\r\n , generator = 1\r\n #, cmdstr = \"$TLBIMPCOMSTR\"\r\n )\r\n , src_suffix = '.dll'\r\n , target_suffix = '.dll'\r\n )\r\n\r\n dotNETSDK = _getNETSDKPath()\r\n homedir = env.Dir(dotNETSDK)\r\n bindir = homedir.Dir('bin')\r\n\r\n env['TLBIMP'] = 'tlbimp.exe'\r\n env['TLBIMPFLAGS'] = '/nologo /silent /strictref:nopia'\r\n env['TLBIMPCOMSTR'] = '[.NET] TLBIMP: Generating interop assembly for typelib in: $SOURCE to: $TARGET'\r\n env['BUILDERS']['TLBImp'] = TLBImpBuilder\r\n\r\n # Agrego al PATH el directorio del tlbimp\r\n env.PrependENVPath(\r\n 'PATH',\r\n bindir.abspath\r\n )", "def TLBImpGenerator(\r\n target\r\n , source\r\n , env\r\n , for_program = 0\r\n , for_signature = 0\r\n ):\r\n\r\n src = source[0].children()\r\n\r\n assert len(src) >= 1, \"[.NET] TLBIMP: At least one source is needed. Check your declarations.\"\r\n\r\n cmdline = env['TLBIMP']\r\n cmdline += ' '\r\n cmdline += env['TLBIMPFLAGS']\r\n if env.get('namespace') != None:\r\n cmdline += ' /namespace:$namespace'\r\n if env.get('key_file') != None:\r\n cmdline += ' /keyfile:$key_file'\r\n cmdline += ' /out:' + target[0].abspath\r\n for refnode in source[1:]:\r\n for ref in refnode.children():\r\n cmdline += ' /reference:' + ref.abspath\r\n cmdline += ' ' + src[0].abspath\r\n\r\n return [cmdline]", "def Generate(self):\n return self.Render(self.TEMPLATE_NAME, {\n 'name': self._namespace.name,\n 'enums': self._enums,\n 'types': self._types,\n 'events': self._namespace.events,\n 'functions': self._namespace.functions,\n # TODO(sammc): Don't change years when regenerating existing output files.\n 'year': datetime.date.today().year,\n 'source_file': self._namespace.source_file,\n })", "def writeDomainFile():\n writeTemplate(localTemplate)", "def Write(self):\n template_mappings = {}\n\n template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n file_content = file_content.encode('utf-8')\n\n with open(self.PATH, 'wb') as file_object:\n file_object.write(file_content)", "def generate_input_file(temp_type, out_file):\r\n\r\n file_path = os.path.realpath(__file__)\r\n dir_path = os.sep.join(file_path.split(os.sep)[:-1])\r\n\r\n if temp_type == 0:\r\n template = 'Template00_CompleteParameters.py'\r\n elif temp_type == 1:\r\n template = 'Template01_SingleRowCylindricalRollerBearing.py'\r\n elif temp_type == 3:\r\n template = 'Template03_CylindricalRollerThustBearing.py'\r\n elif temp_type == 4:\r\n template = 'Template04_BallOnDisk.py'\r\n elif temp_type == 5:\r\n template = 'Template05_PinOnDisk.py'\r\n elif temp_type == 6:\r\n template = 'Template06_4Ball.py'\r\n elif temp_type == 7:\r\n template = 'Template07_BallOn3Plates.py'\r\n elif temp_type == 8:\r\n template = 'Template08_RingOnRing.py'\r\n else:\r\n raise ValueError(\"temp_type value '{}' undefined\".format(temp_type))\r\n\r\n shutil.copy(os.sep.join([dir_path, 'UserInputTemplates', template]),\r\n out_file)\r\n return out_file", "def test_code_template(tmpdir):\n # Create temp file\n fn = tmpdir.mkdir(\"data\")\n expected_file = os.path.join(str(fn), 'loader.py')\n\n # Gen code template\n runner = CliRunner()\n result = runner.invoke(cli.generate_code_template,\n ['-o', str(fn)], env=env)\n\n assert result.exit_code == 0\n assert os.path.isfile(expected_file)\n\n # Update file\n with open(expected_file, 'w') as f:\n 
f.write('print(\"hello world!\")')\n\n # Try to generate file again\n result = runner.invoke(cli.generate_code_template,\n ['-o', str(fn)], env=env)\n\n assert 'already exists' in result.stdout\n assert result.exit_code == 0\n\n # Check file\n with open(expected_file, 'r') as f:\n assert 'hello world!' in f.read()", "def run(tree, args):\n\n global run_before\n\n if run_before:\n util.fatalError(\"Sorry, the C++ backend cannot process more \"\n \"than one IDL file at a time.\")\n run_before = 1\n\n dirname, filename = os.path.split(tree.file())\n basename,ext = os.path.splitext(filename)\n config.state['Basename'] = basename\n config.state['Directory'] = dirname\n\n process_args(args)\n\n try:\n # Check the input tree only contains stuff we understand\n support.checkIDL(tree)\n\n # initialise the handy ast module\n ast.__init__(tree)\n\n # Initialise the descriptor generating code\n descriptor.__init__(tree)\n\n # Build the map of AST nodes to Environments\n tree.accept(id.WalkTree())\n\n # AMI code hooks into existing infrastructure (ie doesn't need to\n # be driven explicitly here)\n #if config.state['AMI']:\n # tree = ami.__init__(tree)\n # tree.accept(id.WalkTree())\n # Not ported yet.\n \n header.run(tree)\n \n skel.run(tree)\n \n # if we're generating code for Typecodes and Any then\n # we need to create the DynSK.cc file\n if config.state['Typecode']:\n dynskel.run(tree)\n\n if config.state['Example Code']:\n impl.run(tree)\n\n except AttributeError, e:\n name = e.args[0]\n unsupported_visitors = map(lambda x:\"visit\" + x,\n AST_unsupported_nodes[:])\n if name in unsupported_visitors:\n # delete all possibly partial output files\n for file in output.listAllCreatedFiles():\n os.unlink(file)\n\n util.unsupportedIDL()\n \n raise\n\n except SystemExit, e:\n # fatalError function throws SystemExit exception\n # delete all possibly partial output files\n for file in output.listAllCreatedFiles():\n os.unlink(file)\n \n raise", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def generate(options):\n interactive = options['i']\n if interactive:\n generate_interactive(options)\n else:\n generate_rcfile(vars(options['c']), options['rcfile'])", "def _GenerateFromTemplate(self, template_filename, template_mappings):\n template_filename = os.path.join(\n self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)\n return super(AppveyorYmlWriter, self)._GenerateFromTemplate(\n template_filename, template_mappings)", "def test_create_template_for_all_namespaces(self):\n pass", "def genStixDoc(\n outputDir_,\n targetFileSha1_,\n targetFileSha256_,\n targetFileSha512_,\n targetFileSsdeep_,\n targetFileMd5_,\n targetFileSize_,\n targetFileName_,\n ipv4Addresses_,\n hostNames_):\n parsedTargetFileName = reFileName(targetFileName_)[1]\n parsedTargetFilePrefix = reFileName(targetFileName_)[0]\n stix.utils.set_id_namespace({\"http://www.nickdriver.com/cuckoo2CRITs\" : \"cuckoo2CRITs\"})\n NS = cybox.utils.Namespace(\"http://www.nickdriver.com/cuckoo2CRITs\", \"cuckoo2CRITs\")\n cybox.utils.set_id_namespace(NS)\n stix_package = STIXPackage()\n\n stix_header = STIXHeader()\n stix_header.title = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n stix_header.description = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n 
stix_package.stix_header = stix_header\n\n #Will take this out later\n # Create the ttp\n malware_instance = MalwareInstance()\n malware_instance.add_name(parsedTargetFileName)\n malware_instance.description = targetFileSha1_\n ttp = TTP(title='TTP: ' + parsedTargetFileName)\n ttp.behavior = Behavior()\n ttp.behavior.add_malware_instance(malware_instance)\n #stix_package.add_ttp(ttp)\n \n #Trying to create an array that will be added later...\n stix_observables = []\n \n #This works - leaving intact until the new portion works\n '''\n # Create the indicator for the ipv4 addresses\n ipv4Object = Address(ipv4Addresses_, Address.CAT_IPV4)\n #stix_msg['stix_observables'].extend(Observables([ipv4Object]))\n stix_observables.extend([ipv4Object])\n '''\n for ip in ipv4Addresses_:\n\t\tipv4Object = Address(ip, Address.CAT_IPV4)\n\t\tstix_observables.extend([ipv4Object])\n \n \n '''\n #This works - leaving intact until the new portion works\n # Create the indicator for the domain names\n domainNameObject = DomainName()\n domainNameObject.value = hostNames_\n '''\n for name in hostNames_:\n\t\tdomainNameObject = DomainName()\n\t\tdomainNameObject.value = name\n\t\tstix_observables.extend([domainNameObject])\n\t\t\n \n\n \n # Create the observable for the file\n fileObject = File()\n fileObject.file_name = parsedTargetFileName\n #fileObject.file_name.condition = 'Equals'\n fileObject.size_in_bytes = targetFileSize_\n #fileObject.size_in_bytes.condition = 'Equals'\n fileObject.add_hash(Hash(targetFileSha1_, type_='SHA1', exact=True))\n fileObject.add_hash(Hash(targetFileSha256_, type_='SHA256', exact=True))\n fileObject.add_hash(Hash(targetFileSha512_, type_='SHA512', exact=True))\n fileObject.add_hash(Hash(targetFileSsdeep_, type_='SSDEEP', exact=True))\n fileObject.add_hash(Hash(targetFileMd5_, type_='MD5', exact=True))\n \n stix_observables.extend([fileObject])\n \n \n stix_package.observables = Observables(stix_observables)\n \n #DEBUG\n #stagedStixDoc = stix_package.to_xml()\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(stagedStixDoc)\n\t\t\n #print \"stix_observables list\"\n\n #pp.pprint(stix_observables)\n \n '''\n #VERY BASIC STIX ATTEMPT - THIS WORKS!\n a = Address(\"1.2.3.4\", Address.CAT_IPV4)\n d = DomainName()\n d.value = \"cybox.mitre.org\"\n stix_package.observables = Observables([a, d])\n #concensus - Observable does not work - ObservableS does\n '''\n\t\n\t\n\t###UNCOMMENT THIS WHEN DONE###\n\t\n \n stagedStixDoc = stix_package.to_xml()\n stagedStixDoc = fixAddressObject(stagedStixDoc)\n stagedStixDoc = fixDomainObject(stagedStixDoc)\n today = datetime.datetime.now()\n now = today.strftime('%Y-%m-%d_%H%M%S')\n if not os.path.exists(outputDir_):\n os.makedirs(outputDir_)\n with open (outputDir_ + '/' + now + '-' + targetFileSha1_ + '.stix.xml', 'a') as myfile:\n myfile.write(stagedStixDoc)\n _l.debug('Wrote file: ' + now + '-' + targetFileSha1_ + '.stix.xml')\n \n return", "def doMakeEyeballTemplate(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n try:\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n #Gather limb specific data and check\n #==============\n mi_helper = self._mi_module.helper\n if not mi_helper:\n raise StandardError,\"No helper found!\"\n\n b_irisControl = mi_helper.irisHelper\n b_pupilControl = mi_helper.pupilHelper\n\n mi_helper.parent = self._mi_module.templateNull\n except Exception,error:raise Exception,\"doMakeEyeballTemplate | 
{0}\".format(error)\n\n\n return True", "def generate(self, namespace: Optional[str], template: str, func: Callable, call_args: Dict) -> str:", "def generate(self):\n try:\n self._parse_groups()\n self._parse_types()\n self._parse_enums()\n self._parse_features()\n self._parse_extensions()\n self._add_extra_enums()\n self._parse_and_build_commands()\n self._build_all_enums()\n self._build_enum_groups()\n self._generate_files()\n except Exception as exception:\n print('Generate failed: {}'.format(str(exception)))\n raise", "def _create_swig_interface(self, path: pathlib.Path) -> str:\n module_name = path.with_suffix('').name\n header_code = self.create_header_file(path)\n include_directives = []\n function_signatures = []\n for line in header_code.splitlines():\n if line.startswith('#include'):\n collection = include_directives\n else:\n collection = function_signatures\n collection.append(line)\n swig_interface = SWIG_INTERFACE_TEMPLATE.format(\n module_name=module_name, include_directives='\\n'.join(include_directives),\n function_signatures='\\n'.join(function_signatures))\n _LOG.debug('SWIG interface: \"\"\"%s\"\"\"', swig_interface)\n return swig_interface", "def generate(random, pid, autogen_tools, n):\n\n generator_path = autogen_tools.get_directory(__file__)\n\n template_path = path.join(generator_path, \"code.txt.template\")\n rendered_template_path = path.join(generator_path, \"code.txt\")\n\n autogen_tools.replace_source_tokens(\n template_path,\n {\"flag\": gen_code(n, \"Aviation House\")},\n rendered_template_path\n )\n\n code_link = autogen_tools.generate_resource_link(pid, \"code.txt\", title=\"Encrypted file\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"code.txt\"),\n ],\n },\n \"static_files\": {\n },\n \"problem_updates\": {\n \"description\": \"<p>We've updated the system to AES. We heard that this is military grade encryption so that should fix everything</p><p>The team have stored the password in %s. 
Bet you can't get into it</p>\" % code_link\n }\n }", "def create(self):\n\t\tlipsBaseFile.imp()", "def generate_makefile(path, make_template):\n full_path = os.path.abspath(path)\n\n # List of *.c files in the directory.\n app_src = [ f for f in os.listdir(full_path) if f.endswith('.c') ]\n \n # if there is an *.rci file, assume rci is enabled and that ConfigGenerator\n # will generate a remote_config.c we want to compile\n rci = [ f for f in os.listdir(full_path) if f.endswith('.rci') ]\n if rci and \"remote_config.c\" not in app_src:\n app_src.append(\"remote_config.c\")\n\n # Get the Mode and name of the Sample, this will be used for determining\n # what libraries and platform files to include.\n (_, mode, _, sample) = full_path.rsplit(os.sep, 3)\n\n subs = DEFAULT_SUBS.copy()\n\n dvt_test = path.find('dvt') != -1\n\n if dvt_test:\n subs['CONNECTOR_DIR'] = '../../../private'\n subs['PUBLIC_HEADER_DIR'] = '../../../public/include'\n subs['PLATFORM_DIR'] = \"\"\"\n# Location of Platform Src Code.\nPLATFORM_DIR=../../../public/run/platforms/$(PLATFORM)\"\"\"\n\n # Change platform to 'template' if this is a template test.\n if sample == \"template_test\":\n subs[\"TARGET_PLATFORM\"] = \"\"\"\n# Target Platform\nPLATFORM = template\"\"\"\n\n # Treat compile and link as a special case, no platform used.\n if sample in LINK_SAMPLES:\n subs['TARGET_PLATFORM'] = ''\n subs['PLATFORM_DIR'] = ''\n subs['PLATFORM_VPATH'] = ''\n subs['PLATFORM_HEADER_INCLUDE'] = ''\n subs['SRCS'] = 'SRCS = $(APP_SRCS) $(PRIVATE_SRCS)'\n else:\n # Assume this is the base set of Platform files\n # Only include files who are also not in local directory.\n subs['PLATFORM_SRCS'] = 'PLATFORM_SRCS = '\n for f in ['os.c', 'config.c', 'debug.c', 'main.c']:\n if f not in app_src:\n subs['PLATFORM_SRCS'] += '$(PLATFORM_DIR)/%s ' % f\n\n # Add all *.c files in the directory.\n subs['APP_SRCS'] = 'APP_SRCS = ' + ' '.join([ re.sub(r'\\.c$', '.c', f) \\\n for f in app_src ])\n\n # Add -lpthread as a linked library if this is a run sample.\n if (dvt_test or mode == 'run') and sample not in LINK_SAMPLES:\n subs['LIBS'] += ' -lpthread' \n\n if sample == 'connect_on_ssl' and 'network_ssl.c' not in app_src:\n # Add network_ssl.c to PLATFORM_SRCS and -lssl to LIBS.\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_dns.c'\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_tcp_ssl.c'\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_udp.c'\n subs['LIBS'] += ' -lssl -lcrypto'\n elif sample not in LINK_SAMPLES:\n if 'network.c' not in app_src:\n if 'network_dns.c' not in app_src:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_dns.c'\n if 'network_tcp.c' not in app_src:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_tcp.c'\n if 'network_udp.c' not in app_src:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_udp.c'\n\n if sample == 'sm_sms':\n if 'network_sms.c' not in app_src:\n # Add network_ssl.c to PLATFORM_SRCS and -lssl to LIBS.\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_sms.c'\n else:\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_sms.c'\n if sample == 'sm_sms_gammu':\n subs['LIBS'] += ' -lGammu'\n subs['GAMMU_INCLUDES'] = \"CFLAGS += -I/usr/include/gammu\"\n subs['PLATFORM_SRCS'] += ' $(PLATFORM_DIR)/network_sms.c'\n if sample == 'file_system' and 'file_system.c' not in app_src:\n # Add file_system.c to PLATFORM_SRCS. 
-lcrypto if APP_ENABLE_MD5\n # passed.\n subs['PLATFORM_SRCS'] += \" $(PLATFORM_DIR)/file_system.c\"\n\n if sample == 'fs_os_abort' and 'file_system.c' not in app_src:\n # Add file_system.c to PLATFORM_SRCS. -lcrypto if APP_ENABLE_MD5\n # passed.\n subs['PLATFORM_SRCS'] += \" $(PLATFORM_DIR)/file_system.c\"\n\n if sample == 'file_system' or sample == 'file_system_dir_cov':\n if dvt_test:\n subs['LIBS'] += \"\"\"\nAPP_ENABLE_MD5=true\"\"\"\n\n subs['LIBS'] += \"\"\"\n\nifeq ($(APP_ENABLE_MD5),true)\nLIBS += -lcrypto\nCFLAGS+= -DAPP_ENABLE_MD5=true\nendif\"\"\"\n\n if sample == 'ic_timing':\n subs['LIBS'] += ' -lrt' \n\n\n if dvt_test:\n subs['POST_SAMPLE'] = '$(AT)python ../../../dvt/scripts/iikmapsum.py $(SAMPLE).map | tee $(basename $@).stats'\n \n if sample == 'build_library':\n make_lib_template = open(LIB_TEMPLATE, 'r')\n lib_template_data = make_lib_template.read()\n make_lib_template.close()\n# lib_template = Template(lib_template_data)\n return lib_template_data\n else:\n return make_template.substitute(**subs)" ]
[ "0.60924524", "0.5882521", "0.5875414", "0.5807833", "0.57298577", "0.5695035", "0.56720126", "0.55950695", "0.55856544", "0.55830556", "0.557712", "0.55616045", "0.555261", "0.5524907", "0.5512638", "0.5499624", "0.5496154", "0.5461171", "0.54406464", "0.54035246", "0.5400476", "0.5395825", "0.5393488", "0.53826606", "0.5379185", "0.5377172", "0.5363132", "0.53524625", "0.5345551", "0.5330658" ]
0.6245206
0
add ranking to each node using google pagerank algorithm
def add_pagerank(self): query = ''' MATCH (c1:)-[r:INTERACTS]->(c2:) RETURN c1.name, c2.name, r.weight AS weight ''' ig = IGraph.TupleList(self.graph.run(query), weights=True) pg = ig.pagerank() pgvs = [] for p in zip(ig.vs, pg): print(p) pgvs.append({"name": p[0]["name"], "pg": p[1]}) write_clusters_query = ''' UNWIND {nodes} AS n MATCH (c:) WHERE c.name = n.name SET c.pagerank = n.pg ''' self.graph.run(write_clusters_query, nodes=pgvs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_rank(self):\n self.__rank += 1", "def rank():\n return 0", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def __rank__(self) -> int:", "def calculate_page_rank(self, iterations=5):\n # clear out the current page rank tables\n self.con.execute('drop table if exists pagerank')\n self.con.execute('create table pagerank(urlid primary key,score)')\n\n # initialize every url with a page rank of 1\n for (urlid,) in self.con.execute('select rowid from urllist'):\n self.con.execute('insert into pagerank(urlid,score) values (%d,1.0)' % urlid)\n self.dbcommit()\n\n for i in range(iterations):\n # Need multiple iterations, as the page ranks of pages linked to this\n # one will be consistently updated on each iteration\n print(\"Iteration %d\" % i)\n for (urlid,) in self.con.execute('select rowid from urllist'):\n # Default page rank\n page_rank = 0.15\n\n # Loop through all the pages that link to this one\n for (linker,) in self.con.execute('select distinct fromid from link where toid=%d'\n % urlid):\n # Get the page rank of the linker\n linkingpr = self.con.execute('select score from pagerank where urlid=%d'\n % linker).fetchone()[0]\n\n # Get the total number of links from the linker\n linkingcount = self.con.execute('select count(*) from link where fromid=%d'\n % linker).fetchone()[0]\n # add to page rank, accounting for the link count\n page_rank += 0.85 * (linkingpr / linkingcount)\n self.con.execute('update pagerank set score=%f where urlid=%d'\n % (page_rank, urlid))\n self.dbcommit()", "def pagerank(self, limit=20):\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tself.all_scores[urlid] = 1.0\r\n\r\n\t\tfor i in range(limit):\r\n\t\t\tfor urlid in self.url_ids:\r\n\t\t\t\tscore = self.all_scores[urlid]\r\n\t\t\t\tfor fromid in self.from_ids[urlid]:\r\n\t\t\t\t\tscore += self.all_scores[fromid] / \\\r\n\t\t\t\t\t\t\t (len(self.from_ids[fromid])+len(self.to_ids[fromid]))\r\n\t\t\t\tscore *= 0.85\r\n\t\t\t\tscore += 0.15\r\n\t\t\t\tself.all_scores[urlid] = score\r\n\t\tself.save_pr()", "def pagerank(self, alpha=0.85):\n try:\n self.logger.info('正在计算网络的PageRank值 ...')\n return self.order_dict(nx.pagerank(self.G, alpha=alpha), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))", "def pagerank(self):\n\n raise NotImplementedError", "def update(self, rank):\n # calculate MR and MRR\n self.mr += rank\n self.mrr += 1 / rank\n # calculate Hits@k\n if rank <= 1:\n self.hits1 += 1\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 3:\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 5:\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 10:\n self.hits10 += 1", "def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n newrank = dict()\n\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n repeat = True\n\n while repeat:\n\n for page in pagerank:\n\n summation = 0\n\n links = get_links(corpus, page)\n\n if not links:\n for p in corpus:\n summation += pagerank[p] / len(corpus)\n\n for link in links:\n summation += pagerank[link] / len(corpus[link])\n\n newrank[page] = (1 - damping_factor) / len(corpus) + damping_factor * summation\n\n repeat = False\n\n for page in pagerank:\n if abs(newrank[page] - pagerank[page]) > 0.001:\n repeat = True\n\n pagerank[page] = newrank[page]\n\n return pagerank", "def run_pagerank(tag_table, 
unique_tags, targetNum):\n id2tag = {i: tag for i, tag in enumerate(unique_tags)}\n tag2id = {tag: i for i, tag in id2tag.items()}\n\n co_occurence = dict()\n for tag_list in tag_table:\n indices = [tag2id[tag] for tag in tag_list]\n for pair in combinations(indices, 2):\n co_occurence[pair] = co_occurence.get(pair, 0) + 1\n\n nodes = range(len(unique_tags))\n edges = [(pair[0], pair[1], weight) for pair, weight in co_occurence.items()]\n G = nx.Graph()\n G.add_nodes_from(nodes)\n G.add_weighted_edges_from(edges)\n pr = nx.pagerank(G, weight='weight')\n\n top_indices, top_scores = zip(*sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[:targetNum])\n topTags = [id2tag[i] for i in top_indices]\n return topTags", "def _update_ranks(sample_count):\n raise NotImplementedError", "def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n new_pagerank = dict()\n repeat = True\n\n # Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus.\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n # Repeatedly calculate new rank values based on all of the current rank values\n while repeat:\n for page in corpus:\n\n # Probability that we followed a link from a page i to current page.\n followed = 0.0\n for linked_page in linked_pages(corpus, page):\n followed += pagerank[linked_page] / number_of_links(corpus, linked_page)\n\n new_pagerank[page] = (1 - damping_factor) / len(corpus) + damping_factor * followed\n\n repeat = False\n\n # Repeat the process if new PageRank value changes by more than 0.001\n for page in pagerank:\n if not isclose(pagerank[page], new_pagerank[page], abs_tol=0.001):\n repeat = True\n\n # Assigning new values to the previous ones\n pagerank[page] = new_pagerank[page]\n\n # Sorting pagerank by keys\n pagerank = dict(sorted(pagerank.items()))\n\n return pagerank", "def rank_transform(self):\n sorted_targets = sorted(self.genomes, key=lambda item: item.fitness)\n for index, target in enumerate(sorted_targets):\n target.fitness = index/len(sorted_targets) - 0.5", "def pagerank(matrix, bias, d=0.85):\n n = matrix.shape[0]\n rank = 0\n new_rank = np.array([1.0 / n] * n)\n for i in range(0,200):\n print \"iteration: \"+str(i)\n rank = new_rank\n new_rank = np.array([(1.0-d)/n] * n) + d * np.dot(matrix, rank)\n# new_rank = (1.0-d) * bias + d * np.dot(matrix, rank)\n # new_rank = [(((1.0-d) / n) +\n # d * sum((rank[i] * link) for i, link in enumerate(row)))\n # for row in matrix]\n if(has_converged(rank, new_rank)):\n break\n return new_rank", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def _graph_ranks(avranks, names, p_values, cd=None, cdmethod=None, lowv=None, highv=None, highlight=None,\n width=6, textspace=1, reverse=False, filename=None, labels=False, **kwargs):\n width = float(width)\n textspace = float(textspace)\n \n def lloc(_list, n):\n \"\"\"\n List location in list of list structure.\n Enable the use of negative locations:\n -1 is the last element, -2 second last...\n \"\"\"\n if n < 0:\n return len(_list[0]) + n\n return n\n \n def nth(_list, n):\n \"\"\"\n Returns only nth elemnt in a list.\n \"\"\"\n n = lloc(_list, n)\n return [a[n] for a in _list]\n\n def mxrange(lr):\n \"\"\"\n Multiple xranges. 
Can be used to traverse matrices.\n This function is very slow due to unknown number of\n parameters.\n >>> mxrange([3,5])\n [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]\n >>> mxrange([[3,5,1],[9,0,-3]])\n [(3, 9), (3, 6), (3, 3), (4, 9), (4, 6), (4, 3)]\n \"\"\"\n if len(lr):\n yield ()\n else:\n # it can work with single numbers\n index = lr[0]\n if isinstance(index, int):\n index = [index]\n for a in range(*index):\n for b in mxrange(lr[1:]):\n yield tuple([a] + list(b))\n\n sums = avranks\n\n nnames = names\n ssums = sums\n\n if lowv is None:\n lowv = min(1, int(math.floor(min(ssums))))\n if highv is None:\n highv = max(len(avranks), int(math.ceil(max(ssums))))\n\n cline = 0.4\n\n k = len(sums)\n\n linesblank = 0\n scalewidth = width - 2 * textspace\n\n def rankpos(rank):\n if not reverse:\n a = rank - lowv\n else:\n a = highv - rank\n return textspace + scalewidth / (highv - lowv) * a\n\n distanceh = 0.25\n\n cline += distanceh\n\n # calculate height needed height of an image\n minnotsignificant = max(2 * 0.2, linesblank)\n height = cline + ((k + 1) / 2) * 0.2 + minnotsignificant\n\n fig = plt.figure(figsize=(width, height*1.05))\n fig.set_facecolor('white')\n ax = fig.add_axes([0, 0, 1, 1]) # reverse y axis\n ax.set_axis_off()\n\n hf = 1. / height # height factor\n wf = 1. / width\n\n def hfl(_list):\n return [a * hf for a in _list]\n\n def wfl(_list):\n return [a * wf for a in _list]\n\n # Upper left corner is (0,0).\n ax.plot([0, 1], [0, 1], c=\"w\")\n ax.set_xlim(0, 1)\n ax.set_ylim(1, 0)\n\n def line(l, color='k', **kwargs):\n \"\"\"\n Input is a list of pairs of points.\n \"\"\"\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)\n\n def text(x, y, s, *args, **kwargs):\n ax.text(wf * x, hf * y, s, *args, **kwargs)\n\n line([(textspace, cline), (width - textspace, cline)], linewidth=2)\n\n bigtick = 0.3\n smalltick = 0.15\n linewidth = 2.0\n linewidth_sign = 4.0\n\n tick = None\n for a in list(np.arange(lowv, highv, 0.5)) + [highv]:\n tick = smalltick\n if a == int(a):\n tick = bigtick\n line([(rankpos(a), cline - tick / 2),\n (rankpos(a), cline)],\n linewidth=2)\n\n for a in range(lowv, highv + 1):\n text(rankpos(a), cline - tick / 2 - 0.05, str(a),\n ha=\"center\", va=\"bottom\", size=16)\n\n k = len(ssums)\n\n def filter_names(name):\n return name\n\n space_between_names = 0.24\n\n for i in range(math.ceil(k / 2)):\n chei = cline + minnotsignificant + i * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace - 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + 0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"right\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18, color='red')\n else:\n text(textspace - 0.2, chei,\n filter_names(nnames[i]), ha=\"right\", va=\"center\", size=18)\n\n for i in range(math.ceil(k / 2), k):\n chei = cline + minnotsignificant + (k - i - 1) * space_between_names\n if nnames[i] == highlight:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth, color='red')\n else:\n line([(rankpos(ssums[i]), cline),\n (rankpos(ssums[i]), chei),\n (textspace + scalewidth + 0.1, chei)],\n linewidth=linewidth)\n if labels:\n text(textspace + scalewidth - 
0.3, chei - 0.075,\n format(ssums[i], '.4f'), ha=\"left\", va=\"center\", size=10)\n if nnames[i] == highlight:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18, color='red')\n else:\n text(textspace + scalewidth + 0.2, chei, filter_names(nnames[i]),\n ha=\"left\", va=\"center\", size=18)\n start = cline + 0.2\n side = -0.02\n height = 0.1\n\n # draw no significant lines\n # get the cliques\n cliques = _form_cliques(p_values, nnames)\n achieved_half = False\n print(nnames)\n for clq in cliques:\n if len(clq) == 1:\n continue\n print(clq)\n min_idx = np.array(clq).min()\n max_idx = np.array(clq).max()\n if min_idx >= len(nnames) / 2 and achieved_half == False:\n start = cline + 0.25\n achieved_half = True\n line([(rankpos(ssums[min_idx]) - side, start),\n (rankpos(ssums[max_idx]) + side, start)],\n linewidth=linewidth_sign)\n start += height", "def mpi_rank(self, new_value):", "def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r", "def iterate_pagerank(corpus, damping_factor):\n\n PageRank = dict()\n accuracy = 0.001\n\n # initialise the rank of each page with 1 / N\n\n N = len(corpus)\n\n for page in corpus:\n PageRank[page] = 1 / N\n\n # for each page, use the PageRank formula to calculate the ranks\n\n while True:\n\n count = 0\n\n for page in corpus:\n\n new_rank = (1 - damping_factor) / N\n change = 0\n\n for new_page in corpus:\n\n if page in corpus[new_page]:\n NumLinks = len(corpus[new_page])\n change = change + (PageRank[new_page] / NumLinks)\n\n change = damping_factor * change\n new_rank += change\n\n if abs(PageRank[page] - new_rank) < accuracy:\n count += 1\n\n PageRank[page] = new_rank\n\n if count == N:\n break\n\n return PageRank", "def competitionRanking(groups, setRank):\n rank = 1\n for k, g in groups:\n cnt = 0\n for item in g:\n setRank(item, rank)\n cnt += 1\n rank += cnt", "def denseRanking(groups, setRank):\n rank = 1\n for k, g in groups:\n for item in g:\n setRank(item, rank)\n rank += 1", "def getPageRank(elistPath, alpha, maxiter, tolerance):\n\n adjGraph = AdjGraph(elistPath, separator=\" \")\n graph = adjGraph.SNAPGraph\n\n preference_vector = []\n for node in graph.Nodes():\n id = node.GetId()\n if (id % 4) == 0:\n preference_vector.append(id)\n\n pageRank, convIter, time = biasedPageRank(\n adjGraph, preference_vector=preference_vector, alpha=alpha,\n max_iterations=maxiter, tolerance=tolerance)\n\n writeCentrality(\"pagerank.txt\", pageRank)\n return pageRank, convIter, time", "def calculate_PageRank(outlinks):\n\n\t# Damping factor\n\td = 0.85\n\n\t# size of the matrix\n\tsize = outlinks.shape[0]\n\n\t# list to hold page ranks\n\tpage_ranks = [1 for i in range(size)]\n\n\t# Calculating the out degree of each node and storing in a list\n\tout_degrees = []\n\tfor i in range(size):\n\t\tsums = 0\n\t\tfor j in range(size):\n\t\t\tsums += outlinks[i][j]\n\t\tout_degrees.append(sums)\n\n\t#print(out_degrees)\n\n\tprint('Initial page ranks:')\n\tprint(page_ranks)\n\n\tfor _ in range(100):\n\t\tfor j in range(size):\n\t\t\ttemp = 0\n\t\t\tfor i in range(size):\n\t\t\t\tif outlinks[i][j] == 1:\n\t\t\t\t\ttemp += page_ranks[i] / out_degrees[i]\n\t\t\ttemp *= d\n\t\t\ttemp += (1-d)\n\t\t\tpage_ranks[j] = round(temp, 4)\n\n\treturn page_ranks", "def _add_ranks(standings, key):\n prev_key = None\n current_rank = 0\n for i, team in enumerate(standings, start=1):\n this_key = key(team)\n if this_key != prev_key:\n current_rank = 
i\n prev_key = this_key\n team.rank = current_rank", "def rank(self, current_order_by_value: Comparable, current_row_number: int) -> int:", "def __ranking_function(self, doc, query_tokens):", "def increment_node_index(self):\n self.node_index += 1", "def rank(ctx, path, metric, revision, limit, desc, threshold, wrap):\n config = ctx.obj[\"CONFIG\"]\n\n if not exists(config):\n handle_no_cache(ctx)\n\n from wily.commands.rank import rank\n\n logger.debug(f\"Running rank on {path} for metric {metric} and revision {revision}\")\n rank(\n config=config,\n path=path,\n metric=metric,\n revision_index=revision,\n limit=limit,\n threshold=threshold,\n descending=desc,\n wrap=wrap,\n )", "def set_rank(self,rank):\n self._rank = rank" ]
[ "0.6782681", "0.6664084", "0.6492858", "0.64766073", "0.6452059", "0.6438518", "0.64112085", "0.6404493", "0.62282807", "0.6224711", "0.62225074", "0.61820155", "0.6177658", "0.61345315", "0.61322975", "0.6032352", "0.6016183", "0.59991485", "0.59905785", "0.5985397", "0.5966015", "0.59427786", "0.5938758", "0.5916292", "0.59072614", "0.5894973", "0.5890533", "0.58685136", "0.58652604", "0.58532816" ]
0.75397
0
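For context on the row above, a minimal self-contained sketch of the same technique: computing PageRank over a weighted edge list with python-igraph and pairing each vertex name with its score. The edge list, names, and weights are illustrative assumptions, python-igraph is assumed to be installed, and the Neo4j write-back step from the row's document is omitted.

```python
# Illustrative sketch (assumed toy data): PageRank with python-igraph on a small
# weighted graph, producing name -> score pairs like the ones the row's document
# writes back to the graph database.
from igraph import Graph

edges = [
    ("Arya", "Sansa", 10.0),
    ("Sansa", "Jon", 4.0),
    ("Jon", "Arya", 7.0),
    ("Jon", "Sam", 5.0),
]

ig = Graph.TupleList(edges, weights=True)   # vertices get a "name" attribute
scores = ig.pagerank(weights="weight")      # one score per vertex, same order as ig.vs

ranked = [{"name": v["name"], "pg": s} for v, s in zip(ig.vs, scores)]
for entry in ranked:
    print(entry)
```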
add community membership to each node using walktrap algorithm implemented in igraph
def add_communites(self): query = ''' MATCH (c1:)-[r:INTERACTS]->(c2:) RETURN c1.name, c2.name, r.weight AS weight ''' ig = IGraph.TupleList(self.graph.run(query), weights=True) clusters = IGraph.community_walktrap(ig, weights="weight").as_clustering() nodes = [{"name": node["name"]} for node in ig.vs] for node in nodes: idx = ig.vs.find(name=node["name"]).index node["community"] = clusters.membership[idx] write_clusters_query = ''' UNWIND {nodes} AS n MATCH (c:) WHERE c.name = n.name SET c.community = toInt(n.community) ''' self.graph.run(write_clusters_query, nodes=nodes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_communities(graph):\n communities = nx.algorithms.community\\\n .greedy_modularity_communities(nx.Graph(graph))\n for node in graph.nodes:\n graph.nodes[node]['community'] = [i for i,c in enumerate(communities)\n if node in c][0]\n graph.graph['modularity'] = nx.algorithms.community.quality\\\n .modularity(nx.Graph(graph),\n communities)", "def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]", "def forward(self, nodes):\n sec_level_conlved = []\n\n for node in nodes:\n\n first_neighs = list(self.user_to_users_social_adjacency[int(node)])\n\n sec_neighs = []\n for neigh_node in first_neighs:\n sec_neighs.append(self.user_to_users_social_adjacency[int(neigh_node)])\n\n sec_neighs_aggregate_to_first_neighs_feats = self.aggregator.forward(first_neighs, sec_neighs, self.userFeatsUVFlag, False)\n\n # self_feats_first = self.uv_updated_features(torch.LongTensor(first_neighs).cpu().numpy()).to(self.device)\n self_feats_first = self.user_embeddings.weight[first_neighs]\n self_feats_first = self_feats_first\n\n first_neighs_sec_neighs_feats = torch.cat([self_feats_first, sec_neighs_aggregate_to_first_neighs_feats], dim=1)\n\n first_neighs_sec_neighs_feats = F.relu(self.w1(first_neighs_sec_neighs_feats))\n first_neighs_sec_neighs_feats = F.relu(self.w2(first_neighs_sec_neighs_feats))\n\n sec_level_conlved.append(first_neighs_sec_neighs_feats)\n\n parentnodes_convolved_with_sec_level_convolves = self.aggregator.forward(nodes, sec_level_conlved, self.userFeatsUVFlag, True)\n\n nodes_self_features = self.uv_updated_features(torch.LongTensor(nodes.cpu().numpy())).to(self.device)\n nodes_self_features = nodes_self_features.t() #TODO\n\n convolved = torch.cat([nodes_self_features, parentnodes_convolved_with_sec_level_convolves], dim=1)\n convolved = F.relu(self.w_cnvlvd(convolved))\n\n return convolved", "def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())", "def assign_louvain_communities(\n reddit_graph: nx.Graph,\n wiki_graph: nx.Graph = None,\n reddit_edge_weight: str = \"count\",\n others_threshold: int = 2,\n louvain_resolution_reddit: float = 1,\n) -> Union[nx.Graph, Tuple[nx.Graph, nx.Graph]]:\n reddit_dendrogram = community.generate_dendrogram(\n reddit_graph, weight=reddit_edge_weight, resolution=louvain_resolution_reddit\n )\n if wiki_graph:\n wiki_dendrogram = community.generate_dendrogram(\n wiki_graph,\n )\n\n # Iterate over reddit nodes to assign communities\n for node in reddit_graph:\n # Iterate over all levels of the dendrogram\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n reddit_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n if wiki_graph:\n # Also add the community from the other graph to allow comparing\n # Again, iterate over all levels in the dendrogram\n for level in 
range(len(wiki_dendrogram) - 1):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n\n try:\n node_community = partition[node]\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n\n except:\n reddit_graph.nodes[node][\n f\"louvain_community_wiki_L{level}\"\n ] = f\"L{level}-NONE\"\n if wiki_graph:\n for node in wiki_graph:\n for level in range(\n len(wiki_dendrogram) - 1,\n ):\n actual_level = len(wiki_dendrogram) - 2 - level\n\n partition = community.partition_at_level(wiki_dendrogram, level)\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n\n wiki_graph.nodes[node][\n f\"louvain_community_wiki_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n # Also add the community from the other graph to allow comparing\n\n for level in range(len(reddit_dendrogram) - 1):\n actual_level = len(reddit_dendrogram) - 2 - level\n\n partition = community.partition_at_level(reddit_dendrogram, level)\n\n try:\n node_community = partition[node]\n\n counts = Counter(partition.values())\n if counts[node_community] < others_threshold:\n node_community = -1\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{actual_level}\"\n ] = f\"L{actual_level}-{node_community:03}\"\n except:\n wiki_graph.nodes[node][\n f\"louvain_community_reddit_R{louvain_resolution_reddit:.2f}_L{level}\"\n ] = f\"L{level}-NONE\"\n\n return (\n (reddit_graph, reddit_dendrogram, wiki_graph, wiki_dendrogram)\n if wiki_graph\n else (reddit_graph, reddit_dendrogram)\n )", "def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity", "def 
wsngraph():\n G = nx.Graph()\n G.add_node(1)\n G.add_node(2)\n G.add_node(3)\n G.add_node(4)\n G.add_node(5)\n G.add_node(6)\n G.add_node(7)\n G.add_node(8)\n G.add_node(9)\n G.add_node(10)\n G.add_node(11)\n G.add_node(12)\n G.add_edge(1,3,weight=1)\n G.add_edge(1,2,weight=6)\n G.add_edge(1,12,weight=16)\n G.add_edge(2,11,weight=12)\n G.add_edge(2,6,weight=10)\n G.add_edge(2,5,weight=11)\n G.add_edge(3,4,weight=10)\n G.add_edge(3,7,weight=11)\n G.add_edge(3,8,weight=14)\n G.add_edge(3,9,weight=11)\n G.add_edge(4,7,weight=9)\n G.add_edge(5,6,weight=7)\n G.add_edge(5,9,weight=12)\n G.add_edge(6,9,weight=9)\n G.add_edge(7,10,weight=10)\n G.add_edge(8,10,weight=2)\n G.add_edge(8,11,weight=11)\n G.add_edge(8,9,weight=12)\n G.add_edge(9,11,weight=8)\n G.add_edge(10,12,weight=3)\n G.pos={}\n G.pos[1]=(6,4)\n G.pos[2]=(-1,3.7)\n G.pos[3]=(4.7,3.5)\n G.pos[4]=(5.3,3.2)\n G.pos[5]=(0,3)\n G.pos[6]=(1.4,3.4)\n G.pos[7]=(5,2.6)\n G.pos[8]=(4.7,0)\n G.pos[9]=(1.4,2.4)\n G.pos[10]=(5.2,0.5)\n G.pos[11]=(1.3,0)\n G.pos[12]=(6,2.4)\n elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] > 8]\n esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <= 8]\n nx.draw_networkx_nodes(G,G.pos,node_color='w')\n nx.draw_networkx_edges(G,G.pos,elarge,width=3,edge_color='r',alpha=0.3)\n nx.draw_networkx_edges(G,G.pos,esmall,width=1,edge_color='b',alpha=0.3)\n nx.draw_networkx_labels(G,G.pos)\n ax=plt.gca()\n ax.axison = False\n label = {} \n for (u,v) in G.edges():\n d = G.get_edge_data(u,v)\n label[(u,v)]=d['weight']\n edge_label=nx.draw_networkx_edge_labels(G,G.pos,edge_labels=label)\n\n return(G)", "def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities", "def additive_phylogeny(matrix, n, G):\n new_node = n\n\n def additive_recur_helper(matrix, n, G):\n\n nonlocal new_node\n\n if n == 2:\n print(\"d add_edge (%s,%s):%s\" % (0, 1, matrix[0, 1]))\n G.add_edge(0, 1, weight=matrix[0, 1])\n return\n\n limblen = limblength(n - 1, matrix)\n i, k = find_i_k(matrix, n - 1, limblen)\n x = matrix[i, n - 1] - limblen\n\n print(\"n=%s limblen=%s i=%s k=%s x=%s\" % (n, limblen, i, k, x))\n\n additive_recur_helper(matrix[0 : n - 1, 0 : n - 1], n - 1, G)\n\n v = node_at_distance(G, i, k, x, matrix[i, k], new_node)\n if v == new_node:\n new_node += 1\n\n print(\"node_at_distance %s from %s is %s\" % (x, i, v))\n\n print(\"e add_edge (%s,%s):%s\" % (v, n - 1, limblen))\n G.add_edge(v, n - 1, weight=limblen)\n\n # draw graph if small\n if len(G) < 30:\n global plot_cnt\n pos = nx.kamada_kawai_layout(G)\n labels = nx.get_edge_attributes(G, \"weight\")\n nx.draw(G, pos, with_labels=True)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.draw()\n plt.savefig(\"Graph\" + str(plot_cnt) + \".png\", format=\"PNG\")\n plt.clf()\n plot_cnt += 1\n\n return\n\n additive_recur_helper(matrix, n, G)\n\n return", "def make_communities(community_side, communities_per_side):\n community_size = community_side * community_side\n communities = []\n seed_node = 0\n for i in range(communities_per_side):\n for j in range(communities_per_side):\n community = []\n 
for k in range(community_side):\n for z in range(community_side):\n _id = (\n communities_per_side * community_size * i\n + community_side * j\n + z\n + k * (communities_per_side * community_side)\n )\n # print(f\"{_id} \", end=\"\")\n community.append(_id)\n # print(\"- \", end=\"\")\n communities.append(community)\n #print()\n return communities", "def getUpstream(node, distance, pInteractions):\n rpInteractions = reverseInteractions(pInteractions)\n seenNodes = set([node])\n borderNodes = [node]\n frontierNodes = []\n for dist in range(distance):\n while len(borderNodes) > 0:\n currNode = borderNodes.pop()\n if currNode in rpInteractions:\n for i in rpInteractions[currNode].keys():\n if i not in seenNodes:\n seenNodes.update([i])\n frontierNodes.append(i)\n borderNodes = deepcopy(frontierNodes)\n frontierNodes = list()\n return(seenNodes)", "def __decorate_nodes(nodes, space):\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)", "def getMutationPathways(node, gPathway, distance = [2, 1], include = None):\n rpInteractions = reverseInteractions(gPathway.interactions)\n if include == None:\n include = set(gPathway.nodes.keys())\n upPathway = Pathway({node : gPathway.nodes[node]}, {})\n downPathway = Pathway({node : gPathway.nodes[node]}, {})\n seenUp = set([node])\n seenDown = set([node])\n unresolvedUp = [node]\n unresolvedDown = [node]\n for d in range(distance[0]): \n ## Up-\n frontierUp = []\n while len(unresolvedUp) > 0:\n currNode = unresolvedUp.pop()\n ## Add complex as upstream for seed node\n if currNode == node:\n if currNode in gPathway.interactions:\n for target in gPathway.interactions[currNode].keys():\n if gPathway.interactions[currNode][target] == \"component>\":\n seenUp.update([target])\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[currNode] = {}\n upPathway.interactions[currNode][target] = \"component>\"\n unresolvedUp.append(target)\n ## Add upstream\n if currNode in gPathway.rinteractions:\n for target in gPathway.rinteractions[currNode].keys():\n if target not in seenUp:\n seenUp.update([target])\n if gPathway.nodes[target] == \"protein\":\n if target in include:\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[target] = {}\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n frontierUp.append(target)\n elif gPathway.nodes[target] == \"complex\":\n upPathway.nodes[target] = gPathway.nodes[target]\n upPathway.interactions[target] = {}\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedUp.append(target)\n else:\n if target not in upPathway.interactions:\n upPathway.interactions[target] = {}\n if currNode not in upPathway.interactions[target]:\n upPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedUp = deepcopy(frontierUp)\n for d in range(distance[1]):\n ## Down-\n frontierDown = []\n while len(unresolvedDown) > 0:\n currNode = unresolvedDown.pop()\n ## Add downstream\n if currNode in gPathway.interactions:\n for target in gPathway.interactions[currNode].keys():\n if target not in seenDown:\n seenDown.update([target])\n if gPathway.nodes[target] == \"protein\":\n if target in include:\n downPathway.nodes[target] = gPathway.nodes[target]\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n 
downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n frontierDown.append(target)\n elif gPathway.nodes[target] == \"complex\":\n downPathway.nodes[target] = gPathway.nodes[target]\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n unresolvedDown.append(target)\n else:\n if currNode not in downPathway.interactions:\n downPathway.interactions[currNode] = {}\n if target not in downPathway.interactions[currNode]:\n downPathway.interactions[currNode][target] = gPathway.interactions[currNode][target]\n ## Add upstream for non-seed node\n # if currNode != node:\n # if currNode in gPathway.rinteractions:\n # for target in gPathway.rinteractions[currNode].keys():\n # if target not in seenDown:\n # seenDown.update([target])\n # if gPathway.nodes[target] == \"protein\":\n # if target in include:\n # downPathway.nodes[target] = gPathway.nodes[target]\n # downPathway.interactions[target] = {}\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n # elif gPathway.nodes[target] == \"complex\":\n # downPathway.nodes[target] = gPathway.nodes[target]\n # downPathway.interactions[target] = {}\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n # unresolvedDown.append(target)\n # else:\n # if target not in downPathway.interactions:\n # downPathway.interactions[target] = {}\n # if currNode not in downPathway.interactions[target]:\n # downPathway.interactions[target][currNode] = gPathway.interactions[target][currNode]\n unresolvedDown = deepcopy(frontierDown)\n return(upPathway, downPathway)", "def transition(self):\n for node in self.net.nodes():\n if node not in self.evidence:\n self.update_node(node)", "def sum_product(nodes, edges, node_potentials, edge_potentials):\n marginals = {}\n messages = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n \n def send_message(j, i, grand_children_of_i):\n \"\"\"\n Send messages from node j to node i, i.e. 
summing over all xj\n \n Input\n -----\n j: Source node (to be summed over)\n i: Destination node\n grand_children_of_i: All neighboring nodes except node i (sources of messages).\n \"\"\"\n messages[(j,i)] = {xi: 0 for xi in node_potentials[i]}\n \n incoming_messages = {xj: 1 for xj in node_potentials[j]} # Default to be 1 for leaf nodes (no effect)\n if len(grand_children_of_i) != 0: # Only deal with this case because at leaf node, no messages to be collected\n for xj in node_potentials[j]:\n for grand_child in grand_children_of_i:\n incoming_messages[xj] *= messages[(grand_child, j)][xj]\n for xj in node_potentials[j]:\n for xi in node_potentials[i]:\n messages[(j,i)][xi] += node_potentials[j][xj] * edge_potentials[(j,i)][xj][xi] * incoming_messages[xj]\n \n \n def collect_messages(j, i):\n \"\"\"\n Collect messages from node j to node i\n \"\"\"\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n for k in j_neighbors_except_i: # No effect when j_neighbors_except_i is empty []\n collect_messages(k, j)\n send_message(j, i, j_neighbors_except_i)\n \n def distribute_messages(i, j):\n \"\"\"\n Distribute messages from node i to node j\n \"\"\"\n i_neighbors_except_j = [k for k in edges[i] if k != j]\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n send_message(i, j, i_neighbors_except_j)\n for k in j_neighbors_except_i:\n distribute_messages(j, k)\n \n def compute_marginal(i):\n marginals[i] = node_potentials[i]\n for x in marginals[i]:\n for neighbor_node in edges[i]:\n marginals[i][x] *= messages[(neighbor_node, i)][x]\n \n # Renormalize\n normalization_const = np.array(list(marginals[i].values())).sum()\n for x in marginals[i]:\n marginals[i][x] /= normalization_const\n \n \n root_node = list(nodes)[0]\n for node in edges[root_node]:\n collect_messages(node, root_node)\n for node in edges[root_node]:\n distribute_messages(root_node, node)\n for node in nodes:\n compute_marginal(node)\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return marginals", "def _position_nodes(g, partition, **kwargs):\n\n communities = dict()\n for node, community in partition.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = g.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph,k=10,iterations=20)\n #pos_subgraph = nx.spring_layout(subgraph, **kwargs)\n pos.update(pos_subgraph)\n\n return pos", "def metis(W, levels, rid=None):\n # Function written by M. 
Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents", "def addNeighbor(self, neighbor):", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to 
correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def distributePheromones(self, graph):\n pheromoneUpdate = 100.0 / self.fitness\n previousBin = 0\n for newBin, item in self.route:\n graph.graph[previousBin, item, newBin] += pheromoneUpdate\n previousBin = newBin", "def _community_detection(self, kg: KG) -> None:\n nx_graph = nx.Graph()\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n nx_graph.add_node(str(vertex), vertex=vertex)\n\n for vertex in kg._vertices:\n if not vertex.predicate:\n # Neighbors are predicates\n for pred in kg.get_neighbors(vertex):\n for obj in kg.get_neighbors(pred):\n nx_graph.add_edge(\n str(vertex), str(obj), name=str(pred)\n )\n\n # Create a dictionary that maps the URI on a community\n partition = community.best_partition(\n nx_graph, resolution=self.resolution\n )\n self.labels_per_community = defaultdict(list)\n\n self.communities = {}\n vertices = nx.get_node_attributes(nx_graph, \"vertex\")\n for node in partition:\n if node in vertices:\n self.communities[vertices[node]] = partition[node]\n\n for node in self.communities:\n self.labels_per_community[self.communities[node]].append(node)", "def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph", "def make_walk_node(self, g):\r\n start = len(self.walk)\r\n self.walk.append(g)\r\n g.visited += 1\r\n self.add_loop(start, g)\r\n\r\n i = start\r\n while i < len(self.walk):\r\n node = self.walk[i]\r\n unused = self.find_unused_connection(node)\r\n if unused is None:\r\n i += 2\r\n continue\r\n i += self.add_loop(i, node)\r\n i += 2", "def cluster_connectivity(G, weight='weight'):\n\t# 1) indexing the edges by community\n\tsum_edges_dic = { com : {} for com in range(G.nb_communities)}\n\tfor node1, node2 in G.edges():\n\t\tcomm1 = G.nodes[node1]['community']\n\t\tcomm2 = G.nodes[node2]['community']\n\t\tif comm2 not in sum_edges_dic[comm1]:\n\t\t\tsum_edges_dic[comm1][comm2] = 0\n\t\t\tsum_edges_dic[comm2][comm1] = 0\n\t\telse:\n\t\t\tif weight is None:\n\t\t\t\tsum_edges_dic[comm1][comm2] += 1\n\t\t\t\tsum_edges_dic[comm2][comm1] += 1\n\t\t\telse:\t\n\t\t\t\tsum_edges_dic[comm1][comm2] += G.edges[node1, node2][weight]\n\t\t\t\tsum_edges_dic[comm2][comm1] += G.edges[node1, node2][weight]\n\tc_connectivity = {}\n\t# 2) computing the connectivity\n\tfor com in sum_edges_dic:\n\t\tin_out_edges = sum(sum_edges_dic[com].values())\n\t\tc_connectivity[com] = round(- np.log2(sum_edges_dic[com][com] / in_out_edges),3) \n\treturn 
c_connectivity", "def extend_labeled_graph(graph):\n la = community.best_partition(graph)\n nx.set_node_attributes(graph, la, 'community')\n nodes = graph.nodes(data=True)\n # nx.write_graphml(graph,'./data_2/clean_data/comm_graph.graphml')\n\n a = list(set(list(la.values())))\n temp = {}\n for comm in a:\n temp[comm] = [k for k, v in la.items() if v == comm]\n\n s = sorted(temp, key=lambda k: len(temp[k]), reverse=True)[:10]\n comm_size = {}\n for key in s:\n if key in temp:\n comm_size[key] = temp[key]\n\n dict_leaning_amount = {}\n for comm, ids in comm_size.items():\n count_r = 0\n for node in ids:\n if graph.node[node]['leaning'] == 'R':\n count_r += 1\n dict_leaning_amount[comm] = count_r\n sort_lean = sorted(dict_leaning_amount.items(), key=operator.itemgetter(1), reverse=True)\n top_3 = [k for k, v in sort_lean][0:3]\n\n extendible_nodes = []\n for comm in top_3:\n nodes = temp[comm]\n for node in nodes:\n if graph.node[node]['leaning'] == 'Unknown':\n extendible_nodes.append(node)\n\n original_graph = create_labeled_subgraph(graph)\n extendible_nodes.extend(list(create_labeled_subgraph(graph).nodes()))\n extendible_node_Set = set(extendible_nodes)\n\n extended_graph = nx.subgraph(graph, list(extendible_node_Set))\n return original_graph, extended_graph", "def _general_link(clusters, i, j, method):\n for k in range(len(clusters)):\n if k != i and k != j:\n if method.__name__ == \"ward_update\":\n new_distance = method(clusters[i,k], clusters[j,k], k)\n else:\n new_distance = method(clusters[i,k], clusters[j,k])\n clusters[i,k] = new_distance\n clusters[k,i] = new_distance\n return clusters", "def peel_clusters(self, plot_step=0):\n\n def peel_edge(cluster, vertex):\n \"\"\"\n :param cluster current active cluster\n :param vertex pendant vertex of the edge to be peeled\n\n Recursive function which peels a branch of the tree if the input vertex is a pendant vertex\n\n If there is only one neighbor of the input vertex that is in the same cluster, this vertex is a pendant vertex and can be peeled. The function calls itself on the other vertex of the edge leaf.\n \"\"\"\n plot = True if self.plot and plot_step else False\n num_connect = 0\n\n for wind in self.graph.wind:\n (NV, NE) = vertex.neighbors[wind]\n if NE.support == 2:\n new_cluster = find_cluster_root(NV.cluster)\n if new_cluster is cluster and not NE.peeled:\n num_connect += 1\n edge, new_vertex = NE, NV\n if num_connect > 1:\n break\n if num_connect == 1:\n edge.peeled = True\n if vertex.state:\n edge.state = not edge.state\n edge.matching = True\n vertex.state = False\n new_vertex.state = not new_vertex.state\n if plot:\n self.uf_plot.plot_edge_step(edge, \"match\")\n self.uf_plot.plot_strip_step_anyon(vertex)\n self.uf_plot.plot_strip_step_anyon(new_vertex)\n else:\n if plot:\n self.uf_plot.plot_edge_step(edge, \"peel\")\n peel_edge(cluster, new_vertex)\n\n for vertex in self.graph.V.values():\n if vertex.cluster is not None:\n cluster = find_cluster_root(vertex.cluster)\n peel_edge(cluster, vertex)\n\n if self.plot and not plot_step:\n self.uf_plot.plot_removed(self.graph, \"Peeling completed.\")", "def run_adding_edges(self):\n indices = np.where(self.X==0)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)" ]
[ "0.5967038", "0.5503575", "0.54628575", "0.5436496", "0.54320776", "0.54097146", "0.53649384", "0.53567284", "0.5344505", "0.53354657", "0.5303637", "0.5284366", "0.52824575", "0.52807456", "0.525839", "0.52522177", "0.52431047", "0.52199847", "0.5210583", "0.51947844", "0.5170885", "0.5151944", "0.5148783", "0.51455945", "0.5138633", "0.5134267", "0.51229393", "0.5100826", "0.5099169", "0.5097744" ]
0.6357265
0
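Similarly, for the community-detection row above, a minimal sketch of Walktrap community detection with python-igraph on a toy weighted graph. The vertex names, weights, and the two-cluster shape are illustrative assumptions, and the step that stores each community id back on the database node is omitted.

```python
# Illustrative sketch (assumed toy data): Walktrap communities with python-igraph.
from igraph import Graph

edges = [
    ("A", "B", 5.0), ("B", "C", 4.0), ("A", "C", 6.0),   # one dense cluster
    ("D", "E", 5.0), ("E", "F", 4.0), ("D", "F", 6.0),   # another dense cluster
    ("C", "D", 1.0),                                     # weak bridge between them
]

ig = Graph.TupleList(edges, weights=True)
clusters = ig.community_walktrap(weights="weight").as_clustering()

# clusters.membership[i] is the community id of vertex i; pair it with the name.
for vertex, community_id in zip(ig.vs, clusters.membership):
    print(vertex["name"], "->", community_id)
```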
Advance the time reference by the given amount.
def advance_by(self, amount: float): if amount < 0: raise ValueError("cannot retreat time reference: amount {} < 0" .format(amount)) self.__delta += amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance(self, amount):\n raise NotImplementedError()", "def advance(self, amount=1):\n self._current += amount\n if self._current - self._updateRate >= self._lastUpdated:\n self.redraw()\n # go to nearest multiple of updateRate less than current\n self._lastUpdated = (self._current // self._updateRate)*self._updateRate", "def advance(self, amount=1):\n raise NotImplementedError()", "def advance(self, amount):\n right_now = self.rightNow + amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= right_now:\n self.rightNow = self.calls[0].getTime()\n call = self.calls.pop(0)\n call.called = 1\n call.func(*call.args, **call.kw)\n self._sortCalls()\n self.rightNow = right_now", "def advance(self):\n self.amount = self._nextAmount", "def advance(self, amount=1):\n self._current += amount\n self.redraw()", "def advance(self, dt):\n self.workTill(self.currentTime + dt)", "def increment(self, amount):\n pass", "def advance(self, delta_t: float) -> None:\n pass", "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)", "def advanceCompletely(self, amount):\n self.rightNow += amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= self.seconds():\n call = self.calls.pop(0)\n call.called = 1\n yield call.func(*call.args, **call.kw)\n self._sortCalls()", "def advance(self, time):\n raise \"use method advance of class ReactorNet\"\n #return _cantera.reactor_advance(self.__reactor_id, time)", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()", "def _advance(self):\n self._current += 1", "def advance_to(self, timestamp: float):\n now = self.__original_time()\n if timestamp < now:\n raise ValueError(\"cannot retreat time reference: \"\n \"target {} < now {}\"\n .format(timestamp, now))\n self.__delta = timestamp - now", "def _advance(self):\n self._prev, self._current = self._current, abs(self._prev - self._current)", "def advance(self):\n # Increment iteration counter\n self.currentIteration += 1\n if self._lastStep:\n # The timestep was adjusted to reach end in the previous call\n # So now the simulation is over\n self.isOver = True\n else:\n if self.currentIteration < self.iterMax:\n # Advance time for the iteration just ended\n self.tk = self.tkp1\n self.tkp1 = self.tk + self.timeStep\n\n # Adjust last timestep to reach self.end\n if self.tkp1 > self.end:\n self.timeStep = self.end - self.tk\n if self.timeStep <= self.tol:\n self.isOver = True\n else:\n self.tkp1 = self.end\n self._lastStep = True\n else:\n # iteration number is reached\n self.isOver = True\n\n self.time = self.tkp1", "def _advance(self):\n self._current += self._increment # Accessing the superclass's field", "def _advance(self):\t\t# override inherited version\n self._current *= self._base", "def advance():\n global angle_movement, bullet_distance, fire, time\n time += 1\n angle_movement += angle_step\n if angle_movement >= 360:\n angle_movement -= 360 # So angle doesn't get too large.\n elif angle_movement < 0:\n angle_movement += 360 # So angle doesn't get too small.", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def step(self, value):\n self.real_time += pd.DateOffset(**{self.time_unit: value})\n self.simu_time += value\n logger.debug(\"NEW TIME\")", "def advance(self):\n\n max_days = 
Calendar.months[self.__months - 1]\n if self.__months == 2 and Calendar.leapyear(self.__years):\n max_days += 1\n if self.__days == max_days:\n self.__days = 1\n if self.__months == 12:\n self.__months = 1\n self.__years += 1\n else:\n self.__months += 1\n else:\n self.__days += 1", "def increment_date(self, change_amount=None):\n if change_amount is None:\n change_amount = self._timestep_duration\n\n self._current_date += relativedelta(years=change_amount)\n self._current_timestep = self.timestep_from_date(self._current_date)", "def advance(self, dt):\n for i, p in enumerate(self.persons):\n p.advance(dt)\n self.handle_collisions()", "def increment(self):\n self._deltas += 1", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def advancePosition(self,time):\n velocity = self.getVelocity()\n return self.x + time*velocity", "def advance_time_delta(timedelta):\r\n assert(not utcnow.override_time is None)\r\n try:\r\n for dt in utcnow.override_time:\r\n dt += timedelta\r\n except TypeError:\r\n utcnow.override_time += timedelta", "def countdown(self, amt=1):\n pass" ]
[ "0.74948883", "0.7390248", "0.7373512", "0.72310776", "0.7102698", "0.70964295", "0.688938", "0.6812329", "0.6760769", "0.6728943", "0.64948493", "0.6393024", "0.6281656", "0.6251204", "0.619589", "0.61536574", "0.6126449", "0.60908896", "0.6087889", "0.6015486", "0.60153484", "0.6002451", "0.59907424", "0.5940177", "0.59203255", "0.591805", "0.5907898", "0.58963704", "0.58940864", "0.5880523" ]
0.8371448
0
Advance the time reference so that now is the given timestamp.
def advance_to(self, timestamp: float): now = self.__original_time() if timestamp < now: raise ValueError("cannot retreat time reference: " "target {} < now {}" .format(timestamp, now)) self.__delta = timestamp - now
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_time(self, new_time):\r\n self.when = new_time", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()", "def increment_datetime(self):\n self.current_datetime += timedelta(seconds=self.step_size)", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)", "def update_time(self):\n pass # Do nothing", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def setTimepoint(self, tp):\n\t\tpass", "def advance(self, dt):\n self.workTill(self.currentTime + dt)", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_time_now(self, *args, **kwargs)", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_time_now(self, *args, **kwargs)", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_time_now(self, *args, **kwargs)", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def next_change(self, t=None):\n if t is None:\n t = self.engine.get_now()\n t -= self.initTime\n\n p = float(self.period) / POINTS_PER_CYCLE\n t2 = math.floor(t / p) * p + p\n\n t2 += self.initTime\n\n return t2", "def set_current_time(self, ttime):\n if not isinstance(ttime, Time):\n raise TypeError\n try:\n localtime = ttime.local_repr().split()\n timeSetCmd = 'date -s ' + localtime[3]\n #XXX: here seems a dirty quick way (os.system).\n os.system(timeSetCmd)\n yield WaitDBus(self.rtc.SetCurrentTime, int(ttime.value) )\n except Exception, ex:\n logger.exception(\"Exception : %s\", ex)\n raise", "def set_time_now(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_time_now(self, *args, **kwargs)", "def update_time(self, offset):\n offset = float(offset[1:])\n self.diff_since_last = offset - self.time_offset\n self.time_since_last += self.diff_since_last\n self.time_since_last_events += self.diff_since_last\n self.time_offset = offset", "def time_updated(self, time_updated):\n self._time_updated = time_updated", "def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time", 
"def _set_timestamp(self):\n d = datetime.now()\n self._time_stamp = \"{:>2} {} {} {:>2}:{:>02}\".format(\n d.day, MONTH_ABBREV[d.month], d.year, d.hour, d.minute)", "def _freeze_time(self, timestamp):\n now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)\n now_patch.start()\n self.addCleanup(now_patch.stop) # lint-amnesty, pylint: disable=no-member" ]
[ "0.6708446", "0.6519798", "0.63856924", "0.63803643", "0.6373168", "0.63460845", "0.6326127", "0.62440217", "0.62124366", "0.6148152", "0.614451", "0.61350346", "0.6095682", "0.604849", "0.6047468", "0.60337085", "0.60114896", "0.6003276", "0.6003276", "0.6003276", "0.6003276", "0.6003276", "0.59980756", "0.59876317", "0.5980938", "0.5979058", "0.59748805", "0.59616286", "0.5951179", "0.5944869" ]
0.7582537
0
Email the given document to the given email address.
def email_document(document, to, template='django_dms/email.txt', subject=''): # Start a new thread to email the document # This avoids a frozen screen while the email is being sent (particularly if the document is big). t = threading.Thread(target=_email_document, args=[document, to, template, subject]) t.setDaemon(True) t.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def send_mail(self, address, title, message):\n pass", "def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email(self, email: str):\n\n self._email = email", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n 
message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request))\n \n # NB: Temporarily disabling actual email sending for development\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n print \"Sending email to %s\" % email \n \n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))", "def send_mail(email):\n return email.send()", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])" ]
[ "0.7192423", "0.62836236", "0.6040218", "0.6037075", "0.59470624", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.591903", "0.58806413", "0.58806413", "0.58301985", "0.5820901", "0.5814947", "0.5814947", "0.5814947", "0.5790432", "0.5716784", "0.5679747", "0.56669414", "0.5635135", "0.56337947", "0.56128144", "0.5592558" ]
0.7277005
0
Helper function to email document in another thread.
def _email_document(document, to, template='django_dms/email.txt', subject=''): # TODO: A really cool system would delay sending the email for 10 seconds or so, # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS) # Create the message message = EmailMessage(to=to, subject=subject) message.to = to message.subject = subject message.body = render_to_string(template, {'document': document}) message.attach(document.friendly_filename, document.file.read(), document.file_mimetype) # Send the message message.send()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()", "def send_async_email(self, msg):\n with app.app_context():\n result = mail.send(msg)\n print result", "def send_email(subject, sender, recipients, html_body):\n\n try:\n # Create a new SendGrid Mail object with the arguments given\n message = Mail(\n from_email=sender,\n to_emails=recipients,\n subject=subject,\n html_content=html_body)\n\n # We prepare a new Thread here to send the email in the background. This takes in the send_async_email\n # function as its target and runs the function with the parameters passed through args.\n Thread(target=send_async_email,\n args=(current_app._get_current_object(), message)).start()\n\n except Exception as e:\n print(e)\n # FIXME: should do some type of error handling here or allow error to bubble up", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. \"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def create_email_job(app, db):\n from app.models import Lembrete\n lock = threading.Lock()\n\n def send_email():\n with lock:\n sp = datetime.now(tz=sao_paulo_tz)\n agora = datetime(\n year=sp.year,\n month=sp.month,\n day=sp.day,\n hour=sp.hour,\n minute=sp.minute\n )\n lembretes = Lembrete.query.filter(\n Lembrete.data_notificacao <= agora\n ).all()\n print('Enviando emails')\n if lembretes:\n for lembrete in lembretes:\n texto = lembrete.texto\n nome = ''\n veiculo = ''\n telefone = ''\n celular = ''\n tel_comercial = ''\n e_mail = ''\n if lembrete.cliente is not None:\n nome = lembrete.cliente.nome\n telefone = lembrete.cliente.telefone\n celular = lembrete.cliente.celular\n tel_comercial = lembrete.cliente.telefone_comercial\n e_mail = lembrete.cliente.email\n if lembrete.cliente is not None:\n veiculo = lembrete.veiculo.descricao()\n\n mensagem = \"\"\"\n Nome: {0}\n Telefone: {1}\n Celular: {2}\n Telefone Comercial: {3}\n E-mail: {4}\n Veículo: {5}\n Lembrete: {6}\n \"\"\".format(\n nome,\n telefone,\n celular,\n tel_comercial,\n e_mail,\n veiculo,\n texto\n )\n email = MIMEText(mensagem)\n\n me = app.config['EMAIL_ME']\n you = app.config['EMAIL_YOU']\n password = app.config['EMAIL_ME_PASSWORD']\n smtp = app.config['EMAIL_SMTP']\n smtp_port = app.config['EMAIL_SMTP_PORT']\n\n email['Subject'] = 'Lembrete: {0}|{1}'.format(\n nome, veiculo\n )\n email['From'] = me\n email['To'] = you\n\n s = smtplib.SMTP(smtp, smtp_port)\n s.ehlo()\n s.starttls()\n s.login(me, password)\n s.sendmail(me, [you], email.as_string())\n s.quit()\n # excluindo o lembrete\n db.session.delete(lembrete)\n db.session.commit()\n return send_email", "def mail():\n mail_server = 'localhost'\n mail_port = 1025\n CustomSMTPServer((mail_server, mail_port), None)\n asyncore.loop()", "def send_ajax(self, request, id, tribe_slug):\n\n document = self.get_document(id, tribe_slug)\n\n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if not email and not form:\n form = EmailForm()\n \n if form:\n content = '<form class=\"ajax_update_email\" action=\"%s\" method=\"post\">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])\n content += '%s<input type=\"submit\" value=\"Send\"/></form>' % 
form['email']\n return HttpResponse(content)\n \n print \"Sending email to %s\" % email\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n\n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponse('Email sent to %s' % email)", "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request))\n \n # NB: Temporarily disabling actual email sending for development\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n print \"Sending email to %s\" % email \n \n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))", "def send_email(self, message):\n pass", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, '[email protected]',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def send_realtime_email(self,body_):\n import smtplib, ssl\n\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = self.fromaddr # Enter your address\n receiver_email = self.toaddr # Enter receiver address\n password = self.pswd\n message = f\"\"\"\\\nSubject: [Test] Twitter real time (half) hourly trending alert\n\n{body_}\"\"\"\n\n context = ssl.create_default_context()\n # send to multiple emails\n for receiver in receiver_email:\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver, message)\n \n print(f'Email successfully sent to {receiver}')", "def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def main():\n return render_template('doc.html', docid=queue.pop(0))", "def send_async_email(app, msg):\n\n # The function is called on a custom Thread, so we need to get the application context before sending a message.\n with 
app.app_context():\n\n # Instantiate the SendGridAPIClient with API key and send message\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n sg.send(msg)", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def send_email_on_delay(template, context, subject, email):\n print(\"delay\")\n send_mail_from_template(template, context, subject, email)", "def async_mail_task(subject_or_message, to=None, template=None, **kwargs):\n to = to or kwargs.pop('recipients', [])\n msg = make_message(subject_or_message, to, template, **kwargs)\n with mail.connect() as connection:\n connection.send(msg)", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def quick_email(self, send_to, subject, body, style=None):\n message = Message(body, style=style)\n\n self.send_message(message, send_to, subject)", "def emailNote(self, authenticationToken, parameters):\r\n self.send_emailNote(authenticationToken, parameters)\r\n self.recv_emailNote()", "def post(self):\n return send_email(request.args)", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def runDocumentInBackground (self):\n self.runDocument(background = True)", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def doctest_BackgroundWorkerThread_getTransactionNote():", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def send_mail(self, msg):\n mail_queue.put(msg)" ]
[ "0.7538517", "0.6329317", "0.5854818", "0.57976854", "0.57880086", "0.57461154", "0.57316077", "0.5677812", "0.56468177", "0.56263226", "0.5602722", "0.5572509", "0.55519354", "0.554117", "0.55247223", "0.5500545", "0.546613", "0.5464859", "0.5459716", "0.5438996", "0.5438996", "0.54373986", "0.54294187", "0.5409059", "0.5405909", "0.53975236", "0.5389291", "0.5387345", "0.5372609", "0.5349537" ]
0.662225
1
Send the specified document to the user's email address (AJAX version).
def send_ajax(self, request, id, tribe_slug): document = self.get_document(id, tribe_slug) form = self._set_user_email_address(request) email = self._get_user_email_address(request) if not email and not form: form = EmailForm() if form: content = '<form class="ajax_update_email" action="%s" method="post">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)]) content += '%s<input type="submit" value="Send"/></form>' % form['email'] return HttpResponse(content) print "Sending email to %s" % email #email_document(document, to=[email], subject='Document: %s' % document.title) # Send a signal to let everyone know about this document interaction document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email) return HttpResponse('Email sent to %s' % email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, request, id, tribe_slug):\n tribe = get_object_or_404(Tribe, slug=tribe_slug)\n document = self.get_document(id, tribe_slug)\n \n form = self._set_user_email_address(request)\n email = self._get_user_email_address(request)\n if form or not email:\n return render_to_response('django_dms/send.html', locals(), context_instance=RequestContext(request))\n \n # NB: Temporarily disabling actual email sending for development\n #email_document(document, to=[email], subject='Document: %s' % document.title)\n print \"Sending email to %s\" % email \n \n # Send a signal to let everyone know about this document interaction\n document_interaction.send(sender=self, document=document, mode=\"sent\", request=request, recipient=email)\n \n return HttpResponseRedirect(reverse('%s_document_list' % self.name, args=(tribe_slug, ) ))", "def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()", "def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()", "def post(self):\n return send_email(request.args)", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def save_form(self, request, form, change):\n\n document = form.instance\n self.send_notification_email(document, request, \n 'email/document_modified.txt.django')\n\n document = super(DocumentAdmin, self).save_form(request, form, change)\n document.uploader = request.user\n return document", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def send_confirmation_email(user_pk):\n pass", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def send_text_to_user(user):", "def send(self, email):\n client = 
self.clients[email.addressee]\n client.receive(email)", "def email(self):\r\n webbrowser.open(\"mailto: [email protected]\")", "def resend_email(self, userdict):\n return self.post('resend', userdict)", "def __send_email_to_user(self, template_name, email_subject, book, redirect, date=None):\r\n ctx = {\r\n 'date': datetime.now().date(),\r\n 'user': \"{} {}\".format(self.request.user.first_name, self.request.user.last_name),\r\n 'book': \"{} - {}\".format(book.title, book.author),\r\n 'profile_url': self.request.build_absolute_uri(reverse(redirect)),\r\n 'cons_res_date': date\r\n }\r\n\r\n html_content = render_to_string(\r\n 'users/emails/{}.html'.format(template_name), ctx)\r\n # Strip the html tag. So people can see the pure text at least.\r\n text_content = strip_tags(html_content)\r\n\r\n msg = EmailMultiAlternatives(\r\n email_subject, text_content, \"[email protected]\", [\r\n self.request.user.email])\r\n msg.attach_alternative(html_content, \"text/html\")\r\n msg.send()", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)", "def post(self):\n data = request.get_json()\n user = actions.get_user_by_email(data['email'])\n html = '<p>Confirming your account will give you </p> <b>full access to Kwikker</b>'\n subject = 'Confirm your Kwikker account, ' + user['username']\n actions.send_email(data['email'], user['username'], user['password'], subject,\n '/confirm/', html, True)\n return \"\", 200\n pass", "def receive_email_view(request):\n save_inbound_email(request.POST, request.FILES)\n return HttpResponse(200)", "def toggle_jobmail(request):\n if request.is_ajax():\n if request.method == 'POST':\n request.user.jobmail = not request.user.jobmail\n request.user.save()\n\n return HttpResponse(status=200, content=json.dumps({'state': request.user.jobmail}))\n raise Http404", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def send_email(self, message):\n pass", "def send_mail(email):\n return email.send()", "def save(self, commit=False):\n mail_result = self.send_email()\n if mail_result:\n self.instance.is_admin_notified = True\n\n contact = super().save(commit=commit)\n\n return contact", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def position_applicants_send_email(id):\n if current_user.id is None:\n abort(403)\n else:\n form = ContactForm(request.form)\n if request.method == 'POST' and form.validate():\n position = db.session.query(Job).get(id)\n if position is None:\n abort(404)\n emails = [u.email for u in position.users]\n message = Message(subject=form.subject.data,\n sender='[email protected]',\n reply_to='[email protected]',\n recipients=[''],\n bcc=emails,\n body=form.text.data)\n mail.send(message)\n flash(\"Message was send.\", 'success')\n return redirect(url_for('organisations.view_applicants', id=id))\n return render_template('organisations/message_send_form.html', form=form)", "def _send(self, email_message): 
\n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def set_email():\n bid_fields = {'assertion':request.form['bid_assertion'],\n 'audience':settings.DOMAIN}\n headers = {'Content-type':'application/x-www-form-urlencoded'}\n h.disable_ssl_certificate_validation=True\n resp, content = h.request('https://browserid.org/verify',\n 'POST',\n body=urlencode(bid_fields),\n headers=headers)\n bid_data = json.loads(content)\n if bid_data['status'] == 'okay' and bid_data['email']:\n user = f.userFromBidOrNew(bid_data['email'])\n session['user_id'] = user.id\n session['user_email'] = user.email\n\n return redirect(url_for('main'))", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )" ]
[ "0.7083903", "0.6592813", "0.6592162", "0.6246286", "0.5818268", "0.58028346", "0.5787325", "0.5658083", "0.56290865", "0.5581146", "0.55060256", "0.5494548", "0.5458515", "0.54515415", "0.544656", "0.5391331", "0.5326866", "0.53013676", "0.52664655", "0.5242208", "0.5226229", "0.5215729", "0.5182811", "0.51782763", "0.5169317", "0.51628906", "0.5138254", "0.5135522", "0.5119434", "0.51016647" ]
0.7920855
0
Gets a custom defined or default email address for the current user.
def _get_user_email_address(self, request): return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")", "def Email(self, default=None):\n return self.data.get('email', default)", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def get_email(self):\n return self.email", "def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def get_email(self):\n return self._email", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email_address(self) -> str:\n return self._email_address", "def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None", "def log_useremail(self):\n return self.user.email", "def email(self, instance):\r\n return instance.user.email", "def get_user_email():\n if not is_authenticated() or not is_authenticated_CSC_user() or 'samlUserdata' not in session:\n return None\n\n csc_email = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('email', None), False)\n\n return csc_email[0] if csc_email else not_found('csc_email')\n return None", "def getEmail(self):\n return self.__email", "def get_default_email(self):\n email = '[email protected]'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email", "def getEmail(self):\n return self.email", "def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address", "def service_account_email_address(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"service_account_email_address\")", "def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")" ]
[ "0.79681355", "0.77469647", "0.7720694", "0.76566875", "0.7597106", "0.75681674", "0.7504296", "0.74986595", "0.74986595", "0.74986595", "0.74986595", "0.7492597", "0.7372196", "0.7372196", "0.7372196", "0.7372132", "0.73386735", "0.72807336", "0.72577626", "0.7211655", "0.72088623", "0.7208019", "0.7205832", "0.72001445", "0.7198179", "0.7190173", "0.71753037", "0.71753037", "0.71753037", "0.71753037" ]
0.793554
1
If a new email address is posted, remember it.
def _set_user_email_address(self, request): if request.method == 'POST': form = EmailForm(request.POST) if form.is_valid(): request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email'] else: return form
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')", "def change_email(self, token):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n if data.get(\"user_id\") != self.id:\n return False\n new_email = data.get(\"new_email\")\n if new_email is None:\n return False\n # check to see if another user has this email\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = data.get(\"new_email\")\n db.session.add(self)\n return True", "def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email", "def change_email(self, token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n if data.get('change_email') != self.id:\n return False\n new_email = data.get('new_email')\n if new_email is None:\n return False\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = new_email\n db.session.add(self)\n return True", "def test_user_logged_in_post_changes_email(self):\n form_data = {\n 'password': self.password,\n 'new_email': \"[email protected]\",\n 'new_email2': \"[email protected]\"\n }\n self.assertTrue(self.login())\n post_response = self.post_change_email(form_data)\n self.assertEqual(post_response.status_code, 302)\n self.assertRedirects(post_response, reverse('account:overview'))\n user = User.objects.get(pk=self.user.id)\n self.assertEqual(user.email, '[email protected]')", "def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key", "def stash_data(self, form):\n get_adapter(self.request).stash_invite_email(\n self.request, form.cleaned_data[\"email\"]\n )", "def clean_email(self):\n existing = User.objects.filter(email__iexact=self.cleaned_data['email'])\n if existing.exists():\n raise forms.ValidationError(_(\"This email address is already in use. 
Please enter a different email \"\n \"address!\"))\n else:\n return self.cleaned_data['email']", "def restoreRequest(self, email):\n\t\tresult = self.db.request(\"getOne\", {\"email\": email});\n\t\tif result:\n\t\t\tnew_data = {};\n\t\t\tnew_data[\"field.utility\"] = makeHash(email + getTimeStamp() + result[\"password\"]);\n\n\t\t\tdata_prime = self.db.getData(result[\"_id\"], new_data);\n\n\t\t\tresult2 = self.db.request(\"update\", data_prime);\n\n\t\t\tif result2:\n\t\t\t\treturn \"Ok\";\n\t\t\telse:\n\t\t\t\treturn False;\n\n\t\telse:\n\t\t\treturn False;", "def post_change_email(self, data=None):\n return self.client.post(self.change_email_url, data)", "async def update_email_address(self, ctx, email_address: str):\n author = ctx.message.author\n\n if not EmailAddressCRUD.validate_email_address(email_address):\n await ctx.send(\"Enter a valid Email Address..!\")\n return\n\n if not self.email_list:\n with open(\"data/email/emails.json\", \"r\", encoding='utf-8') as file:\n self.email_list = json.load(file)\n\n if str(author.id) in self.email_list:\n self.email_list[str(author.id)] = email_address\n with open(\"data/email/emails.json\", \"w\", encoding='utf-8') as file:\n json.dump(self.email_list, file)\n await ctx.send(\"Email address has been updated successfully..!\")\n else:\n await ctx.send(\"There is no email address configured, \"\n \"Please use add command to add one..!\")\n return", "def clean_email(self):\r\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\r\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))\r\n return self.cleaned_data['email']", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.')\n )", "def email_post(request):\n if request.user.is_authenticated:\n messages.error(request, _(\"You are already logged in.\"))\n return redirect(ta_settings.LOGIN_REDIRECT)\n\n form = EmailForm(request.POST)\n if not form.is_valid():\n messages.error(request, _(\"The email address was invalid. Please check the address and try again.\"))\n return redirect(ta_settings.LOGIN_URL)\n\n email = ta_settings.NORMALIZE_EMAIL(form.cleaned_data[\"email\"])\n if not email:\n # The user's normalization function has returned something falsy.\n messages.error(\n request, _(\"That email address is not allowed to authenticate. Please use an alternate address.\")\n )\n return redirect(ta_settings.LOGIN_URL)\n\n email_login_link(request, email, next_url=request.GET.get(\"next\", \"\"))\n\n messages.success(request, _(\"Login email sent! Please check your inbox and click on the link to be logged in.\"))\n return redirect(ta_settings.LOGIN_URL)", "def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. 
Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError('This email address is already in use. Please supply a different email address.')\n return self.cleaned_data['email']", "def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email" ]
[ "0.65422535", "0.64260054", "0.63118654", "0.63064903", "0.60563016", "0.6038557", "0.6014613", "0.5898787", "0.58314365", "0.5781677", "0.577932", "0.5769288", "0.575497", "0.5719079", "0.57139456", "0.5708547", "0.5708547", "0.5700854", "0.5699276", "0.56723505", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906", "0.56614906" ]
0.6590844
0
Print list of instances with their attached volume id/size to console, ie
def list_ebss_by_instance(): ec2 = u.create_ec2_resource() instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()] sorted_instances = sorted(instances, key=itemgetter(0)) for (seconds, instance) in sorted_instances: volumes = instance.volumes.all() volume_strs = [] for v in volumes: volume_strs.append("%s (%s)"%(v.id, v.size)) print("%s: %s" % (u.get_name(instance.tags), ','.join(volume_strs)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def volumes(self):", "def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass", "def show_instances():\n return get_instances()", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def do_show(self, args):\n args = args.split()\n print(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' + args[1]\n if key in all_objs:\n print(all_objs[key])\n else:\n print(\"** no instance found **\")", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def do_show(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n print(dicti[\"{}.{}\".format(args[0], args[1])])\n else:\n print(\"** no instance found **\")", "def do_show(self, argv):\n argument_split = argv.split()\n aux = 0\n if len(argument_split) == 0:\n print(\"** class name missing **\")\n elif not argument_split[0] in self.__names:\n print(\"** class doesn't exist **\")\n elif len(argument_split) < 2:\n print(\"** instance id missing **\")\n elif argument_split[0] in self.__names:\n for key, obj in models.storage.all().items():\n if key == argument_split[0]+\".\"+argument_split[1]:\n aux = 1\n print(obj)\n if aux == 0:\n print(\"** no instance found **\")", "def do_show(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key not in storage.all():\n print(\"** no instance found **\")\n return\n print(storage.all()[key])", "def do_show(self, 
args):\n temp = args.split()\n\n if len(temp) == 0:\n print(\"** class name missing **\")\n return\n elif temp[0] not in self.myclasses:\n print(\"** class doesn't exist **\")\n return\n elif len(temp) < 2:\n print('** instance id missing **')\n return\n else:\n all_objs = storage.all()\n for i in all_objs.keys():\n if i == \"{}.{}\".format(temp[0], temp[1]):\n print(all_objs[i])\n return\n print('** no instance found **')", "def do_show(self, line):\n\n args = line.split()\n\n if not args:\n print(\"** class name missing **\")\n elif args[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n elif len(args) < 2:\n print(\"** instance id missing **\")\n else:\n key = args[0] + \".\" + args[1]\n dict_objects = storage.all()\n obj = dict_objects.get(key)\n if obj:\n print(obj)\n else:\n print(\"** no instance found **\")", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def show_vdcs(self):\n for v in self.vdcs:\n print v", "def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)", "def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if 
dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def show(vol_path):\n name = \"qemu-img\"\n image = \"breqwatr/qemu-img:latest\"\n path = Path(vol_path)\n vol_abspath = path.absolute().__str__()\n run = f\"qemu-img info {vol_abspath}\"\n mount = f\"-v {vol_abspath}:{vol_abspath}\"\n cmd = f\"docker run --rm -it --name {name} {mount} {image} {run}\"\n shell(cmd)", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def show_volume(self, volume, check=True):\n cmd = 'cinder show ' + volume.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_SHOW_TIMEOUT, check=check)\n\n volume_table = output_parser.table(stdout)\n show_result = {key: value for key, value in volume_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(volume.id))\n if volume.name:\n assert_that(show_result['name'], is_(volume.name))\n if volume.description:\n assert_that(show_result['description'],\n is_(volume.description))", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def populate_volumes(self):\n print \"Populating volumes info...\"\n volumes = self.get_all_volumes()\n for i in volumes:\n\n # handle associated instance's KEEP-tag\n associated_instance_id = i.attach_data.instance_id\n\n if associated_instance_id is None: # sometimes 
there is no attached instance\n instance_keep_tag = \"-------no-instance-found\"\n else:\n instance_keep_tag = Ins.spreadsheet[associated_instance_id]['KEEP_tag']\n self.spreadsheet[i.id] = dict(Name_tag=self.get_name_tag(i), id=i.id, KEEP_tag=self.get_keep_tag(i),\n instance_KEEP_tag=instance_keep_tag,\n associated_instance_id=associated_instance_id,\n PROD_tag=self.is_production(i), attachment_state=i.attachment_state(),\n state=i.volume_state(), status=i.status, iops=i.iops, size=i.size,\n created=i.create_time, region=i.region.name)", "def print_vscsi_attributes(self,objects):\n print(\"\\n\")\n print((\"LocalPartitionID\".ljust(35),\":\",objects.LocalPartitionID.value()))\n print((\"VirtualSlotNumber\".ljust(35),\":\",objects.VirtualSlotNumber.value()))\n print((\"RequiredAdapter\".ljust(35),\":\",objects.RequiredAdapter.value()))\n print((\"RemoteLogicalPartitionID\".ljust(35),\":\",objects.RemoteLogicalPartitionID.value()))\n print((\"RemoteSlotNumber\".ljust(35),\":\",objects.RemoteSlotNumber.value()))", "def listPVs(self):\n for pv in self._pvlist:\n print pv", "def list(self, arguments):\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\n 'NAME'.rjust(20),\n 'ADDRESS'.rjust(15),\n 'BOX'.rjust(35),\n 'VERSION'.rjust(12),\n 'PATH',\n ))\n for instance_name, instance in utils.instances().items():\n path = instance.get('path')\n if path and os.path.exists(path):\n self.activate(instance_name)\n mech_path = os.path.join(path, '.mech')\n if os.path.exists(mech_path):\n vmx = self.get_vmx(silent=True)\n if vmx:\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(wait=False, quiet=True, lookup=lookup)\n else:\n ip = colored.red(\"invalid\")\n if ip is None:\n ip = colored.yellow(\"poweroff\")\n elif not ip:\n ip = colored.green(\"running\")\n else:\n ip = colored.green(ip)\n else:\n ip = \"\"\n box_name = self.box_name or \"\"\n box_version = self.box_version or \"\"\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\n colored.green(instance_name.rjust(20)),\n ip.rjust(15),\n box_name.rjust(35),\n box_version.rjust(12),\n path,\n ))" ]
[ "0.66387", "0.65122116", "0.64756095", "0.6264545", "0.61685747", "0.61603457", "0.6125444", "0.61109924", "0.6072397", "0.60526633", "0.60198474", "0.59469944", "0.5940197", "0.5938797", "0.59375834", "0.5925745", "0.5900101", "0.5888648", "0.58722466", "0.5867416", "0.5822639", "0.5807083", "0.57890534", "0.578699", "0.5758984", "0.5737905", "0.57206404", "0.57115394", "0.5705024", "0.56919706" ]
0.6720033
0
Grows EBS volume for given task.
def grow_ebs_for_task(task_fragment, target_size_gb):

  ec2 = u.create_ec2_resource()
  client = u.create_ec2_client()

  # todo: don't crash on missing/duplicate names
  instances = {u.get_name(i.tags): i for i in ec2.instances.all()}

  ec2 = u.create_ec2_resource()
  instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
  sorted_instances = reversed(sorted(instances, key=itemgetter(0)))

  for (seconds, instance) in sorted_instances:
    task_name = u.get_name(instance.tags)
    hours_ago = (time.time()-seconds)/3600
    hours_ago+=8  # adjust for time being in UTC
    if task_fragment in task_name:
      print("Found instance %s launched %.1f hours ago" % (task_name, hours_ago))
      break
  print(instance.id)

  volumes = list(instance.volumes.all())
  assert len(volumes)==1, "Must have 1 volume"

  print("Growing %s to %s" % (volumes[0].id, target_size_gb))
  response = client.modify_volume(
      VolumeId=volumes[0].id,
      Size=target_size_gb,
  )
  assert u.is_good_response(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grow_volume(self, volume, growth, async=False):\n\n assert isinstance(volume, dict), \"volume configuration is invalid, 'dict' type expected\"\n assert volume.get('id'), \"volume.id can't be blank\"\n\n async_result = __node__['bollard'].apply_async('api.postgresql.grow-volume',\n args=(volume, growth),\n soft_timeout=(1 * 24) * 3600,\n hard_timeout=(1 * 24 + 1) * 3600,\n callbacks={'task.pull': grow_volume_callback})\n if async:\n return async_result.task_id\n else:\n return async_result.get()", "def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)", "def expand_volume(self, vol, new_size):\n self.authenticate_user()\n volume_name = self._get_vipr_volume_name(vol)\n size_in_bytes = vipr_utils.to_bytes(str(new_size) + \"G\")\n\n try:\n self.volume_obj.expand(\n self.configuration.vipr_tenant +\n \"/\" +\n self.configuration.vipr_project +\n \"/\" +\n volume_name,\n size_in_bytes,\n True)\n except vipr_utils.SOSError as e:\n if e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR:\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + volume_name + \": expand failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : %s expand failed\") % volume_name)", "def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )", "def perform_module_operation(self):\n size = self.module.params['size']\n state = self.module.params['state']\n new_name = self.module.params['new_name']\n vol_id = self.module.params['vol_id']\n vol_name = self.module.params['vol_name']\n sg_name = self.module.params['sg_name']\n cap_unit = self.module.params['cap_unit']\n new_sg_name = self.module.params['new_sg_name']\n\n if vol_name is not None and sg_name is None:\n self.show_error_exit(msg='Specify Storage group name along '\n 'with volume name')\n\n if size and cap_unit is None:\n cap_unit = 'GB'\n elif cap_unit and size is None:\n self.show_error_exit(msg='Parameters size and cap_unit are '\n 'required together')\n self.volume_id = vol_id\n\n vol = self.get_volume()\n\n existing_vol_size = 0\n if vol is not None:\n self.volume_id = vol['volumeId']\n vol_id = vol['volumeId']\n existing_vol_size = vol['cap_gb']\n\n changed = False\n\n # Call to create volume in storage group\n if state == 'present' and vol is None:\n if new_name:\n self.show_error_exit(msg=\"Invalid argument new_name \"\n \"while creating a volume\")\n if size is None:\n self.show_error_exit(msg='Size is required to create volume')\n vol_id = self.create_volume(vol_name, sg_name, size, cap_unit)\n changed = True\n\n if 
state == 'present' and vol and size:\n if size is None:\n self.show_error_exit(msg='Size is required to expand volume')\n # Convert the given size to GB\n if size is not None and size > 0:\n size = utils.get_size_in_gb(size, cap_unit)\n LOG.info('Existing Size: %s GB, Specified Size: %s GB',\n existing_vol_size, size)\n changed = self.expand_volume_helper(vol, size, existing_vol_size)\n\n if state == 'present' and vol and new_name is not None:\n if len(new_name.strip()) == 0:\n self.show_error_exit(msg=\"Please provide valid volume \"\n \"name.\")\n\n vol_name = vol['volume_identifier']\n if new_name != vol_name:\n LOG.info('Changing the name of volume %s to %s',\n vol_name, new_name)\n changed = self.rename_volume(vol_id, new_name) or changed\n\n if state == 'absent' and vol:\n LOG.info('Deleting volume %s ', vol_id)\n changed = self.delete_volume(vol_id) or changed\n\n if state == 'present' and vol and new_sg_name:\n vol_sg = vol['storageGroupId'][0]\n if vol_sg != new_sg_name:\n LOG.info('Moving volume from %s to %s', vol_sg, new_name)\n changed = self.move_volume_between_storage_groups(\n vol, sg_name, new_sg_name) or changed\n\n '''\n Finally update the module changed state and saving updated volume\n details\n '''\n self.u4v_conn.set_array_id(\n array_id=self.module.params['serial_no'])\n self.result[\"changed\"] = changed\n if state == 'present':\n self.result[\"volume_details\"] = self.get_volume()\n LOG.info(\"Closing unisphere connection %s\", self.u4v_conn)\n utils.close_connection(self.u4v_conn)\n LOG.info(\"Connection closed successfully\")\n self.module.exit_json(**self.result)", "def grow(self, size):\n # size of the instance\n if size is not None and (type(size) == int or size.isdigit()):\n size = { 'size': int(size) }\n else:\n # TODO : proper error\n raise Exception()\n\n if self.size > size['size']:\n # TODO : proper error\n raise Exception((\"This instance has a data storage volume of %d GB and cannot \" + \\\n \"be shrunk. 
(Tried to specify %d GB as new size.)\") % (self.size, size['size']))\n\n self.client.post(self.path+'/action', { 'resize': {'volume': size} })\n return True", "def extend_volume(self, volume, new_size):\n spdk_name = self._get_spdk_volume_name(volume.name)\n params = {'name': spdk_name, 'size': new_size * units.Gi}\n self._rpc_call('bdev_lvol_resize', params)", "def guest_grow_root_volume(self, userid, os_version):\n LOG.debug('Begin to punch grow partition commands to guest: %s',\n userid)\n linuxdist = self._dist_manager.get_linux_dist(os_version)()\n # get configuration commands\n config_cmds = linuxdist.get_extend_partition_cmds()\n # Creating tmp file with these cmds\n temp_folder = self._pathutils.get_guest_temp_path(userid)\n file_path = os.path.join(temp_folder, 'gpartvol.sh')\n LOG.debug('Creating file %s to contain root partition extension '\n 'commands' % file_path)\n with open(file_path, \"w\") as f:\n f.write(config_cmds)\n try:\n self._smtclient.punch_file(userid, file_path, \"X\")\n finally:\n LOG.debug('Removing the folder %s ', temp_folder)\n shutil.rmtree(temp_folder)", "def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)", "def resize_volume(self, delta_disk, vdisk_name):\n LOG.debug(\"Entering\")\n cmd = \"svctask expandvdisksize -size %s \" \\\n \"-unit b %s\" % (delta_disk, vdisk_name)\n\n output = self._svc_command(cmd)[0]\n LOG.debug(\"Exiting\")", "def volume_up(self) -> None:\n self.volume = min(self.volume + self.config.volume_step, 100)", "def GrowInstanceDisk(self, instance, disk, amount, wait_for_sync=None,\n reason=None):\n body = {\n \"amount\": amount,\n }\n\n _SetItemIf(body, wait_for_sync is not None, \"wait_for_sync\", wait_for_sync)\n\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_POST,\n (\"/%s/instances/%s/disk/%s/grow\" %\n (GANETI_RAPI_VERSION, instance, disk)),\n query, body)", "async def expand(self, job, id, options):\n pool = await self.middleware.call('pool.get_instance', id)\n if osc.IS_LINUX:\n if options.get('passphrase'):\n raise CallError('Passphrase should not be supplied for this platform.')\n # FIXME: We have issues in ZoL where when pool is created with partition uuids, we are unable\n # to expand pool where all pool related options error out saying I/O error\n # https://github.com/zfsonlinux/zfs/issues/9830\n raise CallError('Expand is not supported on this platform yet because of underlying ZFS issues.')\n else:\n if pool['encrypt']:\n if not pool['is_decrypted']:\n raise CallError('You can only expand decrypted pool')\n\n for error in (\n await self.middleware.call('pool.pool_lock_pre_check', pool, options['geli']['passphrase'])\n ).errors:\n raise CallError(error.errmsg)\n\n all_partitions = {p['name']: p for p in await self.middleware.call('disk.list_all_partitions')}\n\n try:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 16\n geli_resize = []\n try:\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK':\n logger.debug('Not expanding vdev of type %r', vdev['type'])\n continue\n\n if vdev['status'] != 'ONLINE':\n logger.debug('Not expanding vdev that is %r', vdev['status'])\n continue\n\n part_data = all_partitions.get(vdev['device'])\n if not part_data:\n logger.debug('Unable to find partition data for %s', vdev['device'])\n\n partition_number = part_data['partition_number']\n if not partition_number:\n logger.debug('Could not parse partition number from %r', vdev['device'])\n continue\n\n assert 
part_data['disk'] == vdev['disk']\n\n if osc.IS_LINUX:\n await run(\n 'sgdisk', '-d', str(partition_number), '-n', f'{partition_number}:0:0',\n '-c', '2:', '-u', f'{partition_number}:{part_data[\"partition_uuid\"]}',\n '-t', f'{partition_number}:BF01', part_data['path']\n )\n await run('partprobe', os.path.join('/dev', part_data['disk']))\n else:\n await run('camcontrol', 'reprobe', vdev['disk'])\n await run('gpart', 'recover', vdev['disk'])\n await run('gpart', 'resize', '-i', str(partition_number), vdev['disk'])\n\n if osc.IS_FREEBSD and pool['encrypt']:\n geli_resize_cmd = (\n 'geli', 'resize', '-s', str(part_data['size']), vdev['device']\n )\n rollback_cmd = (\n 'gpart', 'resize', '-i', str(partition_number), '-s', str(part_data['size']), vdev['disk']\n )\n\n logger.warning('It will be obligatory to notify GELI that the provider has been resized: %r',\n join_commandline(geli_resize_cmd))\n logger.warning('Or to resize provider back: %r',\n join_commandline(rollback_cmd))\n geli_resize.append((geli_resize_cmd, rollback_cmd))\n finally:\n if osc.IS_FREEBSD and geli_resize:\n await self.__geli_resize(pool, geli_resize, options)\n finally:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 0\n\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK' or vdev['status'] != 'ONLINE':\n continue\n\n await self.middleware.call('zfs.pool.online', pool['name'], vdev['guid'], True)", "def add_volume(self, size=100):\n tfvars_file = \"terraform.tfvars.json\"\n with open(os.path.join(self.cluster_path, tfvars_file)) as f:\n tfvars = json.load(f)\n\n cluster_id = tfvars['cluster_id']\n worker_pattern = f'{cluster_id}-worker*'\n logger.info(f'Worker pattern: {worker_pattern}')\n self.create_ebs_volumes(worker_pattern, size)", "def create_transfer_tasks(\n task_queue, src_layer_path, dest_layer_path, \n chunk_size=None, shape=Vec(2048, 2048, 64), \n fill_missing=False, translate=(0,0,0), \n bounds=None, mip=0, preserve_chunk_size=True\n ):\n shape = Vec(*shape)\n vol = CloudVolume(src_layer_path, mip=mip)\n translate = Vec(*translate) // vol.downsample_ratio\n \n if not chunk_size:\n chunk_size = vol.info['scales'][mip]['chunk_sizes'][0]\n chunk_size = Vec(*chunk_size)\n\n try:\n dvol = CloudVolume(dest_layer_path, mip=mip)\n except Exception: # no info file\n info = copy.deepcopy(vol.info)\n dvol = CloudVolume(dest_layer_path, info=info)\n dvol.commit_info()\n\n dvol.info['scales'] = dvol.info['scales'][:mip+1]\n dvol.info['scales'][mip]['chunk_sizes'] = [ chunk_size.tolist() ]\n dvol.commit_info()\n\n create_downsample_scales(dest_layer_path, \n mip=mip, ds_shape=shape, preserve_chunk_size=preserve_chunk_size)\n \n if bounds is None:\n bounds = vol.bounds.clone()\n else:\n bounds = vol.bbox_to_mip(bounds, mip=0, to_mip=mip)\n bounds = Bbox.clamp(bounds, vol.bounds)\n\n total = int(reduce(operator.mul, np.ceil(bounds.size3() / shape)))\n for startpt in tqdm(xyzrange( bounds.minpt, bounds.maxpt, shape ), desc=\"Inserting Transfer Tasks\", total=total):\n task = TransferTask(\n src_path=src_layer_path,\n dest_path=dest_layer_path,\n shape=shape.clone(),\n offset=startpt.clone(),\n fill_missing=fill_missing,\n translate=translate,\n mip=mip,\n )\n task_queue.insert(task)\n task_queue.wait('Uploading Transfer Tasks')\n\n job_details = {\n 'method': {\n 'task': 'TransferTask',\n 'src': src_layer_path,\n 'dest': dest_layer_path,\n 'shape': list(map(int, shape)),\n 'fill_missing': fill_missing,\n 'translate': list(map(int, translate)),\n 'bounds': [\n 
bounds.minpt.tolist(),\n bounds.maxpt.tolist()\n ],\n 'mip': mip,\n },\n 'by': OPERATOR_CONTACT,\n 'date': strftime('%Y-%m-%d %H:%M %Z'),\n }\n\n dvol = CloudVolume(dest_layer_path)\n dvol.provenance.sources = [ src_layer_path ]\n dvol.provenance.processing.append(job_details) \n dvol.commit_provenance()\n\n if vol.path.protocol != 'boss':\n vol.provenance.processing.append(job_details)\n vol.commit_provenance()", "def volume_up(self) -> None:\n newvolume = min(self._client.volume + 4, 100)\n self._client.set_volume(newvolume)", "def expand_volume_helper(self, vol, size_in_gb, existing_vol_size):\n vol_id = vol['volumeId']\n try:\n if size_in_gb < existing_vol_size:\n self.show_error_exit(msg='Current volume size {0} GB is '\n 'greater than {1} GB specified.'.\n format(existing_vol_size, size_in_gb))\n elif size_in_gb > existing_vol_size:\n if 'rdfGroupId' in vol:\n array_id = self.module.params['serial_no']\n array_details = self.common.get_array(array_id=array_id)\n if utils.parse_version(array_details['ucode'])\\\n < utils.parse_version(self.foxtail_version):\n msg = (\"Expansion of SRDF protected volume is\"\n \" supported from v5978.444.444 onward. Please\"\n \" upgrade the array for this support.\")\n self.show_error_exit(msg=msg)\n return self.srdf_volume_expansion(vol, size_in_gb,\n existing_vol_size)\n return self.expand_volume(vol_id, size_in_gb,\n existing_vol_size)\n\n LOG.info('Current volume size and specified volume size'\n ' are equal')\n return False\n except Exception as e:\n error_message = 'Expand volume %s failed with error: %s' \\\n % (vol_id, str(e))\n self.show_error_exit(msg=error_message)", "def asm_volume_puse(self, name):\n sql = '''select round(((TOTAL_MB-FREE_MB)/TOTAL_MB*100),2) \n from v$asm_diskgroup_stat where name = '{0}' '''.format(name)\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def resize_volume(self, size):\n curr_size = self.volume.size\n if size <= curr_size:\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\n \"than the current volume size of '%s'.\" % curr_size)\n body = {\"volume\": {\"size\": size}}\n self.manager.action(self, \"resize\", body=body)", "def volumes(self):", "def volume_increase():\n request_command(tv_command=TVCommand.volume_increase)", "def ensure_space(self,\n context: context.RequestContext,\n volume: objects.Volume) -> bool:\n\n # Check to see if the cache is actually limited.\n if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:\n return True\n\n # Make sure that we can potentially fit the image in the cache\n # and bail out before evicting everything else to try and make\n # room for it.\n if (self.max_cache_size_gb != 0 and\n volume.size > self.max_cache_size_gb):\n return False\n\n # Assume the entries are ordered by most recently used to least used.\n entries = self.db.image_volume_cache_get_all(\n context,\n **self._get_query_filters(volume))\n\n current_count = len(entries)\n\n current_size = 0\n for entry in entries:\n current_size += entry['size']\n\n # Add values for the entry we intend to create.\n current_size += volume.size\n current_count += 1\n\n LOG.debug('Image-volume cache for %(service)s current_size (GB) = '\n '%(size_gb)s (max = %(max_gb)s), current count = %(count)s '\n '(max = %(max_count)s).',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'max_gb': self.max_cache_size_gb,\n 'count': current_count,\n 'max_count': self.max_cache_size_count})\n\n while (((current_size > self.max_cache_size_gb and\n 
self.max_cache_size_gb > 0)\n or (current_count > self.max_cache_size_count and\n self.max_cache_size_count > 0))\n and len(entries)):\n entry = entries.pop()\n LOG.debug('Reclaiming image-volume cache space; removing cache '\n 'entry %(entry)s.', {'entry': self._entry_to_str(entry)})\n self._delete_image_volume(context, entry)\n current_size -= entry['size']\n current_count -= 1\n LOG.debug('Image-volume cache for %(service)s new size (GB) = '\n '%(size_gb)s, new count = %(count)s.',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'count': current_count})\n\n # It is only possible to not free up enough gb, we will always be able\n # to free enough count. This is because 0 means unlimited which means\n # it is guaranteed to be >0 if limited, and we can always delete down\n # to 0.\n if self.max_cache_size_gb > 0:\n if current_size > self.max_cache_size_gb > 0:\n LOG.warning('Image-volume cache for %(service)s does '\n 'not have enough space (GB).',\n {'service': volume.service_topic_queue})\n return False\n\n return True", "def resize_volume(self, volumeObj, sizeInGb, bsize=1000):\n current_vol = self.get_volume_by_id(volumeObj.id)\n if current_vol.size_kb > (sizeInGb * bsize * bsize):\n raise RuntimeError(\n \"resize_volume() - New size needs to be bigger than: %d KBs\" % current_vol.size_kb)\n \n resizeDict = { 'sizeInGB' : str(sizeInGb) }\n response = self.conn.connection._do_post(\"{}/{}{}/{}\".format(\n self.conn.connection._api_url, \"instances/Volume::\", volumeObj.id, 'action/setVolumeSize'), json=resizeDict)\n return response", "def __mount_ebs_volume( self ):\n ebs_volume_size = self.instance_tag( 'ebs_volume_size' ) or '0'\n ebs_volume_size = int( ebs_volume_size )\n if ebs_volume_size:\n instance_name = self.instance_tag( 'Name' )\n cluster_ordinal = int( self.instance_tag( 'cluster_ordinal' ) )\n volume_name = '%s__%d' % (instance_name, cluster_ordinal)\n volume = EC2VolumeHelper( ec2=self.ec2,\n availability_zone=self.availability_zone,\n name=volume_name,\n size=ebs_volume_size,\n volume_type=\"gp2\" )\n # TODO: handle case where volume is already attached\n device_ext = '/dev/sdf'\n device = '/dev/xvdf'\n volume.attach( self.instance_id, device_ext )\n\n # Wait for inode to appear and make sure its a block device\n while True:\n try:\n assert stat.S_ISBLK( os.stat( device ).st_mode )\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n time.sleep( 1 )\n else:\n raise\n\n # Only format empty volumes\n volume_label = volume_label_hash( volume_name )\n if check_output( [ 'file', '-sL', device ] ).strip( ) == device + ': data':\n check_call( [ 'mkfs', '-t', 'ext4', device ] )\n check_call( [ 'e2label', device, volume_label ] )\n else:\n # If the volume is not empty, verify the file system label\n actual_label = check_output( [ 'e2label', device ] ).strip( )\n if actual_label != volume_label:\n raise AssertionError(\n \"Expected volume label '%s' (derived from '%s') but got '%s'\" %\n (volume_label, volume_name, actual_label) )\n current_mount_point = self.__mount_point( device )\n if current_mount_point is None:\n mkdir_p( self.persistent_dir )\n check_call( [ 'mount', device, self.persistent_dir ] )\n elif current_mount_point == self.persistent_dir:\n pass\n else:\n raise RuntimeError(\n \"Can't mount device %s on '%s' since it is already mounted on '%s'\" % (\n device, self.persistent_dir, current_mount_point) )\n else:\n # No persistent volume is attached and the root volume is off limits, so we will need\n # to place persistent data on the 
ephemeral volume.\n self.persistent_dir = self.ephemeral_dir", "def volume_up(self):\n self._remote.volume(int(self._volume * 60) + 2)", "def get_capacity():\n fs.get_capacity()", "def add(self, task, qhigh, qlow):\n try:\n qlen = self.tasks.qsize()\n if qlen > qhigh:\n print \"Throttling input, reached HWM:\", qhigh\n while qlen > qlow:\n delay = random.randint(1,10)\n time.sleep(delay)\n qlen = self.tasks.qsize()\n print \"Throttling released, down to LWM:\", qlow\n except NotImplementedError:\n # Skip on Mac OS X (WARNING - use on OS X in testing only, queue \n # size will max out at a paltry 32768 items)\n pass\n try:\n self.tasks.put(task)\n self.recordsProcessed += task.datalen\n except qFull:\n # While testing: we shouldn't hopefully end up here...\n print \"ERR: queue full\"\n sys.exit(-1)", "def manage_existing_get_size(self, volume, existing_ref):\n existing_vol_name = self._get_existing_vol_name(existing_ref)\n\n # The ZFSSA NFS driver only has one mounted share.\n local_share_mount = self._get_mount_point_for_share(\n self._mounted_shares[0])\n local_vol_path = os.path.join(local_share_mount, existing_vol_name)\n\n try:\n if os.path.isfile(local_vol_path):\n size = int(math.ceil(float(\n utils.get_file_size(local_vol_path)) / units.Gi))\n except (OSError, ValueError):\n err_msg = (_(\"Failed to get size of existing volume: %(vol). \"\n \"Volume Manage failed.\"), {'vol': existing_vol_name})\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n\n LOG.debug(\"Size volume: %(vol)s to be migrated is: %(size)s.\",\n {'vol': existing_vol_name, 'size': size})\n\n return size", "def disk_set(vm_hostname, size):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n current_size_gib = vm.dataset_obj['disk_size_gib']\n if size.startswith('+'):\n new_size_gib = current_size_gib + parse_size(size[1:], 'g')\n elif size.startswith('-'):\n new_size_gib = current_size_gib - parse_size(size[1:], 'g')\n else:\n new_size_gib = parse_size(size, 'g')\n\n if new_size_gib == vm.dataset_obj['disk_size_gib']:\n raise Warning('Disk size is the same.')\n\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_disk_set(new_size_gib)\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n vm.hypervisor.vm_set_disk_size_gib(vm, new_size_gib)\n\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n vm.dataset_obj['disk_size_gib'] = new_size_gib\n vm.dataset_obj.commit()", "def volume_up(self) -> None:\n if self.volume_level is None:\n return\n volume = round(self.volume_level * MAX_VOLUME)\n self._monoprice.set_volume(self._zone_id, min(volume + 1, MAX_VOLUME))" ]
[ "0.64236933", "0.6261913", "0.60468847", "0.587187", "0.5659941", "0.5560449", "0.53718984", "0.534629", "0.5339353", "0.53099704", "0.52602965", "0.5249011", "0.518899", "0.5125479", "0.50864977", "0.5082796", "0.5080378", "0.50407803", "0.50402933", "0.5031618", "0.5022762", "0.5007264", "0.49975425", "0.49558678", "0.49314073", "0.490756", "0.4846032", "0.48446414", "0.4841026", "0.4829933" ]
0.69317895
0
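Each row above declares a triplet objective in its metadata field ("triplet": [["query", "document", "negatives"]]), meaning the query is intended to be paired with its matching document against the listed negatives. As a minimal illustration only — the row dict below is a hypothetical stand-in for one parsed row, not part of the dataset itself — such a row could be expanded into training triplets like this:

def row_to_triplets(row):
    # Expand a single parsed row into (query, document, negative) triplets,
    # following the objective declared in the row's metadata.
    query = row["query"]          # short natural-language description
    document = row["document"]    # the matching code snippet
    negatives = row["negatives"]  # the non-matching code snippets
    return [(query, document, negative) for negative in negatives]

# Toy usage with a row shaped like the ones in this dump:
toy_row = {
    "query": "Grows EBS volume for given task.",
    "document": "def grow_ebs_for_task(task_fragment, target_size_gb): ...",
    "negatives": [
        "def grow_volume(self, volume, growth): ...",
        "def extend_volume(self, volume, new_size): ...",
    ],
}
print(len(row_to_triplets(toy_row)))  # prints 2, one triplet per negative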
This class tests the PyTorchYolo object detector.
def get_pytorch_yolo(get_default_cifar10_subset):
    import cv2
    import torch

    from pytorchyolo import models
    from pytorchyolo.utils.loss import compute_loss

    from art.estimators.object_detection.pytorch_yolo import PyTorchYolo

    model_path = "/tmp/PyTorch-YOLOv3/config/yolov3.cfg"
    weights_path = "/tmp/PyTorch-YOLOv3/weights/yolov3.weights"
    model = models.load_model(model_path=model_path, weights_path=weights_path)

    class YoloV3(torch.nn.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, x, targets=None):
            if self.training:
                outputs = self.model(x)
                # loss is averaged over a batch. Thus, for patch generation use batch_size = 1
                loss, loss_components = compute_loss(outputs, targets, self.model)
                loss_components_dict = {"loss_total": loss}
                return loss_components_dict
            else:
                return self.model(x)

    model = YoloV3(model)

    object_detector = PyTorchYolo(
        model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=("loss_total",)
    )

    n_test = 10
    (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset
    x_test_cifar10 = x_test_cifar10[0:n_test]

    x_test = cv2.resize(
        x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC
    ).transpose((2, 0, 1))
    x_test = np.expand_dims(x_test, axis=0)
    x_test = np.repeat(x_test, repeats=2, axis=0)

    # Create labels
    result = object_detector.predict(x=x_test)

    y_test = [
        {
            "boxes": result[0]["boxes"],
            "labels": result[0]["labels"],
            "scores": np.ones_like(result[0]["labels"]),
        },
        {
            "boxes": result[1]["boxes"],
            "labels": result[1]["labels"],
            "scores": np.ones_like(result[1]["labels"]),
        },
    ]

    yield object_detector, x_test, y_test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detection(input_path, output_path, yolo_model_path):\n detector = VideoObjectDetection()\n # this function sets the model type of the object detection instance you created to the YOLOv3 model\n detector.setModelTypeAsYOLOv3()\n # this function accepts a string that must be the path to the model file,\n # it must correspond to the model typeset for the object detection instance\n detector.setModelPath(yolo_model_path)\n # this function loads the model from the path given\n detector.loadModel()\n\n # the function performs object detection on a video file or video live-feed\n # after the model has been loaded into the instance that was created\n detector.detectCustomObjectsFromVideo(input_file_path=input_path, output_file_path=output_path,\n frames_per_second=20, log_progress=True)", "def yolo_test_file(self):\n # Detect objects\n annotatedImage, predictedObjects = self.detect_from_file(\n self.inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(10)\n # Save annotated image\n if self.saveAnnotatedImage:\n cv2.imwrite(self.outputFile, annotatedImage)\n # Save the parameters of detected objects in xml format\n if self.saveAnnotatedXML:\n xmlFileName = os.path.join(\n self.textOutputFolder,\n self.outputFile.split('.')[0] + '.xml')\n self.save_xml(xmlFileName, predictedObjects)", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def yolo_test_video(self):\n # Open the input video, blocking call\n inputVideo = cv2.VideoCapture(self.inputFile)\n\t\t\n # Get infomration about the input video\n codec = int(inputVideo.get(cv2.CAP_PROP_FOURCC))\n fps = int(inputVideo.get(cv2.CAP_PROP_FPS))\n frameWidth = int(inputVideo.get(cv2.CAP_PROP_FRAME_WIDTH))\n frameHeight = int(inputVideo.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Open the output stream\n outputVideo = cv2.VideoWriter(self.outputFile,\n codec,\n fps,\n (frameWidth,frameHeight))\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n totalFrames = inputVideo.get(cv2.CAP_PROP_FRAME_COUNT)\n \t \n\tavgGrabTime = 0\n\tavgYoloTime = 0\n\tavgWriteTime = 0\n \n # For each frame in the video\n while True:\n \n startTime = time.time()\n \n # Calculate the time it takes to grab a frame\n startGrabTime = time.time()\n grabbed, frame = inputVideo.read()\n endGrabTime = time.time() \n\t avgGrabTime+=(endGrabTime-startGrabTime)\n\t \n\n if 
grabbed:\n\t\t\n # Calculate the time it takes to run YOLO pipeline \n\t\tstartYoloTime = time.time()\n annotatedFrame, predictedObjects = self.detect_from_image(frame)\n\t\tendYoloTime = time.time()\n\t\tavgYoloTime+= ( endYoloTime - startYoloTime)\n\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n \t\n\t\tcurrentTime = time.time()\n\t\telapsedTime = currentTime - startTime\n\t\tcurrentFPS = (1)/elapsedTime \n\t\t \t\n #cv2.rectangle(annotatedFrame, (0, 0), (30, 30), (0,0,0), -1)\n cv2.putText(\n annotatedFrame, 'FPS' + ': %.2f' % currentFPS,\n (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2\n )\n\t\t\n # Calculate the time it takes to write an annotated frame to video\n\t\tstartWriteTime = time.time()\n outputVideo.write(annotatedFrame)\n\t\tendWriteTime = time.time()\n\t\tavgWriteTime +=(endWriteTime - startWriteTime)\n\t\n else:\n inputVideo.set(cv2.CAP_PROP_POS_FRAMES, frameIndex-1)\n cv2.waitKey(100)\n\n if frameIndex==totalFrames:\n break\n\t\t\n inputVideo.release()\n outputVideo.release()\n cv2.destroyAllWindows()\n \n avgGrabTime/=totalFrames\n avgYoloTime/=totalFrames\n avgWriteTime/=totalFrames\n\n if self.verbose:\n print ('Average time for extracting compressed video frame : %.3f' %avgGrabTime)\n print ('Average time for YOLO object detection : %.3f' %avgYoloTime )\n print ('Average time for writing frame to video : %.3f' %avgWriteTime)", "def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]", "def run_yolo_onpic(image_path):\n try:\n Image.open(image_path)\n # print('running detector on %s' % image_path)\n except:\n print('Cannot open image', image_path)\n return 0, 0, 0\n output_file = \"predictions_\" + os.path.basename(image_path)\n test_detector(b'cfg/coco.data', b'cfg/yolo.cfg', b'yolo.weights',\n image_path.encode('utf-8'), parameters.YOLO_THRES, 0.5, output_file.encode('utf-8'))\n w, h, o = read_bounding_boxes('bounding_boxes.txt')\n return w, h, o", "def run_detect(**kwargs):\n 
cmd = 'python yolov3/detect.py'\n pms_list = [\n 'image_folder', 'model_def', \n 'weights_path', 'class_path', \n 'conf_thres', 'nms_thres',\n 'batch_size', 'n_cpu', \n 'img_size', 'checkpoint_model'\n ]\n call_command(pms_list, cmd, kwargs)", "def object_detection(self):\r\n pass", "def yolo_test_db(self):\n # For each file in database\n for inputFileName in tqdm.tqdm(os.listdir(self.inputFolder)):\n # File path\n inputFile = os.path.join(self.inputFolder, inputFileName)\n # Detect object\n annotatedImage, predictedObjects = self.detect_from_file(\n inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(1)\n # Save annotated image\n if self.saveAnnotatedImage:\n outputFileName = os.path.join(self.outputFolder, inputFileName)\n cv2.imwrite(outputFileName, annotatedImage)\n # Save the parameters of detected objects in xml format\n if self.saveAnnotatedXML:\n xmlFileName = os.path.join(\n\n self.textOutputFolder, fileName.split('.')[0] + '.xml')\n self.save_xml(xmlFileName, predictedObjects)", "def testVideoOnObjectDetection(testVideo1, testVideo2, label):\n \n this_dir = os.path.abspath(os.path.join(os.getcwd(), '../objectDetection/testing/'))\n \n print('****************************************************************************************************')\n print('getenv: ', os.getcwd())\n print(\"this_dir: \", this_dir)\n print('labelmap: ', os.path.abspath(os.path.join(this_dir, \"..\", \"training/labelmap.pbtxt\")))\n print('****************************************************************************************************')\n \n GRAPH_PATH = os.path.abspath(os.path.join(this_dir, \"..\", \"inference_graph/frozen_inference_graph.pb\"))\n LABEL_PATH = os.path.abspath(os.path.join(this_dir, \"..\", \"training/labelmap.pbtxt\"))\n \n video1 = cv2.VideoCapture(testVideo1)\n video2 = cv2.VideoCapture(testVideo2)\n \n coors = objectDetection.coordinates.coordinates()\n obj_detect = hand_detection.Object_Detection(coors, GRAPH_PATH, LABEL_PATH, video1, video2, Verbose=True)\n \n results = []\n \n while(video1.isOpened() and video2.isOpened()):\n output = obj_detect.Detect()\n if output is None: break\n else: results.append(output)\n \n cv2.destroyAllWindows()\n \n print(results)\n print([result for result in results])\n correct = CheckWrong([result[\"video1\"][\"classes\"] for result in results], label)\n \n assert correct == True\n \n return", "def enable_detector_yolo():\n global enabled_detector, enable_detection, detector, use_cuda\n if not enable_detection:\n enable_detection = True\n\n thresh = request.form[\"thresh\"]\n confidence = request.form['confidence']\n distance_check = request.form['tracker_dst']\n\n if thresh == '':\n thresh = float(0.25)\n\n if confidence == '':\n confidence = float(0.25)\n\n if distance_check == '':\n distance_check = float(350)\n\n print('Using thresh and conf {} {}'.format(thresh, confidence))\n detector = Yolo(confidence_param=confidence,\n thresh_param=thresh, use_cuda=use_cuda, distance_check=distance_check)\n if detector is not None:\n enabled_detector = \"Yolo4 tiny detector\"\n return render_settings_view()", "def test_predictor():", "def run_test(**kwargs):\n cmd = 'python yolov3/test.py'\n pms_list = [\n 'batch_size', 'model_def',\n 'data_config', 'weights_path',\n 'class_path', 'iou_thres',\n 'nms_thres', 'conf_thres',\n 'n_cpu', 'img_size'\n ]\n call_command(pms_list, cmd, kwargs)", "def main(\n image = None ,\n gpu = -1,\n weights_path= f\"{ Path(__file__).parent }/weights/yolov3.weights\",\n 
background = False\n):\n print( weights_path )\n my_path = Path( __file__ ).parent\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=int, default= gpu )\n parser.add_argument('--cfg', type=str, default=my_path/'config/yolov3_default.cfg')\n parser.add_argument('--ckpt', type=str,\n help='path to the checkpoint file')\n parser.add_argument('--weights_path', type=str,\n default= weights_path, help='path to weights file')\n parser.add_argument('--image', type=str , default= image )\n parser.add_argument('--background', type=bool,\n default= background , help='background(no-display mode. save \"./output.png\")')\n parser.add_argument('--detect_thresh', type=float,\n default= 0.5 , help='confidence threshold')\n args = parser.parse_args()\n\n with open(args.cfg, 'r') as f:\n cfg = yaml.load(f)\n\n imgsize = cfg['TEST']['IMGSIZE']\n model = YOLOv3(cfg['MODEL'])\n\n confthre = cfg['TEST']['CONFTHRE'] \n nmsthre = cfg['TEST']['NMSTHRE']\n\n if args.detect_thresh:\n confthre = args.detect_thresh\n\n\n\n img = imread( args.image )\n if img is None :\n print( \"load image failed\" )\n print( args.image )\n return\n\n img_raw = img.copy()[:, :, ::-1].transpose((2, 0, 1))\n img, info_img = preprocess(img, imgsize, jitter=0) # info = (h, w, nh, nw, dx, dy)\n img = np.transpose(img / 255., (2, 0, 1))\n img = torch.from_numpy(img).float().unsqueeze(0)\n\n if args.gpu >= 0:\n model.cuda(args.gpu)\n img = Variable(img.type(torch.cuda.FloatTensor))\n else:\n img = Variable(img.type(torch.FloatTensor))\n\n assert args.weights_path or args.ckpt, 'One of --weights_path and --ckpt must be specified'\n\n if args.weights_path:\n print(\"loading yolo weights %s\" % (args.weights_path))\n parse_yolo_weights(model, args.weights_path)\n elif args.ckpt:\n print(\"loading checkpoint %s\" % (args.ckpt))\n state = torch.load(args.ckpt)\n if 'model_state_dict' in state.keys():\n model.load_state_dict(state['model_state_dict'])\n else:\n model.load_state_dict(state)\n\n model.eval()\n\n\n with torch.no_grad():\n outputs1 = model(img)\n # np.save(\"output.npy\" , outputs.numpy() )\n # torch.save( outputs1 , \"outputs1.pt\" )\n out1 = torch.load( \"outputs1.pt\" )\n rere = torch.equal( outputs1 , out1 )\n outputs = postprocess(outputs1, 80, confthre, nmsthre)\n\n a = \"hoho\"\n\n\n if outputs[0] is None:\n print(\"No Objects Deteted!!\")\n return\n\n coco_class_names, coco_class_ids, coco_class_colors = get_coco_label_names()\n\n bboxes = list()\n classes = list()\n colors = list()\n\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in outputs[0]:\n\n cls_id = coco_class_ids[int(cls_pred)]\n print(int(x1), int(y1), int(x2), int(y2), float(conf), int(cls_pred))\n print('\\t+ Label: %s, Conf: %.5f' %\n (coco_class_names[cls_id], cls_conf.item()))\n box = yolobox2label([y1, x1, y2, x2], info_img)\n bboxes.append(box)\n classes.append(cls_id)\n colors.append(coco_class_colors[int(cls_pred)])\n\n # args.background = True\n\n if args.background:\n import matplotlib\n matplotlib.use('Agg')\n\n from utils.vis_bbox import vis_bbox\n\n vis_bbox(\n img_raw, bboxes, label=classes, label_names=coco_class_names,\n instance_colors=colors, linewidth=2)\n\n\n if args.background:\n output = Path( \"./output\" )\n output.mkdir( parents=True , exist_ok=True )\n now = datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n output /= f\"output-{now}.png\"\n plt.savefig( output )\n\n return str( output.absolute() )\n # return plt_to_qpixmap(plt.gca())\n else :\n plt.show()", "def test():\n\n # load image and adjust its format\n if 
MEMORY_CACHE:\n test_input = dataset[0]['file']\n oriImg = test_input.byte().permute((1, 2, 0)).numpy() # B,G,R order\n else:\n oriImg = cv2.imread(dataset[0]['file']) # B,G,R order\n test_input = torch.from_numpy(oriImg).permute((2, 0, 1)).float()\n \n # transfer data on GPU on demand\n if CUDA:\n test_input = test_input.cuda()\n\n # perform prediction\n net.eval()\n with torch.no_grad():\n result = net(test_input.unsqueeze(0))[0]\n\n print(result)\n\n # draw rectangles and its class\n img = cv2.cvtColor(oriImg, cv2.COLOR_BGR2RGB)\n for box, label, score in zip(result['boxes'], result['labels'], result['scores']):\n # if score > 0.5:\n if label < len(orig_labels):\n img = cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)\n img = cv2.putText(img, '{}: {:.0%}'.format(orig_labels[label], score), (box[0] + 5, box[3] - 5), cv2.FONT_HERSHEY_SIMPLEX, .7, (0, 255, 0), 2, cv2.LINE_AA)\n plt.imshow(img)\n plt.axis('off')\n plt.show()", "def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)", "def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()\n cls.livenessEstimator = cls.faceEngine.createLivenessV1Estimator()\n cls.detection = cls.detector.detectOne(VLImage.load(filename=CLEAN_ONE_FACE))", "def predict_from_cv2(yolo, inputfilepath):\n\n print(\"call func of predict_from_cv2\")\n img = cv2.imread(inputfilepath)\n yolo_results = yolo.predict(img)\n for yolo_result in yolo_results:\n print(yolo_result.get_detect_result())", "def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # obtain VideoCapture object \n vid = cv2.VideoCapture(video_path)\n \n # obtain width, height and fps of video\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n\n # obtain video codec\n codec = cv2.VideoWriter_fourcc(*'XVID')\n \n # obtain output_path\n # output_path must be .mp4\n out = cv2.VideoWriter(output_path, codec, fps+1, (width, height)) \n\n # create list to store images\n images = []\n \n # variable to track frame\n frame = 0 \n \n while True:\n \n try:\n \n # grabs, decodes and returns the next video frame\n _, image = vid.read()\n \n # append original image to original_images list\n images.append(image[:])\n \n # increment frame\n frame += 1\n \n \n # if current frame is less than batch_frames\n if frame < batch_frames:\n \n # move to next frame \n continue\n \n # iterate over images in 
chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # convert original image to grayscale \n image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n \n # obtain concat frame if none exist\n if x == 0: \n \n concat_image = image[:]\n \n # concatenate subsequent frames to concat_image\n else:\n \n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n except:\n \n break\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n\n # list to store bboxes from respective scales\n pred_bbox = []\n\n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n\n # append to pred_bbox\n pred_bbox.append(pred_result)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n\n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)\n\n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n\n # draw bbox on latest image in orignal_images\n image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image frame to video path if path to save is given\n if output_path != '': out.write(image)\n \n # display image frame (i.e play video) if show is true \n if show:\n \n # show the image\n cv2.imshow('output', image)\n \n # if q key is presssed\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n \n # end session\n cv2.destroyAllWindows()\n \n # break out of while loop\n break\n \n # When everything done, release the capture\n vid.release()\n cv2.destroyAllWindows()", "def detect_image(yolo_v3_model, image_paths, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # create list to store images\n original_images = []\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # obtain original image\n original_image = cv2.imread(image_paths[x])\n \n # append original image to original_images list\n original_images.append(original_image[:])\n \n # convert original image to grayscale \n image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n\n # obtain concat frame if none exist\n if x == 0: \n\n concat_image = image[:]\n\n # concatenate subsequent frames to concat_image\n else:\n\n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated 
image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n \n # list to store bboxes from respective scales\n pred_bbox = []\n \n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_result_reshaped = tf.reshape(pred_result, (-1, tf.shape(pred_result)[-1]))\n \n # append to pred_bbox\n pred_bbox.append(pred_result_reshaped)\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n \n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, original_images[-1], train_input_size, score_threshold)\n \n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n \n # draw bbox on latest image in orignal_images\n image = draw_bbox(original_images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image if path to save is given\n if output_path != '': cv2.imwrite(output_path, image)\n \n # display image if show is true \n if show:\n \n # show the image\n cv2.imshow(\"predicted image\", image)\n \n # load and hold the image\n cv2.waitKey(0)\n \n # to close the window after the required kill value was provided\n cv2.destroyAllWindows()\n \n return image", "def run_yolo(net, image, coco_classes, save_image=False):\n\n global frame, classes\n # Give the configuration and weight files for the model and load the network using them.\n classes = coco_classes\n\n frame = cv2.imread(str(image))\n\n # Crop the frame\n # (y_min, y_max) (x_min, x_max)\n # frame = frame[300:1080, 200:1920] # Classifying people\n # frame = frame[0:500, 0:1920] # Classifying Cars\n\n # Stop the program if reached end of video\n if frame is None:\n return\n\n # Create a 4D blob from a frame.\n blob = cv2.dnn.blobFromImage(\n frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False\n )\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n\n # Remove the bounding boxes with low confidence\n postprocess(frame, outs, save_image)\n\n # Get the overall time for inference(t) and the timings for each of the layers(in layersTimes)\n t, _ = net.getPerfProfile()\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv2.getTickFrequency())\n # cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n print(label)\n\n # Save image with all bounding boxes\n # utils.write_image(frame)", "def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # 
predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))", "def demo(sess, net, img_path):\n\n # Load the demo image\n once_time = 0\n\n im = cv2.imread(img_path)\n im = cv2.resize(im, (227, 227))\n # im = im[np.newaxis, :, :, :]\n t = time.time()\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n print('subtract consume time {}s'.format(time.time() - t))\n im = im_orig[np.newaxis, :, :, :]\n # print('>>>>>>>', im.shape[0], im.shape[1])\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n yaw, pitch, roll, yaw_raw, pitch_raw, roll_raw = net.test_image(sess, im)\n # yaw, pitch = net.test_image(sess, im)\n print(yaw, pitch, roll)\n # print(yaw_raw)\n # print(pitch_raw)\n # print(roll_raw)\n timer.toc()\n once_time = timer.total_time\n print('Detection took {:.3f}s'.format(timer.total_time))\n\n # cv2_vis(im, CLASSES[1], dets, result_file)\n return yaw, pitch, roll, once_time", "def enable_detector_yolo_full():\n global enabled_detector, enable_detection, detector, use_cuda\n if not enable_detection:\n enable_detection = True\n\n thresh = request.form[\"thresh\"]\n confidence = request.form['confidence']\n distance_check = request.form['tracker_dst']\n\n if thresh == '':\n thresh = float(0.3)\n\n if confidence == '':\n confidence = float(0.5)\n\n if distance_check == '':\n distance_check = float(350)\n\n yolo4_cfg = os.path.join(\n \"detectors/yolo_detector/weights/yolo4coco/yolo4.cfg\")\n yolo4_weights = os.path.join(\n \"detectors/yolo_detector/weights/yolo4coco/yolo4.weights\")\n labels = os.path.join(\n \"detectors/yolo_detector/weights/yolo-coco/coco.names\")\n\n detector = Yolo(config=yolo4_cfg, weights=yolo4_weights, labels=labels,\n confidence_param=confidence, thresh_param=thresh, use_cuda=use_cuda, distance_check=distance_check)\n if detector is not None:\n enabled_detector = \"Yolo4 detector\"\n return render_settings_view()", "def demo(net, data_dir, imgfile, out_dir):\n\n # Load the demo image\n im_file = os.path.join(data_dir, imgfile)\n im = cv2.imread(im_file)\n\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n scores = np.squeeze(scores)\n timer.toc()\n print ('Detection took {:.3f}s for '\n '{:d} object 
proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.12\n NMS_THRESH = 0.3\n color_white = (0, 0, 0)\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 \n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n bbox = map(int, bbox)\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=4)\n cv2.putText(im, '%s %.3f' % (cls, score), (bbox[0], bbox[1] + 15),\n color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)\n return im", "def test():\n args = parse_args()\n\n devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=True, device_id=devid)\n\n # logger\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n rank_id = int(os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0\n args.logger = get_logger(args.outputs_dir, rank_id)\n\n context.reset_auto_parallel_context()\n parallel_mode = ParallelMode.STAND_ALONE\n context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)\n\n args.logger.info('Creating Network....')\n network = SolveOutput(YOLOV3DarkNet53(is_training=False))\n\n data_root = args.data_root\n ann_file = args.annFile\n\n args.logger.info(args.pretrained)\n if os.path.isfile(args.pretrained):\n param_dict = load_checkpoint(args.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('yolo_network.'):\n param_dict_new[key[13:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n args.logger.info('load_model {} success'.format(args.pretrained))\n else:\n args.logger.info('{} not exists or not a pre-trained file'.format(args.pretrained))\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(args.pretrained))\n exit(1)\n\n config = ConfigYOLOV3DarkNet53()\n if args.testing_shape:\n config.test_img_shape = conver_testing_shape(args)\n\n ds, data_size = create_yolo_dataset(data_root, ann_file, is_training=False, batch_size=1,\n max_epoch=1, device_num=1, rank=rank_id, shuffle=False,\n config=config)\n\n args.logger.info('testing shape : {}'.format(config.test_img_shape))\n args.logger.info('totol {} images to eval'.format(data_size))\n\n network.set_train(False)\n # build attacker\n attack = DeepFool(network, num_classes=80, model_type='detection', reserve_ratio=0.9, bounds=(0, 1))\n input_shape = Tensor(tuple(config.test_img_shape), ms.float32)\n\n args.logger.info('Start inference....')\n batch_num = args.samples_num\n adv_example = []\n for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):\n if i >= batch_num:\n break\n image = data[\"image\"]\n image_shape = data[\"image_shape\"]\n\n gt_boxes, gt_logits = network(image, input_shape)\n gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()\n gt_labels = np.argmax(gt_logits, axis=2)\n\n adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()), (gt_boxes, gt_labels))\n adv_example.append(adv_img)\n 
np.save('adv_example.npy', adv_example)", "def setUp(self):\n \n self.DetectorObj = Detector(light_type, position, angle)\n\n self.detector_type = self.DetectorObj.detector_type\n self.psd = self.DetectorObj.psd\n self.intensity = self.DetectorObj.intensity\n self.database = self.DetectorObj.database\n self.position = self.DetectorObj.position\n self.angle = self.DetectorObj.angle\n self.linearity_curve = self.DetectorObj.linearity_curve\n self.FOV = self.DetectorObj.FOV\n \n pass", "def yolo_forward(net, LABELS, image, confidence_level, save_image=False):\n\n # initialize a list of colors to represent each possible class label\n np.random.seed(42)\n colors = np.random.randint(0, 255, size=(10000, 3),\n dtype='uint8')\n\n # grab image spatial dimensions\n (H, W) = image.shape[:2]\n\n # determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # construct a blob from the input image and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes and\n # associated probabilities\n # also time it\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layer_outputs = net.forward(ln)\n end = time.time()\n\n # show timing information on YOLO\n print('[INFO] YOLO took {:.6f} seconds'.format(end - start))\n\n # initialize our lists of detected bounding boxes, confidences, and\n # class IDs, respectively\n boxes = []\n confidences = []\n class_ids = []\n\n # loop over each of the layer outputs\n for output in layer_outputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability) of\n # the current object detection\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > confidence_level:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype('int')\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n # apply non-maxima suppression to suppress weak, overlapping bounding\n # boxes\n # idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_level, threshold)\n\n print(class_ids)\n print(LABELS)\n # print(labels)\n\n labels = [LABELS[i] for i in class_ids]\n\n if save_image:\n yolo_save_img(image, class_ids, boxes, labels, confidences, colors, 'python_predictions.jpg')\n\n return class_ids, labels, boxes, confidences", "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n #boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n #count = int(get_output_tensor(interpreter, 3))\n\n #results = []\n #for i in range(count):\n # if scores[i] >= 
threshold:\n # result = {\n # #'bounding_box': boxes[i],\n # 'class_id': classes[i],\n # 'score': scores[i]\n # }\n # results.append(result)\n \n \n #print(\"detection results:\\n\" + str(results))\n #return results\n return np.array([int(_class) for _class in classes]), np.array(scores)", "def detect(parser):\n cli_args = add_all_args(parser, DETECTION)\n detector = Detector(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n check_args = [\n item for item in [cli_args.image, cli_args.image_dir, cli_args.video] if item\n ]\n assert (\n len(check_args) == 1\n ), 'Expected --image or --image-dir or --video, got more than one'\n target_photos = []\n if cli_args.image:\n target_photos.append(get_abs_path(cli_args.image))\n if cli_args.image_dir:\n target_photos.extend(\n get_abs_path(cli_args.image_dir, image)\n for image in get_image_files(cli_args.image_dir)\n )\n if cli_args.image or cli_args.image_dir:\n detector.predict_photos(\n photos=target_photos,\n trained_weights=cli_args.weights,\n batch_size=cli_args.process_batch_size,\n workers=cli_args.workers,\n output_dir=cli_args.output_dir,\n )\n if cli_args.video:\n detector.detect_video(\n video=get_abs_path(cli_args.video, verify=True),\n trained_weights=get_abs_path(cli_args.weights, verify=True),\n codec=cli_args.codec,\n display=cli_args.display_vid,\n output_dir=cli_args.output_dir,\n )" ]
[ "0.6627789", "0.6447896", "0.6315877", "0.62284786", "0.6174531", "0.61566085", "0.6096233", "0.60372835", "0.58174103", "0.5799088", "0.5774036", "0.5773982", "0.57679653", "0.56752574", "0.5674558", "0.5668986", "0.5649171", "0.5648845", "0.5647306", "0.5644756", "0.56349933", "0.55926454", "0.5554171", "0.5548961", "0.5539405", "0.5535786", "0.5531372", "0.55205065", "0.5507268", "0.55062026" ]
0.7094565
0
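The yolov5 snippet quoted in this entry's tail above filters hub-model predictions down to the "person" class before drawing boxes. As a standalone illustration of that extraction step (a minimal sketch, not the dataset's own code; the model handle and class index are assumptions based on the usual torch.hub yolov5 setup and an OpenCV BGR frame):

import cv2
import torch

# Illustrative only: load a yolov5 model from torch.hub (assumed setup, not from the dataset).
model = torch.hub.load("ultralytics/yolov5", "yolov5s")
PERSON_CLASS = 0  # index of "person" in the COCO label map used by yolov5

def person_boxes(frame_bgr):
    # yolov5 expects RGB input; OpenCV frames arrive as BGR
    results = model(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
    preds = results.xyxy[0].cpu()        # rows of (x1, y1, x2, y2, confidence, class)
    keep = preds[:, 5] == PERSON_CLASS   # keep only person detections
    boxes = preds[keep, :4].numpy()
    probs = preds[keep, 4].numpy()
    return boxes, probs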
Splits image into tiles by size of tile. tile_w: tile width; tile_h: tile height
def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int): x_axis = -1 y_axis = -2 arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis] x_ntiles = ( arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1 ) y_ntiles = ( arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1 ) tiles = [] # row for i in range(0, y_ntiles): # height of this tile ver_f = tile_h * i ver_t = ver_f + tile_h # col for j in range(0, x_ntiles): # width of this tile hor_f = tile_w * j hor_t = hor_f + tile_w tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap) tiles.append(tile) tile_shape = [tile_h, tile_w] ntiles = dict(x=x_ntiles, y=y_ntiles) padding = dict(left=0, right=0, top=0, bottom=0) if arr_width % tile_w == 0: padding["right"] = 0 else: padding["right"] = tile_w - (arr_width % tile_w) if arr_height % tile_h == 0: padding["bottom"] = 0 else: padding["bottom"] = tile_h - (arr_height % tile_h) info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding) return tiles, info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight 
+ tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.push( tiles )\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append( imagesize );\n\n while (imagesize[0] > tilesize or imageSize[1] > tilesize ):\n imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n self.tierSizeInTiles.append( tiles )\n self.tierImageSize.append( imagesize )\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n )", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = 
img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. 
(1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = 
os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n 
self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )", "def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = 
np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def _get_tile_info(img_shape, tile_shape, ambiguous_size=128):\n # * get normal tiling set\n tile_grid_top_left, _ = _get_patch_top_left_info(img_shape, tile_shape, tile_shape)\n tile_grid_bot_right = []\n for idx in list(range(tile_grid_top_left.shape[0])):\n tile_tl = tile_grid_top_left[idx][:2]\n tile_br = tile_tl + tile_shape\n axis_sel = tile_br > img_shape\n tile_br[axis_sel] = img_shape[axis_sel]\n tile_grid_bot_right.append(tile_br)\n tile_grid_bot_right = np.array(tile_grid_bot_right)\n tile_grid = np.stack([tile_grid_top_left, tile_grid_bot_right], axis=1)\n tile_grid_x = np.unique(tile_grid_top_left[:, 1])\n tile_grid_y = np.unique(tile_grid_top_left[:, 0])\n # * get tiling set to fix vertical and horizontal boundary between tiles\n # for sanity, expand at boundary `ambiguous_size` to both side vertical and horizontal\n stack_coord = lambda x: np.stack([x[0].flatten(), x[1].flatten()], axis=-1)\n tile_boundary_x_top_left = np.meshgrid(\n tile_grid_y, tile_grid_x[1:] - ambiguous_size\n )\n tile_boundary_x_bot_right = np.meshgrid(\n tile_grid_y + tile_shape[0], tile_grid_x[1:] + ambiguous_size\n )\n tile_boundary_x_top_left = stack_coord(tile_boundary_x_top_left)\n tile_boundary_x_bot_right = stack_coord(tile_boundary_x_bot_right)\n tile_boundary_x = np.stack(\n [tile_boundary_x_top_left, tile_boundary_x_bot_right], axis=1\n )\n #\n tile_boundary_y_top_left = np.meshgrid(\n tile_grid_y[1:] - ambiguous_size, tile_grid_x\n )\n tile_boundary_y_bot_right = np.meshgrid(\n tile_grid_y[1:] + ambiguous_size, tile_grid_x + tile_shape[1]\n )\n tile_boundary_y_top_left = stack_coord(tile_boundary_y_top_left)\n tile_boundary_y_bot_right = stack_coord(tile_boundary_y_bot_right)\n tile_boundary_y = np.stack(\n [tile_boundary_y_top_left, tile_boundary_y_bot_right], axis=1\n )\n tile_boundary = np.concatenate([tile_boundary_x, tile_boundary_y], axis=0)\n # * get tiling 
set to fix the intersection of 4 tiles\n tile_cross_top_left = np.meshgrid(\n tile_grid_y[1:] - 2 * ambiguous_size, tile_grid_x[1:] - 2 * ambiguous_size\n )\n tile_cross_bot_right = np.meshgrid(\n tile_grid_y[1:] + 2 * ambiguous_size, tile_grid_x[1:] + 2 * ambiguous_size\n )\n tile_cross_top_left = stack_coord(tile_cross_top_left)\n tile_cross_bot_right = stack_coord(tile_cross_bot_right)\n tile_cross = np.stack([tile_cross_top_left, tile_cross_bot_right], axis=1)\n return tile_grid, tile_boundary, tile_cross", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory", "def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. 
Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out", "def split_tiles(module_data):\n raise NotImplementedError", "def __createTiles(self, length, width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints", "def slice(\n filename,\n number_tiles=None,\n col=None,\n row=None,\n save=True,\n DecompressionBombWarning=True,\n):\n if DecompressionBombWarning is False:\n Image.MAX_IMAGE_PIXELS = None\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if number_tiles:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(\n tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)\n )\n return tuple(tiles)" ]
[ "0.76203066", "0.71610355", "0.69022644", "0.6677268", "0.6614609", "0.6609202", "0.6596505", "0.6574447", "0.6529179", "0.652334", "0.650743", "0.650743", "0.6493861", "0.6485636", "0.64644873", "0.6450046", "0.644942", "0.6383776", "0.6369156", "0.634634", "0.63084143", "0.6303704", "0.6262621", "0.62292117", "0.62207806", "0.6218618", "0.6204421", "0.6173811", "0.61385846", "0.61018085" ]
0.7902135
0
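As a usage note for the entry above: a hypothetical call to split_image_into_tiles_of_size, assuming a channels-first numpy image (the function reads width/height from the last two axes) and assuming the companion get_tile helper it references is importable alongside it:

import numpy as np

# Hypothetical usage of the split_image_into_tiles_of_size document above;
# arr is (C, H, W) because the function takes width/height from the last two axes,
# and get_tile (called inside it but not shown here) must be available.
arr = np.random.randint(0, 255, size=(3, 1000, 1200), dtype=np.uint8)
tiles, info = split_image_into_tiles_of_size(arr, tile_w=256, tile_h=256, overlap=16)
print(info["ntiles"])   # {'x': 5, 'y': 4} for a 1200 x 1000 image and 256 px tiles
print(info["padding"])  # right/bottom padding needed to fill the last tile row/column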
Splits image into tiles by number of tiles. x_ntiles: number of tiles horizontally; y_ntiles: number of tiles vertically
def split_image_into_number_of_tiles( arr: Image, x_ntiles: int, y_ntiles: int, overlap: int ): img_width, img_height = arr.shape[-1], arr.shape[-2] tile_w = img_width // x_ntiles tile_h = img_height // y_ntiles return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info", "def make_tiles_limits(im, n_splits, margin=0):\n \n if n_splits == 1:\n return [0, im.shape[1], 0, im.shape[0]]\n # number of splits per axis\n ax_splits = int(np.log2(n_splits))\n x_segments = split_range(im.shape[1], ax_splits)\n y_segments = split_range(im.shape[0], ax_splits)\n \n if margin > 0:\n x_segments = extend_indices(x_segments, margin=margin)\n y_segments = extend_indices(y_segments, margin=margin)\n \n # make combinations of [xmin, xmax, ymin, ymax] indices of tiles\n tiles_indices = []\n for xlim in x_segments:\n for ylim in y_segments:\n tiles_indices.append(xlim + ylim)\n return tiles_indices", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def chunks(img, n):\n shape = img.shape\n imgs = []\n\n nx = int(n * (shape[1]/(shape[0] + shape[1])))\n ny = n - nx\n\n x = int(shape[0]/n)\n y = int(shape[0]/n)\n\n for i in range(nx - 1):\n line = []\n for j in range(ny - 1):\n line.append(img[y*j: y*(j+1), x*i: x*(i+1), ::])\n imgs.append(line)\n return imgs", "def image_to_tiles(img, tile_size):\n 
padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def split_tiles(module_data):\n raise NotImplementedError", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def split_tileset(self, tileset):\n\n tiles = self.tiles\n firstgid = tileset.firstgid\n tilewidth = self.tilewidth\n tileheight = self.tileheight\n margin = tileset.margin\n\n # carga la imagen del tileset y obtiene sus dimensiones\n image = pygame.image.load(tileset.image_path).convert_alpha()\n image_width, image_height = image.get_size()\n\n # calcula el número de columnas\n cols = image_width // tilewidth\n\n # calcula el espaciamiento entre cada tile en cada eje\n tx = tilewidth + tileset.spacing\n ty = tileheight + tileset.spacing\n\n # calcula la máxima distancia a iterar en cada eje\n max_y = image_height - tileheight + 1\n max_x = image_width - tilewidth + 1\n\n # divide una imagen en tiles\n for row, y in enumerate(xrange(margin, max_y, ty)):\n for col, x in enumerate(xrange(margin, max_x, tx)):\n tile = image.subsurface((x, y, tilewidth, tileheight))\n tiles[firstgid + row * cols + col] = tile", "def split_image_with_bboxes(bboxes, image, tiles=4):\n\n if tiles == 0:\n return {(0, 0): {\"image\": image, \"bboxes\": bboxes}}\n assert tiles % 2 == 0, \"Error in splitting images. 
Uneven number of images requested.\"\n\n split = tiles / 2\n\n height, width, *_ = image.shape\n\n new_height = height / split\n new_width = width / split\n\n tiles = {}\n\n tile_height = new_height\n\n for row in range(int(split)):\n tile_width = new_width\n for col in range(int(split)):\n\n # Create image with true values on tile\n canvas = np.zeros_like(image)\n tile_start = (int(tile_height-new_height), int(tile_width-new_width))\n tile_end = (int(tile_height), int(tile_width))\n canvas[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]] = 1\n\n new_bboxes = []\n for bbox in bboxes:\n\n xmin, ymin, xmax, ymax = bbox\n\n # Overlap of image tile and bbox\n bbox_image = np.zeros_like(image)\n bbox_image[ymin:ymax, xmin:xmax] = 1\n\n overlap = np.logical_and(canvas, bbox_image)\n\n if np.sum(overlap) < 1:\n continue\n\n overlap_index = np.argwhere(overlap)\n\n overlap_xmin, overlap_ymin = overlap_index[0][1], overlap_index[0][0]\n overlap_xmax, overlap_ymax = overlap_index[-1][1]+1, overlap_index[-1][0]+1\n\n new_xmin = overlap_xmin - col * new_width\n new_ymin = overlap_ymin - row * new_height\n new_xmax = overlap_xmax - col * new_width\n new_ymax = overlap_ymax - row * new_height\n\n new_bbox = (new_xmin, new_ymin, new_xmax, new_ymax)\n\n new_bboxes.append(new_bbox)\n\n cropped_image = image[tile_start[0]:tile_end[0], tile_start[1]:tile_end[1]]\n tiles[(row, col)] = {\"image\": cropped_image, \"bboxes\": new_bboxes}\n\n tile_width = tile_width + new_width\n tile_height = tile_height + new_height\n\n return tiles", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def getNumTiles(self):\n return self.w * self.h", "def calcul_xy_array(img_x, img_y, tile_x, tile_y):\n array = []\n\n modu_x = img_x % tile_x\n modu_y = img_y % tile_y\n div_x = img_x // tile_x\n div_y = img_y // tile_y\n current_x = 0\n current_y = 0\n\n for i in range(div_y):\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n current_y += tile_y\n current_x = 0\n\n if modu_y:\n current_y = img_y - tile_y\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n\n return array", "def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} 
Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def getNumTiles(self):\n return self.height * self.width", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles", "def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, 
WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = 
np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def __createTiles(self, length, width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints" ]
[ "0.7743909", "0.7442206", "0.7223278", "0.6875224", "0.6805701", "0.6805701", "0.68043333", "0.6783657", "0.67152864", "0.6648476", "0.6550898", "0.6506808", "0.6499578", "0.644437", "0.64369965", "0.64329946", "0.6417711", "0.6391121", "0.6388546", "0.637921", "0.6316766", "0.63017416", "0.6299771", "0.6295904", "0.62767625", "0.6265029", "0.6232143", "0.62023723", "0.618484", "0.6164547" ]
0.8323475
0
Generates an array of ppxf_util.gaussian emission lines to be used as gas templates in PPXF. Generally, these templates represent the instrumental line spread function (LSF) at the set of wavelengths of each emission line. In this case, pPXF will return the intrinsic (i.e. astrophysical) dispersion of the gas lines. Alternatively, one can input FWHM_gal=0, in which case the emission lines are delta functions and pPXF will return a dispersion which includes both the instrumental and the intrinsic dispersion. Additional lines can be easily added by editing the code of this procedure, which is meant as a template to be modified by the users where needed. For accuracy the ppxf_util.gaussians are integrated over the pixel boundaries. This can be changed by setting `pixel`=False. The [OI], [OIII] and [NII] doublets are fixed at theoretical flux ratio~3. The [OII] and [SII] doublets can be restricted to a physical range of ratios. The Balmer series can be fixed to the theoretically predicted decrement.
def emission_lines(logLam_temp, lamRange_gal, FWHM_gal, pixel=True, tie_balmer=False, limit_doublets=False, vacuum=False): if tie_balmer: # Balmer decrement for Case B recombination (T=1e4 K, ne=100 cm^-3) # Table 4.4 of Dopita & Sutherland 2003 https://www.amazon.com/dp/3540433627 # Balmer: Htheta Heta Hzeta Heps Hdelta Hgamma Hbeta Halpha wave = np.array([3797.90, 3835.39, 3889.05, 3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) ratios = np.array([0.0530, 0.0731, 0.105, 0.159, 0.259, 0.468, 1, 2.86]) ratios *= wave[-2]/wave # Account for varying pixel size in Angstrom emission_lines = gauss @ ratios line_names = ['Balmer'] w = (wave > lamRange_gal[0]) & (wave < lamRange_gal[1]) line_wave = np.mean(wave[w]) if np.any(w) else np.mean(wave) else: # Use fewer lines here, as the weak ones are difficult to measure # Balmer: Hdelta Hgamma Hbeta Halpha line_wave = [4101.76, 4340.47, 4861.33, 6562.80] # air wavelengths if vacuum: line_wave = ppxf_util.air_to_vac(line_wave) line_names = ['Hdelta', 'Hgamma', 'Hbeta', 'Halpha'] emission_lines = ppxf_util.gaussian(logLam_temp, line_wave, FWHM_gal, pixel) if limit_doublets: # The line ratio of this doublet lam3729/lam3726 is constrained by # atomic physics to lie in the range 0.28--1.47 (e.g. fig.5.8 of # Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/). # We model this doublet as a linear combination of two doublets with the # maximum and minimum ratios, to limit the ratio to the desired range. # -----[OII]----- wave = [3726.03, 3728.82] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[OII]3726_d1', '[OII]3726_d2'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) doublets = gauss @ [[1, 1], [0.28, 1.47]] # produces *two* doublets emission_lines = np.column_stack([emission_lines, doublets]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) # The line ratio of this doublet lam6716/lam6731 is constrained by # atomic physics to lie in the range 0.44--1.43 (e.g. fig.5.8 of # Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/). # We model this doublet as a linear combination of two doublets with the # maximum and minimum ratios, to limit the ratio to the desired range. 
# -----[SII]----- wave = [6716.47, 6730.85] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[SII]6731_d1', '[SII]6731_d2'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) doublets = gauss @ [[0.44, 1.43], [1, 1]] # produces *two* doublets emission_lines = np.column_stack([emission_lines, doublets]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) else: # Here the doublets are free to have any ratio # -----[OII]----- -----[SII]----- wave = [3726.03, 3728.82, 6716.47, 6730.85] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) names = ['[OII]3726', '[OII]3729', '[SII]6716', '[SII]6731'] gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) emission_lines = np.column_stack([emission_lines, gauss]) line_names = np.append(line_names, names) line_wave = np.append(line_wave, wave) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[OIII]----- wave = [4958.92, 5006.84] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[OIII]5007_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[OI]----- wave = [6300.30, 6363.67] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.33] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[OI]6300_d') # single template for this doublet line_wave = np.append(line_wave, wave[0]) # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[NII]----- wave = [6548.03, 6583.41] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[NII]6583_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) #added by anja to ppxf_util.emission_lines version # To keep the flux ratio of a doublet fixed, we place the two lines in a single template # -----[NI]----- wave = [5197.90, 5200.39] # air wavelengths if vacuum: wave = ppxf_util.air_to_vac(wave) doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.7] emission_lines = np.column_stack([emission_lines, doublet]) line_names = np.append(line_names, '[NI]5200_d') # single template for this doublet line_wave = np.append(line_wave, wave[1]) #---------------------- # Only include lines falling within the estimated fitted wavelength range. # w = (line_wave > lamRange_gal[0]) & (line_wave < lamRange_gal[1]) emission_lines = emission_lines[:, w] line_names = line_names[w] line_wave = line_wave[w] print('Emission lines included in gas templates:') print(line_names) return emission_lines, line_names, line_wave
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template", "def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = 
np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = ['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n ### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list", "def array_templates(templates, max_R=5000):\n from grizli.utils_c.interp import 
interp_conserve_c\n \n wave = np.unique(np.hstack([templates[t].wave for t in templates]))\n clipsum, iter = 1, 0\n while (clipsum > 0) & (iter < 10):\n clip = np.gradient(wave)/wave < 1/max_R\n idx = np.arange(len(wave))[clip]\n wave[idx[::2]] = np.nan\n wave = wave[np.isfinite(wave)]\n iter += 1\n clipsum = clip.sum()\n #print(iter, clipsum)\n \n NTEMP = len(templates)\n flux_arr = np.zeros((NTEMP, len(wave)))\n \n for i, t in enumerate(templates):\n flux_arr[i,:] = interp_conserve_c(wave, templates[t].wave,\n templates[t].flux)\n \n is_line = np.array([t.startswith('line ') for t in templates])\n \n return wave, flux_arr, is_line", "def gen_gaussian_low(img, c_res, c=0.5, vx_size=1):\n\n # Input parsing\n assert (c_res > 0) and (c > 0) and (vx_size > 0)\n assert isinstance(img, np.ndarray) and (len(img.shape) == 3)\n\n # Initialization\n f_vx = c_res / vx_size\n ff_vx = min(img.shape) / (2. * np.pi * f_vx)\n sf_vx = ff_vx / math.sqrt(2. * math.log(1. / c))\n\n # Meshgrid generation\n nx, ny, nz = (img.shape[0] - 1) * .5, (img.shape[1] - 1) * .5, (img.shape[2] - 1) * .5\n if (nx % 1) == 0:\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx + 1, 1)))\n else:\n if nx < 1:\n arr_x = np.arange(0, 1)\n else:\n nx = math.ceil(nx)\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx, 1)))\n if (ny % 1) == 0:\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny + 1, 1)))\n else:\n if ny < 1:\n arr_y = np.arange(0, 1)\n else:\n ny = math.ceil(ny)\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny, 1)))\n if (nz % 1) == 0:\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz + 1, 1)))\n else:\n if nz < 1:\n arr_z = np.arange(0, 1)\n else:\n nz = math.ceil(nz)\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz, 1)))\n [X, Y, Z] = np.meshgrid(arr_x, arr_y, arr_z, indexing='ij')\n X = X.astype(np.float32, copy=False)\n Y = Y.astype(np.float32, copy=False)\n Z = Z.astype(np.float32, copy=False)\n R = np.sqrt(X * X + Y * Y + Z * Z)\n\n # Building\n return np.exp(-R / (2.*sf_vx*sf_vx))", "def grid_lineify(f, x_lim=(0.,256) ,y_lim=(0.,256), ntraj = 600,\n max_step = 3000, gamma = 0.02, dt = 9., e0 = 0.1,\n T = 0.1,\n e_thresh = 0.001, h = 2e-1, m = 3, bounce = False\n ):\n lines = []\n nx = int(np.sqrt(ntraj))\n x_starts, y_starts = np.meshgrid(np.linspace(x_lim[0],x_lim[1],nx),\n np.linspace(y_lim[0],y_lim[1],nx))\n x_starts = x_starts.flatten()\n y_starts = y_starts.flatten()\n for traj in range(len(x_starts)):\n x,y = x_starts[traj].item(), y_starts[traj].item()\n PE = f(x, y)\n v0 = np.sqrt(e0/m)\n vx,vy = np.random.normal(0,v0), np.random.normal(0,v0)\n line = []\n step = 0\n while step < max_step and np.sqrt(vx*vx+vy*vy) > e_thresh:\n PE = f(x, y)\n if (np.exp(-PE/.01) > np.random.random()):\n break\n # cdiff grad\n gx = ((f(x+h,y)-f(x-h,y))/(2*h)).item()\n gy = ((f(x,y+h)-f(x,y-h))/(2*h)).item()\n vx += 0.5*dt*(gx - gamma*vx + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n vy += 0.5*dt*(gy - gamma*vy + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n x += vx*dt\n y += vy*dt\n # Bounce off edges.\n if (bounce):\n if (x > x_lim[1]):\n x -= 2.0*np.abs(x-x_lim[1])\n vx *= -1\n if (x < x_lim[0]):\n x += 2.0*np.abs(x-x_lim[0])\n vx *= -1\n if (y > y_lim[1]):\n y -= 2.0*np.abs(y-y_lim[1])\n vy *= -1\n if (y < y_lim[0]):\n y += 2.0*np.abs(y-y_lim[0])\n vy *= -1\n else: # absorb\n if (x > x_lim[1]):\n break\n elif (x < x_lim[0]):\n break\n elif (y > y_lim[1]):\n break\n elif (y < y_lim[0]):\n break\n line.append([x,y])\n gx = 
((f(x+h,y)-f(x-h,y))/(2*h)).item()\n gy = ((f(x,y+h)-f(x,y-h))/(2*h)).item()\n vx += 0.5*dt*(gx - gamma*vx + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n vy += 0.5*dt*(gy - gamma*vy + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n step += 1\n lines.append(line)\n return lines", "def Build_Background_Template(numBGPhotons, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS = False,outputSize=300,angularSize=10.0):\r\n \r\n numPhotons = numBGPhotons\r\n numHigh = int(round(.32 *numPhotons))\r\n numLow = numPhotons-numHigh\r\n \r\n bgEventsX = []\r\n bgEventsY = []\r\n \r\n bgTemplate = bgTemplate *(1.0-flatLevel) + flatLevel\r\n# import matplotlib.pyplot as plt\r\n# plt.imshow(bgTemplate,'jet',vmin=0, vmax=1)\r\n# plt.colorbar()\r\n# plt.show()\r\n\r\n app=float(angularSize)/float(outputSize) # angle per pixel\r\n for i in range(numPhotons):\r\n x ,y = 0, 0\r\n while True:\r\n x,y = np.random.randint(0,high = len(bgTemplate)),np.random.randint(0,high = len(bgTemplate))\r\n if (np.random.ranf() < bgTemplate[y][x]):\r\n break\r\n # Shift and scale coordinates to output map and then compute PSF modification to the position.\r\n psfMod = PSF_Spread(PSFTableFront,PSFTableBack, HESS =HESS)\r\n dx = psfMod[0]*math.cos(psfMod[1]) # PSF shift in deg\r\n dy = psfMod[0]*math.sin(psfMod[1]) # PSF shift in deg\r\n \r\n bgEventsX.append((x-outputSize/2.0)*app + dx)\r\n bgEventsY.append((y-outputSize/2.0)*app + dy)\r\n \r\n return (bgEventsX, bgEventsY)", "def gen_new_phiw_div_phib_arr(N_PROCESSES, phiw_div_phib_arr_new, cond_GT, fcn_D, fcn_eta, z_div_L_arr, phiw_div_phib_arr, Pi_div_DLP_arr, weight, gp_arr, gm_arr, yt_arr, phi_yt_arr, ID_yt_arr, Ieta_yt_arr):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n membrane_geometry = cond_GT['membrane_geometry']\n \n Ny = size(yt_arr)\n # # Python allocate the name for phi_yt_arr[0], this is the same as reference value for C++ \" y= &x\"\n phi_arr_z0 = phi_yt_arr[0]\n Ieta_arr_z0= Ieta_yt_arr[0]\n ID_arr_z0 = ID_yt_arr[0]\n\n ind_z0 = 0 #z-index at inlet\n \n z0_div_L = 0. #z-coord at inlet\n \n r0_div_R = 0. #r-coord at the centerline of pipe\n rw_div_R = 1. 
#r-coord at the membrane wall\n \n vw_div_vw0_z0 = get_v_conv(rw_div_R, z0_div_L, Pi_div_DLP_arr[ind_z0], cond_GT, gp_arr[ind_z0], gm_arr[ind_z0])\n gen_phi_wrt_yt(z0_div_L, phiw_div_phib_arr[ind_z0]*phi_b, fcn_D, vw_div_vw0_z0, yt_arr, phi_arr_z0, cond_GT)\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, Ieta_arr_z0, fcn_eta, cond_GT)\n Ieta_arr_z0 /= Ieta_arr_z0[-1] # CHECK\n gen_INT_inv_f_wrt_yt(yt_arr, phi_arr_z0, ID_arr_z0, fcn_D, cond_GT)\n\n uZ_z0 = get_uZ_out(z0_div_L, cond_GT['k'], cond_GT['Bp'], cond_GT['Bm'], gp_arr[ind_z0], gm_arr[ind_z0])\n F2_0 = cal_F2_Z(vw_div_vw0_z0, ed, yt_arr, Ieta_arr_z0, ID_arr_z0, uZ_z0, membrane_geometry)\n\n Nz = size(z_div_L_arr)\n if (N_PROCESSES ==1):\n # when only single-processor is allocated\n for i in range(1, Nz):\n phiw_div_phib_arr_new[i] = process_at_zi(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\n else:\n # this uses multiprocessing packages\n import multiprocessing as mp\n \n pool = mp.Pool(N_PROCESSES)\n args_list = [(z_div_L_arr[i], phiw_div_phib_arr[i]*phi_b, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i], yt_arr, phi_yt_arr[i], Ieta_yt_arr[i], fcn_eta, ID_yt_arr[i], fcn_D, F2_0)\\\n for i in range(1, Nz)]\n phiw_div_phib_arr_new[1:] = pool.starmap(process_at_zi, args_list)\n pool.close()\n pool.join()\n\n cnt_EXCEED = 0 \n for i,x in enumerate(phiw_div_phib_arr_new):\n\n x = x*cond_GT['phi_bulk']\n if x > cond_GT['phi_freeze']:\n cnt_EXCEED += 1\n phiw_div_phib_arr_new[i] = cond_GT['phi_freeze']/cond_GT['phi_bulk'] # this prevent the accidently beyond the freezing concentration\n if(cnt_EXCEED>0):\n print('Warning: exceed phi_freeze %d times out of %d\\n'%(cnt_EXCEED, cond_GT['Nz']))\n\n FPI_operator(cond_GT['weight'], phiw_div_phib_arr, phiw_div_phib_arr_new, N_skip=1) # phiw(0) must be phib.\n\n return 0", "def fillSignalTemplates(opt):\n\n totalSig={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n data.AddFile(os.path.join(opt.input,opt.sig))\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel\n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n print '\\t',catName,categCut\n\n #signal modelling histograms\n histos=[]\n for name,pfix in [('sig_'+catName,''),('sig_%s_sigShape'%catName,'mix')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n wgtExpr='wgt*%f'%(SIGNALXSECS[opt.xangle]*opt.lumi)\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),\n '{0}*({1})'.format(wgtExpr,templCuts),\n 'goff')\n h=data.GetHistogram()\n histos.append( h.Clone(name) ) \n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalSig[icat]=h.Integral()\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n \n print '\\t total signal:',totalSig\n return totalSig,templates", "def make_alf_template():\n import alf.alf\n import fsps\n \n ssp = alf.alf.Alf()\n \n sp = fsps.StellarPopulation(zcontinuous=1)\n sp.params['logzsol'] = 0.2\n\n # Alf\n m = 
ssp.get_model(in_place=False, logage=0.96, zh=0.2, mgh=0.2)\n \n # FSPS\n w, spec = sp.get_spectrum(tage=10**0.96, peraa=True)\n \n # blue\n blue_norm = spec[w > 3600][0] / m[ssp.wave > 3600][0]\n red_norm = spec[w > 1.7e4][0] / m[ssp.wave > 1.7e4][0]\n \n templx = np.hstack([w[w < 3600], ssp.wave[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], w[w > 1.7e4]])\n temply = np.hstack([spec[w < 3600]/blue_norm, m[(ssp.wave > 3600) & (ssp.wave < 1.7e4)], spec[w > 1.7e4]/red_norm])\n \n np.savetxt('alf_SSP.dat', np.array([templx, temply]).T, fmt='%.5e', header='wave flux\\nlogage = 0.96\\nzh=0.2\\nmgh=0.2\\nfsps: w < 3600, w > 1.7e4')", "def fillBackgroundTemplates(opt):\n\n totalBkg={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n for f in [os.path.join(opt.input,x) for x in os.listdir(opt.input) if 'Data13TeV' in x]:\n if 'MuonEG' in f : continue\n data.AddFile(f)\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel \n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n\n print '\\t',catName,categCut\n\n #background modelling histos\n histos=[]\n data_obs=None\n for name,pfix in [('bkg_'+catName,'mix'),('bkg_%s_bkgShape'%catName,'mixem')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),templCuts,'goff')\n h=data.GetHistogram()\n histos.append(h.Clone(name))\n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalBkg[icat]=h.Integral()\n if not opt.unblind :\n data_obs=h.Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n\n #observed data in this category if unblinding\n if opt.unblind:\n data.Draw('mmiss >> h({1},{2},{3})'.format(opt.nbins,opt.mMin,opt.mMax),categCut,'goff')\n data_obs=data.GetHistogram().Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n templates.append(data_obs)\n\n print '\\t total background:',totalBkg\n return totalBkg,templates", "def gvg(series, tmin=None, tmax=None, fill_method='linear', limit=8,\n output='mean', min_n_meas=2, min_n_years=8, year_offset='a'):\n return __gxg__(series, __mean_spring__, tmin=tmin, tmax=tmax,\n fill_method=fill_method, limit=limit, output=output,\n min_n_meas=min_n_meas, min_n_years=min_n_years,\n year_offset=year_offset)", "def gvg(series, tmin=None, tmax=None, fill_method='linear', limit=8,\n output='mean', min_n_meas=2, min_n_years=8, year_offset='a'):\n return __gxg__(series, __mean_spring__, tmin=tmin, tmax=tmax,\n fill_method=fill_method, limit=limit, output=output,\n min_n_meas=min_n_meas, min_n_years=min_n_years,\n year_offset=year_offset)", "def generate_random_linelist (teff,wv_bounds=(4500,5500),species_params=None,filepath=None):\n abund_offset_range = (-1,1)\n species_offset_range = (-1,1)\n ew_dist_width = 30\n ep_range = (0,12)\n loggf_range = (-6.0,0.5) \n \n theta = 5040.0/teff\n \n # # TODO: remove this calculation???\n # # # fix to a particular line which should be by the turnoff\n # # # Fe I 88.2 2.22 EP -4.2 loggf\n # loggf = 
-4.2\n # ep = 2.22\n # x_turnoff = abund_standard['Fe']['abundance']+loggf-theta*ep\n # x-x_turnoff = -5\n # \n # based on the model abundance used in the cog file\n xnorm = -6.5\n ynorm = -2.0\n \n # read in the parameters \n if species_params is None:\n species_params = _elements_params\n el_params = species_params.copy()\n for el,pars in _elements_params.items():\n el_params.setdefault(el,pars)\n \n\n coeffs, knots, centers, scales = np.array(cog_ppol_hf[\"coefficients\"]), np.array(cog_ppol_hf[\"knots\"]), np.array(cog_ppol_hf[\"centers\"]), np.array(cog_ppol_hf[\"scales\"])\n iqp = piecewise_polynomial.InvertiblePiecewiseQuadratic(coeffs, knots, centers=centers, scales=scales)\n iqp_deriv = iqp.deriv()\n \n # calc the linelist\n linelist = {}\n element_abund = {}\n for species,pars in list(species_params.items()):\n wvs = np.random.uniform(wv_bounds[0],wv_bounds[1],pars['n'])\n solar_abund_offset = np.random.uniform(*abund_offset_range)\n \n # get the abundance for this element, ignore species\n abund = abund_standard[species]['abundance']+solar_abund_offset\n element_abund.setdefault(abund_standard[species]['element'],abund) \n \n species_offset = np.random.uniform(*species_offset_range) \n species_abund = element_abund[abund_standard[species]['element']]+species_offset\n species_abund = np.repeat(species_abund,pars['n'])\n \n # generate the parameters for the lines\n spe_col = np.repeat(abund_standard.species_id(species),pars['n'])\n ew = np.random.exponential(ew_dist_width,pars['n'])\n ep = np.random.uniform(ep_range[0],ep_range[1],pars['n'])\n loggf = np.random.uniform(loggf_range[0],loggf_range[1],pars['n'])\n \n # calculate the line strengths from the COG\n #x = species_abund + loggf - theta*ep + xnorm\n logrw = np.log10(ew/wvs)\n x = iqp.inverse(logrw-ynorm)\n loggf = species_abund - x - theta*ep + xnorm\n\n # estimate the lorzentian and gaussian widths for this line\n lorz_width = estimate_lorentz_width(x, iqp_deriv)\n gauss_width = np.repeat(99.9,pars['n'])\n \n # add to the linelist\n linelist[species] = np.dstack((wvs,spe_col,ep,loggf,ew,gauss_width,lorz_width))[0]\n \n if filepath is not None:\n # save moog file\n f = open(filepath,'w')\n header = \"# Fake linelist created THIMBLES with teff {} # \"\n header += \"wvs species ep loggf ew gauss_width lorz_width # \"\n header += \"guassian and lorentzian widths are estimate\\n\"\n f.write(header.format(teff))\n \n fmt = \"{0:>9.5f} {1:>9.1f} {2:>9.2f} {3:>9.2f}\"+20*\" \"+\" {4:>9.2f}\"+10*\" \"\n fmt += \" {5:>9.2f} {6:>9.2f} FAKE_LINE\\n\"\n for species,ll in linelist.items():\n for row in ll:\n f.write(fmt.format(*row)) \n return linelist", "def pure_gabor():\n \n dots = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dot-images-coh1-2000ms-s02.p\", \"rb\" ), encoding='latin1') \n x = np.arange(-40, 41, 1)\n gaborx, gabory = make_gabors(x)\n centres = np.array([[200,200]])\n \n nf = dots.shape[2]\n nrf = centres.shape[0] # number of receptive fields\n ng = gaborx.shape[1] # number of gabors per receptive field\n \n # offsets (from RF centres) of subimages to multiply with kernels\n vw = int(np.floor(gabory.size/2))\n v_offsets = np.arange(-vw, vw+1)\n hw = int(np.floor(gaborx.shape[0]/2))\n h_offsets = np.arange(-hw, hw+1)\n \n result = np.zeros((nrf, ng, nf))\n for i in range(dots.shape[2]): \n for j in range(nrf): \n v_indices = v_offsets + centres[j,0]\n h_indices = h_offsets + centres[j,1]\n region = dots[v_indices[:,np.newaxis],h_indices,i]\n for k in range(ng): \n gabor = np.outer(gabory, gaborx[:,k])\n 
result[j,k,i] = np.sum(gabor * region)\n return result", "def get_iPTF16hgs(colorplt = False):\n z = 0.017\n ebv = 0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = pd.read_csv('../data/otherSN/iPTF16hgs/table1.txt', sep=\"\\t\")\n tb = tb.drop(columns=[\"Unnamed: 5\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Magnitude'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Magnitude'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Magnitude'].values])\n tb = tb.drop(columns=[\"Magnitude\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n t_max = 57691.59 # from the paper\n tb['tmax_of'] = tb['mjd'] - t_max\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n \"\"\"\n plt.errorbar(tb[\"tmax_rf\"].values[ixg], tb[\"mag\"].values[ixg], tb[\"emag\"].values[ixg], fmt=\".g\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixr], tb[\"mag\"].values[ixr], tb[\"emag\"].values[ixr], fmt=\".r\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixi], tb[\"mag\"].values[ixi], tb[\"emag\"].values[ixi], fmt=\".y\")\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n #tb = tb.drop(columns=[\"datetime64\"])\n if colorplt==False:\n return tb\n else:\n #tb = tb[tb.mjd > 55352.5]\n #tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n 
ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def generate_6D_Gaussian_bunch_matched(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z=None, epsn_z=None\n ):\n if self.longitudinal_mode == 'linear':\n assert(sigma_z is not None)\n bunch = self.generate_6D_Gaussian_bunch(n_macroparticles, intensity,\n epsn_x, epsn_y, sigma_z)\n elif self.longitudinal_mode == \"non-linear\":\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.RF_bucket_distribution(\n self.longitudinal_map.get_bucket(gamma=self.gamma),\n sigma_z=sigma_z,\n epsn_z=epsn_z,\n ),\n ).generate()\n else:\n raise ValueError('Unknown longitudinal mode!')\n\n return bunch", "def growth_curve(userinputs, filter, catalog):\n logging.info('Running growth curve analysis on {}'.format(catalog))\n # Load the photometry results from the catalog (that is returned by the phot\n # function)\n aper_st, flux_st = np.loadtxt(catalog, unpack=True, usecols=(0,3))\n\n #Growth curve is only done on the ref image so we get the filter from userinp.\n ref_filter = filter\n\n ratio_st = np.empty(len(aper_st))\n\n #number of apertures\n naper = 20\n\n # Calculate the number of stars, make sure it is an integer\n nstar = int(len(aper_st)/naper)\n logging.info('Number of stars used: {}'.format(nstar))\n aper_ind = naper - 1\n\n for k in range(nstar):\n\n for i in range(naper):\n\n ratio_st[i + k*naper] = flux_st[i + k*naper]/flux_st[aper_ind + k*naper]\n\n\n # Find median ratio at each aperture between all the stars and all the clusters\n med_st = np.empty(naper)\n\n for i in range(naper):\n\n med_st[i] = np.median(ratio_st[i::naper])\n\n\n # Plot growth curves\n logging.info('Creating Growth curve plots')\n fig = plt.figure(figsize = (7,7))\n\n aper_x = np.arange(naper) + 1\n\n for i in range(nstar):\n\n ratio_y = ratio_st[i*naper:(i + 1)*naper]\n plt.plot(aper_x, ratio_y, 'y-')\n plt.annotate(str(i + 1), xy=(8.0, ratio_y[7]),\n horizontalalignment='left', verticalalignment='top', fontsize=6)\n\n\n plt.plot(aper_x, med_st, 'r-' , linewidth=4.0)\n plt.hlines(0.5, 0, 20, color='black', linewidth=2, zorder=10)\n plt.vlines(4, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(5, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(6, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n\n plt.ylabel('Normalized Flux ' + ref_filter.upper())\n plt.xlabel('Radius (pix)')\n plt.xlim(1,20)\n plt.minorticks_on()\n\n fig.savefig(userinputs['OUTDIR'] + '/plots/plot_growth_curve_{}.pdf'.format(ref_filter))", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def 
generate_6D_Gaussian_bunch(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z\n ):\n if self.longitudinal_mode == \"linear\":\n check_inside_bucket = lambda z, dp: np.array(len(z) * [True])\n Q_s = self.longitudinal_map.Q_s\n elif self.longitudinal_mode == \"non-linear\":\n bucket = self.longitudinal_map.get_bucket(\n gamma=self.gamma, mass=self.mass, charge=self.charge\n )\n check_inside_bucket = bucket.make_is_accepted(margin=0.05)\n Q_s = bucket.Q_s\n else:\n raise NotImplementedError(\"Something wrong with self.longitudinal_mode\")\n\n eta = self.longitudinal_map.alpha_array[0] - self.gamma ** -2\n beta_z = np.abs(eta) * self.circumference / 2.0 / np.pi / Q_s\n sigma_dp = sigma_z / beta_z\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.cut_distribution(\n generators.gaussian2D_asymmetrical(sigma_u=sigma_z, sigma_up=sigma_dp),\n is_accepted=check_inside_bucket,\n ),\n ).generate()\n\n return bunch", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def make_egge(w,minZ,maxZ,ires=1,m=mz0):\n cmds = []\n # coefficients for the amplitudes\n cmds.append(\"A[1,0,1000000]\")\n cmds.append(\"B[1,0,1000000]\")\n cmds.append(\"C[10000.0,0,1000000]\")\n # amplitudes\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('g[8,0,100]')\n denom = '((x^2-m^2)^2+g^2*m^2)'\n cmds.append(\"expr::z_rbw('x^2/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_int('(x^2-m^2)/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_rad('1/(x^2+1)',x)\")\n # 
resolution model\n cmds += resolutions[ires]()\n [w.factory(cmd) for cmd in cmds]\n # sum-of-amplitudes pdf\n lshape = RooRealSumPdf('lshape','lshape',RooArgList(w.function('z_rad'),w.function('z_int'),w.function('z_rbw')),RooArgList(w.var('A'),w.var('B'),w.var('C')))\n getattr(w,'import')(lshape)\n # convolution\n pdf = w.pdf('lshape')\n if w.pdf('res'):\n w.var('x').setBins(10000,'cache')\n cmd = 'FCONV::sum(x,lshape,res)'\n w.factory(cmd)\n pdf = w.pdf('sum')\n return pdf, kFALSE", "def write_gdfs(self):\n for cat, gdf in self.inventory.gdfs.items():\n info = self.inventory.emission_infos[cat]\n for sub in self.inventory.substances:\n source_group = self.source_groups[(sub, cat)]\n if sub not in gdf.columns:\n continue\n\n mask_polygons = gdf.geom_type.isin([\"Polygon\", \"MultiPolygon\"])\n if any(mask_polygons):\n gdf_polygons = gdf.loc[mask_polygons]\n self._write_polygons(\n gdf_polygons.geometry, gdf_polygons[sub], info, source_group\n )\n\n mask_points = gdf.geom_type == \"Point\"\n if any(mask_points):\n gdf_points = gdf.loc[mask_points]\n self._add_points(\n gdf_points.geometry, gdf_points[sub], info, source_group\n )\n\n mask_lines = gdf.geom_type.isin([\"LineString\"])\n if any(mask_lines):\n gdf_lines = gdf.loc[mask_lines]\n self._write_lines(\n gdf_lines.geometry, gdf_lines[sub], info, source_group\n )\n\n mask_multilines = gdf.geom_type.isin([\"MultiLineString\"])\n if any(mask_multilines):\n gdf_multilines = gdf.loc[mask_multilines]\n # Split all the multilines into lines\n for shape, shape_emission in zip(\n gdf_multilines.geometry, gdf_multilines[sub]\n ):\n lenghts = np.array([line.length for line in shape.geoms])\n proprtions = lenghts / shape.length\n for line, prop in zip(shape.geoms, proprtions):\n self._write_line(\n line, shape_emission * prop, info, source_group\n )\n mask_missing = ~(\n mask_multilines | mask_lines | mask_points | mask_polygons\n )\n if any(mask_missing):\n raise NotImplementedError(\n f\"Shapes of type: '{gdf.loc[mask_missing].geom_type.unique()}'\"\n \" are not implemented.\"\n )\n\n # Write all the points as a singl batch\n pd.concat(self.points_dfs).to_csv(\n self.file_points, mode=\"a\", index=False,\n )", "def addGaussian(ax, ismulti):\n shape = (96, 288) #ax.shape[:2]\n intensity_noise = np.random.uniform(low=0, high=0.05)\n if ismulti:\n ax[:,:,0] = ax[:,:,0]*(1+ intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1]))\n else:\n ax[:,:,0] = ax[:,:,0] + intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1])\n return ax", "def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... 
todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if 
f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit 
in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n 
plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado", "def line_sSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n \n L_line = getattr(GR,'L_'+p.line+'_sun')#[0:100]\n SFR = getattr(GR,'SFR')#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[0:100]\n R_gas = getattr(GR,'R2_gas')#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[0:100]\n M_star = getattr(GR,'M_star')#[0:100]\n\n # Take only MS galaxies?\n if p.select == '_MS':\n indices = 
aux.select_salim18(GR.M_star,GR.SFR)\n L_line = L_line[indices]\n SFR = SFR[indices]\n Zsfr = Zsfr[indices]\n print('With MS selection criteria: only %i galaxies' % (len(L_line)))\n\n SFR = SFR[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n M_star = M_star[L_line > 0]\n sSFR = SFR/M_star\n L_line = L_line[L_line > 0]\n\n print('%i data points ' % (len(L_line)))\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_dim':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'AREPO parametric PDF'}\n lab = labs[p.table_ext]\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)\n m = ax.scatter(sSFR[np.argsort(Sigma_M_H2)],L_line[np.argsort(Sigma_M_H2)],marker='o',s=20,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=3.5,label=lab,alpha=0.6,zorder=10)\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/kpc$^2$]',size=15)\n else:\n m = ax.scatter(sSFR,L_line,marker='o',s=20,\\\n c=Zsfr,label=lab,alpha=0.6,zorder=10)\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'$\\langle Z\\rangle_{\\mathrm{SFR}}$ [Z$_{\\odot}$]',size=15)\n\n if p.add_obs:\n add_line_sSFR_obs(p.line,L_line,ax,select=p.select)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel(getlabel('sSFR'))\n ax.set_ylabel(getlabel(p.line))\n handles,labels = ax.get_legend_handles_labels()\n handles = np.flip(handles)\n labels = np.flip(labels)\n # ax.legend(handles,labels,loc='upper left',fontsize=7)\n ax.legend(handles,labels,loc='lower right',fontsize=7,frameon=True,framealpha=0.5) \n print(np.min(sSFR),np.max(sSFR))\n if not p.xlim: p.xlim = 10.**np.array([-13,-7])\n if not p.ylim: \n p.ylim = [np.median(L_line)/1e6,np.median(L_line)*1e4]\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.grid(ls='--')\n\n if p.savefig & (not p.add):\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/%s_sSFR.png' % p.line, format='png', dpi=300)", "def preparehspiceidvg(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,NFINparam, DEVTYPEparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam',NFINparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'DEVTYPEparam',DEVTYPEparam)", "def timinggrid(self):\n\n gelem = Element(\"g\") # create a group\n for i in range(int(self.cycles)):\n\n lelem = Element(\"line\")\n lelem.attrib['x1'] = str(i*self.period + self.period/2.0 + self.xzero)\n 
lelem.attrib['y1'] = str(0);\n lelem.attrib['x2'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y2'] = str(self.signalcnt*(self.height + self.signalspacing) + self.signalspacing)\n lelem.attrib['stroke'] = \"grey\"\n lelem.attrib['stroke-width'] = \"0.5\"\n gelem.append(lelem)\n\n \n self.svgelem.append(gelem)\n self.svgelem.append(self.signalselem)", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def gendata(params,xmin,xmax,npts=4000):\n F = lorentzian.ForwardFactory\n def gensample(F, xmin, xmax):\n from numpy import arange\n import random\n a = arange(xmin, xmax, (xmax-xmin)/200.)\n ymin = 0\n ymax = F(a).max()\n while 1:\n t1 = random.random() * (xmax-xmin) + xmin\n t2 = random.random() * (ymax-ymin) + ymin\n t3 = F(t1)\n if t2 < t3:\n return t1\n fwd = F(params)\n return array([gensample(fwd, xmin,xmax) for i in xrange(npts)])", "def synthetic_gen(self):\r\n logging.debug('generating synthetic map...')\r\n data = self.realData\r\n unit = Params.unitGrid\r\n x_min = np.floor(Params.LOW[0] / unit) * unit\r\n x_max = np.ceil(Params.HIGH[0] / unit) * unit\r\n y_min = np.floor(Params.LOW[1] / unit) * unit\r\n y_max = np.ceil(Params.HIGH[1] / unit) * unit\r\n\r\n x_CELL = int(np.rint((x_max - x_min) / unit))\r\n y_CELL = int(np.rint((y_max - y_min) / unit))\r\n\r\n self.root.n_box = np.array([[x_min, y_min], [x_max, y_max]])\r\n\r\n self.mapp = np.zeros((x_CELL, y_CELL)) - 1 # ## initialize every cell with -1\r\n for i in range(Params.NDATA): # ## populate the map\r\n point = data[:, i]\r\n cell_x = int(np.floor((point[0] - x_min) / unit))\r\n cell_y = int(np.floor((point[1] - y_min) / unit))\r\n if self.mapp[cell_x, cell_y] != -1:\r\n self.mapp[cell_x, cell_y] += 1\r\n else:\r\n self.mapp[cell_x, cell_y] = 1\r\n\r\n for i in range(x_CELL): # ## perturb the counts\r\n for j in range(y_CELL):\r\n if self.mapp[i, j] != -1:\r\n self.mapp[i, j] += np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n else:\r\n self.mapp[i, j] = np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n # if noisy count is negative, ignore the noise and generate no points\r\n if self.mapp[i, j] < 0:\r\n self.mapp[i, j] = 0" ]
[ "0.5699116", "0.5599356", "0.5230809", "0.5207666", "0.5170641", "0.5158478", "0.51580274", "0.5144128", "0.5076723", "0.5075624", "0.50331193", "0.50331193", "0.5024959", "0.49573076", "0.49408728", "0.49217612", "0.48975858", "0.48955354", "0.48943654", "0.48930743", "0.48799923", "0.48453504", "0.48302928", "0.48218024", "0.48129565", "0.4784888", "0.47834697", "0.4777946", "0.47671887", "0.4763021" ]
0.58730155
0
Combine SSP traces to have mass/luminosity weighted properties
def weighted_traces(parnames, trace, nssps):
    weights = np.array([trace["w_{}".format(i+1)].data for i in range(nssps)])
    wtrace = []
    for param in parnames:
        data = np.array([trace["{}_{}".format(param, i+1)].data for i in range(nssps)])
        t = np.average(data, weights=weights, axis=0)
        wtrace.append(Table([t], names=["{}_weighted".format(param)]))
    return hstack(wtrace)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_combined_variation(nums, SSC, band, rms):\n\n def get_spectra(nums, SSC, band, rms):\n spectrum = spectra[str(SSC['no'])][band]\n frequency = spectrum['frequency'].to(u.GHz)\n intensity = spectrum['spectrum'].to(u.K)\n # shift spectrum to rest frequency\n velshift = SSC['velshift']\n frequency = [(-vsys-velshift).to(u.GHz, equivalencies=u.doppler_optical(f)).value for f in frequency]*u.GHz\n # remove NaNs\n frequency, intensity = crossmatch(frequency.to(u.GHz).value, intensity.to(u.K).value)\n # add noise\n intensities = []\n for num in nums:\n if not num==0:\n randstate = np.random.RandomState(num)\n noise = np.random.normal(loc=0., scale=rms.to(u.K).value, size=len(frequency))\n int_noise = intensity+noise\n intensities.append(int_noise)\n else:\n intensities.append(intensity)\n # get percentiles\n d16,dmed,d84 = np.percentile(np.array(intensities), (16,50,84), axis=0)\n return frequency,d16,dmed,d84\n\n def get_models(nums, SSC, band):\n with open(escape_fname(os.path.join(XCLASSdir,'SSC_'+str(SSC['no']),'model_spectrum','run_0','combined_model.spectrum.pickle')), 'rb') as f:\n m = pickle.load(f, encoding=\"latin1\")\n frequency = (m[:,0]*u.MHz).to(u.GHz).value\n\n models = []\n for num in nums:\n with open(escape_fname(os.path.join(XCLASSdir,'SSC_'+str(SSC['no']),'model_spectrum','run_'+str(num),'combined_model.spectrum.pickle')), 'rb') as f:\n m = pickle.load(f, encoding=\"latin1\")\n model = (m[:,1]*u.K).value\n models.append(model)\n m16,mmed,m84 = np.percentile(np.array(models), (16,50,84), axis=0)\n return frequency,m16,mmed,m84\n\n def set_up_figure(SSC, band):\n fig,ax = plt.subplots(nrows=1, ncols=1, squeeze=True, sharex='col', sharey='row', figsize=(10,8))\n ax.text(0.05, 0.9, 'SSC '+str(SSC['no'])+': '+band, color='k', transform=ax.transAxes, ha='left', va='top', weight='bold', fontsize=16, bbox=props)\n return fig,ax\n\n def plot_spectra(ax, frequency, d16, dmed, d84):\n ax.plot(frequency, dmed, lw=1, ls='-', color='k', zorder=3)\n ax.fill_between(frequency, d16, d84, color='k', alpha=0.5, zorder=2)\n\n def plot_fitted_spectra(ax, frequency, m16, mmed, m84):\n ax.plot(frequency, mmed, lw=1, ls='-', color='r', zorder=5)\n ax.fill_between(frequency, m16, m84, color='r', alpha=0.5, zorder=4)\n\n def get_detected_lines(band=None):\n # get detected species\n all_species = []\n for SSC in SSCs:\n for specie in detected_species[str(SSC['no'])]:\n if not specie in all_species:\n all_species.append(specie)\n # get all lines of the detected species\n all_lines = []\n for specie in all_species:\n slines = [l for l in lines if l['XCLASS']==specie]\n for sl in slines:\n all_lines.append(sl)\n # keep only lines of given band\n if not band==None:\n bandlines = []\n for line in all_lines:\n if band=='LSB':\n if line['restfreq']<350*u.GHz:\n bandlines.append(line)\n elif band=='USB':\n if line['restfreq']>350*u.GHz:\n bandlines.append(line)\n return sorted(bandlines, key=lambda k: k['restfreq'])\n else:\n return sorted(all_lines, key=lambda k: k['restfreq'])\n\n def label_lines(ax, spectrum, band):\n detected_lines = get_detected_lines(band=band)\n for idx,line in enumerate(detected_lines):\n restfreq = line['restfreq'].to(u.GHz).value\n if (restfreq>frequency[0] and restfreq<frequency[-1]):\n if band=='LSB':\n xlim = [342.4, 346.2]\n elif band=='USB':\n xlim = [354.3, 358.1]\n xloc = xlim[0] +((idx+0.5)/len(detected_lines))*(xlim[1]-xlim[0])\n ax.axvline(x=restfreq, ymin=0, ymax=1, color='dimgrey', ls='--', lw=0.5, zorder=1)\n ax.plot([restfreq,xloc], [1.05*np.nanmax(spectrum), 
1.05*1.05*np.nanmax(spectrum)], color='dimgrey', ls='--', lw=0.5, zorder=1, clip_on=False)\n ax.text(xloc, 1.06*1.05*np.nanmax(spectrum), line_tex(line), color='dimgrey', fontsize=10, rotation=90, ha='center', va='bottom')\n\n def format_figure(ax, frequency, spectrum, band):\n if band=='LSB':\n ax.set_xlim([342.4, 346.2])\n elif band=='USB':\n ax.set_xlim([354.3, 358.1])\n ax.set_ylim(-0.05*np.nanmax(spectrum), 1.05*np.nanmax(spectrum))\n ax.xaxis.set_major_locator(MultipleLocator(0.5))\n ax.xaxis.set_minor_locator(MultipleLocator(0.1))\n ax.yaxis.set_major_locator(MultipleLocator(10))\n ax.yaxis.set_minor_locator(MultipleLocator(2))\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.set_axisbelow(True)\n ax.grid(axis='y', ls=':', c='grey')\n ax.set_xlabel(r'$\\nu_\\mathrm{rest}$ [GHz]', fontsize=12)\n ax.set_ylabel(r'T$_\\mathrm{b}$ [K]', fontsize=12)\n fig.set_tight_layout(True)\n\n def save_figure(fig, band):\n savepath = escape_fname(os.path.join(plotdir, '03.XCLASS_fit', 'combined_spectra', 'SSC_'+str(SSC['no'])+'.'+band+'.combined_spectra.pdf'))\n os.system('mkdir -p '+os.path.dirname(savepath))\n fig.savefig(savepath, dpi=300, bbox_inches='tight')\n\n\n frequency, d16,dmed,d84 = get_spectra(nums, SSC, band, rms)\n mfrequency, m16,mmed,m84 = get_models(nums, SSC, band)\n fig,ax = set_up_figure(SSC, band)\n plot_spectra(ax, frequency, d16,dmed,d84)\n plot_fitted_spectra(ax, mfrequency, m16,mmed,m84)\n label_lines(ax, dmed, band)\n format_figure(ax, frequency, dmed, band)\n save_figure(fig, band)", "def spectral_data(spectra):\n weights = np.concatenate([ s.ivar for s in spectra ])\n flux = np.concatenate([ s.flux for s in spectra ])\n wflux = weights * flux\n return (weights, flux, wflux)", "def spindle_attributes(self):\n try:\n self.channels\n except AttributeError:\n # create if doesn't exist\n self.channels = [x[0] for x in self.data.columns]\n\n dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute\n [setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]\n self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])\n self.spindle_events = {}\n self.spindle_rejects = {}", "def weight_mm(self,m1,m2):\n lw = 1.\n\n # particle id and isolation\n lw *= self._muIDISOWeight.value(m1.pt(),m1.eta(),'0')\n lw *= self._muIDISOWeight.value(m2.pt(),m2.eta(),'0')\n\n # Trigger\n hlt_sf_run2012_a = (self._muTRIGGERWeight_leg8_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_A.value(m2.pt(),m2.eta(),'0') +\\\n self._muTRIGGERWeight_leg17_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg8_A.value(m2.pt(),m2.eta(),'0') -\\\n self._muTRIGGERWeight_leg17_A.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_A.value(m2.pt(),m2.eta(),'0'))\n\n hlt_sf_run2012_b = (self._muTRIGGERWeight_leg8_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_B.value(m2.pt(),m2.eta(),'0') +\\\n self._muTRIGGERWeight_leg17_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg8_B.value(m2.pt(),m2.eta(),'0') -\\\n self._muTRIGGERWeight_leg17_B.value(m1.pt(),m1.eta(),'0')*self._muTRIGGERWeight_leg17_B.value(m2.pt(),m2.eta(),'0'))\n \n lw *= (0.5*hlt_sf_run2012_a + 0.5*hlt_sf_run2012_b) ##percentage according to the lumi in which they were not prescaled (apparently same efficinecy for AB)\n #lw *= 0.966 ## temporary solution!\n\n if abs(configuration.LeptonTnPfactor)<0.01 :\n return lw\n else:\n return lw + configuration.LeptonTnPfactor*self.uncertainty_mm(m1,m2)", "def flatNoisePellicle():\n #Get data\n 
wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def __init__(self, data1, data2, tail = 'two', significant_level=0.05):\r\n Critical_05 = pd.DataFrame({'2': [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0] ,\r\n '3': [-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 9.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 14.0, 14.0, 15.0, 15.0, 16.0, 16.0, 17.0, 17.0, 18.0, 18.0] ,\r\n '4': [-1.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 15.0, 16.0, 17.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 31.0] ,\r\n '5': [-1.0, 0.0, 1.0, 2.0, 3.0, 5.0, 6.0, 7.0, 8.0, 9.0, 11.0, 12.0, 13.0, 14.0, 15.0, 17.0, 18.0, 19.0, 20.0, 22.0, 23.0, 24.0, 25.0, 27.0, 28.0, 29.0, 30.0, 32.0, 33.0, 34.0, 35.0, 37.0, 38.0, 39.0, 40.0, 41.0, 43.0, 44.0, 45.0] ,\r\n '6': [-1.0, 1.0, 2.0, 3.0, 5.0, 6.0, 8.0, 10.0, 11.0, 13.0, 14.0, 16.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 29.0, 30.0, 32.0, 33.0, 35.0, 37.0, 38.0, 40.0, 42.0, 43.0, 45.0, 46.0, 48.0, 50.0, 51.0, 53.0, 55.0, 56.0, 58.0, 59.0] ,\r\n '7': [-1.0, 1.0, 3.0, 5.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, 32.0, 34.0, 36.0, 38.0, 40.0, 42.0, 44.0, 46.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 62.0, 64.0, 66.0, 68.0, 70.0, 72.0, 74.0] ,\r\n '8': [0, 2, 4, 6, 7, 10, 13, 15, 17, 19, 22, 24, 26, 29, 31, 34, 36, 38, 41, 43, 45, 48, 50, 53, 55, 57, 60, 62, 65, 67, 69, 72, 74, 77, 79, 81, 84, 86, 89] ,\r\n '9': [0, 2, 4, 7, 10, 12, 15, 17, 20, 23, 26, 28, 31, 34, 37, 39, 42, 45, 48, 50, 53, 56, 59, 62, 64, 67, 70, 73, 76, 78, 81, 84, 87, 89, 92, 95, 98, 101, 103] ,\r\n '10': [0, 3, 5, 8, 11, 14, 17, 20, 23, 26, 29, 33, 36, 39, 42, 45, 48, 52, 55, 58, 61, 64, 67, 71, 74, 77, 80, 83, 87, 90, 93, 96, 99, 103, 106, 109, 112, 115, 119] ,\r\n '11': [0, 3, 6, 9, 13, 16, 19, 23, 26, 30, 33, 37, 40, 44, 47, 51, 55, 58, 62, 65, 69, 73, 76, 80, 83, 87, 90, 94, 98, 101, 105, 108, 112, 116, 119, 123, 127, 130, 134] ,\r\n '12': [1, 4, 7, 11, 14, 18, 22, 26, 29, 33, 37, 41, 
45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149] ,\r\n '13': [1, 4, 8, 12, 16, 20, 24, 28, 33, 37, 41, 45, 50, 54, 59, 63, 67, 72, 76, 80, 85, 89, 94, 98, 102, 107, 111, 116, 120, 125, 129, 133, 138, 142, 147, 151, 156, 160, 165] ,\r\n '14': [1, 5, 9, 13, 17, 22, 26, 31, 36, 40, 45, 50, 55, 59, 64, 67, 74, 78, 83, 88, 93, 98, 102, 107, 112, 117, 122, 127, 131, 136, 141, 146, 151, 156, 161, 165, 170, 175, 180] ,\r\n '15': [1, 5, 10, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59, 64, 70, 75, 80, 85, 90, 96, 101, 106, 111, 117, 122, 127, 132, 138, 143, 148, 153, 159, 164, 169, 174, 180, 185, 190, 196] ,\r\n '16': [1, 6, 11, 15, 21, 26, 31, 37, 42, 47, 53, 59, 64, 70, 75, 81, 86, 92, 98, 103, 109, 115, 120, 126, 132, 137, 143, 149, 154, 160, 166, 171, 177, 183, 188, 194, 200, 206, 211] ,\r\n '17': [2, 6, 11, 17, 22, 28, 34, 39, 45, 51, 57, 63, 67, 75, 81, 87, 93, 99, 105, 111, 117, 123, 129, 135, 141, 147, 154, 160, 166, 172, 178, 184, 190, 196, 202, 209, 215, 221, 227] ,\r\n '18': [2, 7, 12, 18, 24, 30, 36, 42, 48, 55, 61, 67, 74, 80, 86, 93, 99, 106, 112, 119, 125, 132, 138, 145, 151, 158, 164, 171, 177, 184, 190, 197, 203, 210, 216, 223, 230, 236, 243] ,\r\n '19': [2, 7, 13, 19, 25, 32, 38, 45, 52, 58, 65, 72, 78, 85, 92, 99, 106, 113, 119, 126, 133, 140, 147, 154, 161, 168, 175, 182, 189, 196, 203, 210, 217, 224, 231, 238, 245, 252, 258] ,\r\n '20': [2, 8, 14, 20, 27, 34, 41, 48, 55, 62, 69, 76, 83, 90, 98, 105, 112, 119, 127, 134, 141, 149, 156, 163, 171, 178, 186, 193, 200, 208, 215, 222, 230, 237, 245, 252, 259, 267, 274] \r\n })\r\n\r\n Critical_1 = pd.DataFrame({'2': [-1.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 11.0] ,\r\n '3': [-1.0, -1.0, 0.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 5.0, 5.0, 6.0, 7.0, 7.0, 8.0, 9.0, 9.0, 10.0, 11.0, 11.0, 12.0, 13.0, 13.0, 14.0, 15.0, 15.0, 16.0, 17.0, 17.0, 18.0, 19.0, 19.0, 20.0, 21.0, 21.0, 22.0, 23.0, 23.0, 24.0] ,\r\n '4': [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 38.0, 39.0] ,\r\n '5': [0, 1, 2, 4, 5, 6, 8, 9, 11, 12, 13, 15, 16, 18, 19, 20, 22, 23, 25, 26, 28, 29, 30, 32, 33, 35, 36, 38, 39, 40, 42, 43, 45, 46, 48, 49, 50, 52, 53] ,\r\n '6': [0, 2, 3, 5, 7, 8, 10, 12, 14, 16, 17, 19, 21, 23, 25, 26, 28, 30, 32, 34, 36, 37, 39, 41, 43, 45, 46, 48, 50, 52, 54, 56, 57, 59, 61, 63, 65, 67, 68] ,\r\n '7': [0, 2, 4, 6, 8, 11, 13, 15, 17, 19, 21, 24, 26, 28, 30, 33, 35, 37, 39, 41, 44, 46, 48, 50, 53, 55, 57, 59, 61, 64, 66, 68, 70, 73, 75, 77, 79, 82, 84] ,\r\n '8': [1, 3, 5, 8, 10, 13, 15, 18, 20, 23, 26, 28, 31, 33, 36, 39, 41, 44, 47, 49, 52, 54, 57, 60, 62, 65, 68, 70, 73, 76, 78, 81, 84, 86, 89, 91, 94, 97, 99] ,\r\n '9': [1, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60, 63, 66, 69, 72, 75, 78, 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115] ,\r\n '10': [1, 4, 7, 11, 14, 17, 20, 24, 27, 31, 34, 37, 41, 44, 48, 51, 55, 58, 62, 65, 68, 72, 75, 79, 82, 86, 89, 93, 96, 100, 103, 107, 110, 114, 117, 121, 124, 128, 131] ,\r\n '11': [1, 5, 8, 12, 16, 19, 23, 27, 31, 34, 38, 42, 46, 50, 54, 57, 61, 65, 69, 73, 77, 81, 85, 89, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 131, 135, 139, 143, 147] ,\r\n '12': [2, 5, 9, 13, 17, 21, 26, 30, 34, 
38, 42, 47, 51, 55, 60, 64, 68, 72, 77, 81, 85, 90, 94, 98, 103, 107, 111, 116, 120, 124, 128, 133, 137, 141, 146, 150, 154, 159, 163] ,\r\n '13': [2, 6, 10, 15, 19, 24, 28, 33, 37, 42, 47, 51, 56, 61, 65, 70, 75, 80, 84, 89, 94, 98, 103, 108, 113, 117, 122, 127, 132, 136, 141, 146, 151, 156, 160, 165, 170, 175, 179] ,\r\n '14': [2, 7, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71, 77, 82, 87, 92, 97, 102, 107, 113, 118, 123, 128, 133, 138, 144, 149, 154, 159, 164, 170, 175, 180, 185, 190, 196] ,\r\n '15': [3, 7, 12, 18, 23, 28, 33, 39, 44, 50, 55, 61, 66, 72, 77, 83, 88, 94, 100, 105, 111, 116, 122, 128, 133, 139, 144, 150, 156, 161, 167, 172, 178, 184, 189, 195, 201, 206, 212] ,\r\n '16': [3, 8, 14, 19, 25, 30, 36, 42, 48, 54, 60, 65, 71, 77, 83, 89, 95, 101, 107, 113, 119, 125, 131, 137, 143, 149, 156, 162, 168, 174, 180, 186, 192, 198, 204, 210, 216, 222, 228] ,\r\n '17': [3, 9, 15, 20, 26, 33, 39, 45, 51, 57, 64, 70, 77, 83, 89, 96, 102, 109, 115, 121, 128, 134, 141, 147, 154, 160, 167, 173, 180, 186, 193, 199, 206, 212, 219, 225, 232, 238, 245] ,\r\n '18': [4, 9, 16, 22, 28, 35, 41, 48, 55, 61, 68, 75, 82, 88, 95, 102, 109, 116, 123, 130, 136, 143, 150, 157, 164, 171, 178, 185, 192, 199, 206, 212, 219, 226, 233, 240, 247, 254, 261] ,\r\n '19': [4, 10, 17, 23, 30, 37, 44, 51, 58, 65, 72, 80, 87, 94, 101, 109, 116, 123, 130, 138, 145, 152, 160, 167, 174, 182, 189, 196, 204, 211, 218, 226, 233, 241, 248, 255, 263, 270, 278] ,\r\n '20': [4, 11, 18, 25, 32, 39, 47, 54, 62, 69, 77, 84, 92, 100, 107, 115, 123, 130, 138, 146, 154, 161, 169, 177, 185, 192, 200, 208, 216, 224, 231, 239, 247, 255, 263, 271, 278, 286, 294] })\r\n \r\n self.critical05 = Critical_05\r\n self.critical1 = Critical_1\r\n\r\n # Mann Whitney Test \r\n x = np.asarray(data1)\r\n y = np.asarray(data2)\r\n n1 = len(x)\r\n n2 = len(y)\r\n ranked = rankdata(np.concatenate((x, y)))\r\n rankx = ranked[0:n1] # get the x-ranks\r\n u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x\r\n u2 = n1*n2 - u1 # remainder is U for y\r\n\r\n # use the min(u1, u2) as u-stat\r\n if u1 <= u2:\r\n stat_a, larger = u1, 1\r\n else:\r\n stat_a, larger = u2, 2\r\n\r\n # compute the effect size \r\n effect = 1 - (2*stat_a)/(n1*n2) \r\n\r\n # Mann-Whitney test \r\n if min(n1, n2) < 2: # sample size too small - cannot do test\r\n return 'Sorry, sample size is too small to test significance. 
Please collect more data...'\r\n\r\n # Do test for small sample size \r\n elif 2<=min(n1, n2) <= 20 and 2 <= max(n1, n2) <= 40:\r\n if tail != 'two': # only have data for two tail testing\r\n return 'Sorry, sample size too small, only two-tailed test available...'\r\n\r\n u_05 = Critical_05[str(min(n1, n2))][max(n1, n2)-2] # u=critical at signif level .05\r\n u_1 = Critical_1[str(min(n1, n2))][max(n1, n2)-2] # u=critical at signif level .1\r\n\r\n if significant_level == 0.05 and stat_a <= u_05:\r\n self.significance = True\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_05\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n elif significant_level == 0.1 and stat_a <= u_1:\r\n self.significance = True\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_1\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n elif significant_level == 0.05:\r\n self.significance = False\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_05\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n else:\r\n self.significance = False\r\n self.sample_size = 'Small'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.criticalu = u_1\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger\r\n\r\n else:\r\n T = tiecorrect(ranked)\r\n sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)\r\n \r\n if T == 0:\r\n raise ValueError('All numbers are identical in mannwhitneyu')\r\n meanrank = n1*n2/2.0 + 0.5 \r\n\r\n if tail == 'two':\r\n bigu = max(u1, u2)\r\n elif tail == 'less':\r\n bigu = u1\r\n elif tail == 'more':\r\n bigu = u2\r\n z = (bigu - meanrank) / sd\r\n \r\n if tail == 'two':\r\n p = 2 * norm.sf(abs(z))\r\n else:\r\n p = norm.sf(z)\r\n if p <= significant_level:\r\n self.significance = True\r\n else:\r\n self.significance = False\r\n \r\n self.sample_size = 'Large'\r\n self.n1 = n1\r\n self.n2 = n2\r\n self.p = p\r\n self.u = stat_a\r\n self.effectsize = effect\r\n self.largergroup = larger", "def calc_spindle_means(self):\n\n print('Aligning spindles...')\n # align spindles accoridng to timedelta & combine into single dataframe\n spindle_aggregates = {}\n datatypes = ['Raw', 'spfilt']\n for chan in self.spindles.keys():\n # only use channels that have spindles\n if self.spindles[chan]:\n spindle_aggregates[chan] = {}\n for datatype in datatypes:\n # set the base df\n agg_df = pd.DataFrame(self.spindles[chan][0][datatype])\n agg_df = agg_df.rename(columns={datatype:'spin_0'})\n rsuffix = list(range(1, len(self.spindles[chan])))\n # join on the index for each spindle\n agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')\n spindle_aggregates[chan][datatype] = agg_df\n \n print('Calculating spindle statistics...')\n # create a new multiindex dataframe for calculations\n spindle_means = {}\n calcs = ['count', 'mean', 'std' ,'sem']\n tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]\n columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])\n for datatype in datatypes:\n spindle_means[datatype] = pd.DataFrame(columns=columns)\n # fill the dataframe\n for chan in spindle_aggregates.keys():\n spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)\n spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)\n spindle_means[datatype][(chan, 
'std')] = spindle_aggregates[chan][datatype].std(axis=1)\n spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)\n \n self.spindle_aggregates = spindle_aggregates\n self.spindle_means = spindle_means\n print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statisics stored in obj.spindle_means dataframe.\\n')", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def coadd(self, sp, method='pixel'):\n\t\tif method == 'pixel':\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tif self.apply_sigma_mask:\n\t\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\telse:\n\t\t\t\tself.mask = []\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\telif method == 'wavelength':\n\t\t\tself_supers = copy.deepcopy(self)\n\t\t\tg = interpolate.interp1d(self.wave, self.flux)\n\t\t\tsp_supers = copy.deepcopy(sp)\n\t\t\tf = interpolate.interp1d(sp.wave, sp.flux)\n\t\t\t## 10x supersample the average difference of \n\t\t\t## the wavelength\n\t\t\t#step0 = np.mean(np.diff(self.wave))/10\n\t\t\t#self_supers.wave = np.arange(self.wave[0],\n\t\t\t#\tself.wave[-1],step0)\n\t\t\tself_supers.flux = g(self_supers.wave)\n\t\t\tself_supers.oriWave = np.arange(self.oriWave[0],\n\t\t\t\tself.oriWave[-1],(self.oriWave[-1]-self.oriWave[0])/10240)\n\t\t\tg1 = interpolate.interp1d(self.oriWave, self.oriFlux)\n\t\t\tself_supers.oriFlux = g1(self_supers.oriWave)\n\n\t\t\t#step = np.mean(np.diff(sp.wave))/10\n\t\t\t#sp_supers.wave = np.arange(sp.wave[0],sp.wave[-1],step)\n\t\t\t#sp_supers.flux = 
f(sp_supers.wave)\n\t\t\tsp_supers.oriWave = np.arange(sp.oriWave[0],\n\t\t\t\tsp.oriWave[-1],(sp.oriWave[-1]-sp.oriWave[0])/10240)\n\t\t\tf1 = interpolate.interp1d(sp.oriWave, sp.oriFlux)\n\t\t\tsp_supers.oriFlux = f1(sp_supers.oriWave)\n\n\t\t\t## calculate the max cross correlation value\n\t\t\tdef xcorr(a0,b0,shift):\n\t\t\t\t\"\"\"\n\t\t\t\tShift is the index number after supersampling \n\t\t\t\tboth of the spectra.\n\t\t\t\t\"\"\"\n\t\t\t\ta = copy.deepcopy(a0)\n\t\t\t\tb = copy.deepcopy(b0)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\tlength = b.oriFlux.shape[0]\n\t\t\t\tif shift >= 0:\n\t\t\t\t\tmask_a = np.arange(0,shift,1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(length-1,length-shift-1,-1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\telif shift < 0:\n\t\t\t\t\tmask_a = np.arange(length-1,length+shift-1,-1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(0,-shift,1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\t#b.wave += shift * step\n\t\t\t\t## discard the points where the wavelength values\n\t\t\t\t## are larger\n\t\t\t\t#condition = (a.wave > b.wave[0]) & (a.wave < b.wave[-1])\n\t\t\t\t\n\t\t\t\t#a.flux = a.flux[np.where(condition)]\n\t\t\t\t#a.wave = a.wave[np.where(condition)]\n\t\t\t\t## resampling the telluric model\n\t\t\t\t#b.flux = np.array(smart.integralResample(xh=b.wave, \n\t\t\t\t#\tyh=b.flux, xl=a.wave))\n\t\t\t\t\n\t\t\t\treturn np.inner(a.oriFlux, b.oriFlux)/\\\n\t\t\t\t(np.average(a.oriFlux)*np.average(b.oriFlux))/a.oriFlux.shape[0]\n\n\t\t\txcorr_list = []\n\t\t\t## mask the ending pixels\n\t\t\tself_supers2 = copy.deepcopy(self_supers)\n\t\t\tsp_supers2 = copy.deepcopy(sp_supers)\n\t\t\tself_supers2.wave = self_supers2.wave[1000:-1000]\n\t\t\tself_supers2.flux = self_supers2.flux[1000:-1000]\n\t\t\tsp_supers2.wave = sp_supers2.wave[1000:-1000]\n\t\t\tsp_supers2.flux = sp_supers2.flux[1000:-1000]\n\t\t\tfor shift in np.arange(-10,10,1):\n\t\t\t\txcorr_list.append(xcorr(self_supers2,sp_supers2,shift))\n\n\t\t\t## dignostic plot for cc result\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(np.arange(-10,10,1),np.array(xcorr_list),'k-')\n\t\t\tplt.show()\n\t\t\tplt.close()\n\n\t\t\tstep = np.absolute(np.mean(np.diff(sp_supers.wave)))\n\t\t\tbestshift = np.arange(-10*step,10*step,step)[np.argmax(xcorr_list)]\n\t\t\tsp_supers.oriWave += bestshift\n\t\t\t## discard the points where the wavelength values\n\t\t\t## are larger\n\t\t\tcondition = (self.oriWave > sp_supers.oriWave[0])\\\n\t\t\t& (self.oriWave < sp_supers.oriWave[-1])\n\n\t\t\tself.oriFlux = self.oriFlux[np.where(condition)]\n\t\t\tself.oriWave = self.oriWave[np.where(condition)]\n\t\t\tself.oriNoise = self.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriNoise = sp_supers.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriFlux = np.array(smart.integralResample(xh=sp_supers.oriWave, \n\t\t\t\tyh=sp_supers.oriFlux, xl=self.oriWave))\n\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp_supers.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp_supers.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\tself.wave = 
np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn self", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n 
axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, 
idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)", "def process_traces(self, s, h):\n # filter data\n if PAR.FREQLO and PAR.FREQHI:\n s = sbandpass(s, h, PAR.FREQLO, PAR.FREQHI)\n\n return s", "def _summarize_trace(power_trace, ci_alpha=None):\n power_trace = np.atleast_2d(power_trace)\n pwr_mean = power_trace.mean(0)\n\n if power_trace.shape[0] == 1:\n pwr_err = np.zeros(power_trace.shape) * np.nan\n elif ci_alpha is None:\n pwr_err = power_trace.std(0)\n else:\n pwr_err = confidence_bound(power_trace, alpha=ci_alpha, axis=0)\n\n pwr_lo = pwr_mean - pwr_err\n pwr_hi = pwr_mean + pwr_err\n\n return pwr_mean, pwr_lo, pwr_hi", "def one_transition_spectrum_gauss(self,tr):\n \n \n fa = tr[\"fa\"] # Frequency axis\n HWHH = tr[\"HWHH\"] # Half width at the half hight (maximum)\n dd = tr[\"dd\"] # transition dipole strength\n rr = tr[\"rr\"] # transition dipole strength\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"]+self.rwa # frequency\n \n # LineShape = lambda p, x: (x/(p[1]*np.sqrt(2*m.pi))*np.exp(-0.5*((x-p[0])/p[1])**2))\n # broad = broad/np.sqrt(2*np.log(2))\n sigma = HWHH/numpy.sqrt(2*numpy.log(2))\n \n # x = ta.data\n \n data = (fa.data/(sigma*numpy.sqrt(2*numpy.pi))*numpy.exp(-0.5*((fa.data-om)/sigma)**2))\n data_abs = dd*data\n data_CD = rr*data\n data_LD = ld*data\n \n return data_abs,data_CD, data_LD", "def create_python_data(self) -> dict:\r\n s = self.scale\r\n minimum, maximum = self.get_min_max()\r\n diff = maximum - minimum\r\n\r\n output = {}\r\n\r\n # Create the data for the scatters\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output[name] = {}\r\n output[name][\"meta\"] = self.scatters[name]\r\n output[name][\"type\"] = \"scatter\"\r\n\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in data[mapping[\"x\"]]], dtype=np.float32\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in data[mapping[\"y\"]]], dtype=np.float32\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in data[mapping[\"z\"]]], dtype=np.float32\r\n )\r\n\r\n if mapping[\"labels\"] in data:\r\n # Make sure that the labels are always strings\r\n output[name][\"labels\"] = list(map(str, data[mapping[\"labels\"]]))\r\n\r\n if mapping[\"s\"] in data:\r\n output[name][\"s\"] = np.array(data[mapping[\"s\"]], dtype=np.float32)\r\n\r\n output[name][\"colors\"] = [{}] * len(data[mapping[\"c\"]])\r\n for s in range(len(data[mapping[\"c\"]])):\r\n if mapping[\"cs\"] in data:\r\n colors = np.array([cmaps[s](x) for x in data[mapping[\"c\"]][s]])\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][s][i]\r\n colors[i] = 
np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output[name][\"colors\"][s][\"r\"] = np.array(\r\n colors[:, 0], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"g\"] = np.array(\r\n colors[:, 1], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"b\"] = np.array(\r\n colors[:, 2], dtype=np.float32\r\n )\r\n else:\r\n colors = np.array([cmaps[s](x) for x in data[mapping[\"c\"]][s]])\r\n colors = np.round(colors * 255.0)\r\n output[name][\"colors\"][s][\"r\"] = np.array(\r\n colors[:, 0], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"g\"] = np.array(\r\n colors[:, 1], dtype=np.float32\r\n )\r\n output[name][\"colors\"][s][\"b\"] = np.array(\r\n colors[:, 2], dtype=np.float32\r\n )\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output[name] = {}\r\n output[name][\"meta\"] = self.trees[name]\r\n output[name][\"type\"] = \"tree\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in x_t], dtype=np.float32\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in y_t], dtype=np.float32\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in z_t], dtype=np.float32\r\n )\r\n else:\r\n output[name][\"x\"] = np.array(\r\n [s * (x - minimum) / diff for x in data[mapping[\"x\"]]],\r\n dtype=np.float32,\r\n )\r\n output[name][\"y\"] = np.array(\r\n [s * (y - minimum) / diff for y in data[mapping[\"y\"]]],\r\n dtype=np.float32,\r\n )\r\n output[name][\"z\"] = np.array(\r\n [s * (z - minimum) / diff for z in data[mapping[\"z\"]]],\r\n dtype=np.float32,\r\n )\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output[name][\"r\"] = np.array(colors[:, 0], dtype=np.float32)\r\n output[name][\"g\"] = np.array(colors[:, 1], dtype=np.float32)\r\n output[name][\"b\"] = np.array(colors[:, 2], dtype=np.float32)\r\n\r\n return output", "def GetUnivariateSmry(ds,quantileCuts=[0.05 , 0.1, 0.2, 0.25,0.3, 0.4, 0.5, 0.6, 0.7, 0.75,0.8, 0.9, 0.95,0.98,0.99]):\n# Quantile distn:\n d1 = ds.quantile(quantileCuts).T\n d1.reset_index(inplace=True)\n qNames = [f'Q{int(x* 100)}' for x in quantileCuts]\n newNames = ['index']\n newNames.extend(qNames)\n d1.columns = newNames \n \n# Other Basic metrics\n d2 = pd.DataFrame(ds.isna().sum(),columns = ['NullCount'])\n d2['DataType'] = d2.index.map(ds.dtypes)\n d2['BlankCount'] = d2.index.map((ds=='').sum())\n d2['NonNullCount'] = d2.index.map(ds.notna().sum())\n 
d2['FillPerc']= round(d2['NonNullCount']/ds.shape[0],2)\n d2['UniqueCount'] = d2.index.map(ds.nunique())\n d2['Min'] = ds.min(numeric_only=True)\n d2['Mean'] = ds.mean()\n d2['NonZeroMean'] = ds.replace(0, np.nan).mean()\n d2['Max'] = ds.max(numeric_only=True)\n d2['Total']= ds.sum(numeric_only=True)\n d2['std'] = ds.std()\n d2['skewness'] = ds.skew()\n d2['kurtosis'] = ds.kurtosis()\n d2.reset_index(inplace=True)\n \n# creating master summary\n d = d2.merge(d1, on='index', how='left')\n d.rename(columns={\"index\":\"ParameterName\"},inplace=True)\n \n# re-arranging columns\n first_cols = ['ParameterName','DataType']\n last_cols = [col for col in d.columns if col not in first_cols]\n d = d[first_cols+last_cols]\n \n return d", "def side_traces(x,im):\n s0 = x['side-traces'][0]\n s1 = x['side-traces'][1]\n t1 = Scatter(y=s0)\n t2 = Scatter(y=s1)\n\n #put_thing(im,x['abs-line'],(255,0,0),(0,0),3)\n\n groups = []\n diff_traces = []\n markers = []\n y3 = []\n TriangleHumps.get_dimensions(x,debug_groups=groups,debug_diffs=diff_traces,debug_markers = markers, im = im,y3=y3)\n mode = stats.mode(y3)[0][0]\n trigger = mode*2+1\n t3 = Scatter(y=y3)\n\n annotations = []\n diff_traces = [Scatter(y=v) for v in diff_traces]\n t4 = Scatter(x=markers,y=[10]*len(markers),mode = 'markers+text')\n for gru in groups:\n for hump in gru:\n annotations.append({\n 'x':hump['range'][0],\n 'y':trigger,\n 'text':'%d,%d'%(hump['area'],hump['length']),\n })\n\n name = 'mode=%d,trigger=%d,groups=%d' % (mode,trigger,len(groups))\n \n #return (t1,t2,t3,)\n #print('markers %d:' % x['id'],markers,[trigger]*len(markers))\n return [t3,t4,] + diff_traces,annotations, name", "def skystats(stamp):\n\t\n\tif isinstance(stamp, galsim.Image):\n\t\ta = stamp.array\n\t\t# Normally there should be a .transpose() here, to get the orientation right.\n\t\t# But in the present case it doesn't change anything, and we can skip it.\n\telse:\n\t\ta = stamp # Then we assume that it's simply a numpy array.\n\t\n\tedgepixels = np.concatenate([\n\t\t\ta[0,1:], # left\n\t\t\ta[-1,1:], # right\n\t\t\ta[:,0], # bottom\n\t\t\ta[1:-1,-1] # top\n\t\t\t])\n\tassert len(edgepixels) == 2*(a.shape[0]-1) + 2*(a.shape[0]-1)\n\n\t# And we convert the mad into an estimate of the Gaussian std:\n\treturn {\n\t\t\"std\":np.std(edgepixels), \"mad\": 1.4826 * mad(edgepixels),\n\t\t\"mean\":np.mean(edgepixels), \"med\":np.median(edgepixels),\n\t\t\"stampsum\":np.sum(a)\n\t\t}", "def raw_processing(self):\n well_dilution_code = {'e': 5, 'f': 6, 'g': 7, 'h': 8}\n\n for well in self.data_labels:\n x = 10 ** well_dilution_code[well[-1]]\n y = self.film_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n z = self.plank_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n\n self.film_conc.append(y)\n self.plank_conc.append(z)", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def consolidate_unitary_EPSP_traces(source_dict):\n trace_len = int((context.ISI['units'] + context.trace_baseline) / context.dt)\n target_dict = {}\n\n for syn_group in source_dict:\n if syn_group not in target_dict:\n target_dict[syn_group] = {}\n num_syn_ids = len(context.syn_id_dict[syn_group])\n for syn_condition in source_dict[syn_group]:\n if syn_condition not in target_dict[syn_group]:\n target_dict[syn_group][syn_condition] = {}\n for rec_name in context.synaptic_integration_rec_names:\n target_array = np.empty((num_syn_ids, trace_len))\n for i, syn_id in enumerate(context.syn_id_dict[syn_group]):\n target_array[i,:] = 
source_dict[syn_group][syn_condition][syn_id][rec_name]\n target_dict[syn_group][syn_condition][rec_name] = target_array\n\n return target_dict", "def emission_vs_depth(filename,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n if p.add:\n ax = plt.gca(); c = 'r'\n if not p.add:\n fig,ax = plt.subplots(figsize=(8,6)); c = 'b'\n st_cols = ['depth','[CII]158','[OI]63','CO(1-0)','CO(2-1)','CO(3-2)']\n st = pd.read_csv(p.d_cloudy.replace('ext/','') + 'NH/' + filename + '.str',sep='\\t',skiprows=1,names=st_cols)\n dx = np.append(0,np.diff(st.depth))\n pc2cm = u.parsec.to(u.cm)\n # Derive mass-luminosity ratio\n import astropy.constants as c\n M = 1e3 * c.m_p.value * st.depth.values.max() / u.M_sun.to(u.kg) \n cloudy_lin_header = ['#lineslist','C 1 609.590m','C 1 370.269m','C 2 157.636m','O 1 63.1679m','O 1 145.495m','O 3 88.3323m','N 2 205.244m','N 2 121.767m','CO 2600.05m','CO 1300.05m','CO 866.727m','CO 650.074m','CO 325.137m','H2 17.0300m','H2 12.2752m','H2 9.66228m','H2 8.02362m','H2 6.90725m','H2 6.10718m','H2 5.50996m','O 4 25.8832m','NE 2 12.8101m','NE 3 15.5509m','S 3 18.7078m','FE 2 25.9811m']\n cloudy_lin = pd.read_csv(p.d_cloudy.replace('ext/','') + 'NH/' + filename + '.lin',\\\n sep='\\t',names=cloudy_lin_header,comment='#').reset_index(drop=True)\n Cloudy_lines_dict = aux.get_Cloudy_lines_dict()\n cloudy_lin = cloudy_lin.rename(columns=Cloudy_lines_dict)\n L = cloudy_lin['CO(1-0)'][0] * u.erg.to('J') / c.L_sun.value\n print(L,M)\n ax.plot(st.depth/pc2cm,dx*st['CO(1-0)'],'-',color='m',label='CO(1-0): %.2e Lsun/Msun' % (L/M))\n L = cloudy_lin['[OI]63'][0] * u.erg.to('J') / c.L_sun.value\n print(L,M)\n ax.plot(st.depth/pc2cm,dx*st['[OI]63'],'g--',label='[OI]63: %.2e Lsun/Msun' % (L/M))\n ax.set_yscale(\"log\")\n ax.set_xscale(\"log\")\n ax.set_xlabel('Depth [pc]')\n ax.set_ylabel('Intensity [ergs/s/cm^2]')\n ax.legend()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig('plots/look-up/emission_%s' % filename,dpi=200)", "def weighted_ps(self, mfactor=1.1):\n self.weightedpower=[]\n #ksum=np.sum(self.psdata[self.klist)\n Nk=int(len(self.klist)/mfactor)\n for i in range(self.Nsubs):\n nsum=np.sum(self.psdata[i][1][0:Nk])\n total=np.sum(np.array([self.psdata[i][1][j]*self.powerspectra[i][j] for j in range(Nk)]))\n self.weightedpower.append(total/nsum)\n\n # also find correlation\n self.corr=[]\n for i in range(self.Nsubs):\n self.corr.append(self.ds[i]*self.weightedpower[i])\n\n self.corr_mean=np.mean(self.corr)\n self.corr_sigma=np.sqrt(np.var(self.corr))", "def get_traces(self, conc_traces, spectra):\n # linear fit of the fitted_concs to the spectra CANNOT fit intercept here!\n #self.regressor.fit(conc_traces,spectral_trace)\n #fitted_spectral_traces = self.regressor.predict(conc_traces)\n fitted_spectral_traces = spectra.dot(conc_traces.T) \n return fitted_spectral_traces.T", "def log_weights_statistics(self):\n for weight_name, weight_parameter in self._weights.items():\n for statistic_function in self._statistics_functions:\n self._weights_statistics[statistic_function.__name__][\n weight_name\n ].append(float(statistic_function(weight_parameter)))", "def create_data(self) -> str:\r\n s = self.scale\r\n mini, maxi = self.get_min_max()\r\n diff = maxi - mini\r\n\r\n output = \"const data = {\\n\"\r\n\r\n # Create the data for the scatters\r\n # TODO: If it's not interactive, labels shouldn't be exported.\r\n for name, data in self.scatters_data.items():\r\n mapping = 
self.scatters[name][\"mapping\"]\r\n colormaps = self.scatters[name][\"colormap\"]\r\n cmaps = [None] * len(colormaps)\r\n\r\n for i, colormap in enumerate(colormaps):\r\n if isinstance(colormap, str):\r\n cmaps[i] = plt.cm.get_cmap(colormap)\r\n else:\r\n cmaps[i] = colormap\r\n\r\n output += name + \": {\\n\"\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"labels\"] in data:\r\n fmt_labels = [\"'{0}'\".format(s) for s in data[mapping[\"labels\"]]]\r\n output += \"labels: [\" + \",\".join(fmt_labels) + \"],\\n\"\r\n\r\n if mapping[\"s\"] in data:\r\n output += \"s: [\"\r\n\r\n for series in range(len(data[mapping[\"s\"]])):\r\n output += (\r\n \"[\"\r\n + \",\".join(map(str, np.round(data[mapping[\"s\"]][series], 3)))\r\n + \"],\\n\"\r\n )\r\n\r\n output += \"],\\n\"\r\n\r\n output += \"colors: [\\n\"\r\n for series in range(len(data[mapping[\"c\"]])):\r\n output += \"{\\n\"\r\n if mapping[\"cs\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n\r\n for i, c in enumerate(colors):\r\n hsl = np.array(colour.rgb2hsl(c[:3]))\r\n hsl[1] = hsl[1] - hsl[1] * data[mapping[\"cs\"]][series][i]\r\n colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)\r\n\r\n colors = np.round(colors * 255.0)\r\n\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n elif mapping[\"c\"] in data:\r\n colors = np.array(\r\n [cmaps[series](x) for x in data[mapping[\"c\"]][series]]\r\n )\r\n colors = np.round(colors * 255.0)\r\n output += (\r\n \"r: [\" + \",\".join(map(str, map(int, colors[:, 0]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"g: [\" + \",\".join(map(str, map(int, colors[:, 1]))) + \"],\\n\"\r\n )\r\n output += (\r\n \"b: [\" + \",\".join(map(str, map(int, colors[:, 2]))) + \"],\\n\"\r\n )\r\n output += \"},\\n\"\r\n\r\n output += \"]\"\r\n output += \"},\\n\"\r\n\r\n for name, data in self.trees_data.items():\r\n mapping = self.trees[name][\"mapping\"]\r\n point_helper = self.trees[name][\"point_helper\"]\r\n\r\n output += name + \": {\\n\"\r\n\r\n if point_helper is not None and point_helper in self.scatters_data:\r\n scatter = self.scatters_data[point_helper]\r\n scatter_mapping = self.scatters[point_helper][\"mapping\"]\r\n\r\n x_t = []\r\n y_t = []\r\n z_t = []\r\n\r\n for i in range(len(data[mapping[\"from\"]])):\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"from\"]][i]])\r\n x_t.append(scatter[scatter_mapping[\"x\"]][data[mapping[\"to\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"from\"]][i]])\r\n y_t.append(scatter[scatter_mapping[\"y\"]][data[mapping[\"to\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"from\"]][i]])\r\n z_t.append(scatter[scatter_mapping[\"z\"]][data[mapping[\"to\"]][i]])\r\n\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in x_t]\r\n output += f\"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - 
mini) / diff, 3) for y in y_t]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in z_t]\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n else:\r\n x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping[\"x\"]]]\r\n output += \"x: [\" + \",\".join(map(str, x_norm)) + \"],\\n\"\r\n\r\n y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping[\"y\"]]]\r\n output += \"y: [\" + \",\".join(map(str, y_norm)) + \"],\\n\"\r\n\r\n z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping[\"z\"]]]\r\n\r\n output += \"z: [\" + \",\".join(map(str, z_norm)) + \"],\\n\"\r\n\r\n if mapping[\"c\"] in data:\r\n colormap = self.trees[name][\"colormap\"]\r\n cmap = None\r\n if isinstance(colormap, str):\r\n cmap = plt.cm.get_cmap(colormap)\r\n else:\r\n cmap = colormap\r\n\r\n colors = np.array([cmap(x) for x in data[mapping[\"c\"]]])\r\n colors = np.round(colors * 255.0)\r\n output += \"r: [\" + \",\".join(map(str, colors[:, 0])) + \"],\\n\"\r\n output += \"g: [\" + \",\".join(map(str, colors[:, 1])) + \"],\\n\"\r\n output += \"b: [\" + \",\".join(map(str, colors[:, 2])) + \"],\\n\"\r\n\r\n output += \"},\\n\"\r\n\r\n output += \"};\\n\"\r\n\r\n return output", "def add_supplementary_traces(recording, stimulus=None, derivative=None, timeconstant=2.6):\n if stimulus is None:\n stimulus = recording.stimulus\n\n pos_only = np.zeros_like(stimulus)\n neg_only = np.zeros_like(stimulus)\n pos_only[np.where(stimulus > 0)[0]] = stimulus[np.where(stimulus > 0)[0]] # positive stimulus only\n neg_only[np.where(stimulus < 0)[0]] = stimulus[np.where(stimulus < 0)[0]] # negative stimulus only\n\n if derivative is None:\n derivative = np.gradient(stimulus)\n\n supp_data = {'deriv_stim': derivative,\n 'abs_deriv_stim': np.abs(np.gradient(stimulus)),\n 'pos_only_stim': pos_only,\n 'abs_neg_only_stim': np.abs(neg_only)} # dictionary of supplementary data to add\n\n for data_name in supp_data: # put all supp. data in rec\n recording.add_supp_single_data(s_name=data_name, s_data=supp_data[data_name])\n\n pos_deriv = derivative.copy()\n pos_deriv[pos_deriv < 0] = 0\n neg_deriv = derivative.copy()\n neg_deriv[neg_deriv > 0] = 0\n recording.add_supp_single_data('pos_deriv_stim', pos_deriv) # positive derivative only\n recording.add_supp_single_data('neg_deriv_stim', neg_deriv) # postiive derivative only\n\n # Add exponentially smoothed functions. 
Especially useful for fast derivatives.\n recording.add_supp_single_data('conv_stim', exp_smooth(stimulus, time_constant=timeconstant))\n recording.add_supp_single_data('conv_deriv_stim', exp_smooth(derivative, time_constant=timeconstant))\n recording.add_supp_single_data('conv_pos_stim', exp_smooth(recording.pos_only_stim, time_constant=timeconstant))\n recording.add_supp_single_data('conv_neg_stim', exp_smooth(recording.abs_neg_only_stim, time_constant=timeconstant))\n recording.add_supp_single_data('conv_pos_deriv_stim', exp_smooth(recording.pos_deriv_stim, time_constant=timeconstant))\n recording.add_supp_single_data('conv_neg_deriv_stim', exp_smooth(recording.neg_deriv_stim, time_constant=timeconstant))\n recording.add_supp_single_data('abs_conv_neg_deriv_stim', np.abs(exp_smooth(recording.neg_deriv_stim, time_constant=timeconstant)))\n recording.add_supp_single_data('abs_conv_all_deriv_stim', np.abs(exp_smooth(derivative, time_constant=timeconstant)))", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def get_traces(self, traces, **kwargs):\n self.resource.clear()\n sweep = kwargs.get(\"sweep\", False)\n\n name_prefix = kwargs.get(\"name_prefix\", \"\")\n if name_prefix:\n name_prefix += \" - \"\n\n channels = OrderedDict()\n for trace in traces:\n ch = trace[\"channel\"]\n if ch not in channels.keys():\n channels[ch] = {\n \"frequency\": None,\n \"traces\": list()}\n channels[ch][\"traces\"].append(trace)\n\n if sweep is True:\n self.sweep(channels=list(channels.keys()))\n\n traces = []\n for ch, ch_data in channels.items():\n frequency = ch_data[\"frequency\"] = self.get_frequency()\n for trace in ch_data[\"traces\"]:\n self.scpi.set_selected_meas_by_number(trace[\"channel\"], trace[\"measurement number\"])\n sdata = self.scpi.query_data(trace[\"channel\"], \"SDATA\")\n s = sdata[::2] + 1j * sdata[1::2]\n ntwk = skrf.Network()\n ntwk.s = s\n ntwk.frequency = frequency\n ntwk.name = name_prefix + trace.get(\"parameter\", \"trace\")\n traces.append(ntwk)\n return traces", "def calc_source_blend_params(params,log):\n\n source = photometry_classes.Star()\n\n source.fs_g = params['f_s_g']\n source.sig_fs_g = params['sig_f_s_g']\n (source.g, source.sig_g) = flux_to_mag_pylima(source.fs_g,source.sig_fs_g)\n\n source.fs_r = params['f_s_r']\n source.sig_fs_r = params['sig_f_s_r']\n (source.r, source.sig_r) = flux_to_mag_pylima(source.fs_r,source.sig_fs_r)\n\n source.fs_i = params['f_s_i']\n source.sig_fs_i = params['sig_f_s_i']\n (source.i, source.sig_i) = flux_to_mag_pylima(source.fs_i,source.sig_fs_i)\n\n source.compute_colours(use_inst=True)\n source.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Source measured photometry:')\n log.info(source.summary(show_mags=True))\n log.info(source.summary(show_mags=False,show_colours=True))\n log.info(source.summary(show_mags=False,johnsons=True))\n\n blend = photometry_classes.Star()\n\n blend.fs_g = params['f_b_g']\n blend.sig_fs_g = params['sig_f_b_g']\n (blend.g, blend.sig_g) = flux_to_mag_pylima(blend.fs_g,blend.sig_fs_g)\n\n blend.fs_r = params['f_b_r']\n blend.sig_fs_r = params['sig_f_b_r']\n (blend.r, blend.sig_r) = flux_to_mag_pylima(blend.fs_r,blend.sig_fs_r)\n\n blend.fs_i = params['f_b_i']\n blend.sig_fs_i = params['sig_f_b_i']\n (blend.i, blend.sig_i) = flux_to_mag_pylima(blend.fs_i,blend.sig_fs_i)\n\n blend.compute_colours(use_inst=True)\n blend.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Blend measured photometry:')\n 
log.info(blend.summary(show_mags=True))\n log.info(blend.summary(show_mags=False,show_colours=True))\n log.info(blend.summary(show_mags=False,johnsons=True))\n\n return source, blend" ]
[ "0.53677773", "0.52855563", "0.52082103", "0.5192049", "0.50655925", "0.50220954", "0.5019231", "0.4980716", "0.496982", "0.49639016", "0.496379", "0.49591964", "0.49528778", "0.49409777", "0.4926693", "0.49150053", "0.48961598", "0.4841809", "0.48396593", "0.48142034", "0.48041737", "0.4799263", "0.47968122", "0.4772713", "0.4767107", "0.47551715", "0.47549874", "0.4741252", "0.47255725", "0.47172722" ]
0.5468182
1
update learning rate of optimizers
def updatelearningrate(self, epoch): self.lr = getlearningrate(epoch=epoch, opt=self.opt) # update learning rate of model optimizer if isinstance(self.model, list): count = 0 for param_group in self.optimzer.param_groups: # if type(model) is <list> then update modules with different learning rate param_group['lr'] = self.lr count += 1 # print ">>> count is:", count-1 else: for param_group in self.optimzer.param_groups: param_group['lr'] = self.lr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def update_learning_rate(self, it):\n self.scheduler.step()\n for param_group in self.optimizer.param_groups:\n v = param_group['lr']\n self.tb_logger.add_scalar('train/lr', v, it)", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def adjust_learning_rate(optimizer):\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0\n group['step'] += 1\n\n group['lr'] = args.lr / (1 + group['step'] * args.lr_decay)", "def update_learning_rate(self):\r\n self.scheduler.step(self.clock.epoch)", "def adjust_learning_rate(args, optimizer, epoch):\n if (epoch*3==args.epochs) or (epoch*3==2*args.epochs):\n lr = args.lr * (0.1 ** (epoch*3//args.epochs))\n print(\"Changing Learning Rate to {}\".format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer):\n for group in optimizer.param_groups:\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n group['lr'] = args.lr * (\n 1.0 - float(group['step']) * float(args.batch_size) / (args.n_triplets * float(args.epochs)))\n return", "def adjust_learning_rate(self, optimizer, epoch, args):\n lr = args.learning_rate * (0.1 ** (epoch // 30))\n # print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(cfg, optimizer):\n for idx, group in enumerate(optimizer.param_groups):\n init_lr = cfg.TRAINING.LR\n if 'step' not in group:\n group['step'] = 0.\n else:\n group['step'] += 1.\n\n group['lr'] = init_lr * (\n 1.0 - float(group['step']) * float(cfg.TRAINING.BATCH_SIZE) /\n (cfg.TRAINING.N_TRIPLETS * float(cfg.TRAINING.EPOCHS)))\n return", "def adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Comes from pytorch demo\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n 
param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args, step):\n lr = args.lr * (0.1 ** (epoch // step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(self, optimizer, epoch, initial_lr, writer=None):\n lr = initial_lr * (0.98 ** epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n if writer:\n writer.add_scalar(\"lr_G\", lr, epoch + 1)", "def adjust_learning_rate(optimizer, lr, step, args):\n # decay = 0.1**(sum(epoch >= np.array(lr_steps)))\n lr = lr * (0.95**(step//args.lr_decay_every))\n print(\"current learning rate: {:.6f}\".format(lr))\n param_group = optimizer.param_groups\n for i in range(len(param_group)):\n param_group[i]['lr'] = lr\n\n return optimizer", "def adjust_learning_rate(optimizer, epochs, base_lr):\r\n lr = base_lr * (0.01 ** (epochs//5))\r\n print('Learning Rate decreased to {}'.format(lr))\r\n for param_group in optimizer.state_dict()['param_groups']:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\r\n lr = args.lr * (0.1 ** (epoch // 30))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, batch):\n lr = learning_rate\n for i in range(len(steps)):\n scale = scales[i] if i < len(scales) else 1\n if batch >= steps[i]:\n lr = lr * scale\n if batch == steps[i]:\n break\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr / batch_size\n return lr", "def adjust_learning_rate(optimizer, epoch, lr):\n lr = lr * ((1 - 0.015) ** epoch)\n print('learning rate : {}'.format(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(opt, optimizer, epoch):\r\n lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, learning_rate):\n\n if epoch >= 60 and epoch < 75:\n lr = learning_rate / 2\n elif epoch >= 75:\n lr = learning_rate / 4\n else:\n lr = learning_rate\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, epoch, args):\n lr = args.lr\n if 20 < epoch <= 30:\n lr = 0.0001\n elif 30 < epoch :\n lr = 0.00001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(\"learning rate -> {}\\n\".format(lr))", "def adjust_learning_rate(self, opt, epoch):\n lr = opt.learning_rate * 0.1 ** (epoch // opt.lr_update)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(optimizer, gamma, step):\n lr = args.lr * (0.8 ** step)\n print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def adjust_learning_rate(learning_rate,optimizer, epoch):\n lr = learning_rate * (0.1 ** (epoch // 25))\n print(str(lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr" ]
[ "0.82430375", "0.8091104", "0.8081948", "0.8057003", "0.7882125", "0.7729992", "0.7728464", "0.7728464", "0.77180463", "0.7680515", "0.7672647", "0.7670474", "0.76550764", "0.76442766", "0.762199", "0.76109266", "0.7595715", "0.758477", "0.7532394", "0.7522077", "0.75184435", "0.75164783", "0.7510736", "0.75061715", "0.75036716", "0.7503188", "0.7501985", "0.7486009", "0.74847925", "0.7483046" ]
0.81921303
1
Return apitools message object for given message name.
def GetApiMessage(message_name): messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION) return getattr(messages, message_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msg(name):\n msg = Message.ByKeys(name)\n if msg is not None:\n txt = msg.message_ml\n if msg_is_ignored(name):\n txt = IGNORE_PREFIX + txt\n else:\n misc.cdblogv(misc.kLogErr, 0,\n \"bomcreator: could not find message '%s'\" % name)\n txt = name\n return txt", "def GetAioMessageStruct(message_type_name):\n try:\n if message_type_name == 'kMessageTypeControlTelemetry':\n return getattr(pack_control_telemetry, 'ControlTelemetry')\n elif message_type_name == 'kMessageTypeControlSlowTelemetry':\n return getattr(pack_control_telemetry, 'ControlSlowTelemetry')\n elif message_type_name == 'kMessageTypeControlDebug':\n return getattr(pack_control_telemetry, 'ControlDebugMessage')\n elif message_type_name == 'kMessageTypeSimTelemetry':\n return getattr(pack_sim_telemetry, 'SimTelemetry')\n elif message_type_name == 'kMessageTypeGroundTelemetry':\n return getattr(pack_ground_telemetry, 'GroundTelemetry')\n elif message_type_name in ('kMessageTypeDynamicsReplay',\n 'kMessageTypeEstimatorReplay',\n 'kMessageTypeSimCommand',\n 'kMessageTypeSimSensor',\n 'kMessageTypeSimTetherDown'):\n return getattr(pack_sim_messages,\n message_type_name[len('kMessageType'):] + 'Message')\n else:\n return getattr(pack_avionics_messages,\n message_type_name[len('kMessageType'):] + 'Message')\n except AttributeError:\n raise AioClientException(\n 'No struct for AIO message type: ' + message_type_name)", "def get_message(self, _id):\n return Message.deserialize(self._get_single('messages', {'id': _id}))", "def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj", "def makeMessage( name, *structure ):\n return X12Message( name, *structure )", "def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj", "def _ns_message(self, queue, message_id):\n return self._ns(queue, \"messages\", message_id)", "def message(self, message_id):\r\n return Message(self, message_id)", "def get_message_by_id(message_id):\n return Message.query.get(message_id)", "def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg", "def UnpackMessage(swig_obj_pointer, msg_name):\n\n ptr = int(swig_obj_pointer)\n c_array = ctypes.c_char * aio.GetPackMessageSize(msg_name)\n received = c_array.from_address(ptr)\n\n msg_type = MESSAGE_TYPE_HELPER.Value(msg_name)\n return c_helpers.Unpack(received[:], MESSAGE_STRUCTS[msg_type])", "def _get_message(self, sender_message):\n # type: (str) -> Message or None\n st_re = self.SENDER_TEXT.search(sender_message)\n if st_re is None:\n return None\n else:\n return Message(speaker=st_re.group(1), text=st_re.group(2).strip())", "def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json", "def getMessage():\n return message", "def get_message(self, 
message_id):\n r = requests.get('https://outlook.office.com/api/v2.0/me/messages/' + message_id, headers=self._headers)\n check_response(r)\n return Message._json_to_message(self, r.json())", "def get_message(self, message_id: int) -> discord.Message:\n return self._connection._get_message(message_id)", "def GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n return message\n except errors.HttpError:\n print('An error occurred: ')", "def _get_message(self):\n return self.__message", "def load_message(message_id):\n pathname = \"messages/{}.json\".format(message_id)\n return _load_message(pathname)", "def from_string(name: str) -> MessageOperation:\n if name == \"cipher\":\n return MessageOperation.cipher\n elif name == \"decipher\":\n return MessageOperation.decipher\n elif name == \"attack\":\n return MessageOperation.attack", "def read_message(m_bytes, proto_version):\n\n # This is the sub-module for the specified proto version.\n try:\n proto_module = PROTOCOL_VERSION_MAP[proto_version]\n except KeyError:\n # TODO: Depending on the backwards-compatibility policy with gotalk,\n # we might be able to fall back to the latest known version and\n # potentially limp along. Too early to know.\n raise InvalidProtocolVersionError(\"Invalid gotalk protocol version.\")\n\n type_id = m_bytes[0]\n try:\n msg_class_name = MESSAGE_TYPE_TO_CLASS_MAP[type_id]\n except KeyError:\n raise InvalidMessageTypeIDError()\n msg_class = getattr(proto_module, msg_class_name)\n return msg_class.from_bytes(m_bytes)", "def get_message(self, bulk_id):\n res = self.client.get(\"/v1/messages/\" + str(bulk_id))\n\n try:\n return Message(res.data[\"message\"])\n except:\n raise ValueError(\"returned response not valid\")", "def message(self, *args, **kwargs) -> Message:\n return Message(self.handle, *args, **kwargs)", "def GetMessage(service, user_id, msg_id):\n try:\n #take out format='raw' if don't want base64\n message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()\n\n print('Message snippet: %s' % message['snippet'])\n\n return message\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def GetMessage(service, user_id, msg_id, snippetMessage=True):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n #print('Message snippet: %s' % message['snippet'])\n #print('Message snippet: %s' % message['payload']['headers'])\n #print(unicode('Message snippet: %s' % message['snippet'],'utf-8'))\n\n if snippetMessage:\n return message['snippet']\n else:\n return message\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def get_message(message_id, service):\n message = service.users().messages().get(userId='me', id=message_id).execute()\n return message", "def get_message(self):\n return self.msg", "def get_message(self):\n return self.message", "def get_message(self):\n return self.message", "def get_message(self) -> Union[\"Message\", None]:\n raw_data = (\n self.raw_data.get(\"message\") or\n self.raw_data.get(\"edited_message\")\n )\n\n if raw_data:\n return Message(raw_data)\n\n return None" ]
[ "0.69583714", "0.65979904", "0.64236474", "0.62720525", "0.62713873", "0.625511", "0.6236496", "0.6140541", "0.61393994", "0.6108671", "0.6034471", "0.6006937", "0.5988172", "0.5965715", "0.5953879", "0.5941471", "0.5921525", "0.5919041", "0.5918195", "0.590729", "0.5900649", "0.589385", "0.5874447", "0.58742064", "0.5866837", "0.58479506", "0.58422524", "0.5823816", "0.5823816", "0.57933754" ]
0.80781776
0
Builds a bigquery AccessValueListEntry array from input file. Expects YAML or JSON formatted file.
def PermissionsFileProcessor(input_file): access_value_msg = GetApiMessage('Dataset').AccessValueListEntry try: permissions_array = [] permissions_from_file = yaml.load(input_file[0]) permissions_from_file = permissions_from_file.get('access', None) if not permissions_from_file or not isinstance(permissions_from_file, list): raise PermissionsFileError( 'Error parsing permissions file: no access list defined in file') for access_yaml in permissions_from_file: permission = encoding.PyValueToMessage(access_value_msg, access_yaml) if _ValidatePermission(permission): permissions_array.append(permission) else: raise PermissionsFileError(('Error parsing permissions file:' ' invalid permission definition' ' [{}]'.format(permission))) return sorted(permissions_array, key=lambda x: x.role) except yaml.YAMLParseError as ype: raise PermissionsFileError('Error parsing permissions file [{}]'.format( ype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])", "def load_vals(txtfile):\n import ast\n \n data = []\n li = load_help(txtfile)\n \n for i in xrange(len(li)):\n if li[i] == 'Value' and i < len(li)-1:\n dic = ast.literal_eval(li[i+1])\n data.append(dic)\n return data", "def _read_file(self, input_file):\n with io.open(input_file, \"r\", encoding=\"UTF-8\") as file:\n examples = []\n for line in file:\n data = line.strip().split(\"_!_\")\n example = InputExample(\n guid=data[0], label=data[1], text_a=data[3])\n examples.append(example)\n\n return examples", "def build_accession_parser(rules_file):\n\n rules_data = json.load(rules_file)\n rules_by_prefix_len = {}\n for prefix_list, database, molecule_type, type_description in rules_data:\n for prefix in prefix_list:\n prefix_length = len(prefix)\n if REFSEQ_PREFIX_RE.match(prefix) is not None:\n # RefSeq whose accessions start with XX_ has its own rules\n if 'RefSeq' not in rules_by_prefix_len:\n rules_by_prefix_len['RefSeq'] = []\n rules_by_prefix_len['RefSeq'].append((prefix, database, molecule_type, type_description))\n elif '-' in prefix or '_' in prefix:\n (prefix_length, matcher) = make_range_matcher(prefix)\n if prefix_length not in rules_by_prefix_len:\n rules_by_prefix_len[prefix_length] = []\n rules_by_prefix_len[prefix_length].append((matcher, database, molecule_type, type_description))\n else:\n if prefix_length not in rules_by_prefix_len:\n rules_by_prefix_len[prefix_length] = []\n rules_by_prefix_len[prefix_length].append((prefix, database, molecule_type, type_description))\n return rules_by_prefix_len", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def parse_namelist(file, flat=False, silence_cast_errors=False):\n\n data = {}\n current_namelist = \"\"\n raw_lines = []\n with open(file) as f:\n for line in f:\n # Remove comments\n line = line.split(\"#\")[0].strip()\n if \"=\" in line or \"&\" in line:\n raw_lines.append(line)\n elif line:\n raw_lines[-1] += line\n\n for line in raw_lines:\n if line.startswith(\"&\"):\n current_namelist = line.split(\"&\")[1]\n if current_namelist: # else : it's the end of a namelist.\n data[current_namelist] = {}\n else:\n field, value = map(str.strip, line[:-1].split(\"=\"))\n try:\n value = _parse_namelist_val(value)\n except ValueError as err:\n if silence_cast_errors:\n warn(\n \"Unable to cast value {} at line {}\".format(\n value, raw_lines.index(line)\n )\n )\n else:\n raise err\n\n if \"(\" in field: # Field is an array\n field, idxs = field[:-1].split(\"(\")\n field = field.casefold()\n if field not in data[current_namelist]:\n data[current_namelist][field] = []\n # For generality, we will assign a slice, so we cast in list\n value = value if isinstance(value, list) else [value]\n idxs = [\n slice(int(idx.split(\":\")[0]) - 1, int(idx.split(\":\")[1]))\n if \":\" in idx\n else slice(int(idx) - 1, int(idx))\n for idx in idxs.split(\",\")\n ]\n\n datafield = data[current_namelist][field]\n # Array are 1D or 2D, if 2D we extend it to the good shape,\n # filling it with [] and pass the appropriate sublist.\n # Only 
works with slice assign (a:b) in first position.\n missing_spots = idxs[-1].stop - len(datafield)\n if missing_spots > 0:\n datafield.extend([] for i in range(missing_spots))\n if len(idxs) == 2:\n datafield = datafield[idxs[1].start]\n datafield[idxs[0]] = value\n else:\n data[current_namelist][field.casefold()] = value\n\n if flat:\n for namelist in list(data.keys()):\n data.update(data.pop(namelist))\n\n return data", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def _read_jsonl(cls, input_file):\n with open(input_file, 'rb') as f:\n return [json.loads(ln) for ln in f]", "def build_data_from_file(cls,file_path,number_elems=None):\n raise NotImplementedError('Abstract method has not been implemented')", "def read_cfg(file):\n result = []\n if isfile(file):\n with open(file) as f:\n cfg = json.load(f)\n for entry in cfg:\n if \"start\" in entry:\n filter = (entry[\"start\"], entry.get(\"end\", None))\n result.append(filter)\n return result", "def read_file_into_list(source_file):\n\twith open(source_file, 'r') as source:\n\t\tdata = base64.b64encode(source.read())\n\t\treturn [data[i:i+SPLIT_LENGTH] for i in range(0, len(data), SPLIT_LENGTH)]", "def load_from_file(cls):\n\n try:\n list_of_ins = []\n with open(cls.__name__ + '.json') as my_file:\n dicts = Base.from_json_string(my_file.read())\n for key in dicts:\n list_of_ins += [cls.create(**key)]\n return (list_of_ins)\n except:\n return ([])", "def get_ade20_vqa_data(file_name=\"ade20k_vqa.jsonl\"):\n conf = get_config()\n vqa_file = conf[\"ade20k_vqa_dir\"]\n file = os.path.join(vqa_file, file_name)\n print(f\"Reading {file}\")\n with jsonlines.open(file) as reader:\n data = [i for i in iter(reader)]\n return data", "def read_file_into_list(source_file):\n with open(source_file, 'r') as source:\n data = base64.b64encode(source.read())\n return [data[i:i+SPLIT_LENGTH] for i in range(0, len(data), SPLIT_LENGTH)]", "def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]", "def parse_datafile(file):\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data", "def parse_datafile(file):\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data", "def _read_input(self, in_file):\n result = {}\n with open(in_file, \"r\") as f:\n reader = csv.DictReader(f, delimiter=str(\"\\t\"))\n for row in reader:\n result[row[\"accession\"]] = {\n \"transcript_sequence\": row[\"transcript_sequence\"],\n \"cds_start_i\": int(row[\"cds_start_i\"]),\n \"cds_end_i\": int(row[\"cds_end_i\"]),\n }\n\n return result", "def read_csvfile(inputfn):\n with open(inputfn, 'rU') as fd:\n datastruct = gen_csv_data(fd, returntype='list') # Make sure to store as list before closing file.\n return datastruct", "def load_builtin_data(name):\n\t\n\tpath = 
Path(resource_filename('pyospray', f'data/{name}.txt'))\n\tret = {}\n\tvalues = None\n\twith path.open('r') as f:\n\t\tlines = (line.rstrip('\\n') for line in f)\n\t\tfor token, content in tokenize(lines):\n\t\t\tif token == 'key':\n\t\t\t\tvalues = []\n\t\t\t\tret[content] = values\n\t\t\t\n\t\t\telif token == 'values':\n\t\t\t\tvalues.extend(content)\n\t\t\t\n\t\t\telse:\n\t\t\t\traise NotImplementedError\n\t\n\treturn ret", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def load_data_from_file(file_name):\n file_path = os.getcwd() + '/data/' + file_name\n\n constructors = []\n if os.path.exists(file_path):\n with open(file_path, 'r') as csvfile:\n file_rows = csvfile.readlines()\n\n constructors = [line.replace('\\n', '').split('|') for line in file_rows]\n\n return constructors", "def readData(file):\n \n inputValues=list()\n outputValue=list()\n totalData=list()\n \n with open(file) as fp :\n for line in fp:\n if line.strip( ) == '':\n continue\n attributeValue = line.strip().split(\",\")\n inputValue1 = float(attributeValue[0])\n inputValue2 = float(attributeValue[1])\n \n inputValues+=[[inputValue1]+[inputValue2]]\n outputValue+=[int(attributeValue[2])]\n totalData+=[[inputValue1]+[inputValue2]+[int(attributeValue[2])]]\n \n \n return inputValues,outputValue,totalData", "def load_reference_from_stream(self, f):\n qids_to_relevant_docids = {}\n for l in f:\n vals = l.strip().split('\\t')\n if len(vals) != 4:\n vals = l.strip().split(' ')\n if len(vals) != 4:\n pdb.set_trace()\n raise IOError('\\\"%s\\\" is not valid format' % l)\n\n qid = vals[0]\n if qid in qids_to_relevant_docids:\n pass\n else:\n qids_to_relevant_docids[qid] = []\n _rel = int(vals[3])\n if _rel > 0:\n qids_to_relevant_docids[qid].append(vals[2])\n\n return qids_to_relevant_docids", "def read_data(self, filepath, is_build_vocab=False):\r\n\r\n with open(\"general_list.pkl\", \"rb\") as file:\r\n self.general_list = pl.load(file)\r\n self.vocab.token2idx = {\"<pad>\": 0, \"<unk>\": 1}\r\n print(len(self.general_list))\r\n ll = 2\r\n for token in self.general_list:\r\n self.vocab.token2idx[token] = ll\r\n ll+=1\r\n\r\n print(\"max id\", max(list(self.vocab.token2idx.values())), len(self.vocab.token2idx))\r\n self.vocab.idx2token = {idx: token for token, idx in self.vocab.token2idx.items()}\r\n #print(\"max_len\", self.vocab.token2idx)\r\n datas = []\r\n\r\n with open(filepath, \"r\", encoding=\"utf-8\") as reader:\r\n for line in reader:\r\n line = line.strip()\r\n if not line:\r\n continue\r\n obj = json.loads(line)\r\n datas.append(obj)\r\n\r\n return datas", "def from_file_to_list(input_file):\n\tfile = open(input_file)\n\n\tdict_values = [\"\" for k in range(8)]\n\n\tfor line in file:\n\t\ts = line.split(\" \")\n\t\ts.pop(0) # first column only indicate line's number\n\t\ts.remove('\\n')\n\t\tfor idx, a in enumerate(s):\n\t\t\tdict_values[idx] += a\n\n\n\tfile.close\n\n\treturn dict_values", "def test_reading_nested_user_map_definition_from_file():\n with open(\"definitions/Person.buf\") as f:\n Person = Map.from_open_file(f)\n\n expected = Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"members\", List(Person))\n )\n\n with open(\"definitions/Club.buf\") as f:\n assert expected == 
Map.from_open_file(f, \"definitions\")\n assert expected == Map.from_file(\"definitions/Club.buf\")\n assert expected == Map.from_file(\"./definitions/Club.buf\")", "def read_reference(conformation_fname):\r\n\r\n #Create empty set\r\n reference_atoms = []\r\n\r\n #Try/catch if file cannot be found. Open file in read mode\r\n #For eveyr line in the text file, strip all white spaces from front and back\r\n #If not empty line, split line on commas and put integers in set. These correspond to atom numbers of the key atoms\r\n #Return this list\r\n\r\n try:\r\n with open(conformation_fname, \"r\") as fin :\r\n num = 1\r\n for line in fin:\r\n if num < 10:\r\n num = num + 1\r\n continue\r\n content = line.strip()\r\n if content == '':\r\n continue\r\n else:\r\n reference_atoms.append(content.split())\r\n #reference_atom_num.update([int(i) for i in content.split(',')])\r\n return reference_atoms\r\n #Catch OS error\r\n except OSError:\r\n print('OS error')\r\n sys.exit()\r\n #Catch value error (not appropriate values to be converted to int)\r\n except ValueError:\r\n print('Could not convert data to integer')\r\n sys.exit()", "def _read_jsonl(cls, input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n lines = []\n for line in f:\n lines.append(json.loads(line))\n return lines", "def test_reading_user_map_definition_with_list():\n assert Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"phones\", List(String))\n ) == Map.from_lines([\n \"1. name: string\",\n \"2. phones: list(string)\"\n ])" ]
[ "0.56744987", "0.5283599", "0.5243404", "0.5234778", "0.5156196", "0.51066506", "0.5005265", "0.5005265", "0.49895862", "0.4981226", "0.49759138", "0.49112916", "0.49017558", "0.48792005", "0.48355103", "0.48244855", "0.48244855", "0.48212677", "0.4802652", "0.47956467", "0.47937998", "0.47522512", "0.47453332", "0.47386205", "0.47376984", "0.4733793", "0.47195417", "0.47126207", "0.47107288", "0.47105974" ]
0.5508567
1
Set projectId value for a BigQueryXXXRequest.
def SetProjectId(ref, args, request): del ref project = args.project or properties.VALUES.core.project.Get(required=True) project_ref = resources.REGISTRY.Parse(project, collection='bigquery.projects') request.projectId = project_ref.Name() return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_project_quotas(self, project_id, request_model, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.post(\n 'project-quotas/' + project_id,\n request_model=request_model,\n response_model_type=quota_models.ProjectQuotaModel,\n extra_headers=extra_headers,\n use_auth=use_auth, user_name=user_name)\n return resp", "def set_project(project_id):\n return fluent.set_project(project_id)", "def set_project_id(self, project_id):\n self._project_id = project_id", "def project_id(self, project_id):\n\n self._project_id = project_id", "def project_id(self, project_id):\n\n self._project_id = project_id", "def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()", "def scope_project(self, project_key):\n self.raw['scope'] = 'PROJECT'\n self.raw['projectKey'] = project_key\n return self", "def set_or_create_project(conn: BlitzGateway, project: Union[str, int],\n across_groups: Optional[bool] = True) -> int:\n if isinstance(project, str):\n project_id = post_project(conn, project)\n print(f'Created new Project:{project_id}')\n elif (isinstance(project, int)):\n project_id = project\n else:\n raise TypeError(\"'project' must be str or int\")\n return project_id", "def change_project(self, project, project_format='id'):\n name = 'tenant' if self.api_version == 2 else 'project'\n self.creds['%s_%s' % (name, project_format)] = project\n opposite_format = 'name' if project_format == 'id' else 'id'\n del self.creds['%s_%s' % (name, opposite_format)]", "def updateProject(self, projectId,payload):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.put(uri,payload)\n return response", "def project_name(self, project_name):\n\n self._project_name = project_name", "def project(self, project):\n self._project = project", "def project(self, project):\n self._project = project", "def set_keystone_v3_project(self, **kwargs):\n LOG_OBJ.debug(\"Creating the project.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects/\" + \\\n str(kwargs['project_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _project_info = {\"project\": {}}\n for argument in [\"name\", \"description\", \"domain_id\",\n \"enabled\", \"disabled\"]:\n try:\n _project_info['project'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_project_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the project\")\n print (\"No response from Server while set the project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set project Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def update_project(self, project_id, project):\n\n with self._transaction.cursor() as cur:\n # ensure this project exists\n cur.execute(\n \"SELECT project_id \"\n \"FROM barcodes.project \"\n \"WHERE project_id=%s;\",\n (project_id,))\n\n row = cur.fetchone()\n if row is None:\n raise NotFound(\"No project with ID %s\" % project_id)\n\n query = f\"\"\"\n UPDATE barcodes.project\n SET {p.DB_PROJ_NAME_KEY}=%s,\n {p.SUBPROJECT_NAME_KEY}=%s,\n {p.ALIAS_KEY}=%s,\n {p.IS_MICROSETTA_KEY}=%s,\n {p.SPONSOR_KEY}=%s,\n {p.COORDINATION_KEY}=%s,\n {p.CONTACT_NAME_KEY}=%s,\n 
{p.ADDTL_CONTACT_NAME_KEY}=%s,\n {p.CONTACT_EMAIL_KEY}=%s,\n {p.DEADLINES_KEY}=%s,\n {p.NUM_SUBJECTS_KEY}=%s,\n {p.NUM_TIMEPOINTS_KEY}=%s,\n {p.START_DATE_KEY}=%s,\n {p.BANK_SAMPLES_KEY}=%s,\n {p.PLATING_START_DATE_KEY}=%s,\n {p.DISPOSITION_COMMENTS_KEY}=%s,\n {p.COLLECTION_KEY}=%s,\n {p.IS_FECAL_KEY}=%s,\n {p.IS_SALIVA_KEY}=%s,\n {p.IS_SKIN_KEY}=%s,\n {p.IS_BLOOD_KEY}=%s,\n {p.IS_OTHER_KEY}=%s,\n {p.DO_16S_KEY}=%s,\n {p.DO_SHALLOW_SHOTGUN_KEY}=%s,\n {p.DO_SHOTGUN_KEY}=%s,\n {p.DO_RT_QPCR_KEY}=%s,\n {p.DO_SEROLOGY_KEY}=%s,\n {p.DO_METATRANSCRIPTOMICS_KEY}=%s,\n {p.DO_MASS_SPEC_KEY}=%s,\n {p.MASS_SPEC_COMMENTS_KEY}=%s,\n {p.MASS_SPEC_CONTACT_NAME_KEY}=%s,\n {p.MASS_SPEC_CONTACT_EMAIL_KEY}=%s,\n {p.DO_OTHER_KEY}=%s,\n {p.BRANDING_ASSOC_INSTRUCTIONS_KEY}=%s,\n {p.BRANDING_STATUS_KEY}=%s\n WHERE project_id=%s;\"\"\"\n\n cur.execute(query,\n (\n project.project_name,\n project.subproject_name,\n project.alias,\n project.is_microsetta,\n project.sponsor,\n project.coordination,\n project.contact_name,\n project.additional_contact_name,\n project.contact_email,\n project.deadlines,\n project.num_subjects,\n project.num_timepoints,\n project.start_date,\n project.bank_samples,\n project.plating_start_date,\n project.disposition_comments,\n project.collection,\n project.is_fecal,\n project.is_saliva,\n project.is_skin,\n project.is_blood,\n project.is_other,\n project.do_16s,\n project.do_shallow_shotgun,\n project.do_shotgun,\n project.do_rt_qpcr,\n project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec,\n project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status,\n project_id\n ))\n return cur.rowcount == 1", "def projects(self, projects):\n if (self.local_vars_configuration.client_side_validation and\n projects is not None and not isinstance(projects, int)):\n raise ValueError(\"Parameter `projects` must be an integer\") # noqa: E501\n\n self._projects = projects", "def set_project(\n name\n):\n if not is_alive():\n err_msg = \"Cannot connect to getML engine. 
Make sure the engine is running and you are logged in.\"\n raise ConnectionRefusedError(err_msg)\n\n cmd = dict()\n cmd[\"type_\"] = \"set_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def _project(request, key):\n context = request.context\n if not context.project_id:\n raise exceptions.QuotaMissingTenant()\n return {key: {key + '_id': context.project_id}}", "def list_namespaced_project_request(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_project_request\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/projectrequests'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):\n pass", "def projects(self, projects):\n\n self._projects = projects", "def project(self, project):\n\n self._project = project", "def project(self, project):\n\n self._project = project", "def project(self, project):\n\n self._project = project", "def project(self, project):\n\n self._project = project", "def create_namespaced_project_request(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_project_request\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_project_request`\")\n\n resource_path = '/oapi/v1/projectrequests'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = 
params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ProjectRequest',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setRequestId(self, reqid) :\n self.request_id = reqid", "def set_project(self, version):\n raise NotImplementedError(\"set_project is not implemented\")", "def test_projects_id_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def num_projects(self, num_projects):\n\n self._num_projects = num_projects" ]
[ "0.61926836", "0.6037495", "0.5940458", "0.58782727", "0.58782727", "0.5392154", "0.53831047", "0.5383003", "0.5351515", "0.5217758", "0.52084017", "0.51797605", "0.51797605", "0.51708114", "0.5099718", "0.5095747", "0.5076841", "0.50675255", "0.50642264", "0.5054526", "0.5043689", "0.5032281", "0.5032281", "0.5032281", "0.5032281", "0.50313395", "0.5015842", "0.50030303", "0.4980436", "0.4980237" ]
0.7350548
0
Ensure that view parameters are set properly on tables create request.
def SetViewParameters(ref, args, request): del ref # unused if not args.view: request.table.view = None return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create(self, tables, views, schema_name, config):\n if not isinstance(tables, dict):\n return False # Raise Exception That Tables Are In A Wrong Format???!!!\n success = True\n if schema_name is not None:\n self._create_schema(schema_name)\n for table_name_instance in tables.items():\n if self._create_table(table_name_instance[1]) is False:\n success = False\n break\n if isinstance(views, dict):\n for view_name_instance in views.items():\n if self._create_view(view_name_instance[1], schema_name, config) is False:\n success = False\n break\n return success", "def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)", "def createViews(views):\n ...", "def create_view(self, repo, view, sql):\n return self.user_con.create_view(\n repo=repo, view=view, sql=sql)", "def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return", "def create_table(self):\n pass", "def pre_route_table_create(self, resource_dict):\n pass", "def create_all_views():\n cursor.execute(articleList)\n cursor.execute(goodViews)\n cursor.execute(authorsTitles)\n cursor.execute(titleViews)\n cursor.execute(dailyTotalView)\n cursor.execute(dailyErrorView)", "def post_route_table_create(self, resource_dict):\n pass", "def create_table_request_info(self):\n table_query = f\"\"\"\n Create Table If Not Exists Request_Info(\n {self.__fields[0]} INT AUTO_INCREMENT PRIMARY KEY,\n {self.__fields[1]} TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n {self.__fields[2]} CHAR(30),\n {self.__fields[3]} CHAR(30),\n {self.__fields[4]} CHAR(30) NULL,\n {self.__fields[5]} DATE,\n {self.__fields[6]} CHAR(15),\n {self.__fields[7]} CHAR(30),\n {self.__fields[8]} CHAR(30),\n {self.__fields[9]} CHAR(30),\n {self.__fields[10]} INT(32),\n {self.__fields[11]} CHAR(30),\n {self.__fields[12]} INT(32),\n {self.__fields[13]} VARCHAR(30))\n \"\"\"\n self.execute(table_query)", "def test_create_view_returns_empty(dummy_request):\n from learning_journal.views.default import new_entry\n assert new_entry(dummy_request) == {}", "def _create_view(self, view, schema=None, config=None):\n viewname, vschema = view[\"__tablename__\"].split(' ')[0], view[\"__schema__\"].split(' ')[0]\n try:\n dve = SQL('NULL from {}.{}').format(Identifier(vschema),\n Identifier(viewname))\n veq = self.__session.query(self._sql_to_string(dve)).limit(1)\n self.__session.execute(veq)\n self._commit()\n except ProgrammingError:\n self._rollback()\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('* FROM information_schema.routines')\n count = count.filter(like).count()\n if int(count) == 0:\n self._create_extension(config)\n self.exschema = 'public'\n else:\n like = text(\"information_schema.routines.routine_name like 'crosstab%'\")\n count = self.__session.query('routine_schema FROM'\n ' information_schema.routines')\n count = count.filter(like).limit(1)\n count = self.__session.execute(count).fetchone()[0]\n self._commit()\n self.exschema = count\n like = text(\"SELECT has_schema_privilege(:exschema, 'USAGE')\")\n 
like = self.__session.execute(like,\n {\"exschema\": self.exschema}).fetchone()[0]\n self._commit()\n if not like:\n self._grant_access(config)\n viewst, raw = self._sql_to_string(view[\"__statement__\"]), '{}.crosstab'\n defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))\n exsch = SQL(raw).format(Identifier(self.exschema))\n self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))\n self._commit()\n except Exception:\n self._rollback()\n self._reset_session()\n raise", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def creates_view(self):\n return self.statements[0].creates_view()", "def test_create_view_returns_empty_dict_on_get(dummy_request):\n from learning_journal.views.default import create_view\n result = create_view(dummy_request)\n assert result == {}", "def prepare(self, request):\n pass", "def _create_or_alter_view(self, survey_data):\n self.log.info(\"Creating or altering view vw_AllSurveyData \")\n edit_view = self._get_query('edit_view') + \"( \" + survey_data + \" )\"\n self.db.execute_query(edit_view)\n self.log.info(\"View was edited successfully\")", "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def pre_interface_route_table_create(self, resource_dict):\n pass", "def post(self):\n args = parser.parse_args()\n table = TableDetails(args.get('table_size'))\n db.session.add(table)\n db.session.commit()\n return table, 201", "def create_tables( self ) :\n return self._create_tables", "def propagate_view_creation_over_non_existing_table(self):\n\n cluster = self.cluster\n cluster.populate(3)\n cluster.start()\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)\n create_ks(session, 'ks', 3)\n\n session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')\n\n # create a materialized view only in nodes 1 and 2\n node3.stop(wait_other_notice=True)\n session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '\n 'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '\n 'PRIMARY KEY (state, username)'))\n\n # drop the base table only in node 3\n node1.stop(wait_other_notice=True)\n node2.stop(wait_other_notice=True)\n node3.start(wait_for_binary_proto=True)\n session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)\n session.execute('DROP TABLE ks.users')\n\n # restart the cluster\n cluster.stop()\n cluster.start()\n\n # node3 should have received and ignored the creation of the MV over the dropped table\n assert node3.grep_log('Not adding view users_by_state because the base table')", "def test_create_table_successfully (self):\n\n new_table = self.wrapper.create_table(self.table, [self.bob, self.jane])\n self.assertIsNone(new_table)", "def create_table(self, param, timeout):\n _abstract()", "def create_table(self, param, timeout):\n _abstract()", "def view(name, selectable, *, clear: bool = False):\n log.debug('view(%r, clear=%r)', name, clear)\n\n if clear:\n DDL[name] = None, None\n return None\n\n DDL[name] = (CreateView(name, selectable),\n DropView(name))\n\n return make_table(selectable, name=name)", "def putTestData(self):\n # print 'Not Yet implement / sample DB table create'\n tkMessageBox.showinfo(\"Message\", \"Sample DB Table Create\")", "def prePresent(self, request):", "def beforeCreate(self):", "def create(self):" ]
[ "0.63214684", "0.6122003", "0.60363513", "0.59719974", "0.583535", "0.5826202", "0.5768594", "0.57384115", "0.55392927", "0.55050695", "0.54572743", "0.54387593", "0.5411729", "0.53755325", "0.53628856", "0.5360331", "0.5358349", "0.53566074", "0.53360575", "0.5325151", "0.5300957", "0.5295821", "0.52832156", "0.5261851", "0.5261851", "0.5245268", "0.5236487", "0.5225806", "0.5225423", "0.5215412" ]
0.65944326
0
Process the overwrite flag on tables create.
def ProcessTableOverwrite(ref, args, request): dataset_id = ref.datasetId table_id = ref.Name() project_id = ref.projectId if args.overwrite: if _TableExists(dataset_id, table_id, project_id): _TryDeleteTable(dataset_id, table_id, project_id) return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request", "def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)", "def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True", "def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))", "def __new_tables_statement(self):\n new_tables = self.__new_tables()\n for table in new_tables:\n with open('./update/create_tables.sql', 'a') as f:\n create_statement = self.source.query_create_table_statement(table.name)\n f.write(create_statement)\n f.write('\\n')", "def _do_action_tables_create(self):\n\n schema_shell = os.path.join(self.bento_home, \"schema-shell\", \"bin\", \"kiji-schema-shell\")\n assert os.path.isfile(schema_shell), schema_shell\n\n # Delete the table first!\n cmd = (\n \"kiji delete --target={kiji_uri} --interactive=false; \" +\n \"kiji install --kiji={kiji_uri}\" ).format(kiji_uri=self.kiji_uri)\n self._run_kiji_job(cmd)\n\n for ddl in self.ddls:\n ddl_full_path = os.path.join(self.movie_advisor_home, ddl)\n assert os.path.isfile(ddl_full_path)\n cmd = \"{schema_shell} --kiji={kiji_uri} --file={ddl_full_path}\".format(\n schema_shell=schema_shell,\n kiji_uri=self.kiji_uri,\n ddl_full_path=ddl_full_path)\n self._run_kiji_job(cmd)", "def test_database_object_overwrite_parameter_is_set(self):\n database = generate_database_object(overwrite=True)\n\n self.assertEqual(\n True,\n database.overwrite == True,\n \"Database object did not have an overwrite flag, despite being created with one.\"\n )", "def touch(self):\n if self.marker_table_bound is None:\n self.create_marker_table()\n\n table = self.marker_table_bound\n id_exists = self.exists()\n with self.engine.begin() as conn:\n if not id_exists:\n ins = table.insert().values(\n ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n else:\n ins = table.update().where(sqlalchemy.and_(\n table.c.ParquetSource == self.parquet_source,\n table.c.Environment == self.environment,\n table.c.TargetTable == self.target_table)).\\\n values(ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n conn.execute(ins)\n assert self.exists()", "def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n 
f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")", "def create_all_tables(self):\n pass", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def on_doctype_update():\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_defkey_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_defkey_index(parent, defkey)\"\"\")\n\n\tif not frappe.db.sql(\"\"\"show index from `tabDefaultValue`\n\t\twhere Key_name=\"defaultvalue_parent_parenttype_index\" \"\"\"):\n\t\tfrappe.db.commit()\n\t\tfrappe.db.sql(\"\"\"alter table `tabDefaultValue`\n\t\t\tadd index defaultvalue_parent_parenttype_index(parent, parenttype)\"\"\")", "def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" 
type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user 
VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query", "def _create_tables_classic(self, engine, metadata):\n if engine and metadata:\n with (yield from engine) as conn:\n for x in self._models.values():\n try:\n yield from conn.execute(CreateTable(x))\n except ProgrammingError as error:\n if hasattr(self.app, 'log') and self.app.log:\n if self.app.debug:\n self.app.log.info(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n else:\n if self.app.debug:\n print(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n return", "def create_table(self):\n pass", "def touch(self, connection=None):\n self.create_marker_table()\n\n if connection is None:\n connection = self.connect()\n connection.autocommit = True # if connection created here, we commit it here\n\n connection.cursor().execute(\n \"\"\"INSERT INTO {marker_table} (update_id, target_table)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n update_id = VALUES(update_id)\n \"\"\".format(marker_table=self.marker_table),\n (self.update_id, self.table)\n )\n # make sure update is properly marked\n assert self.exists(connection)", "def create_tables(self):\n for name, attribute in self.__dict__.items():\n if hasattr(attribute, 'create_table_in_sqlite_db'):\n attribute.create_table_in_sqlite_db()", "def _create(self, tables, views, schema_name, config):\n if not isinstance(tables, dict):\n return False # Raise Exception That Tables Are In A Wrong Format???!!!\n success = True\n if schema_name is not None:\n self._create_schema(schema_name)\n for table_name_instance in tables.items():\n if self._create_table(table_name_instance[1]) is False:\n success = False\n break\n if isinstance(views, dict):\n for view_name_instance in views.items():\n if self._create_view(view_name_instance[1], schema_name, config) is False:\n success = False\n break\n return success", "def create(self, overwrite_existing=False):\r\n\r\n #### Begin functionality here\r\n if debug: eprint(\"INFO: Creating database \" + self.filename)\r\n if os.path.exists(self.filename):\r\n os.remove(self.filename)\r\n engine = create_engine(\"sqlite:///\"+self.filename)\r\n Base.metadata.create_all(engine)\r\n self.connect()\r\n return()", "def is_overwrite_all(self):\n return self._tag == 'overwrite_all'", "def create_databases(self, overwrite = False):\r\n self.validate_config()\r\n self.template_runner.create_databases(overwrite)", "def create_tables( self ) :\n return self._create_tables", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return", "def post_migrations(self):", "def create_table_execute(self):\n self.execute(query=self.default_template.format(self.table_name), data=None)", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass" ]
[ "0.6433206", "0.6371944", "0.6202041", "0.61849165", "0.60638314", "0.5852797", "0.580371", "0.5792183", "0.5743237", "0.5713558", "0.5528236", "0.5432233", "0.54164076", "0.5352115", "0.5333362", "0.5304936", "0.52447927", "0.5226724", "0.52159977", "0.52028906", "0.5177918", "0.51768345", "0.51553905", "0.51210314", "0.5118915", "0.5118286", "0.5114869", "0.50867444", "0.50867444", "0.50867444" ]
0.6969729
0
Process the overwrite flag on tables copy.
def ProcessTableCopyOverwrite(ref, args, request): del ref # Unused if args.overwrite: request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE' return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request", "def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True", "def is_overwrite_all(self):\n return self._tag == 'overwrite_all'", "def test_overwrites(self):\n\n extra_con = set([Constraint('fake', ['OVERWRITE'])])\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/%fake%/%file%/%pattern%.txt',\n 'echo', extra_constraints=extra_con)\n\n ds_result = the_process_unit.execute(simulate=True)\n\n expected_in_cons = set([Constraint('fake', ['fake_1']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n expected_out_cons = set([Constraint('fake', ['OVERWRITE']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n\n self.assertEqual(expected_in_cons, self.a_pattern_ds.constraints)\n self.assertEqual(expected_out_cons, ds_result.constraints)", "def needPartitionTableUpdate(self):\n n_table=list()\n d_table=self.destination.getPartitionTable()\n s_table=self.source.getPartitionTable()\n for i in range(len(s_table)):\n n_table.append(re.sub(self.source.getDeviceName(), \\\n self.destination.getDeviceName(), \\\n s_table[i]))\n if d_table == n_table:\n return False\n else:\n return True", "def no_overwrite_example():", "def _apply(self):\n s = [(iptables_save, iptables_restore, self.ipv4)]\n if self.use_ipv6:\n s += [(ip6tables_save, ip6tables_restore, self.ipv6)]\n\n for save, restore, tables in s:\n all_tables, _err = save()\n all_lines = all_tables.split('\\n')\n for table_name, table in six.iteritems(tables):\n start, end = self._find_table(all_lines, table_name)\n all_lines[start:end] = self._modify_rules(\n all_lines[start:end], table, table_name)\n table.dirty = False\n restore('\\n'.join(all_lines))", "def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))", "def ProcessDatasetOverwrite(ref, args, request):\n del ref\n dataset_id = request.dataset.datasetReference.datasetId\n project_id = request.projectId\n\n if args.overwrite:\n if _DatasetExists(dataset_id, project_id):\n _TryDeleteDataset(dataset_id, project_id)\n\n return request", "def pre_osc_check(self):\n # Make sure temporary table we will use during copy doesn't exist\n self.table_check()\n self.decide_pk_for_filter()\n\n # Check if we can have indexes in new table to efficiently look up\n # current old pk combinations\n if not self.validate_post_alter_pk():\n self.table_size = self.get_table_size(self.table_name)\n if self.skip_pk_coverage_check:\n log.warning(\n \"Indexes on new table cannot cover current PK of \"\n \"the old schema, which will make binary logs replay \"\n \"in an inefficient way.\"\n )\n elif self.table_size < self.pk_coverage_size_threshold:\n log.warning(\n \"No index on new table can cover old pk. 
Since this is \"\n \"a small table: {}, we fallback to a full table dump\".format(\n self.table_size\n )\n )\n # All columns will be chosen if we are dumping table without\n # chunking, this means all columns will be used as a part of\n # the WHERE condition when replaying\n self.is_full_table_dump = True\n self._pk_for_filter = [col.name for col in self._old_table.column_list]\n self._pk_for_filter_def = self._old_table.column_list.copy()\n elif self.is_full_table_dump:\n log.warning(\n \"Skipping coverage index test, since we are doing \"\n \"full table dump\"\n )\n else:\n old_pk_names = \", \".join(\n \"`{}`\".format(col.name)\n for col in self._old_table.primary_key.column_list\n )\n raise OSCError(\"NO_INDEX_COVERAGE\", {\"pk_names\": old_pk_names})\n\n log.info(\n \"PK filter for replaying changes later: {}\".format(self._pk_for_filter)\n )\n\n self.foreign_key_check()\n self.trigger_check()\n self.init_range_variables()\n self.get_table_chunk_size()\n self.make_chunk_size_odd()\n self.check_disk_size()\n self.ts_bootstrap_check()\n self.drop_columns_check()", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def test_merge_overwrite_missing_source_key(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"D\"] = \"new\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"D\": \"new\"})\n self.assertEqual(mdict, ret)", "def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name())\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.projectId',\n destination_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.destinationTable.tableId',\n destination_ref.Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.datasetId',\n source_ref.Parent().Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.projectId',\n source_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.tableId',\n source_ref.Name())\n return request", "def sectional_overwrite_check(self):\n\n for rule in self.options['sectional_overwrite']:\n if self.lineage_test(rule):\n return True\n return False", "def run(self):\n if not (self.table and self.columns):\n raise Exception(\"table and columns need to be specified\")\n\n connection = self.output().connect()\n\n # attempt to copy the data into mysql\n # if it fails because the target table doesn't exist\n # try to create it by running self.create_table\n for attempt in range(2):\n try:\n cursor = connection.cursor()\n print(\"caling init copy...\")\n self.init_copy(connection)\n self.copy(cursor)\n self.post_copy(connection)\n if self.enable_metadata_columns:\n self.post_copy_metacolumns(cursor)\n except Error as err:\n if err.errno == errorcode.ER_NO_SUCH_TABLE and attempt == 0:\n # if first attempt fails with \"relation not found\", try creating table\n # logger.info(\"Creating table %s\", self.table)\n connection.reconnect()\n self.create_table(connection)\n else:\n raise\n else:\n break\n\n # mark as complete in same transaction\n self.output().touch(connection)\n connection.commit()\n 
connection.close()", "def onUndo(self):\n pass", "def overwrite_all ( self ):\n return self.value == self.OV_ALL", "def undo(self):\n LOG.debug(\"In the undo method, will attempt to restore\")\n\n # validate detected nothing to do for this, nothing was done\n # for execute, so simply return\n if self.no_op:\n return\n\n if not self.source_dev or not self.target_dev:\n return\n LOG.debug(\"The source dictionary is: %s\", self.source_dict_restore)\n LOG.debug(\"The target dictionary is: %s\", self.target_dict_restore)\n\n # In scenario where no source IP Address...\n if self.source_dict_restore:\n self.commandex.send_ifcfg(self.source_dev,\n self.source_dict_restore)\n\n # May have failed because the ifcfg didn't even exist, nothing\n # to roll back then\n if self.target_dict_restore:\n self.commandex.send_ifcfg(self.target_dev,\n self.target_dict_restore)", "def abstract_write(self, *params): \n section = self.table_section_from_parameter(*params)\n row_args = self.table_row_args_from_parameter(*params)\n new_row = self.table_row_class(*row_args)\n # IMPORTANTE! ASUMO QUE UN BLOQUE INVALIDO ES IGUAL A UNO VACIO!\n # TO DO: PREGUNTAR EN ISSUE\n found_index = False\n for index, table_row in enumerate(section.copy()): \n if table_row is None or not table_row.valid:\n found_index = True\n overwrite_index = index\n break\n\n if not found_index:\n overwrite_index = self.next_replace(section)\n\n replace_index = self.index_mapping_from_parameter(overwrite_index, *params)\n\n\n old_line = self.table[replace_index]\n #print(f\"{self.__class__.__name__} Replace -> Index: {replace_index}\")\n\n # Perfom the actual write\n self.table[replace_index] = new_row", "def touch(self):\n if self.marker_table_bound is None:\n self.create_marker_table()\n\n table = self.marker_table_bound\n id_exists = self.exists()\n with self.engine.begin() as conn:\n if not id_exists:\n ins = table.insert().values(\n ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n else:\n ins = table.update().where(sqlalchemy.and_(\n table.c.ParquetSource == self.parquet_source,\n table.c.Environment == self.environment,\n table.c.TargetTable == self.target_table)).\\\n values(ParquetSource=self.parquet_source,\n Environment=self.environment,\n BackupDate=self.backup_date,\n TargetTable=self.target_table,\n InsertedDate=datetime.now())\n conn.execute(ins)\n assert self.exists()", "def process_dump_files(self):\n if not self.open_workbooks():\n exit()\n\n self.message('*****************************************************************************')\n self.message('Only columns that exist in both the dump and destination file will be synced.')\n self.message('They must also match exactly including spelling and capitalization.')\n self.message('*****************************************************************************')\n\n self.wb_destination.active = self.wb_destination['Hypercare Incidents']\n self.parse_dump_file(self.wb_incident.active, self.wb_destination.active, self.fn_incident)\n\n self.wb_destination.active = self.wb_destination['Hypercare Defects']\n self.parse_dump_file(self.wb_defect.active, self.wb_destination.active, self.fn_defect)\n\n self.wb_destination.active = self.wb_destination['Hypercare Enhancements']\n self.parse_dump_file(self.wb_enhancement.active, self.wb_destination.active, self.fn_enhancement)\n\n self.wb_destination.active = self.wb_destination['ALM Defects']\n 
self.parse_dump_file(self.wb_alm.active, self.wb_destination.active, self.fn_alm)", "def process_overrides(recipes, args, production_cat, pkginfo_template):\n for recipe in recipes:\n print SEPARATOR\n\n if recipe in RECIPE_EXCLUSIONS:\n print_error(\"Not overriding %s because it is in the list of \"\n \"exclusions.\" % recipe)\n continue\n if recipe.startswith(\"local\"):\n print_error(\"Not overriding %s because it _is_ an override.\" %\n recipe)\n continue\n\n override_path = make_override(recipe, args.override_dir)\n if override_path is None:\n continue\n\n # Copy just-generated override's Input section to Input_Original.\n override = FoundationPlist.readPlist(override_path)\n override[\"Input_Original\"] = override[\"Input\"]\n override[\"Input\"] = {}\n override[\"Input\"][\"pkginfo\"] = {}\n\n current_version = get_current_production_version(\n production_cat, override, args)\n apply_current_or_orig_values(override, current_version, args)\n\n if not args.suppress_subdir:\n copy_package_path_to_input(override, current_version, args)\n\n if pkginfo_template:\n apply_pkginfo_template(override, pkginfo_template)\n\n FoundationPlist.writePlist(override, override_path)", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def copy_db(src=FRESHDB, dst=[APPDB]):\n for dest in dst:\n try:\n x = shutil.copy2(src, dest)\n print('File copied to {}'.format(x))\n except shutil.SameFileError:\n print('Both source and destination are identical.')", "def test_database_object_overwrite_parameter_is_set(self):\n database = generate_database_object(overwrite=True)\n\n self.assertEqual(\n True,\n database.overwrite == True,\n \"Database object did not have an overwrite flag, despite being created with one.\"\n )", "def copy_file_check(self):\n pass", "def run_copy(self, src, dst):\n pass", "def set_file_ingested(self, original_name, ingested, tablename):\n if ingested:\n prep_stmt = self.session.prepare(\n 'INSERT INTO {0} ({1}) VALUES (?, ?, ?)'.format(\n tablename, \",\".join(COLUMNS_META)\n ))\n bound = prep_stmt.bind([int(time.time()) * 1000, self.who, original_name])\n else:\n prep_stmt = self.session.prepare(\n 'DELETE FROM {0} WHERE {1}=?'.format(tablename, COLUMNS_META[2])\n )\n bound = prep_stmt.bind([original_name])\n # This is not asynchronous since this will be sent once per large file.\n self.session.execute(bound)", "def load_into_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "def test_upload_overwrite(self):\n self.request.access.allow_overwrite = [\"everyone\"]\n name, filename = \"a\", \"a-1.tar.gz\"\n self.db.upload(filename, BytesIO(b\"old\"), name)\n self.db.upload(filename, BytesIO(b\"new\"), name)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 1)" ]
[ "0.67875683", "0.5678444", "0.5599842", "0.55489415", "0.53554213", "0.52686346", "0.52098215", "0.5204137", "0.51888007", "0.51499987", "0.5148369", "0.5104553", "0.5098129", "0.5079654", "0.50565994", "0.50067544", "0.49729812", "0.4921561", "0.49208447", "0.49190444", "0.49049547", "0.4876093", "0.48642325", "0.48454264", "0.48426992", "0.4805531", "0.47853872", "0.47764587", "0.47709787", "0.47685695" ]
0.8015918
0
Build JobConfigurationTableCopy from request resource args.
def ProcessTableCopyConfiguration(ref, args, request): del ref # Unused source_ref = args.CONCEPTS.source.Parse() destination_ref = args.CONCEPTS.destination.Parse() arg_utils.SetFieldInMessage( request, 'job.configuration.copy.destinationTable.datasetId', destination_ref.Parent().Name()) arg_utils.SetFieldInMessage( request, 'job.configuration.copy.destinationTable.projectId', destination_ref.projectId) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.destinationTable.tableId', destination_ref.Name()) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.sourceTable.datasetId', source_ref.Parent().Name()) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.sourceTable.projectId', source_ref.projectId) arg_utils.SetFieldInMessage(request, 'job.configuration.copy.sourceTable.tableId', source_ref.Name()) return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetTableCopyResourceArgs():\n table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')\n arg_specs = [\n resource_args.GetResourcePresentationSpec(\n verb='to copy from', name='source', required=True, prefixes=True,\n attribute_overrides={'table': 'source'}, positional=False,\n resource_data=table_spec_data.GetData()),\n resource_args.GetResourcePresentationSpec(\n verb='to copy to', name='destination',\n required=True, prefixes=True,\n attribute_overrides={'table': 'destination'}, positional=False,\n resource_data=table_spec_data.GetData())]\n fallthroughs = {\n '--source.dataset': ['--destination.dataset'],\n '--destination.dataset': ['--source.dataset']\n }\n return [concept_parsers.ConceptParser(arg_specs, fallthroughs)]", "def _create_jobs_table_data(self, dist_src, sectors, \r\n building_types_and_ids_and_home_based): \r\n res_sample = []; jobs_table = []\r\n no_samples = []\r\n\r\n for key in sectors.keys():\r\n sector_num = key\r\n sector = sectors[key]\r\n for zone in sector.keys():\r\n for building_type, number_of_jobs in sector[zone]:\r\n for type, id, home_based in building_types_and_ids_and_home_based:\r\n if building_type == id: \r\n dist = dist_src[type]\r\n break\r\n else:\r\n raise TypeError, (\"Invalid building type: %s\" \r\n % building_type)\r\n \r\n try:\r\n samples = self._zip_and_sample(dist[zone], \r\n int(number_of_jobs))\r\n if samples is None: \r\n no_samples += [(zone, building_type)]\r\n raise\r\n \r\n except:\r\n pass\r\n \r\n else:\r\n for type, id, home_based in building_types_and_ids_and_home_based:\r\n if building_type == id and home_based == True:\r\n home = 1 \r\n else: home = 0 \r\n \r\n for grid_id in samples:\r\n jobs_table += [{'sector':sector_num, \r\n 'home':home, \r\n 'building':building_type, \r\n 'grid':grid_id}]\r\n \r\n if len(no_samples) > 0:\r\n print ('No job samples created for (zone, building_type): %s!' 
\r\n % no_samples)\r\n \r\n return jobs_table", "def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args", "def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request", "def test_create_copy(self):\n\n config = {\n 'version': '2.0',\n 'input_files': {\n 'INPUT_1': [{\n 'id': 1234,\n 'type': 'PRODUCT',\n 'workspace_name': 'wksp-name',\n 'workspace_path': 'the/workspace/path/file.json',\n 'local_file_name': 'file_abcdfeg.json',\n 'is_deleted': False,\n }]\n },\n 'output_workspaces': {\n 'OUTPUT_1': 'WORKSPACE_1'\n },\n 'tasks': [\n {\n 'task_id': 'task-1234',\n 'type': 'main',\n 'resources': {'cpu': 1.0},\n 'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',\n 'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},\n 'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},\n 'mounts': {'MOUNT_NAME': 'MOUNT_VOLUME_NAME'},\n 'settings': {'SETTING_NAME': 'SETTING_VALUE'},\n 'volumes': {\n 'VOLUME_NAME_1': {\n 'container_path': '/the/container/path',\n 'mode': 'ro',\n 'type': 'host',\n 'host_path': '/the/host/path'\n },\n 'VOLUME_NAME_2': {\n 'container_path': '/the/other/container/path',\n 'mode': 'rw',\n 'type': 'volume',\n 'driver': 'SUPER_DRIVER_5000',\n 'driver_opts': {'turbo': 'yes-pleez'}\n }\n },\n 'docker_params': [{'flag': 'hello', 'value': 'scale'}]\n }\n ]\n }\n exe_config = ExecutionConfiguration(config)\n\n copy = exe_config.create_copy()\n self.assertDictEqual(copy.get_dict(), config)", "def _GetMigrationJob(\n self,\n source_ref,\n destination_ref,\n conversion_workspace_ref,\n cmek_key_ref,\n args,\n ):\n migration_job_type = self.messages.MigrationJob\n labels = labels_util.ParseCreateArgs(\n args, self.messages.MigrationJob.LabelsValue\n )\n type_value = self._GetType(migration_job_type, args.type)\n source = source_ref.RelativeName()\n destination = destination_ref.RelativeName()\n params = {}\n if args.IsSpecified('peer_vpc'):\n params['vpcPeeringConnectivity'] = self._GetVpcPeeringConnectivity(args)\n elif args.IsSpecified('vm_ip'):\n params['reverseSshConnectivity'] = self._GetReverseSshConnectivity(args)\n elif args.IsSpecified('static_ip'):\n params['staticIpConnectivity'] = self._GetStaticIpConnectivity()\n\n migration_job_obj = migration_job_type(\n labels=labels,\n displayName=args.display_name,\n state=migration_job_type.StateValueValuesEnum.CREATING,\n type=type_value,\n dumpPath=args.dump_path,\n source=source,\n destination=destination,\n **params)\n if conversion_workspace_ref is not None:\n migration_job_obj.conversionWorkspace = self._GetConversionWorkspaceInfo(\n conversion_workspace_ref, args\n )\n if cmek_key_ref is not None:\n migration_job_obj.cmekKeyName = cmek_key_ref.RelativeName()\n\n if args.IsKnownAndSpecified('filter'):\n args.filter, server_filter = filter_rewrite.Rewriter().Rewrite(\n args.filter\n )\n migration_job_obj.filter = server_filter\n\n if args.IsKnownAndSpecified('dump_parallel_level'):\n migration_job_obj.performanceConfig = self._GetPerformanceConfig(args)\n\n return migration_job_obj", "def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = 
get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)", "def __get_datatables_args():\n\n table_args = dict()\n\n #\n # Common Arguments\n #\n\n table_args['column-count'] = 0\n table_args['sort-col-count'] = 0\n\n if request.args.get('draw'):\n table_args['sequence'] = request.args.get('draw')\n\n if request.args.get('start'):\n table_args['offset'] = int(request.args.get('start'))\n\n if request.args.get('length'):\n table_args['limit'] = int(request.args.get('length'))\n\n if request.args.get('search[value]'):\n table_args['filter'] = request.args.get('search[value]')\n\n if request.args.get('search[regex]'):\n table_args['filter-regex'] = request.args.get('search[regex]')\n\n #\n # Custom Arguments\n #\n\n if request.args.get('time_filter'):\n table_args['time_filter'] = request.args.get('time_filter')\n\n i = 0\n while True:\n if request.args.get('columns[%d][data]' % i):\n table_args['column-count'] += 1\n table_args['mDataProp_%d' % i] = request.args.get('columns[%d][data]' % i)\n else:\n break\n\n #\n # Column Search\n #\n\n if request.args.get('columns[%d][searchable]' % i):\n table_args['bSearchable_%d' % i] = request.args.get('columns[%d][searchable]' % i)\n\n if request.args.get('columns[%d][search][value]' % i):\n table_args['sSearch_%d' % i] = request.args.get('columns[%d][search][value]' % i)\n\n if request.args.get('columns[%d][search][regex]' % i):\n table_args['bRegex_%d' % i] = request.args.get('columns[%d][search][regex]' % i)\n\n #\n # Column Sort\n #\n\n if request.args.get('columns[%d][orderable]' % i):\n table_args['bSortable_%d' % i] = request.args.get('columns[%d][orderable]' % i)\n\n if request.args.get('order[%d][column]' % i):\n table_args['sort-col-count'] += 1\n table_args['iSortCol_%d' % i] = int(request.args.get('order[%d][column]' % i))\n\n if request.args.get('order[%d][dir]' % i):\n table_args['sSortDir_%d' % i] = request.args.get('order[%d][dir]' % i)\n\n i += 1\n\n return table_args", "def initiate_build(self, config: Union[TableConfig, str, UUID],\n version: Union[str, UUID] = None) -> JobSubmissionResponse:\n if isinstance(config, TableConfig):\n if version is not None:\n logger.warning('Ignoring version {} since config object was provided.'\n .format(version))\n if config.version_number is None:\n raise ValueError('Cannot build table from config which has no version. '\n 'Try registering the config before building.')\n if config.config_uid is None:\n raise ValueError('Cannot build table from config which has no uid. 
'\n 'Try registering the config before building.')\n uid = config.config_uid\n version = config.version_number\n else:\n if version is None:\n raise ValueError('Version must be specified when building by config uid.')\n uid = config\n job_id = uuid4()\n logger.info('Building table from config {} version {} with job ID {}...'\n .format(uid, version, job_id))\n path = 'projects/{}/ara-definitions/{}/versions/{}/build'.format(\n self.project_id, uid, version\n )\n response = self.session.post_resource(\n path=path,\n json={},\n params={\n 'job_id': job_id\n }\n )\n submission = JobSubmissionResponse.build(response)\n logger.info('Build job submitted with job ID {}.'.format(submission.job_id))\n return submission", "def __gen_datatable__(self):\n # | - __generate_data_table\n rows_list = []\n for Job_i in self.Job_list:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop, value in Job_i.job_params.items():\n entry_param_dict[prop] = value\n\n entry_param_dict[\"Job\"] = Job_i\n entry_param_dict[\"path\"] = Job_i.full_path\n entry_param_dict[\"max_revision\"] = Job_i.max_revision\n entry_param_dict[\"revision_number\"] = Job_i.revision_number\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def __init__(self, job_template_name, job_input, device_list,\n api_server_config, logger, amqp_client,\n transaction_id, transaction_descr, args):\n self._job_template_name = job_template_name\n self._job_input = job_input\n self._device_list = device_list\n self._api_server_config = api_server_config\n self._logger = logger\n self._job_id = None\n self._job_status = None\n self._amqp_client = amqp_client\n self._transaction_id = transaction_id\n self._transaction_descr = transaction_descr\n self._args = args\n super(JobHandler, self).__init__()", "def get_merged_args(args):\n config_dict = load_config(args.config)\n\n args_dict = {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }\n\n for arg, value in args_dict.iteritems():\n if not value:\n args_dict[arg] = config_dict[arg]\n\n if args_dict[\"cleaning_policy\"] == POLICY:\n args_dict[\"cleaning_policy\"] = config_dict[\"cleaning_policy\"]\n\n if args_dict[\"storage_time\"] == STORAGE_TIME:\n args_dict[\"storage_time\"] = config_dict[\"storage_time\"]\n\n if args_dict[\"max_size\"] == MAX_SIZE:\n args_dict[\"max_size\"] = config_dict[\"max_size\"]\n\n return args_dict", "def __copy__(self):\n from bn.distribs.distribution_builder import MultivariateTableBuilder\n builder = MultivariateTableBuilder()\n for assignment in self._table.keys():\n builder.add_row(copy(assignment), self._table[assignment])\n\n return builder.build()", "def CopyTapSettings(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('copyTapSettings', payload=payload, response_object=None)", "def __copy__(self):\n # prepare unnamed arguments\n args = [getattr(self, arg) for arg in self._copy_conf['args']]\n\n # prepare named arguments\n kwargs = {}\n 
for arg in self._copy_conf['kwargs']:\n # if arg is a tuple, the first entry will be the named kwargs, and\n # the second will be the name of the attribute to copy\n name = arg\n if isinstance(arg, tuple):\n name, arg = arg\n if hasattr(self, arg):\n kwargs[name] = getattr(self, arg)\n\n # create the new instance\n new_copy = self.__class__(*args, **kwargs)\n\n # then copy attributes\n for attr_name in self._copy_conf['attrs']:\n if hasattr(self, attr_name):\n setattr(new_copy, attr_name, getattr(self, attr_name))\n\n return new_copy", "def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n 
bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)", "def create_job_configuration(start_time: str) -> ItemsJobConfig:\n # Create job configuration\n config = {\n 'source_url': os.getenv(\"ITEMS_SOURCE_URL\", default=\"\"),\n 'dest_new_url': os.getenv(\"ITEMS_DEST_NEW_URL\", default=\"\"),\n 'dest_updates_url': os.getenv(\"ITEMS_DEST_UPDATES_URL\", default=\"\"),\n 'caiasoft_api_key': os.getenv('CAIASOFT_API_KEY', default=\"\"),\n 'storage_dir': os.getenv('ITEMS_STORAGE_DIR', default=\"\"),\n 'last_success_lookup': os.getenv('ITEMS_LAST_SUCCESS_LOOKUP', default=\"\")\n }\n\n job_id_prefix = \"caia.items\"\n\n job_config = ItemsJobConfig(config, job_id_prefix, start_time)\n logger.info(f\"Job Id: {job_config['job_id']}\")\n logger.debug(f\"job_config={job_config}\")\n\n return job_config", "def __generate_data_table__(self):\n # | - __generate_data_table__\n rows_list = []\n for job in self.job_var_lst:\n revisions = self.job_revision_number(job)\n for revision in range(revisions + 1)[1:]:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop in job:\n entry_param_dict[prop[\"property\"]] = prop[\"value\"]\n\n entry_param_dict[\"variable_list\"] = job\n entry_param_dict[\"path\"] = self.var_lst_to_path(job)\n\n entry_param_dict[\"max_revision\"] = revisions\n entry_param_dict[\"revision_number\"] = revision\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def from_mapping(context: CreateCommandsContext, dry_run):\n if dry_run:\n logger.info(\"** Dry run, nothing will be sent to server **\")\n\n # Make sure no jobs are actually created\n context.client_tool.create_path_job = mock_create\n 
context.client_tool.create_pacs_job = mock_create\n\n job_sets = extract_job_sets(\n context.default_parameters(), context.get_mapping()\n )\n\n # inspect project name and destination to present the next question to the user\n project_names = set()\n destination_paths = set()\n for job_set in job_sets:\n project_names.add(job_set.get_param_by_type(Project).value)\n destination_paths.add(job_set.get_param_by_type(DestinationPath).value)\n\n question = (\n f\"This will create {len(job_sets)} jobs on \"\n f\"{context.get_active_server().name},\"\n f\" for projects '{list(project_names)}', writing data to \"\n f\"'{[str(x) for x in destination_paths]}'. Are you sure?\"\n )\n if not click.confirm(question):\n logger.info(\"Cancelled\")\n return\n\n created_job_ids = create_jobs(context, job_sets)\n\n if created_job_ids:\n context.add_to_batch(created_job_ids)\n\n logger.info(\"Done\")", "def _prepare(self):\n logging.info('-> copy configuration...')\n path_cofig = self.params['path_config_bUnwarpJ']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_SIFT' in self.params:\n path_cofig = self.params['path_config_IJ_SIFT']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_MOPS' in self.params:\n path_cofig = self.params['path_config_IJ_MOPS']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n copy: Optional[pulumi.Input[pulumi.InputType['JobCopyArgs']]] = None,\n extract: Optional[pulumi.Input[pulumi.InputType['JobExtractArgs']]] = None,\n job_id: Optional[pulumi.Input[str]] = None,\n job_timeout_ms: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n load: Optional[pulumi.Input[pulumi.InputType['JobLoadArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n query: Optional[pulumi.Input[pulumi.InputType['JobQueryArgs']]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['copy'] = copy\n __props__['extract'] = extract\n if job_id is None:\n raise TypeError(\"Missing required property 'job_id'\")\n __props__['job_id'] = job_id\n __props__['job_timeout_ms'] = job_timeout_ms\n __props__['labels'] = labels\n __props__['load'] = load\n __props__['location'] = location\n __props__['project'] = project\n __props__['query'] = query\n __props__['job_type'] = None\n __props__['user_email'] = None\n super(Job, __self__).__init__(\n 'gcp:bigquery/job:Job',\n resource_name,\n __props__,\n opts)", "def build_from_config(self, config: Union[TableConfig, str, UUID], *,\n version: 
Union[str, int] = None,\n timeout: float = 15 * 60) -> GemTable:\n job = self.initiate_build(config, version)\n return self.get_by_build_job(job, timeout=timeout)", "def _GetUpdatedMigrationJob(\n self, migration_job, source_ref, destination_ref, args):\n update_fields = self._GetUpdateMask(args)\n if args.IsSpecified('display_name'):\n migration_job.displayName = args.display_name\n if args.IsSpecified('type'):\n migration_job.type = self._GetType(self.messages.MigrationJob, args.type)\n if args.IsSpecified('dump_path'):\n migration_job.dumpPath = args.dump_path\n if args.IsSpecified('source'):\n migration_job.source = source_ref.RelativeName()\n if args.IsSpecified('destination'):\n migration_job.destination = destination_ref.RelativeName()\n if args.IsKnownAndSpecified('dump_parallel_level'):\n migration_job.performanceConfig = self._GetPerformanceConfig(args)\n self._UpdateConnectivity(migration_job, args)\n self._UpdateLabels(args, migration_job, update_fields)\n return migration_job, update_fields", "def __init__(self, job_spec: JobSpec):\n self.job_spec = job_spec\n \n self.merge_rules = spark.read.csv(job_spec.mapping_document_path, header='true').toPandas()\n\n self.join_map = JoinMap()\n \n # defined externally\n self.merge_action_map = merge_action_map", "def _init_from_config(self):\n self.arch = self.job_config.get('arch', 'x86_64')\n self.os_type = self.job_config.get(\"os_type\")\n self.flavor = self.job_config.get(\"flavor\")\n self.codename = self.job_config.get(\"codename\")\n self.os_version = self._get_version()\n # if os_version is given, prefer version/codename derived from it\n if self.os_version:\n self.os_version, self.codename = \\\n OS.version_codename(self.os_type, self.os_version)\n self.branch = self.job_config.get(\"branch\")\n self.tag = self.job_config.get(\"tag\")\n self.ref = self.job_config.get(\"ref\")\n self.distro = self._get_distro(\n distro=self.os_type,\n version=self.os_version,\n codename=self.codename,\n )\n self.pkg_type = \"deb\" if self.os_type.lower() in (\n \"ubuntu\",\n \"debian\",\n ) else \"rpm\"\n\n if not getattr(self, 'flavor'):\n # avoiding circular imports\n from teuthology.suite.util import get_install_task_flavor\n # when we're initializing from a full teuthology config, not just a\n # task config we need to make sure we're looking at the flavor for\n # the install task\n self.flavor = get_install_task_flavor(self.job_config)", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def create_compilation_job(CompilationJobName=None, RoleArn=None, InputConfig=None, OutputConfig=None, StoppingCondition=None):\n pass", "def prepare_pr_condor_job(self, pool_type, pool_address, number_of_jobs, subtask_index, data_files, rank='0', extraArgs=''):\n ############\n copasi_file = 'auto_copasi_%d.$(Process).cps' % subtask_index\n output_file = 'output_%d.$(Process).txt' % subtask_index\n \n \n \n if pool_type == 'ec2':\n binary_dir = '/usr/local/bin'\n transfer_executable = 'NO'\n else:\n binary_dir, binary = os.path.split(settings.COPASI_LOCAL_BINARY)\n transfer_executable = 'YES'\n \n input_files_string = ', '\n for data_file in data_files:\n input_files_string += (data_file + ', ')\n input_files_string = input_files_string.rstrip(', ')\n\n condor_job_string = Template(condor_spec.raw_condor_job_string).substitute(copasiFile=copasi_file, \n otherFiles=input_files_string,\n rank=rank,\n binary_dir = binary_dir,\n transfer_executable = transfer_executable,\n pool_type = 
pool_type,\n pool_address = pool_address,\n subtask=str(subtask_index),\n n = number_of_jobs,\n outputFile = output_file,\n extraArgs='',\n )\n \n condor_job_filename = 'auto_condor_%d.job'%subtask_index\n condor_job_full_filename = os.path.join(self.path, condor_job_filename)\n condor_file = open(condor_job_full_filename, 'w')\n condor_file.write(condor_job_string)\n condor_file.close()\n\n return condor_job_filename", "def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config", "def copy(self) -> pulumi.Output[Optional['outputs.JobCopy']]:\n return pulumi.get(self, \"copy\")" ]
[ "0.6806518", "0.51471066", "0.5135233", "0.5034788", "0.4957393", "0.49131292", "0.483293", "0.4809419", "0.47891185", "0.47865072", "0.4757659", "0.46871853", "0.46683812", "0.46534342", "0.46357578", "0.46097738", "0.45914286", "0.45865327", "0.457555", "0.45609608", "0.45531315", "0.4541542", "0.45230606", "0.45217237", "0.45121175", "0.44751364", "0.44724846", "0.44585148", "0.44583514", "0.44578895" ]
0.73063713
0
Process schema updates (additions/mode changes) for the request. Retrieves the current table schema for ref and attempts to merge in the schema provided in the request. This is necessary since the API backend does not handle PATCH semantics for schema updates (e.g. processing only the deltas), so we must always send the fully updated schema in the request.
def ProcessSchemaUpdate(ref, args, request):
  table = request.table
  relaxed_columns = args.relax_columns
  if not table.schema and not relaxed_columns:  # if not updating schema,
    return request                              # then just return.

  original_schema = _TryGetCurrentSchema(ref.Parent().Name(),
                                         ref.Name(),
                                         ref.projectId)

  new_schema_columns = table.schema
  updated_fields = _GetUpdatedSchema(original_schema,
                                     new_schema_columns,
                                     relaxed_columns)

  table_schema_type = GetApiMessage('TableSchema')
  request.table.schema = table_schema_type(fields=updated_fields)

  return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_schema_updates(self):\n data = self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/schema-update\" % (self.project_key, self.recipe_name))\n return RequiredSchemaUpdates(self, data)", "async def upgradeSchema(self) -> None:", "def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry", "def resolve_schema_in_request_body(self, request_body):\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)", "def process(*, schemas: types.Schemas) -> None:\n # Retrieve back references\n backrefs = process_helper.get_artifacts(\n schemas=schemas, get_schema_artifacts=_get_schema_backrefs\n )\n # Map to a schema for each grouped back references\n backref_schemas = process_helper.calculate_outputs(\n artifacts=backrefs, calculate_output=_backrefs_to_schema\n )\n # Convert to list to resolve iterator\n backref_schema_list = list(backref_schemas)\n # Add backreferences to schemas\n for name, backref_schema in backref_schema_list:\n schemas[name] = {\"allOf\": [schemas[name], backref_schema]}", "def _GetUpdatedSchema(\n original_schema,\n new_columns=None,\n relaxed_columns=None):\n orig_field_map = (\n {f.name: f for f in original_schema.fields} if original_schema else {})\n\n if relaxed_columns:\n orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)\n\n if new_columns:\n orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)\n\n return sorted(orig_field_map.values(), key=lambda x: x.name)", "def resolve_schema(self, data):\n if not isinstance(data, dict):\n return\n\n # OAS 2 component or OAS 3 header\n if \"schema\" in data:\n data[\"schema\"] = self.openapi.resolve_schema_dict(data[\"schema\"])\n # OAS 3 component except header\n if self.openapi_version.major >= 3:\n if \"content\" in data:\n for content_type in data[\"content\"]:\n schema = data[\"content\"][content_type][\"schema\"]\n data[\"content\"][content_type][\n \"schema\"\n ] = self.openapi.resolve_schema_dict(schema)", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)", "def update_schema(self, 
engine_name, schema):\n endpoint = \"engines/{}/schema\".format(engine_name)\n data = json.dumps(schema)\n return self.swiftype_session.request('post', endpoint, data=data)", "def merge_schema(self, schema):\n for _, attr_schema in schema.iter_attributes():\n self.merge_attribute_schema(attr_schema)", "def test_compare_schemas_happypath(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.base_schema\n )\n\n assert status == schema_utils.Update.no_update", "def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def _update(self, schema: 'Schema'):\n for method in schema._get_methods():\n if method.id in self:\n raise ValueError(\n f\"Duplicate method id for {method.method} id: {method.id}\"\n )\n\n for combinator in schema._get_combinators():\n if combinator.id in self:\n raise ValueError(\n f\"Duplicate combinator id for {combinator.predicate} \" +\n f\"id: {combinator.id}\"\n )\n\n self.constructors += schema.constructors\n self.functions += schema.functions\n\n self._build_schema_data()", "def preprocess_schema(schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces=None, qualified=False):\n\n from .simplexml import SimpleXMLElement # here to avoid recursive imports\n\n # analyze the namespaces used in this schema\n local_namespaces = {}\n for k, v in schema[:]:\n if k.startswith(\"xmlns\"):\n local_namespaces[get_local_name(k)] = v\n if k == 'targetNamespace':\n # URI namespace reference for this schema\n if v == \"urn:DefaultNamespace\":\n v = global_namespaces[None]\n local_namespaces[None] = v\n if k == 'elementFormDefault':\n qualified = (v == \"qualified\")\n # add schema namespaces to the global namespace dict = 
{URI: ns prefix}\n for ns in local_namespaces.values():\n if ns not in global_namespaces:\n global_namespaces[ns] = 'ns%s' % len(global_namespaces)\n \n for element in schema.children() or []:\n if element.get_local_name() in ('import', 'include',):\n schema_namespace = element['namespace']\n schema_location = element['schemaLocation']\n if schema_location is None:\n log.debug('Schema location not provided for %s!' % schema_namespace)\n continue\n if schema_location in imported_schemas:\n log.debug('Schema %s already imported!' % schema_location)\n continue\n imported_schemas[schema_location] = schema_namespace\n log.debug('Importing schema %s from %s' % (schema_namespace, schema_location))\n # Open uri and read xml:\n xml = fetch(schema_location, http, cache, force_download, wsdl_basedir)\n\n # Parse imported XML schema (recursively):\n imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)\n preprocess_schema(imported_schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces, qualified)\n\n element_type = element.get_local_name()\n if element_type in ('element', 'complexType', \"simpleType\"):\n namespace = local_namespaces[None] # get targetNamespace\n element_ns = global_namespaces[ns] # get the prefix\n element_name = element['name']\n log.debug(\"Parsing Element %s: %s\" % (element_type, element_name))\n if element.get_local_name() == 'complexType':\n children = element.children()\n elif element.get_local_name() == 'simpleType':\n children = element('restriction', ns=xsd_uri)\n elif element.get_local_name() == 'element' and element['type']:\n children = element\n else:\n children = element.children()\n if children:\n children = children.children()\n elif element.get_local_name() == 'element':\n children = element\n if children:\n process_element(elements, element_name, children, element_type, xsd_uri, dialect, namespace, qualified)", "def modify_schema(setup_path, names, lp, creds, reporter, ldif, msg):\n\n return provision_schema(setup_path, names, lp, creds, reporter, ldif, msg, True)", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "async def update_db(appname, schema, table, action):\n loop = asyncio.get_event_loop()\n if args.action == 'info':\n await schema_info(args.appname, args.schema)\n sys.exit()\n\n dbstate = await schema_state(args.appname, args.schema, args.table)\n modstate = model_state(args.appname, args.schema, args.table)\n updiff, downdiff = compare_state(args.appname, args.schema, args.table,\n dbstate, modstate)\n if args.action == 'upgrade':\n await set_state(args.appname, updiff)\n elif args.action == 'downgrade':\n await set_state(args.appname, downdiff)\n else:\n print(\"upgrade diff looks like\")\n print(updiff)\n print(\"downgrade diff looks like\")\n print(downdiff)", "def _Dynamic_GetSchema(self, req, schema, request_id=None):\n # This is not used, but it is required for the method signature.\n del request_id\n\n app_str = req.app()\n self.__ValidateAppId(app_str)\n schema.set_more_results(False)", "def status_changes_schema():\n schema = endpoint_schema(\"status_changes\")\n items = schema[\"properties\"][\"data\"][\"properties\"][\"status_changes\"][\"items\"]\n\n # merge the state machine definitions and transition combinations rule\n state_machine_defs, transitions = common.vehicle_state_machine(\"vehicle_state\", \"event_types\")\n schema[\"definitions\"].update(state_machine_defs)\n items[\"allOf\"].append(transitions)\n\n 
trip_id_ref = common.load_definitions(\"trip_id_reference\")\n items[\"allOf\"].append(trip_id_ref)\n\n # verify and return\n return common.check_schema(schema)", "def update(self, schema: 'Schema'):\n self._update(schema)", "def test_schema_updates(self):\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.ContentLibraryIndexer.SCHEMA_VERSION\",\n new=0):\n result = self._create_library(slug=\"test-lib-schemaupdates-1\", title=\"Title 1\", description=\"Description\")\n library_key = LibraryLocatorV2.from_string(result['id'])\n assert len(ContentLibraryIndexer.get_items([library_key])) == 1\n\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.ContentLibraryIndexer.SCHEMA_VERSION\",\n new=1):\n assert len(ContentLibraryIndexer.get_items([library_key])) == 0\n\n call_command(\"reindex_content_library\", all=True, force=True)\n\n assert len(ContentLibraryIndexer.get_items([library_key])) == 1", "def process_updates():\n print \"[{x}] Processing Requests\".format(x=dates.now())\n WorkflowApi.process_requests()\n WorkflowApi.process_enhancements()", "def fetch_schema(self) -> None:\n if self.schema_file:\n logger.info(\"Loaded schema from file '%s'\", self.schema_file)\n self._schema = load_schema_file(self.schema_file)\n else:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "def ensure_internal_schema_updated(self):\n if self._internal_schema_updated:\n return\n if internalmigrations.needs_upgrading(self):\n assert not self._in_transaction\n with self.lock():\n internalmigrations.upgrade(self)\n self.connection.commit()\n self._internal_schema_updated = True", "def rebuild(self, dframe, overwrite=False):\n current_schema = self\n new_schema = schema_from_dframe(dframe, self)\n\n if current_schema and not overwrite:\n # merge new schema with existing schema\n current_schema.update(new_schema)\n new_schema = current_schema\n\n return new_schema", "def test_schema_updates(self):\n lib = self._create_library(slug=\"test-lib--block-schemaupdates-1\", title=\"Title 1\", description=\"Description\")\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.LibraryBlockIndexer.SCHEMA_VERSION\",\n new=0):\n block = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 1\n\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.LibraryBlockIndexer.SCHEMA_VERSION\",\n new=1):\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 0\n\n call_command(\"reindex_content_library\", all=True, force=True)\n\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 1", "def test_compare_schemas_major(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.major_removed_value\n )\n\n assert status == schema_utils.Update.major", "def _load_schema(self, mode=\"staging\"):\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del 
columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))" ]
[ "0.65370333", "0.61016726", "0.59257406", "0.59054583", "0.57786834", "0.5593872", "0.5549331", "0.5481114", "0.5472658", "0.54690826", "0.5401272", "0.5361555", "0.53470576", "0.5315268", "0.5254957", "0.52487254", "0.5139043", "0.50770354", "0.5074396", "0.5061819", "0.5033071", "0.502315", "0.5017254", "0.50041014", "0.5001959", "0.50011426", "0.49769256", "0.49501354", "0.49407235", "0.4929252" ]
0.8179473
0
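To make the full-schema-update idea above concrete, here is a minimal, self-contained sketch of the same merge behaviour using stand-in namedtuple fields rather than the real TableSchema/TableFieldSchema messages; the Field type and merge_full_schema helper below are hypothetical illustrations, not part of the gcloud code.

from collections import namedtuple

# Stand-in for a BigQuery TableFieldSchema message (illustration only).
Field = namedtuple('Field', ['name', 'type', 'mode'])

def merge_full_schema(original_fields, new_fields=(), relax_columns=()):
  """Return the complete merged schema that would be sent back to the API."""
  merged = {f.name: f for f in original_fields}
  for name in relax_columns:          # relax mode on existing columns
    if name not in merged:
      raise ValueError('cannot relax unknown column: {}'.format(name))
    merged[name] = merged[name]._replace(mode='NULLABLE')
  for field in new_fields:            # append brand-new columns
    if field.name in merged:
      raise ValueError('column already exists: {}'.format(field.name))
    merged[field.name] = field
  return sorted(merged.values(), key=lambda f: f.name)

current = [Field('id', 'INTEGER', 'REQUIRED'), Field('name', 'STRING', 'NULLABLE')]
print(merge_full_schema(current,
                        new_fields=[Field('email', 'STRING', 'NULLABLE')],
                        relax_columns=['id']))

Because the backend replaces the whole schema rather than patching deltas, the merge always returns every column, not just the changed ones.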
Try to retrieve the current BigQuery TableSchema for a table_ref. Tries to fetch the schema of an existing table. Raises SchemaUpdateError if table is not found or if table is not of type 'TABLE'.
def _TryGetCurrentSchema(dataset_id, table_id, project_id):
  client = GetApiClient()
  service = client.tables
  get_request_type = GetApiMessage('BigqueryTablesGetRequest')
  get_request = get_request_type(datasetId=dataset_id,
                                 tableId=table_id,
                                 projectId=project_id)
  try:
    table = service.Get(get_request)
    if not table or table.type != 'TABLE':
      raise SchemaUpdateError('Schema modifications only supported '
                              'on TABLE objects received [{}]'.format(table))
  except apitools_exceptions.HttpNotFoundError:
    raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(
        project_id, dataset_id, table_id))

  return table.schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n return bg_table.schema", "def fetch_table_schema(self, table_name):\n ddl = self.query(sql.show_create_table(table_name))\n if ddl:\n try:\n return parse_create(ddl[0][\"Create Table\"])\n except ParseError as e:\n raise OSCError(\n \"TABLE_PARSING_ERROR\",\n {\"db\": self._current_db, \"table\": self.table_name, \"msg\": str(e)},\n )", "def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None", "def _get_table_reflection(self, schema: str, table: str) -> Table:\n return self.sql_metadata.tables.get(f\"{schema}.{table}\",\n Table(table, self.sql_metadata, schema=schema, autoload=True))", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def CreateTableFromJson(self, table_name, schema_json):\n try:\n schema = json.JSONDecoder().decode(schema_json)\n except ValueError, e:\n raise SchemaError('Could not parse fields:\\n%s\\n%s' %\n (schema_json, str(e)))\n\n conn = self._Connect()\n result = conn.Call(\n dict(method='bigquery.tables.insert',\n collection='tables',\n operation=bq.REST.INSERT,\n params=dict(name=table_name, fields=schema)))\n return result", "def create_tables(client: bigquery.Client, tableSchemas: dict) -> dict:\n ds = create_dataset(client, f'FusionTable_Autoimport_{datetime.now()}')\n\n def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n \"\"\"Create a SchemaField from the dict\"\"\"\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n )\n\n def _table_from_ft(ft_schema: dict) -> bigquery.Table:\n \"\"\"Create a local representation of a BigQuery table\"\"\"\n # A \"TableSchema\" is just a sequence of SchemaFields https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.table.Table.html\n schema = list(map(_create_field_schema, ft_schema['columns']))\n table = bigquery.Table(\n bigquery.TableReference(ds, to_safe_name(ft_schema['name'])),\n schema\n )\n table.description = ft_schema.get('description', '')\n return table\n\n return {\n ftId: client.create_table(_table_from_ft(ftSchema))\n for (ftId, ftSchema) in tableSchemas.items()\n }", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def create_bq_table(client, dataset, table, schema):\n \n print('Creating table %s.%s' % (dataset, table))\n exists = client.check_table(dataset, table)\n if exists:\n raise AssertionError(\"Table already exists: %s.%s\" % (dataset,table))\n created = client.create_table(dataset, table, schema)\n # Check that the empty 
table was created\n exists = client.check_table(dataset, table)\n if not exists:\n raise RuntimeError('Table creation failed: %s.%s' % (dataset, table))", "def __check_table(input_table):\n\n try:\n table = TABLE_TYPES[input_table]\n return table\n except KeyError:\n raise InvalidTableType(input_table)", "def CreateTableFromFile(self, table_name, schema_path):\n try:\n schema_file = open(schema_path)\n schema_json = schema_file.read()\n schema_file.close()\n except IOError, e:\n raise SchemaError('Could not read file (%s):\\n%s' %\n (schema_path, str(e)))\n return self.CreateTableFromJson(table_name, schema_json)", "def DescribeTable(self, table_name):\n conn = self._Connect()\n cursor = conn.cursor()\n return cursor.bq_get_table_metadata(table_name)", "def getTableSchema(self,tableName):\n\tif not self.schemaDict.has_key(tableName):\n\t if self.dbType==\"sqlite\":\n\t query = \"SELECT * FROM sqlite_master WHERE name='%s'\"%tableName\n\t tup = self.fetchOne(query)\n\t schema= tup[4]\n\t else: # MySQL \n\t query = \"DESCRIBE %s\"%tableName\n\t tup = self.fetchAll(query)\n\t schema= \"CREATE TABLE %s (\"%tableName\n\t for item in tup:\n\t name = item[0]\n\t\t type = item[1]\n\t\t priKey = item[3]\n\t\t autoInc = item[5] \n\t schema+=name+' '+type+' '+priKey+' '+autoInc\n\t\t if item!=tup[-1]:\n\t\t schema+=','\n\t schema+=\" )\"\n\t return schema\n\telse:\n\t return self.schemaDict[tableName]", "def _table_from_ft(ft_schema: dict) -> bigquery.Table:\n # A \"TableSchema\" is just a sequence of SchemaFields https://googleapis.dev/python/bigquery/latest/generated/google.cloud.bigquery.table.Table.html\n schema = list(map(_create_field_schema, ft_schema['columns']))\n table = bigquery.Table(\n bigquery.TableReference(ds, to_safe_name(ft_schema['name'])),\n schema\n )\n table.description = ft_schema.get('description', '')\n return table", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_tables_in_schema(self, conn, schema_name):\n return conn.get_tables(schema_name)['table_name']", "def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n 'KeySchema': [\n {\n 'AttributeName': self._key_field.name,\n 'KeyType': 'HASH'\n }\n ],\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': self.read_capacity_units,\n 'WriteCapacityUnits': self.write_capacity_units\n }\n }", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def _create_table_if_not_exists(self) -> bigquery.Table:\n table = self.client.create_table(\n table=bigquery.Table(table_ref=self._table_ref, schema=Schema),\n exists_ok=True,\n )\n logging.info(\"table %s already exists.\", table.full_table_id)\n return table", "def getTableSchema(self, lsstLevel, dbName, tableName):\n return self._doRequest(self.httpClient.getTableSchema, lsstLevel, dbName, tableName)", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = 
self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def schema_ref(schema, table):\n return schema + '.' + table", "def get_table_exists(schema_name, table_name):\n sql = (\"SELECT * FROM sys.objects so JOIN sys.schemas ss on (so.schema_id = ss.schema_id) \"\n \"WHERE so.type = 'U' AND so.name = ? and ss.name = ?\")\n\n row = fetch_row(sql, [table_name, schema_name])\n\n return True if row else False", "def ensure_schema(client, table_name):\n query = ''.join([\n 'CREATE TABLE {cf} ',\n '(\"lockId\" ascii, \"claimId\" timeuuid, PRIMARY KEY(\"lockId\", \"claimId\"));'])\n\n def errback(failure):\n failure.trap(InvalidRequestException)\n\n return client.execute(query.format(cf=table_name),\n {}, ConsistencyLevel.QUORUM).addErrback(errback)", "def BqTableSchemaFileProcessor(file_arg):\n table_schema_type = GetApiMessage('TableSchema')\n schema_field_type = GetApiMessage('TableFieldSchema')\n\n try:\n schema_json = yaml.load(file_arg)\n schema_json = schema_json.get('schema', None)\n\n if not schema_json or not isinstance(schema_json, list):\n raise SchemaFileError(\n 'Error parsing schema file: no schema field list defined in file')\n\n all_fields = []\n for field in schema_json:\n new_field = schema_field_type(name=field['name'],\n type=field['type'],\n mode=field.get('mode', 'NULLABLE'))\n all_fields.append(new_field)\n\n return table_schema_type(fields=sorted(all_fields, key=lambda x: x.name))\n except yaml.YAMLParseError as ype:\n raise SchemaFileError('Error parsing schema file [{}]'.format(ype))\n except (AttributeError, KeyError) as e:\n raise SchemaFileError(\n 'Error parsing schema file, invalid field definition [{}]'.format(e))", "def validate_table(self, table, table_struct, verbose=True):\n \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n GET_SCHEMA_INFORMATION_COMMAND = \"SELECT ORDINAL_POSITION, COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE, COLUMN_KEY, EXTRA \" \\\n \t \"FROM INFORMATION_SCHEMA.COLUMNS \" \\\n \t \"WHERE TABLE_NAME='{0}' ORDER BY ORDINAL_POSITION\".format(table)\n \n GET_SCHEMA_FK_INFORMATION_COMMAND = \"SELECT COLUMN_NAME, CONSTRAINT_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME \" \\\n \"FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE \" \\\n \"WHERE REFERENCED_TABLE_SCHEMA = '{0}' AND TABLE_NAME = '{1}' AND COLUMN_NAME = '{2}'\"\n \n CHANGE_TYPE_COMMAND = \"ALTER TABLE {0} MODIFY {1} {2} {3}\"\n \n ADD_FK_COMMAND = \"ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})\" \n \n DROP_FK_CONSTRAINT_COMMAND = \"ALTER TABLE {0} DROP FOREIGN KEY {1}\" \n \n \n self.cursor.execute(GET_SCHEMA_INFORMATION_COMMAND)\n \n # load all column info from the database \n columns = {}\n for c in self.cursor:\n columns[c[1]] = c\n \n for column,db_col in zip(table_struct,columns):\n \n # load parameter values from the DB \n (ord_pos, name, col_type, isnull, key_type, extra) = columns[db_col]\n \n isnull = isnull == 'YES'\n auto_increment = extra == 'auto_increment'\n foreign_key = key_type == 'MUL'\n \n # parse new parameter values\n struct_type = table_struct[column][0]\n parameters = table_struct[column][1] if ( len(table_struct[column]) > 1) else None\n \n # get parameters values in boolean format\n if (parameters == None):\n new_isnull = True\n new_auto_increment = False\n new_foreign_key = 
False\n else:\n if 'not_null' in parameters: new_isnull = not parameters['not_null']\n else: new_isnull = True\n \n if 'auto_increment' in parameters: new_auto_increment = parameters['auto_increment']\n else: new_auto_increment = False\n \n if 'foreign_key' in parameters: new_foreign_key = parameters['foreign_key']\n else: new_foreign_key = False\n \n \n \n \n if verbose: \n print(\"\\n---\\n\\nChecking column '{0}'...\".format(column))\n \n # check name, type and each parameter \n if name == column:\n \n # if something doesn't match, change within the database\n if ( col_type != struct_type ): \n if verbose:\n print(\"Column '{0}' found in the correct position with the incorrect type.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, col_type, struct_type),)\n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), '')\n \n if verbose: print(\"\\t\" + cmd)\n \n self.cursor.execute(cmd) \n \n if ( isnull != new_isnull ):\n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"NOT NULLABLE\" if new_isnull else \"NULLABLE\", \"NULLABLE\" if new_isnull else \"NOT NULLABLE\"))\n \n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), \"NOT NULL\" if not new_isnull else \"\" )\n \n if verbose: print(\"\\t\" + cmd)\n \n \n self.cursor.execute(cmd)\n \n if ( auto_increment != new_auto_increment ):\n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"AUTO INCREMENT\" if new_auto_increment else \"none\", \"none\" if new_auto_increment else \"AUTO INCREMENT\"))\n \n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), \"AUTO INCREMENT\" if new_auto_increment else \"\" )\n \n if verbose: print(\"\\t\" + cmd)\n \n \n self.cursor.execute(cmd)\n \n \n if ( foreign_key != new_foreign_key ):\n \n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"FOREIGN KEY\" if new_auto_increment else \"none\", \"none\" if new_auto_increment else \"FOREIGN KEY\"))\n \n \n \n if ('foreign_key' in parameters and parameters['foreign_key']):\n \n referenced_table = parameters['references'].split('(')[0]\n referenced_column = parameters['references'].split('(')[1][:-1] \n \n \n if (not self.check_table(referenced_table, verbose=False)):\n raise(TableNotFoundError)\n \n \n if (not self.check_column(referenced_column, referenced_table, verbose=False)):\n raise(ColumnNotFoundError)\n \n \n cmd = ADD_FK_COMMAND.format(table,column,referenced_table, referenced_column)\n\n \n \n if verbose: print(\"\\t\" + cmd)\n \n try:\n self.cursor.execute(cmd) \n except:\n print(\" > Error: Cannot add foreign key constraint to column '{0}' in the table '{1}'. 
You must remove all data from\\n > this column using the clear_column() command first.\".format(column, table))\n \n else:\n \n # check if column has a foreign key constraint\n \n cmd = GET_SCHEMA_FK_INFORMATION_COMMAND.format(self.config['database'], table, column)\n \n self.cursor.execute(cmd)\n \n fk_name = None\n for row in self.cursor:\n fk_name = row[1]\n break\n \n if fk_name != None:\n cmd = DROP_FK_CONSTRAINT_COMMAND.format(table, fk_name)\n \n if verbose: \n print(\"Column '{0}' involved in foreign key constraint '{1}'\".format(column, fk_name))\n print(\"Dropping foreign key constraint '{0}'\".format(fk_name))\n print(\"\\t\" + cmd)\n \n self.cursor.execute(cmd)\n\n \n \n if verbose: print(\"Done.\")\n \n \n if (len(columns) > len(table_struct)):\n \n if verbose: print(\"\\n---\\n\\nExtra columns found in database\")\n \n for col in columns:\n if (col not in table_struct): \n \n if verbose:\n print(\"Column '{0}' found in the database but not found in the configuration.\".format(col))\n \n self.delete_column(col, table)\n \n \n elif(len(table_struct) > len(columns)):\n \n if verbose: print(\"\\n---\\n\\nExtra columns found in configuration. \")\n\n for col in table_struct:\n if col not in columns:\n if verbose: print(\"Column '{0}' found in configuration but not in database\".format(col))\n self.insert_column(col, table_struct[col][0], table, params = table_struct[col][1] if ( len(table_struct[col]) > 1) else None)", "def _load_bigquery_schemas(self):\n logger.info(\"Reading BigQuery schema files...\")\n for table_name in self.tables + self.type_tables:\n logger.info(f\"Reading schema file for table '{table_name}'...\")\n schema_json = resource_stream('sotorrent_pipeline',\n f'bigquery_schemas/{table_name}.json').read().decode()\n self.bigquery_schemas[table_name] = json.loads(schema_json)\n self.bigquery_schemas_with_fields[table_name] = json.loads('{\"fields\":' + schema_json + '}')\n logger.info(f\"Read {len(self.bigquery_schemas)} schema file(s).\")", "def is_table_exist(table_name: str, schema: str) -> str:\n\n return f\"\"\"\n SELECT\n table_name\n FROM\n information_schema.tables\n WHERE\n table_name = '{table_name}' AND\n table_schema='{schema}'\n LIMIT 1\n \"\"\"", "def read_schema_from_db(cur, table):\n num_rows = cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n tbl_schema = []\n for i in range(num_rows):\n row = cur.fetchone()\n tbl_schema.append([row[0], row[1]])\n return tbl_schema" ]
[ "0.73889893", "0.6379297", "0.6305416", "0.6051959", "0.58043814", "0.57696646", "0.56733495", "0.5627315", "0.5534374", "0.5378048", "0.5363961", "0.53382576", "0.5309689", "0.5298764", "0.5259146", "0.52387893", "0.52369916", "0.52243495", "0.52111673", "0.5200395", "0.5174603", "0.5162272", "0.5160721", "0.51297754", "0.5118883", "0.5115819", "0.5082074", "0.5081547", "0.50572026", "0.5016008" ]
0.74110204
0
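For comparison, roughly the same existence and type guard can be written against the public google-cloud-bigquery client instead of the apitools-based client used in the snippet above. This is a hedged sketch: the message wording is simplified and a plain ValueError stands in for SchemaUpdateError.

from google.api_core.exceptions import NotFound
from google.cloud import bigquery

def try_get_current_schema(project_id, dataset_id, table_id):
  """Fetch the schema of an existing native table, or raise ValueError."""
  client = bigquery.Client(project=project_id)
  table_ref = '{}.{}.{}'.format(project_id, dataset_id, table_id)
  try:
    table = client.get_table(table_ref)
  except NotFound:
    raise ValueError('Table with id [{}:{}:{}] not found.'.format(
        project_id, dataset_id, table_id))
  if table.table_type != 'TABLE':
    raise ValueError('Schema modifications only supported on TABLE objects, '
                     'received [{}]'.format(table.table_type))
  return table.schema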
Change mode to `NULLABLE` for columns in an existing schema. Tries to set the mode of existing columns in orig_schema_map to `NULLABLE`. Raises SchemaUpdateError if a column is not found in orig_schema_map.
def _GetRelaxedCols(relaxed_columns, orig_schema_map):
  updated_schema_map = orig_schema_map.copy()
  for col in relaxed_columns:
    if col in orig_schema_map:
      updated_schema_map[col].mode = 'NULLABLE'
    else:
      raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
  return updated_schema_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['status']\n new_mode = new_info['mode']\n new_name = new_info['name']\n new_type = new_info['type']\n new_status = new_schema_entry['status']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # If the old field is a REQUIRED primitive (which could only have come\n # from an existing schema), the new field can be either a\n # NULLABLE(filled) or a NULLABLE(unfilled).\n if old_mode == 'REQUIRED' and new_mode == 'NULLABLE':\n # If the new field is filled, then retain the REQUIRED.\n if new_schema_entry['filled']:\n return old_mode\n else:\n # The new field is not filled (i.e. an empty or null field).\n # If --infer_mode is active, then we allow the REQUIRED to\n # revert back to NULLABLE.\n if self.infer_mode:\n return new_mode\n else:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode.'\n ' cannot convert to NULLABLE because infer_schema not'\n ' set:'\n f' old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n elif old_mode != new_mode:\n self.log_error(\n f'Ignoring non-RECORD field with mismatched mode: '\n f'old=({old_status},{full_old_name},{old_mode},'\n f'{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},'\n f'{new_type})'\n )\n return None\n return old_mode", "def nullable(self):\n _columns = []\n if not isinstance(self._last_column, list):\n _columns = [self._last_column]\n\n for column in _columns:\n column.nullable()\n return self", "def filter_schema(schema):\n for column, column_schema in schema.iteritems():\n if column_schema.get(CARDINALITY):\n del column_schema[CARDINALITY]\n schema[column] = column_schema\n\n return schema", "def _delete_null_columns(self):\r\n ds = DeleteStatement(self.column_family_name)\r\n deleted_fields = False\r\n for _, v in self.instance._values.items():\r\n col = v.column\r\n if v.deleted:\r\n ds.add_field(col.db_field_name)\r\n deleted_fields = True\r\n elif isinstance(col, Map):\r\n uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)\r\n if uc.get_context_size() > 0:\r\n ds.add_field(uc)\r\n deleted_fields = True\r\n\r\n if deleted_fields:\r\n for name, col in self.model._primary_keys.items():\r\n ds.add_where_clause(WhereClause(\r\n col.db_field_name,\r\n EqualsOperator(),\r\n col.to_database(getattr(self.instance, name))\r\n ))\r\n self._execute(ds)", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path=None,\n ):\n if not old_schema_entry:\n return new_schema_entry\n\n # If the new schema is None, return immediately.\n if not new_schema_entry:\n return 
new_schema_entry\n\n # If a field value is missing, permanently set 'filled' to False.\n if not new_schema_entry['filled'] or not old_schema_entry['filled']:\n old_schema_entry['filled'] = False\n new_schema_entry['filled'] = False\n\n old_status = old_schema_entry['status']\n new_status = new_schema_entry['status']\n\n # new 'soft' does not clobber old 'hard'\n if old_status == 'hard' and new_status == 'soft':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n old_schema_entry['info']['mode'] = mode\n return old_schema_entry\n\n # new 'hard' clobbers old 'soft'\n if old_status == 'soft' and new_status == 'hard':\n mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if mode is None:\n return None\n new_schema_entry['info']['mode'] = mode\n return new_schema_entry\n\n # Verify that it's soft->soft or hard->hard\n if old_status != new_status:\n raise Exception(\n f'Unexpected schema_entry type, this should never happen: '\n f'old ({old_status}); new ({new_status})'\n )\n\n old_info = old_schema_entry['info']\n old_name = old_info['name']\n old_type = old_info['type']\n old_mode = old_info['mode']\n new_info = new_schema_entry['info']\n new_name = new_info['name']\n new_type = new_info['type']\n new_mode = new_info['mode']\n\n full_old_name = json_full_path(base_path, old_name)\n full_new_name = json_full_path(base_path, new_name)\n\n # Defensive check, names should always be the same.\n if old_name != new_name:\n if old_name.lower() != new_name.lower():\n raise Exception(\n 'Unexpected difference in name, should never happen:'\n f' old_name ({full_old_name}) != new_name ({full_new_name})'\n )\n else:\n # preserve old name if case is different\n new_info['name'] = old_info['name']\n\n # Recursively merge in the subfields of a RECORD, allowing\n # NULLABLE to become REPEATED (because 'bq load' allows it).\n if old_type == 'RECORD' and new_type == 'RECORD':\n # Allow NULLABLE RECORD to be upgraded to REPEATED RECORD because\n # 'bq load' allows it.\n if old_mode == 'NULLABLE' and new_mode == 'REPEATED':\n old_info['mode'] = 'REPEATED'\n self.log_error(\n f'Converting schema for \"{full_old_name}\" from '\n 'NULLABLE RECORD into REPEATED RECORD'\n )\n elif old_mode == 'REPEATED' and new_mode == 'NULLABLE':\n # TODO: Maybe remove this warning output. It was helpful during\n # development, but maybe it's just natural.\n self.log_error(\n f'Leaving schema for \"{full_old_name}\" as REPEATED RECORD'\n )\n\n # RECORD type needs a recursive merging of sub-fields. 
We merge into\n # the 'old_schema_entry' which assumes that the 'old_schema_entry'\n # can be modified in situ.\n old_fields = old_info['fields']\n new_fields = new_info['fields']\n for key, new_entry in new_fields.items():\n old_entry = old_fields.get(key)\n new_base_path = json_full_path(base_path, old_name)\n old_fields[key] = self.merge_schema_entry(\n old_schema_entry=old_entry,\n new_schema_entry=new_entry,\n base_path=new_base_path,\n )\n return old_schema_entry\n\n new_mode = self.merge_mode(old_schema_entry,\n new_schema_entry,\n base_path)\n if new_mode is None:\n return None\n new_schema_entry['info']['mode'] = new_mode\n\n # For all other types...\n if old_type != new_type:\n # Check that the converted types are compatible.\n candidate_type = convert_type(old_type, new_type)\n if not candidate_type:\n self.log_error(\n f'Ignoring field with mismatched type: '\n f'old=({old_status},{full_old_name},{old_mode},{old_type});'\n f' new=({new_status},{full_new_name},{new_mode},{new_type})'\n )\n return None\n\n new_info['type'] = candidate_type\n return new_schema_entry", "def remove_null_fields(self):\n with open(self.schema_path, 'r') as file_obj:\n schema_data = yaml.safe_load(file_obj)\n schema_fields = schema_data.get('mapping').keys()\n for field in schema_fields:\n # We want to keep 'false' and 0 values, and avoid removing fields that are required in the schema.\n if field in self.data and self.data[field] in (None, '', [], {}) and \\\n not schema_data.get('mapping', {}).get(field, {}).get('required'):\n self.data.pop(field)", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "def merge_schema(self, schema):\n self.validate_schema(schema)\n\n if self.exclusive is False:\n self.exclusive = schema.exclusive\n\n if self.default is None:\n self.default = schema.default", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def drop_null(self, how: Literal[\"any\", \"all\"] = \"any\"):\n # TODO only flat columns supported...\n assert self._dtype is not None\n res = Scope._EmptyColumn(self._dtype.constructor(nullable=False))\n if how == \"any\":\n for i in self:\n if not self._has_any_null(i):\n res._append(i)\n elif how == \"all\":\n for i in self:\n if not self._has_all_null(i):\n res._append(i)\n return res._finalize()", "def correct_db_schema_precision(\n instance: Recorder,\n table_object: type[DeclarativeBase],\n schema_errors: set[str],\n) -> None:\n table_name = table_object.__tablename__\n\n if f\"{table_name}.double precision\" in schema_errors:\n from ..migration import ( # pylint: disable=import-outside-toplevel\n _modify_columns,\n )\n\n precision_columns = _get_precision_column_types(table_object)\n # Attempt to convert timestamp columns to µs precision\n session_maker = instance.get_session\n engine = instance.engine\n assert engine is not None, \"Engine should be set\"\n _modify_columns(\n session_maker,\n engine,\n table_name,\n [f\"{column} {DOUBLE_PRECISION_TYPE_SQL}\" for column in precision_columns],\n )", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def is_nullable(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return schema_obj.is_nullable\n return False", "def setNullAllowed(self, onlyNullAllowed):\n self._onlyNullAllowed = onlyNullAllowed", "def nullify(self):\n 
self._original_values.clear()\n self._modified_values.clear()\n self._extra_record_data.clear()\n self._references.clear()\n for mtm in self._mtm_referencelist:\n self._mtm_referencelist[mtm].parentobjectid = None\n for chl in self._child_referencelist:\n self._child_referencelist[chl].clear() \n self._ismodified = False\n self._hasdata = False\n self._astxt = \"(null)\"\n if self._table: \n for f in self._table:\n self._original_values[f.name] = None", "def set_sql_mode(self):\n self.execute_sql(\n sql.set_session_variable(\"sql_mode\"),\n (\"STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO\",),\n )", "def is_nullable(self) -> bool: # pragma: no cover\n pass", "def _AddNewColsToSchema(new_fields, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for new_field in new_fields:\n if new_field.name in orig_schema_map:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n updated_schema_map[new_field.name] = new_field\n return updated_schema_map", "def fillna_mode(data, columns, verbose=True):\n for col in columns:\n fill_val = data[col].mode()[0]\n if verbose: print('Filling ' + col + ' with: ' + fill_val)\n data[col].fillna(fill_val, inplace=True)", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def _copy_custom_attributes(self, column):\n\n column._fk = self._fk\n column._fk_on_update = self._fk_on_update\n column._fk_on_delete = self._fk_on_delete\n\n super()._copy_custom_attributes(column)", "async def upgradeSchema(self) -> None:", "def _GetUpdatedSchema(\n original_schema,\n new_columns=None,\n relaxed_columns=None):\n orig_field_map = (\n {f.name: f for f in original_schema.fields} if original_schema else {})\n\n if relaxed_columns:\n orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)\n\n if new_columns:\n orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)\n\n return sorted(orig_field_map.values(), key=lambda x: x.name)", "def reset_schema_defs():\n SCHEMA_DEFS.clear()\n SCHEMA_DEFS.update((typ, typ) for typ in PRIMITIVE_TYPES)", "def replace(self, dictionary):\n for column in self.__table__.columns.keys():\n setattr(self, column, None)\n self.from_dict(dictionary)", "def _fillna_meta_cols(self):\n for col_name, fill_value in self._fillna.items():\n if col_name in self._hybrid_meta.columns:\n self._hybrid_meta[col_name].fillna(fill_value, inplace=True)\n else:\n self.__warn_missing_col(col_name, action='fill')\n\n self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True)\n self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True)", "def update_records(cursor,table_schema,table_name,column_name,value):\n update_records = \"UPDATE 
\" + table_schema + \".\" + table_name + \" SET \" + column_name + \"='\" + value + \"' WHERE COALESCE(\" + column_name + \",'')='';\"\n cursor.execute(update_records)", "def _set_editable_mode(self):\n dist = self.distribution\n build = dist.get_command_obj(\"build\")\n for cmd_name in build.get_sub_commands():\n cmd = dist.get_command_obj(cmd_name)\n if hasattr(cmd, \"editable_mode\"):\n cmd.editable_mode = True\n elif hasattr(cmd, \"inplace\"):\n cmd.inplace = True # backward compatibility with distutils", "def test_null_update_deletes_column(self):\n partition = uuid4()\n for i in range(5):\n TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))\n\n # sanity check\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, str(i))\n\n # perform update\n TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)\n\n for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):\n self.assertEqual(row.cluster, i)\n self.assertEqual(row.count, i)\n self.assertEqual(row.text, None if i == 3 else str(i))" ]
[ "0.581769", "0.5178056", "0.5142687", "0.5107475", "0.50148743", "0.4938137", "0.49304643", "0.4709507", "0.4704251", "0.4693166", "0.46439534", "0.460573", "0.45949084", "0.4564342", "0.45579243", "0.44939494", "0.4487079", "0.44671857", "0.44571728", "0.44475183", "0.44284815", "0.44273856", "0.4420121", "0.44103774", "0.4398482", "0.4389953", "0.43886152", "0.43481353", "0.4338426", "0.43361387" ]
0.637235
0
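A small usage sketch for the relax helper above; the SimpleNamespace objects stand in for real TableFieldSchema messages, and the call assumes the _GetRelaxedCols function shown earlier is in scope.

from types import SimpleNamespace

schema_map = {
    'age': SimpleNamespace(name='age', type='INTEGER', mode='REQUIRED'),
    'name': SimpleNamespace(name='name', type='STRING', mode='REQUIRED'),
}

relaxed = _GetRelaxedCols(['age'], schema_map)
assert relaxed['age'].mode == 'NULLABLE'   # requested column is relaxed
assert relaxed['name'].mode == 'REQUIRED'  # untouched columns keep their mode
# Note: .copy() is shallow, so the original 'age' field object is mutated too.
# _GetRelaxedCols(['missing'], schema_map)  # would raise SchemaUpdateError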
Add new columns to an existing schema. Tries to add new fields to an existing schema. Raises SchemaUpdateError if a column already exists in orig_schema_map.
def _AddNewColsToSchema(new_fields, orig_schema_map):
  updated_schema_map = orig_schema_map.copy()
  for new_field in new_fields:
    if new_field.name in orig_schema_map:
      raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
    updated_schema_map[new_field.name] = new_field
  return updated_schema_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def _GetRelaxedCols(relaxed_columns, orig_schema_map):\n updated_schema_map = orig_schema_map.copy()\n for col in relaxed_columns:\n if col in orig_schema_map:\n updated_schema_map[col].mode = 'NULLABLE'\n else:\n raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)\n return updated_schema_map", "def _GetUpdatedSchema(\n original_schema,\n new_columns=None,\n relaxed_columns=None):\n orig_field_map = (\n {f.name: f for f in original_schema.fields} if original_schema else {})\n\n if relaxed_columns:\n orig_field_map = _GetRelaxedCols(relaxed_columns, orig_field_map)\n\n if new_columns:\n orig_field_map = _AddNewColsToSchema(new_columns.fields, orig_field_map)\n\n return sorted(orig_field_map.values(), key=lambda x: x.name)", "def add_cols(self, source) :\n\n cols = source.get_cols()\n types = source.get_types()\n\n new_cols = []\n new_types = []\n for i in range(len(cols)) :\n if cols[i] not in self.cols :\n new_cols.append(cols[i])\n new_types.append(types[i])\n self.cols.extend(new_cols)\n self.types.extend(new_types)\n\n self._alter_table(new_cols, new_types)\n\n row_ids = self.get_values('__ROWID')\n \n for col in new_cols :\n new_vals = source.get_values(col)\n if len(row_ids) == 0 :\n for val in new_vals :\n self._insert_internal(['__ROWID', col], [0, val])\n\n row_ids = self.get_values('__ROWID')\n\n else :\n binds = zip(new_vals, row_ids)\n q = self._quoter(col)\n sql_base = 'UPDATE \"%s\" SET \"%s\" = %s WHERE \"__ROWID\" = %%d' % (self.name, col, q)\n cur = self.con.cursor()\n for bind in binds :\n if bind[0] :\n update_sql = sql_base % (str(bind[0]), bind[1])\n cur.execute(update_sql)\n\n self.version += 1", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', 
db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def add_columns(self, table, col_data, col_type):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n for data, typ in zip(col_data, col_type):\n c.execute(\"ALTER TABLE {tn} ADD COLUMN {cn} {ct}\".\n format(tn=table, cn=data, ct=typ))\n conn.commit() \n conn.close()", "def add_table_column(self, schema, column):\n if not column[\"name\"] or not constants.NAME_RX.match(column[\"name\"]):\n raise ValueError(\"invalid column name\")\n if utils.name_in_nocase(column[\"name\"], [c[\"name\"] for c in schema[\"columns\"]]):\n raise ValueError(\"non-unique column name\")\n if column[\"type\"] not in constants.COLUMN_TYPES:\n raise ValueError(\"invalid column type\")\n sql = (\n f'''ALTER TABLE \"{schema['name']}\"'''\n f\"\"\" ADD COLUMN \"{column['name']}\" {column['type']}\"\"\"\n )\n if column.get(\"notnull\"):\n notnull = [\"NOT NULL\"]\n if column[\"type\"] == constants.INTEGER:\n notnull.append(\"DEFAULT 0\")\n elif column[\"type\"] == constants.REAL:\n notnull.append(\"DEFAULT 0.0\")\n elif column[\"type\"] in (constants.TEXT, constants.BLOB):\n notnull.append(\"DEFAULT ''\")\n sql += \" \" + \" \".join(notnull)\n self.dbcnx.execute(sql)\n schema[\"columns\"].append(column)\n self.update_table(schema)", "def add_column_to_staging_table(cursor,table_schema,table_name,column_name):\n if not check_if_column_exists(cursor, table_schema, table_name, column_name):\n add_column = \"ALTER TABLE \" + table_schema + \".\" + table_name + \" ADD COLUMN \" + column_name + \" text;\"\n cursor.execute(add_column)", "def add_schema_fields(self, fields):\n if not fields:\n return\n\n data = json.dumps(fields)\n\n try:\n return self.client.post(\n self._get_collection_url('schema/fields'),\n body=data\n )\n except solr_errors.SolrError as e:\n raise solr_errors.SolrSchemaUpdateError(fields, message=e.args[0])", "def add_column_into_target_sf(self, tap_type, table, new_column):\n self.run_query_target_snowflake(\n f'ALTER TABLE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table} ADD {new_column[\"name\"]} int'\n )\n self.run_query_target_snowflake(\n f'UPDATE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table}'\n f' SET {new_column[\"name\"]}={new_column[\"value\"]} WHERE 1=1'\n )", "def update_schema(self, new_schema):\n return self.conn.update_schema(new_schema)", "def add_feature_columns(self, feature_columns: typing.List[str]):\n self.feature_columns += feature_columns", "def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" 
+ getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)", "def newColumn (layer,FieldName,DataType):\n # Check if field already exists\n if layer.fields().indexFromName(FieldName)==-1:\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes([QgsField(FieldName,DataType)])\n print(\"New field \\\"{}\\\" added\".format(FieldName))\n # Update to propagate the changes\n layer.updateFields()\n else:\n print(\"Field \\\"{}\\\" already exists.\".format(FieldName))", "def rename_column(self, original_column_name, new_column_name):\n self.renames.append((original_column_name, new_column_name))\n if not self.column_exists(new_column_name):\n super(MigrationTable, self).rename_column(original_column_name, new_column_name)", "def _add_hybrid_cols(self):\n for new_col_name, method in HYBRID_METHODS.items():\n out = method(self)\n if out is not None:\n try:\n self._hybrid_meta[new_col_name] = out\n except ValueError as e:\n msg = (\"Unable to add {!r} column to hybrid meta. The \"\n \"following exception was raised when adding \"\n \"the data output by '{}': {!r}.\")\n w = msg.format(new_col_name, method.__name__, e)\n logger.warning(w)\n warn(w, OutputWarning)", "def ProcessSchemaUpdate(ref, args, request):\n table = request.table\n relaxed_columns = args.relax_columns\n if not table.schema and not relaxed_columns: # if not updating schema,\n return request # then just return.\n\n original_schema = _TryGetCurrentSchema(ref.Parent().Name(),\n ref.Name(),\n ref.projectId)\n\n new_schema_columns = table.schema\n updated_fields = _GetUpdatedSchema(original_schema,\n new_schema_columns,\n relaxed_columns)\n\n table_schema_type = GetApiMessage('TableSchema')\n request.table.schema = table_schema_type(fields=updated_fields)\n\n return request", "def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )", "def update(self, other):\n # Check if any columns will remain with their original length. 
If so\n # also check if the lengths of the tables are the same.\n self._dirty = True\n nrows = other.number_of_rows()\n\n if (other._columns and\n set(self._columns) - set(other._columns) and\n other.number_of_rows() != self.number_of_rows()):\n\n raise ValueError('Can not add columns of length {}'\n ' to table of length {}'.format(\n other.number_of_rows(),\n self.number_of_rows()))\n\n for name, column in other._columns.items():\n self._set_column_column_nocheck(name, column, nrows)\n\n self.set_table_attributes(other.get_table_attributes())\n self.set_name(other.get_name())", "def create_column(self, new_column, dtype):\n self.logger.debug(\"[%u] Ready to add column %s\" %\n (os.getpid(), new_column))\n ddl = \"\"\"\n ALTER TABLE {schema}.{table}\n ADD COLUMN IF NOT EXISTS {col} {type}\n \"\"\"\n # TODO Replace by execute_ddl func and test it\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl.format(schema=self.config['schema'],\n table=self.config['table'],\n col=new_column,\n type=dtype))\n self.logger.debug(\"[%u] Column %s has been added\" %\n (os.getpid(), new_column))", "def modify_bigquery_schema(self, schema, info_keys):\n # type: (bigquery.TableSchema, Set[str]) -> None", "def add_col(self, colname, n_batch=5000, debug=False):\n\n if debug: print(\"Create new column {col}\".format(col=colname))\n # Alter table add column\n #\n alter_query = '''\n ALTER TABLE \"{tablename}\"\n ADD COLUMN \"{colname}\" {datatype};\n '''.format(tablename=self.get_carto_tablename(),\n colname=colname,\n datatype=datatype_map(str(self.dtypes[colname])))\n if debug: print(alter_query)\n\n # add column\n resp = self.carto_sql_client.send(alter_query)\n if debug: print(resp)\n\n # update all the values in that column\n #\n # NOTE: fails if colval is 'inf' or some other exceptional Python\n # or NumPy type\n n_items = len(self[colname])\n update_query = '''\n UPDATE \"{tablename}\"\n SET \"{colname}\" = {colval}\n WHERE \"cartodb_id\" = {cartodb_id};\n '''\n queries = []\n\n for row_num, item in enumerate(self[colname].iteritems()):\n # if debug: print(item)\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n temp_query = update_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(item[1], pgtype),\n cartodb_id=item[0]).strip()\n queries.append(temp_query)\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n output_query = '\\n'.join(queries)\n if debug: print(output_query)\n if debug: print(\"Num chars in query: {}\".format(len(output_query)))\n resp = self.carto_sql_client.send(output_query)\n queries = []\n\n return None", "def AddColumns(sqlite_file, table_name):\r\n columns = ['cf_direct_parent','cf_kingdom','cf_superclass',\\\r\n 'cf_class','cf_subclass','cf_intermediate_0','cf_intermediate_1',\\\r\n 'cf_intermediate_2','cf_intermediate_3','cf_intermediate_4',\\\r\n 'cf_intermediate_5','cf_molecular_framework','cf_alternative_parents',\\\r\n 'cf_substituents', 'cf_description']\r\n column_type = 'TEXT'\r\n # Connecting to the database file\r\n conn = sqlite3.connect(sqlite_file) # Connecting to the database\r\n c = conn.cursor() # Adding a cursor to interact with the database\r\n # Adding new column, if it does not exist yet, without a row value\r\n for new_column_name in columns:\r\n try:\r\n c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\r\n .format(tn=table_name, cn=new_column_name, ct=column_type))\r\n print(\"Column created: {cn}\".format(cn=new_column_name))\r\n except 
sqlite3.OperationalError:\r\n print(\"Column already exists: {cn}\".format(cn=new_column_name))\r\n conn.commit()\r\n conn.close()\r\n return None", "def rename_columns(self, col):\n try:\n self.cleaned_data.columns = col\n except Exception as e:\n raise e", "def addCommonExtraColumn(self, req, study_id, found_extra_table, column_name, data_type, description):\n debug = False\n common_extra_table_name = None\n min_column_count = None\n quoted_column_name = '\"{0}\"'.format(column_name.upper())\n \n if 'SAMPLE' in found_extra_table:\n common_extra_table_name = 'COMMON_EXTRA_SAMPLE'\n min_column_count = 2\n elif 'PREP' in found_extra_table:\n common_extra_table_name = 'COMMON_EXTRA_PREP'\n min_column_count = 3\n \n if common_extra_table_name == None:\n raise Exception('Error: Could not determine the common extra table name. The found extra table is: %s' % found_extra_table)\n \n # Set the database data type:\n database_data_type = ''\n if data_type == 'text' or database_data_type == 'range':\n database_data_type = 'varchar2(4000)'\n elif data_type == 'numeric':\n database_data_type = 'int'\n elif data_type == 'date':\n database_data_type = 'date'\n \n if database_data_type == '':\n raise Exception('Could not determine common extra column data type.')\n\n # Create the column if it doesn't already exist\n statement = \"\"\"\n select count(*) \n from all_tab_columns \n where column_name = '{0}' \n and table_name = '{1}'\n \"\"\".format(column_name.upper(), common_extra_table_name)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().execute(statement).fetchone()\n if results[0] == 0:\n statement = 'alter table %s add %s %s' % (common_extra_table_name, quoted_column_name, database_data_type)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Copy the data found in the found extra_table\n if common_extra_table_name == 'COMMON_EXTRA_SAMPLE':\n statement = \"\"\"\n MERGE INTO common_extra_sample e\n USING (\n SELECT sample_id, {0}\n FROM {1}\n ) x\n ON (e.sample_id = x.sample_id)\n WHEN MATCHED THEN \n UPDATE SET e.{0} = x.{0}\n WHEN NOT MATCHED THEN \n INSERT (e.sample_id, e.{0})\n VALUES (x.sample_id, x.{0})\n \"\"\".format(quoted_column_name, found_extra_table)\n else:\n statement = \"\"\"\n MERGE INTO common_extra_prep e\n USING (\n SELECT sample_id, row_number, {0}\n FROM {1}\n ) x\n ON (e.sample_id = x.sample_id and e.row_number = x.row_number)\n WHEN MATCHED THEN \n UPDATE SET e.{0} = x.{0}\n WHEN NOT MATCHED THEN \n INSERT (e.sample_id, e.row_number, e.{0})\n VALUES (x.sample_id, x.row_number, x.{0})\n \"\"\".format(quoted_column_name, found_extra_table)\n \n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n statement = 'commit'\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Remove the column from the found extra table. 
If it's the last custom column in the table, remove the table\n statement = \"select count(*) from all_tab_columns where table_name = '%s'\" % (found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n results = con.cursor().execute(statement).fetchone()\n if results[0] <= min_column_count:\n statement = 'drop table %s' % (found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n else:\n statement = 'alter table %s drop column %s' % (found_extra_table, quoted_column_name)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Clean up references in study_actual_columns\n extra_table_study_id = found_extra_table.split('_')[2]\n\n statement = \"\"\"\n update study_actual_columns \n set table_name = '\"{0}\"' \n where study_id = {1} \n and table_name = '\"{2}\"'\n \"\"\".format(common_extra_table_name, extra_table_study_id, found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n statement = 'commit'\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)", "def addSchemaFile(self, newSchemaFile):\n\t\tself.schemaFile.append(newSchemaFile)" ]
[ "0.6689222", "0.6531303", "0.60537446", "0.6039341", "0.60350144", "0.5883875", "0.5845651", "0.5813085", "0.5690668", "0.5641946", "0.56057465", "0.54915494", "0.54909086", "0.5434344", "0.542425", "0.5340224", "0.53072643", "0.5284575", "0.52605635", "0.5229555", "0.52235705", "0.5215396", "0.51270014", "0.5114374", "0.5099848", "0.5091163", "0.50850743", "0.5079301", "0.5066776", "0.5061453" ]
0.82155335
0
Try to delete a dataset, propagating error on failure.
def _TryDeleteDataset(dataset_id, project_id):
  client = GetApiClient()
  service = client.datasets
  delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')
  delete_request = delete_request_type(datasetId=dataset_id,
                                       projectId=project_id,
                                       deleteContents=True)
  service.Delete(delete_request)
  log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_dataset(self, dataset):\n raise NotImplementedError('delete_dataset')", "def _handle_creation_failure(session: Session, stub_dataset: Dataset, error: str):\n try:\n dataset.delete(session, stub_dataset)\n except requests.HTTPError:\n raise CreationFailure(\n f\"Created dataset did not delete after an earlier error: {error}\"\n )\n raise CreationFailure(error)", "def _delete_dataset_netex(dataset_id):\n try:\n logging.info(\"Going to delete the netex file of the dataset %s\", dataset_id)\n community_resource = _find_community_resources(dataset_id)\n _delete_community_resources(dataset_id, community_resource)\n logging.info(\"deleted community resource for the dataset %s\", dataset_id)\n except requests.HTTPError as err:\n logging.warning(\n \"Unable to delete to the dataset %s. Http Error %s\", dataset_id, err\n )\n except Exception as err:\n logging.warning(\n \"Unable to delete to the dataset %s. Generic Error %s\", dataset_id, err\n )", "def delete_dataset(self, dataset: DatasetDB):\n try:\n self._es.delete_index(dataset_records_index(dataset.id))\n finally:\n self._es.delete_document(index=DATASETS_INDEX_NAME, doc_id=dataset.id)", "def test_dataset_deltitem(train_dataset):\n with pytest.raises(Exception):\n del train_dataset[0]", "def delete(_id):\n dataset = ESDataset.get(id=_id, ignore=404)\n\n if not dataset:\n raise NoEntityError(f\"dataset {_id} does not exist.\")\n\n dataset.delete()\n\n return dataset.name", "def delete(log, args):\n log('dataset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete dataset command coming soon.')", "def delete_dataset(self, identifier):\n # Delete the dataset directory if it exists. Otherwise return False\n dataset_dir = self.get_dataset_dir(identifier)\n if not os.path.isdir(dataset_dir):\n return False\n shutil.rmtree(dataset_dir)\n return True", "def delete_dataset(dataset_path):\n force_rmtree(dataset_path)", "def DeleteDataset(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def delete_dataset(request):\n body = json.loads(request.body)\n dataset_id = body.get('dataset_id', '')\n organization_id = body.get('organization_id')\n # check if user has access to the dataset\n d = ImportRecord.objects.filter(\n super_organization_id=organization_id, pk=dataset_id\n )\n if not d.exists():\n return {\n 'status': 'error',\n 'message': 'user does not have permission to delete dataset',\n }\n d = d[0]\n d.delete()\n return {\n 'status': 'success',\n }", "def delete(self, dataset_name=None, dataset_id=None, sure=False, really=False):\n if sure and really:\n dataset = self.get(dataset_name=dataset_name, dataset_id=dataset_id)\n success, response = self._client_api.gen_request(req_type='delete',\n path='/datasets/{}'.format(dataset.id))\n if not success:\n raise exceptions.PlatformException(response)\n logger.info('Dataset {} was deleted successfully'.format(dataset.name))\n return True\n else:\n raise exceptions.PlatformException(\n error='403',\n message='Cant delete dataset from SDK. Please login to platform to delete')", "def delete(self, ds, del_raw_data=False, **kwargs):\n self.logger.warning('ds_id already exists: {}. 
Deleting'.format(ds.id))\n self._del_iso_images(ds)\n self._es.delete_ds(ds.id)\n self._db.alter('DELETE FROM dataset WHERE id=%s', ds.id)\n if del_raw_data:\n self.logger.warning('Deleting raw data: {}'.format(ds.input_path))\n wd_man = WorkDirManager(ds.id)\n wd_man.del_input_data(ds.input_path)\n if self.mode == 'queue':\n self._queue.publish({'ds_id': ds.id, 'status': DatasetStatus.DELETED}, SM_DS_STATUS)", "def test_that_when_dataset_is_deleted_the_account_is_still_there(self):\n test_dataset = Dataset.objects.get(\n dataset_slug=\"google-geojson-example\")\n test_dataset.delete()\n with self.assertRaises(ObjectDoesNotExist):\n Dataset.objects.get(dataset_slug=\"google-geojson-example\")\n Account.objects.get(account_slug=\"test_user\")", "def delete(self, name):\n assert name, \"Must input a valid dataset name.\"\n self.manager.delete_data(name)", "def _try_delete_resource(self, delete_callable, *args, **kwargs):\n try:\n delete_callable(*args, **kwargs)\n # if resource is not found, this means it was deleted in the test\n except exceptions.NotFound:\n pass", "def delete_datasetreplica(self, dataset_replica):\n raise NotImplementedError('delete_datasetreplica')", "def delete(self, ds, del_raw_data=False):\n self._post_sm_msg(ds=ds, action=DatasetAction.DELETE, priority=DatasetActionPriority.HIGH)", "def test_dataset_exists(client, to_delete):\n DATASET_ID = \"get_table_dataset_{}\".format(_millis())\n dataset_ref = client.dataset(DATASET_ID)\n dataset = bigquery.Dataset(dataset_ref)\n dataset = client.create_dataset(dataset)\n to_delete.append(dataset)\n\n assert dataset_exists(client, dataset_ref)\n assert not dataset_exists(client, client.dataset(\"dataset doesnot exist\"))", "def delete_dataset_target_bigquery(self, dataset):\n return db.delete_dataset_bigquery(\n dataset, project=self.get_conn_env_var('TARGET_BIGQUERY', 'PROJECT')\n )", "def purge_dataset(request):\n data = json.loads(request.body.decode('utf-8'))\n if not request.user.is_staff:\n return JsonResponse({'error': 'Only admin can purge dataset'}, status=403)\n try:\n dset = models.Dataset.objects.get(pk=data['item_id'])\n except models.Dataset.DoesNotExist:\n return JsonResponse({'error': 'Dataset does not exist'}, status=403)\n purgemsg = delete_dataset_from_cold(dset)\n if purgemsg['state'] == 'error':\n return JsonResponse(purgemsg, status=500)\n else:\n return JsonResponse(purgemsg)", "def raise_exception_for_dataset(dataset_reference):\n if dataset_reference.dataset_id == non_existing_dataset_id:\n raise cloud.exceptions.NotFound('')", "def delete_bigquery_dataset(self, shared_state: Dict[str, Any]):\n\n yield\n\n # Get the bigquery dataset id used for testing and wipe it\n bigquery_dataset = shared_state[\"bigquery_dataset\"]\n bigquery_client = shared_state[\"bigquery_client\"]\n bigquery_client.delete_dataset(\n bigquery_dataset.dataset_id, delete_contents=True, not_found_ok=True\n ) # Make an API request.", "def test_delete_unexpected_error(self, requests_mock, capsys):\n requests_mock.delete(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.delete(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out", "def delete(dtype, name, rootdir=None):\n # type and the name\n # delete them\n num_deleted = 0\n for dataset in FreezableAPI.datasets(dtype,name,rootdir=rootdir,fullpath=True):\n # delete it\n shutil.rmtree(dataset)\n num_deleted += 1\n return num_deleted", "def delete_temp_dataset():\n\n bq.delete_dataset(temp_dataset_ref, 
delete_contents=True, not_found_ok=True)", "def delete(self, identifier, dataset):\n # Fix identifier because SQlAlchemy can't parse RDF Literals\n identifier = str(identifier)\n\n #self._load_config()\n self.log( 'Removing resource %s in dataset %s' % (identifier, dataset))\n\n # Remove it\n data = self.es_instance.delete_document(identifier, dataset)\n\n self.log( 'Registro %s removido com sucesso' % identifier)\n\n return data", "def delete_task(dataset):\n Observation.delete_all(dataset)\n super(dataset.__class__, dataset).delete({DATASET_ID: dataset.dataset_id})", "def do_delete(self, node_id):\n _error_code, _msg = RAMSTKDataModel.do_delete(self, node_id)\n\n # pylint: disable=attribute-defined-outside-init\n # It is defined in RAMSTKDataModel.__init__\n if _error_code != 0:\n _error_code = 1\n _msg = _msg + (\n '\\n RAMSTK ERROR: Attempted to delete non-existent '\n 'Allocation ID {0:d}.').format(node_id)\n else:\n self.last_id = max(self.tree.nodes.keys())\n\n return _error_code, _msg", "def delete_ds(self, dt):\n\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n if F not in data[k].keys():\n continue \n max_date = data[k][F]['max_date'] \n \"\"\" Deleting unecessary ds \"\"\"\n if dt > max_date : # check max date and check if data is still loaded\n print(blue + 'Memory used before deleting : ' , process.memory_info().rss/1000000000 , cend) \n del data[k][F] \n print(\"*** Erasing dataset: \" , k , ' ' , F ) \n print(blue + 'Memory used after deleting : ' , process.memory_info().rss/1000000000 , cend) \n \n else:\n continue" ]
[ "0.7605005", "0.7107175", "0.70523417", "0.69809973", "0.6883148", "0.6740322", "0.6667521", "0.65653944", "0.6536053", "0.65328515", "0.65034896", "0.6410687", "0.6386324", "0.6328574", "0.63253295", "0.62904114", "0.6277353", "0.62735856", "0.6252367", "0.61857146", "0.6168082", "0.6162157", "0.613244", "0.6089961", "0.60710067", "0.60618997", "0.6054307", "0.60175395", "0.59812534", "0.59728926" ]
0.74737805
1
Get Table resource args (source, destination) for copy command.
def GetTableCopyResourceArgs():
  table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')
  arg_specs = [
      resource_args.GetResourcePresentationSpec(
          verb='to copy from',
          name='source',
          required=True,
          prefixes=True,
          attribute_overrides={'table': 'source'},
          positional=False,
          resource_data=table_spec_data.GetData()),
      resource_args.GetResourcePresentationSpec(
          verb='to copy to',
          name='destination',
          required=True,
          prefixes=True,
          attribute_overrides={'table': 'destination'},
          positional=False,
          resource_data=table_spec_data.GetData())]
  fallthroughs = {
      '--source.dataset': ['--destination.dataset'],
      '--destination.dataset': ['--source.dataset']
  }
  return [concept_parsers.ConceptParser(arg_specs, fallthroughs)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name())\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.projectId',\n destination_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.destinationTable.tableId',\n destination_ref.Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.datasetId',\n source_ref.Parent().Name())\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.projectId',\n source_ref.projectId)\n arg_utils.SetFieldInMessage(request,\n 'job.configuration.copy.sourceTable.tableId',\n source_ref.Name())\n return request", "def visit_copy_command(element, compiler, **kw):\n qs = \"\"\"COPY {table}{columns} FROM :data_location\n WITH CREDENTIALS AS :credentials\n {format}\n {parameters}\"\"\"\n parameters = []\n bindparams = [\n sa.bindparam(\n 'data_location',\n value=element.data_location,\n type_=sa.String,\n ),\n sa.bindparam(\n 'credentials',\n value=element.credentials,\n type_=sa.String,\n ),\n ]\n\n if element.format == Format.csv:\n format_ = 'FORMAT AS CSV'\n if element.quote is not None:\n format_ += ' QUOTE AS :quote_character'\n bindparams.append(sa.bindparam(\n 'quote_character',\n value=element.quote,\n type_=sa.String,\n ))\n elif element.format == Format.json:\n format_ = 'FORMAT AS JSON AS :json_option'\n bindparams.append(sa.bindparam(\n 'json_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.avro:\n format_ = 'FORMAT AS AVRO AS :avro_option'\n bindparams.append(sa.bindparam(\n 'avro_option',\n value=element.path_file,\n type_=sa.String,\n ))\n elif element.format == Format.orc:\n format_ = 'FORMAT AS ORC'\n elif element.format == Format.parquet:\n format_ = 'FORMAT AS PARQUET'\n elif element.format == Format.fixed_width and element.fixed_width is None:\n raise sa_exc.CompileError(\n \"'fixed_width' argument required for format 'FIXEDWIDTH'.\")\n else:\n format_ = ''\n\n if element.delimiter is not None:\n parameters.append('DELIMITER AS :delimiter_char')\n bindparams.append(sa.bindparam(\n 'delimiter_char',\n value=element.delimiter,\n type_=sa.String,\n ))\n\n if element.fixed_width is not None:\n parameters.append('FIXEDWIDTH AS :fixedwidth_spec')\n bindparams.append(sa.bindparam(\n 'fixedwidth_spec',\n value=_process_fixed_width(element.fixed_width),\n type_=sa.String,\n ))\n\n if element.compression is not None:\n parameters.append(Compression(element.compression).value)\n\n if element.manifest:\n parameters.append('MANIFEST')\n\n if element.accept_any_date:\n parameters.append('ACCEPTANYDATE')\n\n if element.accept_inv_chars is not None:\n parameters.append('ACCEPTINVCHARS AS :replacement_char')\n bindparams.append(sa.bindparam(\n 'replacement_char',\n value=element.accept_inv_chars,\n type_=sa.String\n ))\n\n if element.blanks_as_null:\n parameters.append('BLANKSASNULL')\n\n if element.date_format is not None:\n parameters.append('DATEFORMAT AS :dateformat_string')\n bindparams.append(sa.bindparam(\n 'dateformat_string',\n value=element.date_format,\n type_=sa.String,\n ))\n\n if element.empty_as_null:\n parameters.append('EMPTYASNULL')\n\n if element.encoding is not None:\n parameters.append('ENCODING AS ' + Encoding(element.encoding).value)\n\n if 
element.escape:\n parameters.append('ESCAPE')\n\n if element.explicit_ids:\n parameters.append('EXPLICIT_IDS')\n\n if element.fill_record:\n parameters.append('FILLRECORD')\n\n if element.ignore_blank_lines:\n parameters.append('IGNOREBLANKLINES')\n\n if element.ignore_header is not None:\n parameters.append('IGNOREHEADER AS :number_rows')\n bindparams.append(sa.bindparam(\n 'number_rows',\n value=element.ignore_header,\n type_=sa.Integer,\n ))\n\n if element.dangerous_null_delimiter is not None:\n parameters.append(\"NULL AS '%s'\" % element.dangerous_null_delimiter)\n\n if element.remove_quotes:\n parameters.append('REMOVEQUOTES')\n\n if element.roundec:\n parameters.append('ROUNDEC')\n\n if element.time_format is not None:\n parameters.append('TIMEFORMAT AS :timeformat_string')\n bindparams.append(sa.bindparam(\n 'timeformat_string',\n value=element.time_format,\n type_=sa.String,\n ))\n\n if element.trim_blanks:\n parameters.append('TRIMBLANKS')\n\n if element.truncate_columns:\n parameters.append('TRUNCATECOLUMNS')\n\n if element.comp_rows:\n parameters.append('COMPROWS :numrows')\n bindparams.append(sa.bindparam(\n 'numrows',\n value=element.comp_rows,\n type_=sa.Integer,\n ))\n\n if element.comp_update:\n parameters.append('COMPUPDATE ON')\n elif element.comp_update is not None:\n parameters.append('COMPUPDATE OFF')\n\n if element.max_error is not None:\n parameters.append('MAXERROR AS :error_count')\n bindparams.append(sa.bindparam(\n 'error_count',\n value=element.max_error,\n type_=sa.Integer,\n ))\n\n if element.no_load:\n parameters.append('NOLOAD')\n\n if element.stat_update:\n parameters.append('STATUPDATE ON')\n elif element.stat_update is not None:\n parameters.append('STATUPDATE OFF')\n\n if element.region is not None:\n parameters.append('REGION :region')\n bindparams.append(sa.bindparam(\n 'region',\n value=element.region,\n type_=sa.String\n ))\n\n columns = ' (%s)' % ', '.join(\n compiler.preparer.format_column(column) for column in element.columns\n ) if element.columns else ''\n\n qs = qs.format(\n table=compiler.preparer.format_table(element.table),\n columns=columns,\n format=format_,\n parameters='\\n'.join(parameters)\n )\n\n return compiler.process(sa.text(qs).bindparams(*bindparams), **kw)", "def CopyFile(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"copyFile\", payload=payload, response_object=None)", "def getCloneArgs(self):\n\n values = {\n \"dest\": self.subnode_dest.makeClone()\n if self.subnode_dest is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def test_args_copy():\n args = cli.parse_args(['-c'])\n assert args.copy\n args = cli.parse_args(['--copy'])\n assert args.copy", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def command_copy(args):\n sources = args.sources\n destpath = args.destpath\n source_files = []\n for file_ in sources:\n if \"*\" in file_:\n selected = glob(file_)\n source_files.extend(selected)\n elif os.path.isfile(file_):\n 
source_files.append(file_)\n\n if destpath.endswith(\"/\") or os.path.isdir(destpath) or len(sources) > 1:\n # -- DESTDIR-MODE: Last argument is a directory.\n destdir = destpath\n else:\n # -- DESTFILE-MODE: Copy (and rename) one file.\n assert len(source_files) == 1\n destdir = os.path.dirname(destpath)\n\n # -- WORK-HORSE: Copy one or more files to destpath.\n if not os.path.isdir(destdir):\n sys.stdout.write(\"copy: Create dir %s\\n\" % destdir)\n os.makedirs(destdir)\n for source in source_files:\n destname = os.path.join(destdir, os.path.basename(source))\n sys.stdout.write(\"copy: %s => %s\\n\" % (source, destname))\n shutil.copy(source, destname)\n return 0", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"subscribed\": self.subnode_subscribed.makeClone(),\n \"subscript\": self.subnode_subscript.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def CopyObjsCommand(self, args, sub_opts=None, headers=None, debug=0,\n command='cp'):\n # Expand wildcards and containers in source StorageUris.\n src_uri_expansion = self.ExpandWildcardsAndContainers(\n args[0:len(args)-1], sub_opts, headers, debug)\n\n # Check for various problems and determine base_dst_uri based for request.\n (base_dst_uri, multi_src_request) = self.ErrorCheckCopyRequest(\n src_uri_expansion, args[-1], headers, debug, command)\n # Rewrite base_dst_uri and create dest dir as needed for multi-source copy.\n if multi_src_request:\n base_dst_uri = self.HandleMultiSrcCopyRequst(src_uri_expansion,\n base_dst_uri)\n\n # Now iterate over expanded src URIs, and perform copy operations.\n total_elapsed_time = total_bytes_transferred = 0\n for src_uri in iter(src_uri_expansion):\n for exp_src_uri in src_uri_expansion[src_uri]:\n print 'Copying %s...' 
% exp_src_uri\n dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri)\n (elapsed_time, bytes_transferred) = self.PerformCopy(\n exp_src_uri, dst_uri, sub_opts, headers, debug)\n total_elapsed_time += elapsed_time\n total_bytes_transferred += bytes_transferred\n if debug == 3:\n # Note that this only counts the actual GET and PUT bytes for the copy\n # - not any transfers for doing wildcard expansion, the initial HEAD\n # request boto performs when doing a bucket.get_key() operation, etc.\n if total_bytes_transferred != 0:\n print 'Total bytes copied=%d, total elapsed time=%5.3f secs (%sps)' % (\n total_bytes_transferred, total_elapsed_time,\n MakeHumanReadable(float(total_bytes_transferred) /\n float(total_elapsed_time)))", "def doTheCopy(argpath,argdest):\n print(\"To copy:\"+argpath)\n shutil.copy(argpath,argdest)", "def supported_table_args(self) -> t.Tuple[str, ...]:", "def cp(self, copy_from, copy_to, **kwargs):\n return self.exec_command('cp %s %s' % (copy_from, copy_to), **kwargs)", "def getCloneArgs(self):\n\n values = {\n \"dest\": self.subnode_dest.makeClone()\n if self.subnode_dest is not None\n else None,\n \"value\": self.subnode_value.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def args():\n\n useDB = docopt(__doc__)['--from-db']\n snapFile = docopt(__doc__)['-i']\n # csvFile = docopt(__doc__)['-o']\n # utils.askErase(csvFile)\n\n return [snapFile, useDB]", "def copyData(self, src_schema, src_table, src_columns, dest_schema, dest_table, dest_columns):\r\n sql = 'INSERT INTO {} ( {} ) SELECT {} FROM {}'.format(self.encodeTableName(dest_schema, dest_table), ','.join(dest_columns),\r\n ','.join(src_columns), self.encodeTableName(src_schema, src_table))\r\n return self.runSql(sql)", "def copy(CopySource=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, SourceClient=None, Config=None):\n pass", "def svn_client_copy_source_t_path_get(svn_client_copy_source_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_source_system_profile_params(argv):\n with get_audit_db(argv) as audit_db:\n if audit_db is None:\n if not argv.tablelist:\n return []\n\n if len(argv.tablelist) == 1:\n # A file containing table names\n if os.path.isfile(argv.tablelist[0]):\n with open(argv.tablelist[0]) as f:\n return [(argv.sourceschema,\n t.strip(),\n argv.targetschema,\n None) for t in f]\n\n return [(argv.sourceschema, table, argv.targetschema, None)\n for table in argv.tablelist]\n\n sql = \"\"\"\n SELECT source_region, object_name, target_region, query_condition\n FROM {audit_schema}.source_system_profile\n WHERE profile_name = %s\n AND version = %s\n AND active_ind = 'Y'\n ORDER BY object_seq\"\"\".format(audit_schema=argv.auditschema)\n\n bind_values = [argv.profilename, argv.profileversion]\n result = audit_db.execute_query(sql, argv.arraysize, bind_values)\n\n return [(row[0], row[1], row[2], row[3]) for row in result]", "def infocalypse_copy(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n\n insert_uri = opts['inserturi']\n if insert_uri == '':\n # REDFLAG: fix parameter definition so that it is required?\n ui_.warn(\"Please set the insert URI with --inserturi.\\n\")\n return\n\n request_uri = opts['requesturi']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --requesturi option.\\n\")\n return\n\n params['INSERT_URI'] = insert_uri\n 
params['REQUEST_URI'] = request_uri\n execute_copy(ui_, repo, params, stored_cfg)", "def svn_fs_copy(*args):\r\n return _fs.svn_fs_copy(*args)", "def destination(self) -> str:\n for a in self.args:\n if a[0] != '-':\n return a\n try:\n return self.kwargs['dest']\n except KeyError:\n for a in self.args:\n if a.startswith('--'):\n dest = a.lstrip('-').replace('-', '_')\n if dest.isidentifier():\n return dest\n raise AttributeError(F'The argument with these values has no destination: {self!r}')", "def _copy_to_head_args(args: Namespace) -> Namespace:\n\n _head_args = copy.deepcopy(args)\n _head_args.polling = args.polling\n _head_args.port = args.port\n _head_args.host = args.host[0]\n _head_args.uses = args.uses\n _head_args.pod_role = PodRoleType.HEAD\n _head_args.runtime_cls = 'HeadRuntime'\n _head_args.replicas = 1\n\n if args.name:\n _head_args.name = f'{args.name}/head'\n else:\n _head_args.name = f'head'\n\n return _head_args", "def copyCommand(self):\n\n selection = self.selectedIndexes()\n\n if selection:\n rows = [index.row() for index in selection]\n columns = [index.column() for index in selection]\n if len(rows) == 4:\n model = self.proxyModel.sourceModel()\n row = rows[3]\n column = columns[3]\n command = model.dataset.data[row][column].cell\n QApplication.clipboard().setText(command)", "def destination(self) -> pulumi.Input['DestinationArgs']:\n return pulumi.get(self, \"destination\")", "def test_args_combination():\n args = cli.parse_args(['-cp'])\n assert args.copy\n assert args.paste\n args = cli.parse_args(['-c', '-p'])\n assert args.copy\n assert args.paste", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"expression\": self.subnode_expression.makeClone(),\n \"lower\": self.subnode_lower.makeClone()\n if self.subnode_lower is not None\n else None,\n \"upper\": self.subnode_upper.makeClone()\n if self.subnode_upper is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"source\": self.subnode_source.makeClone(),\n \"expression\": self.subnode_expression.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def _copy_file ( self, source, dest ):\n return" ]
[ "0.62132674", "0.56889355", "0.55710876", "0.55588037", "0.5540921", "0.54608697", "0.54608697", "0.54608697", "0.53417224", "0.5325953", "0.52964944", "0.52829", "0.5282849", "0.52788526", "0.52047265", "0.52009565", "0.51657414", "0.51652837", "0.516145", "0.5143834", "0.5141003", "0.5089321", "0.5060838", "0.5059258", "0.5040755", "0.50163513", "0.4990004", "0.49761227", "0.4948509", "0.4938476" ]
0.8508633
0
Print a simple greeting to each user in the list.
def greet_users(names):
    for name in names:
        msg = "Hello, " + name.title() + "!"
        print(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greet_users(names):\n for name in names:\n print(f\"Hello, {name.title()}!\")", "def greeting(list_of_guests):\r\n for i in list_of_guests: \r\n print('Witaj ' + i)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}\"\n print(msg)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}!\"\n print(msg)", "def greet_users(names):\n for name in names:\n msg = f\"Hello, {name.title()}!\"\n print(msg)", "def greet_user(names):\n\n for name in names:\n msg = f\"Hello, {name.title()} !\"\n print(msg)", "def print_users(self):\n for i, item in enumerate(self.users):\n print(\"{}. {}\".format(i, item.name))", "def greet_user(self):\n print(\"Hello, \" + self.first_name.title() + \" \" + self.last_name.title() + \"!\")", "def greet_user(self):\n print(\"Hello \" + self.first_name.title() + \" \" +\n self.last_name.title() + \", welcome back!\")", "def greet_user(self):\n print(\"Welcome, \" + self.first_name.title() + \"!\")", "def greet_user(self):\r\n\r\n print(\"Hello \" + self.first_name.title() + \" \" + self.last_name.title() + \".\")", "def greet_user(self):\n greeting = f\"Hi {self.first_name.title()}, welcome back!\\n\"\n print(greeting)", "def greet_user(self):\n print(\"\\nWelcome, \" + self.username + \"!\")", "def greet_user(self):\n print(\"\\nWelcome, \" + self.username + \"!\")", "def greet_user(self):\r\n\t\tprint(f\"\\nHello {self.first_name}!\\n\")", "def greet_user(self):\n print(\"Hello \" + self.f_name.title() + \"!, hope you're well today!\")", "def greet_user(username):\r\n print(f\"Hello, {username.title()}!\")", "def greet_user(self):\n\t\tprint(f\"How are you doing {self.first_name.title()}?\")", "def greet_user():\r\n print('Hi,' + FirstName + ' ' + LastName + ' thanks for joining us inside the beer app!')", "async def _welcome(self, ctx, *, users: discord.User = None):\n if not users:\n await ctx.send(f\"Welcome {ctx.author.mention} to {ctx.guild.name}!\")\n else:\n if len(users) > 1:\n users = humanize_list(users)\n else:\n users = users[0].mention\n await ctx.send(f\"Welcome {users} to {ctx.guild.name}!\")", "def print_users(self):\n for user in self.users.values():\n print(user)", "def greet_user(self):\n print(f\"Hiya {self.username}!\")", "def greet_user(username):\n print(\"Hello, \" + username.title() + \"!\")", "def greet_user(username):\n print(\"Hello, \" + username.title() + \"!\")", "def greet_user(username):\r\n print(\"Hello, \" + username + \"!\")", "def greeter_user(username):\n print(f\"Hello {username}\")", "def greet_user(username):\n print(\"Hello, \" + username + \"!\")", "def greet_user(self):\n print(\"Greatings \"+ self.first_name + \" \" + self.last_name +\", welcome to the matrix.\")", "def greet_user():\n print(\"Hello\")", "def greeting(players_name):\n print \"\\nGreat! Welcome, \" + players_name + \". The purpose of this game is to fill in the blanks for all the sentences provided.\"" ]
[ "0.7734112", "0.77260417", "0.77145106", "0.76987046", "0.76987046", "0.7657923", "0.7309932", "0.7257354", "0.72450185", "0.71396", "0.7127663", "0.71238464", "0.70934796", "0.70934796", "0.7048947", "0.70248073", "0.69579583", "0.69458425", "0.6898827", "0.6775238", "0.67534596", "0.6752124", "0.67099774", "0.67099774", "0.6647687", "0.6625254", "0.66171527", "0.66072917", "0.6599232", "0.6530941" ]
0.7734813
1
Register the message handlers that every journal should support.
def register_message_handlers(journal):
    journal.dispatcher.register_message_handler(
        DumpQuorumMessage,
        _dumpquorumhandler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)", "def registerMessageHandler(self, message_handler, message_priority_list):\n if isinstance(message_handler, MessageHandler):\n for key in message_priority_list:\n rule = (message_priority_list[key], message_handler)\n self.message_handlers[key].append(rule)\n self.message_handlers[key].sort() # Keep priority order\n else:\n self.logger.critical(\n \"MessageHandler registration failed. Object \" +\n repr(message_handler) +\" is invalid type.\")\n raise TypeError(\"Only MessageHandlers can be registered!\")\n self.logger.debug(\"MessageHandler '\" + str(message_handler) +\n \"' registered to the message bus.\")", "def register_service(self, service):\n for message_handler in service.iter_message_handlers():\n self.message_handlers[message_handler.name] = message_handler", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def register_handler(logger):\n # Register exit handler\n atexit.register(res_mgr)\n\n # Register SIGINT and SIGTERM\n signal.signal(signal.SIGINT, _signal_handler)\n signal.signal(signal.SIGTERM, _signal_handler)\n\n ResourceManager._register_exception_handler(logger)", "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def setup_signal_handlers(self):\n signal.signal(signal.SIGUSR1, self.handle_logging_signal)\n signal.signal(signal.SIGUSR2, self.handle_logging_signal)", "def register_handlers(dp, di_container: di.Container):\n general.router.register_handlers(dp)\n\n di_container.wire(packages=[sys.modules[__name__]])", "def add_topic_handlers(self):\n self.client.message_callback_add(deployment_topic, self.on_deployment_topic)\n self.client.message_callback_add(illumination_topic, self.on_illumination_topic)", "def register_handler(config):\n\n @respond_to(\".*\")\n def handle(message):\n \"\"\"Respond to every Slack message and dispatch to another handler based\n on the contents of the message.\n\n This duplicates a little bit of the work that slackbot does, but allows\n us to define handlers dynamically based on the job config.\n \"\"\"\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in 
config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def add_package_handler(self, package_name, cls):\n for module in messages.MESSAGES:\n if self._fuzzy_module_name_eq(module, package_name):\n for name in module.DESCRIPTOR.message_types_by_name:\n self.add_handler(name, getattr(cls, 'on_' + name.lower()))", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass", "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def register(self, msg_type, handler):\n # Should check type is valid\n if not handler and msg_type in self.handlers.keys():\n del self.handlers[msg_type]\n return\n self.handlers[msg_type] = handler", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def register_transaction_types(journal):\n journal.dispatcher.register_message_handler(\n PermissionedValidatorRegistryTransactionMessage,\n transaction_message.transaction_message_handler)\n journal.add_transaction_store(PermissionedValidatorRegistryTransaction)\n set_global_permissioned_validators(journal.permissioned_validators)", "def register(self, events=[]):\n self.events = events\n if not self in manager.handler:\n manager.handler.append(self)", "def install_event_handlers(self, categories=None, handlers=None):\n if categories is not None and handlers is not None:\n raise ValueError(\"categories and handlers are mutually exclusive!\")\n\n from .events import get_event_handler_classes\n if categories:\n raise NotImplementedError()\n handlers = [cls() for cls in get_event_handler_classes(categories=categories)]\n else:\n handlers = handlers or [cls() for cls in get_event_handler_classes()]\n\n self._event_handlers = handlers", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n 
handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers = []\n next_handler = None\n for handler in reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def _register(self, comm, handler):", "def u2handlers(self):\n return []", "def get_handlers(self):\n raise NotImplementedError()", "def register_handlers(path = EXPLOIT_FOLDER):\n\n exploit_folder = './{}/{}'.format(os.path.dirname(__file__), path)\n handlers = []\n\n for module in os.listdir(exploit_folder):\n\n if not module.endswith(\".py\") or module == \"__init__.py\":\n continue\n\n # Execute the script\n # We assume that each executed script registers himself to the handlers dictionary.\n try:\n execfile('./{}/{}'.format(path, module))\n except Exception as e:\n log.failure(\"Could not register handler '{}' : {}\".format(module, e))\n\n log.info(\"Registered {} handler(s).\".format(len(handlers)))\n for handler in handlers:\n\n handler_name = handler.__name__\n log.info(\"- Registered '{}' handler\".format(handler_name))\n\n return handlers", "def add_handler(self, handler):\n pass", "def register_handler(self, topic, handler):\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def _install_signal_handlers(workers_socket, manager_socket):\n\n def sighup_handler(signal, frame):\n logger.info(\"hangup signal (SIGHUP) received; reloading configuration\")\n workers_socket.close()\n manager_socket.close()\n main()\n\n signal.signal(signal.SIGHUP, sighup_handler)\n\n def cleanup():\n workers_socket.close()\n manager_socket.close()\n context.destroy()\n\n def sigint_handler(signal, frame):\n logger.info(\"interrupt signal (SIGINT or Ctrl-C) received; shutting down\")\n cleanup()\n raise SystemExit\n\n signal.signal(signal.SIGINT, sigint_handler)\n\n def sigterm_handler(signal, frame):\n logger.info(\"termination signal (SIGTERM) received; shutting down\")\n cleanup()\n raise SystemExit\n\n signal.signal(signal.SIGTERM, sigterm_handler)" ]
[ "0.72620213", "0.70435107", "0.66157293", "0.65716064", "0.6314305", "0.62207043", "0.6180045", "0.60054207", "0.6003531", "0.5998502", "0.5990488", "0.59491366", "0.5929773", "0.5925832", "0.5805171", "0.58019876", "0.5768111", "0.575978", "0.5712927", "0.5671215", "0.56642425", "0.5644854", "0.5639432", "0.5635607", "0.5624606", "0.56242144", "0.5588481", "0.5579341", "0.5577335", "0.5525336" ]
0.8429424
0
Constructor for DumpQuorumMessage class.
def __init__(self, minfo=None):
    if minfo is None:
        minfo = {}
    super(DumpQuorumMessage, self).__init__(minfo)

    self.IsSystemMessage = False
    self.IsForward = True
    self.IsReliable = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump(self):\n result = super(DumpQuorumMessage, self).dump()\n return result", "def __init__(self):\n super().__init__()\n\n self.__encoded_msg = ''", "def __init__(self, msg):\n\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def __init__(self, message):\r\n self.__message = message", "def __init__(self, msg=\"\"):\n self._msg = msg\n super().__init__()", "def __init__(self, message):\n self.message = message", "def __init__(self, message):\n self.message = message", "def __init__(self, message):\n self.message = message", "def __init__(self):\n self.type = None\n self.msg = \"\"\n self.process = None\n self.edge_id = None", "def __init__(self, message):\n super().__init__(message)", "def __init__(self, connectionPool, timeout=10):\n MsgPackProtocol.__init__(self, timeout)\n self.connectionPool = connectionPool\n self.log = Logger(system=self)\n self.storage = self.connectionPool.storage\n self.peersKeyId = None", "def __init__(self, message=None):\n self.message = message", "def __init__(self, message=None):\n self.message = message", "def __init__(self, msg: str):\n self.msg = msg", "def __init__(self, message=None) -> None:\n super().__init__(message)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(DahuaQrcodeScanData, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.Header is None:\n self.Header = std_msgs.msg.Header()\n if self.x_pos is None:\n self.x_pos = 0\n if self.y_pos is None:\n self.y_pos = 0\n if self.angle is None:\n self.angle = 0\n if self.code_type is None:\n self.code_type = 0\n if self.code_num is None:\n self.code_num = 0\n else:\n self.Header = std_msgs.msg.Header()\n self.x_pos = 0\n self.y_pos = 0\n self.angle = 0\n self.code_type = 0\n self.code_num = 0", "def __init__(self, msg_id=0, xtd=0, rtr=0, ndata=0, data=() ):\r\n self.msg_id = msg_id\r\n self.rtr = rtr\r\n self.xtd = xtd\r\n self.ndata = ndata\r\n self.data = data # tuple with length 0..8\r\n self.timestamp = time.time() # Timestamp of object creation\r" ]
[ "0.7168633", "0.57381815", "0.5708409", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.56985074", "0.569133", "0.5681516", "0.5638713", "0.5632342", "0.5632342", "0.5632342", "0.5629523", "0.5628895", "0.56131965", "0.55871975", "0.55871975", "0.55618376", "0.55437434", "0.5537567", "0.552138" ]
0.7561885
0
Returns a dict with information about the dump quorum message.
def dump(self):
    result = super(DumpQuorumMessage, self).dump()
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dumps(self) -> Dict[str, Any]:\n return {\n \"commitId\": self.commit_id,\n \"parentCommitId\": self.parent_commit_id,\n \"message\": self.message,\n \"committer\": self.committer.dumps(),\n }", "def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(DumpQuorumMessage, self).__init__(minfo)\n\n self.IsSystemMessage = False\n self.IsForward = True\n self.IsReliable = True", "def msg_info_dict(self):\n return self._msg_info_dict", "def dump(self) -> dict[Any, str]:\r\n ...", "def dump(self):\n return {\"data\": self.data, \"encoding\": self.encoding,\n \"type\": self.type_name}", "def messages(self):\n return {}", "def dump(self):\n res = []\n #res.append(\"Submeshes: %d\" % len(self.submeshes))\n #res.append(\"IdxBuf: 0x%04X bytes\" % len(self.idx_buf))\n #res.append(\"PrimFmt: 0x%04X (%s)\" % (\n # self.prim_fmt_id, self.prim_fmt))\n #res.append(\"IdxType: 0x%02X (%s)\" % (\n # self.header['idx_type'], self.idx_fmt,\n #))\n #res.append(\"IdxCnt: %d\" % self.header['idx_cnt'])\n #res.append(\"VisGrp: %d\" % self.header['visibility_group'])\n #res.append(\"Unknown: 0x%08X 0x%08X 0x%08X\" % (\n # self.header['unk08'],\n # self.header['unk10'],\n # self.header['unk34'],\n #))\n #return '\\n'.join(res).replace('\\n', '\\n ')\n\n return \"%4d│%04X│%04X %-24s│%02X %s│%5d│%5d│%08X│%08X│%08X\" %(\n len(self.submeshes),\n len(self.idx_buf),\n self.prim_fmt_id, self.prim_fmt,\n self.header['idx_type'], self.idx_fmt,\n self.header['idx_cnt'],\n self.header['visibility_group'],\n self.header['unk08'], self.header['unk10'],\n self.header['unk34'],\n )", "def get_message_payload(self):\n return {\n 'ts': self.timestamp,\n 'channel': self.channel,\n 'username': self.username,\n 'icon_emoji': self.icon_emoji,\n 'blocks': [self._get_message_block()],\n }", "def status(self):\n return {\n 'id': 'status',\n 'protocol_version': 'PV62',\n 'network': self.origin_node.network.name,\n 'td': self.origin_node.chain.head.header.difficulty,\n 'best_hash': self.origin_node.chain.head.header.hash,\n 'genesis_hash': self.origin_node.chain.genesis.header.hash,\n 'size': kB_to_MB(self._message_size['status'])\n }", "def as_dict(self):\n return {'message':self.message, 'line': self.line}", "def get_info(self):\n return {'q_ref': self.q_ref, 'v_ref': self.v_ref, 'U': self.U, 'type': 'POD'}", "async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj", "def DumpCommand(database):\n if(database.Keys()):\n return \", \".join(database.Keys())\n else:\n return \"Nothing to dump\"", "def get_dump_status(self, uid: str) -> Dict[str, str]:\n return self.http.get(\n self.config.paths.dumps + '/' + str(uid) + '/status'\n )", "def dump():\n\t\treturn self.__dict__;", "def dumps(self):\n return {\n 'version': self.version(), # str version (M.m.s)\n 'region': self.region(), # integer type\n 'name': self.name(), # str type\n 'id': self._id, # previous integer unique id\n 'created': self._created, # created timestamp\n 'stage': self._stage, # \"entry\" if self._stage == Region.STAGE_ENTRY else \"exit\" if self._stage == Region.STAGE_EXIT else \"both\",\n 'direction': self._dir, # \"long\" if self._dir == Region.LONG else \"short\" if self._dir == Region.SHORT else \"both\",\n 'timeframe': self._timeframe, # timeframe_to_str(self._timeframe),\n 'expiry': self._expiry, # 
datetime.fromtimestamp(self._expiry).strftime('%Y-%m-%dT%H:%M:%S'),\n }", "def report_dump_runinfo(dump_items):\n runinfo_lines = [\"name:%s; status:%s; updated:%s\" %\n (item.name(), item.status(), item.updated())\n for item in dump_items]\n runinfo_lines.reverse()\n txt_content = \"\\n\".join(runinfo_lines)\n content = {}\n content['txt'] = txt_content + \"\\n\"\n # {\"jobs\": {name: {\"status\": stuff, \"updated\": stuff}}, othername: {...}, ...}\n content_json = {\"jobs\": {}}\n for item in sorted(dump_items, reverse=True, key=lambda job: job.name()):\n content_json[\"jobs\"][item.name()] = {'status': item.status(), 'updated': item.updated()}\n content['json'] = json.dumps(content_json)\n return content", "def dump(self):\n\n result = {\n 'size': self.size,\n 'type': self.type,\n 'filename': self.fullpath,\n 'changed': self.changed,\n }\n\n return result", "def getData(self):\n return dict(self._dump_data)", "def _dump_queue(self):\n outfile = self.registryValue('dumpFile')\n with open(outfile, 'w') as h:\n i = 1\n for nick, msg in self._queue:\n if msg is None:\n msg = '[no message]'\n h.write(\"% 2d\\t%s\\t%s\\n\" % (i, nick, msg))\n i += 1", "def dumps(self) -> Dict[str, Any]:\n return {\"number\": self.number, \"title\": self.title}", "def message2std(message):\n message['query_graph'] = message.pop('question_graph')\n for node in message['query_graph']['nodes']:\n node['node_id'] = node.pop('id')\n for edge in message['query_graph']['edges']:\n edge['edge_id'] = edge.pop('id')\n return message", "def dump(self):\n avps = self.get_all_avps_contents()\n auth = self.compute_authenticator(avps)\n header = struct.pack(RadiusMessage.RADIUS_HDR_TMPL, self.code,\n self.pid, len(self), auth)\n return b\"\".join([header, avps])", "def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'run_uuid': self.run_uuid,\n 'exc_info': self.exc_info,\n 'exc_text': self.exc_text,\n 'filename': self.filename,\n 'func_name': self.func_name,\n 'level_name': self.level_name,\n 'level_no': self.level_no,\n 'line_no': self.line_no,\n 'message': self.message,\n 'module': self.module,\n 'name': self.name,\n 'pathname': self.pathname,\n 'process': self.process,\n 'process_name': self.process_name,\n 'relative_created': self.relative_created,\n 'stack_info': self.stack_info,\n 'thread': self.thread,\n 'thread_name': self.thread_name,\n 'time_collected': datetime_to_str(self.time_collected),\n }", "def msg_info_multiple_dict(self):\n return self._msg_info_multiple_dict", "def help_dump(self):\n print(DUMP)", "def messages(self) -> dict:\n raise NotImplementedError", "def json_dump(self):\n return {\n 'log': {\n 'title': self.str_title,\n 'body': self.str_payload\n }\n }", "def printMixData(self):\n\t\tprint \"OPERATED MIXNODE: Name: %s, address: (%d, %s), PubKey: %s\" % (self.name, self.port, self.host, self.pubk)", "def dump(self) -> dict:\n d = {}\n for item in self.__dict__:\n if item in ['parsed', 'dump', 'parse_data', 'iter_list', 'safe_load']:\n continue\n if isinstance(self.__dict__[item], ConfigKey):\n d[item] = self.__dict__[item].dump()\n elif isinstance(self.__dict__[item], list):\n d[item] = self.iter_list_dump(self.__dict__[item])\n else:\n d[item] = self.__dict__[item]\n return d" ]
[ "0.6006111", "0.567213", "0.5670017", "0.5648441", "0.5578808", "0.53971803", "0.5394207", "0.5387473", "0.53841007", "0.5374053", "0.5371038", "0.53614724", "0.53580964", "0.5316283", "0.5305731", "0.52922034", "0.52715415", "0.51807743", "0.5176574", "0.51549894", "0.5138521", "0.51342803", "0.512532", "0.5109059", "0.50799423", "0.507258", "0.5060591", "0.5055935", "0.5043304", "0.49712044" ]
0.7622692
0
Retrieve a known stored filter object from the db
def retrieve_filter(self, filter_id):
    LOG.debug("Retrieve filter {}".format(filter_id))
    filter_obj = self.filter_collection.find_one({"_id": ObjectId(filter_id)})
    # use _id to preselect the currently loaded filter, and drop it while we are at it
    filter_obj.update([("filters", filter_obj.pop("_id", None))])
    return filter_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter():\n return get_filter_data(db, MyTable)", "def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None", "def get_instance(self, data):\n filters = {\n key: data[key]\n for key in self.fields.keys() if key in self.lookup_fields}\n\n if None not in filters.values():\n return self.session.query(\n self.opts.model\n ).filter_by(\n **filters\n ).first()\n return None", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def get(cls, filters: Dict = None):\n if filters is None:\n filters = {}\n\n data = DATABASE_CONNECTION.get(cls.__name__)\n\n for k, v in filters.items():\n data = [row for row in data if row[k] in v]\n\n res = [cls.deserialize(row) for row in data]\n\n return res", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def getFilter(self):\n\n return self.filter", "def get_record(self, collection_name, filter):\n\n try:\n self.logger.info('in get_record()')\n collection = self.get_db()[collection_name]\n record = collection.find_one(filter)\n self.logger.info('in get_record()')\n return record\n except Exception as e:\n self.logger.error(f'Error occurred while getting records {e}')", "def filter(self):\n return self._filter", "def get_one(self, filters: dict) -> dict:\n try:\n payment = Payments()\n payment.find(filters)\n app.logger.info(f\"Se encontro el complemento {str(payment._id)}\")\n return payment\n except Exception as e:\n print(\"ERROR in repo\", e)\n app.logger.error(e)\n return None", "def test_get_saved_filter(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[\"name\"], \"Devops\")", "def get_filters(self):", "def get_filtered(cls, client, filter_) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e", "def filter(self, filter_id):\r\n self.require_collection()\r\n url = '{0}/{1}'.format(self.get_url(), filter_id)\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def get_object(self):\n queryset = self.get_queryset()\n\n model = self.get_model()\n obj = queryset.get(get_primary_keys(model, self.kwargs))\n\n if not obj:\n raise Http404('No %s matches the given query.' 
% model.__name__)\n\n return obj", "def get_exact_filter_by_name(self, name):\n for entry in self.filters:\n if (entry['type'] == 'filter' and entry['name'] == name and\n entry['comparator'] == 'equals'):\n return entry", "def _get_filter(self, cr, uid, external_session, step, previous_filter=None, context=None):\n return None", "def find(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def db_for_read(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "def retrieve_from_db(self):\n pass", "def factory(self, request):\n # This yields the \"context\", which should be the row object\n try:\n q = model.session.query(self.sqla_table)\n q = self.filter_sqlalchemy_query(q, request)\n return q.one()\n except NoResultFound:\n # 404!\n raise NotFound()", "def test_get_saved_filters(self):\n url = reverse('xds_api:saved-filters')\n\n saved_config = SavedFilter(owner=self.user_1,\n name=\"Devops\", query=\"randomQuery\")\n saved_config.save()\n\n response = self.client.get(url)\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[0][\"name\"], \"Devops\")", "def _get_filter_set(cls, info: 'ResolveInfo') -> 'FilterSet':\n field_name = info.field_asts[0].name.value\n schema_field = info.parent_type.fields.get(field_name)\n filters_type = schema_field.args[cls.filter_arg].type\n filters: 'FilterSet' = filters_type.graphene_type\n return filters", "def getFilter(self, type: int) -> int:\n ...", "def get(cls, **filters) -> dict:\n errors = cls.validate_query(filters)\n if errors:\n raise ValidationFailed(filters, errors)\n\n cls.deserialize_query(filters)\n\n if cls.__collection__.count_documents(filters) > 1:\n raise ValidationFailed(\n filters, message=\"More than one result: Consider another filtering.\"\n )\n\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Query document matching {filters}...\")\n document = cls.__collection__.find_one(filters)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(\n f'{\"1\" if document else \"No corresponding\"} document retrieved.'\n )\n return cls.serialize(document)", "def filters(self):\n\t\treturn self.local_filter", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError" ]
[ "0.6960243", "0.62703", "0.62410784", "0.61514443", "0.6141893", "0.61203706", "0.60491437", "0.60114807", "0.585763", "0.5800049", "0.578962", "0.5782984", "0.5733101", "0.5730981", "0.571787", "0.5717332", "0.5716527", "0.5708957", "0.5699338", "0.5699338", "0.5699338", "0.5667219", "0.56482416", "0.56412035", "0.5615871", "0.5610554", "0.56024283", "0.559583", "0.55694085", "0.5552218" ]
0.6990458
0
Obtain a cursor for all filters available to an institute in a category.
def filters(self, institute_id, category="snv"):
    filters_res = self.filter_collection.find(
        {"institute_id": institute_id, "category": category}
    )
    return filters_res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(self, eng_category):\r\n sql_select_query = \"SELECT Name, URL, Ingredients FROM \"+ str(eng_category)\r\n self.mycursor.execute(sql_select_query)\r\n records = self.mycursor.fetchall()\r\n \r\n return records", "def filter():\n return get_filter_data(db, MyTable)", "def get_filters(self):", "def search_categories(self):\n with Transaction().start(DBNAME, 1):\n categorieslist = self.Category.search(['parent', '=', 'Ingredients'])\n return tuple(i.name for i in categorieslist)", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def _filter(self, __button):\r\n# WARNING: Refactor _filter; current McCabe Complexity metric = 54.\r\n _criteria = []\r\n _inputs = []\r\n _compound = []\r\n\r\n # Read the user inputs for the different fields that can be used to\r\n # filter with.\r\n _criteria.append(self.cmbCriteriaID.get_active_text())\r\n _inputs.append(self.txtFilterID.get_text())\r\n _compound.append(self.cmbCompound1.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCategory.get_active_text())\r\n _inputs.append(self.cmbFilterCategory.get_active())\r\n _compound.append(self.cmbCompound2.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaType.get_active_text())\r\n _inputs.append(self.cmbFilterType.get_active())\r\n _compound.append(self.cmbCompound3.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaStatus.get_active_text())\r\n _inputs.append(self.cmbFilterStatus.get_active())\r\n _compound.append(self.cmbCompound4.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCriticality.get_active_text())\r\n _inputs.append(self.cmbFilterCriticality.get_active())\r\n _compound.append(self.cmbCompound5.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAge.get_active_text())\r\n _inputs.append(self.txtFilterAge.get_text())\r\n _compound.append(self.cmbCompound6.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLifeCycle.get_active_text())\r\n _inputs.append(self.cmbFilterLifeCycle.get_active())\r\n _compound.append(self.cmbCompound7.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaShortDesc.get_active_text())\r\n _inputs.append(self.txtFilterShortDesc.get_text())\r\n _compound.append(self.cmbCompound8.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLongDesc.get_active_text())\r\n _inputs.append(self.txtFilterLongDesc.get_text())\r\n _compound.append(self.cmbCompound9.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRemarks.get_active_text())\r\n _inputs.append(self.txtFilterRemarks.get_text())\r\n _compound.append(self.cmbCompound10.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAnalysis.get_active_text())\r\n _inputs.append(self.txtFilterAnalysis.get_text())\r\n _compound.append(self.cmbCompound11.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTest.get_active_text())\r\n _inputs.append(self.txtFilterTest.get_text())\r\n _compound.append(self.cmbCompound12.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTestCase.get_active_text())\r\n _inputs.append(self.txtFilterTestCase.get_text())\r\n _compound.append(self.cmbCompound13.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestBy.get_active_text())\r\n _inputs.append(self.cmbFilterRequestBy.get_active_text())\r\n 
_compound.append(self.cmbCompound14.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestDate.get_active_text())\r\n _inputs.append(self.txtFilterRequestDate.get_text())\r\n _compound.append(self.cmbCompound15.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewBy.get_active_text())\r\n _inputs.append(self.cmbFilterReviewBy.get_active_text())\r\n _compound.append(self.cmbCompound16.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewDate.get_active_text())\r\n _inputs.append(self.txtFilterReviewDate.get_text())\r\n _compound.append(self.cmbCompound17.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveBy.get_active_text())\r\n _inputs.append(self.cmbFilterApproveBy.get_active_text())\r\n _compound.append(self.cmbCompound18.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveDate.get_active_text())\r\n _inputs.append(self.txtFilterApproveDate.get_text())\r\n _compound.append(self.cmbCompound19.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseBy.get_active_text())\r\n _inputs.append(self.cmbFilterCloseBy.get_active_text())\r\n _compound.append(self.cmbCompound20.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseDate.get_active_text())\r\n _inputs.append(self.txtFilterCloseDate.get_text())\r\n _compound.append(self.cmbCompound21.get_active_text())\r\n\r\n _inputs.append(self.chkFilterAccepted.get_active())\r\n _compound.append(self.cmbCompound22.get_active_text())\r\n\r\n _inputs.append(self.chkFilterReviewed.get_active())\r\n\r\n _criteria.append(self.cmbCriteriaAssembly.get_active_text())\r\n _model = self.cmbAssembly.get_model()\r\n _row = self.cmbAssembly.get_active_iter()\r\n if _row is not None:\r\n _text = int(_model.get_value(_row, 1))\r\n else:\r\n _text = 0\r\n _inputs.append(_text)\r\n _compound.append(self.cmbCompound23.get_active_text())\r\n\r\n # Build the query from the user-provided inputs.\r\n if all(_c is None for _c in _criteria):\r\n query = None\r\n elif Configuration.RTK_MODULES[0] == 1:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id={0:d} AND \".format(\r\n self._revision_id)\r\n else:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id=0 AND \"\r\n\r\n if _criteria[0] is not None and _criteria[0] != '':\r\n query = query + \"fld_incident_id\" + _criteria[0] + _inputs[0]\r\n if _compound[0] is not None and _compound[0] != '':\r\n query = query + \" \" + _compound[0] + \" \"\r\n\r\n if _criteria[1] is not None and _criteria[1] != '':\r\n query = query + \"fld_incident_category\" + _criteria[1] + \\\r\n str(_inputs[1])\r\n if _compound[1] is not None and _compound[1] != '':\r\n query = query + \" \" + _compound[1] + \" \"\r\n\r\n if _criteria[2] is not None and _criteria[2] != '':\r\n query = query + \"fld_incident_type\" + _criteria[2] + \\\r\n str(_inputs[2])\r\n if _compound[2] is not None and _compound[2] != '':\r\n query = query + \" \" + _compound[2] + \" \"\r\n\r\n if _criteria[3] is not None and _criteria[3] != '':\r\n query = query + \"fld_status\" + _criteria[3] + str(_inputs[3])\r\n if _compound[3] is not None and _compound[3] != '':\r\n query = query + \" \" + _compound[3] + \" \"\r\n\r\n if _criteria[4] is not None and _criteria[4] != '':\r\n query = query + \"fld_criticality\" + _criteria[4] + str(_inputs[4])\r\n if _compound[4] is not None and _compound[4] != '':\r\n query = query + \" \" + _compound[4] + \" \"\r\n\r\n if _criteria[5] is not None and _criteria[5] != '':\r\n query = query + \"fld_incident_age\" + 
_criteria[5] + str(_inputs[5])\r\n if _compound[5] is not None and _compound[5] != '':\r\n query = query + \" \" + _compound[5] + \" \"\r\n\r\n if _criteria[6] is not None and _criteria[6] != '':\r\n query = query + \"fld_life_cycle\" + _criteria[6] + str(_inputs[6])\r\n if _compound[6] is not None and _compound[6] != '':\r\n query = query + \" \" + _compound[6] + \" \"\r\n\r\n if _criteria[21] is not None and _criteria[21] != '':\r\n query = query + \"fld_hardware_id\" + _criteria[21] + \\\r\n str(_inputs[23])\r\n if _compound[22] is not None and _compound[22] != '':\r\n query = query + \" \" + _compound[22] + \" \"\r\n\r\n if _criteria[7] is not None and _criteria[7] != '':\r\n query = query + \"fld_short_description \" + _criteria[7] + \\\r\n \" '%\" + _inputs[7] + \"%'\"\r\n if _compound[7] is not None and _compound[7] != '':\r\n query = query + \" \" + _compound[7] + \" \"\r\n\r\n if _criteria[8] is not None and _criteria[8] != '':\r\n query = query + \"fld_long_description \" + _criteria[8] + \\\r\n \" '%\" + _inputs[8] + \"%'\"\r\n if _compound[8] is not None and _compound[8] != '':\r\n query = query + \" \" + _compound[8] + \" \"\r\n\r\n if _criteria[9] is not None and _criteria[9] != '':\r\n query = query + \"fld_remarks \" + _criteria[9] + \\\r\n \" '%\" + _inputs[9] + \"%'\"\r\n if _compound[9] is not None and _compound[9] != '':\r\n query = query + \" \" + _compound[9] + \" \"\r\n\r\n if _criteria[10] is not None and _compound[10] != '':\r\n query = query + \"fld_analysis \" + _criteria[10] + \\\r\n \" '%\" + _inputs[10] + \"%'\"\r\n if _compound[10] is not None and _compound[10] != '':\r\n query = query + \" \" + _compound[10] + \" \"\r\n\r\n if _criteria[11] is not None and _compound[11] != '':\r\n query = query + \"fld_test_found \" + _criteria[11] + \\\r\n \" '%\" + _inputs[11] + \"%'\"\r\n if _compound[11] is not None and _compound[11] != '':\r\n query = query + \" \" + _compound[11] + \" \"\r\n\r\n if _criteria[12] is not None and _compound[12] != '':\r\n query = query + \"fld_test_case \" + _criteria[12] + \\\r\n \" '%\" + _inputs[12] + \"%'\"\r\n if _compound[12] is not None and _compound[12] != '':\r\n query = query + \" \" + _compound[12] + \" \"\r\n\r\n if _criteria[13] is not None and _compound[13] != '':\r\n query = query + \"fld_request_by\" + _criteria[13] + \\\r\n \"'\" + _inputs[13] + \"'\"\r\n if _compound[13] is not None and _compound[13] != '':\r\n query = query + \" \" + _compound[13] + \" \"\r\n\r\n if _criteria[14] is not None and _compound[14] != '':\r\n query = query + \"fld_request_date\" + _criteria[14] + \\\r\n str(datetime.strptime(_inputs[14], \"%Y-%m-%d\").toordinal())\r\n if _compound[14] is not None and _compound[14] != '':\r\n query = query + \" \" + _compound[14] + \" \"\r\n\r\n if _criteria[15] is not None and _compound[15] != '':\r\n query = query + \"fld_reviewed_by\" + _criteria[15] + \\\r\n \"'\" + _inputs[15] + \"'\"\r\n if _compound[15] is not None and _compound[15] != '':\r\n query = query + \" \" + _compound[15] + \" \"\r\n\r\n if _criteria[16] is not None and _compound[16] != '':\r\n query = query + \"fld_reviewed_date\" + _criteria[16] + \\\r\n str(datetime.strptime(_inputs[16], \"%Y-%m-%d\").toordinal())\r\n if _compound[16] is not None and _compound[16] != '':\r\n query = query + \" \" + _compound[16] + \" \"\r\n\r\n if _criteria[17] is not None and _compound[17] != '':\r\n query = query + \"fld_approved_by\" + _criteria[17] + \\\r\n \"'\" + _inputs[17] + \"'\"\r\n if _compound[17] is not None and _compound[17] != '':\r\n query = 
query + \" \" + _compound[17] + \" \"\r\n\r\n if _criteria[18] is not None and _compound[18] != '':\r\n query = query + \"fld_approved_date\" + _criteria[18] + \\\r\n str(datetime.strptime(_inputs[18], \"%Y-%m-%d\").toordinal())\r\n if _compound[18] is not None and _compound[18] != '':\r\n query = query + \" \" + _compound[18] + \" \"\r\n\r\n if _criteria[19] is not None and _compound[19] != '':\r\n query = query + \"fld_complete_by\" + _criteria[19] + \\\r\n \"'\" + _inputs[19] + \"'\"\r\n if _compound[19] is not None and _compound[19] != '':\r\n query = query + \" \" + _compound[19] + \" \"\r\n\r\n if _criteria[20] is not None and _compound[20] != '':\r\n query = query + \"fld_complete_date\" + _criteria[20] + \\\r\n str(datetime.strptime(_inputs[20], \"%Y-%m-%d\").toordinal())\r\n if _compound[20] is not None and _compound[20] != '':\r\n query = query + \" \" + _compound[20] + \" \"\r\n\r\n if _inputs[21]:\r\n query = query + \"fld_accepted=%d\" % 1\r\n if _compound[21] is not None and _compound[21] != '':\r\n query = query + \" \" + _compound[21] + \" \"\r\n\r\n if _inputs[22]:\r\n query = query + \"fld_reviewed=%d\" % 1\r\n\r\n self._modulebook.request_filter_incidents(self._revision_id, query)", "def get_all_possible_filters(item_category):\n\n\tpk_lists = []\n\n\tfor filter_category in FilterCategory.objects.filter(item_category=item_category):\n\t\tfilter_option_set = filter_category.filteroption_set.all()\n\t\ttemp_list = list(filter_option_set.values_list('pk', flat=True))\n\n\t\tpk_lists.append(temp_list)\n\n\treturn pk_lists", "def search(self, what, cat='all'):\n # Sign in:\n if self.search_auth:\n self._sign_in()\n opener = self.opener\n else:\n opener = urllib2.build_opener(urllib2.BaseHandler())\n ret = []\n page = 0\n while page < self.PAGE_NUMBER:\n results = []\n parser = self.FilelistParser(results, self.url)\n url = self.url+'/browse.php?search=%s&cat=%s&searchin=0&sort=0&page=%d'%(what, self.supported_categories[cat], page)\n f = opener.open(url)\n dat = f.read().decode('iso-8859-1', 'replace')\n results_re = re.compile(\"(?s)<div class='cblock-innercontent'>.*\")\n for match in results_re.finditer(dat):\n res_tab = match.group(0)\n parser.feed(res_tab)\n parser.close()\n break\n if len(results) <= 0:\n break\n page += 1", "def browse_categories():\n print(\"***** Find Businesses by Categories *****\")\n while True:\n print()\n category = input(\n 'Please enter a type of business (category) or type \"back\" or \"quit\": ')\n print()\n if category == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if category == \"back\":\n return\n\n # create a regex pattern for business name\n pattern = r\".*\" + re.escape(category) + r\".*\"\n regx = re.compile(pattern, re.IGNORECASE)\n\n cursor = business_col.find({\"categories\": regx})\n\n business_objects = cursor.limit(10)\n \n if cursor.count() == 0:\n print(\"No businesses found with given category.\")\n continue\n for business_object in business_objects:\n print_business(business_object)", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')", "def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories", "def filter_query(self, form: dict): #-> cursor object\n form = templates.clean_filters(form)\n bitmap_query = templates.bitmap_filter_query(form)\n if not self.client:\n self.connect()\n if bitmap_query:\n print(\"This is the bitmap filter query: \", bitmap_query)\n cursor = self.client.moviebuff.bitmap.find(bitmap_query).limit(25)\n id_list = []\n if cursor:\n for x in cursor:\n id_list.append(x[\"Imdb_Title_id\"])\n order = templates.order_by(form)[\"$orderby\"]\n return self.db.find({\"$query\": { \"Imdb_Title_id\": { \"$in\": id_list}}, \"$orderby\": order})\n\n query = templates.filter_query(form)\n print(\"This is the full filter query: \", query)\n return self.db.find(query).limit(25)", "def filters():\n states = list(storage.all('State').values())\n states.sort(key=lambda state: state.name)\n cities = list(storage.all('City').values())\n cities.sort(key=lambda city: city.name)\n amenities = list(storage.all('Amenity').values())\n amenities.sort(key=lambda amenity: amenity.name)\n\n return render_template('10-hbnb_filters.html', states=states,\n cities=cities, amenities=amenities)", "def filter(self, filters):", "def get_category_list():\n return Category.objects.filter(active=True)", "def starwars_search(self, category, attribute, filters):\n self.load_response(category)\n while self.counter != int(self.response_info['count']):\n self.attribute_search(attribute, filters)\n self.load_next_response()", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def filters(self):\n return self.England_filter", "def with_category(self, category: str) -> list:\n return list(self.__holder.db_tags.filter(\n lambda t: t.category == category))", "async def search_by_product_or_category(\n conn, cursor, product: str = \"\", category: str = \"\"\n) -> List[str]:\n\n if (not product) and (not category):\n filter_term = \"\"\n elif product and category:\n filter_term = (\n f\"\\n WHERE product = '{product}' AND category = '{category}'\"\n )\n elif product:\n filter_term = f\"\\n WHERE product = '{product}'\"\n else:\n filter_term = f\"\\n WHERE category = '{category}'\"\n\n statement = f\"\"\"\n SELECT product.name as product,\n product.description as description,\n product.category as category,\n supplier_product.price as price,\n supplier_product.supplier as supplier,\n supplier_product.price as price,\n product.rating as product_rating,\n supplier.rating as supplier_rating,\n ROUND(((product.rating + supplier.rating)/2),2) as combined_rating,\n product.last_updated as last_updated \n FROM product \n INNER JOIN supplier_product\n ON product.name = supplier_product.product\n INNER JOIN supplier \n ON supplier_product.supplier = supplier.name {filter_term}\n ORDER BY (product.rating + supplier.rating) DESC\n \"\"\"\n await cursor.execute(statement)\n categories = await cursor.fetchall()\n return categories", "def filters(self):\n\t\treturn self.local_filter", "def show(collection, filter = {}):\n # creates a connection with database\n result = []\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"techstart\"]\n col = 
db[collection]\n for x in col.find(filter):\n result.append(x)\n return result", "def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # Get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Would you like to see data for Chicago, New York City or Washington?')\n if city.lower() in CITY_DATA:\n break\n print('ERROR: City does not match. Please try again.')\n\n # Get user input for month (all, january, february, ... , june)\n while True:\n month = input(\"Type month (January, February, March, April, May or June) to filter by or type 'all' for no filter\")\n if month.lower() in MONTH_LIST or month.lower() == 'all':\n break\n print(\"ERROR: Input was not a month from January to June nor all. Please try again.\")\n\n # Get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input(\"Type day of the week to filter or type 'all' for no filter\")\n if day.lower() in DAY_LIST or day.lower() == 'all':\n break\n print(\"ERROR: Input was not a day of the week nor all.\")\n\n print('-'*40)\n return city, month, day", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def generateSearchFilters(self, searchDict):\n\n location = Location(searchDict['location'])\n location.setRangeCoordinates(searchDict['searchRange'])\n category = Category.query.get(searchDict['category'])\n filters = {\n \"name\": searchDict['name'],\n \"category\": category,\n \"location\": location,\n \"reviewed\": bool(searchDict['reviewed_filter']),\n \"friends\": bool(searchDict['friends_filter']),\n \"groups\": bool(searchDict['groups_filter'])\n }\n sort = searchDict['sort']\n return filters, sort", "def searchAttributeValues( self, REQUEST=None, category=None, field=None ):\n results = []\n\n if not ( category and field ):\n return results\n\n membership = getToolByName( self, 'portal_membership', None )\n user = membership.getAuthenticatedMember()\n uname = user.getUserName()\n IsManager = user.IsManager()\n IsAdmin = user.IsAdmin()\n\n prptool = getToolByName( self, 'portal_properties', None )\n interval = prptool and prptool.getProperty( 'created_search_interval' ) or 60\n\n indexes = {}\n indexes['category'] = category\n indexes['created'] = { 'query' : ( DateTime()-interval, DateTime() ), 'range' : 'min:max' }\n\n if not IsAdmin:\n indexes['Creator'] = [ uname ]\n\n found_objects = self.searchResults( meta_type='HTMLDocument', **indexes )\n\n if not found_objects:\n return results\n\n for x in 
found_objects:\n value = ( str(x['CategoryAttributes'][field]) ).strip()\n if value and value not in ['None'] and value not in results:\n results.append( value )\n\n interrupt_thread( self )\n\n results.sort()\n return results", "def get_filters():\n\n city = prompts.city_prompt.launch()\n\n _filter = prompts.filter_prompt.launch()\n\n if _filter == \"Month\":\n month = prompts.month_prompt.launch()\n day = \"All\"\n\n elif _filter == \"Day\":\n day = prompts.day_prompt.launch()\n month = \"All\"\n\n elif _filter == \"Both\":\n month = prompts.month_prompt.launch()\n day = prompts.day_prompt.launch()\n\n else:\n month, day = \"All\", \"All\"\n\n print(\"-\" * 40)\n return city, month, day", "def _filter(\n self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()\n ) -> ResultSet:\n conn = self.provider.get_connection()\n\n # Build the filters from the criteria\n q = elasticsearch_dsl.Q()\n if criteria.children:\n q = self._build_filters(criteria)\n\n s = (\n Search(using=conn, index=self.model_cls._index._name)\n .query(q)\n .params(version=True)\n )\n\n if order_by:\n s = s.sort(*order_by)\n\n s = s[offset : offset + limit]\n\n # Return the results\n try:\n response = s.execute()\n result = ResultSet(\n offset=offset,\n limit=limit,\n total=response.hits.total.value,\n items=response.hits,\n )\n except Exception as exc:\n logger.error(f\"Error while filtering: {exc}\")\n raise\n\n return result" ]
[ "0.5364717", "0.53382677", "0.5266764", "0.5199028", "0.51203734", "0.51203734", "0.5113741", "0.508854", "0.5086552", "0.5083298", "0.50145316", "0.49857625", "0.4980039", "0.49589002", "0.49563545", "0.49544093", "0.49255437", "0.48955005", "0.48900995", "0.48809904", "0.48734722", "0.4855942", "0.4836454", "0.47952694", "0.47769836", "0.47742456", "0.47742456", "0.47673994", "0.47592837", "0.47563037" ]
0.6988995
0
Copy `in_tree` to `out_tree`, checking selection(in_tree) for each event.
def tree_copy_selection(in_tree, out_tree, selection):
    for entry in in_tree:
        if selection(entry):
            out_tree.Fill()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_tree_checker(src, dst):\n copy_tree(src, dst)\n return True", "def execute(self, context):\n\n # go to subtree, select all except input and output groups and mark nodes to be copied\n group_node = context.active_node\n sub_tree = group_node.group_tree\n\n if len(self.conflicts) > 0:\n self._resolve_conflicts(sub_tree, group_node.get_tree())\n\n bpy.ops.arm.edit_group_tree(node_index=group_node.get_id_str())\n [setattr(n, 'select', False) for n in sub_tree.nodes]\n group_nodes_filter = filter(lambda n: n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}, sub_tree.nodes)\n for node in group_nodes_filter:\n node.select = True\n node['sub_node_name'] = node.name # this will be copied within the nodes\n\n # the attribute should be empty in destination tree\n tree = context.space_data.path[-2].node_tree\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n # Frames can't be just copied because they do not have absolute location, but they can be recreated\n frame_names = {n.name for n in sub_tree.nodes if n.select and n.bl_idname == 'NodeFrame'}\n [setattr(n, 'select', False) for n in sub_tree.nodes if n.bl_idname == 'NodeFrame']\n\n if any(n for n in sub_tree.nodes if n.select): # if no selection copy operator will raise error\n # copy and past nodes into group tree\n bpy.ops.node.clipboard_copy()\n context.space_data.path.pop()\n bpy.ops.node.clipboard_paste() # this will deselect all and select only pasted nodes\n\n # move nodes in group node center\n tree_select_nodes = [n for n in tree.nodes if n.select]\n center = reduce(lambda v1, v2: v1 + v2,\n [Vector(ArmLogicTreeNode.absolute_location(n)) for n in tree_select_nodes]) / len(tree_select_nodes)\n [setattr(n, 'location', n.location - (center - group_node.location)) for n in tree_select_nodes]\n\n # recreate frames\n node_name_mapping = {n['sub_node_name']: n.name for n in tree.nodes if 'sub_node_name' in n}\n ArmAddGroupTreeFromSelected.recreate_frames(sub_tree, tree, frame_names, node_name_mapping)\n else:\n context.space_data.path.pop() # should exit from subtree anywhere\n\n # delete group node\n tree.nodes.remove(group_node)\n for node in tree.nodes:\n if 'sub_node_name' in node:\n del node['sub_node_name']\n\n tree.update()\n\n return {'FINISHED'}", "def _write (self, in_tree, dest):\n\t\t## Preparation:\n\t\tself._src_tree = in_tree\n\t\tself._dest_strm = dest\n\t\t## Main:\n\t\troot = in_tree.root\n\t\tif (not root):\n\t\t\troot = in_tree.get_centroid_nodes()[0]\n\t\tself._writeNode (root)", "def copy_tree(self, infile, outfile,\n preserve_mode=1, preserve_times=1, preserve_symlinks=0,\n level=1, condition=None):\n return copy_tree(\n infile, outfile,\n preserve_mode,preserve_times,preserve_symlinks,\n not self.force,\n dry_run=self.dry_run,\n condition=condition)", "def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if 
src in children\n else 'n/a'))\n if move == 'n/a':\n raise ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else src.length + node.length if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n res.support = (node.support if move in ('down', 'bottom')\n else src.support)\n\n # append children except for src (if applies)\n res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res", "def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):\n for entry in in_tree:\n key_value = getattr(entry, key)\n if not key_value in keys:\n out_tree.Fill()\n keys.add(key_value)", "def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()", "def convertTreeToCoveringTree( self, tree ):\n\n self.debug( \"convertTreeToCoveringTree: tree at start\" )\n if E.getLogLevel() >= 2: self.printTree( tree )\n \n ntree = self.addChildren( tree )\n \n #######\n # descend tree and add new domains\n # if domain has only a single child: delete the child and\n # rewire\n for t in ntree:\n info, children = t\n \n if info:\n node, parent, level, ranges = info\n \n if len(children) == 1:\n ntree[children[0]][0] = None\n ntree[node][1] = ntree[children[0]][1]\n \n #######\n # build new tree with new node identifiers\n current_node = 0\n covering_tree = []\n \n levels = map( lambda x: [], [0] * len(tree))\n \n for t in ntree:\n info, children = t\n \n if not info: continue\n node, parent, level, ranges = info\n \n if len(children) == 2:\n \n # add new node to tree, rename parent in children and\n # set borders\n leftchild = children[0]\n rightchild = children[1] \n \n # change left child\n lnode, lparent, llevel, lranges = ntree[leftchild][0]\n rnode, rparent, rlevel, rranges = ntree[rightchild][0] \n \n if ranges:\n lranges, rranges = self.getCoveringRanges( lranges, rranges, ranges )\n else:\n continue\n \n # change left child\n ntree[leftchild][0]= (None, current_node, level + 1, lranges) \n \n # change right child \n # cnode, cparent, clevel, cranges = ntree[rightchild][0]\n ntree[rightchild][0]= (None, current_node, level + 1, rranges )\n \n covering_tree.append( [level, parent, 0, 0, ranges] )\n levels[level].append( current_node )\n \n current_node += 1\n \n max_range = covering_tree[0][4][0][1]\n \n self.debug( \"convertTreeToCoveringTree: tree before removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n ###################################\n ## remove small fragments\n ## has to be done per level in order to be consistent\n ## done here and not during matrix decomposition, so that\n ## matrix needs not to be permuted more than once.\n for l in range(0, len(levels)):\n if len(levels[l]) == 0: break\n # collect all domains per level in a list of the form\n # (from, to, node)\n ranges = []\n for node in levels[l]:\n ranges += map(lambda x: (x[0], x[1], node), covering_tree[node][4])\n covering_tree[node][4] = []\n \n # and remove small fragments\n new_ranges = self.removeSmallRanges( ranges )\n \n # and put back into tree if there is more than one range\n for (xfrom, xto, node) in new_ranges:\n 
covering_tree[node][4].append( (xfrom, xto) )\n \n ###################################\n ## delete nodes with empty ranges or only a single child.\n ## renumber nodes so that there are no gaps\n\n self.debug( \"convertTreeToCoveringTree: after removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n return self.collapseTree( covering_tree )", "def test_copy_button_clicked_with_no_selection_on_to_task_tree_view(self):\n # select one task in from_task_tree_view\n\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n self.assertEqual(PatchedMessageBox.called_function, '')\n\n # now try to copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n self.assertEqual(PatchedMessageBox.called_function, 'critical')\n self.assertEqual(PatchedMessageBox.title, 'Error')\n self.assertEqual(PatchedMessageBox.message,\n 'Please select a task from <b>To Task</b> list')", "def selection_correction_method2(tree, scale, h_in, h_out):\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n RT = event.DD_Rise[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n energy_S15 = event.DD_AmplADU[S15_ch]\n if cut[0]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n energy = energy_S15*scale\n h_in.Fill(energy)\n cut[0]=1\n if cut[1]==0:\n if energy_S15>1000 and RT>1.1 and RT<1.51 and ((onset>=15 and onset<=36) or (onset>=50 and onset<=110)):\n energy = energy_S15*scale\n h_out.Fill(energy)\n cut[1]=1", "def command_copytree(args):\n for srcdir in args.srcdirs:\n basename = os.path.basename(srcdir)\n destdir2 = os.path.normpath(os.path.join(args.destdir, basename))\n if os.path.exists(destdir2):\n shutil.rmtree(destdir2)\n sys.stdout.write(\"copytree: %s => %s\\n\" % (srcdir, destdir2))\n shutil.copytree(srcdir, destdir2)\n return 0", "def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def tree_removeDeadBranches():\n nonlocal d_tree\n d_tree = { k : v for k, v in d_tree.items() if v}\n # By creating a new binding for 'd_tree', we have effectively\n # severed the connection back to the original dictionary.\n # We now need to copy this d_tree to the self.d_inputTree\n # self.d_outputTree structures\n self.d_inputTree = d_tree\n self.d_outputTree = self.d_inputTree.copy()", "def cpr(src, dst):\n shutil.copytree(src, dst)", "def copytree(src, dest):\n shutil.copytree(src, dest)\n restorecon(dest, recursive=True)", "def selection_correction_method1(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n 
RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break\n return h_in, h_out", "def copy_subtree(src, dst):\n for src_f in os.listdir(src):\n src_path = os.path.join(src, src_f)\n if os.path.isdir(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copytree(src_path, dst_path)\n else:\n ProcessJson.copy_subtree(src_path, dst_path)\n elif os.path.isfile(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copy(src_path, dst_path)", "def selection_correction_method1_v2(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or 
zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def test_copy_button_clicked_with_same_task_is_selected_in_both_sides(self):\n # select one task in from_task_tree_view\n\n # Select Task4 in from_task_tree_view\n selection_model = self.dialog.from_task_tree_view.selectionModel()\n model = self.dialog.from_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.from_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.from_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n # Select Task4 in to_task_tree_view\n selection_model = self.dialog.to_task_tree_view.selectionModel()\n model = self.dialog.to_task_tree_view.model()\n\n project1_item = model.item(0, 0)\n self.dialog.to_task_tree_view.expand(project1_item.index())\n\n task1_item = project1_item.child(0, 0)\n self.dialog.to_task_tree_view.expand(task1_item.index())\n\n task4_item = task1_item.child(0, 0)\n\n selection_model.select(\n task4_item.index(),\n QtGui.QItemSelectionModel.Select\n )\n\n self.assertEqual(PatchedMessageBox.called_function, '')\n\n # now try to copy it\n QTest.mouseClick(self.dialog.copy_push_button, Qt.LeftButton)\n\n self.assertEqual(PatchedMessageBox.called_function, 'critical')\n self.assertEqual(PatchedMessageBox.title, 'Error')\n self.assertEqual(PatchedMessageBox.message,\n 'Please select two different tasks')", "def range(self, event):\r\n \r\n p = (event.x, self.toCartesian(event.y))\r\n \r\n if self.selectedRegion is None:\r\n self.selectedStart = Region(p[X],p[Y], p[X],p[Y])\r\n self.selectedRegion = self.selectedStart.unionPoint(p)\r\n \r\n self.paint()\r\n \r\n # return (node,sub-tree) where sub-tree is True if draining entire tree\r\n # rooted at node. 
Draw these as shaded red rectangle to identify whole\r\n # sub-tree is selected.\r\n for pair in self.tree.range(self.selectedRegion):\r\n p = pair[0].point\r\n \r\n if pair[1]:\r\n self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min), \r\n pair[0].region.x_max, self.toTk(pair[0].region.y_max),\r\n fill='Red', stipple='gray12')\r\n else:\r\n self.canvas.create_rectangle(p[X] - BoxSize, self.toTk(p[Y]) - BoxSize, \r\n p[X] + BoxSize, self.toTk(p[Y]) + BoxSize, fill='Red')\r\n\r\n self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min), \r\n self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max), \r\n outline='Red', dash=(2, 4))", "def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)", "def test_after_creation_copy():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(2, move=True)\n builder.add_child(13)\n builder.move_to_parent()\n builder.add_child(7)\n\n t1 = builder.build()\n\n builder.move_to_root()\n builder.set_data(4)\n builder.add_child(3, move=True)\n builder.add_child(15)\n\n t2 = builder.build()\n\n assert t2 is not t1\n assert t2[0] is not t1[0]\n assert t2[0][0] is not t1[0][0]\n assert t2[1] is not t1[1]\n\n assert t2.data == 4\n assert t2[0].data == 2\n assert t2[0][0].data == 13\n assert t2[1].data == 7\n assert t2[2].data == 3\n assert t2[2][0].data == 15\n\n assert len(t2) == 3\n assert len(t2[0]) == 1\n assert len(t2[1]) == 0\n assert len(t2[2]) == 1", "def onSelectionChanging(self, event):\n\t\tif self.ignore:\n\t\t\tevent.Skip()\n\t\t\treturn\n\t\tif not self.multiSelect and not self.programmatic:\n\t\t if platform.system() not in [\"Darwin\", \"Linux\"]: \n\t\t\t self.tree.UnselectAll()\n\t\titem = event.GetItem()\n\t\tif not item.IsOk():\n\t\t\tLogging.info(\"Item %s is not ok\" % str(item), kw = \"io\")\n\t\t\treturn\n\t\t\t\t\n\t\tobj = self.tree.GetPyData(item)\n\t\tif obj == \"1\":\n\t\t\t#self.tree.UnselectItem(item)\n\t\t\tevent.Veto()\n\t\t\treturn\n\t\telif obj == \"2\":\n\t\t\t# Select it's children\n\t\t\tself.ignore = 1\n\t\t\tself.tree.UnselectItem(item)\n\t\t\tcitem, cookie = self.tree.GetFirstChild(item)\n\t\t\twhile citem.IsOk():\n\t\t\t\tif not self.tree.IsSelected(citem):\n\t\t\t\t\tself.tree.ToggleItemSelection(citem)\n\t\t\t\tcitem = self.tree.GetNextSibling(citem) \n\t\t\tevent.Veto()\n\t\t\tself.ignore = 0", "def copy_tree(t):\n return tree(label(t), [copy_tree(b) for b in branches(t)])", "def copySpecial():\n depNode = nuke.dependencies(nuke.selectedNode())\n dependNode = nuke.dependentNodes(nuke.INPUTS or nuke.HIDDEN_INPUTS or nuke.EXPRESSIONS, [nuke.selectedNode()])\n i = 0\n if dependNode[0].Class() in ['Scene', 'MergeGeo']:\n i = nuke.inputs(dependNode[0])+1\n\n nuke.nodeCopy(nukescripts.cut_paste_file())\n\n for node in nuke.allNodes():\n node['selected'].setValue(0)\n\n nuke.nodePaste(nukescripts.cut_paste_file())\n\n newNode = nuke.selectedNode()\n newNode.setInput(0, depNode[0])\n dependNode[0].setInput(i+1, newNode)", "def visit(self):\n self.tree = self.recursive_visit(self.tree)\n # assert self.current_line == self.tree.absolute_bounding_box.bottom_right.line", "def see(self, cut):\n newptree = PTree()\n newptree._root = self._root.see(cut)\n return newptree", "def analyze(self, event): \n trgObjects = Collection(event,self.trgColl)\n if 
self.trgMuMinPt!=None and self.trgMuMinPt>0:\n trgObjIdx = [ idx for idx,trg in enumerate(trgObjects) if getattr(trg,\"pt\")>self.trgMuMinPt and getattr(trg,self.trgBranch)==1]\n \n else:\n trgObjIdx = [ idx for idx,trg in enumerate(trgObjects) if getattr(trg,self.trgBranch)==1]\n \n \n if len(trgObjIdx)==0 and self.skipNoTrgEvt: \n return False\n\n passedPath= [ path for path in self.selectionPathList if getattr(event,path)]\n if len(self.selectionPathList)>0 and len(passedPath)==0:\n if self.skipNoTrgEvt:\n return False\n trgObjIdx=[]\n if len(trgObjIdx)==0:\n for br in self.branches:\n self.out.fillBranch(\"%s_%s\"%(self.outputColl,br),[])\n for col in self.recoColl:\n self.out.fillBranch(\"%s_isTrg\"%(col),0)\n if self.skipProbe or self.skipTag:\n return False\n else:\n Bmu_fired=0\n # print trgObjIdx\n for idx,col in zip(self.recoIdx,self.recoColl):\n out=getattr(event,idx)\n if out in trgObjIdx:\n self.out.fillBranch(\"%s_isTrg\"%(col),1)\n Bmu_fired+=1\n else:\n self.out.fillBranch(\"%s_isTrg\"%(col),0)\n\n if Bmu_fired==0 and self.skipProbe: \n return False \n if Bmu_fired>0 and Bmu_fired==len(trgObjIdx) and self.skipTag:\n return False\n \n for br in self.branches:\n out=[ getattr(trgObjects[idx],br) for idx in trgObjIdx ]\n self.out.fillBranch(\"%s_%s\"%(self.outputColl,br),out)\n return True" ]
[ "0.62684405", "0.5746463", "0.5593152", "0.5558629", "0.5449127", "0.5425309", "0.53680193", "0.5225137", "0.51527005", "0.5107789", "0.5051202", "0.504769", "0.5014795", "0.50122386", "0.49738657", "0.49728838", "0.49718148", "0.496103", "0.49524227", "0.4934227", "0.49341118", "0.4926265", "0.49151012", "0.48898342", "0.48875463", "0.48850015", "0.4859013", "0.4840263", "0.48214337", "0.47888294" ]
0.78454643
0
Copy `in` to `out` for events where event.`key` does not exist in `keys`. `keys` is the set of keys seen so far.
def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):
    for entry in in_tree:
        key_value = getattr(entry, key)
        if not key_value in keys:
            out_tree.Fill()
            keys.add(key_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_missing_values(events):\n ret = deepcopy(events)\n srchd, key_events = [], []\n for evt in events:\n _tmp = [(j, e) for j, e in enumerate(events) if e['key']\n == evt['key'] and not e['key'] in srchd]\n if _tmp != []:\n key_events.append(_tmp)\n srchd.append(evt['key'])\n dels = []\n for di_evts in key_events:\n if di_evts[0][1]['event'] == 'keystrokeUp':\n dels.append(di_evts[0][0])\n if di_evts[len(di_evts) - 1][1]['event'] == 'keystrokeDown':\n dels.append(di_evts[len(di_evts) - 1][0])\n if dels != []:\n for i in sorted(dels, reverse=True):\n del ret[i]\n return ret", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def without_keys(keys):\n keys = frozenset(keys) # frozenset has efficient membership lookup\n return filter_keys_c(fnot(partial(operator.contains, keys)))", "def key_not_in(self, key_not_in):\n\n self._key_not_in = key_not_in", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def except_keys(dic, *keys):\n ret = dic.copy()\n for key in keys:\n try:\n del ret[key]\n except KeyError:\n pass\n return ret", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def exclude(m, keys):\n return {k: v for k, v in m.items() if k not in keys}", "def del_dict_keys(dict_in, keys):\n for key in keys:\n if key in dict_in:\n del dict_in[key]\n return dict_in", "def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)", "def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated", "def Exclude(*keys):\n\n def exclude(row):\n res = dict(row)\n for k in keys:\n if k in res:\n del res[k]\n return res\n\n return \"Exclude\" >> beam.Map(exclude)", "def _filter_keys(item, keys):\n return dict((k, v) for k, v in item.iteritems() if k in keys)", "def _filter_keys(d: dict, keys: set) -> dict:\n return {key: d[key] for key in keys if key in d}", "def remove_outlier(keys):\n for key in keys:\n data_dict.pop(key, 0)", "def update_ifnotin(d1, d2):\n for k, v in d2.items():\n if k not in d1:\n d1[k] = v\n return d1", "def exclude_keys(dictionary: Mapping, keys: Sequence[Hashable]) -> dict:\n return {k: v for k, v in dictionary.items() if k not in keys}", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def dict_filter(d, keys, into=dict):\n \n if hasattr(keys, \"__call__\"):\n f = keys\n keys = filter(f, d.keys())\n return into(map(lambda k:(k,d[k]), keys))", "def select_features(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def discard(self, key):\r\n if key in self.map: \r\n key, prev, next = self.map.pop(key)\r\n prev[NEXT] = 
next\r\n next[PREV] = prev\r\n if self.emitter:\r\n self.emitter.emit()", "def _pick(d, keys):\n return {k: v for k, v in d.items() if k in keys}", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def remove_outlier(dict_object, keys):\r\n for key in keys:\r\n dict_object.pop(key, 0)", "def dfilter(d: dict, *keys: Iterable, reverse=False) -> dict:\n return {k: v for k, v in d.items() if k in keys and not reverse or k not in keys and reverse}", "def remove_outlier(dict_object, keys):\n for key in keys:\n dict_object.pop(key, 0)", "def filterKeys(document, keys):\n return {key: document[key] for key in keys}" ]
[ "0.623754", "0.6082424", "0.5918844", "0.59167224", "0.56955945", "0.5677415", "0.5673033", "0.5609741", "0.55079687", "0.55027145", "0.5486954", "0.5394884", "0.53932744", "0.5390138", "0.53668404", "0.53537357", "0.533538", "0.52792567", "0.5232413", "0.5232282", "0.5213723", "0.5176005", "0.50757587", "0.504963", "0.50445294", "0.50268996", "0.49940884", "0.49480045", "0.49420717", "0.49132234" ]
0.663589
0
Convert the numpy array representing the GOL grid to a QImage.
def numpy_to_qimage(np_array: np.ndarray, show_age: bool):
    # Only support 2D array of bytes
    assert len(np_array.shape) == 2 and np_array.dtype == np.uint8
    width = np_array.shape[1]
    height = np_array.shape[0]
    bytes_per_line = width
    image = QImage(np_array, width, height, bytes_per_line, QImage.Format_Indexed8)
    # Maps array values to color
    if show_age:
        image.setColorTable(colors.AGE_COLOR_TABLE)
    else:
        image.setColorTable(colors.BINARY_COLOR_TABLE)
    return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convertNumpy2Image(self, array):\n cv2image = cv2.cvtColor(array, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n return imgtk", "def make_image(self, save=False):\n\n # image_grid = np.full((self.size_x, self.size_y), '#888888', dtype=str)\n image_grid = np.full((self.size_x, self.size_y, 3), 0, dtype=np.uint8)\n\n # self.grid = np.flip(self.grid, 1)\n\n # self.grid = np.swapaxes(self.grid, 0, 0)\n \"\"\"\n image_grid[self.grid == 0] = 'FFFFFF'\n image_grid[self.grid == 1] = '000000'\n image_grid[self.grid == 2] = '00FF00'\n image_grid[self.grid == 3] = '0000FF'\n image_grid[self.grid == 4] = 'FFFF00'\n image_grid[self.grid == 5] = '00FFFF'\n image_grid[self.grid == 6] = 'FF00FF'\n \"\"\"\n image_grid[self.grid == 0] = (1, 1, 1)\n image_grid[self.grid == 1] = (0, 0, 0)\n image_grid[self.grid == 2] = (1, 0, 1)\n image_grid[self.grid == 3] = (0, 1, 0)\n image_grid[self.grid == 4] = (0, 0, 1)\n image_grid[self.grid == 5] = (0, 1, 1)\n image_grid[self.grid == 6] = (1, 1, 0)\n\n #for ant in self.ants:\n # image_grid[ant.x, ant.y] = (1, 0, 0)\n\n # image_grid = image_grid.swapaxes(0, 1)\n # self.grid = self.grid.swapaxes(0, 1)\n\n\n\n DPI = 100\n width, height = 1000, 1000\n fig = plt.figure(figsize=(width / DPI, height / DPI), dpi=DPI, facecolor='k')\n ax = fig.add_subplot()\n\n plt.axis('equal')\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n\n for y in range(self.size_x):\n for x in range(self.size_y):\n if self.grid[x, y] != 0:\n # Only plot a hexagon if its state is not zero.\n plot_hex(ax, x, y, image_grid[x, y])\n\n ax.set_xlim(0, self.size_x)\n ax.set_ylim(0, self.size_y)\n\n plt.show()\n\n logging.info(\"Finished Image Processing\")", "def rgb2qimage(rgb):\n if len(rgb.shape) != 3:\n raise ValueError(\"rgb2QImage can only convert 3D arrays\")\n if rgb.shape[2] not in (3, 4):\n raise ValueError(\n \"rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels\")\n\n h, w, channels = rgb.shape\n\n # Qt expects 32bit BGRA data for color images:\n bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')\n bgra[..., 0] = rgb[..., 2]\n bgra[..., 1] = rgb[..., 1]\n bgra[..., 2] = rgb[..., 0]\n if rgb.shape[2] == 3:\n bgra[..., 3].fill(255)\n fmt = QImage.Format_RGB32\n else:\n bgra[..., 3] = rgb[..., 3]\n fmt = QImage.Format_ARGB32\n\n result = QImage(bgra.data, w, h, fmt)\n result.ndarray = bgra\n return result", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def gray2qimage(gray):\n if len(gray.shape) != 2:\n raise ValueError(\"gray2QImage can only convert 2D arrays\")\n\n gray = numpy.require(gray, numpy.uint8, 'C')\n\n h, w = gray.shape\n\n result = QImage(gray.data, w, h, QImage.Format_Indexed8)\n result.ndarray = gray\n for i in range(256):\n result.setColor(i, QColor(i, i, i).rgb())\n return result", "def _prepare_image(self, grid):\n grid = np.array(grid, dtype=np.uint8)\n\n width = int(grid.shape[1] * self.scale_percent)\n height = int(grid.shape[0] * self.scale_percent)\n grid = cv2.resize(grid, (width, height), interpolation=cv2.INTER_AREA)\n return grid", "def render_image(grid,window):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n#top row:\r\n for j in range(Y):\r\n for sub_j in range(3): #3 rows \r\n ROW = []\r\n for i in range(X):\r\n ROW += grid[j][i].arr[sub_j]\r\n \r\n for k in range(len(ROW)):\r\n COLOR = (ROW[k],ROW[k],ROW[k])\r\n Y_pos = (3*j + sub_j)*pixel_size*scale\r\n X_pos = k*(pixel_size)*scale\r\n width = height = 
pixel_size*scale\r\n pygame.draw.rect(window,COLOR,(X_pos,Y_pos,width,height))\r\n \r\n# print(ROW)\r\n return", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def test_grdimage(grid):\n fig = Figure()\n fig.grdimage(grid, cmap=\"earth\", projection=\"W0/6i\")\n return fig", "def arr2img(ar):\n return Image.fromstring('L', (ar.shape[1], ar.shape[0]), ar.astype('b').tostring())", "def visualize_AQ(self):\n M = np.matrix(self.data[0])\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_aspect('equal')\n plt.imshow(M, interpolation='nearest', cmap=plt.cm.YlOrRd)\n plt.colorbar()\n plt.show()", "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def _plot_rawdata(self):\n fig, ax = plt.subplots(1, 1)\n ax.imshow(self.data, origin='top')\n ax.set_title('Gauss-Legendre Quadrature Grid')\n ax.set_xlabel('longitude index')\n ax.set_ylabel('latitude index')\n fig.tight_layout(pad=0.5)\n return fig,ax", "def showAssembled(self):\n im = np.zeros(self.puzzleImage.shape);\n r,c,d = self.puzzleImage.shape;\n r = r/len(self.puzzlePieces); # assume square matrix\n c = c/len(self.puzzlePieces);\n \n for i in range (len(self.puzzlePieces)):\n for j in range (len(self.puzzlePieces)):\n im[i*r:(i+1)*r, j*c:(j+1)*c] = self.puzzlePieces[i,j];\n \n plt.imshow(im);\n plt.show();", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram 
= \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def display_image ( X ):\r\n\t# on teste que le tableau contient bien 256 valeurs\r\n\tif X.size != 256:\r\n\t\traise ValueError ( \"Les images doivent etre de 16x16 pixels\" )\r\n\r\n\t# on cree une image pour imshow: chaque pixel est un tableau a 3 valeurs\r\n\t# (1 pour chaque canal R,G,B). Ces valeurs sont entre 0 et 1\r\n\tY = X / X.max ()\r\n\timg = np.zeros ( ( Y.size, 3 ) )\r\n\tfor i in range ( 3 ):\r\n\t\timg[:,i] = X\r\n\r\n\t# on indique que toutes les images sont de 16x16 pixels\r\n\timg.shape = (16,16,3)\r\n\r\n\t# affichage de l'image\r\n\tplt.imshow( img )\r\n\tplt.show ()", "def get_plain_image_as_widget(self):\n arr = self.getwin_array(order=self.rgb_order)\n\n # convert numpy array to native image widget\n image_w = self._get_wimage(arr)\n return image_w", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def display(array):\n if isinstance(array, np.ndarray):\n plt.imshow(array)\n plt.show()\n else:\n raise TypeError(\"display() needs a numpy ndarray as parameter, \"\n f\"got {type(array)}\")", "def to_qt_pixmap(self, scale=None):\n bytes_per_line = 3 * self.width\n img = self.to_color().img\n rgb = opencv.cvtColor(img, opencv.COLOR_BGR2RGB)\n q_img = QImage(rgb.data, self.width, self.height, bytes_per_line, QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(q_img)\n\n if scale is not None:\n pixmap = pixmap.scaled(scale, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)\n\n return pixmap", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def 
_repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def display(self):\n rows = [(self.views[0].display, len(self.views))]\n fig, axes = plt.subplots(1, len(self.views),\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, view in zip(axes.ravel(), self.views):\n ax.imshow(view.display)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=view.position.id)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def __translate(self, img):\n if not isinstance(img, Image):\n raise InvalidImageTypeException(\"display_images only accepts objects of type Image\")\n\n w = img.width()\n h = img.height()\n tkimg = Tkinter.PhotoImage(width=w, height=h)\n for x in range(w):\n for y in range(h):\n tkimg.put('#%02x%02x%02x' % img.get_rgb(x, y), (x, y))\n return tkimg", "def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image" ]
[ "0.6397902", "0.6196134", "0.60192525", "0.59758794", "0.5931983", "0.5765702", "0.5721111", "0.5687924", "0.56588805", "0.5628664", "0.55947673", "0.55882084", "0.5583067", "0.55810326", "0.55731523", "0.5548744", "0.5506583", "0.54962564", "0.54832053", "0.54686344", "0.5464198", "0.544716", "0.54423577", "0.54351217", "0.5428951", "0.5421566", "0.5421566", "0.5419445", "0.54120886", "0.540037" ]
0.68053734
0
Prepare paths specified as config. The input is a list of either strings, or 2-tuples (source, target). Where single strings are supplied, the basenames are used as targets. Where targets are given explicitly, they must not be absolute paths. Returns a list of 2-tuples, or throws ConfigError if something is wrong in the input.
def process_path_specs(specs):
    processedSpecs = []
    for spec in specs:
        if not isinstance(spec, (list, tuple)):
            source = spec
            target = None
        elif len(spec) != 2:
            raise ConfigError("path spec must be a list or tuple of "
                    "length two")
        else:
            source, target = spec
        source = os.path.normpath(source)
        if not target:
            target = os.path.basename(source)
        elif os.path.isabs(target):
            raise ConfigError("target path for include file may not be "
                    "an absolute path")
        processedSpecs.append((source, target))
    return processedSpecs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_paths(src, dst, paths, *, exclude=None):\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files", "def cfgPathToList( arg ):\n from types import StringTypes\n listPath = []\n if type( arg ) not in StringTypes:\n return listPath\n while arg.find( '/' ) == 0:\n arg = arg[1:]\n return arg.split( '/' )", "def _copy_paths(self, paths, source, destination, output_path,\r\n final_path=None):\r\n for path in paths:\r\n if final_path:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, final_path))\r\n else:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, path))", "def _resolvePathPatterns(self, sources, source):\n kept = []\n pattern = re.compile(source['pathPattern'])\n basedir = self._basePath / source['path']\n if (self._basePath.name == Path(self._largeImagePath).name and\n (self._basePath.parent / source['path']).is_dir()):\n basedir = self._basePath.parent / source['path']\n basedir = basedir.resolve()\n for entry in basedir.iterdir():\n match = pattern.search(entry.name)\n if match:\n if entry.is_file():\n kept.append((entry.name, entry, match))\n elif entry.is_dir() and (entry / entry.name).is_file():\n kept.append((entry.name, entry / entry.name, match))\n for idx, (_, entry, match) in enumerate(sorted(kept)):\n subsource = copy.deepcopy(source)\n # Use named match groups to augment source values.\n for k, v in match.groupdict().items():\n if v.isdigit():\n v = int(v)\n if k.endswith('1'):\n v -= 1\n if '.' in k:\n subsource.setdefault(k.split('.', 1)[0], {})[k.split('.', 1)[1]] = v\n else:\n subsource[k] = v\n subsource['path'] = entry\n for axis in self._axesList:\n stepKey = '%sStep' % axis\n valuesKey = '%sValues' % axis\n if stepKey in source:\n if axis in source or valuesKey not in source:\n subsource[axis] = subsource.get(axis, 0) + idx * source[stepKey]\n else:\n subsource[valuesKey] = [\n val + idx * source[stepKey] for val in subsource[valuesKey]]\n del subsource['pathPattern']\n sources.append(subsource)", "def build_path_pairs(self):\n\n if self.source_paths is None:\n\n raise ValueError(\"self.source_paths uninitialized!\")\n\n for source_path in self.source_paths:\n\n for block_data_dir in data_settings.BLOCK_DATA_DIRS:\n\n block_id = os.path.split(block_data_dir)[-1]\n\n source_data_dir, filename = os.path.split(source_path)\n containing_dir = os.path.split(source_data_dir)[-1]\n\n if not containing_dir in [block_id, data_settings.GRANULE]:\n\n continue\n\n block_data_path = os.path.join(block_data_dir, filename)\n self.path_pairs.append((source_path, block_data_path))", "def in_filepath_list(class_paths: List[str]) -> List:\n registry, not_founds = build_registry(class_paths)\n builder = FilepathListBuilder()\n source = builder.build(registry)\n\n return [source, not_founds]", "def _GetFilePairs(config):\n\n ret = []\n\n has_bazel_genfiles = os.path.exists(\"bazel-bin\")\n\n for filename in config.file_list:\n target = os.path.join(config.package_name, filename)\n generated = os.path.join(config.package_name, config.pattern % filename)\n if has_bazel_genfiles:\n generated = os.path.join(\"bazel-bin\", generated)\n\n # Generated files should always exist. 
Blaze should guarantee this before\n # we are run.\n if not os.path.isfile(generated):\n print(\"Generated file '%s' does not exist.\" % generated)\n print(\"Please run this command to generate it:\")\n print(\" bazel build %s:%s\" % (config.package_name, config.target_name))\n sys.exit(1)\n ret.append(_FilePair(target, generated))\n\n return ret", "def targets(path, args):\n if args:\n return \" \".join([\"{0}{1}\".format(path, target) for target in args])", "def build_destination_files(destination, requested_paths):\n pathlib.Path(destination).resolve()\n longest_common_requested_path = longest_common_path_prefix(requested_paths)\n destination_files = [destination / path.relative_to(longest_common_requested_path) for path in requested_paths]\n existing_files = [path for path in destination_files if path.exists()]\n return destination_files, existing_files", "def handle_multiple_destinations(self):\n\n # Create the to-directory if it does not exist\n for destination in config.dest:\n if not path.exists(destination.dest):\n makedirs(destination.dest)\n\n # Clone the modules and copy the right directories\n for module in config.modules:\n Logger.assemble_module(module)\n\n directory = path.join(TEMP_DIR, module.name)\n remove_dir(directory)\n clone(module, directory)\n self.commit_hashes[module.name] = self.get_commit_hash(directory)\n\n for destination in config.dest:\n to_directory = path.join(destination.dest, module.name)\n remove_dir(to_directory)\n shutil.move(\n path.join(TEMP_DIR, module.name, destination.src), to_directory\n )", "def expand_paths(self, paths):\n \n expanded_paths = []\n if isinstance(paths, str): # A single path\n expanded = glob.glob(paths)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n elif isinstance(paths, list): # Multiple path\n for p in paths:\n expanded = glob.glob(p)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n else:\n _LOG.exception(\"Unknown input for the 'add' function.\")\n return expanded_paths", "def _resolve_target_sources(self, target_sources, extension=None, relative_to_target_base=False):\r\n resolved_sources = []\r\n for resolved in Target.resolve_all(target_sources):\r\n if hasattr(resolved, 'sources'):\r\n resolved_sources.extend(\r\n source if relative_to_target_base else os.path.join(resolved.target_base, source)\r\n for source in resolved.sources if not extension or source.endswith(extension)\r\n )\r\n return resolved_sources", "def _resolveFramePaths(self, sourceList):\n # we want to work with both _basePath / <path> and\n # _basePath / .. 
/ <path> / <name> to be compatible with Girder\n # resource layouts.\n sources = []\n for source in sourceList:\n if source.get('pathPattern'):\n self._resolvePathPatterns(sources, source)\n else:\n self._resolveSourcePath(sources, source)\n for source in sources:\n if hasattr(source.get('path'), 'resolve'):\n source['path'] = source['path'].resolve(False)\n return sources", "def glob_paths(self, name, source, pattern, test_data=()):\n assert isinstance(source, config_types.Path)\n result = self._run(\n name, ['glob', source, pattern],\n lambda: self.test_api.glob_paths(test_data),\n self.m.raw_io.output_text())\n ret = [source.join(*x.split(self.m.path.sep))\n for x in result.stdout.splitlines()]\n result.presentation.logs[\"glob\"] = map(str, ret)\n return ret", "def get_files(target_files, config):\n out = []\n find_fn = _find_file(config)\n for fname_in in target_files.keys():\n if isinstance(fname_in, (list, tuple)):\n fnames = fname_in\n else:\n fnames = fname_in.split(\";\")\n for fname in fnames:\n remote_fname = find_fn(fname)\n if remote_fname:\n if isinstance(remote_fname, (list, tuple)):\n out.extend(remote_fname)\n else:\n out.append(remote_fname)\n return out", "def resolve_specs(paths):\n specs = []\n for path in paths:\n if os.path.isdir(path):\n _, _, files = os.walk(path).next()\n specs.extend(os.path.join(path, fname) for fname in files)\n else:\n specs.append(path)\n return specs", "def configfiles(basename):\n dirs = (\"config\", \"config-\" + os.uname()[1].rsplit(\".\")[0])\n dirpaths = (join(d, basename) for d in dirs)\n realpaths = (join(scriptdir, d) for d in dirpaths)\n return [relpath(d) for d in realpaths]", "def buildReplaceList (remplacements, sourcePath, includes, excludes = [], destinationPath = None, replaceFilename = True):\n\tfrom os import sep\n\n\tdestinations = []\n\n\t# Analyzes the directories\n\tsources = scanAll(normalizePath(sourcePath), includes, excludes)[0]\n\n\t# If the destination directory is not defined\n\tif destinationPath == None:\n\t\tdestinations = sources[:]\n\telse:\n\t\tdestinations = []\n\n\t\t# Creation de la liste des fichiers de destination\n\t\tfor source in sources:\n\t\t\t# Gets the destination directory name\n\t\t\tdestination = normalizePath(destinationPath + sep + source[len (sourcePath):])\n\n\t\t\t# If file names are to be replaced\n\t\t\tif replaceFilename:\n\t\t\t\t# For each replacement to be made in the destination directory name\n\t\t\t\tfor i in remplacements:\n\t\t\t\t\t# Replaces values in line \n\t\t\t\t\tdestination = destination.replace(i[0], i[1])\n\n\t\t\t# Adding the directory to the list\n\t\t\tdestinations.append (normalizePath(destination))\n\treturn sources, destinations", "def config(c):\n for sp_ns in ns_foreach_task_subdir(c):\n try:\n sp_ns.tasks.config(c)\n except UnexpectedExit:\n pass", "def parse_targets(\n name=None, pkgs=None, sources=None, saltenv=\"base\", normalize=True, **kwargs\n):\n if \"__env__\" in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop(\"__env__\")\n\n if __grains__[\"os\"] == \"MacOS\" and sources:\n log.warning('Parameter \"sources\" ignored on MacOS hosts.')\n\n version = kwargs.get(\"version\")\n\n if pkgs and sources:\n log.error('Only one of \"pkgs\" and \"sources\" can be used.')\n return None, None\n\n elif \"advisory_ids\" in kwargs:\n if pkgs:\n log.error('Cannot use \"advisory_ids\" and \"pkgs\" at the same time')\n return None, None\n elif kwargs[\"advisory_ids\"]:\n return kwargs[\"advisory_ids\"], \"advisory\"\n else:\n return [name], 
\"advisory\"\n\n elif pkgs:\n if version is not None:\n log.warning(\n \"'version' argument will be ignored for multiple package targets\"\n )\n pkgs = _repack_pkgs(pkgs, normalize=normalize)\n if not pkgs:\n return None, None\n else:\n return pkgs, \"repository\"\n\n elif sources and __grains__[\"os\"] != \"MacOS\":\n if version is not None:\n log.warning(\n \"'version' argument will be ignored for multiple package targets\"\n )\n sources = pack_sources(sources, normalize=normalize)\n if not sources:\n return None, None\n\n srcinfo = []\n for pkg_name, pkg_src in sources.items():\n if __salt__[\"config.valid_fileproto\"](pkg_src):\n # Cache package from remote source (salt master, HTTP, FTP) and\n # append the cached path.\n srcinfo.append(__salt__[\"cp.cache_file\"](pkg_src, saltenv))\n else:\n # Package file local to the minion, just append the path to the\n # package file.\n if not os.path.isabs(pkg_src):\n raise SaltInvocationError(\n \"Path {} for package {} is either not absolute or \"\n \"an invalid protocol\".format(pkg_src, pkg_name)\n )\n srcinfo.append(pkg_src)\n\n return srcinfo, \"file\"\n\n elif name:\n if normalize:\n _normalize_name = __salt__.get(\n \"pkg.normalize_name\", lambda pkgname: pkgname\n )\n packed = {_normalize_name(x): version for x in name.split(\",\")}\n else:\n packed = {x: version for x in name.split(\",\")}\n return packed, \"repository\"\n\n else:\n log.error(\"No package sources provided\")\n return None, None", "def FindSources(env, dest, source, suffixes=None):\n for source_entry in env.Flatten(source):\n if type(source_entry) == str:\n # Search for matches for each source entry\n source_nodes = env.Glob(source_entry)\n else:\n # Source entry is already a file or directory node; no need to glob it\n source_nodes = [source_entry]\n for s in source_nodes:\n if str(s.__class__) == 'SCons.Node.FS.Dir':\n # Recursively search subdir. 
Since glob('*') doesn't match dot files,\n # also glob('.*').\n FindSources(env, dest, [s.abspath + '/*', s.abspath + '/.*'],\n suffixes)\n elif suffixes and s.suffix in suffixes:\n dest.add(s)", "def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])", "def validate(self, config):\n if not isinstance(config, list):\n config = [config]\n\n for conf in config:\n if not conf.get('path'):\n raise ConfigError('Camera needs a `path` to save files to.')\n \n return config", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def run_path_visualisation(paths, config, modulesConfig):\n all_targets = [os.path.basename(config[s][\"target\"]) for s in config.sections]\n all_target_tasks = {os.path.basename(config[s][\"target\"]):s for s in config.sections}\n \n added_tasks = []\n prepared_paths = []\n for path in paths:\n prepared_tasks = []\n for idx, task in enumerate(list(reversed(path))):\n s_module, s_name, *identifier = task.split(\" \")\n\n # Special Rule For Join Module To Have A Connection To Another Module\n special_connection = False\n if s_module == \"processing_join\":\n args = config[task]\n con_module, con_name, *identifier = all_target_tasks.get(os.path.basename(args[\"joinwith\"]), s_module+\"_SPECIAL \"+s_name+\"_SPECIAL\").split(\" \")\n special_connection = {\n \"connection_to_module\" : con_module,\n \"connection_to_name\" : con_name,\n \"will_be_created\" : (os.path.basename(args[\"joinwith\"]) in all_targets)\n }\n\n prepared_tasks.append({\n 'module':s_module,\n 'name':s_name,\n 'display': (task not in added_tasks),\n 'specialConnection': special_connection,\n 'last': (idx == len(path) - 1),\n 'attributes': config[task]\n })\n added_tasks.append(task)\n prepared_paths.append(prepared_tasks)\n logger.debug(\"Path prepared for visualization!\")\n render_path_visualisation(config['projectRoot'], config['projectName'], prepared_paths)", "def _make_path_list(cfg, dir_name, file_name, rank=None):\n if not cfg.DATASET.IS_ABSOLUTE_PATH:\n assert len(dir_name) == 1 or len(dir_name) == len(file_name)\n if len(dir_name) == 1:\n file_name = [os.path.join(dir_name[0], x) for x in file_name]\n else:\n file_name = [os.path.join(dir_name[i], file_name[i])\n for i in range(len(file_name))]\n\n if cfg.DATASET.LOAD_2D: # load 2d images\n temp_list = copy.deepcopy(file_name)\n file_name = []\n for x in temp_list:\n suffix = x.split('/')[-1]\n if suffix in ['*.png', '*.tif']:\n file_name += sorted(glob.glob(x, recursive=True))\n else: # complete filename is specified\n file_name.append(x)\n\n file_name = _distribute_data(cfg, file_name, rank)\n return file_name", "def trainer_paths(config):\n arch_datetime = arch_datetime_path(config)\n return (\n ensure_dir(join(arch_datetime, 'checkpoints')),\n ensure_dir(join(arch_datetime, 'runs'))\n )", "def test_config_merging_toml_paths_only():\n toml = StringIO(\n dedent(\n \"\"\"\\\n [tool.vulture]\n paths = [\"path1\", \"path2\"]\n \"\"\"\n )\n )\n cliargs = [\n \"--exclude=test_*.py\",\n ]\n result = make_config(cliargs, toml)\n assert result[\"paths\"] == [\"path1\", \"path2\"]\n assert result[\"exclude\"] == [\"test_*.py\"]", "def import_data_from_config(config):\n\n merge_columns = config[\"import_data\"][\"merge_columns\"]\n\n if not isinstance(merge_columns, list):\n msg = \"merge_columns (if used) must be a list\"\n raise ValueError(msg)\n\n data_out = 
config[\"import_data\"][\"output_data_directory\"]\n mkdir(data_out)\n\n # Require 'input_data_directories' to be a list\n data_in_list = config[\"import_data\"][\"input_data_directories\"]\n if not isinstance(data_in_list, list):\n msg = \"input_data_directories must be a list\"\n raise ValueError(msg)\n\n target_column = config[\"target_column\"]\n\n for d_in in data_in_list:\n import_directory_csv(d_in, data_out, target_column, merge_columns)", "def _process_candidate_conf_files(self, reordered_files):\n confs = []\n for r, f in reordered_files:\n if not os.path.exists(f):\n continue\n\n conf = ConfFile(f, self.syspaths)\n conf.replace(self.remap_renamer)\n temp_name = \"%s...%s\" % (r['from'], r['to'])\n conf.path = conf.path.replace(r['from'], temp_name)\n conf.path = conf.path.replace(temp_name, r['to'])\n confs.append(conf)\n\n return confs" ]
[ "0.5550079", "0.5392351", "0.5171191", "0.5132501", "0.5108284", "0.5076208", "0.5054015", "0.5015645", "0.4951827", "0.4932448", "0.49286622", "0.49249643", "0.48797044", "0.48406097", "0.48093775", "0.4735485", "0.47257975", "0.47036657", "0.47026363", "0.46804345", "0.467906", "0.4676848", "0.46630636", "0.46223402", "0.46163362", "0.45962885", "0.45886078", "0.45858467", "0.45850947", "0.4573892" ]
0.6430958
0
Return the paths of directories which contain files that should not be included, generally because they contain standard system libraries.
def _GetDefaultBinPathExcludes(self):
    if sys.platform == "win32":
        import cx_Freeze.util
        systemDir = cx_Freeze.util.GetSystemDir()
        windowsDir = cx_Freeze.util.GetWindowsDir()
        return [windowsDir, systemDir, os.path.join(windowsDir, "WinSxS")]
    elif sys.platform == "darwin":
        return ["/lib", "/usr/lib", "/System/Library/Frameworks"]
    else:
        return ["/lib", "/lib32", "/lib64",
                "/usr/lib", "/usr/lib32", "/usr/lib64"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeduppaths():\r\n # This ensures that the initial path provided by the interpreter contains\r\n # only absolute pathnames, even if we're running from the build directory.\r\n L = []\r\n known_paths = set()\r\n for dir in sys.path:\r\n # Filter out duplicate paths (on case-insensitive file systems also\r\n # if they only differ in case); turn relative paths into absolute\r\n # paths.\r\n dir, dircase = makepath(dir)\r\n if not dircase in known_paths:\r\n L.append(dir)\r\n known_paths.add(dircase)\r\n sys.path[:] = L\r\n return known_paths", "def include_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-I'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-I for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)", "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]", "def include_dirs(self):", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def library_search_path(self, pedantic=False):\n return []", "def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths", "def get_non_vendor_package_path(aea_project_path: Path) -> Set[Path]:\n result: Set[Path] = set()\n for item_type_plural in ComponentType.plurals():\n nonvendor_package_dir_of_type = aea_project_path / item_type_plural\n result = result.union(\n {p for p in nonvendor_package_dir_of_type.iterdir() if p.is_dir()}\n if nonvendor_package_dir_of_type.exists()\n else {}\n )\n return result", "def get_missing_sources(source_paths, files_only=False):\n missing_sources = [\n source_path\n for source_path in source_paths\n if (not os.path.isdir(source_path) or files_only) and not os.path.isfile(source_path)\n ]\n return missing_sources", "def top_level_directories(self):\n return [d for d in self.directories if len([x for x in self.directories if x in d]) == 1]", "def get_unignored_file_paths(ignore_list=None, whitelist=None):\n unignored_files = []\n if ignore_list is None:\n ignore_list = []\n if whitelist is None:\n whitelist = []\n\n for root, dirs, files in os.walk(\".\"):\n 
floyd_logger.debug(\"Root:%s, Dirs:%s\", root, dirs)\n\n if ignore_path(unix_style_path(root), ignore_list, whitelist):\n # Reset dirs to avoid going further down this directory.\n # Then continue to the next iteration of os.walk, which causes\n # everything in this directory to be ignored.\n #\n # Note that whitelisted files that are within directories that are\n # ignored will not be whitelisted. This follows the expected\n # behavior established by .gitignore logic:\n # \"It is not possible to re-include a file if a parent directory of\n # that file is excluded.\"\n # https://git-scm.com/docs/gitignore#_pattern_format\n dirs[:] = []\n floyd_logger.debug(\"Ignoring directory : %s\", root)\n continue\n\n for file_name in files:\n file_path = unix_style_path(os.path.join(root, file_name))\n if ignore_path(file_path, ignore_list, whitelist):\n floyd_logger.debug(\"Ignoring file : %s\", file_name)\n continue\n\n unignored_files.append(os.path.join(root, file_name))\n\n return unignored_files", "def getdirs():\n dirs = [i for i in os.listdir(dname) if not \\\n os.path.isfile(os.path.join(dname, i))]\n return dirs", "def get_ignored_dirs(ci_ignore_path):\n with open(ci_ignore_path, 'r') as ignore_file:\n return set([\n normpath(line.strip())\n for line in ignore_file.readlines()\n if not line.startswith('#') and not is_blank(line)\n ])", "def get_theme_base_dirs_unchecked():\n theme_dirs = getattr(settings, \"COMPREHENSIVE_THEME_DIRS\", None)\n\n return get_theme_base_dirs_from_settings(theme_dirs)", "def _files_without_hidden(path):\n return [name for name in os.listdir(path) if not name.startswith('.')]", "def site_paths(buildout, prefixes):\n\n def is_buildout_dir(path):\n return path.startswith(buildout['eggs-directory']) or \\\n path.startswith(buildout['develop-eggs-directory'])\n\n def is_in_prefixes(path):\n return any([path.startswith(k) for k in prefixes])\n\n retval = [os.path.realpath(k) for k in site.sys.path]\n return [k for k in retval if not (is_buildout_dir(k) or is_in_prefixes(k))]", "def test_find_with_excluded_hidden_dirs_relative(self):\n tdir1 = self._make_test_dir('.test1')\n tdir2 = self._make_test_dir('test_2')\n tdir3 = self._make_test_dir('test.3')\n files = [\n os.path.join(tdir1, 'testfile1.py'),\n os.path.join(tdir2, 'testfile2.py'),\n os.path.join(tdir3, 'testfile3.py'),\n ]\n _touch_files(files)\n\n # We must temporarily change the current directory, so that we test against\n # patterns like ./.test1/file instead of /tmp/foo/.test1/file\n with _restore_working_dir():\n\n os.chdir(self.test_tmpdir)\n actual = file_resources.GetCommandLineFiles(\n [os.path.relpath(self.test_tmpdir)],\n recursive=True,\n exclude=['*.test1*'])\n\n self.assertEqual(\n sorted(actual),\n sorted([\n os.path.join(\n os.path.relpath(self.test_tmpdir), os.path.basename(tdir2),\n 'testfile2.py'),\n os.path.join(\n os.path.relpath(self.test_tmpdir), os.path.basename(tdir3),\n 'testfile3.py'),\n ]))", "def required_dirs(self) -> list:\n return [\n self.get(\"campaign.characters.path\"),\n self.get(\"campaign.session.path\"),\n self.get(\"campaign.plot.path\"),\n ]", "def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))", "def _init_pathinfo():\r\n d = set()\r\n for dir in sys.path:\r\n try:\r\n if os.path.isdir(dir):\r\n dir, dircase = makepath(dir)\r\n d.add(dircase)\r\n except TypeError:\r\n continue\r\n return d", "def _find_files(root_dir, should_include):\n paths = [] # Return value.\n\n is_module = lambda path: path.endswith(\".py\")\n\n # os.walk() is new in Python 
2.3\n # http://docs.python.org/library/os.html#os.walk\n for dir_path, dir_names, file_names in os.walk(root_dir):\n new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]\n new_paths = filter(is_module, new_paths)\n new_paths = filter(should_include, new_paths)\n paths.extend(new_paths)\n\n return paths", "def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories", "def get_untracked_files():\n untracked_files = set()\n for _, dirs, files in os.walk(os.getcwd()):\n for d in dirs:\n if d not in staging_obj_names:\n file_path = get_path_outside_wit(filename=d.strip())\n if file_path:\n untracked_files.add(file_path)\n for f in files:\n if f not in staging_obj_names:\n file_path = get_path_outside_wit(filename=f.strip())\n if file_path:\n untracked_files.add(file_path)\n return untracked_files", "def missingConfigFiles(self):\n return [ conf\n for conf in self.configFiles\n if not os.path.exists(conf)\n and not os.path.isfile(conf)\n ]", "def directories(self):\n directories = list(set([\n '/'.join(f.split('/')[:-1]) for f in self.files\n ]))\n return sorted(directories)", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def scrubbed_sys_path():\n for p in sys.path[:]:\n if not isinstance(p, str):\n yield p\n\n # Scrub any/all pex locations from sys.path.\n pp = pathlib.Path(p)\n if pex_root not in pp.parents:\n yield p", "def list_dir_no_hidden(path):\n\n return glob(os.path.join(path, \"*\"))" ]
[ "0.68841964", "0.66146666", "0.6608054", "0.6544238", "0.6524269", "0.65076166", "0.64406294", "0.64142877", "0.63882494", "0.6343896", "0.6316815", "0.6312666", "0.6243478", "0.6181976", "0.6178829", "0.6160177", "0.615914", "0.61474144", "0.61427253", "0.61281586", "0.6118144", "0.607804", "0.60767174", "0.6061069", "0.60532624", "0.60111415", "0.60055494", "0.6000497", "0.59944016", "0.5977549" ]
0.7017395
0
Return true if the file should be copied to the target machine. This is done by checking the binPathIncludes, binPathExcludes, binIncludes and binExcludes configuration variables, using first the full file name, then just the base file name, then the file name without any version numbers. Files are included unless specifically excluded, but inclusions take precedence over exclusions.
def _ShouldCopyFile(self, path):
    # check for C runtime, if desired
    path = os.path.normcase(path)
    dirName, fileName = os.path.split(path)
    if fileName.startswith("msvcr") and fileName.endswith(".dll"):
        self.msvcRuntimeDir = dirName
        return self.includeMSVCR

    # check the full path
    if path in self.binIncludes:
        return True
    if path in self.binExcludes:
        return False

    # check the file name by itself (with any included version numbers)
    if fileName in self.binIncludes:
        return True
    if fileName in self.binExcludes:
        return False

    # check the file name by itself (version numbers removed)
    name = self._RemoveVersionNumbers(fileName)
    if name in self.binIncludes:
        return True
    if name in self.binExcludes:
        return False

    # check the path for inclusion/exclusion
    for path in self.binPathIncludes:
        if dirName.startswith(path):
            return True
    for path in self.binPathExcludes:
        if dirName.startswith(path):
            return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, filename).replace(\"\\\\\",\"/\")\n\n update_if_different(filename1, filename2)", "def include_file(self, filename):\n # Only include Python files for now.\n if filename[-3:] == '.py':\n return True\n return False", "def copy_file_check(self):\n pass", "def BinaryExists(filename):\n return os.path.exists(os.path.join(self.options.build_dir, filename))", "def include_source_files(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"include_source_files\")", "def copy_file(self, filepath):\n copy_file = False\n try:\n copy_file = self.data[filepath]['copy']\n except KeyError:\n return False\n return copy_file", "def sysFile(*args, copy: AnyStr=\"\", delete: bool=True, makeDir: bool=True, move: AnyStr=\"\",\n removeEmptyDir: bool=True, rename: AnyStr=\"\", **kwargs)->bool:\n pass", "def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False", "def should_do_write():\n if not suffix_is_supported():\n return False\n\n if not has_write_access():\n return False\n\n # Files under exclude_dir should be exempted from writing.\n filepath = CURRENT_BUFFER.name\n file_dir = filepath.rsplit('/', 1)[0]\n exclude_dirs = vim.eval(\"g:BHExcludeDir\")\n exclude_dirs = [os.path.realpath(os.path.expanduser(_dir)) for _dir in exclude_dirs]\n for dirname in exclude_dirs:\n if file_dir.startswith(dirname):\n debug(\"File in BHExcludeDir, do not write header.\")\n return False\n\n # whitelist: files directly inside BHIn will have a header.\n in_list = vim.eval(\"g:BHIn\")\n for dirname in in_list:\n dirname = os.path.realpath(os.path.expanduser(dirname))\n if file_dir == dirname:\n debug(\"File in BHIn, do write.\")\n return True\n\n # whitelist: files under BHUnder or its sub-dir will have a header.\n under_list = vim.eval(\"g:BHUnder\")\n for dirname in under_list:\n dirname = os.path.realpath(os.path.expanduser(dirname))\n if filepath.startswith(dirname):\n debug(\"File under BHUnder, do write.\")\n return True\n\n debug(\"default, do not write header.\")\n return False", "def copy_file(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n if _passes_filter(src, filter):\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if _should_copy(src, dst):\n #console(f'copy {src}\\n --> {dst}')\n shutil.copyfile(src, dst, follow_symlinks=True)\n shutil.copystat(src, dst, follow_symlinks=True)\n return True\n return False", "def can_minimize_file(file_path):\n # If this is not a binary file, we should be able to minimize it in some way.\n if not utils.is_binary_file(file_path):\n return True\n\n # Attempt to minimize IPC dumps.\n if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):\n return supports_ipc_minimization(file_path)\n\n # Other binary file formats 
are not supported.\n return False", "def determine_should_sync(\n self, src_file: Optional[FileStats], dest_file: Optional[FileStats]\n ) -> bool:\n if dest_file:\n dest_file.operation_name = \"delete\"\n LOGGER.debug(\n \"syncing: (None) -> %s (remove), file does not \"\n \"exist at source (%s) and delete mode enabled\",\n dest_file.src if dest_file else None,\n dest_file.dest if dest_file else None,\n )\n return True", "def test_copy_required_include_and_exclude(self):\n include = ['yara/*', '*_malware_*']\n exclude = ['*mobile*', 'yara/?.yara']\n\n self.assertTrue(clone_rules._copy_required('yara/packed.yara', include, exclude))\n self.assertTrue(clone_rules._copy_required('base_malware_index.yara', include, exclude))\n self.assertTrue(clone_rules._copy_required('yara/mac_malware.yar', include, exclude))\n\n self.assertFalse(clone_rules._copy_required('not_included.yara', include, exclude))\n self.assertFalse(clone_rules._copy_required('yara/mobile_malware.yara', include, exclude))\n self.assertFalse(clone_rules._copy_required('yara/A.yara', include, exclude))", "def _include_file(self, root_parts, f):\n if len(root_parts) and root_parts[0] == \"lwc\":\n # only include expected file extensions within lwc components\n return f.lower().endswith((\".js\", \".js-meta.xml\", \".html\", \".css\", \".svg\"))\n return True", "def copy_file ( self, source, dest, chown=True, chmod=True ):\n if self._copy_file ( source, dest ):\n if chmod:\n self.chmod_file ( dest )\n if chown:\n self.chown_file ( dest )\n\n return True\n else:\n return False", "def copy_file(source, destination):\n\n try:\n shutil.copy(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True", "def copy_if_needed(src: str, dst: str, filter: str|List[str]|None = None) -> bool:\n #console(f'COPY {src} --> {dst}')\n if os.path.isdir(src):\n return copy_dir(src, dst, filter)\n else:\n return copy_file(src, dst, filter)", "def _local_install(self):\n config = self._config\n ext = config.plugins[self.full_name].get('pkg_extension', '')\n if not ext:\n return False\n\n # ensure extension begins with a dot\n ext = '.{0}'.format(ext.lstrip('.'))\n\n return config.context.package.arg.endswith(ext)", "def needs_conan(self):\n return any([Path(self.project_dir/conanfile).exists()\n for conanfile in (\"conanfile.py\", \"conanfile.txt\")])", "def check_for_assemble_file(task_file):\n if not os.path.exists(task_file):\n print_failure_msg(\"{} file is missing\".format(task_file))\n exit(127)\n return True", "def _FilterFile(affected_file):\n return affected_file.LocalPath().endswith(\n ('.h', '.cc', '.cpp', '.cxx', '.mm'))", "def _edit_arch_target_based(self, spec, prefix):\n if spec.version < Version(\"2.14\"):\n return False\n\n found_special_opt = False\n with working_dir(\"arch\"):\n arch_filename = \"{0}.arch\".format(self.build_directory)\n\n replace = [\n [r\"^CHARMARCH = .*$\", \"CHARMARCH = {0}\".format(self.spec[\"charmpp\"].charmarch)],\n [r\"^NAMD_ARCH = .*$\", \"NAMD_ARCH = {0}\".format(self.arch)],\n ]\n\n # Optimizations for skylake_avx512\n if (\n spec.platform == \"linux\"\n and self.compiler.name == \"intel\"\n and \"avx512\" in spec.target\n and spec.target >= \"skylake_avx512\"\n ):\n if spec.version >= Version(\"2.15\") and os.path.exists(\"Linux-AVX512-icc.arch\"):\n tty.info(\"Building binaries with AVX512-tile optimization\")\n copy(\"Linux-AVX512-icc.arch\", arch_filename)\n elif spec.version >= Version(\"2.14\") and os.path.exists(\"Linux-SKX-icc.arch\"):\n tty.info(\"Building binaries with 
Skylake-X\" \"AVX512 optimization\")\n copy(\"Linux-SKX-icc.arch\", arch_filename)\n else:\n return False\n\n replace.append([r\"^CXX = icpc\", \"CXX = {0}\".format(self.compiler.cxx)])\n replace.append([r\"^CC = icc\", \"CC = {0}\".format(self.compiler.cc)])\n found_special_opt = True\n\n if found_special_opt:\n for pattern, replacement in replace:\n filter_file(pattern, replacement, arch_filename)\n\n return found_special_opt", "def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)", "def _accept_for_flag (self, filename):\n\t\troot, ext = os.path.splitext(filename)\n\t\tif not ext:\n\t\t\treturn 1\n\t\telse:\n\t\t\tbinary_extensions = ['.jpg', '.gif', '.png', '.jar' ]\n\t\t\treturn ext not in ['.bak', '.off','.old', '.works', '.clean', '.obs', '.log', '.db'] + binary_extensions", "def is_file_excluded(self, file_path: Union[str, os.PathLike]) -> bool:\n # TODO: current design of ignore file can't distinguish between files and directories of the same name\n if self._path_spec is None:\n self._path_spec = self._create_pathspec()\n if not self._path_spec:\n return False\n file_path = self._get_rel_path(file_path)\n if file_path is None:\n return True\n\n norm_file = normalize_file(file_path)\n matched = False\n for pattern in self._path_spec:\n if pattern.include is not None:\n if pattern.match_file(norm_file) is not None:\n matched = pattern.include\n\n return matched", "def _check_for_custom_config(self, standard_conf_path):\n\n ret_val = False\n conf_filename = os.path.basename(standard_conf_path)\n custom_conf_expected_path = CUSTOM_CONFIG_DIR + '/' + self._get_tempdir() + '/' + conf_filename\n\n if os.path.isfile(custom_conf_expected_path):\n ret_val = True\n\n return ret_val", "def test_check(self):\n\n self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))", "def file_allowed(self):\n if self._allowed_ext:\n if self.get_ext() not in self._allowed_ext:\n return False\n \n return True", "def _include_path(self, path, extensions=None):\r\n if extensions is None:\r\n extensions = tuple(self.readers.extensions)\r\n basename = os.path.basename(path)\r\n\r\n #check IGNORE_FILES\r\n ignores = self.settings['IGNORE_FILES']\r\n if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):\r\n return False\r\n\r\n if extensions is False or basename.endswith(extensions):\r\n return True\r\n return False", "def on_file(self) -> bool:\n\n return (\n self.env_var_helper.set_name(\"PYFUNCEBLE_DEBUG\").exists()\n or self.env_var_helper.set_name(\"DEBUG_PYFUNCEBLE\").exists()\n )" ]
[ "0.5926794", "0.576537", "0.5734732", "0.563512", "0.55802953", "0.5525144", "0.5376832", "0.5344218", "0.5341193", "0.5339224", "0.5329194", "0.53072923", "0.5279975", "0.5243441", "0.5227413", "0.5205067", "0.51915103", "0.51590645", "0.5157132", "0.51411957", "0.512988", "0.5128976", "0.5109894", "0.51074076", "0.5100447", "0.50748885", "0.50723916", "0.5058974", "0.50575316", "0.5048318" ]
0.80116713
0
Takes a user and a group name, and returns `True` if the user is in that group.
def is_in_group(user, group_name):
    return is_in_group_user_id(user.id, group_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()", "def is_user_in_group(user, group):\n\n if user == group.get_name():\n return True\n elif user in group.get_users():\n return True\n else:\n for group in group.get_groups():\n return is_user_in_group(user, group)\n\n return False", "def is_user_in_group(user: str, group: Group) -> bool:\n if group is None or user is None or user is \"\":\n return False\n if user in group.get_users():\n return True\n for sub_group in group.get_groups():\n user_exists = is_user_in_group(user, sub_group)\n if user_exists:\n return True\n return False", "def is_user_in_group(user, group):\r\n if type(group) is not Group:\r\n raise ValueError(\"Not a valid group\")\r\n\r\n if type(user) is not str:\r\n raise ValueError(\"Not a valid user\")\r\n\r\n user_name = find_user(user, group)\r\n if user_name == \"\":\r\n return False\r\n\r\n return True", "def _is_in_group(user, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user.id).exists()\n except Group.DoesNotExist:\n return None", "def is_user_in_group(user, group):\n # Check group\n if user in group.users: # O(N)\n return True\n\n # Check subgroups\n for sub_group in group.groups: # O(N)\n if is_user_in_group(user, sub_group):\n return True\n\n return False", "def is_user_in_group(_cls, user, group):\n if user is None or group is None:\n return \"Please enter a valid user and group\"\n\n if user in group.get_users():\n return True\n else:\n for sub_group in group.get_groups():\n if Group.is_user_in_group(user, sub_group):\n return True\n\n return False", "def user_in_group(user, *group_names):\n\treturn bool(user.groups.filter(name__in=group_names)) | user.is_superuser", "def is_user_in_group(user, group):\n sub_user=group.get_users() # Get all the users within the group\n\n if user in sub_user: # If user is within the group, return True\n return True\n\n sub_group=group.get_groups() # Get all the sub groups within the group\n\n if len(sub_group)==0: # Base case if there are no sub groups within group\n return False\n\n for item in sub_group: # Recursively search within sub groups for the user\n return is_user_in_group(user,item)\n return False", "def _user_belongs_to(group_name):\n user_name = _get_user_name()\n groups = _get_user_groups(user_name)\n return group_name in groups", "def has_group(user, group_name):\n return user.groups.filter(name=group_name).exists()", "def is_in_group_user_id(user_id, group_name):\n try:\n return Group.objects.get(name=group_name).user_set.filter(id=user_id).exists()\n except Group.DoesNotExist:\n return None", "def has_group(group, user, request):\n return group_names[group] in groupfinder(user.username, request)", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def is_in_group(self, group):\n return group in self.get_all_groups()", "def group_authenticated(self, user_token, group):\n if self.authenticated(user_token):\n token = self.token_storage.get(user_token)\n groups = self.get_groups(token.username)\n if group in groups:\n return True\n\n return False", "def is_group(self, group_name):\n\n return group_name in self._group", "def 
in_group(self, group):\n\n return self.secondary_groups.filter(\n groups_users.c.group_id == group.id).count() > 0", "def userMemebership(self, username, group):\r\n return group in self.getUserGroups(username)", "def is_member_of_group(self, mail, group):\n members = self.get_group_members(group)\n\n if mail in members:\n return True\n return False", "def belongs_to(self, group):\n return self in group.users", "def has_group(self,groupname):\n\n if not self.check_prereqs():\n return False\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" WHERE $groupname_field$='$groupname$'\",{'groupname':groupname,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: has_group: %s\" % (query,))\n\n cursor.execute(query)\n for row in cursor:\n return True\n return False", "def has_permission(user, required_groups):\n user_groups = set([g.name for g in user.groups.all()])\n return user_groups.issuperset(required_groups)", "def IsObjectInGroup(object_id, group_name=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n count = rhobj.GroupCount\n if count<1: return False\n if not group_name: return True\n index = scriptcontext.doc.Groups.Find(group_name, True)\n if index<0: raise ValueError(\"%s group does not exist\"%group_name)\n group_ids = rhobj.GetGroupList()\n for id in group_ids:\n if id==index: return True\n return False", "def in_projects_admin_group(user):\n if user:\n return user.groups.filter(name='projects_admin').count() != 0", "def test_has_access_is_in_group(self):\n user, usrmgr_mock = self.__get_test_instance(\n \"@foouser\", 1337, group=\"foogroup\")\n usrmgr_mock.return_value.user_is_in_group.return_value = True\n with patch.object(user, \"save\"):\n user.has_access(\"foogroup\")", "def allowed_group_access_use(user, group):\n return (user.has_perm(\"vnswww.group_use_any\")\n or (user.has_perm(\"vnswww.group_use_org\")\n and group.org == user.get_profile().org))", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False" ]
[ "0.90404195", "0.89909446", "0.8921993", "0.88956964", "0.8691311", "0.8651488", "0.85226196", "0.8471016", "0.8424121", "0.83981097", "0.83976525", "0.8325658", "0.8266118", "0.8199991", "0.81931895", "0.7539715", "0.7505139", "0.7332905", "0.726839", "0.7217727", "0.7149777", "0.70672965", "0.7025459", "0.6690672", "0.66790277", "0.66556174", "0.65745276", "0.64787424", "0.64599943", "0.6438533" ]
0.90948594
0
'If you create a Lambda function that processes events from stream-based services (Amazon Kinesis Streams), the number of shards per stream is the unit of concurrency. If your stream has 100 active shards, there will be 100 Lambda functions running concurrently. Then, each Lambda function processes events on a shard in the order that they arrive.' Therefore, for checkpointing logic, we should make the primary
def handler(event, context):
    debug = False
    rewind = False
    dry_run = False

    table = _ensure_dynamo_table()
    consumer_id = 'test-consumer'

    if debug:
        state = table.scan()
        print "Active leases in Dynamo:", state["Count"]
        for item in state["Items"]:
            print json.dumps(item, indent=4, sort_keys=True)

    lease = None
    shard = None
    try:
        visitors = set()
        last_timestamp = None
        for i, record in enumerate(event.get('Records', [])):
            event_id, data = (record['eventID'], record['kinesis']['data'])
            shard, checkpoint = event_id.split(u':')

            if rewind:
                print "Rewinding to checkpoint 0"
                _clear_consumer_lease(table, consumer_id, shard)
                rewind = False

            if lease is None:
                lease = _get_consumer_lease(table, consumer_id, shard) \
                    or {"checkpoint": "0"}

            if checkpoint <= lease["checkpoint"]:
                # replayed event, we should skip it
                print "Replayed event; skipping"
                continue

            # => decode from b64
            raw_event = base64.b64decode(data)
            # => parse from JSON
            json_event = json.loads(raw_event)
            # => extract out visitor id and timestamp if present
            visitor = json_event.get("visitor_site_id", "N/A")
            visitors.add(visitor)
            last_timestamp = json_event.get("ts_action", "N/A")
            # => do something with the data
            result = process(json_event)
            if result:
                pass
            # => checkpoint the shard
            lease["checkpoint"] = checkpoint

        logger.info("Saw {} unique visitors in batch ending with {}".format(
            len(visitors), last_timestamp))

        if not dry_run:
            _put_consumer_lease(table, consumer_id, shard, lease)
    except Exception as ex:
        # do not save consumer checkpoints because error happened
        # instead, we should probably log something about the error
        # in the consumer lease, to allow the Lambda to retry a fixed
        # number of times, before finally "giving up" and skipping
        # the records
        raise "^ some form of error handling required"
        if ex:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lambda_handler(event, context):\n\n mytime, lambda_name, env_vars = lambda_init.init_lambda(context)\n stage = env_vars[\"stage\"]\n consumer_master_past_lambda = env_vars[\"consumer_master_past_name\"]\n\n apps, test_params = init_apps_from_test_params(event)\n filters = init_filters()\n\n step = generate_step_from_mytime(mytime)\n\n print(\"step:\", step)\n for app in apps:\n advance_app_timestamp(app, step)\n\n consumer_event = {}\n\n # Invoke the consumer-master lambda for each app in apps\n for app in apps:\n headers = Headers(\n shadowreader_type=\"past\", stage=stage, app=app, step=step\n ).headers\n\n consumer_event = {\n \"app\": app.name,\n \"identifier\": app.identifier,\n \"base_url\": app.base_url,\n \"cur_timestamp\": app.cur_timestamp,\n \"rate\": app.rate,\n \"baseline\": app.baseline,\n \"parent_lambda\": lambda_name,\n \"child_lambda\": consumer_master_past_lambda,\n \"headers\": headers,\n \"filters\": filters,\n }\n invoke_func(consumer_event, func=consumer_master_past_lambda)\n\n if apps and consumer_event:\n print_to_logs(consumer_event, apps)\n\n # Collect metrics and put metrics into CW\n metrics = []\n for app in apps:\n # This is the timestamp (in epoch time) that is being replayed\n # by the load test.\n metric = {\n \"name\": \"replayed_timestamp\",\n \"stage\": stage,\n \"lambda_name\": lambda_name,\n \"app\": app.name,\n \"identifier\": app.identifier,\n \"mytime\": mytime,\n \"val\": app.cur_timestamp,\n }\n metrics.append(metric)\n\n if sr_plugins.exists(\"metrics\"):\n metric_emitter = sr_plugins.load(\"metrics\")\n for metric in metrics:\n metric_emitter.main(metric)\n\n cur_params = {\"apps\": apps, \"filters\": filters, \"test_params\": test_params}\n\n if sr_plugins.exists(\"test_params_emitter\"):\n params_emitter = sr_plugins.load(\"test_params_emitter\")\n params_emitter.main(\n cur_params,\n lambda_name,\n mytime,\n stage,\n env_vars,\n sr_config,\n sr_plugins._sr_plugins,\n )\n\n return json.dumps(cur_params, default=str), json.dumps(consumer_event, default=str)", "def __init__(__self__, *,\n function_name: pulumi.Input[str],\n amazon_managed_kafka_event_source_config: Optional[pulumi.Input['EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs']] = None,\n batch_size: Optional[pulumi.Input[int]] = None,\n bisect_batch_on_function_error: Optional[pulumi.Input[bool]] = None,\n destination_config: Optional[pulumi.Input['EventSourceMappingDestinationConfigArgs']] = None,\n document_db_event_source_config: Optional[pulumi.Input['EventSourceMappingDocumentDbEventSourceConfigArgs']] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n event_source_arn: Optional[pulumi.Input[str]] = None,\n filter_criteria: Optional[pulumi.Input['EventSourceMappingFilterCriteriaArgs']] = None,\n function_response_types: Optional[pulumi.Input[Sequence[pulumi.Input['EventSourceMappingFunctionResponseTypesItem']]]] = None,\n maximum_batching_window_in_seconds: Optional[pulumi.Input[int]] = None,\n maximum_record_age_in_seconds: Optional[pulumi.Input[int]] = None,\n maximum_retry_attempts: Optional[pulumi.Input[int]] = None,\n parallelization_factor: Optional[pulumi.Input[int]] = None,\n queues: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n scaling_config: Optional[pulumi.Input['EventSourceMappingScalingConfigArgs']] = None,\n self_managed_event_source: Optional[pulumi.Input['EventSourceMappingSelfManagedEventSourceArgs']] = None,\n self_managed_kafka_event_source_config: 
Optional[pulumi.Input['EventSourceMappingSelfManagedKafkaEventSourceConfigArgs']] = None,\n source_access_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['EventSourceMappingSourceAccessConfigurationArgs']]]] = None,\n starting_position: Optional[pulumi.Input[str]] = None,\n starting_position_timestamp: Optional[pulumi.Input[float]] = None,\n topics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tumbling_window_in_seconds: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"function_name\", function_name)\n if amazon_managed_kafka_event_source_config is not None:\n pulumi.set(__self__, \"amazon_managed_kafka_event_source_config\", amazon_managed_kafka_event_source_config)\n if batch_size is not None:\n pulumi.set(__self__, \"batch_size\", batch_size)\n if bisect_batch_on_function_error is not None:\n pulumi.set(__self__, \"bisect_batch_on_function_error\", bisect_batch_on_function_error)\n if destination_config is not None:\n pulumi.set(__self__, \"destination_config\", destination_config)\n if document_db_event_source_config is not None:\n pulumi.set(__self__, \"document_db_event_source_config\", document_db_event_source_config)\n if enabled is not None:\n pulumi.set(__self__, \"enabled\", enabled)\n if event_source_arn is not None:\n pulumi.set(__self__, \"event_source_arn\", event_source_arn)\n if filter_criteria is not None:\n pulumi.set(__self__, \"filter_criteria\", filter_criteria)\n if function_response_types is not None:\n pulumi.set(__self__, \"function_response_types\", function_response_types)\n if maximum_batching_window_in_seconds is not None:\n pulumi.set(__self__, \"maximum_batching_window_in_seconds\", maximum_batching_window_in_seconds)\n if maximum_record_age_in_seconds is not None:\n pulumi.set(__self__, \"maximum_record_age_in_seconds\", maximum_record_age_in_seconds)\n if maximum_retry_attempts is not None:\n pulumi.set(__self__, \"maximum_retry_attempts\", maximum_retry_attempts)\n if parallelization_factor is not None:\n pulumi.set(__self__, \"parallelization_factor\", parallelization_factor)\n if queues is not None:\n pulumi.set(__self__, \"queues\", queues)\n if scaling_config is not None:\n pulumi.set(__self__, \"scaling_config\", scaling_config)\n if self_managed_event_source is not None:\n pulumi.set(__self__, \"self_managed_event_source\", self_managed_event_source)\n if self_managed_kafka_event_source_config is not None:\n pulumi.set(__self__, \"self_managed_kafka_event_source_config\", self_managed_kafka_event_source_config)\n if source_access_configurations is not None:\n pulumi.set(__self__, \"source_access_configurations\", source_access_configurations)\n if starting_position is not None:\n pulumi.set(__self__, \"starting_position\", starting_position)\n if starting_position_timestamp is not None:\n pulumi.set(__self__, \"starting_position_timestamp\", starting_position_timestamp)\n if topics is not None:\n pulumi.set(__self__, \"topics\", topics)\n if tumbling_window_in_seconds is not None:\n pulumi.set(__self__, \"tumbling_window_in_seconds\", tumbling_window_in_seconds)", "def __init__(\n self, stream_name, checkpoint_table=None, host_key=None, shard_iterator_type=None,\n iterator_timestamp=None, shard_iterators=None, recover_from_dynamo=False,\n iterator_sequence_number=None, custom_kinesis_client=None):\n\n super(AsyncKinesisConsumer, self).__init__()\n\n self.stream_name = stream_name\n self.shard_iterator_type = shard_iterator_type\n self.iterator_timestamp = iterator_timestamp\n self.iterator_sequence_number = 
iterator_sequence_number\n self.restricted_shard_iterators = shard_iterators\n\n if recover_from_dynamo and not checkpoint_table:\n raise RuntimeError('Can not use recover_from_dynamo without checkpoint table')\n self.recover_from_dynamodb = recover_from_dynamo\n\n # Allow a custom kinesis client to be passed in. This allows for setting of any additional parameters in\n # the client without needing to track them in this library.\n if custom_kinesis_client is not None:\n self.kinesis_client = custom_kinesis_client\n else:\n self.kinesis_client = aioboto3.client('kinesis')\n\n self.checkpoint_table = checkpoint_table\n self.checkpoint_callback = None\n self.host_key = host_key\n\n self.shard_readers = {}\n self.dynamodb_instances = {}\n self.stream_data = None\n self.force_rescan = True\n\n self.checkpoint_interval = AsyncKinesisConsumer.DEFAULT_CHECKPOINT_INTERVAL\n self.lock_holding_time = AsyncKinesisConsumer.DEFAULT_LOCK_HOLDING_TIME\n self.reader_sleep_time = AsyncKinesisConsumer.DEFAULT_SLEEP_TIME\n self.fallback_time_delta = AsyncKinesisConsumer.DEFAULT_FALLBACK_TIME_DELTA", "def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n 
},\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def lambda_handler(event, context): # pylint: disable=too-many-locals,too-many-branches,too-many-statements\r\n try: # pylint: disable=too-many-nested-blocks\r\n print(\"Execution started!\")\r\n #print(\"Event: \",event)\r\n # Bucket name and Full path for file - where file will be uploded\r\n source_bucket_name = event[\"detail\"][\"requestParameters\"][\"bucketName\"]\r\n source_key = urllib.parse.unquote_plus(\r\n event[\"detail\"][\"requestParameters\"][\"key\"], encoding='utf-8')\r\n \r\n print(\"file_path: \",source_key)\r\n #Loading master config\r\n print(\"Loading master_config\")\r\n audit_config = {}\r\n config_path = \"./config/\" + \\\r\n os.environ['CCM_ENV'] + \"/master_config.json\"\r\n config_content = open(config_path).read()\r\n config_json = json.loads(config_content)\r\n audit_config = config_json[\"audit_config\"]\r\n snow_params = config_json[\"ERROR_NOTIFICATION_SNOW_PARAMS\"]\r\n athena_query_param = config_json[\"ATHENA_QUERY_PARAMS\"]\r\n athena_table_params = config_json[\"ATHENA_TABLE_PARAMS\"]\r\n\r\n # Audit Parameters Based on the Invoking lambda and its operation involved\r\n audit_config[\"component_type_code\"] = \"ETL\"\r\n audit_config[\"component_name\"] = \"PCP Appflow\"\r\n audit_config[\"source_name\"] = \"Patient Connections Platform\"\r\n audit_config[\"target_name\"] = \"Consumer Consent Management\"\r\n audit_config[\"full_file_path\"] = \"s3://\" + \\\r\n source_bucket_name + \"/\" + source_key\r\n audit_config[\"file_version_id\"] = \"\"\r\n\r\n # Creates Job Entry in ABC Framework\r\n print(\"audit config::\", audit_config)\r\n process_execution_id = audit_helper.\\\r\n invoke_edb_abc_log_process_status_event_job_entry(audit_config)\r\n audit_config[\"process_execution_id\"] = process_execution_id\r\n print(\"process_execution_id ::\", process_execution_id)\r\n #print(\"source_key: \",source_key)\r\n s3_write = boto3.client('s3')\r\n record_dict = {}\r\n file_name = \"\"\r\n final_json = \"\"\r\n # prefix = \"\"\r\n # file_list = []\r\n # client = boto3.client(\"s3\")\r\n # 
result = client.list_objects(Bucket=source_bucket_name, Prefix=source_key, Delimiter='/')\r\n # #print(result)\r\n # for obj in result.get('CommonPrefixes'):\r\n # prefix = obj.get('Prefix')\r\n # #print(prefix)\r\n # file_list = list_files(client,source_bucket_name,prefix)\r\n # for file in file_list:\r\n # #print(file)\r\n json_read = read_s3_file(source_bucket_name, source_key)\r\n data = json.loads(json_read)\r\n #print(data)\r\n if data != '':\r\n record_dict = {k.lower(): v for k, v in data.items()}\r\n print(\"Record_Dict::\",record_dict)\r\n event_type_param = {}\r\n event_type_list = athena_table_params.keys()\r\n print(\"event_type_list\",event_type_list)\r\n for key in event_type_list:\r\n print(\"key\",key)\r\n if key in source_key:\r\n print(\"key\",key)\r\n event_type_param = athena_table_params[key]\r\n print(event_type_param)\r\n if \"changeeventheader\" in record_dict:\r\n if record_dict[\"changeeventheader\"][\"changeType\"] == \"CREATE\":\r\n #and record_dict[\"dtpc_affiliate__c\"] == 'US':\r\n recordid_create = record_dict[\"changeeventheader\"][\"recordIds\"][0]\r\n print(recordid_create)\r\n if recordid_create != '':\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace(\":\",\".\")\r\n create_json = json.dumps(record_dict)\r\n final_json = create_json\r\n file_name = recordid_create + \"-create-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, Key=final_source_key)\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n elif record_dict[\"changeeventheader\"][\"changeType\"] == \"UPDATE\":\r\n record_ids_list = record_dict[\"changeeventheader\"][\"recordIds\"]\r\n if len(record_ids_list) != 0:\r\n for ele in record_ids_list:\r\n print(ele)\r\n element = \"'\" + ele + \"'\"\r\n payload_condition = event_type_param[\"recordid_condition\"]\r\n query = 'SELECT * FROM '+event_type_param[\"athena_create_table\"]+\\\r\n ' WHERE lastmodifieddate IN(SELECT max(lastmodifieddate) from '\\\r\n +event_type_param[\"athena_create_table\"]+\\\r\n ', UNNEST(\"'+payload_condition[0]+'\".\"'+payload_condition[1]+\\\r\n '\") AS ln(jsondata) WHERE jsondata IN ('+element+'));'\r\n print(query)\r\n athena_query_param['athena_query'] = query\r\n query_result_record_id = athena_helper.perform_athena_search\\\r\n (athena_query_param)\r\n print(\"Athena Query Result for Create Path:::\", query_result_record_id)\r\n update_json = create_complete_payload(data,query_result_record_id)\r\n print(\"update_json: \",update_json)\r\n if len(update_json) != 0:\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace\\\r\n (\":\",\".\")\r\n final_json = json.dumps(update_json)\r\n file_name = ele + \"-update-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, \\\r\n Key=final_source_key)\r\n else:\r\n print(ele,\" does not have a create payload\")\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n else:\r\n raise Exception(\"ChangeEventHeader is missing: \", record_dict)\r\n else:\r\n raise Exception(\"Invalid Payload: \", 
record_dict)\r\n\r\n except (Exception) as err: # pylint: disable=line-too-long,broad-except\r\n print(\"Error occured: {0}\".format(str(err)))\r\n audit_type = \"error\"\r\n error_msg = sys.exc_info()\r\n exc_type = error_msg\r\n exc_obj = error_msg\r\n snow_params[\"flag\"] = \"FAIL\"\r\n snow_params[\"error_message\"] = str(exc_obj)\r\n snow_params[\"error_type\"] = str(exc_type)\r\n audit_config[\"exception_message\"] = str(exc_obj)\r\n if audit_config != {}:\r\n logging.exception(sys.exc_info())\r\n audit_helper.invoke_edb_abc_log_process_status_event(\r\n audit_type, audit_config) # pylint: disable=line-too-long\r\n audit_helper.raise_snow_incident(snow_params)", "def test_kinesis_too_large_record(sdc_builder, sdc_executor, aws, keep_data):\n record_1_content = 'Hello 1'\n record_2_content = 'Hello ' + '2' * 1024 * 1024\n record_3_content = 'Hello 3'\n file_content = f'{record_1_content}\\n{record_2_content}\\n{record_3_content}'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data=file_content,\n stop_after_first_batch=True,\n max_line_length=len(record_2_content)\n )\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n kinesis_producer = pipeline_builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> [kinesis_producer, wiretap.destination]\n pipeline = pipeline_builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n )\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n assert len(received_data) == 2\n assert received_data[0] == record_1_content\n assert received_data[1] == record_3_content\n\n error_records = wiretap.error_records\n assert len(error_records) == 1\n assert error_records[0].header['errorCode'] == 'KINESIS_08'\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the 
message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))", "def flax_shard_checkpoint(params, max_shard_size=\"10GB\"):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = {}\n current_block_size = 0\n total_size = 0\n\n # flatten the weights to chunk\n weights = flatten_dict(params, sep=\"/\")\n for item in weights:\n weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = {}\n current_block_size = 0\n\n current_block[item] = weights[item]\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = FLAX_WEIGHTS_NAME.replace(\".msgpack\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack\")\n shards[shard_file] = shard\n for weight_name in shard.keys():\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index", "def lambda_handler(event, context):\n log.log_request_and_context(event, context)\n\n parent_batch_id = event[\"parent_batch_id\"]\n job_level = event[\"job_level\"]\n\n parent_batch = db.get_batch_metadata(parent_batch_id)\n if parent_batch is None:\n raise Exception(f\"Invalid parent batch id: {parent_batch_id}\")\n\n if job_level == 1:\n meta_data_type = BatchMetadataType.FIRST_LEVEL\n elif job_level == 2:\n meta_data_type = BatchMetadataType.SECOND_LEVEL\n elif job_level == 3:\n meta_data_type = BatchMetadataType.THIRD_LEVEL\n\n # Filter jobs by job 
level\n labeling_jobs = parent_batch[BatchMetadataTableAttributes.LABELING_JOBS]\n current_jobs = [job for job in labeling_jobs if job[\"jobLevel\"] == job_level]\n log.logging.info(\"Kicking off %d jobs for level %d\", len(current_jobs), job_level)\n\n batch_id = f\"{parent_batch_id}-{meta_data_type.lower()}\"\n for job in current_jobs:\n trigger_labeling_job(parent_batch_id, batch_id, job)\n\n try:\n db.insert_perform_labeling_job_metadata(\n parent_batch_id=parent_batch_id,\n batch_id=batch_id,\n batch_status=BatchStatus.IN_PROGRESS,\n batch_metadata_type=meta_data_type,\n num_children_batches=len(current_jobs),\n )\n except botocore.exceptions.ClientError as err:\n raise Exception(f\"failed to put batch id {batch_id}\") from err\n\n return {\n \"batch_id\": batch_id,\n }", "def redshift_lambda_handler(event, context):\n logging.debug('event: %s', event)\n\n detail = event['detail']\n event_name = detail['eventName']\n creator = get_creator(event)\n\n logger.info('Event type: %s', event_name)\n\n if is_err_detail(logger, detail):\n return False\n\n if event_name == 'CreateCluster':\n logger.debug('%s is creating cluster: %s',\n creator, detail['requestParameters']['clusterIdentifier'])\n\n # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html\n cluster_arn = 'arn:aws:redshift:' + detail['awsRegion'] + ':'\\\n + detail['userIdentity']['accountId'] + ':cluster:'\\\n + detail['requestParameters']['clusterIdentifier']\n short_msg = {\n \"EventName\": event_name,\n \"Creator\": creator,\n \"ResourceArn\": cluster_arn,\n \"TagStatus\": \"pending\",\n \"MaxRetries\": int(os.environ['SFN_MAX_RETRIES']),\n \"Retries\": 0\n }\n\n sfn = Boto3Wrapper.get_client('stepfunctions')\n response = sfn.start_execution(\n stateMachineArn=os.environ['SFN_ARN'],\n name=creator+'-'+event_name+'-'+detail['eventID'],\n input=json.dumps(short_msg)\n )\n\n logger.info('Step Functions start execution: %s', response)\n\n return True", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def process(msg, context, region):\n\n job_id = int(msg['ingest_job'])\n chunk_key = msg['chunk_key']\n tile_key = msg['tile_key']\n print(\"Tile key: {}\".format(tile_key))\n\n proj_info = BossIngestProj.fromTileKey(tile_key)\n\n # Set the job id\n proj_info.job_id = msg['ingest_job']\n\n print(\"Data: {}\".format(msg))\n\n # update value in the dynamo table\n tile_index_db = BossTileIndexDB(proj_info.project_name)\n chunk = tile_index_db.getCuboid(chunk_key, job_id)\n if chunk:\n if tile_index_db.cuboidReady(chunk_key, chunk[\"tile_uploaded_map\"]):\n print(\"Chunk already has all its tiles: {}\".format(chunk_key))\n # Go ahead and setup to fire another ingest lambda so this tile\n # entry will be deleted on successful execution of the ingest lambda.\n chunk_ready = True\n else:\n print(\"Updating tile index for chunk_key: {}\".format(chunk_key))\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n else:\n # First tile in the chunk\n print(\"Creating first entry for chunk_key: {}\".format(chunk_key))\n try:\n tile_index_db.createCuboidEntry(chunk_key, job_id)\n except ClientError as err:\n # Under _exceptional_ circumstances, it's possible for another lambda\n # to beat the current instance to creating the initial cuboid entry\n # in the index.\n error_code = err.response['Error'].get('Code', 'Unknown')\n if error_code == 
'ConditionalCheckFailedException':\n print('Chunk key entry already created - proceeding.')\n else:\n raise\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n\n # ingest the chunk if we have all the tiles\n if chunk_ready:\n print(\"CHUNK READY SENDING MESSAGE: {}\".format(chunk_key))\n # insert a new job in the insert queue if we have all the tiles\n ingest_queue = IngestQueue(proj_info)\n ingest_queue.sendMessage(json.dumps(msg))\n\n # Invoke Ingest lambda function\n names = AWSNames.from_lambda(context.function_name)\n lambda_client = boto3.client('lambda', region_name=region)\n lambda_client.invoke(\n FunctionName=names.tile_ingest.lambda_,\n InvocationType='Event',\n Payload=json.dumps(msg).encode())\n else:\n print(\"Chunk not ready for ingest yet: {}\".format(chunk_key))\n\n print(\"DONE!\")", "def lambda_handler(event, context):\n\n # S3 resource invocation\n s3_resource = boto3.resource('s3')\n # S3 bucket selection\n data_bucket_name = \"put_here_data_bucket_name\"\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n request_body_dict = json.loads(event['body'])\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending both ticker and start date if given\n if request_body_dict['start_date'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_future_request(request_body=request_body_dict,\n s3_resource=s3_resource,\n s3_bucket=data_bucket_name, prefix='valid'))\n # or only ticker name if no start date has been provided\n elif request_body_dict['ticker_name'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_request(ticker_name=request_body_dict['ticker_name'],\n s3_resource=s3_resource, s3_bucket=data_bucket_name,\n prefix='train'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n # print data for debug purposes\n print(result)\n\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},\n 'body': str(result)\n }", "def lambda_handler(Event, Context):\n if 'StateMachineArn' in Event.keys():\n step_function_arn = Event['StateMachineArn']\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))\n\n else:\n stepfunctions = [os.getenv(\"CHARGEBEEDOWNLOADARN\"), os.getenv(\"EXCHANGERATESDOWNLOADARN\")]\n\n for stepfunction in stepfunctions:\n step_function_arn = stepfunction\n r = step_function_client.start_execution(\n stateMachineArn=step_function_arn,\n input=json.dumps({\"last_updated\": \"\"}))", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n 
logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def test4():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000147203645/tic000147203645_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def handler(event, context): # pylint: disable=unused-argument\n\n if \"queue\" in event:\n # Lambda is being invoked to read messages directly from queue URL\n # In that mode SNS events are always sent to the internal\n # reconcile topic\n process_queue(\n stac_bucket=os.environ[\"STAC_BUCKET\"],\n cog_pds_meta_pds=json.loads(os.environ[\"COG_PDS_META_PDS\"]),\n queue=event[\"queue\"],\n message_batch_size=int(os.environ[\"MESSAGE_BATCH_SIZE\"]),\n sns_reconcile_target_arn=os.environ[\"SNS_RECONCILE_TARGET_ARN\"],\n catalog_update_queue=os.environ.get(\"CATALOG_UPDATE_QUEUE\"),\n catalog_update_table=os.environ[\"CATALOG_UPDATE_TABLE\"],\n corrupted_xml_queue=os.environ[\"corrupted_xml_queue_url\"],\n delete_processed_messages=int(os.environ[\"DELETE_MESSAGES\"]) == 1,\n )\n else:\n # Lambda is being invoked as trigger to SQS\n process_trigger(\n stac_bucket=os.environ[\"STAC_BUCKET\"],\n cog_pds_meta_pds=json.loads(os.environ[\"COG_PDS_META_PDS\"]),\n event=event,\n sns_target_arn=os.environ[\"SNS_TARGET_ARN\"],\n sns_reconcile_target_arn=os.environ[\"SNS_RECONCILE_TARGET_ARN\"],\n catalog_update_queue=os.environ.get(\"CATALOG_UPDATE_QUEUE\"),\n catalog_update_table=os.environ[\"CATALOG_UPDATE_TABLE\"],\n corrupted_xml_queue=os.environ[\"corrupted_xml_queue_url\"],\n )", "def lambda_handler(event, context):\n print(event)\n \n #Input variables\n aminP = 0.7\n amaxP = 12.0\n durs = [0.75, 1.25, 3, 5, 7]\n min_snr = 3.9\n max_tce = 4\n frac_remain = 0.8\n det_window = 42\n noise_window = 12\n n_sigma = 3.9\n search_bucket = \"tesssearchresults\"\n #---------\n \n cloud = True\n #Local Storage\n local_filename = \"/tmp/mylightcurve.fits\"\n local_detrend_fn = \"/tmp/detrended.fits\"\n out_file = \"/tmp/output.csv\"\n \n b_filename = event['Records'][0]['s3']['object']['key']\n bucket = event['Records'][0]['s3']['bucket']['name'] \n \n #If not in the cloud bucket begins with /, do the following to set up for a test.\n if bucket[0] == \"/\":\n print(\"Not Using cloud.\")\n cloud = False #For testing\n local_filename = bucket + b_filename\n local_dir = \"/Users/smullally/TESS/lambdaSearch/test/tesssearchresults/\"\n local_detrend_fn = local_dir + \"detrended.fits\"\n out_file = local_dir + \"outfile.fits\"\n \n meta = dict()\n \n #Get the information from the light curve file.\n if cloud:\n time, flux, qflags, phead = io.read_lightcurve_lambda(bucket, \\\n b_filename, local_filename)\n else:\n time, flux, qflags, phead = io.read_lightcurve_lambda_local(bucket, \\\n b_filename, local_filename)\n \n #print(time,flux,qflags)\n ticid, camera, sector, ccd = io.read_header(phead)\n\n namestr = \"tic%012u/tic%012u_s%04u-%1u-%1u\" % \\\n (int(ticid), int(ticid), int(sector),int(camera), int(ccd))\n \n 
#Detrend\n good_time, meddet_flux = ps.clean_timeseries(time, flux, qflags, det_window, \\\n noise_window, n_sigma, sector)\n \n #import matplotlib.pyplot as plt\n #plt.figure()\n #plt.plot(good_time,meddet_flux,'.')\n \n #Take BLS\n results, stats = ps.identifyTces(good_time, meddet_flux, bls_durs_hrs=durs,\\\n minSnr=min_snr, \\\n fracRemain=frac_remain,\\\n maxTces=max_tce, minP=aminP, maxP=amaxP)\n #print(results)\n \n #Now write out results.\n \n bucket_out_name = namestr + \"_plsearch\" + '.csv'\n bucket_detrend_name = namestr + \"_detrend\" + '.fits'\n \n \n io.write_results(out_file, int(ticid), results, stats, **meta)\n io.write_timeseries(local_detrend_fn, good_time, meddet_flux, phead)\n \n if cloud: \n #Write to the S3 bucket.\n s3_client = boto3.client('s3')\n resp = s3_client.upload_file(out_file, search_bucket, bucket_out_name)\n resp = s3_client.upload_file(local_detrend_fn, search_bucket, bucket_detrend_name)\n else:\n resp = \"not cloud\"\n if not os.path.exists(local_dir + \"tic%012u\" % (int(ticid))):\n os.mkdir(local_dir + \"tic%012u\" % (int(ticid)))\n \n try:\n os.remove(local_dir + bucket_out_name)\n except:\n pass\n \n os.rename(out_file, local_dir + bucket_out_name)\n try:\n os.remove(local_dir + bucket_detrend_name)\n except:\n pass\n os.rename(local_detrend_fn, local_dir + bucket_detrend_name)\n \n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"outname\": bucket_detrend_name,\n \"response\": str(resp),\n \"period\": str(results[0][0]),\n \"epoch\": str(results[0][1])\n })\n }", "def test3():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000129646247_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def test_kinesis_preserve_record_order(sdc_builder, sdc_executor, aws, keep_data):\n expected_data = [f'Hello {i}' for i in range(100)]\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data='\\n'.join(expected_data),\n stop_after_first_batch=True\n )\n\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n dev_raw_data_source >> kinesis_producer\n pipeline = builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS ...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream ...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n 
)\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n\n logger.debug(f'Number of messages received from Kinesis = {len(received_data)}')\n assert received_data == expected_data\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def lambda_handler(event, context):\r\n print(\"Function triggered\")\r\n if 'local' == environ.get('APP_STAGE'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\r\n table = dynamodb.Table(\"audiobooksDB\")\r\n else:\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(environ[\"TABLE_NAME\"])\r\n s3 = boto3.client('s3')\r\n \r\n s3FileName = event['Records'][0]['s3']['object']['key'].replace(\"+\", \" \")\r\n bucketName = event['Records'][0]['s3']['bucket']['name']\r\n # Download file from the S3 bucket\r\n try:\r\n book = s3.get_object(Bucket=bucketName, Key=s3FileName)\r\n print(\"Loading file from S3 bucket\")\r\n bookContent = book[\"Body\"].read().decode(\"utf-8\", errors=\"ignore\").split(\"------ END METADATA --------\")\r\n metadata = json.loads(bookContent[0])\r\n bookContent = bookContent[1]\r\n # Polly accepts 100,000 chars at a time. We make chunks of 99990 because we put the part 1 maker in\r\n bookContent = [bookContent[i:i+99990] for i in range(0, len(bookContent), 99990)]\r\n except Exception as e:\r\n print(\"Error while downloading file \" + s3FileName + \"from the S3 bucket \" + bucketName)\r\n raise\r\n # Add part marker to book\r\n if len(bookContent) > 1:\r\n count = 0\r\n for chunk in bookContent:\r\n chunk += \"Part \" + str(count)\r\n hasShortPart = False\r\n audioURLs = []\r\n pollyClient = boto3.client('polly')\r\n for chunk in bookContent:\r\n try:\r\n chunk = convert_text_to_ssml(chunk)\r\n print(\"Asking Polly to record the current chunk\")\r\n response = pollyClient.start_speech_synthesis_task(\r\n Engine='standard',\r\n LanguageCode='en-GB',\r\n OutputFormat='mp3',\r\n OutputS3BucketName=environ['AUDIO_S3_BUCKET'],\r\n Text=chunk,\r\n TextType='ssml',\r\n VoiceId='Brian',\r\n SnsTopicArn=environ[\"SNS_TOPIC\"],\r\n )\r\n\r\n audioURLs.append(response[\"SynthesisTask\"][\"OutputUri\"].split(\"amazonaws.com/\")[-1])\r\n if len(chunk) <= 2000:\r\n hasShortPart = True\r\n print(response)\r\n print(\"Polly was successfully asked to to record the current chunk\")\r\n except Exception as e:\r\n print(\"Error parsing chunk or requesting Polly to say it\")\r\n raise\r\n try:\r\n randomString = ''.join([random.choice(string.ascii_letters \r\n + string.digits) for n in range(32)]) \r\n audiobook = {\r\n \"id\": randomString,\r\n \"bookName\": metadata[\"bookName\"],\r\n \"imageURL\": metadata[\"imageURL\"],\r\n \"authorName\":metadata[\"authorName\"],\r\n \"genres\": metadata[\"genres\"],\r\n \"audioURLs\": audioURLs,\r\n \"description\": metadata[\"description\"],\r\n \"hidden\": False,\r\n \"hasShortPart\": hasShortPart,\r\n \"addedAt\": Decimal(datetime.now().timestamp())\r\n }\r\n response = table.put_item(\r\n Item=audiobook\r\n )\r\n except Exception as e:\r\n print(\"Exception inserting into database\")\r\n print(audiobook)\r\n print(response)\r\n raise\r\n return {\r\n \"statusCode\": 200,\r\n \"body\": json.dumps({\r\n \"message\": audioURLs\r\n }),\r\n }", "def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n 
lambdautils.utils.send_to_kinesis_stream(search_events, \"dummy_stream\")\n boto3_client(\"kinesis\").put_records.call_count == 1", "def run(self, event, context):\n logger.debug('Number of Records: %d', len(event.get('Records', [])))\n\n config = load_config()\n env = load_env(context)\n\n for record in event.get('Records', []):\n payload = StreamPayload(raw_record=record)\n classifier = StreamClassifier(config=config)\n classifier.map_source(payload)\n\n # If the kinesis stream or s3 bucket is not in our config,\n # go onto the next record\n if not payload.valid_source:\n continue\n\n if payload.service == 's3':\n self.s3_process(payload, classifier)\n elif payload.service == 'kinesis':\n self.kinesis_process(payload, classifier)\n elif payload.service == 'sns':\n self.sns_process(payload, classifier)\n else:\n logger.info('Unsupported service: %s', payload.service)\n\n # returns the list of generated alerts\n if self.return_alerts:\n return self.alerts\n # send alerts to SNS\n self.send_alerts(env, payload)", "def runs_on_aws_lambda():\n return 'AWS_SAM_LOCAL' not in os.environ and 'LAMBDA_TASK_ROOT' in os.environ", "def lambda_handler(event, context):\r\n if 'session' in event:\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n if ('session' in event and (event['session']['application']['applicationId'] !=\r\n \"amzn1.ask.skill.57119d91-fb3c-487f-be53-4e7fac12fb83\")):\r\n raise ValueError(\"Invalid Application ID\")\r\n\r\n \"\"\"if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\"\"\"\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])\r\n elif event['request']['type'] == 'UPDATE':\r\n return saveCoffeeMachineStatus(event['request'])\r\n elif event['request']['type'] == \"GLASS\":\r\n return glassStatus(event['request'])\r\n elif event['request']['type'] == \"WATER\":\r\n return waterStatus(event['request'])\r\n elif event['request']['type'] == \"COFFEE\":\r\n return coffeeStatus(event['request'])\r\n elif event['request']['type'] == \"ON_OFF\":\r\n return on_off_status(event['request'])\r\n elif event['request']['type'] == \"ONLINE\":\r\n return online_status_f(event['request'])\r\n elif event['request']['type'] == 'BUSY':\r\n return busyStatus(event['request'])", "def lambda_handler(event, context):\n # Environmental Variables\n CATALOG_BRANCHES_TABLE = anejocommon.set_env_var('CATALOG_BRANCHES_TABLE')\n PRODUCT_INFO_TABLE = anejocommon.set_env_var('PRODUCT_INFO_TABLE')\n S3_BUCKET = anejocommon.set_env_var('S3_BUCKET')\n\n # Loop through event records\n try:\n event_records = event['Records']\n except KeyError:\n event_records = [{'body': event}]\n\n for record in event_records:\n try:\n catalog_sync_info = json.loads(record['body'])\n except TypeError:\n catalog_sync_info = record['body']\n\n # Event Variables\n catalog_url = catalog_sync_info['catalog_url']\n\n apple_bucket_catalog_path = anejocommon.get_path_from_url(\n catalog_url,\n 'html',\n 
append_to_path='.apple'\n )\n \n catalog = anejocommon.retrieve_url(catalog_url)\n try:\n catalog_plist = plistlib.readPlistFromBytes(catalog.data)\n except plistlib.InvalidFileException:\n print(\"ERROR: Cannot read catalog plist\")\n return\n\n # Write our local (filtered) catalogs\n anejocommon.write_local_catalogs(\n apple_bucket_catalog_path,\n catalog_plist,\n S3_BUCKET,\n CATALOG_BRANCHES_TABLE,\n PRODUCT_INFO_TABLE\n )", "def test_kinesis_consumer(sdc_builder, sdc_executor, aws):\n # build consumer pipeline\n application_name = get_random_string(string.ascii_letters, 10)\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n kinesis_consumer = builder.add_stage('Kinesis Consumer')\n kinesis_consumer.set_attributes(application_name=application_name, data_format='TEXT',\n initial_position='TRIM_HORIZON',\n stream_name=stream_name)\n\n trash = builder.add_stage('Trash')\n\n kinesis_consumer >> trash\n\n consumer_origin_pipeline = builder.build(title='Kinesis Consumer pipeline').configure_for_environment(aws)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n # run pipeline and capture snapshot\n client = aws.kinesis\n try:\n logger.info('Creating %s Kinesis stream on AWS ...', stream_name)\n client.create_stream(StreamName=stream_name, ShardCount=1)\n aws.wait_for_stream_status(stream_name=stream_name, status='ACTIVE')\n\n expected_messages = set('Message {0}'.format(i) for i in range(10))\n # not using PartitionKey logic and hence assign some temp key\n put_records = [{'Data': exp_msg, 'PartitionKey': '111'} for exp_msg in expected_messages]\n client.put_records(Records=put_records, StreamName=stream_name)\n\n # messages are published, read through the pipeline and assert\n snapshot = sdc_executor.capture_snapshot(consumer_origin_pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n output_records = [record.field['text'].value\n for record in snapshot[kinesis_consumer.instance_name].output]\n\n assert set(output_records) == expected_messages\n finally:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name) # Stream operations are done. Delete the stream.\n logger.info('Deleting %s DynamoDB table on AWS ...', application_name)\n aws.dynamodb.delete_table(TableName=application_name)" ]
[ "0.6036465", "0.5968453", "0.5582644", "0.54831797", "0.5460034", "0.54361176", "0.5364517", "0.53256035", "0.52649206", "0.5256149", "0.5254377", "0.5188891", "0.5165099", "0.5165099", "0.5149605", "0.51353747", "0.51282066", "0.51216805", "0.5100538", "0.50952226", "0.50934696", "0.50637984", "0.5053903", "0.50325537", "0.5010805", "0.49944043", "0.4984336", "0.49797618", "0.49396428", "0.49276087" ]
0.61084247
0
Test if the path holder contains a shot render.
def test(cls, pathHolder, parentCrawler):
    if not super(ShotRenderCrawler, cls).test(pathHolder, parentCrawler):
        return False

    renderType = pathHolder.baseName().split(".")[0].split("_")[-1]

    return renderType == "sr"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def point_is_shot(self, point: Point):\n return point in self.shot_locations", "def test(cls, pathHolder, parentCrawler):\n if not super(TurntableCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n renderType = pathHolder.baseName().split(\".\")[0].split(\"_\")[-1]\n\n return renderType == \"tt\"", "def test(cls, pathHolder, parentCrawler):\n if not super(Jpg, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() == 'jpg'", "def is_shot(event):\n event_id = event['eventId']\n return event_id == 10", "def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()", "def is_shot_related_version(self, version):\n return self.get_shot(version) is not None", "def is_shot_valid(self, shot):\n a = self.check_position(shot.opponent)\n b = self.check_shot_direction(shot)\n c = self.check_shot_on_target(shot)\n return a and b and c", "def _is_repeatedshot_type(cls, object_):\n return (type(object_).__name__ in ['RepeatedShot'])", "def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # 
Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)", "def hasPng(self):\n\t\tif self.isPng:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.png' ) ).exists", "def can_grab(self, thing):\n return False", "def can_grab(self, thing):\n return False", "def hasScreenshot(self, timeout=20.0, commandId=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n return self.isActionAccepted(timeout=timeout, commandName=Command.SCREENSHOT, \n commandId=commandId)", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def _setJob_checkShot(shotPath):\n\tvalid = True\n\n\tjobPath = os.path.split(shotPath)[0]\n\t#jobDataDir = os.path.join(jobPath, os.environ['IC_METADATA'])\n\tshotDataDir = os.path.join(shotPath, os.environ['IC_METADATA'])\n\n\t# if not os.path.isdir(jobDataDir):\n\t# \tvalid = False\n\n\tif not os.path.isdir(shotDataDir):\n\t\tvalid = False\n\n\treturn valid", "def has_screenshots(miscobj):\n\n imagedir = misctools.get_screenshots_dir(miscobj)\n return imagedir", "def _checkPath(self):\r\n if(not self._isStraightLine()):\r\n raise IllegalMoveException(\"Move is not a straight line\")\r\n path = self._getPath()\r\n if(any(cell.isOccupied() for cell in path)):\r\n raise IllegalMoveException(\"There are pawns on the path\")\r\n return True", "def has_guardian(self):\n return self.tiles.count(3) > 0", "def test(cls, pathHolder, parentCrawler):\n if not super(AsciiCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in ['json']", "def check_shot_direction(self, shot):\n return Vector.v_from_a(shot.angle) * self.dir < 0", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def is_visible(self, path):\n return True", "def exists(self):\n\t\tif self.hasUdim:\n\t\t\treturn len( self.udimPaths ) != 0\n\t\treturn super( textureFile, self ).exists", "def check_path_tile(self):\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.return_path and self.tile == self.return_path[0]:\n del self.return_path[0]\n if not len(self.return_path) > 0:\n return '*' # signal that the path is complete\n return None", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def onGoal(self):\n return self.index == len(self.path)", "def is_path_available(self, y_pos, x_pos):\n if 15 > y_pos >= 0 and 0 <= x_pos < 15:\n return self.map[y_pos][x_pos] in [' ', 'G', 'X', 'W']\n return False", "def requires_safe_render(self) -> bool:\n return True\n # return any(is_reserved(child.name) for child in self.children)", "def canTile(self):\n raise RuntimeError('Not implemented')\n \n return False" ]
[ "0.66429377", "0.6288864", "0.6145428", "0.6053248", "0.6007993", "0.5842656", "0.5803181", "0.5795196", "0.57627773", "0.5747576", "0.56485415", "0.56485415", "0.5615542", "0.55867237", "0.5571235", "0.55255395", "0.55035883", "0.54886705", "0.5453938", "0.545361", "0.5423278", "0.5396576", "0.53901947", "0.5384317", "0.5359062", "0.532667", "0.53093433", "0.53070503", "0.5258432", "0.52555484" ]
0.7443225
0
Find links in JSON-compatible data.
def find_links(obj):
    if isinstance(obj, dict):
        for key, value in obj.iteritems():
            for url in find_links(value):
                yield url
    elif isinstance(obj, list):
        for item in obj:
            for url in find_links(item):
                yield url
    else:
        try:
            if is_link(str(obj)):
                yield obj
        except Exception:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_urls(json_dict):\n url_list = []\n count = 0\n for i in json_dict[\"items\"]:\n if i[\"is_answered\"]:\n url_list.append(i[\"link\"])\n count += 1\n if count == 3 or count == len(i):\n break\n \n for i in url_list:\n wb.open(i)", "def get_urls(self, data):\n data = json.loads(data)\n urls = []\n for article in data['articles']:\n urls.append(article['url'])\n return urls", "def search_link(self):\n return self._json['coredata'].get('link', [])[2].get('@href')", "def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links", "async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)", "def getDiscussionLinks(self, json_info, tag_filter=[]):\n discussion_links = []\n for t in json_info['document']['data']:\n if(t['type'] == 'discussions'):\n id = (t['id'])\n slug = t['attributes']['slug']\n tags = []\n for tag in t['relationships']['tags']['data']:\n tags.append(int(tag['id']))\n \n if(len(tag_filter) == 0 or len(list(set(tag_filter) & set(tags))) > 0):\n discussion_links.append(\"https://fbtag.net/d/{id}-{slug}\".format(id=id, slug=slug))\n else:\n logging.debug(msg=(tags, 'not in filter ', tag_filter, 'link', id, slug))\n pass\n \n return discussion_links", "def parse_json_export(json_file):\n\n json_file.seek(0)\n links = json.load(json_file)\n json_date = lambda s: datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')\n\n for link in links:\n # example line\n # {\"href\":\"http:\\/\\/www.reddit.com\\/r\\/example\",\"description\":\"title here\",\"extended\":\"\",\"meta\":\"18a973f09c9cc0608c116967b64e0419\",\"hash\":\"910293f019c2f4bb1a749fb937ba58e3\",\"time\":\"2014-06-14T15:51:42Z\",\"shared\":\"no\",\"toread\":\"no\",\"tags\":\"reddit android\"}]\n if link:\n # Parse URL\n url = link.get('href') or link.get('url') or link.get('URL')\n if not url:\n raise Exception('JSON must contain URL in each entry [{\"url\": \"http://...\", ...}, ...]')\n\n # Parse the timestamp\n ts_str = str(datetime.now().timestamp())\n if link.get('timestamp'):\n # chrome/ff histories use a very precise timestamp\n ts_str = str(link['timestamp'] / 10000000) \n elif link.get('time'):\n ts_str = str(json_date(link['time'].split(',', 1)[0]).timestamp())\n elif link.get('created_at'):\n ts_str = str(json_date(link['created_at']).timestamp())\n elif link.get('created'):\n ts_str = str(json_date(link['created']).timestamp())\n elif link.get('date'):\n ts_str = str(json_date(link['date']).timestamp())\n elif link.get('bookmarked'):\n ts_str = str(json_date(link['bookmarked']).timestamp())\n elif link.get('saved'):\n ts_str = str(json_date(link['saved']).timestamp())\n \n # Parse the title\n title = None\n if link.get('title'):\n title = link['title'].strip() or None\n elif link.get('description'):\n title = link['description'].replace(' — Readability', '').strip() or None\n elif link.get('name'):\n title = link['name'].strip() or None\n\n yield {\n 'url': url,\n 'timestamp': ts_str,\n 'title': title,\n 'tags': link.get('tags') or '',\n 'sources': [json_file.name],\n }", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": 
\"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def readLinkoJson(file):\n with open(file, 'r') as jsonFile:\n preLinko = json.load(jsonFile)\n\n linko = Linkograph([], preLinko[0])\n\n for entry in preLinko[1:]:\n linko.append((set(entry[0]), set(entry[1]), set(entry[2])))\n linko.uuids.append(entry[3])\n\n return linko", "def getAllLinks(jsonData, propDict, refDict, prefix='', context=''):\n linkList = OrderedDict()\n # check keys in propertyDictionary\n # if it is a Nav property, check that it exists\n # if it is not a Nav Collection, add it to list\n # otherwise, add everything IN Nav collection\n # if it is a Complex property, check that it exists\n # if it is, recurse on collection or individual item\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['isNav']:\n insideItem = jsonData.get(item)\n if insideItem is not None:\n cType = propDict[key].get('isCollection') \n autoExpand = propDict[key].get('OData.AutoExpand',None) is not None or\\\n propDict[key].get('OData.AutoExpand'.lower(),None) is not None\n if cType is not None:\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n for cnt, listItem in enumerate(insideItem):\n linkList[prefix+str(item)+'.'+getType(propDict[key]['isCollection']) +\n '#' + str(cnt)] = (listItem.get('@odata.id'), autoExpand, cType, cSchema, listItem)\n else:\n cType = propDict[key]['attrs'].get('type')\n cSchema = refDict.get(getNamespace(cType),(None,None))[1]\n if cSchema is None:\n cSchema = context \n linkList[prefix+str(item)+'.'+getType(propDict[key]['attrs']['name'])] = (\\\n insideItem.get('@odata.id'), autoExpand, cType, cSchema, insideItem)\n for key in propDict:\n item = getType(key).split(':')[-1]\n if propDict[key]['realtype'] == 'complex':\n if jsonData.get(item) is not None:\n if propDict[key].get('isCollection') is not None:\n for listItem in jsonData[item]:\n linkList.update(getAllLinks(\n listItem, propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n else:\n linkList.update(getAllLinks(\n jsonData[item], propDict[key]['typeprops'], refDict, prefix+item+'.', context))\n rsvLogger.debug(str(linkList))\n return linkList", "def get_all_links(self):\n links_url = \"{}/links\".format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)", "def parse_link(self,data,api):\n return REACT_API_DOCS_URL + data.FILE.split('/')[1] + api.find('a',attrs = {'class': 'hash-link'}).attrs['href']", "def iter_links(self):", "def get_links(self):\n return self.__data['links']", "def extract_links(data):\n soup = BeautifulSoup(data)\n for link in soup.findAll(\"a\"):\n for pair in link.attrs:\n if pair[0] == u'href':\n yield pair[1]", "def _parse_links(self, item, start, links_list):\n result_list = []\n target_str_1 = start.strftime(\"%m-%d-%Y\").replace(\" 0\", \" \")\n target_str_2 = 
start.strftime(\"%m-%d-%y\").replace(\" 0\", \" \")\n for item in links_list:\n if item[\"date\"] in target_str_1 or item[\"date\"] in target_str_2:\n new_dict = {}\n new_dict[\"href\"] = item[\"href\"]\n new_dict[\"title\"] = item[\"title\"]\n result_list.append(new_dict)\n return result_list", "def test_get_variant_links(variant_obj):\n # GIVEN a variant object without links\n assert \"thousandg_link\" not in variant_obj\n # WHEN fetching the variant links\n links = get_variant_links(variant_obj)\n # THEN check that links are returned\n assert \"thousandg_link\" in links", "def testDereferenceLinks(self):\n ddict = {\"ext_group\": {\"dataset\": 10}}\n dictdump.dicttonx(ddict, self.h5_ext_fname)\n ddict = {\"links\": {\"group\": {\"dataset\": 10, \">relative_softlink\": \"dataset\"},\n \">relative_softlink\": \"group/dataset\",\n \">absolute_softlink\": \"/links/group/dataset\",\n \">external_link\": \"nx_ext.h5::/ext_group/dataset\"}}\n dictdump.dicttonx(ddict, self.h5_fname)\n\n ddict = dictdump.h5todict(self.h5_fname, dereference_links=True)\n self.assertTrue(ddict[\"links\"][\"absolute_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"relative_softlink\"], 10)\n self.assertTrue(ddict[\"links\"][\"external_link\"], 10)\n self.assertTrue(ddict[\"links\"][\"group\"][\"relative_softlink\"], 10)", "def _recursive_gh_get(href, items, password=None):\n response = GitHub._request('GET', href, token=password)\n response.raise_for_status()\n items.extend(response.json())\n if \"link\" not in response.headers:\n return\n # links = link_header.parse(response.headers[\"link\"])\n # rels = {link.rel: link.href for link in links.links}\n # if \"next\" in rels:\n # ghRelease._recursive_gh_get(rels[\"next\"], items)", "def get_json_urls(self):\n gi = GetImageURLs(self.json_url)\n self.urls = gi.get_image_url()\n\n # Turn it into a Python set\n self.urls_from_json = Set(self.urls)", "def _parse_links(self, item):\n regex = compile(r\"<a\\s+(?:[^>]*?\\s+)?href=([\\\"\\'])(.*?)\\1.*\\>(.*)<\\/a>\")\n links = [\n {\"href\": href, \"title\": title}\n for (_, href, title) in findall(regex, item[\"Event\"][\"Description\"])\n ]\n for link in links:\n if link[\"href\"][0] == \"/\":\n link[\"href\"] = \"https://www.pghschools.org\" + link[\"href\"]\n return links", "def get_image_links(data):\n painting_links = []\n\n print(data)\n\n for painting in data:\n painting_links.append(painting['image'])\n\n return painting_links", "def fetch_url_information(status_code):\n links = []\n result = {}\n obj = LinksInformation.objects.filter(status=status_code)\n for i in obj:\n links.append(i.link)\n result[\"result\"] = links\n json.dump(result, open(\"airlines/links.json\", \"w\"), indent=4)\n return result", "def links(self):\n\t\treturn self.list_of_links", "def result_urls(self, job_id: str, show_progress: bool = False) -> List:\n data = self.result_json(job_id, show_progress)\n urls = [x['href'] for x in data.get('links', []) if x['rel'] == 'data']\n return urls", "def get_data_link(self, ctx, params):\n # ctx is the context object\n # return variables are: link\n #BEGIN get_data_link\n id_ = _get_id_from_object(params, 'linkid', required=True)\n _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_data_link', ctx.log_info)\n dl = self._samples.get_data_link_admin(id_)\n link = _links_to_dicts([dl])[0]\n #END get_data_link\n\n # At some point might do deeper type checking...\n if not isinstance(link, dict):\n raise 
ValueError('Method get_data_link return value ' +\n 'link is not type dict as required.')\n # return the results\n return [link]", "def fetchJson(url):", "def extract_from_json_ld(self, data, url):\n\n scripts = data.xpath(\"//script[@type='application/ld+json']\")\n records = [ ]\n\n for scr in scripts:\n\n try:\n data = json.loads(scr.text)\n except:\n continue\n\n if not isinstance(data, dict):\n continue\n\n record = dict([ (k, v) for k, v in data.items() if k in self.store_fields ])\n if \"recipeIngredient\" not in record and \"ingredients\" in data:\n record[\"recipeIngredient\"] = data[\"ingredients\"]\n\n record[\"url\"] = url\n record[\"collect_time\"] = datetime.utcnow()\n\n if self.validate(record):\n records.append(record)\n\n return records", "def get_data_links_from_data(self, ctx, params):\n # ctx is the context object\n # return variables are: results\n #BEGIN get_data_links_from_data\n upa = _get_upa_from_object(params)\n dt = _get_datetime_from_epochmillseconds_in_object(params, 'effective_time')\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_data_links_from_data', ctx.log_info, skip_check=not params.get('as_admin'))\n links, ts = self._samples.get_links_from_data(\n _UserID(ctx[_CTX_USER]), upa, dt, as_admin=admin)\n results = {'links': _links_to_dicts(links),\n 'effective_time': _datetime_to_epochmilliseconds(ts)\n }\n #END get_data_links_from_data\n\n # At some point might do deeper type checking...\n if not isinstance(results, dict):\n raise ValueError('Method get_data_links_from_data return value ' +\n 'results is not type dict as required.')\n # return the results\n return [results]" ]
[ "0.64437044", "0.6328608", "0.6288369", "0.62582314", "0.5989727", "0.5896046", "0.58893055", "0.58317786", "0.58317786", "0.5802547", "0.5788355", "0.57476854", "0.5663333", "0.5635389", "0.5627574", "0.5625349", "0.56044173", "0.5590741", "0.55799675", "0.5561591", "0.5554465", "0.5546976", "0.55460906", "0.5545992", "0.5532719", "0.55235773", "0.55177546", "0.551553", "0.54738", "0.54729325" ]
0.66384387
0
Load the correct backend driver for data persistence.
def _load_driver(backend, **kargs):
    bk_module = importlib.import_module('backend', __package__)
    driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')
    return driver_cls(**kargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_backend(cls) -> IBackend:\n cls.Lock.acquire()\n try:\n return cls._load_backend()\n finally:\n cls.Lock.release()", "def set_backend(self, backend):\n if backend not in AVAILABLE_BACKENDS:\n raise StorageError(f'Unrecognized backend {backend}; use one of {AVAILABLE_BACKENDS}')\n if backend == 'tinydb':\n LOGGER.debug(\"Using TinyDB database as requested for %s\", self.name)\n self._backend = DB_TINYDB\n elif backend == 'sqlite':\n LOGGER.debug(\"Using SQLite database as requested for %s\", self.name)\n self._backend = DB_SQLITE\n elif backend == 'auto':\n if self._sqlite_storage.database_exists():\n LOGGER.debug(\"Using SQLite database in AUTO mode because one already exists for %s\", self.name)\n self._backend = DB_SQLITE\n else:\n LOGGER.debug(\"Using TinyDB (default) in AUTO because no database already exists for %s\", self.name)\n self._backend = DB_TINYDB", "def _load_driver_module(self):\n driver = get_dbapi_module(self.driver_module)\n exceptions.register(driver.DatabaseError)\n return driver", "def get_storage_backend(self):\n return self.client.info()['Driver']", "def _switch_backend(self, model_db):\n if model_db['backend_name'] != self.backend_name:\n backend = switch_backend(model_db['backend_name'])\n self.backend_name = backend.__name__\n self.backend_version = None\n if self.backend_name == 'keras':\n from ..backend import keras_backend\n self.backend = keras_backend\n elif self.backend_name == 'sklearn':\n from ..backend import sklearn_backend\n self.backend = sklearn_backend\n if hasattr(backend, '__version__'):\n check = self.backend_version != backend.__version__\n self.backend_version = backend.__version__\n if check and self.verbose > 0: # pragma: no cover\n sys.stderr.write('Warning: the backend versions'\n 'do not match.\\n') # pragma: no cover", "def load_backend(self):\r\n if self.current_presentation():\r\n presentation = self.current_presentation()\r\n\r\n # If current presentation is no existant (empty talk database)\r\n # use a default recording name.\r\n else:\r\n presentation = Presentation(title=unicode(\"default\"))\r\n\r\n initialized, self.recently_recorded_video = self.controller.load_backend(presentation)\r\n if initialized:\r\n return True\r\n else:\r\n return False # Error something failed while loading the backend\r", "def load_backend(backend: str | Type[Backend]) -> Type[Backend]:\n if isinstance(backend, type) and issubclass(backend, Backend):\n return backend\n elif isinstance(backend, str):\n try:\n backend = BUILTIN_BACKENDS[backend]\n except KeyError:\n raise ValueError(f'No such backend \"{backend}\"')\n p, m = backend.rsplit('.', 1)\n mod = importlib.import_module(p)\n attr = getattr(mod, m)\n if isinstance(attr, type) and issubclass(attr, Backend):\n return attr\n else:\n raise TypeError('Backend must be subclass of Backend class.')\n else:\n raise ValueError('Expecting string or Backend subclass.')", "def get_backend(self):\n return self.analyze_db_task(constants.TRAIN_DB).backend", "def driver_load(self, name):\r\n return AbstractServiceManager.service_load(self, name)", "def _getDriver(self):\n if not hasattr(self, '_driver'):\n with self._getDatasetLock:\n if not self.dataset or not self.dataset.GetDriver():\n self._driver = None\n else:\n self._driver = self.dataset.GetDriver().ShortName\n return self._driver", "def load_backend(name, options=None):\n if name is None:\n assert options is None\n return get_default()\n if options is None:\n options = {}\n if name not in _backends:\n raise UnknownBackend(name)\n try:\n res = 
_backends[name]()(**options)\n except Exception as e:\n raise LoadingError(name) from e\n return res", "def get_backend(self, name):\n if name == DATABASE_TYPE_MYSQL:\n ret = 2\n elif name == DATABASE_TYPE_POSTGRESQL:\n ret = 3\n elif name == DATABASE_TYPE_SQLITE:\n ret = 4\n # sqlcoder: this assignment fixes unicode problems for me with sqlite (windows, cp1252)\n # feel free to remove or improve this if you understand the problems\n # better than me (not hard!)\n Charset.not_needed1, Charset.not_needed2, Charset.not_needed3 = True, True, True\n else:\n raise ValueError('Unsupported database backend: %s' % self.supported_databases[name].db_server)\n\n return ret", "def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)", "def driver(self):\n return self.rpc.call(MsfRpcMethod.DbDriver, [{}])['driver']", "def _get_backend(args):\n if args.backend == 'gatttool':\n backend = GatttoolBackend\n elif args.backend == 'bluepy':\n backend = BluepyBackend\n elif args.backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(args.backend))\n return backend", "def load(self):\n with self.__lock:\n self._d.update(self.backend.load())\n log.debug(\"load: {}\".format(self.backend.filename))", "def driver(self) -> GraphDatabase.driver:\n raise NotImplementedError\n # if not self._driver:\n # self._driver = GraphDatabase.driver(\n # self.url,\n # auth=(self.username, self.password),\n # )\n #\n # return self._driver", "def get_backend():\n return Connection()", "def _load_backend(backend: str) -> types.ModuleType:\n from importlib.metadata import entry_points\n\n if backend == \"matplotlib\":\n # Because matplotlib is an optional dependency and first-party backend,\n # we need to attempt an import here to raise an ImportError if needed.\n try:\n module = importlib.import_module(\"pandas.plotting._matplotlib\")\n except ImportError:\n raise ImportError(\n \"matplotlib is required for plotting when the \"\n 'default backend \"matplotlib\" is selected.'\n ) from None\n return module\n\n found_backend = False\n\n eps = entry_points()\n key = \"pandas_plotting_backends\"\n # entry_points lost dict API ~ PY 3.10\n # https://github.com/python/importlib_metadata/issues/298\n if hasattr(eps, \"select\"):\n entry = eps.select(group=key) # pyright: ignore[reportGeneralTypeIssues]\n else:\n # Argument 2 to \"get\" of \"dict\" has incompatible type \"Tuple[]\";\n # expected \"EntryPoints\" [arg-type]\n entry = eps.get(key, ()) # type: ignore[arg-type]\n for entry_point in entry:\n found_backend = entry_point.name == backend\n if found_backend:\n module = entry_point.load()\n break\n\n if not found_backend:\n # Fall back to unregistered, module name approach.\n try:\n module = importlib.import_module(backend)\n found_backend = True\n except ImportError:\n # We re-raise later on.\n pass\n\n if found_backend:\n if hasattr(module, \"plot\"):\n # Validate that the interface is implemented when the option is set,\n # rather than at plot time.\n return module\n\n raise ValueError(\n f\"Could not find plotting backend '{backend}'. 
Ensure that you've \"\n f\"installed the package providing the '{backend}' entrypoint, or that \"\n \"the package has a top-level `.plot` method.\"\n )", "def _init_driver(self, shard_id):\n shard = self._shards_ctrl.get(shard_id, detailed=True)\n conf = utils.dynamic_conf(shard['uri'], shard['options'])\n return utils.load_storage_driver(conf, self._cache)", "def load_backend(backend_name):\n try:\n module_bits = backend_name.split(\".\")\n klass = module_bits.pop()\n return getattr(import_module(\".\".join(module_bits)), klass)\n except ImportError as e_user:\n # The nlp backend wasn't found. Display a helpful error message\n # listing all built-in nlp backends.\n backend_dir = str(Path(__file__).parent / 'backends')\n available_backends = [\n name for _, name, ispkg in pkgutil.iter_modules([backend_dir])\n if ispkg and name not in {'base'}\n ]\n if backend_name not in [\n 'poetaster.nlp.backends.%s' % b for b in available_backends\n ]:\n backend_reprs = map(repr, sorted(available_backends))\n raise ImproperlyConfigured(\n \"%r isn't an available nlp backend.\\n\"\n \"Try using 'poetaster.nlp.backends.X', where X is one of:\\n\"\n \" %s\" % (backend_name, \", \".join(backend_reprs))\n ) from e_user\n else:\n # If there's some other error, this must be an error in Django\n raise", "def get_backend_from_coredata(builddir: Path) -> str:\n return coredata.load(str(builddir)).get_builtin_option('backend')", "def get_backend(\n self,\n backend_id: str,\n ) -> Optional[Type[BaseCertificateStorageBackend]]:\n return self.get('backend_id', backend_id)", "def backends():\n return list(loader.backend_dict.keys())\n # return loader._preference", "def get_driver(drv):\n return GenericDriver.get_driver(drv)", "def load_backend(self, presentation=None):\r\n initialized, filename_for_frontend = self.media.load_backend(presentation)\r\n if initialized:\r\n return True, filename_for_frontend\r\n else:\r\n return False # Error something failed while loading the backend\r", "def get_backend():\n return _BACKEND", "def _get_storage_backend(fq_classname):\n LOG.debug('Running _get_storage_backend with fq_classname [%s]'\n % fq_classname)\n\n if not fq_classname:\n return None\n\n (modname, clname) = fq_classname.rsplit('.', 1)\n # A test import of the backend storage class should have been undertaken\n # at app startup in django_drf_filepond.apps.ready so any failure\n # importing the backend should have been picked up then.\n mod = importlib.import_module(modname)\n storage_backend = getattr(mod, clname)()\n LOG.info('Storage backend instance [%s] created...' % fq_classname)\n\n return storage_backend", "def backend_name(self) -> str:\n return self._db_data.backend", "def which_backend(self, backend_name, type_name, conf):\n print(\"backend_name is : <{}>\".format(backend_name))\n if backend_name not in self.records.keys():\n print(\"first get object\")\n self.port_obj = PortFactory.backends.get(backend_name)(type_name, conf)\n print(\"get object from factory : {}\".format(self.port_obj))\n self.records[backend_name] = [type_name]\n else:\n print(\"re-init get object\")\n self.port_obj.reinit(type_name,conf)\n self.records[backend_name].append(type_name)\n print(\"factory records: {}\".format(self.records))\n return self.port_obj" ]
[ "0.64767873", "0.6454533", "0.6352563", "0.6303405", "0.6164173", "0.59922636", "0.5975386", "0.58862835", "0.58696896", "0.5819013", "0.58041245", "0.5792222", "0.5769137", "0.575706", "0.5755538", "0.5722598", "0.5693296", "0.5691711", "0.5683557", "0.5676043", "0.5659401", "0.56327343", "0.5607392", "0.5552141", "0.5546877", "0.554518", "0.5541243", "0.55331874", "0.55314845", "0.5504782" ]
0.7439424
0
Get the table name to save data from the url.
def _get_table_name(url):
    try:
        return urlparse(url).path.strip('/').split('/')[1]
    except IndexError:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name() -> str:\n pass", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def get_table_name(self):\n return self._table", "def get_tablename(self):\n return self.ds_table", "def getTableByName(self, tablename):\n pass", "def table(self):\n return self._table_name", "def table_name(self):\n return self._new_table.name", "def table_name(self) -> str:\n return self.model._meta.db_table", "def create_table_url(self, table_id):\n return self.base_url + \"/table?table=\" + str(table_id)", "def tablename(entity) -> str:\n return entity.__tablename__", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def construct_bq_table_path(table_name: str) -> str:\n if not re.match(r'^\\w+$', table_name):\n raise ValueError(\n f'{table_name} should contain only letters, numbers and underscore.')\n\n return '{}.{}.{}'.format(\n get_airflow_variable('dest_project'),\n get_airflow_variable('dest_dataset'), table_name)", "def get_tablepath(self, groupname, tablename):\n return '/' + groupname + '/' + tablename", "def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")", "def tableName():\n return \"people\"", "def save(self, response):\n url = response.url\n if self.item_url(url):\n table_name = self._get_table_name(url)\n if table_name:\n data = response.json()\n self.backend.save(table_name, data)", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def get_table_name(model_id: Text) -> Text:\n return model_id if not cfg.app.db.schema else cfg.app.db.schema + \".\" + model_id", "def get_table_from_dataset_path(ds_path: str):\n return ds_path.split(\".\")[0].split(\"/\")[-1]", "def table_name(class_):\n try:\n return class_.__tablename__\n except AttributeError:\n return class_.__table__.name", "def split_table_name(table):\n\n if 'exch_' not in table:\n return None, None\n\n # table1: exch_bitstamp_btcusd_snapshot_20170908\n # table2: exch_btcc_spot_btccny_snapshot_20170908\n table = table.split('_', 1)[1]\n table = table.rsplit('_', 2)[0]\n tick = table.rsplit('_', 1)[1]\n exchange_name = table.rsplit('_', 1)[0]\n\n return exchange_name, tick", "def get_table_name_from_model(model):\n return \"{0};{1}\".format(model._meta.app_label, model._meta.model_name)", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def test_table_name(self):\n obs = SampleTemplate._table_name(self.test_study.id)\n self.assertEqual(obs, \"sample_1\")", "def get_table(tname, request):\n pyramid_sacrud_models = get_models_from_settings(request)\n try:\n models = dict(pyramid_sacrud_models)\n except ValueError:\n models = dict((pyramid_sacrud_models, ))\n finally:\n models = 
models.values()\n\n tables = itertools.chain(*[model for model in models if model])\n tables = [\n table for table in tables\n if (table.__tablename__).lower() == tname.lower()\n and table\n ]\n if not tables:\n return None\n return tables[0]", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"" ]
[ "0.72730315", "0.6656085", "0.6656085", "0.6656085", "0.65448517", "0.64656055", "0.6451091", "0.6374937", "0.6281148", "0.6230515", "0.61168766", "0.60657483", "0.6009189", "0.5981553", "0.5945678", "0.5933731", "0.5901814", "0.5891688", "0.5882904", "0.58783686", "0.58702534", "0.58645785", "0.5856004", "0.58465433", "0.58392376", "0.58331996", "0.5789683", "0.5780634", "0.57715046", "0.57299197" ]
0.7577692
0
Save data from the response to the backend persistence driver. Only save the detail item from a url, filter out the overall items like
def save(self, response):
    url = response.url
    if self.item_url(url):
        table_name = self._get_table_name(url)
        if table_name:
            data = response.json()
            self.backend.save(table_name, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) .detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list", "def _crawler_result(item, response, spider):\n output_data.clear()\n output_data.append(dict(item))", "def save_data(self, soup, url):\n # get the web page title\n title = soup.find('title').string\n # get the h1 tag of the page\n h1 = soup.find('h1')\n # checks if there is a h1 tag in the page\n # because is possible that a product url redirects to\n # another page.\n # In this way, only a valid product will be save.\n if h1:\n product_name = h1.contents[0].string\n page_values = PageValues(product_name, title, url, self.__csv_file_name)\n page_values.save_csv()\n else:\n # Shows the web page that have some problem.\n print('It was not possible to open {}'.format(url))", "def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item", "def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n 
tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def fetch_and_save(cls, url, path):\n content = cls.fetch_with_retry(url)\n if not content:\n return False\n # print(\"Saving {}\".format(os.path.basename(path)))\n with open(path, \"wb\") as file:\n file.write(content)\n return content", "def save(self, url):\n self.database.insert({\n 'url': url,\n 'last_crawled': None,\n 'valid': True,\n 'sub_urls': [],\n })", "def save_data(db, dict_key, url, data_to_store):\n if dict_key not in db:\n db[dict_key] = []\n data = db[dict_key]\n data.append({\n 'url': url,\n 'data': data_to_store,\n })\n db[dict_key] = data", "def process_item_data(self, db, ref, response):\n raise Exception(\"To be implemented\")", "def store_item(self, item_in_json):\n item = item_in_json.copy()\n if pecan.request.headers.get('X-Public-Key'):\n if 'metadata' not in item:\n item['metadata'] = {}\n item['metadata']['public_key'] = \\\n pecan.request.headers.get('X-Public-Key')\n test_id = db.store_results(item)\n LOG.debug(item)\n return {'test_id': test_id,\n 'url': CONF.api.test_results_url % test_id}", "def process_item(self, item, spider):\n item['url'] = spider.config['site_domain'] + item[\"url\"]\n item[\"rating\"] = extract_rating(item[\"rating\"])\n item['price'] = get_price(item['price_integer'], item[\"price_decimal\"])\n item['no_discount_price'] = get_price(item['no_discount_price_integer'], item[\"no_discount_price_decimal\"])\n item[\"brand\"] = get_brand(item[\"brand\"])\n item[\"number_of_ratings\"] = get_number_of_ratings(item[\"number_of_ratings\"])\n del item['price_integer']\n del item['price_decimal']\n del item['no_discount_price_integer']\n del item[\"no_discount_price_decimal\"]\n return item", "def parse_item(self, response):\n item = IphoneSpiderItem()\n\n item['sku'] = response.meta.get('sku')\n item['price'] = response.meta.get('price')\n item['name'] = response.meta.get('name')\n item['seller'] = response.meta.get('seller')\n #pass the data from parse to parse_item\n\n url = response.url\n model = response.xpath('//*[@id=\"crumb-wrap\"]/div/div[1]/div[9]/text()').extract_first()\n color = response.xpath('//div[@data-type=\"颜色\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/i/text()').extract_first()\n memory = response.xpath('//div[@data-type=\"版本\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n memory2 = response.xpath('//div[@data-type=\"内存\"]/div[@class=\"dd\"]/div[contains(@class, \"selected\")]/a/text()').extract_first()\n #memory data can be stored in 版本 or 内存\n\n if memory2:\n memory = memory2.strip()\n elif memory:\n memory = memory.strip()\n\n item['model'] = model\n item['color'] = color\n item['memory'] = memory\n item['url'] = url\n\n return item", "def save(self, scraper):\n entry = HistoryEntry(scraper.url, scraper.response)\n self.load_history_entries(entry)", "def _save_SERP(\n self, response: Union[SplashJsonResponse, ScrapyHttpResponse, ScrapyTextResponse]\n ) -> None:\n\n scraped_page = ScrapedPage(\n timestamp=self.timestamp,\n source=self.source,\n merchant=self.merchant,\n country=self.country,\n url=response.url,\n 
html=response.body.decode(\"utf-8\"),\n page_type=PageType.SERP.value,\n category=response.meta.get(\"category\"),\n gender=response.meta.get(\"gender\"),\n consumer_lifestage=response.meta.get(\"consumer_lifestage\"),\n meta_information=response.meta.get(\"meta_data\"),\n )\n\n self.message_queue.add_scraping(table_name=self.table_name, scraped_page=scraped_page)", "def process_item(self, item, spider):\n if item is None:\n raise DropItem(\"Something went wrong in parsing data...\")\n try:\n self.curr.execute(\n SqlStatements.insert_new_real_estate(),\n (\n item['listing_type'],\n item['property_type'], \n item['price'], \n item['location_city'], \n item['location_city_district'], \n item['area_property'],\n item['area_land'],\n item['construction_type'],\n item['num_floors_building'],\n item['apartment_floor'],\n item['registered'],\n item['heating_type'],\n item['num_rooms'],\n item['num_bathrooms'],\n item['source']\n )\n )\n self.conn.commit()\n except Exception as e:\n print(e)\n self.conn.rollback()\n return item\n self._log_progress()\n return item", "def process_item(self, item, spider):\n\n url = item['url']\n iso_code = item['iso_code']\n result = self.item_data_store.get_item(url, iso_code)\n\n if result.data is not None:\n raise DropItem(\n f'Resource already indexed for language {iso_code}: {url}')\n\n create_result = self.item_data_store.create_item(item)\n\n if create_result.has_error():\n self.logger.error('\\n'.join(create_result.messages))\n\n return item", "def put_response(self, item):\n self.export.put_response(item)", "def store_item(self, item_in_json): # pragma: no cover\n raise NotImplementedError", "def save_item(self):\r\n raise NotImplementedError(\"Function not implemented, please implement in sub class\")", "def process_item(self, item, spider):\n session = self.Session()\n article = Article()\n restaurant = Restaurant()\n\n # populate article\n article.url = item['article_url']\n article.title = item['article_title']\n article.datetime = item['article_datetime']\n \n # populate restaurant\n restaurant.name = item['restaurant_name']\n restaurant.slug = item['restaurant_slug']\n restaurant.address = item['restaurant_address']\n restaurant.googlemaps_url = item['restaurant_googlemaps']\n restaurant.googlemaps_id = parse_googlemaps_id(restaurant.googlemaps_url)\n restaurant.lat = parse_lat(restaurant.googlemaps_url)\n restaurant.lng = parse_lng(restaurant.googlemaps_url)\n\n # determine if new article\n exist_article = session.query(Article).filter_by(url = article.url).first()\n if exist_article: \n article = exist_article\n\n # determine if new restaurant\n exist_restaurant = session.query(Restaurant).filter_by(slug = restaurant.slug).first()\n if exist_restaurant: \n restaurant = exist_restaurant\n if article not in restaurant.articles: \n restaurant.articles.append(article)\n else:\n # geocode for lat lng if necessary\n if restaurant.googlemaps_id: \n restaurant.lat, restaurant.lng, restaurant.address = convert_id(restaurant.googlemaps_id)\n # add article to restaurant.articles\n restaurant.articles.append(article)\n\n try:\n session.add(restaurant)\n session.commit()\n\n except:\n session.rollback()\n raise\n\n finally:\n session.close()\n\n return item", "def save_data(url, file):\n with open(file, 'w') as f:\n json.dump(get_json_data(url), f)", "def download(self, item, save_dir='./'):\r\n try:\r\n os.makedirs(save_dir)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST and os.path.isdir(save_dir):\r\n # another thread beat us to creating this 
dir\r\n pass\r\n else:\r\n # target dir exists as a file, or a different error\r\n raise\r\n\r\n item['url'] = item[item['type'] + 's']['standard_resolution']['url'].split('?')[0]\r\n # remove dimensions to get largest image\r\n item['url'] = re.sub(r'/s\\d{3,}x\\d{3,}/', '/', item['url']) \r\n\r\n base_name = item['url'].split('/')[-1]\r\n file_path = os.path.join(save_dir, base_name)\r\n\r\n if not os.path.isfile(file_path):\r\n\r\n with open(file_path, 'wb') as file:\r\n try:\r\n bytes = requests.get(item['url']).content\r\n except requests.exceptions.ConnectionError:\r\n\t\t\t\t\tsleep(5)\r\n\t\t\t\t\tbytes = requests.get(item['url']).content\r\n\t\t\t\t\t\r\n file.write(bytes)\r\n\r\n file_time = int(item['created_time'])\r\n os.utime(file_path, (file_time, file_time))", "def parse_product(self, resp):\n loader = ItemLoader(item=EstateProperty(), response=resp)\n loader.add_value(\"url\", resp.request.url)\n\n # for the standard fields, extraction is straight forward\n for field, xpath in list(self.standard_fields.items()):\n loader.add_xpath(field, xpath)\n\n # exclude items where price is blank\n # may correspond to rentals\n price = resp.xpath(self.standard_fields['price']).extract_first()\n if price is None or price.strip()==\"\":\n # mark the item as dirty\n # to avoid sending it\n loader.add_value('is_dirty', True)\n\n # some items' titles are stored in a legacy path\n title = resp.xpath(self.standard_fields['title']).extract_first()\n if title is None or title.strip()==\"\":\n # try another way\n title = resp.xpath(self.special_fields['title_legacy']).extract_first()\n if title is None or title.strip()==\"\":\n # mark it dirty\n loader.add_value('is_dirty', True)\n else:\n loader.add_value('title', title)\n\n # sku is preprended by dirty text\n sku_dirty = resp.xpath(self.special_fields['sku']).extract_first()\n try:\n m = re.search(r'\\s{0,}\\S{3}\\s{1,}(?P<ref>.+)\\s{0,}', sku_dirty)\n loader.add_value('sku', m.group('ref'))\n except Exception as e:\n self.logger.error(e)\n loader.add_value('is_dirty', True)\n\n area_dirty = resp.xpath(self.special_fields['area']).extract_first()\n try:\n m = re.search(r'(?P<area>\\d+)\\sm.+', area_dirty)\n float_area = float(m.group('area'))\n loader.add_value('area', float_area)\n except Exception as e:\n self.logger.error(e)\n # parsing error on area is not a cause of dirty item\n\n yield loader.load_item()", "def process_item(self, item, spider):\n session = self.Session()\n real = Reals(**item)\n\n try:\n session.add(real)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item", "def extract_data():\n books = WebScraper().get_top_100_data()\n time.sleep(2)\n BookDetailsWebScrapper().save_book_details(books)\n _save_extract_state(books)", "def get_detail(self, appid):\n item = {}\n detail = self.details(appid)\n if not detail.docV2.docid:\n raise AppNotFoundError(appid)\n item[\"appid\"] = appid\n item[\"version_code\"] = detail.docV2.details.appDetails.versionCode\n item[\"offer_type\"] = detail.docV2.offer[0].offerType\n category = detail.docV2.details.appDetails.appCategory[0]\n item[\"category_id\"] = CATEGORY_MAP[category]\n item[\"description\"] = detail.docV2.descriptionHtml\n # detect the string language from description, return ISO 639-1 language code.\n item[\"lang\"] = unicode(guess_language(item[\"description\"] or 'en'))\n item[\"developer\"] = detail.docV2.details.appDetails.developerName\n item[\"group\"] = GROUP_MAP.get(detail.docV2.details.appDetails.appType) or 'app'\n 
item[\"icon\"] = [img.imageUrl for img in detail.docV2.image if img.imageType == 4][0]\n item[\"is_deleted\"] = False\n item[\"name\"] = detail.docV2.title\n # for url seo\n name = re.sub(ur\"\"\"\\$|\\%|\\(|\\)|\\[|\\[|\\]|\\*|\\ |\\®|\\#|\\~|\\`|\\@|\\^|\\&|\\{|\\}|\\<|\\>|\\?|\\\"|\\'|\\’|\\–|\\:|\\;|\\||\\/|\\+|\\!|\\•|\\,|\\™|\\_|\\.\"\"\", '-', item['name'])\n name_url = urllib.quote(name.encode('utf-8'))\n if \"%\" not in name_url:\n item['name_url'] = name_url\n item[\"operating_systems\"] = \"\"\n item[\"order\"] = 0\n item[\"rating\"] = detail.docV2.aggregateRating.starRating\n item['rating_user'] = humanize.intcomma(detail.docV2.aggregateRating.ratingsCount)\n\n total_count = detail.docV2.details.appDetails.numDownloads\n item[\"total_count\"] = remove_downloads(total_count)\n item[\"download_count\"] = strCount_to_intCount(total_count)\n\n item[\"release_time\"] = detail.docV2.details.appDetails.uploadDate\n item[\"screenshot\"] = [img.imageUrl for img in detail.docV2.image if img.imageType == 1]\n item[\"update_info\"] = detail.docV2.details.appDetails.recentChangesHtml\n item[\"version\"] = detail.docV2.details.appDetails.versionString\n item[\"offer_type\"] = detail.docV2.offer[0].offerType\n item[\"size\"] = humanize.naturalsize(detail.docV2.details.appDetails.installationSize, gnu=True)\n item[\"source\"] = 'crawler'\n item[\"channel\"] = 'googleplay'\n item[\"price\"] = detail.docV2.offer[0].formattedAmount.lower()\n item[\"paid\"] = 1\n item[\"search_order\"] = 0\n item[\"search_reindex\"] = 1\n item['app_status'] = 0\n\n return item", "def store_feed(e):\n query = WebResource.query().filter(WebResource.url == e[\"link\"])\n if query.count() == 0:\n print \"STORING: \" + e[\"link\"]\n try:\n if 'summary' in e:\n s, t = BeautifulSoup(e['summary'], \"lxml\"), BeautifulSoup(e['title'], \"lxml\")\n e['summary'], e['title'] = s.get_text(), t.get_text()\n else:\n t = BeautifulSoup(e['title'], \"lxml\")\n e['summary'], e['title'] = None , t.get_text()\n k = WebResource.store_feed(e)\n print \"STORED: \" + str(k)\n return k\n except Exception as e:\n print \"Cannot Store: \" + str(e)\n return None\n else:\n print \"Resource already stored\"\n return None", "def extract(self, response):\n\n #grab the BusinessItem passed in from the caller\n i = None\n try:\n i = response.meta['item']\n except Exception:\n i = BusinessItem()\n\n log.msg('passed in item={0}'.format(i), log.DEBUG)\n\n l = BusinessLoader(item=i, response=response)\n\n #Assume url pattern is /<addressLocality>/<category>/<duid>/<name>.html\n data_uid = re.match(pattern=u'.*COMPANYID=(\\d+)$', string=response.url).group(1).lstrip('0')\n\n l.add_xpath('description', '//*[@id=\"ctl00_ctl00_body_maincontentblock_lblProductandServices\"]/ text()')\n\n #List of strings which, when joined, form the address. 
form is <streetAddress>, <optional: streetAddress>, <addressLocality and state and postalCode>\n address_fields = response.xpath('//*[@id=\"ctl00_ctl00_body_maincontentblock_lblcoAddress\"]/ text()').extract()\n m = re.match(pattern=u'^([\\w\\s]*),\\s+([\\w\\s]+)[\\xa0]+(\\S+)$', string=address_fields[-1])\n\n l.add_value('streetAddress', address_fields[0])\n\n if len(address_fields) is 3:\n l.add_value('streetAddress', address_fields[1])\n\n l.add_value('addressLocality', m.group(1))\n l.add_value('addressRegion', m.group(2))\n l.add_value('postalCode', m.group(3))\n\n #Extract any social media links\n social_media_links = response.xpath('//table[@id=\"ctl00_ctl00_body_maincontentblock_gvSocialMedia\"]//a/ @href').extract()\n for link in social_media_links:\n if 'linkedin.com' in link:\n l.add_value('linkedin', unicode(link))\n elif 'twitter.com' in link:\n l.add_value('twitter', unicode(link))\n elif 'facebook.com' in link:\n l.add_value('facebook', unicode(link))\n\n l.add_value(\"data_uid\", unicode(data_uid))\n l.add_value(\"data_url\", unicode(response.url))\n\n return l.load_item()", "def save_response(self, request, response):\n response_dict = self.process_response(request.path, response)\n try:\n self.ser.info(pickle.dumps(response_dict))\n self.ser.info(RESPONSE_UNIQUE_STRING)\n except (TypeError, pickle.PicklingError):\n #Can't pickle wsgi.error objects\n pass" ]
[ "0.5922734", "0.5857825", "0.5833559", "0.5798279", "0.5555864", "0.55542606", "0.5488991", "0.5482613", "0.54276925", "0.5380093", "0.53528607", "0.5344608", "0.53070605", "0.5304953", "0.5301043", "0.5283759", "0.52696395", "0.5248865", "0.52482927", "0.5235908", "0.5224081", "0.51554215", "0.5148963", "0.51228595", "0.5104435", "0.5101055", "0.50889176", "0.50864196", "0.5068238", "0.50658524" ]
0.690983
0
Tests that a keyvault with 0 access policies is processed properly and doesn't raise an exception.
def test_whitelist_zero_access_policies(self):
    p = self.load_policy({
        'name': 'test-key-vault',
        'resource': 'azure.keyvault',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'glob',
             'value_type': 'normalize',
             'value': 'cckeyvault2*'},
            {'not': [
                {'type': 'whitelist',
                 'key': 'principalName',
                 'users': ['[email protected]']}
            ]}
        ]
    })
    resources = p.run()
    self.assertEqual(len(resources), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_authz_file_empty_raises(self):\n self.env.config.set('authz_policy', 'authz_file', '')\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "async def test_get_access_requests_no_envars(self):\n with self.sys_exit_patch:\n with self.assertRaises(SystemExit):\n await self.inst._get_access_requests(\n \"test-container\"\n )\n self.sys_exit_mock.assert_called_once()", "def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_all_sequential_open_distribution_no_access(self, has_access):\r\n has_access.return_value = False\r\n response = views.all_sequential_open_distrib(self.request, 'test/test/test')\r\n\r\n self.assertEqual(\"{\\\"error\\\": \\\"Access Denied: User does not have access to this course\\'s data\\\"}\", response.content)", "def test_invalid_access_key(self):\r\n data = {\r\n \"EdX-ID\": self.receipt_id,\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test testing:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('Access key invalid', response.content)\r\n self.assertEqual(response.status_code, 400)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_001_unauthorized_access(self):\n false_token = \"12345\"\n self.info(\"Will use token %s\", false_token)\n client = ComputeClient(self.clients.compute_url, false_token)\n client.CONNECTION_RETRY_LIMIT = self.clients.retry\n\n with self.assertRaises(ClientError) as cl_error:\n client.list_servers()\n self.assertEqual(cl_error.exception.status, 401)", "def test_assessor_access_limited(self):\n assessor = get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n self.assertFalse(get_user_assessor_groups(assessor))\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 
'conditionID': [self.condition.pk],\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions')\n ]\n urls_post_allowed = [\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.sodar_uuid)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_403()", "def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403", "def test_section_problem_grade_distribution_no_access(self, has_access):\r\n has_access.return_value = False\r\n response = views.section_problem_grade_distrib(self.request, 'test/test/test', '1')\r\n\r\n self.assertEqual(\"{\\\"error\\\": \\\"Access Denied: User does not have access to this course\\'s data\\\"}\", response.content)", "def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200", "def test_call_bad_perms(self):\r\n self.assertRaises(ValueError, self.cs_overview, -1)", "def test_vault_create_authorization_for_vault_section(self):\n pass", "def test_get_cart_items_unauthorized(self):\n with self.assertRaises(ResourceAccessError):\n self.cart_item_manager.get_cart_items('123', '1')", "def test_all_functions_auth_failure(self):\r\n \r\n auth = {'username':'tester', 'api_key':'api_key'}\r\n\r\n # Indicate no user record was found with the provided auth info.\r\n interface.get_user_with_api_key = mock_raises_DoesNotExistError\r\n \r\n try:\r\n proxy.renew_resources(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.acquire_resources(auth, {})\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.acquire_specific_vessels(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n \r\n try:\r\n proxy.release_resources(auth, [])\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_resource_info(auth)\r\n except 
xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_account_info(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")\r\n\r\n try:\r\n proxy.get_public_key(auth)\r\n except xmlrpclib.Fault, e:\r\n self.assertEqual(e.faultCode, views.FAULTCODE_AUTHERROR)\r\n else:\r\n self.fail(\"Expected an exception.\")", "def test_all_problem_grade_distribution_no_access(self, has_access):\r\n has_access.return_value = False\r\n response = views.all_problem_grade_distribution(self.request, 'test/test/test')\r\n\r\n self.assertEqual(\"{\\\"error\\\": \\\"Access Denied: User does not have access to this course\\'s data\\\"}\", response.content)", "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def test_check_keys_exist_for_provider_string(self):\n\n secret_key = None\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_get_vault_pubkeys(self):\n pass", "def test_whitelist(self):\n p = self.load_policy({\n 'name': 'test-key-vault',\n 'resource': 'azure.keyvault',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'glob',\n 'value_type': 'normalize',\n 'value': 'cckeyvault1*'},\n {'not': [\n {'type': 'whitelist',\n 'key': 'principalName',\n 'users': ['[email protected]']}\n ]}\n ]\n })\n resources = p.run()\n self.assertEqual(len(resources), 1)", "def test40_check_authz(self):\n # auth disabled\n LDPHandler.no_auth = True\n h = mockedLDPHandler()\n h.check_authz(None, 'write')\n # auth enabled, no admin\n LDPHandler.no_auth = False\n h = mockedLDPHandler()\n self.assertRaises(HTTPError, h.check_authz, LDPRS('uri:a'), 'write')", "def check_vault_access(self, did, access_vault=None):\n info = self.get_vault_service(did)\n if not info:\n raise VaultNotFoundException()\n\n # INFO: no need check permission.\n # if (access_vault == VAULT_ACCESS_WR or access_vault == VAULT_ACCESS_DEL) \\\n # and info[VAULT_SERVICE_STATE] == VAULT_SERVICE_STATE_FREEZE:\n # raise ForbiddenException(msg=\"The vault can't be written.\")", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def test_excessive_Sigops(self):\n logging.info(\"Entered : test_excessive_Sigops \\n\")\n try:\n testExcessiveSigops(self)\n except (Exception, JSONRPCException) as e1:\n logging.info(e1)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n raise TestAssertionError({\"file_name\": fname, \"line_num\": exc_tb.tb_lineno, \\\n \"error_type\": exc_type.__name__, \"error_msg\": str( e1 ), \\\n \"n1\" : \"N/A\", \"n2\" : \"N/A\", \"amount\" : \"N/A\", \"numsig\" : \"N/A\"})", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n 
ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def check_for_no_privates(context):\n json_data = context.response.json()\n\n if \"component_analyses\" in json_data:\n vulnerabilities = json_data['component_analyses']['vulnerability']\n for v in vulnerabilities:\n assert \"cvss\" in v\n assert \"is_private\" in v\n assert \"vendor_cve_ids\" in v\n if v[\"is_private\"]:\n raise Exception(\"Private vulnerability found\")", "def test_get_authz_file_notdefined_raises(self):\n 
self.env.config.remove('authz_policy', 'authz_file')\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)" ]
[ "0.615517", "0.6075056", "0.6071155", "0.6068709", "0.59903747", "0.5970702", "0.59659946", "0.5951002", "0.5945882", "0.5927157", "0.59155506", "0.5856525", "0.58519167", "0.5801739", "0.5789592", "0.5782919", "0.5781346", "0.577808", "0.5775319", "0.576997", "0.57695985", "0.5769285", "0.5766115", "0.573969", "0.57388294", "0.57350534", "0.5731577", "0.57238805", "0.57086813", "0.5697845" ]
0.7272632
0
Filters a list of elements. 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'elements' is the list of elements to filter. Returns a list containing only those elements for which 'select' returns True.
def filter(self, viewer, parent, elements): return [e for e in elements if self.select(viewer, parent, e)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select(self, viewer, parent, element):\n\n return True", "def filter_by_reviewers(reviews, selected_reviewers):\n return [x for x in reviews if x.reviewer in selected_reviewers]", "def validate(elements):\n return list(filter(lambda el: el.is_valid, elements))", "def select(elements, val=True):\n for el in elements:\n el.select_set(val)", "def hide_show_elements(driver: webdriver, elements: list, hide: bool = None) -> None:\n for element_locator in elements:\n locator_type, locator_value = element_locator\n element_list = get_element(driver, locator_value, locator_type, many=True)\n if element_list:\n for element in element_list:\n display_element(driver, element, hide)", "def order_filter(self,elements):", "def find_elements(self, locator, parent=None):\n return self._element_finder.find(locator, first_only=False,\n required=False, parent=parent)", "def find_elements(self, elements: List[WebElement]) -> List[WebElement]:\n return elements", "def filter(self, *args):\n return _libsbml.ElementFilter_filter(self, *args)", "def query_parent(selectors, tree_item):\n return [subitem for subitem in iterate_parent(tree_item)\n if all(selectors, subitem)]", "def filter_selection_set(info: GraphQLResolveInfo):\n from graphql import Location\n from .pyutils import unfreeze\n\n excluded_field_nodes = []\n\n def _should_include(field_node: FieldNode):\n if not field_node.name:\n # Unknown field_node type\n return True\n if field_node.name.value == \"subscription_id\":\n return True\n\n # Location is a highly nested AST type\n excluded_field_nodes.append(unfreeze(field_node, ignore_types=[Location]))\n return False\n\n info.field_nodes[0].selection_set.selections = [\n x for x in info.field_nodes[0].selection_set.selections if _should_include(x)]\n\n return excluded_field_nodes", "def filter(self, row):\r\n return list(itertools.compress(row, self.selectors))", "def filter(self, filters):", "def selectAll(self,parent):\n\t\tif parent.IsOk() and self.tree.ItemHasChildren(parent):\n\t\t\tchild, cookie = self.tree.GetFirstChild(parent)\n\t\t\twhile child:\n\t\t\t\tobj = self.tree.GetPyData(child)\n\t\t\t\tselect = obj != \"1\" and obj != \"2\"\n\t\t\t\tif child not in self.tree.GetSelections() and select:\n\t\t\t\t\tself.tree.SelectItem(child)\n\t\t\t\tif self.tree.ItemHasChildren(child):\n\t\t\t\t\tself.selectAll(child)\n\t\t\t\tchild = self.tree.GetNextSibling(child)", "def clean_all_filters(driver, selector):\n filters_buttons = driver.find_elements_by_css_selector(selector)\n\n if not filters_buttons:\n return\n\n filters_buttons[0].click()\n driver.implicitly_wait(1)\n\n if len(filters_buttons) > 1:\n clean_all_filters(driver, selector)", "def filter_none(elems):\n return [x for x in elems if x is not None]", "def query(selectors, tree_item):\n return [subitem for subitem in iterate_item(tree_item)\n if all(selectors, subitem)]", "def find_elements_inside_element(self, parent_element: Union[WebElement, Tuple[By, str]],\n children_element_locator: Tuple[By, str], wait_time=10,\n skip_exception=False) -> List[WebElement]:\n parent_element = self.find_element(parent_element)\n for i in range(wait_time):\n by_type, value = children_element_locator\n if by_type == By.CSS_SELECTOR:\n children = parent_element.find_elements_by_css_selector(value)\n elif by_type == By.XPATH:\n children = parent_element.find_elements_by_xpath(value)\n else:\n children = parent_element.find_elements(children_element_locator)\n if len(children):\n return children\n time.sleep(1)\n else:\n if not skip_exception:\n raise 
TimeoutException(f'Elements was not found in {wait_time} seconds')\n return []", "def filter(self, cls):\n return ElementList([x for x in self._elements if isinstance(x, cls)])", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def filter_func(self, agents):\n return [\n agent for agent in agents\n if agent.energy < self.model.energy_threshold and not agent.pack\n ]", "def node_type_filter(node_list, *filter_types):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.node_type_filter\")\n\n flg.info(\"Filtering Node List\")\n\n filtered_list = []\n for node in node_list:\n node_type = mc.nodeType(node)\n flg.debug(\"Node, {0}, is of type, {1}\".format(node, node_type))\n if node_type not in filter_types:\n flg.debug(\"Node kept\")\n filtered_list.append(node)\n else:\n flg.debug(\"Node filtered\")\n flg.info(\"Returning Filtered List\")\n return filtered_list", "def filter_to(self, samples):\n sample_set = set(samples)\n\n filtered_trios = []\n for trio in self._trios:\n restricted_trio = trio._restrict_to(sample_set)\n if restricted_trio is not None:\n filtered_trios.append(restricted_trio)\n\n return Pedigree(filtered_trios)", "def select_from(pav_cfg,\n paths: Iterable[Path],\n filter_func: Callable[[Any], bool] = default_filter,\n transform: Callable[[Path], Any] = None,\n order_func: Callable[[Any], Any] = None,\n order_asc: bool = True,\n fn_base: int = 10,\n limit: int = None) -> (List[Any], List[Path]):\n\n paths = list(paths)\n max_threads = min(pav_cfg.get('max_threads', 1), len(paths))\n\n selector = partial(select_one, ffunc=filter_func, trans=transform,\n ofunc=order_func, fnb=fn_base)\n\n if max_threads > 1:\n with ThreadPoolExecutor(max_workers=max_threads) as pool:\n selections = pool.map(selector, paths)\n else:\n selections = map(selector, paths)\n\n selected = [(item, path) for item, path in zip(selections, paths)\n if item is not None]\n\n if order_func is not None:\n selected.sort(key=lambda d: order_func(d[0]), reverse=not order_asc)\n\n return SelectItems(\n [item[0] for item in selected][:limit],\n [item[1] for item in selected][:limit])", "def filter_list(self, node_list):\n filtered_list = []\n for node in node_list:\n if self.is_member(node):\n filtered_list.append(node)\n return filtered_list", "def get_selected_elements(doc):\n try:\n # Revit 2016\n return [doc.GetElement(id)\n for id in __revit__.ActiveUIDocument.Selection.GetElementIds()]\n except:\n # old method\n return list(__revit__.ActiveUIDocument.Selection.Elements)", "def filter_(self, ancestors, filter_, matches):\n\n compounds = self.data_object.get_compound()\n\n node_stack = stack(self.data_object, ancestors)\n\n for compound in compounds:\n\n compound_finder = self.item_finder_factory.create_finder(compound)\n compound_finder.filter_(node_stack, filter_, matches)", "def elements(xpath_selection):\n driver = Driver().connect()\n return driver.find_elements_by_xpath(xpath_selection)", "def filter_selected_nodes(tree) -> list:\n return [n for n in tree.nodes if n.select and n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}]", "def filter_nodes(self, node_filter, parent=None):\n if self.data is None:\n return None\n\n if parent is None:\n return self.data.xpath(node_filter)\n else:\n return parent.xpath(node_filter)" ]
[ "0.53416806", "0.5286004", "0.516275", "0.51596725", "0.5059366", "0.5055007", "0.50266373", "0.49372914", "0.4907756", "0.47648945", "0.47609642", "0.46878317", "0.46518713", "0.46268824", "0.46157223", "0.4608182", "0.4597628", "0.45550662", "0.45305058", "0.45274553", "0.45258284", "0.4478596", "0.445101", "0.44420236", "0.4434969", "0.4425385", "0.44229695", "0.44192868", "0.4409322", "0.44092733" ]
0.87938285
0
Returns True if the element is 'allowed' (ie. NOT filtered). 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'element' is the element to select. By default we return True.
def select(self, viewer, parent, element): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def tag_visible(element):\n\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True", "def filter(self, viewer, parent, elements):\n\n return [e for e in elements if self.select(viewer, parent, e)]", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)", "def is_element_only(self) -> bool:\n raise NotImplementedError()", "def is_filter_trait(self, element, trait_name):\n\n return False", "def filterAcceptsRow(self, sourceRow, sourceParentIndex):\n parent_item = self.sourceModel().treeItem(sourceParentIndex)\n tree_item = parent_item.child(sourceRow)\n\n accept = ((self._show_special_attributes or\n not tree_item.is_special_attribute) and\n (self._show_callables or\n not tree_item.is_callable_attribute))\n\n return accept", "def check_element(self, e):\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def check_parent_and_children_not_in_view(self, element: Element) -> None:\n for view in self.element_views:\n if view.element in element.child_elements:\n raise ValueError(f\"A child of {element.name} is already in this view.\")\n if view.element is getattr(element, \"parent\", None):\n raise ValueError(\n f\"The parent of {element.name} is already in this view.\"\n )", "def is_parent(self):\n if self.parent is not None:\n return False\n return True", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def can_be_viewed_by(self,user):\n return True", "def is_element_enabled(self):\n if self.web_element.is_enabled():\n return True\n else:\n return False", "def has_parent(self):\n return self.parent != None", "def isElement(self, elementXpath):\r\n try:\r\n self.browser.find_element_by_xpath(elementXpath)\r\n return True\r\n except:\r\n return False", "def is_element_available(self, locator):\r\n if self.driver.is_element_present(locator):\r\n if self.driver.is_visible(locator):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def is_review_permitted(self, user):\n if user.is_authenticated or settings.OSCAR_ALLOW_ANON_REVIEWS:\n return not self.has_review_by(user)\n else:\n return False", "def is_parent_of(self):\n return self.hasLabel('parent_of')", "def isElementOnly(self):\n return _libsbml.SBaseExtensionPoint_isElementOnly(self)", "def is_element_displayed(self, locator=\"\", locator_type=\"id\", element=None):\n is_displayed = False\n try:\n if locator: # This means if locator is not empty\n element = self.get_element_(locator, 
locator_type)\n if element is not None:\n is_displayed = element.is_displayed()\n self.log.info(\"Element is displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n else:\n self.log.info(\"Element not displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n return is_displayed\n except:\n print(\"Element not found\")\n return False", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()", "def can_traverse(self, equipment: str, point: Point) -> bool:\n region_type = self.get_region(point)\n traversable = [\n [\"torch\", \"climbing\"],\n [\"climbing\", \"neither\"],\n [\"torch\", \"neither\"]\n ]\n return equipment in traversable[region_type]", "def can_change_external_reviewers(user, submission) -> bool:\n # check if all submissions have external review enabled\n if not submission.stage.has_external_review:\n return False\n\n if user.is_superuser:\n return True\n\n if settings.GIVE_STAFF_LEAD_PERMS and user.is_apply_staff:\n return True\n\n # only leads can change external reviewers\n if submission.lead.id == user.id:\n return True\n\n return False", "def has_parent(self):\n return False", "def _is_element_clickable(self, locator):\n return self.wait.until(lambda x: self.ec.element_to_be_clickable(self.get_element(locator)))", "def contains(self, element):\n pass", "def is_permitted(self):\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [\n\t\t\td.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"], filters={\"parent\": self.name})\n\t\t]\n\n\t\tcustom_roles = get_custom_allowed_roles(\"page\", self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True", "def has_parent(self):\n return self._parent_ is not None" ]
[ "0.59139556", "0.57015985", "0.5638367", "0.54528904", "0.5326502", "0.52207935", "0.5189183", "0.51695454", "0.5162016", "0.51491517", "0.50781363", "0.49656478", "0.49402714", "0.49008185", "0.48731193", "0.48402408", "0.4839491", "0.48392522", "0.48301572", "0.48110285", "0.47901526", "0.4777314", "0.4727134", "0.4725504", "0.4724558", "0.4714422", "0.47120154", "0.4704228", "0.46767986", "0.46692315" ]
0.6605754
0
Is the filter affected by changes to an element's trait? 'element' is the element. 'trait_name' is the name of the trait. Returns True if the filter would be affected by changes to the trait named 'trait_name' on the specified element. By default we return False.
def is_filter_trait(self, element, trait_name): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def has_visibility(trait, visibility_name):\n\n return trait.visibility == getattr(schema.Trait.Visibility, visibility_name)", "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def is_calibration_tag_for_name(ins, exp, run, name='dark') :\n for attr in run_attributes(ins, exp, run) :\n if attr['class'] == 'Calibrations' and attr['name'] == name : return True\n return False", "def __contains__(self, name):\n\n return name in self._wdict", "def is_tagged(self,tag_name,element):\n return (tag_name in self.tag2elements.keys()) and (element in self.tag2elements[tag_name])", "def check_element(self, e):\n my_view = {}\n if self.content_mimetype is not None:\n my_view[\"mimetype\"] = self.content_mimetype\n if self.content_model is not None:\n my_view[\"model\"] = self.content_model\n\n if self.element_constraint is not None:\n ret = self.element_constraint.apply_to(e)\n else:\n ret = True\n return ret & apply_to(my_view, e)", "def isModifiedByCategory(self,node, queryCategory):\n pred = self.getModifiers(node )\n for p in pred:\n #if( queryCategory.lower() == p.getCategory().lower() ):\n if( p.isA(queryCategory) ):\n return True\n\n return False", "def __contains__(self, name):\n return hasattr(self, name)", "def __contains__(self, attribute_name):\n return False # pragma: no cover", "def isModifiedByCategory(self, node, queryCategory):\n predecessors = self.getModifiers(node)\n for predecessor in predecessors:\n if predecessor.isA(queryCategory):\n return True\n\n return False", "def is_injected(self, name):\n return name in self.__provisions", "def __continas__ (self, name):\n return name in self.containments", "def tag_visible(element):\n\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True", "def __bool__(self):\n return True if self._name is not None else False", "def match(self, name, tags):\n S, tags = self.get_compiled(name, tags)\n return bool(S & tags)", "def contains(self, element):\n pass", "def is_element_only(self) -> bool:\n raise NotImplementedError()", "def specify_change(self) -> bool:\n return any(True for e in self if e != self.wildcard)", "def __contains__(self, name):\n return name in self._variables", "def can_transform(self, html_element: ET.Element):\n return html_element.tag == \"mark\"", "def is_change(self) -> bool:\n return self._change", "def is_blocked(self, xsd_element: 'XsdElement') -> bool:\n xsd_type = xsd_element.type\n if self is xsd_type:\n return False\n\n block = f'{xsd_element.block} {xsd_type.block}'.strip()\n if not block:\n return False\n\n _block = {x for x in block.split() if x in ('extension', 'restriction')}\n return any(self.is_derived(xsd_type, derivation) for derivation in _block)", "def __bool__(self):\n return any(\n getattr(self, hook_trigger, None) for hook_trigger in self._hook_triggers\n )", "def __contains__(self, name):\n return name in set(self)", "def trait_view ( self, name = None, view_element = None ):\n return self.__class__._trait_view( name, view_element,\n self.default_traits_view, self.trait_view_elements,\n self.editable_traits )", "def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in 
self._tags:\n if tag == desired_tag:\n return True\n\n return False", "def can_analyze_contain(cls, element):\n\n if element is None:\n return False\n\n return element.ele_type == SecondaryStructureElementType.Stem \\\n or element.ele_type == SecondaryStructureElementType.Hairpin \\\n or element.ele_type == SecondaryStructureElementType.Interior \\\n or element.ele_type == SecondaryStructureElementType.Multiloop \\\n or element.ele_type == SecondaryStructureElementType.Unpaired \\\n or element.ele_type == SecondaryStructureElementType.Bulge \\\n or element.ele_type == SecondaryStructureElementType.End", "def is_wrapped_by(self, name):\n\n\t\ttry:\n\t\t\tself._find_wrapper_by_name(name)\n\t\texcept ValueError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def _is_node_an_element(self, node):\n # Try the simplest approach first, works for plain old ElementTree\n if isinstance(node, BaseET.Element):\n return True\n # For cElementTree we need to be more cunning (or find a better way)\n if hasattr(node, 'makeelement') \\\n and isinstance(node.tag, six.string_types):\n return True" ]
[ "0.5432807", "0.5380321", "0.5154928", "0.50004315", "0.49823684", "0.4972386", "0.49546346", "0.49431932", "0.4915069", "0.4896147", "0.48712415", "0.48191318", "0.48169592", "0.47890756", "0.4781882", "0.47721133", "0.47471988", "0.47195056", "0.47182444", "0.46942788", "0.4686392", "0.4648566", "0.4622082", "0.4620239", "0.4619137", "0.45946512", "0.4581375", "0.45769244", "0.4544667", "0.4529805" ]
0.7937278
0
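
The three query/document pairs above all belong to the same viewer-filter API: `filter` keeps only the elements for which `select` returns True, and `is_filter_trait` tells the viewer which trait changes should re-trigger filtering. A minimal sketch of how a concrete subclass might override them — the class name, the `prefix` attribute, and the `name` trait are assumptions for illustration only, not part of the dataset:

```python
class NamePrefixFilter:
    """Hypothetical filter keeping only elements whose 'name' starts with a prefix."""

    def __init__(self, prefix):
        self.prefix = prefix

    def filter(self, viewer, parent, elements):
        # Same pattern as the document field above: delegate element-by-element to select().
        return [e for e in elements if self.select(viewer, parent, e)]

    def select(self, viewer, parent, element):
        # Allow the element only if its name matches the prefix.
        return getattr(element, 'name', '').startswith(self.prefix)

    def is_filter_trait(self, element, trait_name):
        # Re-filter whenever an element's 'name' trait changes.
        return trait_name == 'name'
```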
Given a binary tree, find its minimum depth. The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
def minDepth(self, root: TreeNode) -> int: return self.bfs(root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_depth(node):\n if not node:\n return 0\n elif (not node.left) and (not node.right):\n # found leaf\n return 1\n elif not node.left:\n # if the root has only 1 child, this prevents the minimum depth from\n # equaling zero\n return min_depth(node.right) + 1\n elif not node.right:\n return min_depth(node.left) + 1\n return min(min_depth(node.left), min_depth(node.right)) + 1", "def min_depth(t):\n if is_leaf(t):\n return 0\n h = float('inf')\n for b in branches(t):\n # Still works fine!\n h = min(h, 1 + min_depth(b))\n return h", "def max_depth(root):\n # basic case\n if root is None:\n return 0\n\n # breadth-first traversal\n queue = collections.deque([root])\n depth = 0\n while queue:\n queue_size = len(queue)\n for i in range(queue_size):\n curr = queue.popleft()\n if curr.left is not None:\n queue.append(curr.left)\n if curr.right is not None:\n queue.append(curr.right)\n depth += 1\n\n return depth", "def find_depth_tree(root):\n if root is not None:\n max_depth = 0\n if root.branches is None:\n return 1\n else:\n for value in root.branches.values():\n max_depth = max(max_depth, DecisionTree.find_depth_tree(value))\n return 1 + max_depth\n else:\n return 1", "def depth(self):\n\t\tdef helper(tree, d):\n\t\t\tif tree.isLeaf():\n\t\t\t\treturn d\n\t\t\telse:\n\t\t\t\td_left=helper(tree.left, d+1) if tree.hasLeftChild() else 0\n\t\t\t\td_right=helper(tree.right, d+1) if tree.hasRightChild() else 0\n\t\t\t\treturn max(d_left, d_right)\n\n\t\treturn helper(self.root, 1) if not self.isEmpty() else 0", "def get_depth(self):\n if self.root is None:\n return 0\n else:\n node_queue = list()\n node_queue.append(self.root)\n depth = 0\n while len(node_queue):\n q_len = len(node_queue)\n while q_len:\n q_node = node_queue.pop(0)\n q_len = q_len - 1\n if q_node.left is not None:\n node_queue.append(q_node.left)\n if q_node.right is not None:\n node_queue.append(q_node.right)\n depth = depth + 1\n return depth", "def treeLevel(root):\n\n if not root:\n return 0\n else:\n return 1+max(treeLevel(root.left),treeLevel(root.right))", "def max_depth(node):\n if not node:\n return 0\n return max(max_depth(node.left), max_depth(node.right)) + 1", "def tree_depth(tree):\r\n if(tree==None):\r\n return 0\r\n elif(left(tree)!=None):\r\n return 1+tree_depth(left(tree))\r\n elif(right(tree)!=None):\r\n return 1+tree_depth(right(tree))\r\n else:\r\n return 0", "def maxDepth(node):\n\tif node is None: \n\t\treturn 0 \n\telse: \n\t\tlDepth=maxDepth(node.left)\n\t\trDepth=maxDepth(node.right) \n\t\tif lDepth>rDepth: \n\t\t return lDepth+1\n\t\telse: \n\t\t return rDepth+1", "def depth(self):\n L, R = 0,0\n if self.left:\n L = self.left.depth()\n if self.right:\n R = self.right.depth()\n\n return 1 + max(L, R)", "def dfs(self, root: TreeNode) -> int:\n if not root:\n return 0\n\n def dfs(node):\n if not node:\n return float('inf')\n if not node.left and not node.right:\n return 1\n return min(dfs(node.left), dfs(node.right)) + 1\n\n return dfs(root)", "def test_MaxDepth_SimpleTree(self):\n\n root = TreeNode(0)\n root.addLeft(1)\n root.addRight(5)\n root.left.addLeft(2)\n root.left.addRight(3)\n root.left.right.addRight(4)\n root.right.addRight(6)\n\n self.assertEqual(findMaxDepthDFS(root),3)", "def depth(self):\n result = 0\n if self.val is None:\n return result\n return max(self.left.depth(), self.right.depth()) + 1", "def max_depth(self):\n if len(self.children) == 0:\n return 1\n else:\n child_depths = [c.max_depth() for c in self.children]\n return 1 + max(child_depths)", "def min_len(BST):\r\n if 
isinstance(BST,tuple):\r\n return min_len(BST[0]) + min_len(BST[1])\r\n else:\r\n return BST[0]", "def depth(self, node):\n if node is self.root:\n return 0\n return nx.shortest_path_length(self.graph, self.root, node)", "def depth(self, d=0):\n d1 = 0\n d2 = 0\n if self.leftChild:\n d1 = max(self.leftChild.depth(d + 1), d)\n if self.rightChild:\n d2 = max(self.rightChild.depth(d + 1), d)\n return max(d1, d2, d)", "def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()", "def depth(self):\n left_depth = self.left.depth() if self.left is not None else 0\n right_depth = self.right.depth() if self.right is not None else 0\n return max(left_depth, right_depth) + 1", "def depth(self, node):\n\n if not node:\n return 0\n else:\n l_depth = self.depth(node.left)\n r_depth = self.depth(node.right)\n\n if l_depth > r_depth:\n return l_depth + 1\n else:\n return r_depth + 1", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def find_min(self) -> TreeNode:\n node = self.root\n while True:\n if not node.left:\n return node\n node = node.left", "def minKeyTree(root):\n try:\n min = None\n if (root is not None):\n if (root['left'] is None):\n min = root\n else:\n min = minKeyTree(root['left'])\n return min\n except Exception as exp:\n error.reraise(exp, 'BST:minKeyNode')", "def max_depth(self) -> int:\n return 0", "def diameterOfBinaryTree(self, root):\n self.max_length = 0\n def maxDepth(root):\n if not root:\n return 0\n left_branch = maxDepth(root.left)\n right_branch = maxDepth(root.right)\n self.max_length = max(self.max_length, left_branch + right_branch)\n return max(left_branch, right_branch) + 1\n maxDepth(root)\n return self.max_length", "def node_depths_recursive(root):\n depth_sums = 0\n depth_sums = sum_node_depths(root, depth_sums, 0)\n return depth_sums", "def get_min_depth(l_k):\n return max(l_k.values())", "def depth(self):\n if self.children is None:\n return 1\n\n max_depth = 0\n for child in self.children:\n if child is None:\n return 1\n child_depth = child.depth()\n if child_depth > max_depth:\n max_depth = child_depth\n\n return max_depth+1", "def depth(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depth(self.parent(p))" ]
[ "0.8461286", "0.8181243", "0.71641576", "0.7144373", "0.6993832", "0.6950117", "0.6907654", "0.6858466", "0.68347263", "0.6743698", "0.67391497", "0.6727257", "0.6615435", "0.6594133", "0.657416", "0.6555998", "0.6510495", "0.64694285", "0.646406", "0.6413745", "0.6391296", "0.6391089", "0.6387154", "0.6385762", "0.63813376", "0.63660586", "0.6352175", "0.63458717", "0.6342528", "0.63359886" ]
0.86733794
0
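
The document field in the row above delegates to a `self.bfs(root)` helper that is not included in the row. A minimal sketch of what such a breadth-first helper could look like, written as a standalone function — the `TreeNode` class and the function name are assumptions for illustration:

```python
from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def min_depth_bfs(root):
    """Level-order traversal; the first leaf reached is the shallowest one."""
    if root is None:
        return 0
    queue = deque([(root, 1)])
    while queue:
        node, depth = queue.popleft()
        if node.left is None and node.right is None:
            return depth  # first leaf encountered gives the minimum depth
        if node.left:
            queue.append((node.left, depth + 1))
        if node.right:
            queue.append((node.right, depth + 1))
    return 0
```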
Converts the complex number `c` to a string in Fortran format, i.e. (Re c, Im c). If c is iterable, it returns a string of the form [(Re c_1, Im c_1), ...].
def str_complex(c, kindstr=''): if hasattr(c, '__iter__'): return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']' else: c = complex(c) return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_vct_str ( vct , format = '%.5g%-+.5gj' ) :\n try :\n lst = [] \n for c in vct :\n cc = complex ( c )\n item = format % ( cc.real , cc.imag )\n lst.append ( cc ) \n return '[ ' + ', '.join ( lst ) + ' ]' \n except TypeError :\n pass\n return complex_vct_str ( vct , format = '%.5g%-+.5gj' )", "def __str__( self ) :\n\n return( ' '.join( [ \"%g\" % c_l for c_l in self.coefficients ] ) )", "def to_string(inputs, outputs):\n r_val = '# Column 01: frequency\\n'\n r_val += '# 02: hp - real\\n'\n r_val += '# 03: hp - imaginary\\n'\n r_val += '# 04: hc - real\\n'\n r_val += '# 05: hc - imaginary\\n'\n for f_i, hp_i, hc_i in zip(inputs.freqs, outputs.hp, outputs.hc):\n r_val += \"%8.2f %12.5e %12.5e %12.5e %12.5e\\n\" % (f_i, hp_i.real, hp_i.imag, hc_i.real, hc_i.imag)\n return r_val", "def __str__(self):\n return f'{self.real:02} + {self.imaginary:02}i'", "def fortran_c_wrapper(self) -> str:\n result = ''\n for member in self.members:\n result += member.fortran_c_wrapper()\n return result", "def complexinfo(a, str=None):\n\n if str:\n print \n print \"\\t\", str\n re = a.real.copy()\n im = a.imag.copy()\n _log.debug(\"\\t%.2e %.2g = re.sum im.sum\" % (re.sum(), im.sum()))\n _log.debug(\"\\t%.2e %.2g = abs(re).sum abs(im).sum\" % (abs(re).sum(), abs(im).sum()))", "def nice_cubic_polynomial(p):\n tmp = \"\"\n if p[\"a\"] == 1:\n tmp += \" x^3\"\n elif p[\"a\"] != 0:\n tmp += \"%.2fx^3\" % p[\"a\"]\n if p[\"b\"] == 1:\n tmp += \"\\t+ x^2\"\n elif p[\"b\"] != 0:\n tmp += \"\\t+ %.2fx^2\" % p[\"b\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"c\"] == 1:\n tmp += \"\\t+ x\"\n elif p[\"c\"] != 0:\n tmp += \"\\t+ %.2fx\" % p[\"c\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"d\"] != 0:\n tmp += \"\\t+ %.2f\" % p[\"d\"]\n return tmp", "def list_to_str(\n l: list,\n c: str,\n ) -> str:\n\n s = c.join(map(str, l))\n\n return s", "def getComplex(self, base, aspirated=False):\n res = ''\n if base == 'c':\n res = self.useRetroflex and 'ʈ͡ʂ' or 't͡ɕ'\n elif base == 'j':\n res = self.useRetroflex and 'ɖ͡ʐ' or 'd͡ʑ'\n elif base == 'ts':\n res = 't͡s'\n else:\n res = 'd͡z'\n if aspirated:\n res += 'ʰ'\n return res", "def _repr_(self):\n return \"Complex Field with %s bits of precision\"%self._prec", "def build_ascii_fmtstr(pc_):\n fmtstr = []\n for t, cnt in zip(pc_.type, pc_.count):\n if t == 'F':\n fmtstr.extend(['%.10f'] * cnt)\n elif t == 'I':\n fmtstr.extend(['%d'] * cnt)\n elif t == 'U':\n fmtstr.extend(['%u'] * cnt)\n else:\n raise ValueError(\"don't know about type %s\" % t)\n return fmtstr", "def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string", "def order2string(order):\n nparray = np.array(order)\n num_x = np.sum(nparray==0)\n num_y = np.sum(nparray==1)\n num_z = np.sum(nparray==2)\n string_repr = \"$\"\n if num_x == 0 and num_y == 0 and num_z == 0:\n return \"constant\"\n if num_x > 0:\n string_repr += \"x^{{{}}}\".format(num_x)\n if num_y > 0 :\n string_repr += \"y^{{{}}}\".format(num_y)\n if num_z > 0:\n string_repr += \"z^{{{}}}\".format(num_z)\n string_repr += \"$\"\n return string_repr", "def __str__(self):\n #Get an ordered list of the elements strings so it outputs always the same\n #string given a mass function.\n elements = []\n for element in self.focals:\n elements.append((element, str(element)))\n sortedList = sorted(elements, key=lambda x:x[1])\n \n result = \"\"\n first = True\n for t in sortedList:\n if first:\n result += t[1] 
+ \":\" + \"{:.4f}\".format(self.focals[t[0]])\n first = False\n else:\n result += \", \" + t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n return \"{\" + result + \"}\"", "def __str__(self):\n my_str=\"[\"\n for elem in range(self.size):\n x=cArray.cModule.get_element(self.arrayRef,ctypes.c_int(elem))\n my_str+=str(x)+\" \"\n my_str+=\"]\"\n return my_str", "def __repr__(self) -> str:\n\t\treturn \",\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def float_vct_str ( vct , format = '%.5g' ) :\n try :\n return '[ ' + ', '.join ( [ format % v for v in vct ] ) + ' ]' \n except TypeError :\n pass\n return float_vct_str ( vct , format = '%.5g' )", "def to_cmd(c: Coordinate, pose_flag: Optional[int] = 7):\n txt = (\"{:.{d}f}\".format(i, d=c.digits) if i is not None else \"\" for i in c.values)\n return f'({\",\".join(txt)}) ({pose_flag},0)'", "def __str__(self):\n [r,c],f = self.D, self.F\n lmax = len(str(max(iter(self)))) + 1\n s = '\\n'.join( (' '.join('{0:{l}G}'.format(f(i,j),l=lmax) if isinstance(f(i,j), int) or isinstance(f(i,j), float) else str(f(i,j)) for j in range(c))) for i in range(r))\n return s", "def __str__(self):\n\t\treturn 'f(z) = ' + self.p.coeffString() + ' / ' + self.q.coeffString()", "def complexCompose(self,coefficients,t=1):\n c=coefficients\n N=len(c)//2\n s=lambda t,n:c[n+N]*cmath.exp(1j*n*t)\n a=0\n g=[]\n z=[]\n\n #for i in range(len(c)):\n # if i==0: n=0\n # elif i%2==1: n=(i+1)//2\n # elif i%2==0: n=-i//2\n # pass\n\n #print([a[1] for a in z])\n #z=sorted(z,key=lambda x:1,reverse=True)\n #print([a[1] for a in z])\n #z=[a[0] for a in z]\n\n for n in range(-N,N+1):\n a+=s(t,n)\n g.append((a.real,a.imag))\n\n return g", "def format_sampler(self, val):\n if isinstance(val, numbers.Number):\n val = complex(val)\n return \"%s,%s\" % (val.real, val.imag)\n return val", "def matrix2str(A):\n s = \"\"\n for x in numpy.nditer(A, order='F'):\n s = s + str(x) + \",\"\n\n return s", "def discreteComplexCompose(self,c,n):\n z=self.discreteComplexInverseTransform(c,n)\n return (z.real,z.imag)", "def matrixToString(matrix):\n nRows = len(matrix)\n if nRows == 0:\n return '[0,0](())'\n nCols = len(matrix[0])\n string = '[%d,%d](' % (nRows, nCols)\n for r in range(nRows):\n string += '('\n for c in range(nCols):\n string += str(float(matrix[r][c]))\n if c != nCols - 1:\n string += ','\n string += ')'\n if r != nRows - 1:\n string += ','\n string += ')'\n return string", "def name_circulant(num_vertices, j_value_set):\n\n return f\"Cir [{num_vertices}] [{j_value_set}]\"", "def floatArrayToString(fvalues, prec=3, delem=\",\"):\n\tsvalues = list(map(lambda v : formatFloat(prec, v), fvalues))\n\tdelem = \" \" if delem is None else delem\n\treturn delem.join(svalues)", "def create_string(iteration, dic):\n return str(iteration) + '|' + dic['Year'] + '/' + \\\n get_month_number(dic['Month']) + '/' + \\\n dic['Day'] + '|' + dic['Hour'] + ':' + \\\n dic['Min'] + ':' + dic['Seg'] + '|' + \\\n dic['Energy']", "def __str__(self):\n output = \"\"\n for i in self.values:\n st = []\n output += \"[\"\n for j in i:\n st.append(str(j))\n output += \",\".join(st)+\"]\"\n return str(self.m)+\"x\"+str(self.n)+\" [\" + output + \"]\"", "def str2(self):\n signs = [ ('+' if f >= 0 else '-') for f in self.mVector ]\n vals = [ abs(f) for f in self.mVector ]\n\n return '%s %s %si %s %sj %s %sk' % (self.mScalar, \n signs[0],\n vals[0],\n signs[1],\n vals[1],\n signs[2],\n vals[2])" ]
[ "0.7047887", "0.58852553", "0.5754803", "0.5726591", "0.57228047", "0.56523186", "0.56442934", "0.56111693", "0.5576192", "0.55657303", "0.5538955", "0.55268145", "0.5501663", "0.54070234", "0.5394753", "0.53852344", "0.536029", "0.535686", "0.53531355", "0.53468204", "0.53373665", "0.53193367", "0.531241", "0.52965844", "0.52930677", "0.52841514", "0.5272729", "0.52694833", "0.52590585", "0.5258443" ]
0.7722821
0
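
A short usage note for the `str_complex` helper in the row above: with the default empty `kindstr` a scalar maps to `(Re, Im)` and an iterable maps to a bracketed list of such pairs; a kind suffix such as `_dp` is appended to each component. The sketch below restates the function from the document field (Python 3) and adds example calls; the example values are assumptions:

```python
def str_complex(c, kindstr=''):
    # Mirrors the document field above.
    if hasattr(c, '__iter__'):
        return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
    else:
        c = complex(c)
        return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)

print(str_complex(1 + 2j))                   # (1.0, 2.0)
print(str_complex([1 + 2j, 3 - 4j], '_dp'))  # [(1.0_dp, 2.0_dp), (3.0_dp, -4.0_dp)]
```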
Select PORT update events, notify the observers upon a port update in APPL_DB/CONFIG_DB or a XCVR insertion/removal in STATE_DB
def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler): if not stop_event.is_set(): (state, _) = sel.select(SELECT_TIMEOUT_MSECS) if state == swsscommon.Select.TIMEOUT: return if state != swsscommon.Select.OBJECT: logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT') return for port_tbl in asic_context.keys(): while True: (key, op, fvp) = port_tbl.pop() if not key: break if not validate_port(key): continue fvp = dict(fvp) if fvp is not None else {} if 'index' not in fvp: fvp['index'] = '-1' port_index = int(fvp['index']) port_change_event = None if op == swsscommon.SET_COMMAND: port_change_event = PortChangeEvent(key, port_index, asic_context[port_tbl], PortChangeEvent.PORT_SET, fvp) elif op == swsscommon.DEL_COMMAND: port_change_event = PortChangeEvent(key, port_index, asic_context[port_tbl], PortChangeEvent.PORT_DEL, fvp) if port_change_event is not None: port_change_event_handler(port_change_event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n reason_dict = {ofproto.OFPPR_ADD: \"added\",\n ofproto.OFPPR_DELETE: \"deleted\",\n ofproto.OFPPR_MODIFY: \"modified\", }\n\n if reason in reason_dict:\n\n print \"switch%d: port %s %s\" % (dpid, reason_dict[reason], port_no)\n else:\n print \"switch%d: Illeagal port state %s %s\" % (port_no, reason)", "def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def update_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.update_port(port)\n except:\n pass", "def process_update_port(self, context, data, result):\n\n orginal_exten = copy.deepcopy(result)\n # Process extension data\n self._find_port_dict_extensions(\n result, None, session=context.session)\n\n port_ext = self._update_port_ext(\n result, data, session=context.session)\n switchports = self._update_switchports(\n result, data, session=context.session)\n self._find_port_dict_extensions(\n result, None, port_ext=port_ext,\n switchports=switchports, session=context.session)\n\n # We only want to commit on a state change\n if orginal_exten.get(\"commit\") != result[\"commit\"]:\n # If we are transitioning to active, validate\n if not orginal_exten.get(\"commit\") and result[\"commit\"]:\n self._validate_port_can_commit(\n result, None, session=context.session)", "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)", "def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n 
self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices", "def refreshPorts(self, event):\n logging.debug(\"Refreshing ports.\")\n self.availablePorts = self.controller.getAvailablePorts()\n\n # Delete old dropdown options\n self.portSelector[\"menu\"].delete(0, \"end\")\n for value in self.availablePorts:\n\n def _callback(value=value):\n self.controller.updatePort(value)\n self.serialPortVar.set(value)\n\n self.portSelector[\"menu\"] \\\n .add_command(label=value,\n command=_callback)\n return", "def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler):\n if not stop_event.is_set():\n (state, _) = sel.select(SELECT_TIMEOUT_MSECS)\n if state == swsscommon.Select.TIMEOUT:\n return\n if state != swsscommon.Select.OBJECT:\n logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')\n return\n\n read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler)", "def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature", "def treat_devices_added_or_updated(self, details):\n device = details['device']\n LOG.debug(\"Processing port: %s\", device)\n # REVISIT(ivar): this is not a public facing API, we will move to\n # the right method once the redesign is complete.\n port = self.bridge_manager.get_vif_port_by_id(device)\n if port:\n gbp_details = details.get('gbp_details')\n trunk_details = details.get('trunk_details')\n neutron_details = details.get('neutron_details')\n if gbp_details and 'port_id' not in gbp_details:\n # The port is dead\n details.pop('port_id', None)\n if (gbp_details and gbp_details.get('host') and\n gbp_details['host'] != self.host):\n self.port_unbound(device)\n return False\n elif neutron_details and 'port_id' in neutron_details:\n LOG.info(\"Port %(device)s updated. 
Details: %(details)s\",\n {'device': device, 'details': details})\n # Inject GBP/Trunk details\n port.gbp_details = gbp_details\n port.trunk_details = trunk_details\n self.treat_vif_port(port, neutron_details['port_id'],\n neutron_details['network_id'],\n neutron_details['network_type'],\n neutron_details['physical_network'],\n neutron_details['admin_state_up'],\n neutron_details['fixed_ips'],\n neutron_details['device_owner'],\n neutron_details['segmentation_id'])\n # update plugin about port status\n if neutron_details.get('admin_state_up'):\n LOG.debug(\"Setting status for %s to UP\", device)\n self.plugin_rpc.update_device_up(\n self.context, device, self.agent_id, self.host)\n else:\n LOG.debug(\"Setting status for %s to DOWN\", device)\n self.plugin_rpc.update_device_down(\n self.context, device, self.agent_id, self.host)\n LOG.info(\"Configuration for device %s completed.\",\n device)\n else:\n LOG.warn(\"Device %s not defined on plugin\", device)\n if port and port.ofport != -1:\n self.port_unbound(port.vif_id)\n return False\n else:\n # The port disappeared and cannot be processed\n LOG.info(\"Port %s was not found on the integration bridge \"\n \"and will therefore not be processed\", device)\n self.port_unbound(device)\n return False\n return True", "def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)", "def datachange_notification(self, node, val, data):\n \n logger.debug(\"New data change event. node:{}, value:{}\".format(node, val))\n \n # Sorry about these lines of code, but I don't see any nicer way of determining the port number than from \n # the identifier string. Then splitting it up to isolate the port number.\n # Example \"Status.Port_2.Selected\" is split into ['Status', 'Port_2', 'Selected'] then 'Port_2' is split into \n # ['Port', '2'] and then the '2' is turned into an intiger.\n path_list = str(node.nodeid.Identifier).split(\".\")\n\n # We can safely assume that the last term is the tag that updated.\n tag = path_list[-1] \n \n # Figure out the port number\n port_number = None\n if 'Port' in path_list[1]:\n port_number = int(path_list[1].split(\"_\")[-1]) \n \n \"\"\" Switch for each possible tag\"\"\"\n # If the command tag \"Select\" changes go select that port with the instructions saved in the command tag. 
\n if tag == 'Select' and port_number:\n if val == True:\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Instructions\".format(port_number))\n instructions = node.get_value()\n self._pbl.select_port(port_number, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Select\".format(port_number))\n node.set_value(False)\n \n elif tag == 'Deselect' and port_number:\n if val == True:\n self._pbl.deselect_port(port_number, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Deselect\".format(port_number))\n node.set_value(False)\n\n elif tag == 'ContentDisplayName' and port_number:\n self._pbl.set_content_key(port_number,'display_name', str(val))\n elif tag == 'ContentName' and port_number:\n self._pbl.set_content_key(port_number,'name', str(val))\n elif tag == 'ContentDescription' and port_number:\n self._pbl.set_content_key(port_number,'description', str(val))\n elif tag == 'ContentImagePath' and port_number:\n self._pbl.set_content_key(port_number,'image_path', str(val))\n \n elif tag == 'Select' and 'ByContent' in path_list[1]:\n if val == True:\n instructions = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Instructions\").get_value()\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n _, selected_port = self._pbl.select_content(name = name, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Select\")\n node.set_value(False)\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Result\")\n node.set_value(selected_port)\n\n elif tag == 'Deselect' and 'ByContent' in path_list[1]:\n if val == True:\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n self._pbl.deselect_content(name = name, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Deselect\")\n node.set_value(False)", "def db_change_callback(self, table, key, action, value, topic=None):\n if self.USE_CACHE:\n # Update cache\n if action == 'create' or action == 'set':\n if table == 'lport':\n self.cache_logical_port_by_port_id[key] = self.nb_api.get(l2.LogicalPort(id=key))\n if table == 'lrouter':\n self.cache_logical_router_by_dpid[key] = self.nb_api.get(l3.LogicalRouter(id=key))\n if action == 'del':\n if table == 'lport':\n # default if key does not exists is None\n self.cache_logical_port_by_port_id.pop(key, None)\n if table == 'lrouter':\n self.cache_logical_router_by_dpid.pop(key, None)\n\n print(\"L3 App: Received Update for table {} and key {} action {}\".format(table, key, action))\n if action == 'set':\n if table == 'lport':\n if self.USE_CACHE:\n updated_port = self.cache_logical_port_by_port_id[key]\n else:\n updated_port = self.nb_api.get(l2.LogicalPort(id=key))\n\n if len(updated_port.ips) is not 0:\n for ip in updated_port.ips:\n # new ip discovered\n # install route on every datapath\n # only update the other datapaths\n for dpid, datapath in self.cache_datapath_by_dpid.iteritems():\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid, ip)\n if out_port is None:\n continue\n out_port_id = \"{}:{}\".format(dpid, out_port)\n lout_port = self.nb_api.get(l2.LogicalPort(id=out_port_id))\n if ip in lout_port.ips:\n continue\n # else add new ip and install flow\n lout_port.ips.append(ip)\n self.nb_api.update(lout_port)\n # install flow\n print \"L3 IP via pubsub: installing flow on {}: out_port: {} src_mac:\" \\\n \" 
{} dst_mac: {}, ip: {}\".format(datapath.id, out_port, new_src_mac, new_dst_mac, ip)\n self.add_flow_gateway_for_ip(datapath, int(out_port), ip, new_src_mac, new_dst_mac)", "def updateAvailablePorts(self):\n # Build a port list\n device_list_all = comports()\n self.device_choices = list()\n for device in device_list_all:\n self.device_choices.append(device[0])\n\n if len(self.device_choices) < 1:\n tkinter.messagebox.showerror('No Available Serial Ports','No serial ports are available.')", "def process_port_state(self, dp_name, port, state):\n with self._lock:\n device = self._port_device_mapping.setdefault((dp_name, port), DeviceEntry())\n device.port_up = state\n if not state:\n device.assigned = None\n device.vlan = None\n self._send_device_port_event(device)", "def update_ports(self):\n \n # fetch only those ports having\n # VID:PID == a valid (VID, PID) pair in target_vid_pid\n ports = []\n\n for valid_pair in self.target_vid_pid:\n vid_pid = valid_pair[0] + ':' + valid_pair[1]\n ports = ports + [p for p in list_ports.grep(vid_pid)]\n #ports = list_ports.comports()\n \n # add new ports to connected_ports\n # and update new_ports\n new_ports = []\n for p in ports:\n if not p in self.connected_ports:\n self.connected_ports.append(p)\n new_ports.append(p)\n\n # remove missing ports from devices_found\n # and update removed_ports\n removed_ports = []\n for p in self.connected_ports:\n if not p in ports:\n self.connected_ports.remove(p)\n removed_ports.append(p)\n\n return new_ports, removed_ports", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def port_update(self, context, **kwargs):\n self._refresh_bridge_mappings_to_neutron()", "def update_port_postcommit(self, mech_context):\n LOG.debug(\"update_port_postcommit: called\")", "def db_changed(self):\n self.dbstate.db.connect('person-add', self.update)\n self.dbstate.db.connect('person-delete', self.update)\n self.dbstate.db.connect('person-update', self.update)\n self.dbstate.db.connect('family-add', self.update)\n self.dbstate.db.connect('family-delete', self.update)\n self.dbstate.db.connect('family-update', self.update)", "def check_port_connections(self):\n all_ports = crest.get_all_ports(self.model)\n influences_to_target = {p: [] for p in all_ports}\n updates_to_target = {p: [] for p in all_ports}\n actions_to_target = {p: [] for p in all_ports}\n\n # fill data stores\n for inf in crest.get_all_influences(self.model):\n influences_to_target[inf.target].append(inf)\n\n for up in crest.get_all_updates(self.model):\n updates_to_target[up.target].append(up)\n\n for action in crest.get_all_actions(self.model):\n actions_to_target[action.target].append(action)\n\n for port in all_ports:\n assert not (len(influences_to_target[port]) > 0 and (\n len(updates_to_target[port]) > 0 or len(actions_to_target[port]) > 0)\n ), f\"There are [influences and (updates or actions)] writing to port {port._name} (entity: {port._parent._name})\"\n\n assert len(influences_to_target[port]) < 2, f\"There are two influences writing to {port._name}\"\n\n states = [update.state for update in updates_to_target[port]]\n assert len(states) == len(set(states)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple updates linked to the same state\"\n\n transitions = [action.transition for action in actions_to_target[port]]\n assert len(transitions) == len(set(transitions)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple actions 
linked to the same transition\"", "def servicesChanged(self) -> None:\n ...", "def notify(self, ports):\n if self._state == JobState.PENDING:\n self._process.notify(ports)", "def _port_stats_reply_handler(self, ev):\n body = ev.msg.body\n dpid = ev.msg.datapath.id\n self.stats['port'][dpid] = body\n self.free_bandwidth.setdefault(dpid, {})\n\n for stat in sorted(body, key=attrgetter('port_no')):\n # self.link_loss[dpid][stat.port_no] = [stat.rx_packets,stat.tx_packets]\n port_no = stat.port_no\n if port_no != ofproto_v1_3.OFPP_LOCAL:\n key = (dpid, port_no)\n value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,\n stat.duration_sec, stat.duration_nsec)\n\n self._save_stats(self.port_stats, key, value, 5)\n\n # Get port speed.\n pre = 0\n period = setting.MONITOR_PERIOD\n tmp = self.port_stats[key]\n if len(tmp) > 1:\n pre = tmp[-2][0] + tmp[-2][1]\n period = self._get_period(tmp[-1][3], tmp[-1][4],\n tmp[-2][3], tmp[-2][4])\n\n speed = self._get_speed(\n self.port_stats[key][-1][0] + self.port_stats[key][-1][1],\n pre, period)\n\n self._save_stats(self.port_speed, key, speed, 5)\n self._save_freebandwidth(dpid, port_no, speed)", "def forwarder_state_changed(self, ev):\n\n\n dp = ev.dp\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n\n\n if ev.enter is True:\n # in plain MAC setup, this should install only ICMP and ARP re-route rules, watchout for hardcoded DP id\n self.on_inner_dp_join(dp)\n\t ##For evry new forwarder we send out discovery ICMP packets out of every port except OFPP_CONTROLLER\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' saying hello to Unifycore Controller, Unifycore warmly welcomes you!')\n for port in dp.ports:\n if port != (ofp.OFPP_CONTROLLER):\n LOG.debug('TOPO MNGR: Controller is sending topology discovery ICMPs to forwarder: ' + str(dp.id) + ', port: ' + str(port))\n _icmp_send(dp,port,DISCOVERY_IP_SRC, DISCOVERY_IP_DST)\n\n ##For evry new forwarder we send out discovery ARP packets out of every port except OFPP_CONTROLLER to find APN\n for apn in APN_POOL:\n if apn.ip_addr != None:\n LOG.debug('TOPO MNGR: Forwarder: '+str(dp.id)+', port: '+ str(port) + ' is looking for APN: ' + str(apn.name) +' at IP: '+str(apn.ip_addr)+' with ARP search source IP: ' + str(apn.arp_origin_ip))\n _arp_send(dp=dp, port_out=port, arp_code=1, ip_target=apn.ip_addr, ip_sender=apn.arp_origin_ip)\n\n\n\n\n\n if ev.enter is False:\n\t ##TODO: We need to scan if any tunnels were affected, and if so, if any PDP COntexts were affected\n ##JUST REMOVING NODE FROM TOPOLOGY ISNT ENOUGH!\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is leaving topology. 
It was a pleasure for us!')\n topo.del_forwarder(dp.id)", "def connect_callbacks(self):\n self.winch_MIDI_controller.connect_midi_processor(self.main.winch_midi_logic)\n\n self.DMX_controller.connect_callback(self.main.dmx_slider_change)\n self.dmxSelect.callback = self.main.dmx.set_and_open_port\n self.dmxSelect.set_items(self.main.dmx.available_ports())\n\n self.winchMidiInputCombo.callback = self.main.winch_midi_listener.open_MIDI_input\n self.winchMidiInputCombo.set_items(self.main.winch_midi_listener.get_midi_port_names())\n\n self.midiOutputCombo.callback = self.main.midi_sender.open_MIDI_output\n self.midiOutputCombo.set_items(self.main.midi_sender.get_midi_port_names())\n\n self.oscListenerConfig.callback = self.main.osc_listener.set_OSC_port\n self.oscSenderConfig.callback = self.main.osc_sender.set_OSC_port\n\n for winch, selector in zip(self.main.winches, self.winchSelects):\n selector.callback = winch.set_and_open_port\n selector.set_items(winch.available_ports())\n\n return", "def loop(self):\n _logger.info(\"Bus.loop listen imbus on db postgres\")\n # PATCH !!\n with odoo.sql_db.db_connect(_get_imbus_db()).cursor() as cr:\n conn = cr._cnx\n cr.execute(\"listen imbus\")\n cr.commit();\n while True:\n if select.select([conn], [], [], TIMEOUT) == ([], [], []):\n pass\n else:\n conn.poll()\n channels = []\n while conn.notifies:\n channels.extend(json.loads(conn.notifies.pop().payload))\n # dispatch to local threads/greenlets\n events = set()\n for channel in channels:\n events.update(self.channels.pop(hashable(channel), []))\n for event in events:\n event.set()", "def _onConnectionEvent(args):\n ctx = current_context()\n pvname = name(args.chid)\n global _cache\n\n if ctx is None and len(_cache.keys()) > 0:\n ctx = list(_cache.keys())[0]\n if ctx not in _cache:\n _cache[ctx] = {}\n\n # search for PV in any context...\n pv_found = False\n for context in _cache:\n if pvname in _cache[context]:\n pv_found = True\n break\n\n if not pv_found:\n _cache[ctx][pvname] = {'conn':False, 'chid': args.chid,\n 'ts':0, 'failures':0, 'value': None,\n 'callbacks': []}\n\n # set connection time, run connection callbacks\n # in all contexts\n for context, cvals in _cache.items():\n if pvname in cvals:\n entry = cvals[pvname]\n ichid = entry['chid']\n if isinstance(entry['chid'], dbr.chid_t):\n ichid = entry['chid'].value\n\n if int(ichid) == int(args.chid):\n conn = (args.op == dbr.OP_CONN_UP)\n chid = args.chid\n entry.update({'chid': chid, 'conn': conn,\n 'ts': time.time(), 'failures': 0})\n for callback in entry.get('callbacks', []):\n poll()\n if hasattr(callback, '__call__'):\n callback(pvname=pvname, chid=chid, conn=conn)\n return", "def notify_observers(self, new_gamestate) -> None:" ]
[ "0.62168676", "0.6123209", "0.60724276", "0.60613334", "0.60579395", "0.6057897", "0.60528636", "0.59052724", "0.58132577", "0.5790811", "0.5715928", "0.5668344", "0.56622344", "0.5567299", "0.55424833", "0.54548216", "0.5438808", "0.5418422", "0.5405286", "0.5364919", "0.5348141", "0.5343965", "0.530156", "0.53005517", "0.5288755", "0.5283231", "0.5280578", "0.52697295", "0.52376074", "0.5187858" ]
0.6788266
0
Select CONFIG_DB PORT table changes; once there is a port configuration add/remove, notify observers
def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler): if not stop_event.is_set(): (state, _) = sel.select(SELECT_TIMEOUT_MSECS) if state == swsscommon.Select.TIMEOUT: return if state != swsscommon.Select.OBJECT: logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT') return read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices", "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port_no = msg.desc.port_no\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n reason_dict = {ofproto.OFPPR_ADD: \"added\",\n ofproto.OFPPR_DELETE: \"deleted\",\n ofproto.OFPPR_MODIFY: \"modified\", }\n\n if reason in reason_dict:\n\n print \"switch%d: port %s %s\" % (dpid, reason_dict[reason], port_no)\n else:\n print \"switch%d: Illeagal port state %s %s\" % (port_no, reason)", "def db_change_callback(self, table, key, action, value, topic=None):\n if self.USE_CACHE:\n # Update cache\n if action == 'create' or action == 'set':\n if table == 'lport':\n self.cache_logical_port_by_port_id[key] = self.nb_api.get(l2.LogicalPort(id=key))\n if table == 'lrouter':\n self.cache_logical_router_by_dpid[key] = self.nb_api.get(l3.LogicalRouter(id=key))\n if action == 'del':\n if table == 'lport':\n # default if key does not exists is None\n self.cache_logical_port_by_port_id.pop(key, None)\n if table == 'lrouter':\n self.cache_logical_router_by_dpid.pop(key, None)\n\n print(\"L3 App: Received Update for table {} and key {} action {}\".format(table, key, action))\n if action == 'set':\n if table == 'lport':\n if self.USE_CACHE:\n updated_port = self.cache_logical_port_by_port_id[key]\n else:\n updated_port = self.nb_api.get(l2.LogicalPort(id=key))\n\n if len(updated_port.ips) is not 0:\n for ip in updated_port.ips:\n # new ip discovered\n # install route on every datapath\n # only update the other datapaths\n for dpid, datapath in self.cache_datapath_by_dpid.iteritems():\n out_port, new_src_mac, new_dst_mac = self.get_next_hop(dpid, ip)\n if out_port is None:\n continue\n out_port_id = \"{}:{}\".format(dpid, out_port)\n lout_port = self.nb_api.get(l2.LogicalPort(id=out_port_id))\n if ip in lout_port.ips:\n continue\n # else add new ip and install flow\n lout_port.ips.append(ip)\n self.nb_api.update(lout_port)\n # install flow\n print \"L3 IP via pubsub: installing flow on {}: out_port: {} src_mac:\" \\\n \" {} dst_mac: {}, ip: {}\".format(datapath.id, out_port, new_src_mac, new_dst_mac, ip)\n 
self.add_flow_gateway_for_ip(datapath, int(out_port), ip, new_src_mac, new_dst_mac)", "def update_port_postcommit(self, context):\n if self.rpc_handler is None:\n return\n port = self._get_port_info(context)\n if port is not None:\n try:\n self.rpc_handler.update_port(port)\n except:\n pass", "def db_changed(self):\n self.dbstate.db.connect('person-add', self.update)\n self.dbstate.db.connect('person-delete', self.update)\n self.dbstate.db.connect('person-update', self.update)\n self.dbstate.db.connect('family-add', self.update)\n self.dbstate.db.connect('family-delete', self.update)\n self.dbstate.db.connect('family-update', self.update)", "def datachange_notification(self, node, val, data):\n \n logger.debug(\"New data change event. node:{}, value:{}\".format(node, val))\n \n # Sorry about these lines of code, but I don't see any nicer way of determining the port number than from \n # the identifier string. Then splitting it up to isolate the port number.\n # Example \"Status.Port_2.Selected\" is split into ['Status', 'Port_2', 'Selected'] then 'Port_2' is split into \n # ['Port', '2'] and then the '2' is turned into an intiger.\n path_list = str(node.nodeid.Identifier).split(\".\")\n\n # We can safely assume that the last term is the tag that updated.\n tag = path_list[-1] \n \n # Figure out the port number\n port_number = None\n if 'Port' in path_list[1]:\n port_number = int(path_list[1].split(\"_\")[-1]) \n \n \"\"\" Switch for each possible tag\"\"\"\n # If the command tag \"Select\" changes go select that port with the instructions saved in the command tag. \n if tag == 'Select' and port_number:\n if val == True:\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Instructions\".format(port_number))\n instructions = node.get_value()\n self._pbl.select_port(port_number, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Select\".format(port_number))\n node.set_value(False)\n \n elif tag == 'Deselect' and port_number:\n if val == True:\n self._pbl.deselect_port(port_number, work_finished=True)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.Port_{}.Deselect\".format(port_number))\n node.set_value(False)\n\n elif tag == 'ContentDisplayName' and port_number:\n self._pbl.set_content_key(port_number,'display_name', str(val))\n elif tag == 'ContentName' and port_number:\n self._pbl.set_content_key(port_number,'name', str(val))\n elif tag == 'ContentDescription' and port_number:\n self._pbl.set_content_key(port_number,'description', str(val))\n elif tag == 'ContentImagePath' and port_number:\n self._pbl.set_content_key(port_number,'image_path', str(val))\n \n elif tag == 'Select' and 'ByContent' in path_list[1]:\n if val == True:\n instructions = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Instructions\").get_value()\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n _, selected_port = self._pbl.select_content(name = name, instructions=instructions)\n # Reset the select flag\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Select\")\n node.set_value(False)\n node = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Result\")\n node.set_value(selected_port)\n\n elif tag == 'Deselect' and 'ByContent' in path_list[1]:\n if val == True:\n name = self.ua_server.get_node(\"ns=2;s=Command.ByContent.Name\").get_value()\n self._pbl.deselect_content(name = name, work_finished=True)\n # Reset the select flag\n node = 
self.ua_server.get_node(\"ns=2;s=Command.ByContent.Deselect\")\n node.set_value(False)", "def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler):\n if not stop_event.is_set():\n (state, _) = sel.select(SELECT_TIMEOUT_MSECS)\n if state == swsscommon.Select.TIMEOUT:\n return\n if state != swsscommon.Select.OBJECT:\n logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')\n return\n for port_tbl in asic_context.keys():\n while True:\n (key, op, fvp) = port_tbl.pop()\n if not key:\n break\n if not validate_port(key):\n continue\n fvp = dict(fvp) if fvp is not None else {}\n if 'index' not in fvp:\n fvp['index'] = '-1'\n port_index = int(fvp['index'])\n port_change_event = None\n if op == swsscommon.SET_COMMAND:\n port_change_event = PortChangeEvent(key,\n port_index,\n asic_context[port_tbl],\n PortChangeEvent.PORT_SET,\n fvp)\n elif op == swsscommon.DEL_COMMAND:\n port_change_event = PortChangeEvent(key,\n port_index,\n asic_context[port_tbl],\n PortChangeEvent.PORT_DEL,\n fvp)\n if port_change_event is not None:\n port_change_event_handler(port_change_event)", "def get_all_port(self, conf, dpid):\n\t\tpass", "def _port_status_handler(self, ev):\n msg = ev.msg\n reason = msg.reason\n port = msg.desc.port_no\n\n ofproto = msg.datapath.ofproto\n if reason == ofproto.OFPPR_ADD:\n self.logger.info(\"port added port=%s\", port)\n elif reason == ofproto.OFPPR_DELETE:\n self.logger.info(\"port deleted port=%s\", port)\n elif reason == ofproto.OFPPR_MODIFY:\n self.logger.info(\"port modified port=%s\", port)\n else:\n self.logger.info(\"Illegal port state port=%s %s\", port, reason)", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def _update_port_handler(self, *args, **kwargs):\n port = kwargs['port']\n orig_port = kwargs['original_port']\n if port['status'] == orig_port['status']:\n return # Change not relevant\n new_status = n_constants.PORT_STATUS_ACTIVE\n if port['status'] != n_constants.PORT_STATUS_ACTIVE:\n new_status = n_constants.PORT_STATUS_DOWN\n core_plugin = directory.get_plugin()\n for subport_id in self._get_subports_ids(port['id']):\n core_plugin.update_port_status(context.get_admin_context(),\n subport_id, new_status)", "def _on_config_changed(self, _):\n self._configure_pod()", "def setup_logical_port_connectivity(self, context, port_db):\n pass", "def portconfig():\r\n print('''\\n%s at %s acting as user %s\r\n\\nPort Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current port configuration\r\n 2 - Get current port status\r\n 3 - Get current port counters\r\n 4 - Get SFP status\r\n 5 - Change Port Configuration\r\n 6 - Shut Down or Activate Port\r\n 7 - Reset Port Counters\r\n 8 - Back\r\n 9 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n portconfig()\r\n execute = {1: PACKETMASTER.port_config,\r\n 2: PACKETMASTER.port_info,\r\n 3: PACKETMASTER.port_statistics,\r\n 4: PACKETMASTER.sfp_info,\r\n 5: PACKETMASTER.set_port_config_guided,\r\n 6: PACKETMASTER.port_on_off_guided,\r\n 7: PACKETMASTER.reset_port_counters,\r\n 8: hardwareconfig,\r\n 9: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n portconfig()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n portconfig()", "def 
interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n if poll_interval is not None:\n port_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def config_db():", "def port_configure(self,port,**config):\n if not port in self.ports:\n self.ports[port] = {}\n\n for k,v in config.items():\n self.ports[port][k] = v", "def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)", "def servicesChanged(self) -> None:\n ...", "def refreshPorts(self, event):\n logging.debug(\"Refreshing ports.\")\n self.availablePorts = self.controller.getAvailablePorts()\n\n # Delete old dropdown options\n self.portSelector[\"menu\"].delete(0, \"end\")\n for value in self.availablePorts:\n\n def _callback(value=value):\n self.controller.updatePort(value)\n self.serialPortVar.set(value)\n\n self.portSelector[\"menu\"] \\\n .add_command(label=value,\n command=_callback)\n return", "def treat_devices_added_or_updated(self, details):\n device = details['device']\n LOG.debug(\"Processing port: %s\", device)\n # REVISIT(ivar): this is not a public facing API, we will move to\n # the right method once the redesign is complete.\n port = self.bridge_manager.get_vif_port_by_id(device)\n if port:\n gbp_details = details.get('gbp_details')\n trunk_details = details.get('trunk_details')\n neutron_details = details.get('neutron_details')\n if gbp_details and 'port_id' not in gbp_details:\n # The port is dead\n details.pop('port_id', None)\n if (gbp_details and gbp_details.get('host') and\n gbp_details['host'] != self.host):\n self.port_unbound(device)\n return False\n elif neutron_details and 'port_id' in neutron_details:\n LOG.info(\"Port %(device)s updated. 
Details: %(details)s\",\n {'device': device, 'details': details})\n # Inject GBP/Trunk details\n port.gbp_details = gbp_details\n port.trunk_details = trunk_details\n self.treat_vif_port(port, neutron_details['port_id'],\n neutron_details['network_id'],\n neutron_details['network_type'],\n neutron_details['physical_network'],\n neutron_details['admin_state_up'],\n neutron_details['fixed_ips'],\n neutron_details['device_owner'],\n neutron_details['segmentation_id'])\n # update plugin about port status\n if neutron_details.get('admin_state_up'):\n LOG.debug(\"Setting status for %s to UP\", device)\n self.plugin_rpc.update_device_up(\n self.context, device, self.agent_id, self.host)\n else:\n LOG.debug(\"Setting status for %s to DOWN\", device)\n self.plugin_rpc.update_device_down(\n self.context, device, self.agent_id, self.host)\n LOG.info(\"Configuration for device %s completed.\",\n device)\n else:\n LOG.warn(\"Device %s not defined on plugin\", device)\n if port and port.ofport != -1:\n self.port_unbound(port.vif_id)\n return False\n else:\n # The port disappeared and cannot be processed\n LOG.info(\"Port %s was not found on the integration bridge \"\n \"and will therefore not be processed\", device)\n self.port_unbound(device)\n return False\n return True", "def check_port_connections(self):\n all_ports = crest.get_all_ports(self.model)\n influences_to_target = {p: [] for p in all_ports}\n updates_to_target = {p: [] for p in all_ports}\n actions_to_target = {p: [] for p in all_ports}\n\n # fill data stores\n for inf in crest.get_all_influences(self.model):\n influences_to_target[inf.target].append(inf)\n\n for up in crest.get_all_updates(self.model):\n updates_to_target[up.target].append(up)\n\n for action in crest.get_all_actions(self.model):\n actions_to_target[action.target].append(action)\n\n for port in all_ports:\n assert not (len(influences_to_target[port]) > 0 and (\n len(updates_to_target[port]) > 0 or len(actions_to_target[port]) > 0)\n ), f\"There are [influences and (updates or actions)] writing to port {port._name} (entity: {port._parent._name})\"\n\n assert len(influences_to_target[port]) < 2, f\"There are two influences writing to {port._name}\"\n\n states = [update.state for update in updates_to_target[port]]\n assert len(states) == len(set(states)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple updates linked to the same state\"\n\n transitions = [action.transition for action in actions_to_target[port]]\n assert len(transitions) == len(set(transitions)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple actions linked to the same transition\"", "def get_port(self, conf, dpid, port_id):\n\t\tpass", "def enable_cable_ports(cid):\n\n SQL.execute('''\n SELECT \n guid,\n port\n FROM \n cable_ports \n WHERE\n cid = ?\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall(): \n if not DISABLE_PORT_STATE_CHANGE: \n ib_mgt.enable_port(int(row['guid']), int(row['port']))", "def update_ports(self):\n \n # fetch only those ports having\n # VID:PID == a valid (VID, PID) pair in target_vid_pid\n ports = []\n\n for valid_pair in self.target_vid_pid:\n vid_pid = valid_pair[0] + ':' + valid_pair[1]\n ports = ports + [p for p in list_ports.grep(vid_pid)]\n #ports = list_ports.comports()\n \n # add new ports to connected_ports\n # and update new_ports\n new_ports = []\n for p in ports:\n if not p in self.connected_ports:\n self.connected_ports.append(p)\n new_ports.append(p)\n\n # remove missing ports from devices_found\n # and update 
removed_ports\n removed_ports = []\n for p in self.connected_ports:\n if not p in ports:\n self.connected_ports.remove(p)\n removed_ports.append(p)\n\n return new_ports, removed_ports", "def _update_port_config(port_config):\n\n # Update network config for port node\n _update_network_config(port_config)\n\n # Update network fixed ips config\n _update_fixed_ips_config(port_config)\n\n # Update security groups config for port node\n _update_security_groups_config(port_config)", "def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)", "def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)", "def updateAvailablePorts(self):\n # Build a port list\n device_list_all = comports()\n self.device_choices = list()\n for device in device_list_all:\n self.device_choices.append(device[0])\n\n if len(self.device_choices) < 1:\n tkinter.messagebox.showerror('No Available Serial Ports','No serial ports are available.')" ]
[ "0.60486573", "0.601688", "0.6003362", "0.5990358", "0.5919747", "0.5897559", "0.5824499", "0.5805411", "0.57939553", "0.5755642", "0.5738589", "0.57356256", "0.56507355", "0.5590035", "0.55844575", "0.5562933", "0.5554786", "0.55488783", "0.5544349", "0.5528714", "0.55249166", "0.55130166", "0.5482734", "0.5482587", "0.5469336", "0.5420505", "0.5391734", "0.537468", "0.5369885", "0.53678507" ]
0.6089049
0
Get port mapping from CONFIG_DB
def get_port_mapping(namespaces): port_mapping = PortMapping() for namespace in namespaces: asic_id = multi_asic.get_asic_index_from_namespace(namespace) config_db = daemon_base.db_connect("CONFIG_DB", namespace=namespace) port_table = swsscommon.Table(config_db, swsscommon.CFG_PORT_TABLE_NAME) for key in port_table.getKeys(): if not validate_port(key): continue _, port_config = port_table.get(key) port_config_dict = dict(port_config) port_change_event = PortChangeEvent(key, port_config_dict['index'], asic_id, PortChangeEvent.PORT_ADD) port_mapping.handle_port_change_event(port_change_event) return port_mapping
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDbPort():\n\n if \"DB_PORT\" in controller.CONF.keys():\n return controller.CONF[\"DB_PORT\"]\n\n return basedefs.DB_PORT", "def db_port(self) -> Optional[int]:\n return pulumi.get(self, \"db_port\")", "def get_all_port(self, conf, dpid):\n\t\tpass", "def get_port_binding():\n import docker\n client = docker.from_env()\n return [c.attrs['NetworkSettings']['Ports']['5555/tcp'][0]\n for c in client.containers.list(\n filters={'label': 'org.label-schema.name=profemag/femag'})]", "def get_port(self, conf, dpid, port_id):\n\t\tpass", "def port_extension_map(self):\n return usb_config.CAMBRIONIX_PORT_MAP[self.model]", "def determine_ports():\n ports = [config('admin-port'), config('service-port')]\n return list(set(ports))", "def port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._primary_port_prop)", "def get_ports_mapping(status=psutil.CONN_LISTEN):\n ports = defaultdict(list)\n\n for process in get_processes():\n try:\n connections = process.connections()\n except psutil.Error:\n continue\n\n if connections:\n for conn in connections:\n if conn.status == status:\n ports[process].append(conn.laddr.port)\n\n return ports", "def connection_configuration_mapping(self, value):\n if value == \"Y\":\n return \"0\"\n elif value == \"D\":\n return \"2\"\n elif value == \"Z\":\n return \"5\"\n else:\n raise ValueError(\"Unknown configuration {}\".format(value))", "def get_ports(self, database_name):\n databases = self.list_databases()\n for d in databases:\n if d['name'] == database_name:\n database_id = d['id']\n break\n else:\n raise ClientError('Could not find database, does not exist.')\n end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'ports', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n return resp.json()", "def port(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"port\")", "def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']", "def _get_port(self):\n return self.__port", "def portmap(ctx, verbose):\n table = \"No portmap rules exist\"\n with Spinner('Looking up port mapping rules'):\n data = ctx.obj.vlab_api.get('/api/1/ipam/portmap').json()['content']\n rules = data['ports']\n gateway_ip = data['gateway_ip']\n header = ['Name', 'Type', 'Port', 'Protocol']\n if verbose:\n header.append('Target IP')\n rows = []\n for conn_port, details in rules.items():\n name = details.get('name', 'Error')\n vm_type = details.get('component', 'Unknown')\n vm_port = details.get('target_port', 0)\n protocol = port_to_protocol(vm_type, vm_port)\n target_ip = details.get('target_addr', 'Unknown')\n if verbose:\n row = [name, vm_type, conn_port, protocol, target_ip]\n else:\n row = [name, vm_type, conn_port, protocol]\n rows.append(row)\n table = tabulate(rows, headers=header, tablefmt='presto', numalign=\"center\")\n click.echo('\\nGateway IP: {}'.format(gateway_ip))\n click.echo(table)", "def port():", "def get_ports(self) -> tuple:\n raise NotImplementedError", "def map_port_info(port, nmap_store):\n nmap_store[\"port_id\"] = port.get(\"portid\")\n nmap_store[\"port_protocol\"] = port.get(\"protocol\")\n map_state_info(port, nmap_store)\n map_service_info(port, nmap_store)\n return nmap_store", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def _translate_port(port):\n services = _get_services_mapping()\n if port in services and services[port][\"port\"]:\n 
return services[port][\"port\"][0]\n return port", "def get_port(self):\n return self.port", "def port_list(self):\n return self._port_list", "def port(self) -> int:", "def _get_exposed_ports(debug_port):\n if not debug_port:\n return None\n\n return {\n # container port : host port\n debug_port: debug_port\n }", "def secondary_port_number(self):\n return self._props[\"persistent_identifiers\"].get(self._secondary_port_prop)", "def get_serverport(cobj):\n pass", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def port(self):\n return self._host[CONF_PORT]", "def get_config_connection():\n\n connection = {'send_time': '5',\n 'address': 'localhost',\n 'port': '5672',\n 'flask_port': '500'}\n\n return connection", "def get_config_db():\n\n datab = {'db_name': 'database_name',\n 'db_url': 'database_url'}\n\n return datab" ]
[ "0.6964796", "0.6683577", "0.653418", "0.65336806", "0.6416702", "0.628645", "0.6283446", "0.6275742", "0.6270378", "0.6266905", "0.62222916", "0.6177558", "0.60762554", "0.60760283", "0.597032", "0.59239864", "0.5915565", "0.59000075", "0.58872294", "0.5862604", "0.5836161", "0.58315146", "0.58273274", "0.58240616", "0.5812932", "0.5805191", "0.58002853", "0.578504", "0.5781893", "0.5761348" ]
0.6833719
1
Add a class that doesn't descend from Pickleable to the pickle whitelist
def addClassToPickleWhitelist(cls): unpickleWhitelist_.add(cls)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_class(self, module, name):\n raise pickle.UnpicklingError(\"global '%s.%s' is forbidden\" %\n (module, name))", "def test__pickle_unpickle(self):\n pass", "def __reduce__(self) -> NoReturn:\n raise TypeError(\n \"can't pickle {} objects\".format(self.__class__.__name__)\n )", "def not_discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being not discoverable.\n setattr(_class, _get_discoverable_attribute(_class), False)\n return _class", "def class_exts(cls):\n return set()", "def record_class_examined(self, cls):\n serialized = self.serialize_type(cls)\n if serialized is not None:\n self.classes_examined.add(serialized)", "def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result", "def __reduce_ex__(self, protocol):\n return (_safe_pickle_load, (self.__module__, self.__class__.__name__, self.name))", "def addBanClass(x:ResidueDict)->ResidueDict:\n banClass:str = run(matchStrandToClass(x.struct,x.strand_id))\n x.banClass = banClass\n return x", "def _record_unpatched_classes():\n # type: () -> Dict[str, type]\n installed_packages = _get_installed_modules()\n\n original_classes = {}\n\n for package, orig_path in CLASSES_TO_INSTRUMENT.items():\n if package in installed_packages:\n try:\n original_cls = _import_by_path(orig_path)\n except (AttributeError, ImportError):\n logger.debug(\"[OTel] Failed to import %s\", orig_path)\n continue\n\n original_classes[package] = original_cls\n\n return original_classes", "def try_pickle_dumps(obj):\n try:\n return cloudpickle.dumps(obj)\n except Exception:\n pass\n\n try:\n return pickle.dumps(obj)\n except Exception:\n raise", "def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))", "def add_managee(self, **saveable_classes):\n check_compliance(saveable_classes)\n for name in saveable_classes:\n if name in self.__dict__:\n logging.warning(\"Attribute of SavableCollection {} already \"\n \"exists, will be replaced\".format(name))\n\n self.__dict__.update(saveable_classes)", "def register(self, klass):\n if klass not in self.extensions:\n self.extensions.append(klass)", "def opaque_class(self, classobj):\n self.restrict_class(classobj, None)", "def register_serializer(cls, *, serializer, deserializer):\n context = ray.worker.global_worker.get_serialization_context()\n context._register_cloudpickle_serializer(cls, serializer, deserializer)", "def add_base_classes(x, newclasses):\n bases = list(x.__class__.__bases__)\n if bases[0] is object:\n bases[0] = x.__class__\n if any(x in bases for x in newclasses):\n raise PermitError(\"Cannot insert duplicate classes.\")\n bases = bases + newclasses\n x.__class__ = type(x.__class__.__name__, tuple(bases), x.__dict__)\n return newclasses", "def load_objects(self):\n \n # Load classifier\n with open('../twitterClass/classifier/classifier.p','r') as f:\n self.classifier = cPickle.load(f)\n \n #Load blocked keywords\n regex_str2 = []\n with open('../twitterClass/twitterMiningClass/private/blocked_keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n print key\n regex_str2.append(key[1])\n # create regex compiler for blocked keyword search\n regex_str2 = map(lambda x: 
x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str2)\n self.blocked_keywords_re = re.compile(r'('+'|'.join(regex_str2)+')',re.IGNORECASE)\n \n # Load keywords\n with open('../twitterClass/twitterMiningClass/private/keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n self.keywords[key[0]] = key[1]\n # create regex compiler for keyword search\n regex_str = []\n for keys,pattern in self.keywords.iteritems():\n regex_str.append(pattern)\n regex_str = map(lambda x: x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str)\n self.keywords_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)", "def __init_subclass__(cls):\n available_storages.append({\n \"name\": cls.__name__,\n \"extensions\": cls.extensions,\n \"storage\": cls,\n })", "def restrict_class(self, classobj, vars=None):\n if vars == None: vars = []\n self.instance_vars[classobj] = vars", "def discoverable(_class):\n\n # Set the attribute to the class name, to prevent subclasses from also\n # being discoverable.\n setattr(_class, _get_discoverable_attribute(_class), True)\n return _class", "def __add__(self, other: '__class__') -> '__class__':", "def register_class(obj):\r\n try:\r\n KnownClass.objects.get(module_name=obj.__module__, class_name=obj.__class__.__name__)\r\n except DoesNotExist:\r\n # Create it\r\n KnownClass(module_name = obj.__module__, class_name = obj.__class__.__name__).save()", "def drop_class(self, cls, ignore_instances=False):\n if ignore_instances:\n self.client.command(\n 'DROP CLASS {} UNSAFE'.format(cls.registry_name))\n else:\n self.client.command(\n 'DROP CLASS {}'.format(cls.registry_name))", "def extension(klass):\n registry.register(klass)\n return klass", "def _register_subclasses(cls):\n cls._format_to_serializer = {}\n cls._extension_to_serializer = {}\n subclasses = collections.deque(cls.__subclasses__())\n while subclasses:\n subclass = subclasses.popleft()\n if subclass.format is not None:\n cls._format_to_serializer[subclass.format] = subclass\n if subclass.extension is not None:\n cls._extension_to_serializer[subclass.extension] = subclass\n subclasses.extend(subclass.__subclasses__())", "def register(cls):\n if not issubclass(cls, Fuzzer):\n raise TypeError(\"Expecting a Fuzzer, not '%s'\" % type(cls))\n _registered.append(cls)", "def register_classes():\n CoaddSplit.register_class()\n CoaddSplit_SG.register_class()", "def test_drop_class(self, excl, value):\n e = exclude(*excl)\n assert e(fields(C).a, value) is False", "def process_class_list(self, module, classes):" ]
[ "0.5848995", "0.5714878", "0.5440713", "0.5331448", "0.53248143", "0.5274852", "0.5263136", "0.5262969", "0.5250038", "0.52005446", "0.5171152", "0.515845", "0.5151149", "0.51253116", "0.51061636", "0.51053673", "0.5073575", "0.5070191", "0.5066985", "0.5048813", "0.50263953", "0.50203323", "0.5012834", "0.5005724", "0.4989039", "0.49778858", "0.49598557", "0.4926569", "0.49202478", "0.49189088" ]
0.8570413
0
Recursively searches for 'datacube-definition.prj' in a level2 directory and returns its parent directory.
def _get_datacubeprj_dir(directory): prj_path = [] for path in Path(directory).rglob('datacube-definition.prj'): prj_path.append(path) if len(prj_path) < 1: raise FileNotFoundError(f"'datacube-definition.prj' not found in {directory}") elif len(prj_path) > 1: raise RuntimeError(f"'datacube-definition.prj' multiple copies found in {directory}") else: return prj_path[0].parent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_parent_path(self):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"genes\")", "def find_in_parent_dir(fname):\n p = os.path.abspath(os.path.curdir)\n \n while not os.path.exists(os.path.join(p, project_conf_name)):\n oldp, p = p, os.path.dirname(p)\n if p == oldp:\n return None\n \n return open(os.path.join(p, project_conf_name), 'r')", "def test_find_in_grandparent_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake', 'fake')\n result = steptest.find_project_directory(subdirectory)\n self.assertEqual(directory, result)", "def search_parents(name, cwd):\n for pdir in parents(cwd):\n if name in os.listdir(pdir):\n return os.path.join(pdir, name)\n\n return None", "def test_find_in_parent_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake')\n result = steptest.find_project_directory(subdirectory)\n self.assertEqual(directory, result)", "def get_path(path, parent=None, prj=None):\n if prj is None:\n prj = QgsProject.instance()\n\n if parent is None:\n parent = prj.layerTreeRoot()\n\n if path is None:\n return parent\n if not isinstance(path, (list, tuple)):\n path = path.split(\"/\")\n\n for part in path:\n if len(path) > 0:\n parent = get_group(part, parent)\n\n return parent", "def project_path(cur_path=''):\n if not cur_path:\n cur_path = __file__\n real_path = os.path.realpath(cur_path)\n # path of upper-level directory\n upper_folder = os.path.split(real_path)[0]\n # path of topmost-level directory (trunk)\n return os.path.split(upper_folder)[0]", "def getProjectDir(self):\n logger.debug(\"Func: getProjectDir\")\n return -1", "def _loadSubprojects(self):\n logger.debug(\"Func: _loadSubprojects\")\n\n if not os.path.isfile(self._pathsDict[\"subprojectsFile\"]):\n data = [\"None\"]\n self._dumpJson(data, self._pathsDict[\"subprojectsFile\"])\n else:\n data = self._loadJson(self._pathsDict[\"subprojectsFile\"])\n if data == -2:\n return -2\n return data", "def have_ebuild_dir(path, maxdepth=3):\n\tstack = [(normalize_path(path), 1)]\n\twhile stack:\n\t\tpath, depth = stack.pop()\n\t\tbasename = os.path.basename(path)\n\t\ttry:\n\t\t\tlistdir = os.listdir(path)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\tfor filename in listdir:\n\t\t\tabs_filename = os.path.join(path, filename)\n\t\t\ttry:\n\t\t\t\tst = os.stat(abs_filename)\n\t\t\texcept OSError:\n\t\t\t\tcontinue\n\t\t\tif stat.S_ISDIR(st.st_mode):\n\t\t\t\tif depth < maxdepth:\n\t\t\t\t\tstack.append((abs_filename, depth + 1))\n\t\t\telif stat.S_ISREG(st.st_mode):\n\t\t\t\tif filename.endswith(\".ebuild\") and \\\n\t\t\t\t\tfilename.startswith(basename + \"-\"):\n\t\t\t\t\treturn os.path.dirname(os.path.dirname(path))", "def test_level2_recursion(self):\n recursed = recurse_files('filename2', self.files['filename2'], self.files)\n self.assertEqual(recursed, [\"file7\", \"file2\", \"file3\", \"file6\"])", "def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]", "def _parent_path(pkg, pkg_path):\n parent = pkg_path[: -len(pkg)] if pkg_path.endswith(pkg) else pkg_path\n return parent.rstrip(\"/\" + os.sep)", "def get_parent_dir(path):\n return os.path.dirname(path)", "def getParentDirectory():\n path = os.path.dirname(os.path.realpath(__file__))\n path = '/'.join( path.split('/')[:-1] )\n return path", "def getProjectRoot(self):\n currentPath = os.getcwd()\n while(True):\n if \"DataStore\" in os.listdir(currentPath):\n break\n currentPath = 
\"/\".join(currentPath.split(\"/\")[:-1])\n return currentPath + \"/\"", "def get_parent_dir(path):\n\n return os.path.abspath(os.path.join(path, os.pardir))", "def get_level_path(target_level, cwd=None):\n if cwd is None:\n cwd = os.getwd()\n q = \"\"\n for ll in levels:\n q = os.path.join(q, get_level_name(ll, cwd))\n if ll == target_level:\n break\n return q", "def get_parent_until(path):\r\n dirname = osp.dirname(path)\r\n try:\r\n mod = osp.basename(path)\r\n mod = osp.splitext(mod)[0]\r\n imp.find_module(mod, [dirname])\r\n except ImportError:\r\n return\r\n items = [mod]\r\n while 1:\r\n items.append(osp.basename(dirname))\r\n try:\r\n dirname = osp.dirname(dirname)\r\n imp.find_module('__init__', [dirname + os.sep])\r\n except ImportError:\r\n break\r\n return '.'.join(reversed(items))", "def _find_project_by_import():\n try:\n import _databand_project\n\n return abs_join(_databand_project.__file__, \"..\")\n except ImportError:\n dbnd_log_init_msg(\"Can't import `_databand_project` marker.\")\n return None", "def _get_project_dir(self):\n return os.path.expanduser(\n self.sqlfluff_config.get_section(\n (self.templater_selector, self.name, \"project_dir\")\n )\n or os.getcwd()\n )", "def _find_root() -> pathlib.Path:\n cwd = pathlib.Path.cwd()\n while not (\n pathlib.Path(cwd, \"pyproject.toml\").exists() or\n pathlib.Path(cwd, \"poetry.lock\").exists() or\n pathlib.Path(\"/\") == cwd\n ):\n cwd = cwd.parent\n return cwd", "def _find_bids_root(dataset_path) -> Path:\n description_paths = list(Path(dataset_path).glob(\"**/dataset_description.json\"))\n # 1 - if more than one, select first and output warning\n # 2 - if zero, output error\n # 3 - if 1, add to dataset path and set ats bids root dir\n if len(description_paths) == 0:\n msg = (\"The file 'dataset_description.json' should be part of the BIDS dataset \"\n \"in order for the 'bids_dataset' extractor to function correctly\")\n raise FileNotFoundError(msg)\n elif len(description_paths) > 1:\n msg = (f\"Multiple 'dataset_description.json' files ({len(description_paths)}) \"\n f\"were found in the recursive filetree of {dataset_path}, selecting \"\n \"first path.\")\n lgr.warning(msg)\n return Path(description_paths[0]).parent\n else:\n return Path(description_paths[0]).parent", "def resolve_level(target_level, cwd=None):\n if cwd is None:\n cwd = os.getcwd()\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n target_idx = levels.index(target_level)\n pl = [\".\"]\n for i in range(0, this_idx - target_idx):\n pl.append(\"..\")\n return os.path.join(*pl)", "def get_project_root():\n # Get current working directory\n cwd = os.getcwd()\n # Remove all children directories\n rd = os.path.join(cwd.split('stochastic-travel-demand-modelling/', 1)[0])\n # Make sure directory ends with project's name\n if not rd.endswith('stochastic-travel-demand-modelling'):\n rd = os.path.join(rd,'stochastic-travel-demand-modelling/')\n\n return rd", "def parentOrThisDir(path):\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n return path", "def find_diagnostics_dir(cube, image):\n return find_subdir(cube, image, 'diagnostics')", "def get_project_dir():\n path = Path(__file__).parent.parent\n project_dir = path.parent\n return project_dir", "def findDirAbove(dirName):\n workDir = \"\"\n for i in range(20):\n path = os.path.join(workDir, dirName)\n if os.path.exists(path):\n return os.path.abspath(path)\n workDir = os.path.join(workDir, \"..\")\n\n return None", "def find_build(branch, tag, build_path=None, 
old_build_path=None):\n for directory_format in [build_path, old_build_path]:\n if directory_format is None:\n continue\n loc = directory_format % (branch, tag)\n if isdir(loc):\n return loc\n raise BuildNotFound(branch, tag)" ]
[ "0.59310746", "0.5910224", "0.5907544", "0.56730086", "0.567043", "0.54829234", "0.5468787", "0.5349158", "0.53045815", "0.52922255", "0.52142084", "0.52010685", "0.519746", "0.51958925", "0.51642823", "0.5158747", "0.51506376", "0.51362514", "0.5133889", "0.51276433", "0.511751", "0.5075891", "0.50754297", "0.5072611", "0.5060326", "0.5051632", "0.50437516", "0.50249946", "0.5024484", "0.49995455" ]
0.72337234
0
Create a vocabulary from the training directory and return a sorted vocabulary list
def create_vocabulary(directory, cutoff): top_level = os.listdir(directory) a = cutoff vocab = {} for d in top_level: subdir = d if d[-1] == '/' else d+'/' files = os.listdir(directory+subdir) for f in files: with open(directory+subdir+f,'r', encoding="utf-8") as doc: for word in doc: word = word.strip() if not word in vocab and len(word) > 0: vocab[word] = 1 elif len(word) > 0: vocab[word] += 1 return sorted([word for word in vocab if vocab[word] >= cutoff])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_vocabulary(vocabulary_path, json_vocab_path):\n if not gfile.Exists(vocabulary_path):\n print(\"Transform vocabulary to %s\" % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode=\"rb\") as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", "def create_vocabulary(vocabulary_path, data_paths, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n vocab = {}\n files = []\n files += [data_paths+f for f in os.listdir(data_paths) ]\n for one_file in files:\n with gfile.GFile(one_file, mode=\"rb\") as f:\n review = f.read()\n tokens = tokenizer(review) if tokenizer else character_tokenizer(review)\n for w in tqdm(tokens):\n word = _DIGIT_RE.sub(b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary already created.\")", "def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict", "def build_vocab(train_dir, vocab_dir, vocab_size=5000):\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')", "def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing file: %s\", input_file)\n for sentence in tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = 
os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def create_vocabulary(vocabulary_path, words, max_vocabulary_size, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s with max size %d\" % (vocabulary_path, max_vocabulary_size))\n vocab = {}\n counter = 0\n for w in words:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing word %d = %s\" % (counter, w))\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n print(\"vocab too big\")\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")", 
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True,\n _DIGIT_RE=re.compile(br\"\\d\"),\n _START_VOCAB=[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary %s from data %s exists\" % (vocabulary_path, data_path))", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def 
initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def generate_vocabulary():\n stop_words = load_stop_words()\n words = ' '.join(generate_corpus()).split()\n print(len(words))\n vocabulary = {}\n for word in words:\n if word in stop_words:\n continue\n if word in vocabulary.keys():\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n vocabulary = dict(sorted(vocabulary.items(), key=lambda x: x[1], reverse=True))\n return vocabulary", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])", "def initialize_vocabulary(vocabulary_path):\n if os.path.exists(vocabulary_path):\n rev_vocab = []\n with codecs_open(vocabulary_path, \"rb\", encoding=\"utf-8\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def initialize_vocabulary(self,vocabulary_path):\n if tf.gfile.Exists(vocabulary_path):\n vocab = corpora.Dictionary.load(vocabulary_path)\n print(\"vocab length: \",len(vocab.token2id))\n\n return vocab.token2id, vocab.token2id.keys()\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def compute_vocabulary(root_path: str) -> list:\n vocab = list()\n scenario_folders = [os.path.join(root_path, f) for f in os.listdir(root_path) if isdir(os.path.join(root_path, f))]\n for scenario_folder in scenario_folders:\n # Compute the path for the scenario folder\n files = [os.path.join(scenario_folder, f) for f in os.listdir(scenario_folder) 
if\n isfile(os.path.join(scenario_folder, f))]\n for file in files:\n file_vocab = parse_vocabulary_from_file(file)\n vocab = vocab + file_vocab\n return unique(vocab)", "def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def initialize_vocabulary(vocabulary_path):\n characters_class = 9999\n\n if os.path.exists(vocabulary_path):\n with codecs.open(vocabulary_path, 'r', encoding='utf-8') as voc_file:\n rev_vocab = [line.strip() for line in voc_file]\n\n vocab = {x: y for (y, x) in enumerate(rev_vocab)}\n\n reserved_char_size = characters_class - len(rev_vocab)\n if reserved_char_size < 0:\n raise ValueError(\"Number of characters in vocabulary is equal or larger than config.characters_class\")\n\n for _ in range(reserved_char_size):\n rev_vocab.append('')\n\n # put space at the last position\n vocab[' '] = len(rev_vocab)\n rev_vocab.append(' ')\n return vocab, rev_vocab\n\n raise ValueError(\"Initializing vocabulary ends: %s\" % vocabulary_path)", "def trainingsVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.training',\n 'sort_on':'sortable_title',\n 'review_state':'confirmed'}\n trainings = ct.searchResults(**dictSearch)\n trainings = [SimpleTerm(b.UID,b.UID,b.Title) for b in trainings]\n return SimpleVocabulary(trainings)", "def create_vocab(data_files, vocab_fname):\n chars = set()\n for data_fname in data_files:\n with io.open(data_fname, 'r', encoding='utf8') as fp:\n raw = fp.read().lower()\n chars.update(raw)\n\n vocab = list(chars - set(['\\t', '\\n'])) + SPECIALS\n tf.logging.info('Creating vocab file..')\n with io.open(vocab_fname, 'w', encoding='utf8') as fp:\n fp.write('\\n'.join(vocab))" ]
[ "0.72303003", "0.7164037", "0.71459424", "0.7054131", "0.7033178", "0.700483", "0.68919396", "0.68904585", "0.68769383", "0.6866679", "0.6859989", "0.6854887", "0.6839105", "0.6836058", "0.68341655", "0.6810794", "0.6810794", "0.6805193", "0.6802589", "0.6783411", "0.6731821", "0.6689364", "0.6675953", "0.66615444", "0.6661086", "0.6641328", "0.65732366", "0.65720415", "0.6559415", "0.6511555" ]
0.7643122
0
return the class conditional probability of label over all words, with smoothing
def p_word_given_label(vocab, training_data, label):
    smooth = 1 # smoothing factor
    word_prob = {}
    # TODO: add your code here
    total_word = 0

    word_prob[None] = 0
    for dic in training_data:
        for index0, i0 in enumerate(dic['bow']):
            if (list(dic['bow'])[index0] in word_prob):
                continue;
            word_prob[list(dic['bow'])[index0]] = 0
        #word_prob[None] = 0

        if(dic["label"] == label):
            for index, i in enumerate(dic["bow"]):
                if(list(dic['bow'])[index] in vocab):
                    if(list(dic['bow'])[index] in word_prob):
                        word_prob[list(dic['bow'])[index]] += dic["bow"][i]
                    else:
                        word_prob[list(dic['bow'])[index]] = dic["bow"][i]
                else:
                    if(None in word_prob):
                        word_prob[None] += dic["bow"][i]
                    else:
                        word_prob[None] = 0
                total_word += dic["bow"][i]

    #word_prob [None] = 5
    for h in word_prob:
        word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1)))

    return word_prob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def predict(self, X):\n words = X.split()\n chance = []\n for cur_label in self.model[\"labels\"]:\n probability = self.model[\"labels\"][cur_label][\"probability\"]\n total_grade = math.log(probability, math.e)\n for word in words:\n word_dict = self.model[\"words\"].get(word, None)\n if word_dict:\n total_grade += math.log(word_dict[cur_label], math.e)\n chance.append((total_grade, cur_label))\n _, prediction = max(chance)\n return prediction", "def predict(self, sentence, smoothing=None):\n words = sentence.split()\n words.append(\"STOP\")\n probability = 1.0\n\n words = [self.START_SYMBOL, self.START_SYMBOL] + words\n ###################\n # Compute the probability of a sentence under the trigram model\n # p(x1,..,xn)= \\prod {q(x_i| x_{i-2}x_{i-1}}\n for i in xrange(len(words)-2):\n probability *= self.trigram_prob(words[i], words[i+1], words[i+2])\n\n return probability", "def get_ngram_prob(self, label_seq):\n curr_ngram = self.all_grams\n for i in range(0, len(label_seq)):\n label = label_seq[i]\n if i == len(label_seq) - 1:\n denom = curr_ngram.get_count() + self.SMOOTHING_VALUE * 9\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # For smoothing, just add self.SMOOTHING_VALUE\n numer = curr_ngram.get_count() + self.SMOOTHING_VALUE\n return float(numer) / denom", "def classify(self, sText):\n words = self.tokenize(sText)\n #print \"words here, \", words\n words = [s.lower() for s in words]\n words = set(words)\n #words = set(words) potentially bring this back in here again.\n\n num_docs = self.freq_dist['num_good'] + self.freq_dist['num_bad']\n prob_neg = 0\n prob_pos = 0\n\n for word in words:\n if word in self.freq_dist['freq_dist']:\n prob_neg += math.log((self.freq_dist['freq_dist'][word]['bad']+1) / 0.175)\n prob_pos += math.log((self.freq_dist['freq_dist'][word]['good']+1) / 0.825)\n\n prob_pos = abs(prob_pos)\n prob_neg = abs(prob_neg)\n if prob_pos > prob_neg:\n return 1\n return 0", "def classify(self, sText):\n threshold = .1\n posCount = float(sum(self.posFreqDict.itervalues()))\n negCount = float(sum(self.negFreqDict.itervalues()))\n negProbability=0.0\n posProbability=0.0\n for word in self.tokenize(sText):\n if word in self.posFreqDict:\n posProbability+= log10(float( (1.0+float(self.posFreqDict[word]))/posCount))\n else:\n posProbability+=log10(float(1.0/posCount))\n if word in self.negFreqDict:\n negProbability+= log10(float( (1.0+float(self.negFreqDict[word]))/negCount))\n else:\n negProbability+= log10(float(1.0/negCount))\n if abs(posProbability-negProbability)< .1 :\n return \"neutral\"\n elif posProbability>negProbability:\n return \"positive\"\n else:\n return \"negative\"", "def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}", "def prior(training_data, label_list):\n\n smooth = 1 # smoothing factor\n logprob = {}\n # TODO: add your code here\n numfile1 = 0\n numfile2 = 0\n for dic in training_data:\n if(dic[\"label\"] == 
label_list[0]):\n numfile1 += 1\n elif(dic[\"label\"] == label_list[1]):\n numfile2 += 1\n numtotal = numfile1 + numfile2\n\n prob1 = (numfile1+smooth)/(numtotal+2)\n prob2 = (numfile2 + smooth) / (numtotal + 2)\n\n logprob[label_list[0]] = math.log(prob1)\n logprob[label_list[1]] = math.log(prob2)\n\n\n return logprob", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def classify_spam(sms):\n return naive_bayes_predict(spam_ratio, words, spamicity, sms) > seuil", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def classify(words, all_tags):\n answer = []\n for word in words:\n label, score = clf_base.predict({word:1},weights,list(all_tags))\n answer.append(label)\n return answer", "def most_probable_class(text, weights):\n\n pos_weights = weights['positive']\n neg_weights = weights['negative']\n neu_weights = weights['neutral']\n features = calculate_features(text)\n pos_numerator = 0.0\n neg_numerator = 0.0\n neu_numerator = 0.0\n denominator = 0.0\n for f in features:\n if f in pos_weights and f in neg_weights and f in neu_weights:\n pos_numerator += pos_weights[f] * features[f]\n neg_numerator += neg_weights[f] * features[f]\n neu_numerator += neu_weights[f] * features[f]\n denominator += pos_numerator + neg_numerator + neu_numerator\n else:\n pos_numerator += 0\n neg_numerator += 0\n neu_numerator += 0\n denominator += pos_numerator + neg_numerator + neu_numerator\n\n pos_prob = (\"positive\", exp(pos_numerator))# /exp(denominator))\n neg_prob = (\"negative\", exp(neg_numerator))# /exp(denominator))\n neu_prob = (\"neutral\", exp(neu_numerator))# /exp(denominator))\n return max(neu_prob, neg_prob, pos_prob, key=lambda x: x[1])", "def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n 
denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def compute_propability(word, label, dict):\n return dict[label][word] / sum(dict[label].values())", "def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def classify_message(message_words, ham_l, spam_l):\n data_ham_words, data_spam_words = train_function(ham_l, spam_l)\n message_unique_words = set(message_words)\n message_ham_words, message_spam_words = [], []\n for word in message_unique_words:\n if word in data_ham_words:\n message_ham_words.append(word)\n if word in data_spam_words:\n message_spam_words.append(word)\n probability_ham = ((len(ham_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_ham_words, data_ham_words)\n probability_spam = ((len(spam_l)) / (len(ham_l) + len(spam_l))) + get_message_probability(message_spam_words, data_spam_words)\n print(probability_ham, probability_spam)\n if probability_ham > probability_spam:\n return \"This letter is ham.\"\n else:\n return \"This letter is spam.\"", "def get_lexical_generation_prob(self, word, label):\n word = word.lower()\n numer = self.SMOOTHING_VALUE\n if word in self.words_labels_counts[label] and self.words_labels_counts[label][word] != 0:\n numer += self.words_labels_counts[label][word]\n elif word in self.words_labels_counts[label]:\n numer += self.words_labels_counts[label][self.UNKNOWN_TOKEN]\n denom = 
self.label_counts[label] + self.SMOOTHING_VALUE * self.all_grams.get_count()\n return float(numer) / denom", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def test_lcwa_label_smoothing(self):\n # Create dummy dense labels\n labels = torch.zeros(self.batch_size, self.num_entities)\n for i in range(self.batch_size):\n labels[i, self.random.randint(self.num_entities)] = 1.0\n # Check if labels form a probability distribution\n np.testing.assert_allclose(torch.sum(labels, dim=1).numpy(), 1.0)\n\n # Apply label smoothing\n smooth_labels = apply_label_smoothing(labels=labels, epsilon=self.epsilon, num_classes=self.num_entities)\n # Check if smooth labels form probability distribution\n np.testing.assert_allclose(torch.sum(smooth_labels, dim=1).numpy(), 1.0, rtol=self.relative_tolerance)", "def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observation[i]\n\t\t\t\tpossiblity *= self.gaussians_calc(value, mean, std)\n\t\t\tif p_max < possiblity:\n\t\t\t\tp_max = possiblity\n\t\t\t\tpredict = label\n\n\t\treturn predict", "def predict_next(self, seq):\n context = tuple(seq[-2:]) # last two words\n pc = self.probas[context] # conditional distribution\n words, probs = zip(*pc.items()) # convert to list\n return np.random.choice(words, p=probs)", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def sample_labels(self, y, num_of_sents = 5, num_of_samples = 10,\n num_of_classes = 3, start_index = 5, get_prob = True):\n classes = self.classes_()\n ret = []\n for sent in y[:num_of_sents]:\n cur = []\n for word in sent[start_index: start_index + num_of_samples]:\n sorted_prob = am(word)\n cur.append([(classes[ind], word[ind]) if get_prob else classes[ind]\n for ind in sorted_prob[:num_of_classes]])\n ret.append(cur)\n return ret", "def label_smoothing_regularization(self, chars_labels, weight=0.1):\n one_hot_labels = tf.one_hot(\n chars_labels, depth=self.num_char_classes, axis=-1)\n pos_weight = 1.0 - weight\n neg_weight = weight / self.num_char_classes\n return one_hot_labels * pos_weight + neg_weight" ]
[ "0.7158563", "0.68635947", "0.6840666", "0.66153693", "0.6609253", "0.6576637", "0.6564863", "0.653211", "0.6521681", "0.6498006", "0.6471072", "0.640781", "0.63957083", "0.6352886", "0.6348521", "0.6348173", "0.634401", "0.634401", "0.6340316", "0.63331866", "0.6293823", "0.62921864", "0.6284099", "0.62624526", "0.6261897", "0.6255573", "0.62498116", "0.62477356", "0.62358195", "0.6231648" ]
0.7195733
0
Find all pairs of unique indices which form a palindrome.
def palindromePairs(lst):
    results = []
    for i, e1 in enumerate(lst):
        for j, e2 in enumerate(lst):
            if i != j:
                if isPalindrome(e1+e2):
                    results.append((i, j))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def palindromePairs(self, words: List[str]) -> List[List[int]]:\n d = {w : i for i, w in enumerate(words)}\n \n res = []\n for idx, word in enumerate(words):\n for i in range(len(word)+1):\n str1 = word[:i]\n str2 = word[i:]\n # first part should be palindrome, second part (reverse) should be in w\n if str1 == str1[::-1]:\n back = str2[::-1]\n if back in d and back != word:\n res.append([d[str2[::-1]], idx])\n # second part should be palindrome, first part (reverse) should be in w\n if str2 and str2 == str2[::-1]: # if the last part is empty, it is calculated before \n back = str1[::-1]\n if back in d and back != word: \n res.append([idx, d[str1[::-1]]])\n # print(res)\n return res", "def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n", "def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal", "def find_palindromes(self):\n\t\tself.square_palindromes = [x for x in self.squares if self.is_palindrome(x)]", "def is_palindrome_v3(s):\n i = 0\n j = len(s)-1\n\n while i < j and s[i] == s[j]:\n i = i + 1\n j = j -1\n\n return j <= i", "def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True", "def palindrom_permutation(string: str):\n string = re.sub(r'\\W+', '', string.lower())\n\n chars = dict()\n for c in string:\n chars[c] = chars[c] + 1 if c in chars else 1\n\n almost_not_okey = False\n for val in chars.values():\n if val % 2 == 1:\n if not almost_not_okey:\n almost_not_okey = True\n else:\n return False\n\n if almost_not_okey:\n return len(string) % 2 == 1\n return True", "def palindromePermutation(s):\n char_count = {}\n for character in s:\n if character == ' ': continue # skip the spaces.\n char_count[character] = char_count.get(character, 0) + 1\n\n odd = False\n for key in char_count:\n if char_count[key] % 2 != 0:\n if odd:\n return False\n odd = True\n\n return True \n\n # Time Complexity: O(n)\n # Space Complexity: O(m), where m is the number of unique characters", "def palindromes(n: int) -> int:\n # 1 -> 2 -> 3 ... 9 -> 11 -> 22 -> 33 -> 44 .. 99 -> 101\n # 101 -> 111 -> 121 -> 131 -> ... 
-> 191 -> 202 -> 212\n # 989 -> 999 -> 1001 -> 1111 -> 1221\n # 9889 -> 9999 -> 10001 -> 10101 -> 10201\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]])) # join '12' with '21'\n else:\n pal = int(''.join([s, s[-2::-1]])) # join '12' with '1'\n if prev <= pal:\n yield pal\n \n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even: n //= 10\n s = str(n)", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def palindrome():\n c = 0\n d = ''\n e = 0\n f = 0\n g = 0\n for a in range(100, 1000):\n for b in range(100, 1000):\n c = a * b\n d = str(c)\n if d == d[::-1] and c > e:\n e = c\n f = a\n g = b\n return e", "def num_palindrome():\n nums = map(str, range(1000000))\n odo = []\n for i in range(len(nums)):\n if len(nums[i]) < 6:\n odo.append('0'*(6-len(nums[i])) + nums[i])\n elif len(nums[i]) == 6:\n odo.append(nums[i])\n \n for i in range(len(odo)-3): \n first = odo[i][2:] == odo[i][:1:-1]\n second = odo[i+1][1:] == odo[i+1][:0:-1]\n third = odo[i+2][1:5] == odo[i+2][4:0:-1]\n fourth = odo[i+3][:] == odo[i+3][::-1]\n if first & second & third & fourth:\n print 'A possible odometer reading is '+odo[i]", "def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]", "def repeated_palindrome(palindromes_list):\n # the list is ordered in the reversed form (long to short)\n ordered_palindrome = sorted(palindromes_list)\n longest_first = ordered_palindrome[::-1]\n # initialize a new list to receive unique plaindromes data\n pal_list = [longest_first[0]]\n # the longest palindrome cannot fit in any other sequence \n # iterates over the longest_first original palindromes\n # get the start and end positions \n for data in longest_first:\n start = data[1]\n end = start + data[0]\n # iterate through the pal_list and \n # compare the start and end of the potential and palindromes \n # to check if the potential palindrome is unique.\n unique_palindrome = None\n for dat in pal_list:\n start_unique = dat[1]\n end_unique = start_unique + dat[0]\n # statement should test to check if the test palindrome fits\n # inside any of the identified 'real/unique' palindromes.\n if start >= start_unique and end <= end_unique:\n # if the palindrome tested fits inside\n unique_palindrome = False\n break\n else:\n # other wise it is unique\n unique_palindrome = True\n if unique_palindrome:\n # check if if it is not in the list\n if data not in pal_list:\n pal_list += [data]\n return pal_list", "def get_palindromes(kmer_list):\n rev_kmers = [get_reverse_complement(kmer) for kmer in kmer_list]\n palindromes = set()\n for mer1, mer2 in zip(kmer_list, rev_kmers):\n if check_is_palindrome(mer1, mer2):\n palindromes.add(mer1)\n return palindromes", "def check_palindrome():", "def isPalindrome(string):\n for i in range(len(string)//2):\n 
if string[i] != string[(i*-1)-1]:\n return False\n return True", "def countPalindromicSubsequences(self, S):\n if not S:\n return 0\n\n ways = [[0] * len(S) for i in range(len(S))]\n\n # base cases: for subarray of length 1 and 2\n for i in range(len(S)):\n ways[i][i] = 1\n if i < len(S) - 1:\n ways[i][i+1] = 2\n\n for ll in range(3, len(S)+1):\n for i in range(len(S) - ll + 1):\n j = ll + i - 1\n if S[i] != S[j]:\n ways[i][j] = ways[i+1][j] + ways[i][j-1] - ways[i+1][j-1]\n else:\n l = i + 1\n while l < j and S[l] != S[i]:\n l += 1\n r = j - 1\n while r > i and S[r] != S[j]:\n r -= 1\n\n if l < r:\n ways[i][j] = 2 * ways[i+1][j-1] - ways[l+1][r-1]\n elif l == r :\n ways[i][j] = 2 * ways[i+1][j-1] + 1\n else:\n ways[i][j] = 2 * ways[i+1][j-1] + 2\n return ways[0][len(S)-1] % (10**9 + 7)", "def is_palindrome_v2(s):\n n = len(s)\n\n return s[:n/2] == reverse(s[n-n/2:])", "def palindrome_permutation(w):\n w = w.strip().replace(' ', '')\n chars = {}\n for c in w:\n try:\n chars[c] += 1\n except KeyError:\n chars[c] = 1\n\n if len(w) % 2 == 0:\n #Check if there is an even number\n #of every character in w.\n return all(x % 2 == 0 for x in chars.values()) \n else:\n #Check if there is an even number\n #of every character in w,\n #except for exactly one character.\n found_odd = False\n for c in chars:\n if chars[c] % 1 == 0:\n if not found_odd:\n found_odd = True\n else:\n return False\n \n if found_odd:\n return True\n else:\n return False", "def find_mismatching_pair(s):\n i = 0\n j = len(s) - 1\n while i < j and s[i] == s[j]:\n i += 1\n j -= 1\n return i, j", "def is_palindrome(s):\n i, end = 0, len(s) // 2\n while i < end:\n if s[i] != s[len(s) - i - 1]:\n return False\n i += 1\n return True", "def main():\n for l in range(999,890,-1):\n for r in range(999,890,-1):\n num= l*r\n ans= palindrome_check(num)\n if ans:\n print l,r,num\n return\n print l,r,num\n print \"No palindrome found.\"\n return", "def is_palindrome(n):\n ns = str(n)\n for i in range(0, len(ns) // 2):\n if ns[i] != ns[len(ns) - 1 - i]: return False\n return True", "def probl4():\n\n largest_palindrome = 0\n for i in xrange(101, 1000):\n for j in xrange(101, 1000):\n output = i * j\n if str(output) == str(output)[::-1] and \\\n output > largest_palindrome:\n largest_palindrome = output\n return largest_palindrome", "def is_palindrome(n):\n v = []\n while n > 0:\n v.append(n % 10)\n n //= 10\n for i in range(len(v)//2):\n if v[i] != v[len(v)-i-1]:\n return False\n return True", "def palindrome_permutation(s):\n char_dict = {}\n for i in s:\n if i in char_dict:\n char_dict[i] += 1\n else:\n char_dict[i] = 1\n numOdd = 0\n for key in char_dict:\n if key != ' ':\n if char_dict[key] % 2 == 1:\n numOdd += 1\n if numOdd < 2:\n print_permutation(char_dict)\n return True\n else:\n return False", "def match(list_string):\n assert type(list_string)==list\n for i in list_string:\n assert type(i)==str\n assert i.isalpha()\n #Loops through all the possible substrings of the list of words to find the word pairs that are palindromes.\n my_match = []\n for i in range(0,len(list_string)):\n for j in range(0,len(list_string)):\n if i!=j:\n a = list_string[i]\n b = list_string[j]\n c = a+b\n d = b+a\n if c==c[::-1]:\n if (i,j) not in my_match:\n my_match.append((i,j))\n elif d==d[::-1]:\n if (j,i) not in my_match:\n my_match.append((j,i))\n return my_match", "def is_palindrome_permutation(string):\n\n letter_to_count = dict()\n\n for letter in string:\n letter_to_count[letter] = letter_to_count.get(letter, 0) + 1\n\n residual = 0\n for count in 
letter_to_count.values():\n residual += count % 2\n\n # there are can be a single letter with an odd character count when the palindrome is of odd length\n return residual <= 1", "def is_palindrome(sub):\n for i in range(len(sub)):\n if sub[i] != sub[len(sub) - i - 1]:\n return False\n return True" ]
[ "0.7363634", "0.7076098", "0.70694715", "0.6878793", "0.67712194", "0.67707276", "0.67490494", "0.65257967", "0.6466228", "0.63992494", "0.63456815", "0.633291", "0.6321037", "0.62712246", "0.6258766", "0.6231323", "0.6217285", "0.62059194", "0.6202977", "0.6194959", "0.6181608", "0.61775905", "0.61702603", "0.6162088", "0.61519253", "0.6142727", "0.6088312", "0.607948", "0.6078674", "0.606602" ]
0.78059846
0
Convert a dictionary to an earth engine feature server side
def dict_to_feature(d):
    f = ee.Feature(None,ee.Dictionary(d))
    return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _preprocess(self, feature_dict):\n return feature_dict", "def from_dict(cls, dikt) -> 'Features':\n return util.deserialize_model(dikt, cls)", "def dict_to_feature(feature_dict, keys, max_value=None):\n feature = []\n for key, val in feature_dict.items(): # First level\n if key not in keys:\n continue\n if val is None or val == \"auto\" or key == \"autotuning\" or val == \"\":\n continue\n if isinstance(val, dict):\n feature.append(dict_to_feature(val, max_value))\n else:\n feature.append(float(val))\n\n # normalization, should not matter in tree models\n if max_value is not None:\n norm_feature = []\n for f, mv in zip(feature, max_value):\n norm_feature.append(f / mv)\n feature = norm_feature\n\n return feature", "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def map_to_app_features(self, app):\n app['features'] = []\n for form_feature in self.features:\n feature = {}\n if form_feature.feature_name.data:\n feature['name'] = form_feature.feature_name.data\n if form_feature.feature_version.data:\n feature['version'] = form_feature.feature_version.data\n if form_feature.feature_provisioner.data:\n feature['provisioner'] = form_feature.feature_provisioner.data\n if form_feature.feature_parameters.data:\n json_ob = json.loads(form_feature.feature_parameters.data)\n if json_ob:\n feature['parameters'] = json_ob\n feature['version'] = ''\n else:\n feature['parameters'] = {}\n if feature:\n app['features'].append(feature)", "def add_engineered(features):\n features[\"londiff\"] = features[\"dropofflon\"] - features[\"pickuplon\"]\n features[\"latdiff\"] = features[\"dropofflat\"] - features[\"pickuplat\"]\n features[\"euclidean\"] = tf.math.sqrt(\n features[\"londiff\"]**2 + features[\"latdiff\"]**2)\n return features", "def dict_to_example(dictionary):\n features = {}\n for k, v in six.iteritems(dictionary):\n features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))\n return tf.train.Example(features=tf.train.Features(feature=features))", "def sites_geojson():\n\n with Config() as config:\n with db.Connection(config) as con:\n features = con.features()\n features = list(features)\n return flask.jsonify(features)", "def __call__(self, *args, **kwargs):\n self.features = dict((k, v()) for k, v in self.features.items())\n return self.features", "def process_features(\n config,\n raw_features: Union[tf.train.Example, features.FeatureDict],\n random_seed: int) -> features.FeatureDict:\n if isinstance(raw_features, dict):\n return features.np_example_to_features(\n np_example=raw_features,\n config=config,\n random_seed=random_seed)\n else:\n return features.tf_example_to_features(\n tf_example=raw_features,\n config=config,\n random_seed=random_seed)", "def process_feature_file(filename: str) -> Dict[str, Any]:\n feature = json.loads(open(filename).read())\n template = feature['query']\n name = feature['name']\n params = feature['params']\n feature_spec = {\n 'name': name,\n 'template': template,\n 'params': params\n }\n return feature_spec", "def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n 
return dict['__GEOSGeometry__'][1][0]\n \n return dict", "def save_feature(self):\n feature_dict = {\n 'name': self.name,\n 'preActionDes': self.pre_action_des,\n 'inActionDes': self.in_action_des,\n 'postActionDes': self.post_action_des,\n 'actionable': self.actionable,\n 'usable': self.usable,\n 'state': self.state,\n 'featureId': self.feature_id\n }\n return feature_dict", "def enhance_metadata(metadata, features='all'):\n\n # available options\n ortographic_features = ['w_length','n_vowels','n_consonants']\n lexical_features = ['uni_freq', 'bi_freq', 'func_word','count']\n position_features = ['position','position_end','is_first_word','is_last_word']\n\n # make list of features\n if features == 'all': features = ortographic_features +lexical_features + position_features \n\n # use ws clean to lower case\n words = [word.lower() for word in metadata['word'].values]\n\n # itereate features and fill metadata\n for feature in features:\n # ORTOGRAPHIC ##############################\n if feature == 'w_length': \n metadata[feature] = w_length(words)\n if feature == 'n_consonants':\n metadata[feature] = n_consonants(words)\n if feature == 'n_vowels':\n metadata[feature] = n_vowels(words)\n\n # LEXICAL ###################################\n if feature == 'uni_freq':\n metadata[feature] = unigram(words)\n if feature == 'bi_freq':\n metadata[feature] = bigram(words)\n if feature == 'func_word':\n metadata[feature] = function_word(words)\n if feature == 'count':\n metadata[feature] = count(words)\n\n # POSITION ###################################\n if feature == 'position':\n metadata[feature] = position(words)\n if feature == 'position_end':\n metadata[feature] = position_end(words)\n if feature == 'is_first_word':\n metadata[feature] = first_word(words)\n if feature == 'is_last_word':\n metadata[feature] = last_word(words)\n\n return metadata", "def _interactive_input_tensor_to_features_dict(feature_map, hparams):\n inputs = tf.convert_to_tensor(feature_map[\"inputs\"])\n\n x = inputs\n # Remove the batch dimension.\n num_samples = x[0]\n length = x[2]\n x = tf.slice(x, [3], tf.to_int32([length]))\n x = tf.reshape(x, [1, -1, 1, 1])\n # Transform into a batch of size num_samples to get that many random\n # decodes.\n x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))\n\n p_hparams = hparams.problem_hparams\n input_space_id = tf.constant(p_hparams.input_space_id)\n target_space_id = tf.constant(p_hparams.target_space_id)\n\n features = {}\n features[\"input_space_id\"] = input_space_id\n features[\"target_space_id\"] = target_space_id\n features[\"decode_length\"] = inputs[1]\n features[\"inputs\"] = x\n return features", "def convert_series_to_feature(series: Types.SeriesObj,) -> Dict[str, tf.train.Feature]:\n try:\n image, metadata = series\n dicom_id = f\"{metadata.get('Study Instance UID', 'unknown_study')}/{metadata.get('Series Instance UID', 'unknown_series')}/\"\n\n if metadata.get(\"flags\") and metadata.get(\"time\"):\n name = f\"time{metadata.get('time')[1:]}/{'_'.join(metadata.get('flags'))}/\"\n else:\n name = dicom_id\n return dict(\n [\n (f\"{name}{k}\", v)\n for (k, v) in {\n \"image\": floatList_feature(image.flatten().tolist()),\n \"dx\": float_feature(metadata.get(\"Pixel Spacing\")[0]),\n \"dy\": float_feature(metadata.get(\"Pixel Spacing\")[1]),\n \"dz\": float_feature(metadata.get(\"Spacing Between Slices\")),\n \"is_seg\": int64_feature(int(metadata.get(\"Modality\") == \"SEG\")),\n \"right\": int64_feature(int(metadata.get(\"Laterality\") == \"R\")),\n \"shape\": 
int64List_feature(image.shape),\n \"dicom_id\": bytes_feature(dicom_id.encode()),\n \"Image Position (Patient)\": floatList_feature(metadata.get(\"Image Position (Patient)\")),\n \"Image Orientation (Patient)\": floatList_feature(metadata.get(\"Image Orientation (Patient)\")),\n \"z_bound\": floatList_feature(metadata.get(\"slice_z\")),\n }.items()\n ]\n )\n except Exception as e:\n _logger.error(\n f\"Error making Series Features. Series meta: {metadata}. Error: {str(e)}\"\n )\n return {}", "def convert_study_to_feature(study: List[Types.SeriesObj]) -> List[Dict[str, tf.train.Feature]]:\n return [convert_series_to_feature(s) for s in study]", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}", "def _make_feature(self, val, app, reg):\n return {\"type\": \"Feature\",\n \"properties\": self._get_properties(val.set_app(app).set_region(reg)),\n \"geometry\": self._get_geometry(val.geom)\n }", "def post_feature_set(\n feature_set: Dict[str, Any],\n model_name: str,\n es_host: str\n) -> None:\n host = f'http://{es_host}'\n url = f'_ltr/_featureset/{model_name}'\n url = urljoin(host, url)\n header = {'Content-Type': 'application/json'}\n resp = requests.post(url, data=json.dumps(feature_set), headers=header)\n if not resp.ok:\n raise Exception(resp.content)", "def to_feature_dict(self):\n return {feature:self.get_feature(feature) for feature in self._FEATURES}", "def expand_feature_meta(feat_meta):\n if type(feat_meta) != dict:\n if type(feat_meta).__name__ == 'Feature':\n feat_meta = feat_meta.getInfo()\n else:\n raise RuntimeError('Unsupported EE object')\n\n out_str = ''\n for k, y in feat_meta.items():\n if k == 'geometry':\n for _k, _y in y.items():\n out_str += '{}: {}\\n'.format(str(_k), str(_y))\n\n elif k == 'properties':\n for _k, _y in y.items():\n out_str += 'Property: {} : {}\\n'.format(_k, str(_y))\n else:\n out_str += '{} : {}\\n'.format(str(k), str(y))\n return out_str", "def test_convert_features(convert_features_parameters):\n test_input = convert_features_parameters[0]\n expected_output = convert_features_parameters[1]\n assert geojson2fromto.convert(test_input) == expected_output", "def wrap_feature(self, data):\n feature = {\n 'type': 'Feature',\n 'geometry': data.pop(self.__geometry_field_name__, None)\n }\n feature['properties'] = data\n return feature", "def _parse_tensor_or_dict(features):\n if isinstance(features, dict):\n keys = sorted(features.keys())\n with ops.colocate_with(features[keys[0]]):\n features = array_ops.concat([features[k] for k in keys], 1)\n return features", "def encode_features(item):\n item['is_male'] = int(item['Sex'] == 'male')\n del item['Name']\n del item['Sex']\n # del item['Fare']\n del item['Cabin']\n del item['Ticket']\n\n # One-hot encoding: Embarked\n item['embarked_s'] = int(item['Embarked'] == 'S')\n item['embarked_c'] = int(item['Embarked'] == 'C')\n item['embarked_q'] = int(item['Embarked'] == 'Q')\n del item['Embarked']\n\n # One-hot encoding: Title\n item['title_mr'] = int(item['Title'] == 'Mr')\n item['title_miss'] = int(item['Title'] == 'Miss')\n item['title_mrs'] = int(item['Title'] == 'Mrs')\n item['title_master'] = int(item['Title'] == 'Master')\n item['title_other'] = 1 - (item['title_mr'] +\n item['title_miss'] +\n item['title_mrs'] +\n item['title_master'])\n del item['Title']\n return item", "def convert_patient_to_feature(\n patient_data: Dict[str, object]\n) -> Dict[str, tf.train.Feature]:\n # TODO: Maybe prefix with \"patient/\" for post processing ease.\n return {\n \"patient_id\": 
int64_feature(patient_data.get(\"patient_id\")),\n \"age\": float_feature(patient_data.get(\"demographic_metadata\").get(\"age\")),\n \"race\": int64_feature(patient_data.get(\"demographic_metadata\").get(\"race\")),\n \"ERpos\": int64_feature(patient_data.get(\"clinical\").get(\"ERpos\")),\n \"Pgpos\": int64_feature(patient_data.get(\"clinical\").get(\"Pgpos\")),\n \"HRpos\": int64_feature(patient_data.get(\"clinical\").get(\"HRpos\")),\n \"HER_two_status\": int64_feature(\n patient_data.get(\"clinical\").get(\"HER_two_status\")\n ),\n \"three_level_HER\": int64_feature(\n patient_data.get(\"clinical\").get(\"three_level_HER\")\n ),\n \"Bilateral\": int64_feature(patient_data.get(\"clinical\").get(\"Bilateral\")),\n \"Laterality\": int64_feature(patient_data.get(\"clinical\").get(\"Laterality\")),\n # Outcomes\n \"Sstat\": int64_feature(patient_data.get(\"outcome\").get(\"Sstat\")),\n \"survival_duration\": int64_feature(\n patient_data.get(\"outcome\").get(\"survival_duration\")\n ),\n \"rfs_ind\": int64_feature(patient_data.get(\"outcome\").get(\"rfs_ind\")),\n \"rfs_duration\": int64_feature(patient_data.get(\"outcome\").get(\"rfs_duration\")),\n \"pCR\": int64_feature(patient_data.get(\"outcome\").get(\"pCR\")),\n \"RCB\": int64_feature(patient_data.get(\"outcome\").get(\"RCB\")),\n \"LD\": int64List_feature(patient_data.get(\"LD\")),\n }", "def from_dict(cls, fs_dict):\n\n feature_set_proto = json_format.ParseDict(\n fs_dict, FeatureSetProto(), ignore_unknown_fields=True\n )\n return cls.from_proto(feature_set_proto)", "def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap", "def BoltMotionObjToFeatureObj(all_bolt_data, electrode_pca_dict):\n\n # Store in feature class object\n all_features_obj_dict = dict();\n\n for motion_name in all_bolt_data:\n trial_list = all_bolt_data.get(motion_name)\n print motion_name\n\n feature_list = list()\n # For all objects\n for trial in trial_list:\n\n bolt_feature_obj = extract_features.extract_features(trial, electrode_pca_dict[motion_name])\n\n feature_list.append(bolt_feature_obj)\n\n # Store all of the objects away\n all_features_obj_dict[motion_name] = feature_list\n\n return all_features_obj_dict" ]
[ "0.6012967", "0.5905541", "0.58329165", "0.57612747", "0.5738102", "0.5726723", "0.5687374", "0.56813467", "0.5624164", "0.56068397", "0.5599818", "0.5524402", "0.548304", "0.5466543", "0.5398618", "0.53919345", "0.53797644", "0.5355627", "0.53457856", "0.5333529", "0.53182507", "0.53055334", "0.5278664", "0.5269773", "0.5265213", "0.52626985", "0.5256953", "0.52399284", "0.52239853", "0.5206245" ]
0.78169143
0
Convert volume to flux
def volumeToFlux(volume_image):
    image = ee.Image(volume_image)
    flux_image = image.divide(ee.Image(AREA_PFAF6_30MIN)).multiply(1e6).copyProperties(image)
    flux_image = flux_image.set("units","m")
    flux_image = flux_image.set("convertedToFlux", 1)
    return flux_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convertflux(self, *args, **kwargs):\n return _image.image_convertflux(self, *args, **kwargs)", "def flux(source, freq=0.0, deltafreq=0.0, daysback=0.0) :\n x = queryFlux(source,freq,deltafreq,daysback)\n return x.flux", "def flux(self, x):\n return self.cal_spec.get_flux(self(x))", "def flux(self, q):\n q1, q2 = q\n if q1 > 0:\n u = q2/q1\n else:\n u = 0\n return np.array([q1*u, q1 * u*u + 0.5*9.81 * q1*q1])", "def treat_volume(volume):\n labels = measure.label(volume.dataobj, background=0, connectivity=2)\n new_volume = np.asarray(volume.dataobj)\n new_volume[labels > 1] = 0\n new_volume = nib.Nifti1Image(new_volume, volume.affine)\n return new_volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume", "def to_volume(self, verbose=True):\n images = self.load_all_dicom_images(verbose=verbose)\n\n volume = np.stack(\n [\n x.pixel_array * x.RescaleSlope + x.RescaleIntercept\n for x in images\n ],\n axis=-1,\n ).astype(np.int16)\n return volume", "def test_flux_conversion_vega(in_q, out_u, ans):\n result = units.convert_flux(_wave, in_q, out_u, vegaspec=_vspec)\n assert_quantity_allclose(result, ans, rtol=1e-2)\n\n # Scalar\n i = 0\n result = units.convert_flux(_wave[i], in_q[i], out_u, vegaspec=_vspec)\n assert_quantity_allclose(result, ans[i], rtol=1e-2)", "def flux(self, u):\n flu = np.zeros((3,2), dtype=np.float64)\n flu[0,0] = u[1]\n flu[1,0] = u[0] * (u[1]/u[0])**2 + 0.5 * 9.81*u[0]**2\n flu[2,0] = u[1] * u[2]/u[0] #FIXME attenzione che c'è il punto controllare se sono scalari o vettori'\n flu[0,1] = u[2]\n flu[1,1] = u[2] * u[1]/u[0]\n flu[2,1] = u[0] * (u[2]/u[0])**2 + 0.5 * 9.81*u[0]**2\n return flu", "def mag_to_flux(mag, mag_zp):\n return 10 ** (-0.4 * (mag - mag_zp))", "def convert_flux(nu, flux, target_unit):\n\n curr_unit = flux.unit\n\n if curr_unit.is_equivalent(u.erg / u.s):\n flux = flux / sed.distance ** 2\n elif curr_unit.is_equivalent(u.Jy):\n flux = flux * nu\n elif not curr_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):\n raise Exception(\"Don't know how to convert {0} to ergs/cm^2/s\" % (flux.unit))\n\n # Convert to requested unit\n\n if target_unit.is_equivalent(u.erg / u.s):\n flux = flux * sed.distance ** 2\n elif target_unit.is_equivalent(u.Jy):\n flux = flux / nu\n elif not target_unit.is_equivalent(u.erg / u.cm ** 2 / u.s):\n raise Exception(\"Don't know how to convert %s to %s\" % (curr_unit, unit_flux))\n\n return flux.to(target_unit)", "def Vega_zero_flux(self):\n with Vega() as v:\n f_vega = self.get_flux(v.wavelength, v.flux, axis=-1)\n return f_vega", "def normalize(volume):\n max = np.amax(volume)\n if max == 0:#Fixes dividing by 0 error if nothing in the volume\n return volume.astype(np.uint8)\n\n normalized = volume * (255.0 / max)\n normalized = np.round(normalized).astype(np.uint8)\n return normalized", "def normalize(wav, flux):\n return flux / flux.max() # maximum flux = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm", "def normalize_flux(self):\n fmax = 0\n fmin = 1e99\n for n in self.graph:\n if n.flux > fmax:\n fmax = n.flux\n if n.flux < fmin:\n 
fmin = n.flux\n for n in self.graph:\n n.flux = (n.flux-fmin)/(fmax-fmin)", "def flux():\n delta = 0.01 # film thickness, [dm]\n c = pre * 10 ** 2 / (R * tem) # total concentration calculated by ideal gas equation, in [mol/L]\n D12 = 0.001626528 / pre # HCl diffusion in Air, [dm2/s] @296K\n D13 = 3e-7 # HCl gas diffusion in water, [dm2/s] @296K\n D23 = 1.5e-7 # CH4 gas diffusion in water, [dm2/s] @296K\n N1 = ((x1_bar * x2d * D23) / (x2_bar * delta * D13) - x1_bar / delta) / \\\n (x2_bar / (D12 * c) + x3_bar / (D13 * c) + D23 * x1_bar / (D12 * D13 * c))\n # print 'Flux of HCl into water', abs(N1), [mol/(dm2*sec)]\n return N1", "def flux_hack(self):\r\n return self.planes[1].galaxies[0].light_profiles[0].flux", "def normalize_volume(vol_data):\n h, w, d = np.shape(vol_data)\n mean = np.sum(vol_data)/(h*w*d)\n std = np.std(vol_data)\n return (vol_data - mean) / std", "def flux(u, kappa):\n V = u.function_space()\n mesh = V.mesh()\n degree = V.ufl_element().degree()\n W = VectorFunctionSpace(mesh, 'P', degree)\n flux_u = project(-kappa*grad(u), W)\n flux_u.rename('flux(u)', 'continuous flux field')\n return flux_u", "def toa_incoming_shortwave_flux(srad0, srad0u):\n return srad0 - srad0u", "def magtoflux(_mag, _id):\n return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9))", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, \"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def volume(self):\n return self.volume_array", "def getScalarFlux(self):\n totScalarFlux = []\n for cell in self.cells:\n totScalarFlux.append(cell.getTotScalarFlux())\n totScalarFlux = np.array(totScalarFlux)\n #return totScalarFlux / np.sum(totScalarFlux) # norm flux to 1.\n return totScalarFlux", "def vol_uc(x):\r\n return sum([vol(m) for m in metamer(x)])", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = 
pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def cps_to_flux(self, counts):\n return counts * 10**(-(2.406+self.zp) / 2.5 ) / (self.lbda**2)", "def floor_volume(volume):\n return ul(math.floor(volume.to('microliter').magnitude))", "def volume(self, volume: float | None, from_unit: str) -> float:\n if not isinstance(volume, Number):\n raise TypeError(f\"{volume!s} is not a numeric value.\")\n\n # type ignore: https://github.com/python/mypy/issues/7207\n return VolumeConverter.convert( # type: ignore[unreachable]\n volume, from_unit, self.volume_unit\n )" ]
[ "0.6863947", "0.65460765", "0.6410462", "0.63754797", "0.6337502", "0.63291806", "0.63291806", "0.62562454", "0.61814946", "0.61583227", "0.60439867", "0.60325944", "0.592126", "0.5912541", "0.5898983", "0.5894703", "0.58924603", "0.58740014", "0.5857131", "0.58522546", "0.58230066", "0.5814723", "0.5810996", "0.5797539", "0.5693262", "0.56831056", "0.5676475", "0.56579185", "0.56464136", "0.56361353" ]
0.8054486
0
filters an imagecollection based on year and month
def filter_ic(ic,year,month):
    ic_filtered = (ic.filter(ee.Filter.eq("month",month))
                     .filter(ee.Filter.eq("year",year)))
    image = ee.Image(ic_filtered.first())
    return(image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)", "def filter_month(data, month, year):\n input_month = str(month).zfill(2)\n input_year = str(year)\n\n month_data = []\n\n for row in data:\n date_as_string = row['inspection_date'][:10]\n month, day, year = date_as_string.split('/')\n if input_month == month and input_year == year:\n month_data.append(row)\n\n return month_data", "def get_images(self,\n collection,\n bounds=None,\n year=None,\n start_date=None,\n end_date=None,\n start_julian=1,\n end_julian=365,\n index_list=None,\n scale_factor=None,\n **kwargs):\n coll = ee.ImageCollection(collection)\n\n if year is not None:\n start_date = '{}-01-01'.format(str(year))\n end_date = '{}-12-31'.format(str(year))\n\n if bounds is not None:\n coll = coll.filterBounds(bounds)\n if (start_date is not None) and (end_date is not None):\n coll = coll.filterDate(start_date, end_date)\n\n coll = coll.filter(ee.Filter.calendarRange(start_julian, end_julian))\n\n if len(kwargs) > 0:\n for key, value in kwargs.items():\n if key == 'map':\n if value == 'add_indices':\n\n if index_list is not None:\n self.index_list = index_list\n\n if scale_factor is not None:\n self.scale_factor = scale_factor\n\n func = getattr(self, value, None)\n\n if func is not None:\n coll = coll.map(func)\n else:\n warnings.warn('The function {} is not implemented'.format(str(key)))\n return coll", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def test_year_filtering(self):\n # Get a valid date\n 
entry = Entry.objects.get(id=1)\n params = {\"year\": entry.publication_date.year}\n\n self._test_filtering(**params)", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def date_filter(frame, date_column, year):\n frame[date_column] = pd.to_datetime(frame[date_column])\n frame = frame[frame[date_column] > pd.Timestamp(year, 1, 1)]\n return frame", "def test_collection_author_year_filtering(self):\n # Create a collection\n entries = Entry.objects.filter(id__in=(1, 5, 10, 15))\n collection = CollectionFactory(entries=entries)\n entry = Entry.objects.get(id=1)\n\n # Get a valid collection\n params = {\n \"collection\": collection.id,\n \"author\": 
entry.first_author.id,\n \"year\": entry.publication_date.year,\n }\n self._test_filtering(**params)", "def filter_daterange(self, imagery, extent) -> 'ProcessGraph':\n\n graph = {\n 'process_id': 'filter_daterange',\n 'imagery': imagery.graph,\n 'extent': extent\n }\n\n imagery.graph = graph\n\n return imagery", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def get_immats_fromdwh(dwh_schema, table_name, connection, date_col=\"date_immat\"):\n \n query = f\"SELECT distinct date AS date_immat FROM {dwh_schema}.{table_name}\"\n df = pd.read_sql(query,con=connection) \n df[\"year_immat\"] = df[date_col].str[-4:]\n dc_years = dict()\n for year, nb_mois in df.year_immat.value_counts().iteritems():\n if nb_mois < 12:\n key_name = f\"immats/immats_{year}.csv\"\n dc_years[key_name] = df[df[\"year_immat\"]==year][date_col].tolist()\n return dc_years", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def filter_creation_date(groups, start, end):\n results = []\n for g in groups:\n created = datetime.fromtimestamp(g['creationTime'] / 1000.0)\n if created > end:\n continue\n if created > start:\n g['exportStart'] = created\n else:\n g['exportStart'] = start\n results.append(g)\n return results", "def _get_metadata(self): \n def add_dates(date_list, dates):\n \"\"\"\n Append dates to date_list which are not already within date_list.\n \n \"\"\"\n for date in dates:\n if date.strftime('%d-%b') not in date_list:\n 
date_list.append(date.strftime('%d-%b'))\n return date_list\n \n metadata = {'DATA_TYPE':'Observation Data'} \n \n self.cube_dates = []\n years = []\n \n for cube in self.cubelist:\n cube_metadata = self._get_obs_metadata(cube)\n \n self.cube_dates = add_dates(self.cube_dates, \n cube_metadata['DATES'])\n # Years are based on the earliest date.\n years.append(min(cube_metadata['DATES']).year)\n del cube_metadata['DATES']\n \n for key, val in cube_metadata.items():\n # Find unique metadata which has not already been added by \n # previous cubes. Years are the common one.\n current_vals = metadata.get(key)\n if current_vals is not None:\n for this_val in current_vals:\n if hasattr(this_val, '__iter__'):\n try: \n if numpy.array_equal(this_val, val):\n break\n except AttributeError:\n # If the array type is not comparable for \n # example array of strings.\n equal = True\n for this_item, item in zip(this_val, val):\n if this_item != item:\n equal = False\n break\n if equal:\n break\n else:\n if this_val == val:\n break\n metadata[key].append(val)\n else:\n metadata[key] = [val]\n \n bound_names = []\n # Tidy up lists of length 1.\n for key, val in metadata.items():\n if type(val) == list and len(val) == 1:\n metadata[key] = val[0]\n # Retrieve the exact bound names.\n if key[-7:] == '_BOUNDS':\n bound_names.append(key)\n \n metadata['YEARS'] = sorted(list(set(years)))\n metadata['DATES'] = self.cube_dates\n \n return self.MetaData(metadata, bound_names)", "def album_filter(query_params, query):\n table = Album.__table__\n col_name = table.c.release_date\n if query_params.get('start_year') is not None \\\n and query_params.get('end_year') is not None:\n filt_statement = and_(\n col_name >= date(int(query_params.get('start_year')), 1, 1),\n col_name <= date(int(query_params.get('end_year')), 12, 31))\n query = query.filter(filt_statement)\n elif query_params.get('start_year') is not None:\n query = query.filter(\n col_name >= date(int(query_params.get('start_year')), 1, 1))\n elif query_params.get('end_year') is not None:\n query = query.filter(\n col_name <= date(int(query_params.get('end_year')), 12, 31))\n if query_params.get('num_tracks') is not None:\n query = query.filter(\n table.c.num_tracks == int(query_params.get('num_tracks')))\n if query_params.get('label') is not None:\n query = query.filter(table.c.label == str(query_params.get('label')))\n return query", "def plotOceanParcelsAccumulatedResults(input_data_folder, output_folder, start_year, end_year, dt=1):\n # Only for\n tot_days = (end_year-start_year)*365\n start_date = datetime.strptime(str(start_year),'%Y')\n\n open_files = []\n for c_day in np.arange(0, tot_days, dt):\n print(F\"------- {c_day}---------\")\n # Released months\n c_date = start_date + timedelta(days=int(c_day))\n months = (c_date.year - start_date.year)*12 + c_date.month - start_date.month\n\n # Iterate over all the files that should contribute to the image\n fig = plt.figure(figsize=(20,10))\n ax = plt.subplot(1, 1, 1, projection=ccrs.PlateCarree())\n for c_month in range(0, months + 1):\n c_file_year = (start_date + relativedelta(months=int(c_month))).year\n c_file_month = (start_date + relativedelta(months=int(c_month))).month\n skip_days = c_day - (c_date - datetime.strptime(F\"{c_file_year}-{c_file_month}\",'%Y-%m')).days\n\n if len(open_files) <= c_month:\n file_name = F\"TenYears_YesWinds_YesDiffusion_NoUnbeaching_{c_file_year}_{(c_file_month):02d}.nc\"\n print(F\"Reading new file: {file_name}\")\n open_files.append(Dataset(join(input_data_folder, 
file_name), \"r\", format=\"NETCDF4\"))\n\n c_time_step = c_day - skip_days\n # lats = open_files[c_month].variables['lat'][:,c_time_step]\n # lons = open_files[c_month].variables['lon'][:,c_time_step]\n ax.scatter(open_files[c_month].variables['lon'][:,c_time_step], open_files[c_month].variables['lat'][:,c_time_step], color='c', s=1)\n\n title = F\"{start_date.strftime('%Y-%m-%d')} - {c_date.strftime('%Y-%m-%d')}\"\n ax.coastlines()\n ax.set_title(title, fontsize=30)\n\n # plt.show()\n plt.savefig(F\"{output_folder}/{start_date.strftime('%Y_%m')}_{c_day:04d}.png\")\n plt.close()", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! 
Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def get_filtered(self, collection, xmlFormat):\n\t\tstart = \"2012-05-01T00:00:00Z\"\n\t\tend = \"2012-05-20T00:00:00Z\"\n\t\tquery = '/text//annotationRecord/service/date/@modified:[%s TO %s]' % (start, end)\n\t\t\n\t\treturn {\n\t\t\t'q' : query,\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t\"ky\": collection,\n\t\t\t'sortDescending' : '/text//annotationRecord/service/date/@modified'\n\t\t\t}", "def find_by_year(our_data,year):\n return [album for album in our_data if album['number'] == str(year)]", "def dataset_extract_features_from_date(dataset,date_feature): \n dataset['dayofmonth'] = dataset[date_feature].dt.day\n dataset['dayofyear'] = dataset[date_feature].dt.dayofyear \n dataset['dayofweek'] = dataset[date_feature].dt.dayofweek\n dataset['month'] = dataset[date_feature].dt.month\n dataset['year'] = dataset[date_feature].dt.year\n dataset['weekofyear'] = dataset[date_feature].dt.weekofyear\n dataset['is_month_start'] = (dataset[date_feature].dt.is_month_start).astype(int)\n dataset['is_month_end'] = (dataset[date_feature].dt.is_month_end).astype(int)\n return dataset", "def apply_filter(self, image):\n pass", "def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc", "def filter_images(data, vgid2idx, meta_vgids):\r\n new_data = []\r\n for vgid in meta_vgids:\r\n new_data.append(data[vgid2idx[vgid]])\r\n return new_data", "def get_filtered(self, collection, xmlFormat):\n\t\tstart = \"2012-05-01T00:00:00Z\"\n\t\tend = \"2012-05-20T00:00:00Z\"\n\t\tquery = '/text//itemRecord/metaMetadata/dateInfo/@lastModified:[%s TO %s]' % (start, end)\n\t\t\n\t\treturn {\n\t\t\t'q' : query,\n\t\t\t\"verb\": \"Search\",\n\t\t\t\"xmlFormat\": xmlFormat,\n\t\t\t\"ky\": collection,\n\t\t\t'sortDescending' : '/text//itemRecord/metaMetadata/dateInfo/@lastModified'\n\t\t\t}", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def test_author_year_filtering(self):\n # Get a valid date\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id, \"year\": entry.publication_date.year}\n\n self._test_filtering(**params)" ]
[ "0.6298117", "0.5903462", "0.58848464", "0.5747459", "0.5735303", "0.5665092", "0.54974794", "0.5486667", "0.5385302", "0.5352185", "0.5291872", "0.5263054", "0.5232077", "0.52078956", "0.519673", "0.51886106", "0.5142435", "0.510914", "0.50958353", "0.50748765", "0.50723386", "0.5071877", "0.50454485", "0.5031448", "0.5014739", "0.50142187", "0.499967", "0.49894515", "0.49519676", "0.4909264" ]
0.77574176
0
Zonal statistics with rasters as input and rasters and lists as output
def zonalStatsToRaster(image, zonesImage, geometry, maxPixels, reducerType):
    # reducerType can be mean, max, sum, first or mode. Count is always included for QA.
    # The resolution of the zones image is used as the scale.
    reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "mean"), ee.Reducer.mean(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "max"), ee.Reducer.max(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "sum"), ee.Reducer.sum(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "first"), ee.Reducer.first(),
              ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType, "mode"), ee.Reducer.mode(), "error")))))
    reducer = ee.Reducer(reducer).combine(reducer2=ee.Reducer.count(), sharedInputs=True).group(groupField=1, groupName="zones")

    scale = zonesImage.projection().nominalScale().getInfo()
    zonesImage = zonesImage.select(zonesImage.bandNames(), ["zones"])

    totalImage = ee.Image(image).addBands(zonesImage)
    resultsList = ee.List(totalImage.reduceRegion(
        geometry=geometry,
        reducer=reducer,
        scale=scale,
        maxPixels=maxPixels
    ).get("groups"))

    resultsList = resultsList.map(ensure_default_properties)
    zoneList = mapList(resultsList, 'zones')
    countList = mapList(resultsList, 'count')
    valueList = mapList(resultsList, reducerType)

    valueImage = zonesImage.remap(zoneList, valueList).select(["remapped"], [reducerType])
    countImage = zonesImage.remap(zoneList, countList).select(["remapped"], ["count"])
    newImage = zonesImage.addBands(countImage).addBands(valueImage)
    return newImage, zoneList, valueList, countList
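A minimal usage sketch for the zonalStatsToRaster function above (not part of the original record): it assumes the Earth Engine Python API is initialized, the asset IDs and geometry are placeholders, and the helper functions mapList and ensure_default_properties referenced by the function are defined elsewhere in the source project.

import ee

ee.Initialize()

# Hypothetical inputs: a value image, an integer-typed zones image and a region geometry.
value_image = ee.Image("users/example/ndvi_2020")          # placeholder asset ID
zones_image = ee.Image("users/example/basin_ids").toInt()  # placeholder asset ID
region = ee.Geometry.Rectangle([-10.0, 35.0, 5.0, 45.0])

# Returns a raster with "zones", "count" and "mean" bands, plus the per-zone lists.
zonal_img, zone_list, value_list, count_list = zonalStatsToRaster(
    value_image, zones_image, region, maxPixels=1e10, reducerType="mean")

print(zone_list.getInfo(), value_list.getInfo(), count_list.getInfo())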
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zonal_stats(src_poly, src_raster, operator=['mean'], features=None):\n assert src_raster.geo_transform is not None, \"src_raster.geo_transform should not be None\"\n assert isinstance(operator, list), \"operator should be a list of string. ex: ['mean']\"\n features = list(range(src_raster.bands)) if features is None else features\n assert len(features) == src_raster.bands, \"length of features should equals number of bands of the raster\"\n df_shp = src_poly.copy()\n df_shp['poly_idx'] = list(range(len(df_shp)))\n df_shp['poly_idx'] = df_shp['poly_idx'].astype('float')\n poly_rst = tgp.ShapeGrid.rasterize_layer(df_shp, src_raster.rows, src_raster.cols, src_raster.geo_transform, 'poly_idx', all_touched=True, no_data_value=np.nan)\n X_combine = np.concatenate([poly_rst.data, src_raster.data], axis=-1)\n X_combine_df = pd.DataFrame(X_combine.reshape(-1, src_raster.bands))\n X_groupby = X_combine_df.groupby(0, as_index=False)\n for op in operator:\n columns = {0:'poly_idx'}\n for f_idx, f in enumerate(features):\n columns[f_idx+1] = f'zonal_{op}_{f}'\n if op == 'mean':\n df_shp = df_shp.merge(X_groupby.mean().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'max':\n df_shp = df_shp.merge(X_groupby.max().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'min':\n df_shp = df_shp.merge(X_groupby.min().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'median':\n df_shp = df_shp.merge(X_groupby.median().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'sum':\n df_shp = df_shp.merge(X_groupby.sum().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'std':\n df_shp = df_shp.merge(X_groupby.std().rename(columns=columns), on='poly_idx', how='left')\n elif op == 'count':\n df_shp = df_shp.merge(X_groupby.count().rename(columns=columns), on='poly_idx', how='left')\n else:\n assert False, \"no this operator\"\n return df_shp", "def zonal_statistics(wrksppath, timestamp, region, model):\n logging.info('\\nDoing Zonal Statistics on ' + region)\n # Define app workspace and sub-paths\n resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')\n shp_path = os.path.join(wrksppath, region, 'shapefiles', 'ffgs_' + region + '.shp')\n\n stat_file = os.path.join(wrksppath, region, model + 'results.csv')\n\n # check that there are resampled tiffs to do zonal statistics on\n if not os.path.exists(resampleds):\n logging.info('There are no resampled tiffs to do zonal statistics on. 
Skipping Zonal Statistics')\n return\n\n # List all Resampled GeoTIFFs\n files = os.listdir(resampleds)\n files = [tif for tif in files if tif.endswith('.tif')]\n files.sort()\n\n # do zonal statistics for each resampled tiff file and put it in the stats dataframe\n stats_df = pd.DataFrame()\n for i in range(len(files)):\n logging.info('starting zonal statistics for ' + files[i])\n ras_path = os.path.join(resampleds, files[i])\n stats = rasterstats.zonal_stats(\n shp_path,\n ras_path,\n stats=['count', 'max', 'mean'],\n geojson_out=True\n )\n\n timestep = files[i][:10]\n\n # for each stat that you get out, write it to the dataframe\n logging.info('writing the statistics for this file to the dataframe')\n for j in range(len(stats)):\n\n temp_data = stats[j]['properties']\n temp_data.update({'Forecast Timestamp': timestamp})\n temp_data.update({'Timestep': timestep})\n\n temp_df = pd.DataFrame([temp_data])\n stats_df = stats_df.append(temp_df, ignore_index=True)\n\n # write the resulting dataframe to a csv\n logging.info('\\ndone with zonal statistics, rounding values, writing to a csv file')\n stats_df = stats_df.round({'max': 1, 'mean': 1})\n stats_df.to_csv(stat_file, index=False)\n\n # delete the resampled tiffs now that we dont need them\n logging.info('deleting the resampled tiffs directory')\n shutil.rmtree(resampleds)\n\n return", "def ZonalStatsRasterArray(zonegeodf, rasterarr, transaffine, stats, nodatavalue=0):\n zonaloutput = zonal_stats(vectors=zonegeodf.geometry, raster=rasterarr, nodata=nodatavalue, affine=transaffine, stats=stats, all_touched=True)\n indexname = 'index' if zonegeodf.index.name is None else zonegeodf.index.name\n zonegeodf.reset_index(inplace=True)\n output = zonegeodf.join(pd.DataFrame(zonaloutput))\n output.set_index(indexname, inplace=True)\n return output", "def zonal_stats(in_path, raster, grid_id_name='GRIDMET_ID'):\n if not os.path.isfile(in_path):\n raise FileNotFoundError('Input summary CSV file given'+\\\n ' was invalid or not found')\n # look for fishnet created in 'in_path/spatial'\n path_root = os.path.split(in_path)[0]\n file_name = os.path.split(in_path)[1]\n # get variable names from input file prefix\n grid_var = file_name.split('_summ')[0]\n var_name = Path(raster).name.split('.')[0]\n # grid is in the \"spatial\" subdir of in_path\n grid_file = OPJ(path_root, 'spatial', 'grid.shp')\n # save zonal stats to summary CSV in same dir as raster as of version 0.3\n raster_root = os.path.split(raster)[0]\n out_file = OPJ(raster_root, 'zonal_stats.csv')\n\n # this error would only occur when using within Python \n if not os.path.isfile(grid_file):\n raise FileNotFoundError(\n os.path.abspath(grid_file),\n '\\ndoes not exist, create it using spatial.make_grid first'\n )\n print(\n 'Calculating', grid_var, 'zonal means for', var_name\n )\n\n # calc zonal stats and open for grid IDs\n with fiona.open(grid_file, 'r') as source:\n zs = zstats(source, raster, all_touched=True)\n grid_ids = [f['properties'].get(grid_id_name) for f in source]\n\n # get just mean values, zonal_stats can do other stats...\n means = [z['mean'] for z in zs]\n out_df = pd.DataFrame(\n data={\n grid_id_name: grid_ids, \n var_name: means\n }\n )\n out_df[grid_id_name] = out_df[grid_id_name].astype(int)\n # drop rows for cells outside of gridMET master grid\n out_df = out_df.drop(out_df[out_df[grid_id_name] == -999].index)\n\n # save or update existing csv file\n if not os.path.isfile(out_file):\n print(\n os.path.abspath(out_file),\n '\\ndoes not exist, creating file'\n )\n 
out_df.to_csv(out_file, index=False)\n else:\n # overwrite column values if exists, else append\n existing_df = pd.read_csv(out_file)\n existing_df[grid_id_name] = existing_df[grid_id_name].astype(int)\n if var_name in existing_df.columns:\n # may throw error if not same size as original grid\n try:\n existing_df.update(out_df)\n existing_df.to_csv(out_file, index=False) \n except:\n print('Zonal stats for this variable already exist but they',\n 'appear to have been calculated with a different grid',\n 'overwriting existing file at:\\n',\n os.path.abspath(out_file)\n )\n out_df.to_csv(out_file, index=False)\n else:\n existing_df = existing_df.merge(out_df, on=grid_id_name)\n #existing_df = pd.concat([existing_df, out_df], axis=1).drop_duplicates()\n existing_df.to_csv(out_file, index=False)", "def zonalStatsToFeatureCollection(image,zonesImage,geometry,maxPixels,reducerType):\n # reducertype can be mean, max, sum, first. Count is always included for QA\n # the resolution of the zonesimage is used for scale\n\n reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mean\"),ee.Reducer.mean(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"max\"),ee.Reducer.max(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"sum\"),ee.Reducer.sum(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"first\"),ee.Reducer.first(),\n ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,\"mode\"),ee.Reducer.mode(),\"error\"))))\n )\n reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName=\"zones\") \n\n scale = zonesImage.projection().nominalScale().getInfo()\n zonesImage = zonesImage.select(zonesImage.bandNames(),[\"zones\"])\n\n totalImage = ee.Image(image).addBands(zonesImage)\n resultsList = ee.List(totalImage.reduceRegion(\n geometry= geometry, \n reducer= reducer,\n scale= scale,\n maxPixels=maxPixels\n ).get(\"groups\"))\n\n resultsList = resultsList.map(ensure_default_properties); \n fc = ee.FeatureCollection(resultsList.map(dict_to_feature))\n\n return fc", "def gen_zonal_stats(\n vectors, raster,\n layer=0,\n band=1,\n nodata=None,\n affine=None,\n stats=None,\n all_touched=True,\n percent_cover_selection=None,\n percent_cover_weighting=True,\n percent_cover_scale=20,\n categorical=False,\n category_map=None,\n add_stats=None,\n zone_func=None,\n raster_out=False,\n prefix=None,\n geojson_out=False, **kwargs):\n stats, run_count = check_stats(stats, categorical)\n\n # check inputs related to percent coverage\n percent_cover = False\n if percent_cover_weighting or percent_cover_selection is not None:\n percent_cover = True\n if percent_cover_scale is None:\n warnings.warn('No value for `percent_cover_scale` was given. '\n 'Using default value of 10.')\n percent_cover_scale = 10\n\n try:\n if percent_cover_scale != int(percent_cover_scale):\n warnings.warn('Value for `percent_cover_scale` given ({0}) '\n 'was converted to int ({1}) but does not '\n 'match original value'.format(\n percent_cover_scale, int(percent_cover_scale)))\n\n percent_cover_scale = int(percent_cover_scale)\n\n if percent_cover_scale <= 1:\n raise Exception('Value for `percent_cover_scale` must be '\n 'greater than one ({0})'.format(\n percent_cover_scale))\n\n except:\n raise Exception('Invalid value for `percent_cover_scale` '\n 'provided ({0}). 
Must be type int.'.format(\n percent_cover_scale))\n\n if percent_cover_selection is not None:\n try:\n percent_cover_selection = float(percent_cover_selection)\n except:\n raise Exception('Invalid value for `percent_cover_selection` '\n 'provided ({0}). Must be able to be converted '\n 'to a float.'.format(percent_cover_selection))\n\n # if not all_touched:\n # warnings.warn('`all_touched` was not enabled but an option requiring '\n # 'percent_cover calculations was selected. Automatically '\n # 'enabling `all_touched`.')\n # all_touched = True\n\n with Raster(raster, affine, nodata, band) as rast:\n features_iter = read_features(vectors, layer)\n for _, feat in enumerate(features_iter):\n geom = shape(feat['geometry'])\n\n if 'Point' in geom.type:\n geom = boxify_points(geom, rast)\n percent_cover = False\n\n geom_bounds = tuple(geom.bounds)\n fsrc = rast.read(bounds=geom_bounds)\n\n if percent_cover:\n cover_weights = rasterize_pctcover_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n scale=percent_cover_scale,\n all_touched=all_touched)\n rv_array = cover_weights > (percent_cover_selection or 0)\n else:\n rv_array = rasterize_geom(\n geom, shape=fsrc.shape, affine=fsrc.affine,\n all_touched=all_touched)\n\n # nodata mask\n isnodata = (fsrc.array == fsrc.nodata)\n\n # add nan mask (if necessary)\n if np.issubdtype(fsrc.array.dtype, float) and \\\n np.isnan(fsrc.array.min()):\n isnodata = (isnodata | np.isnan(fsrc.array))\n\n # Mask the source data array\n # mask everything that is not a valid value or not within our geom\n masked = np.ma.MaskedArray(\n fsrc.array,\n mask=(isnodata | ~rv_array))\n\n # execute zone_func on masked zone ndarray\n if zone_func is not None:\n if not callable(zone_func):\n raise TypeError(('zone_func must be a callable '\n 'which accepts function a '\n 'single `zone_array` arg.'))\n zone_func(masked)\n\n if masked.compressed().size == 0:\n # nothing here, fill with None and move on\n feature_stats = dict([(stat, None) for stat in stats])\n if 'count' in stats: # special case, zero makes sense here\n feature_stats['count'] = 0\n else:\n if run_count:\n keys, counts = np.unique(masked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c in counts]))\n\n if categorical:\n feature_stats = dict(pixel_count)\n if category_map:\n feature_stats = remap_categories(category_map, feature_stats)\n else:\n feature_stats = {}\n\n if 'min' in stats:\n feature_stats['min'] = float(masked.min())\n if 'max' in stats:\n feature_stats['max'] = float(masked.max())\n if 'mean' in stats:\n if percent_cover:\n feature_stats['mean'] = float(\n np.sum(masked * cover_weights) /\n np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['mean'] = float(masked.mean())\n if 'count' in stats:\n if percent_cover:\n feature_stats['count'] = float(np.sum(~masked.mask * cover_weights))\n else:\n feature_stats['count'] = int(masked.count())\n # optional\n if 'sum' in stats:\n if percent_cover:\n feature_stats['sum'] = float(np.sum(masked * cover_weights))\n else:\n feature_stats['sum'] = float(masked.sum())\n if 'std' in stats:\n feature_stats['std'] = float(masked.std())\n if 'median' in stats:\n feature_stats['median'] = float(np.median(masked.compressed()))\n if 'majority' in stats:\n feature_stats['majority'] = float(key_assoc_val(pixel_count, max))\n if 'minority' in stats:\n feature_stats['minority'] = float(key_assoc_val(pixel_count, min))\n if 'unique' in stats:\n feature_stats['unique'] = 
len(list(pixel_count.keys()))\n if 'range' in stats:\n try:\n rmin = feature_stats['min']\n except KeyError:\n rmin = float(masked.min())\n try:\n rmax = feature_stats['max']\n except KeyError:\n rmax = float(masked.max())\n feature_stats['range'] = rmax - rmin\n\n for pctile in [s for s in stats if s.startswith('percentile_')]:\n q = get_percentile(pctile)\n pctarr = masked.compressed()\n feature_stats[pctile] = np.percentile(pctarr, q)\n\n if 'nodata' in stats:\n featmasked = np.ma.MaskedArray(fsrc.array, mask=np.logical_not(rv_array))\n feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())\n\n if add_stats is not None:\n for stat_name, stat_func in add_stats.items():\n feature_stats[stat_name] = stat_func(masked)\n\n if raster_out:\n feature_stats['mini_raster_array'] = masked\n feature_stats['mini_raster_affine'] = fsrc.affine\n feature_stats['mini_raster_nodata'] = fsrc.nodata\n\n if prefix is not None:\n prefixed_feature_stats = {}\n for key, val in feature_stats.items():\n newkey = \"{}{}\".format(prefix, key)\n prefixed_feature_stats[newkey] = val\n feature_stats = prefixed_feature_stats\n\n if geojson_out:\n for key, val in feature_stats.items():\n if 'properties' not in feat:\n feat['properties'] = {}\n feat['properties'][key] = val\n yield feat\n else:\n yield feature_stats", "def zonal_stats(self, gdf, stats, all_touched=False):\n _ST = [\"count\", \"min\", \"max\", \"sum\", \"mean\", \"std\", \"median\"]\n\n def rmd(ds, stat):\n return {var: f\"{var}_{stat}\" for var in ds.raster.vars}\n\n def gen_zonal_stat(ds, geoms, stats, all_touched=False):\n dims = (ds.raster.y_dim, ds.raster.x_dim)\n for i, geom in enumerate(geoms):\n # add buffer to work with point geometries\n ds1 = ds.raster.clip_bbox(geom.bounds, buffer=2).raster.mask_nodata()\n if np.any(np.asarray(ds1.raster.shape) < 2):\n continue\n mask = full(ds1.raster.coords, nodata=0, dtype=np.uint8)\n features.rasterize(\n [(geom, 1)],\n out_shape=mask.raster.shape,\n fill=0,\n transform=mask.raster.transform,\n out=mask.data,\n all_touched=all_touched,\n )\n ds1 = ds1.where(mask == 1)\n dss = []\n for stat in stats:\n if stat in _ST:\n ds1_stat = getattr(ds1, stat)(dims)\n dss.append(ds1_stat.rename(rmd(ds1, stat)))\n elif isinstance(stat, str) and stat.startswith(\"q\"):\n qs = np.array([float(q) for q in stat.strip(\"q\").split(\",\")])\n dss.append(\n ds1.quantile(qs / 100, dims).rename(rmd(ds1, \"quantile\"))\n )\n elif callable(stat):\n dss.append(\n ds1.reduce(stat, dims).rename(rmd(ds1, stat.__name__))\n )\n else:\n raise ValueError(f\"Stat {stat} not valid.\")\n yield xr.merge(dss), i\n\n if isinstance(stats, str):\n stats = stats.split()\n elif callable(stats):\n stats = list([stats])\n\n if gdf.crs is not None and self.crs is not None and gdf.crs != self.crs:\n gdf = gdf.to_crs(self.crs)\n geoms = gdf[\"geometry\"].values\n\n ds = self._obj.copy()\n if isinstance(ds, xr.DataArray):\n if ds.name is None:\n ds.name = \"values\"\n ds = ds.to_dataset()\n\n out = list(gen_zonal_stat(ds, geoms, stats, all_touched))\n if len(out) == 0:\n raise IndexError(\"All geometries outside raster domain\")\n\n dss, idx = zip(*out)\n ds_out = xr.concat(dss, \"index\")\n ds_out[\"index\"] = xr.IndexVariable(\"index\", gdf.index.values[np.array(idx)])\n\n return ds_out", "def test_rasters_and_arrays(self):\n\n # Create test data\n lon_ul = 100 # Longitude of upper left corner\n lat_ul = 10 # Latitude of upper left corner\n numlon = 8 # Number of longitudes\n numlat = 5 # Number of latitudes\n dlon = 1\n dlat = -1\n\n # 
Define array where latitudes are rows and longitude columns\n A1 = numpy.zeros((numlat, numlon))\n\n # Establish coordinates for lower left corner\n lat_ll = lat_ul - numlat\n lon_ll = lon_ul\n\n # Define pixel centers along each direction\n lon = numpy.linspace(lon_ll + 0.5, lon_ll + numlon - 0.5, numlon)\n lat = numpy.linspace(lat_ll + 0.5, lat_ll + numlat - 0.5, numlat)\n\n # Define raster with latitudes going bottom-up (south to north).\n # Longitudes go left-right (west to east)\n for i in range(numlat):\n for j in range(numlon):\n A1[numlat - 1 - i, j] = linear_function(lon[j], lat[i])\n\n # Throw in a nodata element\n A1[2, 6] = numpy.nan\n\n # Upper left corner\n assert A1[0, 0] == 105.25\n assert A1[0, 0] == linear_function(lon[0], lat[4])\n\n # Lower left corner\n assert A1[4, 0] == 103.25\n assert A1[4, 0] == linear_function(lon[0], lat[0])\n\n # Upper right corner\n assert A1[0, 7] == 112.25\n assert A1[0, 7] == linear_function(lon[7], lat[4])\n\n # Lower right corner\n assert A1[4, 7] == 110.25\n assert A1[4, 7] == linear_function(lon[7], lat[0])\n\n # Generate raster object and write\n projection = ('GEOGCS[\"WGS 84\",'\n 'DATUM[\"WGS_1984\",'\n 'SPHEROID[\"WGS 84\",6378137,298.2572235630016,'\n 'AUTHORITY[\"EPSG\",\"7030\"]],'\n 'AUTHORITY[\"EPSG\",\"6326\"]],'\n 'PRIMEM[\"Greenwich\",0],'\n 'UNIT[\"degree\",0.0174532925199433],'\n 'AUTHORITY[\"EPSG\",\"4326\"]]')\n geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)\n R1 = Raster(A1, projection, geotransform,\n keywords={'testkwd': 'testval', 'size': 'small'})\n\n # Check string representation of raster class\n assert str(R1).startswith('Raster data')\n assert str(R1.rows) in str(R1)\n assert str(R1.columns) in str(R1)\n\n # Test conversion between geotransform and\n # geometry (longitudes and latitudes)\n longitudes, latitudes = R1.get_geometry()\n msg = 'Longitudes not as expected: %s' % str(longitudes)\n assert numpy.allclose(longitudes, [100.5, 101.5, 102.5, 103.5, 104.5,\n 105.5, 106.5, 107.5]), msg\n\n msg = 'Latitudes not as expected: %s' % str(latitudes)\n assert numpy.allclose(latitudes, [5.5, 6.5, 7.5, 8.5, 9.5]), msg\n\n gt = raster_geometry2geotransform(longitudes, latitudes)\n msg = ('Conversion from coordinates to geotransform failed: %s'\n % str(gt))\n assert numpy.allclose(gt, geotransform,\n rtol=1.0e-12, atol=1.0e-12), msg\n\n msg = ('Dimensions of raster array do not match those of '\n 'raster object')\n assert numlat == R1.rows, msg\n assert numlon == R1.columns, msg\n\n # Write back to new (tif) file\n out_filename = unique_filename(suffix='.tif')\n R1.write_to_file(out_filename)\n assert R1.filename == out_filename\n\n # Check nodata in original layer\n assert numpy.isnan(R1.get_nodata_value())\n\n # Read again and check consistency\n R2 = read_layer(out_filename)\n assert R2.filename == out_filename\n\n # Check nodata in read layer\n assert numpy.isnan(R2.get_nodata_value())\n\n msg = ('Dimensions of written raster array do not match those '\n 'of input raster file\\n')\n msg += (' Dimensions of input file '\n '%s: (%s, %s)\\n' % (R1.filename, numlat, numlon))\n msg += (' Dimensions of output file %s: '\n '(%s, %s)' % (R2.filename, R2.rows, R2.columns))\n\n assert numlat == R2.rows, msg\n assert numlon == R2.columns, msg\n\n A2 = R2.get_data()\n\n assert numpy.allclose(numpy.nanmin(A1), numpy.nanmin(A2))\n assert numpy.allclose(numpy.nanmax(A1), numpy.nanmax(A2))\n\n msg = 'Array values of written raster array were not as expected'\n assert nanallclose(A1, A2), msg\n\n msg = 'Geotransforms were 
different'\n assert R1.get_geotransform() == R2.get_geotransform(), msg\n\n p1 = R1.get_projection(proj4=True)\n p2 = R2.get_projection(proj4=True)\n msg = 'Projections were different: %s != %s' % (p1, p2)\n assert p1 == p1, msg\n\n # Exercise projection __eq__ method\n assert R1.projection == R2.projection\n\n # Check that equality raises exception when type is wrong\n try:\n R1.projection == 234\n except TypeError:\n pass\n else:\n msg = 'Should have raised TypeError'\n raise Exception(msg)\n\n # Check keywords\n assert R1.keywords == R2.keywords\n\n # Check override of ==\n assert R1 == R2", "def output_rasters(self, arr, outdir, outname):\n\n outpath = os.path.join(outdir, outname)\n print('the outpath for file {} is {}'.format(outname, outpath))\n\n # get the geoinfo from sample tiff to output intermediate files\n ds = rasterio.open(self.geoproperties_file)\n band1 = arr\n with rasterio.open(outpath, 'w', driver='GTiff', height=self.rows, width=self.cols,\n count=1, dtype='float64', crs=self.crs, transform=self.transform) as wrast:\n wrast.write(band1, indexes=1)\n\n # TODO - Set an AWS Cloud flag in the config_dict file to activate this function or not...\n # delete files created locally and put in bucket\n # PathManager.s3_delete_local(from_file, bucket, prefix_no_slash)", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else 
max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]", "def get_z_ranges(self):\n\n summary = self.get_rasters_summary()\n\n # Convert to dict in format:\n # { 'stat' : { 'z': (min, max), ... } ... }\n\n ranges = summary.groupby(['stat', 'z'], as_index=False)\n ranges = ranges.agg({'min': 'min', 'max': 'max'})\n ranges['vals'] = ranges.apply(\n lambda row: {\n row['z']: (row['min'], row['max'])\n }, axis=1)\n ranges = ranges.groupby('stat')['vals'].apply(\n lambda group: group.values)\n ranges = ranges.apply(\n lambda group: {\n int(k): v for d in group for k,\n v in d.items()})\n\n return ranges.to_dict()", "def _sum_n_rasters(\n raster_path_list, target_raster_path):\n LOGGER.info('Summing %s rasters to %s', len(raster_path_list),\n target_raster_path)\n LOGGER.debug('Attempting to open %s', raster_path_list[0])\n pygeoprocessing.new_raster_from_base(\n raster_path_list[0], target_raster_path, gdal.GDT_Float32,\n [NODATA_FLOAT32_MIN])\n\n target_raster = gdal.OpenEx(\n target_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n target_band = target_raster.GetRasterBand(1)\n\n n_pixels_to_process = (\n (target_raster.RasterXSize * target_raster.RasterYSize) *\n len(raster_path_list))\n n_pixels_processed = 0\n last_log_time = time.time()\n\n raster_tuple_list = []\n for raster_path in raster_path_list:\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n raster_tuple_list.append((raster, band, nodata))\n\n for block_info in pygeoprocessing.iterblocks(\n (raster_path_list[0], 1), offset_only=True):\n\n sum_array = numpy.empty(\n (block_info['win_ysize'], block_info['win_xsize']),\n dtype=numpy.float32)\n sum_array[:] = 0.0\n\n # Assume everything is valid until proven otherwise\n pixels_touched = numpy.zeros(sum_array.shape, dtype=bool)\n for (_, band, nodata) in raster_tuple_list:\n if time.time() - last_log_time >= 5.0:\n percent_complete = round(\n n_pixels_processed / n_pixels_to_process, 4)*100\n LOGGER.info(f'Summation {percent_complete:.2f}% complete')\n last_log_time = time.time()\n\n array = band.ReadAsArray(**block_info)\n valid_pixels = slice(None)\n if nodata is not None:\n valid_pixels = 
~utils.array_equals_nodata(array, nodata)\n\n sum_array[valid_pixels] += array[valid_pixels]\n pixels_touched[valid_pixels] = 1\n n_pixels_processed += sum_array.size # for logging\n\n sum_array[~pixels_touched] = NODATA_FLOAT32_MIN\n\n target_band.WriteArray(\n sum_array, block_info['xoff'], block_info['yoff'])\n\n LOGGER.info('Summation 100.00% complete')\n raster_tuple_list = None\n\n target_band.ComputeStatistics(0)\n target_band = None\n target_raster = None", "def zonal_stats_workflow():\n save_as = \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/summary/monthly_quickflow.csv\"\n scenario_dict = {\n 'pre-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/pre_decline\",\n 'post-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/post_decline\",\n }\n df_list = []\n for scenario in scenario_dict.iterkeys():\n results_dict = {\n 'scenario': [],\n 'month': [],\n 'sum_quickflow': [],\n }\n folder = scenario_dict[scenario]\n aoi_shp = os.path.join(folder, 'aggregated_results.shp')\n for month in xrange(1, 13):\n qf_raster = os.path.join(\n folder, 'intermediate_outputs', 'qf_{}.tif'.format(month))\n zonal_stats = pygeoprocessing.zonal_statistics(\n (qf_raster, 1), aoi_shp)\n sum_QF = zonal_stats[0]['sum']\n results_dict['scenario'].append(scenario)\n results_dict['month'].append(month)\n results_dict['sum_quickflow'].append(sum_QF)\n results_df = pandas.DataFrame(data=results_dict)\n df_list.append(results_df)\n combined_list = pandas.concat(df_list)\n combined_list.to_csv(save_as, index=False)", "def zonal_grid_statistics(stats, zones, categories=None, grids=None,\n aspect=None, shortnames=True):\n # Check inputs\n zones = _validation.input_file(zones, 'grid', False)\n\n if not (stats.endswith('.txt') or stats.endswith('.csv')):\n stats += '.csv'\n\n if categories is None:\n category_list = 'NULL'\n elif type(categories) is str:\n categories = [_validation.input_file(categories, 'grid', False)]\n category_list = categories[0]\n elif type(categories) in (list, tuple):\n categories = _validation.input_file(categories, 'grid', False)\n category_list = ';'.join(categories)\n else:\n raise TypeError('Wrong argument type to categories!')\n\n if grids is None:\n grids_list = 'NULL'\n elif type(grids) is str:\n grids = [_validation.input_file(grids, 'grid', False)]\n grids_list = grids[0]\n elif type(grids) in (list, tuple):\n grids = _validation.input_file(grids, 'grid', False)\n grids_list = ';'.join(grids)\n else:\n raise TypeError('Wrong argument type to grids!')\n\n if aspect is None:\n aspect = 'NULL'\n elif type(aspect) is str:\n aspect = _validation.input_file(zones, 'grid', False)\n else:\n raise TypeError('Wrong argument type to grids!')\n\n # Check inputs\n shortnames = str(int(shortnames))\n # Create cmd\n cmd = ['saga_cmd', '-f=q', 'statistics_grid', '5', '-ZONES', zones,\n '-CATLIST', category_list, '-STATLIST', grids_list, '-ASPECT',\n aspect, '-OUTTAB', stats, '-SHORTNAMES', shortnames]\n # Run command\n flag = _env.run_command_logged(cmd)\n if not flag:\n raise EnvironmentError(_ERROR_TEXT.format(_sys._getframe().f_code.co_name, _env.errlog))", "def zonal_statistics(self, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n process_id = 'zonal_statistics'\n args = {\n 'imagery': self.graph,\n 'regions': regions_geojson,\n 'func': 
func,\n 'scale': scale,\n 'interval': interval\n }\n\n return self.graph_add_process(process_id, args)", "def exact_zonalstats(self, ras_path, vec_path, fid, col, stats, output_csv):\n cmd = \"exactextract --raster grid:%s --polygons %s --fid %s --stat %s=%s\\(grid\\) --output %s\" %(ras_path, vec_path, fid, col, stats, output_csv)\n # Apply zonalstatistics\n os.system(cmd)", "def comparing_urban_zonal_stats(self, zonal_path = '../data/zonal/', fid = 'uid', stats = 'sum', gpd_ls = ['gpw', 'ghs_pop', 'worldpop'], \n schema = 'urban_pop', table = 'global_grid'):\n \n # Create folder if does not already exist\n if not os.path.exists(zonal_path): \n os.makedirs(zonal_path)\n \n for iso in self.country_iso3:\n \n # Define name of temp shp\n file_name = 'temp.gpkg'\n # And file path\n file_path = '../data/gpkg/'\n # Define full path \n vec_path = ''.join([file_path + file_name])\n \n if os.path.exists(vec_path):\n os.remove(vec_path)\n \n # Join schema and table together\n layer = '.'.join([schema, table])\n # Define sql statement to extract from table e.g. urban_pop.global_grid \n sql = \"SELECT * FROM %s WHERE gid_0 LIKE '%s'\" %(layer, iso)\n # Define column name of output zonal stats\n\n # Define db connection class \n db_conn = postgres_conn(section = 'postgresql', config_path = '../src/config/', config_file = 'database.ini', country_iso3 = iso)\n # Get vector geometries from postgres and store as temp shp\n #db_conn.psql_to_shp(file_name, file_path, schema, table, sql)\n db_conn.psql_to_gpkg(file_name, file_path, schema, table, sql)\n \n # Define full vector path including layer name\n vec_path = vec_path + '[gridded]'\n \n for gpd in gpd_ls:\n \n col = gpd + '_' + stats\n output_path = '../data/zonal/' + iso + '_' + gpd + '.csv'\n\n if 'gpw' == gpd:\n \n # Define input raster path\n ras_path = '../data/gpw/cropped/gpw_' + iso + '.tif'\n \n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)\n \n # Apply zonal statistics if db is ghs_pop\n elif 'ghs_pop' == gpd:\n \n # Define input raster path\n ras_path = '../data/ghs_pop/cropped/ghs_pop_' + iso + '.tif'\n\n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)\n \n # Apply zonal statistics if db is worldpop\n elif 'worldpop' == gpd:\n \n # Define input raster path\n ras_path = '../data/worldpop/MOSAIC_ppp_prj_2015/ppp_prj_2015_' + iso + '.tif'\n\n if not os.path.isfile(output_path):\n # Apply zonal statistics\n self.exact_zonalstats(ras_path, vec_path, fid, col, stats, output_csv = output_path)", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = 
rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def useZstat(zstat, file_path_name_save, file_path_conte, file_path_name_resting_atlas):\n\n import matplotlib.pyplot as plt\n import os\n from glob import glob\n import numpy as np\n import nibabel as nb\n import nibabel.gifti as gifti\n\n # Crucial: xvfb must be imported and started before importing mayavi\n from xvfbwrapper import Xvfb\n print('XVb pre')\n vdisplay = Xvfb()\n vdisplay.start()\n\n print('pre maya')\n # Crashes on this line if run with plain python (not xvfb-run ... python) and if xvfbwrapper is after it.\n from mayavi import mlab\n print('post maya')\n from tvtk.api import tvtk\n print('post tvtk')\n import math\n\n print('display')\n mlab.options.offscreen = True #offscreen window for rendering\n\n img = nb.load(file_path_name_resting_atlas)\n #img = nb.load('/Users/MathiasMacbook/Desktop/rfMRI_REST1_LR_Atlas.dtseries.nii')\n mim = img.header.matrix.mims[1]\n #for idx, bm in enumerate(mim.brainModels):\n # print((idx, bm.indexOffset, bm.brainStructure))\n bm1 = mim.brainModels[0]\n lidx = bm1.vertexIndices.indices\n bm2 = mim.brainModels[1]\n ridx = bm1.surfaceNumberOfVertices + bm2.vertexIndices.indices\n bidx = np.concatenate((lidx, ridx))\n\n axis = [0, 0, 1]\n theta = np.pi\n\n inflated = True\n split_brain = True\n\n surf = gifti.read(file_path_conte + '/Conte69.L.midthickness.32k_fs_LR.surf.gii') \n verts_L_data = surf.darrays[0].data\n faces_L_data = surf.darrays[1].data\n\n surf = gifti.read(file_path_conte + '/Conte69.R.midthickness.32k_fs_LR.surf.gii') \n verts_R_data = surf.darrays[0].data\n faces_R_data = surf.darrays[1].data\n\n if inflated:\n surf = gifti.read(file_path_conte + '/Conte69.L.inflated.32k_fs_LR.surf.gii')\n verts_L_display = surf.darrays[0].data\n faces_L_display = surf.darrays[1].data\n surf = gifti.read(file_path_conte + '/Conte69.R.inflated.32k_fs_LR.surf.gii')\n verts_R_display = surf.darrays[0].data\n faces_R_display = surf.darrays[1].data\n else:\n verts_L_display = verts_L_data.copy()\n verts_R_display = verts_R_data.copy()\n faces_L_display = faces_L_data.copy()\n faces_R_display = faces_R_data.copy()\n\n verts_L_display[:, 0] -= max(verts_L_display[:, 0])\n verts_R_display[:, 0] -= min(verts_R_display[:, 0])\n verts_L_display[:, 1] -= (max(verts_L_display[:, 1]) + 1)\n verts_R_display[:, 1] -= (max(verts_R_display[:, 1]) + 1)\n\n faces = np.vstack((faces_L_display, verts_L_display.shape[0] + faces_R_display))\n\n if split_brain:\n verts2 = rotation_matrix(axis, theta).dot(verts_R_display.T).T\n else:\n verts_L_display[:, 1] -= np.mean(verts_L_display[:, 1])\n verts_R_display[:, 1] -= np.mean(verts_R_display[:, 1])\n verts2 = verts_R_display\n\n verts_rot = np.vstack((verts_L_display, verts2))\n verts = np.vstack((verts_L_data, verts_R_data))\n #print verts.shape\n #print faces.shape\n\n if not os.path.exists(os.path.split(file_path_name_save)[0]):\n 
os.makedirs(os.path.split(file_path_name_save)[0]) \n\n print('use zstat')\n img = nb.load(zstat)\n print('loaded img')\n \n threshold = 2.3 # 1000, lower limit\n display_threshold = 6 #8000, upper limit\n\n data = img.get_data()\n aff = img.affine\n indices = np.round((np.linalg.pinv(aff).dot(np.hstack((verts, \n np.ones((verts.shape[0], 1)))).T))[:3, :].T).astype(int)\n scalars2 = data[indices[:, 0], indices[:, 1], indices[:, 2]]\n scalars2[np.abs(scalars2) < threshold] = 0.\n scalars = np.zeros(verts.shape[0])\n scalars[bidx] = scalars2[bidx]\n\n negative = positive = False\n if np.any(scalars < 0):\n negative = True\n if np.any(scalars > 0):\n positive = True\n\n nlabels = 2\n vmin = 0\n vmax = 0\n if negative and positive:\n maxval = max(-scalars.min(), scalars.max())\n if maxval > display_threshold:\n maxval = display_threshold\n vmin = -maxval\n vmax = maxval\n nlabels = 3\n vmin = -display_threshold ######\n vmax = display_threshold ######\n elif negative:\n vmin = scalars.min()\n if vmin < -display_threshold:\n vmin = -display_threshold\n vmax = 0\n vmin = -display_threshold ######\n elif positive:\n vmax = scalars.max()\n if vmax > display_threshold:\n vmax = display_threshold\n vmin = 0\n vmax = display_threshold ######\n #print zstat\n \n dual_split = True\n\n fig1 = mlab.figure(1, bgcolor=(0, 0, 0))\n mlab.clf()\n mesh = tvtk.PolyData(points=verts_rot, polys=faces)\n mesh.point_data.scalars = scalars\n mesh.point_data.scalars.name = 'scalars'\n surf = mlab.pipeline.surface(mesh, colormap='autumn', vmin=vmin, vmax=vmax)\n if dual_split:\n verts_rot_shifted = verts_rot.copy()\n verts_rot_shifted = rotation_matrix(axis, theta).dot(verts_rot_shifted.T).T\n verts_rot_shifted[:, 2] -= (np.max(verts_rot_shifted[:, 2]) - np.min(verts_rot_shifted[:, 2]))\n verts_rot_shifted[:, 0] -= np.max(verts_rot_shifted[:, 0])\n mesh2 = tvtk.PolyData(points=verts_rot_shifted, polys=faces)\n mesh2.point_data.scalars = scalars\n mesh2.point_data.scalars.name = 'scalars'\n surf2 = mlab.pipeline.surface(mesh2, colormap='autumn', vmin=vmin, vmax=vmax)\n colorbar = mlab.colorbar(surf, nb_labels=nlabels) #, orientation='vertical')\n lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()\n\n if negative and positive:\n half_index = lut.shape[0] / 2\n index = int(half_index * threshold / vmax)\n lut[(half_index - index + 1):(half_index + index), :] = 192\n lut[(half_index + index):, :] = 255 * plt.cm.autumn(np.linspace(0, 255, half_index - index).astype(int))\n lut[:(half_index - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, half_index - index).astype(int))\n elif negative:\n index = int(lut.shape[0] * threshold / abs(vmin))\n lut[(lut.shape[0] - index):, :] = 192\n lut[:(lut.shape[0] - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n elif positive:\n index = int(lut.shape[0] * threshold / vmax)\n lut[:index, :] = 192\n lut[index:, :] = 255 * plt.cm.autumn(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n lut[:, -1] = 255\n\n surf.module_manager.scalar_lut_manager.lut.table = lut\n if dual_split:\n surf2.module_manager.scalar_lut_manager.lut.table = lut\n surf.module_manager.scalar_lut_manager.show_scalar_bar = False\n surf.module_manager.scalar_lut_manager.show_legend = False\n surf.module_manager.scalar_lut_manager.label_text_property.font_size = 10\n surf.module_manager.scalar_lut_manager.show_scalar_bar = True\n surf.module_manager.scalar_lut_manager.show_legend = True\n mlab.draw()\n\n translate = [0, 0, 0]\n if inflated:\n zoom = -700\n 
else:\n zoom = -600\n if dual_split:\n if inflated:\n translate = [0, 0, -104.01467148]\n else:\n translate = [0, 0, -54.76305802] \n if inflated:\n zoom = -750\n else:\n zoom = -570\n \n #mlab.view(0, 90.0, zoom, translate)\n mlab.view(9, 90.0)\n\n print(file_path_name_save)\n \n mlab.savefig(file_path_name_save, figure=fig1, magnification=5)\n\n vdisplay.stop()", "def zonal_statistics(self, imagery, regions, func, scale=1000, interval=\"day\") -> 'ImageCollection':\n regions_geojson = regions\n if isinstance(regions,Polygon) or isinstance(regions,MultiPolygon):\n regions_geojson = mapping(regions)\n\n graph = {\n 'process_id': 'zonal_statistics',\n 'imagery': imagery.graph,\n 'regions': regions_geojson,\n 'func': func,\n 'scale': scale,\n 'interval': interval\n }\n\n imagery.graph = graph\n\n return imagery", "def getCompStats(self, photoz = \"z_peak\", verbose = True):\n\n specz = self.zout[\"z_spec\"]\n photoz = self.zout[photoz]\n\n dz = (photoz - specz)\n diff = (dz / (1.+specz))\n\n nmad = 1.4826 * np.median( np.abs( dz - np.median(dz) ) )\n mean_offset = np.mean(diff)\n median_offset = np.median(diff)\n dz1s = np.mean(np.abs(diff))\n\n outlier1 = ((np.abs(diff) > 0.15).sum(dtype = float) / self.NOBJ)\n outlier2 = ((np.abs(diff) > 3.*nmad).sum(dtype = float) / self.NOBJ)\n\n # print np.mean(np.abs(diff))\n\n # print nmad, outlier1, outlier2, mean_offset, median_offset\n\n if verbose:\n print \"#\"*35\n print \"NMAD: \\t\\t\\t{0:1.3f}\".format(nmad)\n print \"dz/1+z:\\t\\t\\t{0:1.3f}\".format(dz1s)\n print \"nu 1: \\t\\t\\t{0:1.1f}%\".format(outlier1*100.)\n print \"nu 2: \\t\\t\\t{0:1.1f}%\".format(outlier2*100.)\n print \"mean offset: \\t\\t{0:1.3f}\".format(mean_offset)\n print \"median offset: \\t\\t{0:1.3f}\".format(median_offset)\n print \"#\"*35\n\n keys = [\"nmad\", \"nu1\", \"nu2\", \"mean_offset\", \"median_offset\"]\n values = [nmad, outlier1, outlier2, mean_offset, median_offset]\n\n return dict(zip(keys, values))", "def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, 
wghts2D, wghts3D", "def azmap (scores, compare, dimension=0):\r\n mns = amean(compare,dimension)\r\n sstd = asamplestdev(compare,0)\r\n return (scores - mns) / sstd", "def reduce_rasters(stack, statistic, no_data_value=None, dtype=None):\n if statistic not in STATISTICS:\n percentile = parse_percentile_statistic(statistic)\n if percentile is None:\n raise KeyError('Unknown statistic \"{}\"'.format(statistic))\n else:\n statistic = \"percentile\"\n\n if len(stack) == 0:\n raise ValueError(\"Cannot reduce a zero-length stack\")\n\n # get the output array properties (dtype, no_data_value, shape)\n if dtype is None:\n dtype = stack[0][\"values\"].dtype\n if no_data_value is None:\n no_data_value = stack[0][\"no_data_value\"]\n shape = stack[0][\"values\"].shape\n\n # sum, count and nans output do not contain no data: fill zeroes right away\n if statistic in {\"sum\", \"count\", \"nans\"}:\n fill_value = 0\n else:\n fill_value = no_data_value\n\n # create the output array\n out = np.full(shape, fill_value, dtype)\n\n if statistic == \"last\":\n # populate 'out' with the last value that is not 'no data'\n for data in stack:\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n out[index] = data[\"values\"][index]\n elif statistic == \"first\":\n # populate 'out' with the first value that is not 'no data'\n for data in stack[::-1]:\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n out[index] = data[\"values\"][index]\n elif statistic == \"count\":\n # count the number of values that are not 'no data'\n for data in stack:\n out += get_index(data[\"values\"], data[\"no_data_value\"])\n else:\n if statistic == \"percentile\":\n func = partial(np.nanpercentile, q=percentile)\n else:\n func = STATISTICS[statistic]\n # transform 'no data' into 'nan' to be able to use numpy functions\n # NB: the dtype is at least float16 to accomodate NaN\n stack_array = np.full(\n (len(stack),) + shape, np.nan, np.result_type(dtype, np.float16)\n )\n for i, data in enumerate(stack):\n index = get_index(data[\"values\"], data[\"no_data_value\"])\n stack_array[i, index] = data[\"values\"][index]\n\n # protect against all-NaN slice warnings and errors\n not_all_nan = ~np.all(np.isnan(stack_array), axis=0)\n\n # perform the math\n out[not_all_nan] = func(stack_array[:, not_all_nan], axis=0)\n\n return {\"values\": out, \"no_data_value\": no_data_value}", "def obj_s2n_z(s2n_dict, z_bins, flux_bins, otype, outfile=None, ax=None):\n logs = get_logger()\n nz = z_bins.size\n nfx = flux_bins.size\n s2n_sum = np.zeros((nz-1,nfx-1))\n s2n_N = np.zeros((nz-1,nfx-1)).astype(int)\n # Loop on exposures+wedges (can do just once if these are identical for each)\n for jj, wave in enumerate(s2n_dict['waves']):\n # Turn wave into z\n zELG = wave / 3728. 
- 1.\n z_i = np.digitize(zELG, z_bins) - 1\n m_i = np.digitize(s2n_dict['OII'][jj]*1e17, flux_bins) - 1\n mmm = []\n for ll in range(nfx-1): # Only need to do once\n mmm.append(m_i == ll)\n #\n for kk in range(nz-1):\n all_s2n = s2n_dict['s2n'][jj][:,z_i==kk]\n for ll in range(nfx-1):\n if np.any(mmm[ll]):\n s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])\n s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]\n\n sty_otype = get_sty_otype()\n\n # Plot\n if ax is None:\n fig = plt.figure(figsize=(6, 6.0))\n ax= plt.gca()\n # Title\n fig.suptitle('{:s}: Redshift Summary'.format(sty_otype[otype]['lbl']),\n fontsize='large')\n\n # Plot em up\n z_cen = (z_bins + np.roll(z_bins,-1))/2.\n lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]\n mxy = 1e-9\n for ss in range(nfx-1):\n if np.sum(s2n_N[:,ss]) == 0:\n continue\n lbl = 'OII(1e-17) = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])\n ax.plot(z_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],\n label=lbl, color=sty_otype[otype]['color'])\n mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))\n\n ax.set_xlabel('Redshift')\n ax.set_xlim(z_bins[0], z_bins[-1])\n ax.set_ylabel('Mean S/N per Ang in dz bins')\n ax.set_yscale(\"log\", nonposy='clip')\n ax.set_ylim(0.1, mxy*1.1)\n\n legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,\n handletextpad=0.3, fontsize='medium', numpoints=1)\n\n # Finish\n plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)\n plt.subplots_adjust(top=0.92)\n if outfile is not None:\n plt.savefig(outfile, dpi=600)\n print(\"Wrote: {:s}\".format(outfile))", "def raw2outputs(raw, z_vals, rays_d, render_mask=False):\n raw2alpha = lambda x, y: 1. - torch.exp(-x * y)\n device = raw.device\n\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.tensor([1e-2], device=device).expand(dists[..., :1].shape)], -1) # [N_rays, N_samples]\n\n dists = dists * torch.norm(rays_d[..., None, :], dim=-1)\n\n rgb = raw[..., :3]\n\n alpha = raw2alpha(raw[..., 3], dists) # [N_rays, N_samples]\n weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1), device=device), 1. 
- alpha + 1e-10], -1), -1)[:,:-1]\n\n rgb_map = torch.sum(weights[..., None] * rgb, -2) # [N_rays, 3]\n\n weights_norm = weights.detach() + 1e-5\n weights_norm /= weights_norm.sum(dim=-1, keepdim=True)\n depth_map = torch.sum(weights_norm * z_vals, -1)\n\n if render_mask:\n density = raw[..., 3] # [N_rays, N_samples]\n mask_map = torch.sum(weights * density, dim=1) # [N_rays,]\n return rgb_map, depth_map, weights_norm, mask_map\n\n return rgb_map, depth_map, weights_norm", "def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )", "def test_read_raster(self):\n\n # FIXME (Ole): Some datasets show very large differences between extrema in the array and \n # those computed by GDAL. 
This may warrant another bug report to GEOS\n \n for coverage_name in ['test_grid', \n 'shakemap_padang_20090930',\n 'population_padang_1',\n 'population_padang_2',\n #'fatality_padang_1',\n #'fatality_padang_2'\n ]:\n \n \n filename = 'data/%s.asc' % coverage_name\n \n for R in [Raster(filename), read_coverage(filename)]:\n \n min, max = R.get_extrema()\n \n A = R.get_data(nan=True)\n B = R.get_data(nan=True)\n\n assert numpy.nanmax(A - B) == 0\n\n \n # FIXME (Ole): These tolerances are not really acceptable. Report to GEOS.\n assert numpy.allclose(min, numpy.nanmin(A[:]), rtol=1e-2)\n \n if coverage_name != 'population_padang_2':\n assert numpy.allclose(max, numpy.nanmax(A[:]), rtol=1e-2)", "def merge_rasters(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n rasters = [str(x) for x in i.joinpath('subnational').iterdir() if not x.name.endswith('txt') if x.name.endswith('norm.tif')]\n outfile = i.joinpath(f'{self.country}_{month}_normalised.tif')\n tiffs = \" \".join(rasters)\n gdal_cmd = f\"gdal_merge.py -o {outfile} -a_nodata -99999.0 -of gtiff {tiffs}\"\n subprocess.call(gdal_cmd, shell=True)", "def azs (a):\r\n zscores = []\r\n for item in a:\r\n zscores.append(z(a,item))\r\n return N.array(zscores)" ]
[ "0.68493986", "0.6522732", "0.62442964", "0.62096614", "0.6187095", "0.60611725", "0.60029554", "0.5962458", "0.5880883", "0.58487236", "0.5846595", "0.5792119", "0.57755446", "0.57681596", "0.5670428", "0.55730975", "0.5560811", "0.5551571", "0.55226356", "0.55010265", "0.5425854", "0.54105395", "0.54085886", "0.5401833", "0.53954977", "0.5377328", "0.5376893", "0.53562975", "0.5347076", "0.53305864" ]
0.71548915
0
Instantiate the daily profile class.
def __init__(self, profile: Dict[datetime.time, float] = None) -> None:
    if profile is None:
        profile = dict()
    if not isinstance(profile, dict):
        raise ProgrammerJudgementFault(
            "The input daily profile provided is not a mapping of the correct type."
        )
    self._profile = profile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name):\n self.__username = name\n self.__startDate = datetime.now().date().today() # This attributes will not be change ever once it has been initialized.", "def __init__(self, dt=60*60*24):\n pass", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def __init__(self):\n super(Profile, self).__init__()", "def __init__(self, period=None, date=None):\r\n self.period = period\r\n self.date = date", "def __init__(self, profile: AskarProfile):\n self._profile = profile", "def __init__(self):\n self.users = {}\n self.tweetTime = {}\n self.recentMax = 0\n self.time = 0", "def __init__(self, subject_id, gender, dob, dod, dod_hosp, dod_ssn, expire_flag):\n self.id = int(subject_id)\n self.gender = gender\n self.dob = utils.convert_to_date_time_object(dob)\n self.dod = utils.convert_to_date_time_object(dod)\n self.dod_hosp = utils.convert_to_date_time_object(dod_hosp)\n self.dod_ssn = utils.convert_to_date_time_object(dod_ssn)\n self.expire_flag = expire_flag\n\n self.hospital_visits = {}\n self.num_of_hospital_visits = 0\n self.total_num_of_icu_stays = 0", "def __init__(self, day, hour, minute):\n self.day = day\n self.hour = hour\n self.minute = minute", "def __init__(self,\n day=None,\n end_time=None,\n start_time=None,\n ):\n\n # Initialize members of the class\n self.day = day\n self.end_time = end_time\n self.start_time = start_time", "def create_instance(self, date):\n raise NotImplementedError", "def __init__(self, year, month, day):", "def __init__(self, Date, TimeOfDay):\n self.date = Date\n self.time_of_day = TimeOfDay", "def __init__(self):\n\n from dateutil import tz\n\n # Get timezone descriptions\n self.UTC = tz.tzutc()\n self.LOCAL = tz.gettz(\"Europe/Berlin\")\n\n # Lookup FOOD event type_id\n table = current.s3db.dvr_case_event_type\n query = (table.code == \"FOOD\") & \\\n (table.deleted != True)\n row = current.db(query).select(table.id, limitby=(0, 1)).first()\n self.FOOD = row.id if row else None\n\n self.SURPLUS_MEALS = s3_str(current.T(\"Surplus Meals\"))", "def __init__(self):\n self.now = datetime.now()", "def __init__(self, month, day, year):", "def __init__(self):\r\n super(ProfileParser, self).__init__([self.ProfileEntryHandler()])", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.creation_date = datetime.now()", "def __init__(self):\n self.site = ('http://vortex.plymouth.edu/cgi-bin/gen_statlog-u.cgi')\n \"\"\"Root of URL to query for data.\"\"\"\n yesterday = datetime.today() - timedelta(days=1)\n self.year = yesterday.year\n \"\"\"Year to get data for.\"\"\"\n self.month = yesterday.month\n \"\"\"Month to get data for.\"\"\"\n self.day = yesterday.day\n \"\"\"Day to get data for.\"\"\"\n self.stns = dict(yvr=\"CYVR\",\n sandheads=\"CWVF\")\n \"\"\"Mapping of common station names to official station IDs.\"\"\"", "def setUpClass(cls):\n now = timezone.now()\n cls.expired_dt = now + timedelta(days=-10)\n cls.current_dt = now + timedelta(days=90)", "def __init__(self, rate, from_weekday, to_weekday, from_hour, to_hour):\n self.from_weekday = from_weekday\n self.to_weekday = to_weekday\n self.from_hour = from_hour\n self.to_hour = to_hour\n self.rate = rate", "def __init__(self, className, name, title=None, unit=None):\n self.className = 
className\n filename = os.path.join(DATA, name + '.csv')\n with open(filename,encoding='utf8') as csv_file:\n reader = csv.reader(csv_file)\n for row_number, row in enumerate(reader):\n if row[1] == filename:\n continue\n if row[0] == '':\n self.title = row[1]\n continue\n if row[0] == 'unit':\n self.unit = row[1]\n continue\n try:\n datetime.strptime(row[0], \"%Y-%m-%d\")\n break\n except: ValueError\n super().__init__(name, title, unit)\n with open(filename,encoding='utf8') as csv_file:\n reader = csv.reader(csv_file)\n for skip in range(row_number): # row_number is first data line\n next(reader)\n for row in reader:\n try:\n self.data[datetime.strptime(row[0], \"%Y-%m-%d\")]=float(row[1])\n except: ValueError\n self.first_date=min(self.data.keys())\n self.last_date=max(self.data.keys())", "def __init__(self, security_identifier, profile_path):\n super(UserProfile, self).__init__()\n self.profile_path = profile_path\n self.security_identifier = security_identifier", "def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }", "def __init__(self, profile):\n self.subject_name = \"playbook\"\n Subject.__init__(self, profile, self.subject_name)", "def __init__(self, date_time, diastolic):\n Encounter.__init__(self, date_time)\n self.__diastolic = diastolic", "def __init__(self):\n # 保存用户推特数据\n self.user_pool = defaultdict(UserInfo)\n self.twitter_pool = defaultdict(list)\n self.time = 0", "def __init__(self, name, title=None, unit=None):\n super().__init__(name.lower(), title, unit)\n filename = os.path.join(DATA, name + '.csv')\n with open(filename) as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n try:\n value = float(row[name])\n except ValueError:\n continue\n date = datetime.strptime(row['DATE'], \"%Y-%m-%d\")\n self.data[date] = value\n self.first_date = min(self.data)\n self.last_date = max(self.data)", "def __init__(self, *args):\n this = _libsbml.new_Date(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, d, m, y):\n\n self.set_calendar(d, m, y)" ]
[ "0.6303617", "0.6276865", "0.61957604", "0.6129965", "0.60512453", "0.5994838", "0.59334636", "0.5906946", "0.57980454", "0.57551837", "0.5752836", "0.57355404", "0.5726834", "0.5710793", "0.5709311", "0.56886977", "0.5673901", "0.5655608", "0.5648033", "0.5615956", "0.55905086", "0.5588783", "0.5588267", "0.5580841", "0.5574352", "0.55732423", "0.55709463", "0.5559031", "0.5553749", "0.55436355" ]
0.6949065
0
Updates the internal profile with the mapping provided.
def update(self, profile: Dict[datetime.time, float]) -> None:
    if self._profile is None:
        self._profile = profile
    else:
        self._profile.update(profile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr(type(self), 'field_names')\n for key, value in mapping.items():\n if key in field_names:\n setattr(self, key, value)", "def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)", "def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })", "def update(self, profiles, matches):\n raise NotImplementedError()", "def update_profile(self, channels=None): # pragma: no cover\n pass", "def mapping(self, mapping):\n self.set_mapping(mapping)", "def update(self, mapItem: MapItem):\n pass", "def update(\n self,\n mapping: Mapping | Iterable[tuple[str, Any]] | None = None,\n **kwargs: Any,\n ) -> None:\n with self.changed.blocked():\n if mapping:\n items = mapping.items() if isinstance(mapping, Mapping) else mapping\n for key, value in items:\n getattr(self, key).value = value\n for key, value in kwargs.items():\n getattr(self, key).value = value\n self.changed.emit()", "def setMappedInfo(self, mapped_info):\n \n self.mapped_info = mapped_info", "def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)", "def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )", "def update(self, upddict):\n\t\tfor (key, value) in upddict.iteritems():\n\t\t\tsetattr(self, key, value)", "def profile_data(self, profile_data):\n\n self._profile_data = profile_data", "def update_policy_profile(self, profile, body=None):\r\n return self.put(self.policy_profile_path % (profile), body=body)", "def update_network_profile(self, profile, body=None):\r\n return self.put(self.network_profile_path % (profile), body=body)", "def put(self, request, flavor_profile_id):\n update_flavor_profile(request)", "def update(self, key, value):\n if key in self.map:\n self.map[key] = value", "def current_mapping(self, current_mapping):\n self._current_mapping = current_mapping", "def update(self, other_cmap):\r\n if not isinstance(other_cmap, CorrectMap):\r\n raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap)\r\n self.cmap.update(other_cmap.get_dict())\r\n self.set_overall_message(other_cmap.get_overall_message())", "def update(self):\n self.send_tf_msg()\n super(Map).update()", "def update(self, mapper_info: dict):\n self.update_from_dict(\n [\n \"form_id\",\n \"form_name\",\n \"form_revision_number\",\n \"process_key\",\n \"process_name\",\n \"status\",\n \"comments\",\n \"modified_by\",\n ],\n mapper_info,\n )\n self.commit()", "def applyMapping(self):\n pass", "def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. 
kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True", "def test_update_risk_profile_using_put(self):\n pass", "def update_dict(new,old):", "def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass", "def update(self, obj):\n self.identity_map[obj._instance_key] = obj\n self.register_dirty(obj)", "def set_profile(self, profile: str):\n self._profile = profile", "def recursive_update(\n base_dict: typing.Dict[typing.Any, typing.Any],\n new_dict: typing.Mapping[typing.Any, typing.Any],\n ) -> None:\n for key, value in new_dict.items():\n if isinstance(value, collections.Mapping) and (\n base_dict.get(key) is not None\n ):\n TrainingConfig.recursive_update(base_dict[key], value)\n else:\n base_dict[key] = value", "def update_map(mapping, map_file):\n #Replace commas in mapping string with newlines\n mapping = mapping.replace(',', '\\n')\n\n try:\n with open(map_file, 'w') as f:\n f.write(mapping)\n except IOError as e:\n logging.error(\"Can not write %s\", map_file)\n logging.error(e)" ]
[ "0.63624704", "0.609009", "0.60749924", "0.5952606", "0.59126085", "0.5860821", "0.5851326", "0.58165675", "0.57689905", "0.5755722", "0.57045287", "0.56417894", "0.5599517", "0.55890405", "0.5579434", "0.55684793", "0.55682325", "0.5558063", "0.55190516", "0.5518738", "0.54925174", "0.5489183", "0.5453527", "0.54371965", "0.5415451", "0.54064655", "0.540205", "0.5388321", "0.53722847", "0.5354438" ]
0.7125943
0
The density of air varies as a function of temperature.
def density_of_air(self) -> float:
    return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def air_density(self):\n return self.flow_field.air_density", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. Check the sensor\")", "def density(self):\n return (1e-3*self.molar_mass) * self.pressure / (gas_constant * self.temperature) # kg/m^3", "def air_density(altitude):\n p = pressure(altitude) # psf\n t = temperature(altitude) # R\n rho = p/(gas_constant*t) # lb/ft3\n return rho", "def density(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_p = iceair_h(0,0,1,wair,pres,temp=temp,airf=airf,dhum=dhum)\n dens = h_p**(-1)\n return dens", "def fGasDensity(GasGravity, Temperature, Pressure):\n\tGasConstant = 8.314\n\tPress = Pressure / 145.038 # MPa\n\tTemp = Temperature + 273.16 # Deg K\n\tPr = Press / (4.892 - (0.4048 * GasGravity))\n\tTr = Temp / (94.72 + (170.75 * GasGravity))\n\tA = 0.03 + 0.00527 * ((3.5 - Tr)**3)\n\tB = (0.642 * Tr) - (0.007 * (Tr**4)) - 0.52\n\tC = 0.109 * ((3.85 - Tr)**2)\n\tD = exp(-((0.45 + (8 * ((0.56 - (1 / Tr))**2))) * ((Pr**1.2) / Tr)))\n\tZ = (A * Pr) + B + (C * D)\n\treturn (28.8 * GasGravity * Press) / (Z * GasConstant * Temp)", "def fWaterDensity(Salinity, GasWaterRatio, Temperature, Pressure):\n\tTemp = Temperature\n\tPress = Pressure / 145.038\n\tSal = Salinity / 1000\n\tA = (-80 * Temp) + (-3.3 * (Temp**2)) + (0.00175 * (Temp**3))\n\tB = (489 * Press) + (-2 * Temp * Press) + (0.016 * (Temp**2) * Press)\n\tC = (-0.000013 * (Temp**3) * Press) + (-0.333 * (Press**2)) + (0.002 * Temp * (Press ** 2))\n\tPureWaterDensity = 1 + ((A + B + C) * 1e-6)\n\tA = 80 + (3 * Temp) + (-3300 * Sal) + (-13 * Press) + (47 * Press * Sal)\n\tB = (300 * Press) + (-2400 * Press * Sal)\n\tC = 0.000001 * (B + (Temp * A))\n\tD = 0.668 + (0.44 * Sal)\n\treturn PureWaterDensity + (Sal * (D + C))", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def temperature() -> float:", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def density(self):\n return self.get_density()", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def density_from_pressure(temperature, pressure, RH):\n # R = specific gas constant , J/(kg*degK) = 287.05 for dry air\n Rd = 287.05\n # http://www.baranidesign.com/air-density/air-density.htm\n # http://wahiduddin.net/calc/density_altitude.htm\n # Evaporation into the Atmosphere, Wilfried Brutsaert, p37\n # saturation vapor pressure is a polynomial developed by Herman Wobus\n e_so = 6.1078\n c0 = 0.99999683\n c1 = -0.90826951e-2\n c2 = 0.78736169e-4\n c3 = -0.61117958e-6\n c4 = 0.43884187e-8\n c5 = -0.29883885e-10\n c6 = 0.21874425e-12\n c7 = -0.17892321e-14\n c8 = 0.11112018e-16\n c9 = -0.30994571e-19\n \n p = (c0 + temperature*(\n c1 + 
temperature*(\n c2 + temperature*(\n c3 + temperature*(\n c4 + temperature*(\n c5 + temperature*(\n c6 + temperature*(\n c7 + temperature*(\n c8 + temperature*(\n c9)))))))))) \n \n sat_vp = e_so / p**8\n Pv = sat_vp * RH\n density = (pressure / (Rd * temperature)) * (1 - (0.378 * Pv / pressure))\n return density", "def getDensityEstimate(self):\n return self.density", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def density( self ) :\n return self.__density", "def current_density(self, xyz):\n\n j = self.electric_field(xyz) / self.rho\n return j", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def density(self):\n return self._density", "def __density(self, x):\n\n z = np.power(self.rate, x) / m.factorial(x)\n return z * np.exp(-self.rate)", "def test_density(self):\n earth = CoreMantleCrustModel()\n assert earth.density(0) == 14\n assert earth.density(1e6) == 14\n assert earth.density(3.464e6) == 14\n assert earth.density(3.5e6) == 3.4\n assert earth.density(5e6) == 3.4\n assert earth.density(6.338e6) == 3.4\n assert earth.density(6.378e6) == 2.9", "def density(self, alt):\n (Z, T, CN2, CO2, CO, CAr, CHe, CH, CM, WM) = self.altitude_profile(alt)\n\n # using eqn(42) of COESA for multiple gases\n M_i = [wmN2, wmO2, wmO, wmAr, wmHe, wmH] << (u.g / u.mol)\n n_i = [\n CN2.to_value(u.m**-3),\n CO2.to_value(u.m**-3),\n CO.to_value(u.m**-3),\n CAr.to_value(u.m**-3),\n CHe.to_value(u.m**-3),\n CH.to_value(u.m**-3),\n ] << (1 / u.m**3)\n rho = (n_i @ M_i) / Na\n return rho.to(u.kg / u.m**3)", "def calc_air_density(temperature, pressure, elevation_ref=None, elevation_site=None, lapse_rate=-0.113,\n specific_gas_constant=286.9):\n\n temp = temperature\n temp_kelvin = temp + 273.15 # to convert deg C to Kelvin.\n pressure = pressure * 100 # to convert hPa to Pa\n ref_air_density = pressure / (specific_gas_constant * temp_kelvin)\n\n if elevation_ref is not None and elevation_site is not None:\n site_air_density = round(ref_air_density + (((elevation_site - elevation_ref) / 1000) * lapse_rate), 3)\n return site_air_density\n elif elevation_site is None and elevation_ref is not None:\n raise TypeError('elevation_site should be a number')\n elif elevation_site is not None and elevation_ref is None:\n raise TypeError('elevation_ref should be a number')\n else:\n return ref_air_density", 
"def compute_energy_density(kT):\n h=u.planck\n c=u.speed_of_light\n pi=np.pi\n return (8*pi/(h*c)**3)*((pi*kT)**4/15)", "def getDensity(h, R_w, R_sun): # k is a fitting constant\n\n R = np.sqrt(R_w**2+h**2)\n r = R/R_sun # units need to be in solar radii \n a = 77.1\n b = 31.4\n c = 0.954\n d = 8.30\n e = 0.550\n f = 4.63\n\n return (a*r**(-b) + c*r**(-d) + e*r**(-f))*10**8 #[cm-3]" ]
[ "0.7871237", "0.77620345", "0.73548687", "0.7349471", "0.6937895", "0.6883913", "0.6820493", "0.6713705", "0.6696069", "0.66546506", "0.6654112", "0.6648508", "0.6641304", "0.6609871", "0.65910405", "0.6577765", "0.6577765", "0.6577765", "0.6552875", "0.654293", "0.65178937", "0.65178937", "0.6496522", "0.6484519", "0.64358747", "0.6431408", "0.6410963", "0.638376", "0.634123", "0.62969977" ]
0.7765341
1
The dynamic viscosity of air varies as a function of temperature.
def dynamic_viscosity_of_air(self) -> float:
    return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (
        self.ambient_temperature + 110.4
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kinematic_viscosity_of_air(self) -> float:\n\n return self.dynamic_viscosity_of_air / self.density_of_air", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def air_density(self):\n return self.flow_field.air_density", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def variable_vis(self):\n return self._variable_vis", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def liquid_viscosity(id, temperature=298.15, pressure=constants.atm): # noqa: A002\n return rx._misc._get_chemical(id, temperature, pressure).mul # noqa: SLF001", "def viscosity(altitude):\n t_ref = temperature(0) # R\n t = temperature(altitude) # R\n s = 198.72 # R\n mu_ref = 3.737 * 10 ** (-7) # [slug/(ft*s)]\n mu = mu_ref*((t/t_ref)**(3/2))*(t_ref + s)/(t + s) # [slug/(ft*s)]\n return mu", "def calculate_visibility(qv,qc,qr,qi,qs,T,p):\n Rd = 287.\n COEFLC = 144.7\n COEFLP = 2.24\n COEFFC = 327.8\n COEFFP = 10.36\n EXPLC = 0.88\n EXPLP = 0.75\n EXPFC = 1.\n EXPFP = 0.7776\n\n Tv = T * (1+0.61*qv) # Virtual temperature\n\n rhoa = p/(Rd*Tv) # Air density [kg m^-3]\n rhow = 1e3 # Water density [kg m^-3]\n rhoi = 0.917e3 # Ice density [kg m^-3]\n\n vovmd = (1+qv)/rhoa + (qc+qr)/rhow + (qi+qs)/rhoi\n\n conc_lc = 1e3*qc/vovmd\n conc_lp = 1e3*qr/vovmd\n conc_fc = 1e3*qi/vovmd\n conc_fp = 1e3*qs/vovmd\n\n # Make sure all concentrations are positive\n conc_lc[conc_lc < 0] = 0\n conc_lp[conc_lp < 0] = 0\n conc_fc[conc_fc < 0] = 0\n conc_fp[conc_fp < 0] = 0\n\n betav = COEFFC*conc_fc**EXPFC\\\n + COEFFP*conc_fp**EXPFP\\\n + COEFLC*conc_lc**EXPLC\\\n + COEFLP*conc_lp**EXPLP+1E-10\n\n vis = -np.log(0.02)/betav # Visibility [km]\n vis[vis > 24.135] = 24.135\n\n return vis", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def density_of_air(self) -> float:\n\n return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature)", "def temperature() -> float:", "async def air_density(self, temperature, station_pressure):\n if temperature is not None and station_pressure is not None:\n kelvin = temperature + 273.15\n pressure = station_pressure\n r_specific = 287.058\n decimals = 2\n\n air_dens = (pressure * 100) / (r_specific * kelvin)\n\n if self._unit_system == UNITS_IMPERIAL:\n air_dens = air_dens * 0.06243\n decimals = 4\n\n return round(air_dens, decimals)\n\n _LOGGER.error(\"FUNC: air_density ERROR: Temperature or Pressure value was reported as NoneType. 
Check the sensor\")", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first term should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)", "def _CloudVar(self): \n # q is MA order of ARMA(1,q)\n q = int(round(self.lambda_avg/self.lambda_s))\n a = exp(-self.lambda_s / self.lambda_p) \n (var, var_ratio) = self._ARMAvar(q, a)\n # This variance is a multiple of the variance of the noise driving the\n # AR(1) model. This variance, in turn, is a multiple of the underlying\n # measurement variance, with the relationship given in Gillespie 96\n var = var * (1. - exp(-2*self.lambda_s / self.lambda_p))/2\n # print q, a\n return var", "def is_artificial(self):\n\t\treturn 0", "def get_specific_heat() -> float:\n return 1006.0", "def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)", "def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. 
Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def get_production_factor(self, temp_atmosphere):\n return 1.", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity", "def simulated_reflectivity(pressure, temperature, vapor_mixing_ratio, liquid_mixing_ratio, snow_mixing_ratio=None,\n graupel_mixing_ratio=None, use_varint=False, use_liqskin=False):\n # Set values for constants with variable intercept\n R1 = 1e-15\n RON = 8e6\n RON2 = 1e10\n SON = 2e7\n GON = 5e7\n RON_MIN = 8e6\n RON_QR0 = 0.00010\n RON_DELQR0 = 0.25*RON_QR0\n RON_CONST1R = (RON2-RON_MIN)*0.5\n RON_CONST2R = (RON2+RON_MIN)*0.5\n\n # set constant intercepts\n rno_l = 8e6\n rno_s = 2e7\n rno_g = 4e6\n\n qvapor = da.clip(vapor_mixing_ratio, 0., None)\n qliquid = da.clip(liquid_mixing_ratio, 0., None)\n\n # If qgraupel but not qsnow, set qgraupel = qsnow\n if snow_mixing_ratio is None:\n if graupel_mixing_ratio is None:\n qsnow = da.zeros_like(qliquid)\n qgraupel = da.zeros_like(qliquid)\n else:\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n qsnow = da.zeros_like(graupel_mixing_ratio)\n qsnow[temperature <= 273.15] = qgraupel[temperature <= 273.15]\n else:\n qsnow = da.clip(snow_mixing_ratio, 0., None)\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n\n # density for liquid, snow, and graupel (kg m-3)\n rho_l = 1000. # liquid\n rho_i = 100. # snow\n rho_g = 400. 
# graupel\n\n # constant evaluation of gamma distribution\n gamma = 720.\n\n # Alpha constant\n alpha = 0.224\n\n # constant multiplication factors\n factor_l = gamma * 1e18 * (1./(np.pi*rho_l))**1.75\n s = gamma * 1e18 * (1./(np.pi*rho_i))**1.75 * (rho_i/rho_l)**2 * alpha\n g = gamma * 1e18 * (1./(np.pi*rho_g))**1.75 * (rho_g/rho_l)**2 * alpha\n\n # calculate virtual temperature\n virtual_t = virtual_temperature(temperature, qvapor)\n\n # dry gas constant\n Rd = 287.\n rho_air = pressure/(Rd*virtual_t)\n\n # adjust for brightband if use_liqskin=True\n if use_liqskin:\n raise NotImplementedError('Liquid skin correction not implemented')\n # factor_s = da.full_like(temperature, s)\n # factor_g = da.full_like(temperature, g)\n # try:\n # factor_s[temperature >= 273.15] = factor_s[temperature >= 273.15] / da.array([alpha])\n # factor_g[temperature >= 273.15] = factor_g[temperature >= 273.15] / da.array([alpha])\n # except ValueError:\n # factor_s = s\n # factor_g = g\n else:\n factor_s = s\n factor_g = g\n\n # calculate variable intercept if use_varint=True\n if use_varint:\n raise NotImplementedError('Variable intercepts not yet implemented')\n # temp_c = da.clip(temperature-273.15, temperature.min(), -0.001)\n # sonv = MIN(2.0D8, 2.0D6*EXP(-0.12D0*temp_c))\n #\n # gonv = gon\n # IF (qgr(i,j,k) .GT. R1) THEN\n # gonv = 2.38D0 * (PI*RHO_G/(rhoair*qgr(i,j,k)))**0.92D0\n # gonv = MAX(1.D4, MIN(gonv,GON))\n # END IF\n #\n # ronv = RON2\n # IF (qra(i,j,k) .GT. R1) THEN\n # ronv = RON_CONST1R*TANH((RON_QR0 - qra(i,j,k))/RON_DELQR0) + RON_CONST2R\n # END IF\n else:\n ronv = rno_l\n sonv = rno_s\n gonv = rno_g\n\n # Total equivalent reflectivity factor (z_e, in mm^6 m^-3) is\n # the sum of z_e for each hydrometeor species:\n z_e = (((factor_l*(rho_air*qliquid)**1.75)/(ronv**.75)) +\n ((factor_s*(rho_air*qsnow)**1.75)/(sonv**.75)) +\n ((factor_g*(rho_air*qgraupel)**1.75)/(gonv**.75)))\n\n # Adjust small values of Z_e so that dBZ is no lower than -30\n z_e = da.clip(z_e, .001, None)\n\n # Convert to dBZ\n dbz = 10.*da.log10(z_e)\n return dbz", "def atmosphereVariation(img, header, chanInfo, airmass=1.5, pwv=-1, removeSlope=True):\n freqs, values = CalcAtmTransmissionForImage(img, header, chanInfo, airmass=airmass, pwv=pwv, value='transmission')\n if removeSlope:\n slope, intercept = linfit(freqs, values, values*0.001)\n casalogPost(\"Computed atmospheric variation and determined slope: %f per GHz (%.0f,%.2f)\" % (slope,freqs[0],values[0]))\n values = values - (freqs*slope + intercept) + np.mean(values)\n maxMinusMin = np.max(values)-np.min(values)\n percentage = maxMinusMin/np.mean(values)\n freqs, values = CalcAtmTransmissionForImage(img, header, chanInfo, airmass=airmass, pwv=pwv, value='tsky')\n if removeSlope:\n slope, intercept = linfit(freqs, values, values*0.001)\n values = values - (freqs*slope + intercept) + np.mean(values)\n TmaxMinusMin = np.max(values)-np.min(values)\n Tpercentage = TmaxMinusMin*100/np.mean(values)\n stdValues = np.std(values)\n return(maxMinusMin, percentage, TmaxMinusMin, Tpercentage, stdValues)", "def visc(s, t, p):\n s, t, p = map(np.asanyarray, (s, t, p))\n return (1e-4 * (17.91 - 0.5381 * t + 0.00694 * t ** 2 + 0.02305 * s) /\n sw.dens(s, t, p))", "def intensity(self) -> int:" ]
[ "0.7093741", "0.6370206", "0.617513", "0.6122823", "0.61151516", "0.6091893", "0.6021482", "0.59835947", "0.592721", "0.5878499", "0.58544457", "0.5826623", "0.58215696", "0.5811718", "0.58073586", "0.56892794", "0.56720847", "0.56465936", "0.5620597", "0.5558093", "0.5547346", "0.5543419", "0.5529484", "0.54964113", "0.54892886", "0.5484821", "0.5484269", "0.5476041", "0.5470144", "0.5468478" ]
0.82398015
0
Return the heat capacity of air in Joules per kilogram Kelvin. The heat capacity of air varies as a function of temperature and is given by an empirically-derived formula.
def heat_capacity_of_air(self) -> float:
    return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heatCapacity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"heat capacity\", Tk)\n return (\n sum(\n [\n +1.38642e-13 * Tk**4,\n -6.47481e-10 * Tk**3,\n +1.02345e-06 * Tk**2,\n -4.32829e-04 * Tk,\n +1.06133e00,\n ]\n )\n * 1000.0\n ) # kJ / kg K to J / kg K", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def heat_capacity(r, phi, q, kT):\n pot = q*(phi - phi[0])\n a = np.trapz(pot**2 * np.exp(-pot/kT) * r, r)\n b = np.trapz(pot * np.exp(-pot/kT) * r, r)\n c = np.trapz(np.exp(-pot/kT) * r, r)\n return 3/2 + 1/kT**2 * (a/c - b**2/c**2)", "def heat(self, delta_temp):\n return self.heat_capacity * self.mass * delta_temp", "def get_heat_capacity_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n # There is a precision problem with \"-\" (minus) operator\n # We'll use asymptotic formula for high temperatures to avoid that problem\n y_low = y[y < THRESHOLD]\n vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD]\n tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD]\n # high temperatures - low numbers\n C_P_low = 5 * gbar * np.sqrt(2) / (36 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low\n C_P_low *= (\n 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2)\n - 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2\n )\n C_P_low *= _1d_call(_fdk, y_low, k=3 / 2) / _1d_call(_fdk, y_low, k=1 / 2) ** 2\n # low temperatures - high numbers\n C_P_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3)\n return np.concatenate((C_P_low, C_P_high)).reshape(y.shape)", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def get_specific_heat() -> float:\n return 1006.0", "def getHeatCapacity(self, Tlist):\n\t\treturn _modes.harmonicoscillator_heatcapacity(Tlist, self.frequency) * self.degeneracy", "def temperature(k, kmax):\n return 1.0 / 500 * (1.0 / k - 1.0 / kmax)", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def get_heat_capacity_volume(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n # There is a precision problem with \"-\" (minus) operator\n # We'll use asymptotic formula for high temperatures to avoid that problem\n y_low = y[y < THRESHOLD]\n vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD]\n tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD]\n # high temperatures - low numbers\n C_V_low = 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2)\n C_V_low -= 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2\n C_V_low *= gbar * np.sqrt(2) / (4 * 
np.pi ** 2) * tt_low ** (3 / 2) * vv_low\n C_V_low /= _1d_call(_fdk, y_low, k=-1 / 2)\n # low temperatures - high numbers\n C_V_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3)\n return np.concatenate((C_V_low, C_V_high)).reshape(y.shape)", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def getHeatCapacity(self, Tlist, V=1.0):\n\t\treturn _modes.translation_heatcapacity(Tlist, self.mass, self.dimension, V)", "def specificHeatCapacity(d, d_iso, density, cp):\n d_t = min(0.5 * np.sum(d), d_iso , 0.1)\n sum_d_i = d[0]\n i = 0 \n kappa = 0 \n while sum_d_i <= d_t:\n kappa += d[i] * density[i] * cp[i]\n i += 1\n sum_d_i += d[i]\n else:\n sum_d_i -= d[i]\n d_part = d_t - sum_d_i \n kappa += d_part * density[i] * cp[i]\n\n return kappa", "def harmonicOscillator_heatCapacity(T, freq):\n x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def getEnthalpyOfVaporization(self,Temperature):\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\n\t\t# Eqn 7 from Epstein et al 2009\n\t\tHvap = 2.303*8.3145*Temperature*Temperature*B/((C + Temperature - 273.15)*(C + Temperature - 273.15))\n\t\treturn Hvap # units are J/molK", "def getHeatCapacity(self, Tlist):\n\t\treturn _modes.hinderedrotor_heatcapacity(Tlist, self.frequency, self.barrier) * self.degeneracy", "def alpha_B_HII(temperature):\n # HII recombination rate\n # input : T in K\n # output : HII recombination rate (in cm3 / s)\n l = 315614./temperature\n a = 2.753e-14 * l**1.5 / (1. + (l/2.74)**0.407)**2.242\n return a", "def canopy_heat_capacity(states: ClimateStates) -> float:\n return CAP_LEAF * states.leaf_area_index", "def getHeatCapacity(self, Tlist):\n\t\treturn _modes.freerotor_heatcapacity(Tlist, self.frequencies, 1 if self.linear else 0)", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def molar_mass_dry_air():\n return 28.9647", "def gueymard94_pw(temp_air, relative_humidity):\n\n T = temp_air + 273.15 # Convert to Kelvin # noqa: N806\n RH = relative_humidity # noqa: N806\n\n theta = T / 273.15\n\n # Eq. 
1 from Keogh and Blakers\n pw = (\n 0.1 *\n (0.4976 + 1.5265*theta + np.exp(13.6897*theta - 14.9188*(theta)**3)) *\n (216.7*RH/(100*T)*np.exp(22.330 - 49.140*(100/T) -\n 10.922*(100/T)**2 - 0.39015*T/100)))\n\n pw = np.maximum(pw, 0.1)\n\n return pw", "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def temperature_energy():\n e = _si.e.value\n k_B 
= _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )" ]
[ "0.72807765", "0.70755357", "0.67571175", "0.6657788", "0.6539134", "0.63971967", "0.63434315", "0.6203374", "0.62022215", "0.61694735", "0.61694306", "0.6157651", "0.61571056", "0.61204106", "0.6098233", "0.60910946", "0.6065617", "0.6051415", "0.60473984", "0.6033305", "0.6029253", "0.5986691", "0.5982994", "0.5909707", "0.58936924", "0.58759123", "0.5864374", "0.58613294", "0.5851433", "0.581145" ]
0.77720433
0
The kinematic viscosity of air varies as a function of temperature.
def kinematic_viscosity_of_air(self) -> float: return self.dynamic_viscosity_of_air / self.density_of_air
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def dispersion(self, k):\n return np.sqrt(\n 1 + (\n (k*self.material.hbar_m)**2 / (2*self.material.m_star_m) -\n self.material.z\n )**2\n )", "def viscosity(altitude):\n t_ref = temperature(0) # R\n t = temperature(altitude) # R\n s = 198.72 # R\n mu_ref = 3.737 * 10 ** (-7) # [slug/(ft*s)]\n mu = mu_ref*((t/t_ref)**(3/2))*(t_ref + s)/(t + s) # [slug/(ft*s)]\n return mu", "def dispersion(self, k):\n return 2*self.material.gamma*np.sqrt(self.material.z)", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def kts(self):\n return CAL_TO_J * 0.0077 * (self.rho/1000.0) * (self.rho/1000.0)", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def feller(self):\n return 2 * self.kappa_y * self.mean_v - self.eta_y**2 > 0", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def von_klitzing_constant(self):\n return self._von_klitzing_constant", "def VTrue(h,Vc,p,Temp_m):\n M = Mach(h,Vc,p)\n return M*np.sqrt(gamma*R*Static_T(Temp_m,M))", "def temperature() -> float:", "def conductivity(self, T):\n m = self.mass\n mu = self.viscosity(T)\n K = (15/4) * kB * mu / m\n return K", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def air_density(self):\n return self.flow_field.air_density", "def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))", "def kineticEnergy(self):\n return self.params['kinetic']", "def viscosity(self, T):\n m = self.mass\n w = self.omega\n Tr = self.t_ref\n dr = self.d_ref\n a = self.alpha\n \n mu_ref_numerator = (5 * (1 + a) * (2 + a) * sqrt(kB * m * Tr / pi))\n mu_ref_denominator = 4 * a * dr **2 * (7 - 2 * w) * (5 - 2 * w)\n mu_ref = mu_ref_numerator / mu_ref_denominator\n \n mu = mu_ref * (T / Tr) ** w\n return mu", "def calculate_visibility(qv,qc,qr,qi,qs,T,p):\n Rd = 287.\n COEFLC = 144.7\n COEFLP = 2.24\n COEFFC = 327.8\n COEFFP = 10.36\n EXPLC = 0.88\n EXPLP = 0.75\n EXPFC = 1.\n EXPFP = 0.7776\n\n Tv = T * (1+0.61*qv) # Virtual temperature\n\n rhoa = p/(Rd*Tv) # Air density [kg m^-3]\n rhow = 1e3 # Water density [kg m^-3]\n rhoi = 0.917e3 # Ice density [kg m^-3]\n\n vovmd = (1+qv)/rhoa + (qc+qr)/rhow + (qi+qs)/rhoi\n\n conc_lc = 1e3*qc/vovmd\n conc_lp = 1e3*qr/vovmd\n conc_fc = 1e3*qi/vovmd\n conc_fp = 1e3*qs/vovmd\n\n # Make sure all concentrations are positive\n conc_lc[conc_lc < 0] = 0\n conc_lp[conc_lp < 0] = 0\n conc_fc[conc_fc < 0] = 0\n conc_fp[conc_fp < 0] = 0\n\n betav = COEFFC*conc_fc**EXPFC\\\n + COEFFP*conc_fp**EXPFP\\\n + COEFLC*conc_lc**EXPLC\\\n + COEFLP*conc_lp**EXPLP+1E-10\n\n vis = -np.log(0.02)/betav # Visibility [km]\n vis[vis > 24.135] = 24.135\n\n return vis", "def kelvin_effect(pres, surft, 
temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def kinetic_energy(self, sys):\n v = sys.velocities\n m = sys.mass\n return 0.5*np.dot(m, np.multiply(v, v))", "def func_kc_318(n, series):\n if series == \"3D3\":\n try:\n return 2*np.pi/(wl_3D3[str(n)]*1e-9)\n except:\n return 0", "def test_virtual_potential_temperature():\n p = 999. * units.mbar\n t = 288. * units.kelvin\n qv = .0016 * units.dimensionless # kg/kg\n theta_v = virtual_potential_temperature(p, t, qv)\n assert_almost_equal(theta_v, 288.3620 * units.kelvin, 3)", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def molar_mass_dry_air():\n return 28.9647", "def k_Ni00(wind_ms, temp_C):\n\n U = wind_ms\n\n Sc = schmidt_number(temp_C)\n k = (0.333 * U + 0.222 * U ** 2) * (600 / Sc) ** 0.5\n\n return k", "def liquid_viscosity(id, temperature=298.15, pressure=constants.atm): # noqa: A002\n return rx._misc._get_chemical(id, temperature, pressure).mul # noqa: SLF001" ]
[ "0.7809844", "0.62890047", "0.62317854", "0.6194581", "0.61644995", "0.61163557", "0.600697", "0.5984969", "0.5790677", "0.575207", "0.5744783", "0.5729746", "0.5714137", "0.57019544", "0.5691392", "0.56809", "0.56597465", "0.56572974", "0.5655333", "0.56213343", "0.56118655", "0.55981666", "0.55919814", "0.5574342", "0.55607146", "0.5530427", "0.55266887", "0.55231625", "0.55113393", "0.5509306" ]
0.8016766
0
Determines the radiative temperature of the sky. The "sky," as a black body, has a radiative temperature different to that of the surrounding air, or the ambient temperature. This function converts between them and outputs the sky's radiative temperature.
def sky_temperature(self) -> float: return 0.0552 * (self.ambient_temperature**1.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)", "def temperature() -> float:", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def Tsky(self, source):\n\n if not _usePyGSM:\n raise ImportError('PyGSM is not available: cannot access sky temperatures')\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n\n source=source.galactic\n T=healpy.pixelfunc.get_interp_val(self.map,\n source.l.value,\n source.b.value,\n lonlat=True)\n return T*u.K", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def get_capacitive_rain_sensor_temp(\n self, rain_sensor_temp: Optional[int] = None\n ) -> float:\n # TODO: these values were hardcoded but now are taken from the CW.\n # Check which way is the \"true\" way based on the sensor type (capacitive vs Hydredon)\n # rain_pull_up_resistance = 1\n # rain_res_at_25 = 1\n # rain_beta = 3450\n absolute_zero = 273.15\n\n if rain_sensor_temp is None:\n rain_sensor_temp = self.raw_rain_sensor_temp\n\n if rain_sensor_temp < 1:\n rain_sensor_temp = 1\n elif rain_sensor_temp > 1022:\n rain_sensor_temp = 1022\n\n r = self.rain_pull_up_resistance / ((1023 / rain_sensor_temp) - 1)\n r = math.log(r / self.rain_res_at_25)\n\n return 1 / (r / self.rain_beta + 1 / (absolute_zero + 25)) - absolute_zero", "def get_temperature(self):\n \n # Get temp readings from both sensors\n humidity_temp = self._sense_hat.get_temperature_from_humidity()\n pressure_temp = self._sense_hat.get_temperature_from_pressure()\n \n # avg_temp becomes the average of the temperatures from both sensors\n # We need to check for pressure_temp value is not 0, to not ruin avg_temp calculation\n avg_temp = (humidity_temp + pressure_temp) / 2 if pressure_temp else humidity_temp\n \n # Get the CPU temperature\n cpu_temp = self._get_cpu_temp()\n \n # Calculate temperature compensating for CPU heating\n adj_temp = avg_temp - (cpu_temp - avg_temp) / 1.5\n \n # Average out value across the last three readings\n return self._get_smooth(adj_temp)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def temperature(self) -> float:\n # Start a 
measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def temperature(altitude):\n if altitude <= 36152:\n t = 59-0.00356*altitude # deg F\n else:\n t = -70 # deg F\n t = t + 459.7 # R\n return t", "def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def temperatures(self):\r\n return self._arm.temperatures", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def get_temperature(\n self, sensitivity: Optional[str] = None, temp_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or temp_sensor is None:\n sensitivity, temp_sensor = self.get_temperature_sensor()\n if sensitivity == \"th\":\n temp = temp_sensor * 175.72 / 65536 - 46.85\n elif sensitivity == \"t\":\n temp = temp_sensor * 1.7572 - 46.85\n else:\n raise CloudWatcherException(\n f\"Unknown temperature sensor type {sensitivity}\"\n )\n\n return temp", "def ambient_temperature(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"F\":\r\n return self.ambient_temperature_f\r\n elif self.temperature_scale == \"C\":\r\n return self.ambient_temperature_c\r\n 
else:\r\n return self._ambient_temperature", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def get_temperature(self):\n pass", "def getTemperature(self):\n return self.temperature", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def temperatures():\n\n return station_9281", "def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . .*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature" ]
[ "0.677967", "0.65615", "0.64276695", "0.6416784", "0.64141804", "0.6408829", "0.62878555", "0.6222289", "0.6220975", "0.61991245", "0.6182677", "0.61740917", "0.61678636", "0.6091844", "0.60883605", "0.6079088", "0.6072627", "0.6015334", "0.60076296", "0.59943837", "0.59905624", "0.5988029", "0.5980697", "0.59772134", "0.5975562", "0.59164184", "0.5863152", "0.586078", "0.5853003", "0.5832522" ]
0.77121866
0
The thermal conductivity of air varies as a function of temperature.
def thermal_conductivity_of_air(self) -> float: # This more accurate equation is not used by the paper. # return (0.02646 * self.ambient_temperature ** 1.5) / ( # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature)) # ) # The reference suggests this equation is accurate to 1%. return 0.02646 * (self.ambient_temperature / 300) ** 0.8646
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def temperature() -> float:", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def temperature() -> FlowFieldVal:\n return [\n self._t_s - self._delta_t * tf.math.tanh(z / self._height) for z in zz\n ]", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def temperature(self) -> 
float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def get_temperature(self):\n pass", "def current_temperature(self) -> float:\n return self._thermostat.current_temperatue", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def air_density(self):\n return self.flow_field.air_density", "def temperature():\n from .imperial import deg_F as F\n from .imperial import deg_R as R\n\n K = si.K\n C = si.deg_C\n\n return Equivalency(\n [\n (K, C, lambda x: x - 273.15, lambda x: x + 273.15),\n (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8),\n (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8),\n (R, F, lambda x: x - 459.67, lambda x: x + 459.67),\n (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67),\n (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8),\n ],\n \"temperature\",\n )", "def getTemperature(self):\n return self.temperature", "def temperature(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n return temp" ]
[ "0.79880536", "0.764283", "0.7523666", "0.75130093", "0.7090662", "0.70669293", "0.7042304", "0.6954361", "0.6936385", "0.6905874", "0.6894696", "0.68758714", "0.6788131", "0.6774004", "0.677208", "0.67583066", "0.6724294", "0.67135936", "0.66866106", "0.66677165", "0.6657772", "0.6656361", "0.6648099", "0.6646587", "0.66422915", "0.6608585", "0.6579855", "0.6576052", "0.6573095", "0.6572762" ]
0.8359033
0
The thermal expansion coefficient of air varies as a function of temperature.
def thermal_expansivity_of_air(self) -> float: return 1 / self.ambient_temperature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def temperature() -> float:", "def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def get_D_C3H8_air(self, T):\n\n self.set_TempPres_dependents(T)\n\n self.D_C3H8_air = (\n 2. / 3. * np.sqrt(const.k_B * T / np.pi * 0.5 * (1. /\n self.air.m + 1. / self.fuel.m)) / (np.pi * (0.5 *\n (self.air.d + self.fuel.d)) ** 2.) / self.air.n\n )\n\n return self.D_C3H8_air", "def temperature(wair,pres,entr=None,temp=None,airf=None,dhum=None,\n chkvals=False,chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,\n chkbnd=False,mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n return temp", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. 
* self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def temperature_energy():\n e = _si.e.value\n k_B = _si.k_B.value\n return Equivalency(\n [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],\n \"temperature_energy\",\n )", "def heat_capacity_of_air(self) -> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2", "def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def constant_temp(self, numIterations):\n return 1 + self.alpha", "def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, 
T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def get_actual_air_conditioned_temperature(\n hc_period: np.ndarray,\n theta_ac: np.ndarray, v_supply: np.ndarray, theta_supply_h: np.ndarray, theta_supply_c: np.ndarray,\n l_d_h: np.ndarray, l_d_cs: np.ndarray,\n u_prt: float, a_prt: np.ndarray, a_hcz: np.ndarray, q: float) -> np.ndarray:\n\n rho = get_air_density()\n c = get_specific_heat()\n\n a_prt = a_prt.reshape(1, 5).T\n a_hcz = a_hcz[0:5].reshape(1, 5).T\n\n theta_ac_act_h = np.maximum(theta_ac + (c * rho * v_supply * (theta_supply_h - theta_ac) - l_d_h * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n theta_ac_act_c = np.minimum(theta_ac - (c * rho * v_supply * (theta_ac - theta_supply_c) - l_d_cs * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n return theta_ac_act_h * (hc_period == 'h') + theta_ac_act_c * (hc_period == 'c') + theta_ac * (hc_period == 'm')", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))" ]
[ "0.7279272", "0.68165576", "0.6797786", "0.6684487", "0.66219014", "0.6579864", "0.6570214", "0.6559898", "0.649577", "0.64194864", "0.63722163", "0.6345614", "0.6337939", "0.6337814", "0.6305888", "0.6286312", "0.6284374", "0.6271602", "0.6268423", "0.62625694", "0.62529576", "0.62480354", "0.62434256", "0.6219022", "0.6205769", "0.62008363", "0.61886775", "0.61633605", "0.6153172", "0.61523736" ]
0.76827806
0
Determines the convective heat transfer coefficient, either free, or forced. In the absence of any wind, the "free" wind_heat_transfer_coefficient is returned. If there is wind present, then this parameter is known as the "forced" wind_heat_transfer_coefficient.
def wind_heat_transfer_coefficient(self) -> float: return 3.8 + 2 * self.wind_speed # return 4.5 + 2.9 * self.wind_speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def get_chiller_temperature(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_ACT)", "def getHeatFlux(self, T):\n\t\tQ = self.heat_transfer_coefficient * (self.T_wall - T)\n\t\treturn Q", "def conductive_heat_flux(discr, eos, cv, grad_t):\n transport = eos.transport_model()\n return -transport.thermal_conductivity(eos, cv)*grad_t", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def setHeatTransferCoeff(self, u):\n return _cantera.wall_setHeatTransferCoeff(self.__wall_id, u)", "def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh", "def heatFlowRate(self):\n return _cantera.wall_Q(self.__wall_id)", "def calc_maintenance_cost (self):\n\n if str(self.comp_specs['operational costs']) \\\n != 'UNKNOWN':\n self.maintenance_cost = \\\n self.comp_specs['operational costs']\n else:\n self.maintenance_cost = \\\n (self.comp_specs['percent o&m'] / 100.0) * self.capital_costs\n #~ print 'self.maintenance_cost',self.maintenance_cost", "def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c", "def target_temperature(self):\n if self.current_operation == 'Heat & Cool':\n return None\n if self.current_operation == 'Heat only':\n return int(self._api._heatto)\n elif self.current_operation == 'Cool only':\n return int(self._api._coolto)\n return None", "def fuel_cost(self, update=False):\n if update or self._dfs['fuel_cost'] is None:\n self._dfs['fuel_cost'] = pudl.analysis.mcoe.fuel_cost(self)\n return self._dfs['fuel_cost']", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def target_temperature(self) -> float | None:\n if self.hvac_mode == HVACMode.COOL:\n return self.target_temperature_high\n if self.hvac_mode == HVACMode.HEAT:\n return self.target_temperature_low\n return None", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def 
transport_cost_per_t(self):\n return safe_divide(self.reseller.operating_expenses(), self.quantity_fieldside)", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def best_coupling(self):\n\n return self.coupling().max()", "def target_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_high_c\"))\r\n return kelvin_to_celsius(self._target_temperature_high)", "def coupling(self):\n couple = coupling_parameter(\n self.T_e, self.n_e, (self.particle, self.particle), self.Z\n )\n if couple < 0.01:\n warnings.warn(\n f\"Coupling parameter is {couple}, you might have strong coupling effects\",\n CouplingWarning,\n )\n\n return couple", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def get_chiller_temperature_setpoint(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_SET)", "def distribute(self, date_time, air_temp, vapor_pressure=None,\n dew_point=None, cloud_factor=None):\n\n self._logger.debug('%s Distributing thermal' % date_time)\n\n # calculate clear sky thermal\n if self.clear_sky_method == 'marks1979':\n cth = np.zeros_like(air_temp, dtype=np.float64)\n envphys_c.ctopotherm(\n air_temp, dew_point,\n self.dem,\n self.sky_view_factor,\n cth,\n self.config['marks1979_nthreads'])\n\n elif self.clear_sky_method == 'dilley1998':\n cth = clear_sky.Dilly1998(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'prata1996':\n cth = clear_sky.Prata1996(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'angstrom1918':\n cth = clear_sky.Angstrom1918(air_temp, vapor_pressure/1000)\n\n # terrain factor correction\n if (self.sky_view_factor is not None) and \\\n (self.clear_sky_method != 'marks1979'):\n # apply (emiss * skvfac) + (1.0 - skvfac) to the longwave\n cth = cth * self.sky_view_factor + (1.0 - self.sky_view_factor) * \\\n STEF_BOLTZ * air_temp**4\n\n # make output variable\n self.thermal_clear = cth.copy()\n\n # correct for the cloud factor\n # ratio of measured/modeled solar indicates the thermal correction\n if self.correct_cloud:\n if self.cloud_method == 'garen2005':\n cth = cloud.Garen2005(cth,\n cloud_factor)\n\n elif self.cloud_method == 'unsworth1975':\n cth = cloud.Unsworth1975(cth,\n air_temp,\n cloud_factor)\n\n elif self.cloud_method == 'kimball1982':\n cth = cloud.Kimball1982(cth,\n air_temp,\n vapor_pressure/1000,\n cloud_factor)\n\n elif self.cloud_method == 'crawford1999':\n cth = cloud.Crawford1999(cth,\n air_temp,\n cloud_factor)\n\n # make output variable\n self.thermal_cloud = cth.copy()\n\n # correct for vegetation\n if self.correct_veg:\n cth = vegetation.thermal_correct_canopy(cth,\n air_temp,\n self.veg_tau,\n self.veg_height)\n\n # make output variable\n self.thermal_veg = cth.copy()\n\n self.thermal = utils.set_min_max(cth, self.min, self.max)", "def target_temperature(self) -> float | None:\n if self._device.mode == ThermostatMode.COOL and self._device.cooling_setpoint:\n return self._device.scaled_cooling_setpoint\n\n if self._device.heating_setpoint:\n return self._device.scaled_heating_setpoint\n\n return None", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) 
* self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def adjust_cost(self) -> None:\n\n n_iterations = self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )", "def discharge_coefficient(self) -> _VectorisedFloat:\n window_ratio = np.array(self.window_width / self.window_height)\n coefs = np.empty(window_ratio.shape + (2, ), dtype=np.float64)\n\n coefs[window_ratio < 0.5] = (0.06, 0.612)\n coefs[np.bitwise_and(0.5 <= window_ratio, window_ratio < 1)] = (0.048, 0.589)\n coefs[np.bitwise_and(1 <= window_ratio, window_ratio < 2)] = (0.04, 0.563)\n coefs[window_ratio >= 2] = (0.038, 0.548)\n M, cd_max = coefs.T\n\n window_angle = 2.*np.rad2deg(np.arcsin(self.opening_length/(2.*self.window_height)))\n return cd_max*(1-np.exp(-M*window_angle))" ]
[ "0.6343963", "0.59971076", "0.59249985", "0.5791971", "0.576443", "0.5683115", "0.565232", "0.55878526", "0.5570194", "0.54607165", "0.5381237", "0.52986056", "0.5273817", "0.5269059", "0.5203606", "0.5199913", "0.5169069", "0.5163147", "0.5161178", "0.51067317", "0.510497", "0.5098822", "0.50977695", "0.5091799", "0.5088686", "0.50850314", "0.50850314", "0.5050838", "0.50430024", "0.50383663" ]
0.69990456
0
Return a nice representation of the weather conditions.
def __repr__(self) -> str: return ( "WeatherConditions(" f"ambient_temperature: {self.ambient_temperature:.3f}K, " f"azimuthal_angle: {self.azimuthal_angle}deg, " f"declination: {self.declination}deg, " f"density: {self.density_of_air:.3f}kg/m^3, " f"dynamic_viscosity: {self.dynamic_viscosity_of_air:.3f}kg/m*s, " f"heat_capacity: {self.heat_capacity_of_air}:.3fJ/kg*K, " f"irradiance: {self.irradiance:.3f}W/m^2, " f"kinematic_viscosity: {self.kinematic_viscosity_of_air:.3f}m^2/s, " f"sky_temperature: {self.sky_temperature:.3f}K, " f"thermal_conductivity: {self.thermal_conductivity_of_air:.3f}W/m*K, " f"thermal_expansion_coefficient: {self.thermal_expansivity_of_air:.3f}K^-1, " f"wind_heat_transfer_coefficient: {self.wind_heat_transfer_coefficient:2f}W/m*K, " f"wind_speed: {self.wind_speed:.3f}m/s, " ")" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditions(self, json):\n conditions = str(json['forecast']['simpleforecast']['forecastday'][0]['conditions'])\n return conditions", "def genWeather():\n\n weather = random.choice(weather_conditions.keys())\n condition = weather_conditions[weather]\n (tMax, tMin) = condition[\"temperature\"]\n (pMax, pMin) = condition[\"pressure\"]\n (hMax, hMin) = condition[\"humidity\"]\n\n return weather + \"|\" + str(round(random.uniform(tMax, tMin), 1)) + \"|\" + \\\n str(round(random.uniform(pMax, pMin), 1)) + \"|\" + \\\n str(random.randrange(hMax, hMin, -1))", "def convert_weather(self, description):\n conditions = {\n 'clear sky': 'clear',\n 'few clouds': 'clouds with some sunshine',\n 'scattered clouds': 'cloudy',\n 'broken clouds': 'cloudy',\n 'shower rain': 'showers',\n 'thunderstorm': 'thunder and lightning',\n 'mist': 'fog'\n }\n\n if description in conditions:\n return conditions.get(description, \"Look out of the window.\")\n else:\n return description", "def conditions(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"conditions\")", "def __str__(self):\n temperature = None\n offset = ' ' * 4\n if self._forecast_type == ForecastType.TODAY:\n temperature = (f'{offset}{self._current_temp}\\xb0\\n'\n f'{offset}High {self._high_temp}\\xb0 / '\n f'Low {self._low_temp}\\xb0 ')\n else:\n temperature = (f'{offset}High {self._high_temp}\\xb0 / '\n f'Low {self._low_temp}\\xb0 ')\n return (f'>> {self.forecast_date}\\n'\n f'{temperature}'\n f'({self._description})\\n'\n f'{offset}Wind: '\n f'{self._wind} / Humidity: {self._humidity}\\n')", "def weather(self):\r\n try:\r\n return str(self.connect()['weather'][0]['description'])\r\n except:\r\n return '@weather'", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n msg = '- Window size: ' + str(self.window_size) + \" by \" + str(self.window_size)\n msg += '\\n'\n msg += ' - Expression for r.mapcalc to determine column water vapor: '\n return msg + str(self.column_water_vapor_expression)", "def getData():\n\t\n\ttry:\n\t\tgoogleWeather = pywapi.get_weather_from_google(location)\n\t\tcondition = googleWeather['current_conditions']['condition']\n\t\ttemp = googleWeather['current_conditions']['temp_c']\n\t\treturn \"<weather location=\\\"\" + location + \"\\\" condition=\\\"\" + condition + \"\\\" temp=\\\"\" + temp + \"c\" + \"\\\"/>\"\n\texcept:\n\t\treturn \"\"", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.items()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def weather_helper():\n\n weather = get_weather('Chicago')\n conditions = weather['weather'][0]['description']\n temperature = weather['main']['temp']\n location = weather['name']\n\n curr_weather = 'It is currently %s degrees with %s in %s' % (temperature, conditions, location)\n return curr_weather", "def getWaterConditions(self):\n return self._getConditions(restrict=['CS-Eau'])", "def __str__(self):\r\n s = ''\r\n for i, (k, v) in enumerate(self.meters.items()):\r\n if i > 0:\r\n s += ' '\r\n s += k + ' ' + str(v)\r\n return s", "def 
details(weather):\n\treturn \"\"\"<table class=\"forecast bg-success\"><tr><th colspan=\"2\" class=\"text-center lead\">Weather for {location} at {time}<th></tr>\n\t<tr><td>Temp: {temperature}<i class=\"wi wi-celsius\"></i> Feels Like: {feelsLike}<i class=\"wi wi-celsius\"></i></td><td rowspan=\"9\"><img src=\"map.gif?{latitude},{longitude}\" width=\"600\" height=\"371\"/><td></tr>\n\t<tr><td>Low: {low}<i class=\"wi wi-celsius\"></i> High: {high}<i class=\"wi wi-celsius\"></i></td></tr>\n\t<tr><td>Sunrise <i class=\"wi wi-sunrise\"></i>: {sunrise} Sunset <i class=\"wi wi-sunset\"></i>: {sunset}</td></tr>\n\t<tr><td>Wind: {windSpeed} kph from {windBearing} <i class=\"wi wi-wind.towards-{windDirection}-deg\"></i></td></tr>\n\t<tr><td>Summary <i class=\"wi wi-{icon}\"></i>: {summary}</td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td>&nbsp;</td><td>&nbsp;</td></tr>\n\t</table>\"\"\".format(**weather)", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.iteritems()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def current_weather(self):\n weather = DarkSky(self.coordinates)\n current_weather = weather.current_weather\n reply = [f\"*Current Weather in {self.format_location(self.location)}*\"]\n summary = current_weather['summary']\n temp = current_weather['temperature']\n feels_like = current_weather['apparentTemperature']\n rain_chance = current_weather['precipProbability']\n wind_speed = current_weather['windSpeed']\n cloud_cover = current_weather['cloudCover']\n icon = current_weather['icon']\n emoji = self.get_emoji(icon)\n logging.info(f\"Weather Icon: {icon}\")\n weather_message = [\n f\"{emoji} *{summary}*\",\n f\">*Temperature: `{temp}`*\",\n f\">*Feels Like: `{feels_like}`*\",\n f\">*Chance of Rain: `{rain_chance}`*\",\n f\">*Wind Speed: `{wind_speed}`*\",\n f\">*Cloud Cover: `{cloud_cover}`*\"\n ]\n reply.append(\"\\n\".join(weather_message))\n return \"\\n\".join(reply)", "def __str__(self):\n status = (\"\\na: %.2f \\n\" % self.a +\n \"e: %.2f \\n\" % self.e +\n \"inc: %.2f deg \\n\" % (self.inc * 180/math.pi) +\n \"om: %.2f deg \\n\" % (self.om * 180/math.pi) +\n \"Om: %.2f deg \\n\" % (self.Om * 180/math.pi) +\n \"H: %.2f \\n\" % self.H\n )\n return status", "def generate_weather_conditions(temperature, temp_type):\n\n if temp_type == \"MIN\" or temperature < 5:\n if temperature > 10:\n return 0\n elif temperature >= 0:\n return (10.-temperature)/10.\n else:\n return 1\n\n elif temp_type == \"AVG\":\n\n if temperature > 25:\n return 0\n elif temperature >= 15:\n return (25.-temperature)/(25.-15)\n elif temperature >= 5:\n return (temperature-5.)/(15-5.)\n\n elif temp_type == \"MAX\":\n if temperature > 40:\n return 1\n elif temperature >= 20:\n return (temperature-20)/(40.-20)\n else:\n return 0", "def get_conditions(self):\n return (self.temp, self.humid)", "def get_weather_violation(weather,minimums):\n # Implement this function\n #print(weather)\n #print(minimums)\n \n result = ''\n #print(weather['wind'])\n if weather == None:\n# result = 'Unknown'\n return 'Unknown'\n #elif bad_visibility(weather['visibility'],minimums[1]) == False and bad_winds(weather['wind'], minimums[2], minimums[3]) == False and bad_ceiling(weather['sky'],minimums[0]) == False:\n #result = ''\n \n if bad_winds(weather['wind'], minimums[2], minimums[3]) == True:\n result = 'Winds' if result == '' else 'Weather'\n \n if bad_visibility(weather['visibility'], minimums[1]) == True:\n result = 'Visibility' if result 
== '' else 'Weather'\n \n if bad_ceiling(weather['sky'], minimums[0]) == True:\n result = 'Ceiling' if result == '' else 'Weather'\n \n #elif \n \n return result", "def format_status(self) -> str:\n if not self.ready_to_trade:\n return \"Market connectors are not ready.\"\n lines = []\n\n if len(self.stored_executors) > 0:\n lines.extend([\n \"\\n########################################## Closed Executors ##########################################\"])\n\n for executor in self.stored_executors:\n lines.extend([f\"|Signal id: {executor.timestamp}\"])\n lines.extend(executor.to_format_status())\n lines.extend([\n \"-----------------------------------------------------------------------------------------------------------\"])\n\n if len(self.active_executors) > 0:\n lines.extend([\n \"\\n########################################## Active Executors ##########################################\"])\n\n for executor in self.active_executors:\n lines.extend([f\"|Signal id: {executor.timestamp}\"])\n lines.extend(executor.to_format_status())\n if self.candles.is_ready:\n lines.extend([\n \"\\n############################################ Market Data ############################################\\n\"])\n signal, take_profit, stop_loss, indicators = self.get_signal_tp_and_sl()\n lines.extend([f\"Signal: {signal} | Take Profit: {take_profit} | Stop Loss: {stop_loss}\"])\n lines.extend([f\"BB%: {indicators[0]} | MACDh: {indicators[1]} | MACD: {indicators[2]}\"])\n lines.extend([\"\\n-----------------------------------------------------------------------------------------------------------\\n\"])\n else:\n lines.extend([\"\", \" No data collected.\"])\n\n return \"\\n\".join(lines)", "def all_characteristics_as_string(self):\n chars1 = _all_characteristics_\n chars2 = [ch.title() for ch in chars1]\n chars2[chars2.index('Internalstructure')] = 'InternalStructure'\n\n s = ('%-18s %-24s %-2s'%('Characteristic', 'Semantic value','#'))\n s+= '\\n'\n s+= ('%-18s %-24s %-2s' % ('-', '-', '-')) + '\\n'\n\n for i in range(len(chars1)):\n attrs = (chars2[i],\\\n getattr(self,chars2[i])(),\n getattr(self,chars1[i]))\n s += '%-18s | %-24s | %-2d' % attrs\n s += '\\n'\n return s[:-1] # cut the trailing newline character", "def device_state_attributes(self):\n # TODO: convert RH from Elk to AH ?\n #if self.current_humidity > 0:\n # humidity = self.current_humidity\n data = {\n 'hidden': self._hidden,\n 'temp_unit' : self.temperature_unit,\n }\n if self._device.temp_outside is not None and self._device.temp_outside > -460:\n data['temp_outside'] = self._device.temp_outside\n if self._device.temp_3 is not None and self._device.temp_3 > -460:\n data['temp_3'] = self._device.temp_3\n if self._device.temp_4 is not None and self._device.temp_4 > -460:\n data['temp_4'] = self._device.temp_4\n return data", "def generate_condition_data(self):\n # set 'Conditions' column to NA\n self.output['Conditions'] = 'NA'\n\n # instantiate new MarkovChain object\n MC = MarkovChain()\n\n # apply forecast function on 'Conditions' column based on temperature\n # and humidity values for each observation period\n params = self.output[[\"Temperature\", \"Humidity\"]]\n self.output[['Conditions']] = params.apply(\n lambda x: MC.forecast_weather(x.values[0], x.values[1]), axis=1)", "def state(self):\n result = \"\"\n if self._type == \"weather\":\n result = self._connector.get_condition()\n elif self._type == \"weather_report\":\n result = re.search(\n \"\\w+, \\d{2}\\.\\d{2}\\.\\d{2}, \\d{2}:\\d{2}\",\n self._connector.get_weather_report(),\n 
).group()\n elif self._type == \"temperature\":\n result = self._connector.get_temperature()\n elif self._type == \"dewpoint\":\n result = self._connector.get_dewpoint()\n elif self._type == \"pressure\":\n result = self._connector.get_pressure()\n elif self._type == \"wind_speed\":\n result = self._connector.get_wind_speed()\n elif self._type == \"wind_direction\":\n result = self._connector.get_wind_direction()\n elif self._type == \"wind_gusts\":\n result = self._connector.get_wind_gusts()\n elif self._type == \"precipitation\":\n result = self._connector.get_precipitation()\n elif self._type == \"precipitation_probability\":\n result = self._connector.get_precipitation_probability()\n elif self._type == \"precipitation_duration\":\n result = self._connector.get_precipitation_duration()\n elif self._type == \"cloud_coverage\":\n result = self._connector.get_cloud_coverage()\n elif self._type == \"visibility\":\n result = self._connector.get_visibility()\n elif self._type == \"sun_duration\":\n result = self._connector.get_sun_duration()\n elif self._type == \"sun_irradiance\":\n result = self._connector.get_sun_irradiance()\n elif self._type == \"fog_probability\":\n result = self._connector.get_fog_probability()\n elif self._type == \"humidity\":\n result = self._connector.get_humidity()\n return result", "def __str__(self):\n state_1 = \"Time: \" + str(self._time)\n state_2 = \"Current Cookies: \" + str(self._current_cookies)\n state_3 = \"CPS: \" + str(self._cps)\n state_4 = \"Total Cookies: \" + str(self._total_cookies)\n return state_1 + \" \" + state_2 + \" \" + state_3 + \" \" + state_4", "def PrintWeather(Weather):\n print('Temperature : {}°C'.format(Weather[0]))\n print('Humidity : {} %'.format(Weather[1]))\n print('Description : {}'.format(Weather[2])+'\\n')\n return 1", "def wattsString(self):\n return self.watts is None and \"unknown\" or str(self.watts)", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"conv_activations = {}\\n\".format(self.conv_activations)\n status += \"conv_architecture = {}\\n\".format(self.conv_architecture)\n status += \"kernel_sizes = {}\\n\".format(self.kernel_sizes)\n status += \"pool_kernel = {}\\n\".format(self.pool_kernel)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def to_string(self):\n if self.is_power_onoff():\n return 'Power On/Off'\n else:\n gain = str(hex(int(self['gain_speed'])))\n out = self['target'].ljust(20) + ' ' + self['filters'].ljust(11) + ' ' + self['x_bin'] + 'x' + self['y_bin'] + ' ' + gain[2:].upper()\n \n \n if self.number_windows() > 0:\n out += ' ' + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + ' ' + self['x1_start'].ljust(3) + ' ' + self['y1_start'].ljust(4)\n if self.number_windows() > 1:\n out += ' ' + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + ' ' + self['x2_start'].ljust(3) + ' ' + self['y2_start'].ljust(4)\n \n if 'Comment' in self:\n out += ' ' + self['Comment']\n return out" ]
[ "0.63701195", "0.6170547", "0.6153412", "0.6075571", "0.59051156", "0.5875144", "0.582113", "0.5815576", "0.5801642", "0.5736014", "0.5721063", "0.5704934", "0.56886274", "0.5640063", "0.56320167", "0.5610608", "0.5609194", "0.55756634", "0.5571646", "0.5566451", "0.5524659", "0.5514211", "0.5509786", "0.55024344", "0.54777163", "0.5442844", "0.54310066", "0.5416631", "0.5409348", "0.5381201" ]
0.79582655
0
Create a Model from a formula and dataframe.
def from_formula(cls, formula, data, subset=None, drop_cols=None,
                 *args, **kwargs):
    # TODO: provide a docs template for args/kwargs from child models
    # TODO: subset could use syntax. GH#469.
    if subset is not None:
        data = data.loc[subset]
    eval_env = kwargs.pop('eval_env', None)
    if eval_env is None:
        eval_env = 2
    elif eval_env == -1:
        from patsy import EvalEnvironment
        eval_env = EvalEnvironment({})
    else:
        eval_env += 1  # we're going down the stack again
    missing = kwargs.get('missing', 'drop')
    if missing == 'none':  # with patsy it's drop or raise. let's raise.
        missing = 'raise'

    tmp = handle_formula_data(data, None, formula, depth=eval_env,
                              missing=missing)
    ((endog, exog), missing_idx, design_info) = tmp

    if drop_cols is not None and len(drop_cols) > 0:
        # TODO: not hit in tests
        cols = [x for x in exog.columns if x not in drop_cols]
        if len(cols) < len(exog.columns):
            exog = exog[cols]
            cols = list(design_info.term_names)
            for col in drop_cols:
                try:
                    cols.remove(col)
                except ValueError:
                    pass  # OK if not present
            design_info = design_info.subset(cols)

    kwargs.update({'missing_idx': missing_idx,
                   'missing': missing,
                   'formula': formula,  # attach formula for unpickling
                   'design_info': design_info})
    mod = cls(endog, exog, *args, **kwargs)
    mod.formula = formula
    # since we got a dataframe, attach the original
    mod.data.frame = data
    return mod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_formula():\n config = {\"samples\": {\"x1\": onp.ones((2, 10)), \"x2\": onp.ones((2, 10))}}\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(\n x1=dict(transformer=1, prior=dist.Normal(0, 1)),\n x2=dict(transformer=2, prior=dist.Normal(0, 1)),\n )\n\n model = Model.from_dict(config)\n formula = model.formula\n expected = \"y = exp(\\n x1 * 1.00000(+-0.00000)\\n + x2 * 1.00000(+-0.00000)\\n)\"\n assert formula == expected", "def convert(self, df):\n return convert_df_to_model(\n model_type=self.model_type, df=df,\n outcome_variables=self.outcome_variables,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n weight=self.weight\n )", "def _create_model(self):\n\n model_formula = self.get_model_formula()\n\n removed_observation_index = self._model_dataset.index.isin(self._excluded_observations)\n\n # TODO: Handle error that occurs when all model observations are invalid\n model = smf.ols(model_formula,\n data=self._model_dataset,\n subset=~removed_observation_index,\n missing='drop')\n\n self._model = model", "def fit(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n y = df.values\n if y.shape[1] == 1:\n y = y.ravel()\n X = date_part(df.index, method=self.datepart_method)\n from autots.models.sklearn import retrieve_regressor\n\n multioutput = True\n if y.ndim < 2:\n multioutput = False\n elif y.shape[1] < 2:\n multioutput = False\n self.model = retrieve_regressor(\n regression_model=self.regression_model,\n verbose=0,\n verbose_bool=False,\n random_seed=2020,\n multioutput=multioutput,\n )\n self.model = self.model.fit(X, y)\n self.shape = df.shape\n return self", "def build_model_fn(self):", "def __init__(self, x_function, x_derivative, data_f, data_df, a):\n self.x_function = x_function\n self.x_derivative = x_derivative\n self.data_f = data_f\n self.data_df = data_df\n self.a = a\n self.linear_model = LinearModel(self.x_function, self.x_derivative)", "def build_model():", "def get_trained_model(dataframe, features, target, method='logistic'):\n if method == 'logistic':\n model = LogisticRegression()\n model.fit(dataframe[features], dataframe[target])\n return model\n else:\n raise NotImplementedError", "def eval(self, df):\n ## Check invariant; model inputs must be subset of df columns\n if not set(self.var).issubset(set(df.columns)):\n raise ValueError(\n \"Model function `{}` var not a subset of given columns\".format(\n self.name\n )\n )\n\n ## Set up output\n n_rows = df.shape[0]\n results = zeros((n_rows, len(self.out)))\n\n for ind in range(n_rows):\n results[ind] = self.func(*df.loc[ind, self.var])\n\n ## Package output as DataFrame\n return DataFrame(data=results, columns=self.out)", "def build_model(self, X: pd.DataFrame, y: pd.DataFrame = None) -> pm.Model:\n idx = X.index\n \n if y is None:\n y = pd.Series(0, index=idx)\n elif self.oversample: # only if y is given\n n_pos = (y == 1).sum()\n n_neg = (y == 0).sum()\n to_add = int(np.ceil(n_neg/n_pos) - 1)\n # print(n_pos, n_neg, to_add)\n if to_add > 4:\n to_add = 4\n for i in range(to_add):\n idx = idx.append(y[y==1].index)\n X = X.loc[idx]\n y = y.loc[idx]\n \n A = X[self.v_known + self.v_oob_bio]\n B_vals = X[self.v_fuzzy]\n B_mask = (B_vals == -1).astype(int)\n C_raw = X[self.v_float_adm + self.v_float_bio]\n # C_scaled = (C_raw - self.C_mean_) / self.C_std_ \n C_scaled = np.log1p(C_raw/self.C_mean_)\n C_scaled[~np.isfinite(C_scaled)] = np.nan\n C_vals = 
C_scaled.fillna(0)\n C_mask = C_scaled.isnull().astype(int)\n \n coords = {\"idx\": idx, \"a\": A.columns, \"b\": B_vals.columns, \"c\": C_vals.columns}\n with pm.Model(coords=coords) as m:\n pm.Data(\"A\", A, dims=[\"idx\", \"a\"])\n pm.Data(\"B_vals\", B_vals, dims=[\"idx\", \"b\"])\n pm.Data(\"B_mask\", B_mask, dims=[\"idx\", \"b\"])\n pm.Data(\"C_vals\", C_vals, dims=[\"idx\", \"c\"])\n pm.Data(\"C_mask\", C_mask, dims=[\"idx\", \"c\"])\n pm.Data(\"y\", y, dims=[\"idx\"])\n\n pm.Normal(\"avg\", mu=0, sd=1)\n\n pm.Beta(\"h_a_incl\", alpha=1, beta=4)\n pm.Normal(\"a_coef_raw\", mu=0, sd=1, dims=[\"a\"])\n pm.Bernoulli(\"a_incl\", p=m[\"h_a_incl\"], dims=[\"a\"])\n pm.Deterministic(\"a_coef\", m['a_coef_raw'] * m['a_incl'], dims=[\"a\"])\n \n pm.Normal(\"b_vals_coef\", mu=0, sd=1, dims=[\"b\"])\n pm.Normal(\"b_mask_coef_raw\", mu=0, sd=1, dims=[\"b\"])\n pm.Beta(\"h_b_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"b_mask_incl\", p=m[\"h_b_mask_incl\"], dims=[\"b\"])\n pm.Deterministic(\"b_mask_coef\", m['b_mask_coef_raw'] * m['b_mask_incl'], dims=[\"b\"])\n \n pm.Normal(\"c_vals_coef\", mu=0, sd=1, dims=[\"c\"])\n pm.Normal(\"c_mask_coef_raw\", mu=0, sd=1, dims=[\"c\"])\n pm.Beta(\"h_c_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"c_mask_incl\", p=m[\"h_c_mask_incl\"], dims=[\"c\"])\n pm.Deterministic(\"c_mask_coef\", m['c_mask_coef_raw'] * m['c_mask_incl'], dims=[\"c\"])\n unprob = pm.Deterministic(\n \"logit\",\n m['avg']\n + tt.dot(m[\"A\"], m[\"a_coef\"])\n + tt.dot(m[\"B_vals\"] * (1 - m['B_mask']), m[\"b_vals_coef\"])\n + tt.dot(m[\"B_mask\"], m[\"b_mask_coef\"])\n + tt.dot(m[\"C_vals\"] * (1 - m['C_mask']), m[\"c_vals_coef\"])\n + tt.dot(m[\"C_mask\"], m[\"c_mask_coef\"])\n )\n pm.Bernoulli(\"y_pred\", p = tt.nnet.sigmoid(unprob), dims=['idx'], observed=m['y'])\n\n m.graph = pm.model_to_graphviz()\n\n return m", "def ml_df(df, parameters, t_size, model = DecisionTreeRegressor()):\n ndf = df[parameters]\n x = ndf.loc[:, ndf.columns != 'T_exp']\n y = ndf['T_exp']\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=t_size)\n model = model\n p = PolynomialFeatures(degree = 2)\n X_poly = p.fit_transform(x_train)\n X_poly_test = p.fit_transform(x_test)\n model.fit(X_poly,y_train)\n y_train_pred = model.predict(X_poly)\n y_test_pred = model.predict(X_poly_test)\n result = pd.DataFrame()\n result['T_exp'] = y_test\n result['T_prd'] = y_test_pred\n result['ratio'] = result['T_exp']/result['T_prd']\n return result", "async def _build_model(\n self,\n data: Timeseries\n ) -> Prophet:\n model = Prophet()\n model.fit(data.get_dataframe())\n return model", "def from_formula(cls, formula, data, re_formula=None, subset=None,\n *args, **kwargs):\n\n if \"groups\" not in kwargs.keys():\n raise AttributeError(\"'groups' is a required keyword argument in MixedLM.from_formula\")\n\n # If `groups` is a variable name, retrieve the data for the\n # groups variable.\n if type(kwargs[\"groups\"]) == str:\n kwargs[\"groups\"] = np.asarray(data[kwargs[\"groups\"]])\n\n if re_formula is not None:\n eval_env = kwargs.get('eval_env', None)\n if eval_env is None:\n eval_env = 1\n elif eval_env == -1:\n from patsy import EvalEnvironment\n eval_env = EvalEnvironment({})\n exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)\n exog_re_names = exog_re.design_info.column_names\n exog_re = np.asarray(exog_re)\n else:\n exog_re = np.ones((data.shape[0], 1),\n dtype=np.float64)\n exog_re_names = [\"Intercept\"]\n\n mod = super(MixedLM, cls).from_formula(formula, data,\n subset=None,\n 
exog_re=exog_re,\n *args, **kwargs)\n\n # expand re names to account for pairs of RE\n (param_names,\n exog_re_names,\n exog_re_names_full) = mod._make_param_names(exog_re_names)\n mod.data.param_names = param_names\n mod.data.exog_re_names = exog_re_names\n mod.data.exog_re_names_full = exog_re_names_full\n\n return mod", "def model(data_x, parameters):\n return data_x @ parameters", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def fit(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n Y = df.to_numpy()\n X = pd.to_numeric(df.index, errors='coerce', downcast='integer').to_numpy()\n if self.model == 'GLS':\n from statsmodels.regression.linear_model import GLS\n\n self.trained_model = GLS(Y, X, missing='drop').fit()\n else:\n self.trained_model = self._retrieve_detrend(detrend=self.model)\n if self.model in self.need_positive:\n self.trnd_trans = PositiveShift(\n log=False, center_one=True, squared=False\n )\n Y = pd.DataFrame(self.trnd_trans.fit_transform(df)).to_numpy()\n X = X.reshape((-1, 1))\n self.trained_model.fit(X, Y)\n self.shape = df.shape\n return self", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def from_dataframe(cls, dataframe):\n return cls(dataframe)", "def makeCalc(self, dataSet):\n\n #cyl = sasmodels.core.load_model_info('cylinder')\n #hs = sasmodels.core.load_model_info('hardsphere')\n #cylhs = sasmodels.core.load_model_info('cylinder@hardsphere')\n cylhmsa = sasmodels.core.load_model_info('cylinder@hayter_msa')\n\n # Build using c version instead of python. 
Avoids pyopencl\n model = sasmodels.core.build_model(cylhmsa, platform='dll')\n self.calculator = sasmodels.direct_model.DirectModel(dataSet, model)\n\n return", "def build_numeric_model(movie_df):\n import statsmodels.formula.api as smf\n #build a multivariate reg model\n linmodel_multi_f = smf.ols(formula='domestic_gross ~ opening_per_theater + opening_weekend_take + production_budget + widest_release + worldwide_gross', data=movie_df).fit()\n linmodel_multi_f.summary()", "def eval_input_fn(df):\n fts = df.drop(columns=['class'])\n labs = df.filter(items=['class']).values.astype(int)\n\n features = {k:list(v.values) for k,v in fts.items()}\n features = dict(features)\n x = fts.values\n x = np.array([[x]]).reshape((np.shape(x)[0], np.shape(x)[1], 1, 1))\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices({\"x_ph\":x,\"y_ph\":convert_to_one_hot(labs)})\n \n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).batch(np.shape(x)[0]).repeat()\n # Return the read end of the pipeline.\n return dataset.make_one_shot_iterator().get_next()", "def parseL3FormulaWithModel(*args):\n return _libsbml.parseL3FormulaWithModel(*args)", "def convert_to_model(self, *args):", "def create_model(data, cont, cat, target): \n\n cont_features = '+'.join(cont)\n\n cat_features = '+'.join([f'C({x})' for x in cat])\n\n f = f'{target}~+{cont_features}+{cat_features}'\n\n print(f)\n\n model = smf.ols(formula=f, data=data).fit()\n \n diagnose_model(model)\n \n return model", "def predict(self, load_script=False, variant=\"predict\"):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData']\n col_headers = ['model_name', 'n_features']\n feature_col_num = 1\n \n # An additional key field column is expected if the call is made through the load script\n if load_script:\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'key', 'n_features']\n feature_col_num = 2\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n if load_script:\n # Set the key column as the index\n self.request_df.set_index(\"key\", drop=False, inplace=True)\n \n try:\n # Split the features provided as a string into individual columns\n self.X = pd.DataFrame([x[feature_col_num].split(\"|\") for x in self.request_df.values.tolist()],\\\n columns=self.model.features_df.loc[:,\"name\"].tolist(),\\\n index=self.request_df.index)\n except AssertionError as ae:\n err = \"The number of input columns do not match feature definitions. 
Ensure you are using the | delimiter and that the target is not included in your input to the prediction function.\"\n raise AssertionError(err) from ae\n \n # Convert the data types based on feature definitions \n self.X = utils.convert_types(self.X, self.model.features_df, sort=False)\n\n if variant in ('predict_proba', 'predict_log_proba'):\n # If probabilities need to be returned\n if variant == 'predict_proba':\n # Get the predicted probability for each sample \n self.y = self.model.pipe.predict_proba(self.X)\n elif variant == 'predict_log_proba':\n # Get the log probability for each sample\n self.y = self.model.pipe.predict_log_proba(self.X)\n \n # Prepare a list of probability by class for each sample\n probabilities = []\n\n for a in self.y:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i = i + 1\n probabilities.append(s[2:])\n \n self.y = probabilities\n \n else:\n # Predict y for X using the previously fit pipeline\n self.y = self.model.pipe.predict(self.X)\n\n # Inverse transformations on the targets if required\n if self.model.scale_target or self.model.make_stationary:\n # Apply the transformer to the test targets\n self.y = self.model.target_transformer.inverse_transform(self.y) \n\n # Prepare the response\n self.response = pd.DataFrame(self.y, columns=[\"result\"], index=self.X.index)\n \n if load_script:\n # Add the key field column to the response\n self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)\n \n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def __init__(self, df, y_list, X_list, *,\r\n alpha=0.05, printing=True):\r\n # Model inputs (attributes from arguments):\r\n self._df = df\r\n [y_name] = y_list # sequence unpacking in order to make Series\r\n self._y = df[y_name] # Pandas Series\r\n if len(X_list) == 1:\r\n [x_name] = X_list\r\n self._X = df[x_name].to_frame() # Pandas dataframe\r\n else:\r\n self._X = df[X_list] # Pandas dataframe\r\n self._y_list, self._X_list = y_list, X_list\r\n self._alpha = alpha\r\n self._is_fitted = False", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def fit(self):\n self.model = RegressionModel(model_expression=self.model_expression,\n fit_filters=self.filters, predict_filters=self.out_filters,\n ytransform=None, name=self.name)\n\n df = get_data(tables = self.tables,\n filters = self.filters,\n model_expression = self.model_expression)\n \n results = self.model.fit(df)\n \n self.name = self._generate_name()\n self.summary_table = str(results.summary())\n print(self.summary_table)\n \n # We don't strictly need to save the fitted parameters, because they are also\n # contained in the urbansim.models.RegressionModel() sub-object. But maintaining\n # a parallel data structure to other templates will make it easier to refactor the\n # code later on to not rely on RegressionModel any more. 
\n \n self.fitted_parameters = results.params.tolist()\n self.residuals = results.resid", "def create_model(self):\n model = solph.Model(self.es)\n return model" ]
[ "0.64051425", "0.6352155", "0.6185541", "0.5942793", "0.58994234", "0.5815461", "0.5793945", "0.565781", "0.5657534", "0.56343275", "0.5602399", "0.5592737", "0.55794835", "0.5542218", "0.5530604", "0.5495615", "0.54892266", "0.54615164", "0.5460569", "0.54572856", "0.54244167", "0.54210985", "0.5418621", "0.5403502", "0.539797", "0.5396568", "0.53828114", "0.5379534", "0.5362838", "0.5349719" ]
0.6909729
0
Score vector of the model. The default implementation sums score_obs. This is the gradient of loglike with respect to each parameter.
def score(self, params, *args, **kwargs):
    try:
        # If an analytic score_obs is available, try this first before
        # falling back to numerical differentiation below
        return self.score_obs(params, *args, **kwargs).sum(0)
    except NotImplementedError:
        # Fallback in case a `loglike` is implemented but `loglikeobs`
        # is not.
        approx_func = (approx_fprime_cs
                       if self._use_approx_cs else approx_fprime)
        return approx_func(params, self.loglike, args=args, kwargs=kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product", "def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.maximum(0.0, scores - correct_class_scores[:, None] + 1.0)\r\n margins[range(N), y] = 0.0\r\n loss = np.sum(margins) / N\r\n\r\n # Compute gradient off loss function w.r.t. scores\r\n num_pos = np.sum(margins > 0, axis=1)\r\n dscores = np.zeros(scores.shape)\r\n dscores[margins > 0] = 1\r\n dscores[range(N), y] -= num_pos\r\n dscores /= N\r\n\r\n return loss, dscores", "def score_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n score_fe = np.zeros(self.k_fe, dtype=np.float64)\n score_re = np.zeros(self.k_re2, dtype=np.float64)\n\n # Handle the covariance penalty.\n if self.cov_pen is not None:\n score_re -= self.cov_pen.grad(cov_re, cov_re_inv)\n\n # Handle the fixed effects penalty.\n if self.fe_pen is not None:\n score_fe -= self.fe_pen.grad(fe_params)\n\n # resid' V^{-1} resid, summed over the groups (a scalar)\n rvir = 0.\n\n # exog' V^{-1} resid, summed over the groups (a k_fe\n # dimensional vector)\n xtvir = 0.\n\n # exog' V^{_1} exog, summed over the groups (a k_fe x k_fe\n # matrix)\n xtvix = 0.\n\n # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th\n # covariance parameter.\n xtax = [0.,] * self.k_re2\n\n # Temporary related to the gradient of log |V|\n dlv = np.zeros(self.k_re2, dtype=np.float64)\n\n # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)\n rvavr = np.zeros(self.k_re2, dtype=np.float64)\n\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n if self.reml:\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, exog)\n xtvix += np.dot(exog.T, viexog)\n\n # Contributions to the covariance parameter gradient\n jj = 0\n vex = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n ex_r)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n for jj,mat in self._gen_dV_dPsi(ex_r):\n dlv[jj] = np.trace(_smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, mat))\n rvavr[jj] += np.dot(vir, np.dot(mat, vir))\n if self.reml:\n xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))\n\n # Contribution of log|V| to the covariance parameter\n # gradient.\n score_re -= 0.5 * dlv\n\n # Needed for the fixed effects params gradient\n rvir += np.dot(resid, vir)\n xtvir += np.dot(exog.T, vir)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n score_fe += fac * xtvir / rvir\n score_re += 0.5 * fac * rvavr / rvir\n\n if self.reml:\n for j in range(self.k_re2):\n score_re[j] += 0.5 * np.trace(np.linalg.solve(\n xtvix, xtax[j]))\n\n score_vec = np.concatenate((score_fe, score_re))\n\n if self._freepat is not None:\n return self._freepat.get_packed() * score_vec\n else:\n return score_vec", "def loss(self, y_true, score, pos_label=_NoValue):\n if pos_label is not _NoValue:\n raise ValueError(\"`pos_label` not supported\")\n\n score = score.atleast_2d() # Working with 2-D arrays only\n\n p = CSoftmax().softmax(score) # SoftMax function\n\n # find-like indexing (list of lists)\n 
return -CArray(p[[list(range(score.shape[0])), y_true.tolist()]]).log()", "def score_obs(self, params, *args, **kwargs):\n if self._use_approx_cs:\n return approx_fprime_cs(params, self.loglikeobs,\n args=args, kwargs=kwargs)\n else:\n return approx_fprime(params, self.loglikeobs,\n args=args, kwargs=kwargs)", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def score_model(self, length):\n train_score = self.dtr.score(self.X_train, self.y_train)\n test_score = self.dtr.score(self.X_test, self.y_test)\n self.scores.append([length, train_score, test_score])", "def svm_loss_vectorized(W, X, y, reg):\n num_classes = W.shape[1]\n num_train = X.shape[0]\n #loss = 0.0 \n loss = 0.0\n scores = np.zeros((1,num_classes))\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the structured SVM loss, storing the #\n # result in loss. #\n #############################################################################\n \n # lines begin with double \"#\" are the last version of code!!!!!\n \n ##for i in xrange(num_train):\n #XX = np.tile(X[i,:],(num_classes,1)) # try to use broadcasting\n #scores = np.sum(np.multiply(XX,W.T), axis = 1)\n ## scores = np.sum(np.multiply(X[i,:],W.T), axis = 1)\n \n ## if i ==1: print scores\n \n #loss += np.sum(scores - scores[y[i]]) + num_classes -1\n #http://stackoverflow.com/questions/2900084/counting-positive-elements-in-a-list-with-python-list-comprehensions\n ## scores+=1\n ## scores[y[i]]-=1 \n #however, this is sum over index, not values, glaube ich \n #loss+= sum(x < 0 for x in (scores-scores[y[i]]))\n ## loss+= (scores-scores[y[i]])[scores-scores[y[i]]>0].sum()\n #pass\n ############################################\n # construct a zero loop version\n ############################################\n scores2D = np.zeros((num_train, num_classes)) #used to store dotted scores\n scores1D = np.zeros((num_train,1)) #used to store corrected scores\n #index1D = np.zeros((1,num_classes))\n #index1D = range(num_classes) \n #scores1D = y[index1D]\n \n scores2D = np.dot(X,W) \n ##for i in xrange(num_train):\n ## scores1D[i,0]=scores2D[i,y[i]]-1 #find the correct scores and fill them into scores1D, the value -1 is because: si-sj+1\n ## scores2D[i,y[i]]-=1 # we want at corrected score voxel, the value should be 0, correct score -1 - \n #(correct score -1) = 0\n #####################################\n #for loop replacement###\n indexInsert = np.arange(num_train)\n scores1D[indexInsert,0] = scores2D[indexInsert,y[indexInsert]] -1 #using array indexing\n scores2D[indexInsert,y[indexInsert]] -=1\n \n ##################################### \n \n #scores2D = X.dot(W)\n #http://stackoverflow.com/questions/9497290/how-would-i-sum-a-multi-dimensional-array-in-the-most-succinct-python\n #rewrite summation\n #loss += (scores2D-scores1D)[scores2D-scores1D >0].sum()\n #temp = scores2D-np.tile (scores1D, (1,num_classes)) # for each score minus the corrected score\n temp = scores2D-scores1D #broadcasting!!\n #print temp[1,:]\n temp= temp.clip(min=0) \n #loss += sum(map(sum, (temp)[temp>0]))\n #loss += sum(map(sum, (temp)))\n #loss += (temp)[temp >0].sum()\n loss += sum(sum(x) for x in temp) #sum them up\n #loss -= num_train # minus 1 is because in each train, due to the plus 1 above , correct score - correct \n # score +1 = 1, but it should be 0, therefore, i 
deduce them at the last minute \n # ( then I made this also in the for loop to meet intuitive)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n #############################################################################\n # TODO: #\n # Implement a vectorized version of the gradient for the structured SVM #\n # loss, storing the result in dW. #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n #tempBool = np.divide(temp, temp)\n #tempBool = tempBool.clip(max=1,min=0)\n #http://stackoverflow.com/questions/19666626/replace-all-elements-of-python-numpy-array-that-are-greater-than-some-value\n tempBool = np.copy(temp) # temp = scores2D-scores1D , temp= temp.clip(min=0)\n # temp is already the every score minus the correct labeled score\n tempBool[tempBool>0] = 1 # for every element, when it is positive, set it to one (for weighting)\n for j in xrange(num_train):\n tempBool[j,y[j]] =-1*sum(tempBool[j,:]) # calculate how many final scores, max(~~,0) are more than 0, add the number to the correct\n # label element, because it is the times that the corrected scores be used\n dW += np.reshape (X[j,:],(X.shape[1],1))*tempBool[j,:] # broadcasting, out-product\n #pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n dW/= num_train\n dW += reg*W\n \n return loss, dW", "def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)", "def score(self, X):\n nolist = False\n if not isinstance(X, list):\n X = [X]\n nolist = True\n\n scores = []\n for i in X:\n Xi = X[i]\n Xhati = self.predict(Xi)\n\n scores.append(1.0 - np.sum((Xi - Xhati)**2.0) / np.sum(Xi**2.0))\n\n if nolist:\n return scores[0]\n else:\n return scores", "def prob_calibration_function(truthvec, scorevec, reg_param_vec='default', knots='sample',\n method='logistic', force_prob=True, eps=1e-15, max_knots=200,\n transform_fn='none', random_state=942, verbose=False, cv_folds=5,\n unity_prior_weight=1, unity_prior_gridsize=20):\n from sklearn import linear_model\n from sklearn.metrics import log_loss, make_scorer\n\n if (unity_prior_weight>0):\n scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(unity_prior_gridsize)\n coda_wt = unity_prior_weight/unity_prior_gridsize\n weightvec = np.concatenate((np.ones(len(scorevec)), coda_wt * np.ones(len(scorevec_coda))))\n scorevec = np.concatenate((scorevec, scorevec_coda))\n truthvec = np.concatenate((truthvec, truthvec_coda))\n\n if transform_fn != 'none':\n scorevec = transform_fn(scorevec)\n\n knot_vec = np.unique(scorevec)\n if (knots == 'sample'):\n num_unique = len(knot_vec)\n if (num_unique > max_knots):\n smallest_knot, biggest_knot = knot_vec[0], knot_vec[-1]\n inter_knot_vec = knot_vec[1:-1]\n random.seed(random_state)\n random.shuffle(inter_knot_vec)\n reduced_knot_vec = inter_knot_vec[:(max_knots-2)]\n reduced_knot_vec = np.concatenate((reduced_knot_vec, [smallest_knot, biggest_knot]))\n reduced_knot_vec = np.concatenate((reduced_knot_vec, np.linspace(0, 1, 21)))\n if 
(unity_prior_weight>0):\n reduced_knot_vec = np.concatenate((reduced_knot_vec, scorevec_coda))\n knot_vec = np.unique(reduced_knot_vec)\n if verbose:\n print(\"Originally there were {} knots. Reducing to {} while preserving first and last knot.\".format(num_unique, len(knot_vec)))\n X_mat = _natural_cubic_spline_basis_expansion(scorevec, knot_vec)\n\n if (method == 'logistic'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 5, 61)\n if verbose:\n print(\"Trying {} values of C between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec), np.max(reg_param_vec)))\n reg = linear_model.LogisticRegressionCV(Cs=reg_param_vec, cv=StratifiedKFold(cv_folds, shuffle=True),\n scoring=make_scorer(log_loss, needs_proba=True, greater_is_better=False))\n if (unity_prior_weight>0):\n reg.fit(X_mat, truthvec, weightvec)\n else:\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found C = {}\".format(reg.C_))\n\n if (method == 'ridge'):\n if ((type(reg_param_vec) == str) and (reg_param_vec == 'default')):\n reg_param_vec = 10**np.linspace(-7, 7, 71)\n if verbose:\n print(\"Trying {} values of alpha between {} and {}\".format(len(reg_param_vec), np.min(reg_param_vec),np.max(reg_param_vec)))\n reg = linear_model.RidgeCV(alphas=reg_param_vec, cv=KFold(cv_folds, shuffle=True), scoring=make_scorer(mean_squared_error_trunc,needs_proba=False, greater_is_better=False))\n reg.fit(X_mat, truthvec)\n if verbose:\n print(\"Best value found alpha = {}\".format(reg.alpha_))\n\n def calibrate_scores(new_scores):\n new_scores = np.maximum(new_scores,knot_vec[0]*np.ones(len(new_scores)))\n new_scores = np.minimum(new_scores,knot_vec[-1]*np.ones(len(new_scores)))\n if transform_fn != 'none':\n new_scores = transform_fn(new_scores)\n basis_exp = _natural_cubic_spline_basis_expansion(new_scores,knot_vec)\n if (method == 'logistic'):\n outvec = reg.predict_proba(basis_exp)[:,1]\n if (method == 'ridge'):\n outvec = reg.predict(basis_exp)\n if force_prob:\n outvec = np.where(outvec < eps, eps, outvec)\n outvec = np.where(outvec > 1-eps, 1-eps, outvec)\n return outvec\n\n return calibrate_scores", "def calc_score(model, scorer, X, y_true):\n\n y_preds = model.predict(X)\n score = scorer(y_true, y_preds)\n\n return score", "def eval_score( # type: ignore\n self, model_in: torch.Tensor, target: Optional[torch.Tensor] = None, idx=None, next_obs=None\n ) -> torch.Tensor:\n # target = target.repeat((self.num_members, 1, 1))\n loss = self._vaml_loss(model_in, target, idx, next_obs=next_obs, eval=True)\n if self.add_mse:\n loss += self._mse_loss(model_in, target).mean(-1, keepdim=True)\n return loss.detach()", "def scoring_function(self, model, y_true, y_predicted_probability):", "def score(self,x,**kwargs):\r\n if self.kfun != 'matrix' and len(self.sv): \r\n k = self.kfun(x,self.sv,**self.cparam)\r\n #print \"Kernel after test: \", k\r\n else:\r\n k = x\r\n \r\n \r\n self.W=self.alphas \r\n self.mat=self.kfun(np.array([self.sv[1]]), self.sv,**self.cparam) \r\n self.bias=self.svLabels[1]- np.dot((self.alphas*self.svLabels).T,self.mat.T) \r\n z=np.dot((self.alphas*self.svLabels).T,k.T)+self.bias\r\n \r\n #print \"bias: \", self.bias, \"\\nZ: \",z\r\n \r\n \r\n return z", "def get_score(self, solution: np.array) -> float:\n pass", "def update(self, returns, log_probs):\n policy_gradient = []\n for log_prob, Gt in zip(log_probs, returns):\n policy_gradient.append(-log_prob * Gt)\n\n loss = torch.stack(policy_gradient).sum()\n self.optimizer.zero_grad()\n 
loss.backward()\n self.optimizer.step()", "def lm(self, lm_para=LmPara()):\r\n if self.doc_len == 0:\r\n return np.log(MIN_LM_SCORE)\r\n v_tf = np.maximum(self.v_tf, lm_para.min_tf)\r\n v_tf /= self.doc_len\r\n v_tf = np.maximum(v_tf, MIN_LM_SCORE)\r\n score = np.log(v_tf).dot(self.v_q_tf)\r\n\r\n return score", "def log_loss(self):\n probabilities = self.probability_array().copy()\n # need to flip the probabilities for p < 0.5 with this binary case.\n # 1 - old_val is same as oldval*-1 + 1. Do in 2 steps:\n probabilities[np.equal(0, self.y)] *= -1\n probabilities[np.equal(0, self.y)] += 1\n # when multiclass: np.amax(probabilities, 1)\n return np.log(probabilities).sum()", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def calc_loss(X, Y, model):\n Z = predict(X, model)\n return -(Y * np.log(Z)).sum() / len(Y)", "def score(self, x, y=None):\n _, logp = self.score_samples(x)\n return logp", "def predict_score(self, X):\r\n if self.score:\r\n preds = self.model.predictValue(X)\r\n return preds", "def 
objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score", "def score(self, indices):\n self.model.eval()\n _, prediction = self.model(self.propagation_matrix, self.features).max(dim=1)\n correct = prediction[indices].eq(self.target[indices]).sum().item()\n acc = correct / indices.shape[0]\n return acc", "def total_score(self, logits):\n previous = torch.full((1, self.tag_size), -10000., device=device)\n previous[0][self.tag_map[self.start_tag]] = 0.\n\n for index in range(len(logits)):\n previous = previous.expand(self.tag_size, self.tag_size).t()\n emit = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)\n scores = previous + emit + self.transitions\n previous = log_sum_exp(scores)\n\n # previous = previous + self.transitions[:, self.tag_map[self.stop_tag]]\n # previous += self.transitions[self.tag_map[self.stop_tag]]\n previous += self.transitions[self.tag_map[:, self.stop_tag]]\n total_scores = log_sum_exp(previous.t())[0]\n return total_scores", "def log_likelihood_grad_rew(self, data, reward_model, bias_params):", "def loss_gradient(self, targets, scores):\n m = targets * scores\n numer = 4. * (2. * numpy.arctan(m) - 1.)\n denom = 1. + m**2\n return numer/denom" ]
[ "0.6385058", "0.6369366", "0.6337341", "0.62241894", "0.6202223", "0.6085212", "0.6069365", "0.5963406", "0.59437096", "0.59082556", "0.5902111", "0.58229357", "0.5803482", "0.57961464", "0.5775284", "0.5754475", "0.5751942", "0.5720772", "0.5715828", "0.57079184", "0.56972706", "0.56924284", "0.56900287", "0.5684798", "0.56828547", "0.5678644", "0.5676018", "0.5663591", "0.56587905", "0.56492484" ]
0.6393054
0