Dataset schema (one row per Python function):
  repo              string (length 7-55)
  path              string (length 4-127)
  func_name         string (length 1-88)
  original_string   string (length 75-19.8k)
  language          string (1 distinct value)
  code              string (length 75-19.8k)
  code_tokens       list
  docstring         string (length 3-17.3k)
  docstring_tokens  list
  sha               string (length 40)
  url               string (length 87-242)
  partition         string (1 distinct value)
lsst-sqre/sqre-codekit
codekit/cli/github_tag_teams.py
check_tags
def check_tags(repos, tags, ignore_existing=False, fail_fast=False): """ check if tags already exist in repos""" debug("looking for {n} tag(s):".format(n=len(tags))) [debug(" {t}".format(t=t)) for t in tags] debug("in {n} repo(s):".format(n=len(repos))) [debug(" {r}".format(r=r.full_name)) for r in repos] # present/missing tags by repo name present_tags = {} absent_tags = {} problems = [] for r in repos: has_tags = find_tags_in_repo(r, tags) if has_tags: if not ignore_existing: yikes = GitTagExistsError( "tag(s) {tag} already exists in repos {r}".format( tag=list(has_tags.keys()), r=r.full_name )) if fail_fast: raise yikes problems.append(yikes) error(yikes) present_tags[r.full_name] = { 'repo': r, 'tags': list(has_tags.values()), } missing_tags = [x for x in tags if x not in has_tags] if missing_tags: absent_tags[r.full_name] = { 'repo': r, 'need_tags': missing_tags, } debug(textwrap.dedent("""\ found: {n_with:>4} repos with tag(s) {n_none:>4} repos with no tag(s) {errors:>4} repos with error(s)\ """).format( n_with=len(present_tags), n_none=len(absent_tags), errors=len(problems), )) return present_tags, absent_tags, problems
python
def check_tags(repos, tags, ignore_existing=False, fail_fast=False): """ check if tags already exist in repos""" debug("looking for {n} tag(s):".format(n=len(tags))) [debug(" {t}".format(t=t)) for t in tags] debug("in {n} repo(s):".format(n=len(repos))) [debug(" {r}".format(r=r.full_name)) for r in repos] # present/missing tags by repo name present_tags = {} absent_tags = {} problems = [] for r in repos: has_tags = find_tags_in_repo(r, tags) if has_tags: if not ignore_existing: yikes = GitTagExistsError( "tag(s) {tag} already exists in repos {r}".format( tag=list(has_tags.keys()), r=r.full_name )) if fail_fast: raise yikes problems.append(yikes) error(yikes) present_tags[r.full_name] = { 'repo': r, 'tags': list(has_tags.values()), } missing_tags = [x for x in tags if x not in has_tags] if missing_tags: absent_tags[r.full_name] = { 'repo': r, 'need_tags': missing_tags, } debug(textwrap.dedent("""\ found: {n_with:>4} repos with tag(s) {n_none:>4} repos with no tag(s) {errors:>4} repos with error(s)\ """).format( n_with=len(present_tags), n_none=len(absent_tags), errors=len(problems), )) return present_tags, absent_tags, problems
[ "def", "check_tags", "(", "repos", ",", "tags", ",", "ignore_existing", "=", "False", ",", "fail_fast", "=", "False", ")", ":", "debug", "(", "\"looking for {n} tag(s):\"", ".", "format", "(", "n", "=", "len", "(", "tags", ")", ")", ")", "[", "debug", "(", "\" {t}\"", ".", "format", "(", "t", "=", "t", ")", ")", "for", "t", "in", "tags", "]", "debug", "(", "\"in {n} repo(s):\"", ".", "format", "(", "n", "=", "len", "(", "repos", ")", ")", ")", "[", "debug", "(", "\" {r}\"", ".", "format", "(", "r", "=", "r", ".", "full_name", ")", ")", "for", "r", "in", "repos", "]", "# present/missing tags by repo name", "present_tags", "=", "{", "}", "absent_tags", "=", "{", "}", "problems", "=", "[", "]", "for", "r", "in", "repos", ":", "has_tags", "=", "find_tags_in_repo", "(", "r", ",", "tags", ")", "if", "has_tags", ":", "if", "not", "ignore_existing", ":", "yikes", "=", "GitTagExistsError", "(", "\"tag(s) {tag} already exists in repos {r}\"", ".", "format", "(", "tag", "=", "list", "(", "has_tags", ".", "keys", "(", ")", ")", ",", "r", "=", "r", ".", "full_name", ")", ")", "if", "fail_fast", ":", "raise", "yikes", "problems", ".", "append", "(", "yikes", ")", "error", "(", "yikes", ")", "present_tags", "[", "r", ".", "full_name", "]", "=", "{", "'repo'", ":", "r", ",", "'tags'", ":", "list", "(", "has_tags", ".", "values", "(", ")", ")", ",", "}", "missing_tags", "=", "[", "x", "for", "x", "in", "tags", "if", "x", "not", "in", "has_tags", "]", "if", "missing_tags", ":", "absent_tags", "[", "r", ".", "full_name", "]", "=", "{", "'repo'", ":", "r", ",", "'need_tags'", ":", "missing_tags", ",", "}", "debug", "(", "textwrap", ".", "dedent", "(", "\"\"\"\\\n found:\n {n_with:>4} repos with tag(s)\n {n_none:>4} repos with no tag(s)\n {errors:>4} repos with error(s)\\\n \"\"\"", ")", ".", "format", "(", "n_with", "=", "len", "(", "present_tags", ")", ",", "n_none", "=", "len", "(", "absent_tags", ")", ",", "errors", "=", "len", "(", "problems", ")", ",", ")", ")", "return", "present_tags", ",", "absent_tags", ",", "problems" ]
check if tags already exist in repos
[ "check", "if", "tags", "already", "exist", "in", "repos" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_teams.py#L148-L198
train
lsst-sqre/sqre-codekit
codekit/cli/github_tag_teams.py
delete_refs
def delete_refs(repo, refs, dry_run=False): """Note that only the ref to a tag can be explicitly removed. The tag object will live on until it's garbage collected.""" assert isinstance(repo, github.Repository.Repository), type(repo) debug("removing {n} refs from {repo}".format( n=len(refs), repo=repo.full_name) ) for r in refs: debug(" deleting {ref}".format(ref=r.ref)) if dry_run: debug(' (noop)') continue r.delete()
python
def delete_refs(repo, refs, dry_run=False): """Note that only the ref to a tag can be explicitly removed. The tag object will live on until it's garbage collected.""" assert isinstance(repo, github.Repository.Repository), type(repo) debug("removing {n} refs from {repo}".format( n=len(refs), repo=repo.full_name) ) for r in refs: debug(" deleting {ref}".format(ref=r.ref)) if dry_run: debug(' (noop)') continue r.delete()
[ "def", "delete_refs", "(", "repo", ",", "refs", ",", "dry_run", "=", "False", ")", ":", "assert", "isinstance", "(", "repo", ",", "github", ".", "Repository", ".", "Repository", ")", ",", "type", "(", "repo", ")", "debug", "(", "\"removing {n} refs from {repo}\"", ".", "format", "(", "n", "=", "len", "(", "refs", ")", ",", "repo", "=", "repo", ".", "full_name", ")", ")", "for", "r", "in", "refs", ":", "debug", "(", "\" deleting {ref}\"", ".", "format", "(", "ref", "=", "r", ".", "ref", ")", ")", "if", "dry_run", ":", "debug", "(", "' (noop)'", ")", "continue", "r", ".", "delete", "(", ")" ]
Note that only the ref to a tag can be explicitly removed. The tag object will live on until it's garbage collected.
[ "Note", "that", "only", "the", "ref", "to", "a", "tag", "can", "be", "explicitly", "removed", ".", "The", "tag", "object", "will", "leave", "on", "until", "it", "s", "gargabe", "collected", "." ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_teams.py#L397-L414
train
RedHatQE/Sentaku
examples/todo_example/ux.py
TodoUX.get_by
def get_by(self, name): """get a todo list ux by name :rtype: TodoListUX """ item = self.app.get_by(name) return TodoListUX(ux=self, controlled_list=item)
python
def get_by(self, name): """get a todo list ux by name :rtype: TodoListUX """ item = self.app.get_by(name) return TodoListUX(ux=self, controlled_list=item)
[ "def", "get_by", "(", "self", ",", "name", ")", ":", "item", "=", "self", ".", "app", ".", "get_by", "(", "name", ")", "return", "TodoListUX", "(", "ux", "=", "self", ",", "controlled_list", "=", "item", ")" ]
get a todo list ux by name :rtype: TodoListUX
[ "get", "a", "todo", "list", "ux", "by", "name" ]
b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c
https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/ux.py#L9-L15
train
RedHatQE/Sentaku
examples/todo_example/ux.py
TodoUX.create_item
def create_item(self, name): """create a new named todo list :rtype: TodoListUX """ item = self.app.create_item(name) return TodoListUX(ux=self, controlled_list=item)
python
def create_item(self, name): """create a new named todo list :rtype: TodoListUX """ item = self.app.create_item(name) return TodoListUX(ux=self, controlled_list=item)
[ "def", "create_item", "(", "self", ",", "name", ")", ":", "item", "=", "self", ".", "app", ".", "create_item", "(", "name", ")", "return", "TodoListUX", "(", "ux", "=", "self", ",", "controlled_list", "=", "item", ")" ]
create a new named todo list :rtype: TodoListUX
[ "create", "a", "new", "named", "todo", "list" ]
b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c
https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/ux.py#L17-L24
train
RedHatQE/Sentaku
examples/todo_example/ux.py
TodoListUX.get_by
def get_by(self, name): """ find a todo list element by name """ item = self.controlled_list.get_by(name) if item: return TodoElementUX(parent=self, controlled_element=item)
python
def get_by(self, name): """ find a todo list element by name """ item = self.controlled_list.get_by(name) if item: return TodoElementUX(parent=self, controlled_element=item)
[ "def", "get_by", "(", "self", ",", "name", ")", ":", "item", "=", "self", ".", "controlled_list", ".", "get_by", "(", "name", ")", "if", "item", ":", "return", "TodoElementUX", "(", "parent", "=", "self", ",", "controlled_element", "=", "item", ")" ]
find a todo list element by name
[ "find", "a", "todo", "list", "element", "by", "name" ]
b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c
https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/ux.py#L44-L50
train
RedHatQE/Sentaku
examples/todo_example/ux.py
TodoListUX.create_item
def create_item(self, name): """ create a new todo list item """ elem = self.controlled_list.create_item(name) if elem: return TodoElementUX(parent=self, controlled_element=elem)
python
def create_item(self, name): """ create a new todo list item """ elem = self.controlled_list.create_item(name) if elem: return TodoElementUX(parent=self, controlled_element=elem)
[ "def", "create_item", "(", "self", ",", "name", ")", ":", "elem", "=", "self", ".", "controlled_list", ".", "create_item", "(", "name", ")", "if", "elem", ":", "return", "TodoElementUX", "(", "parent", "=", "self", ",", "controlled_element", "=", "elem", ")" ]
create a new todo list item
[ "create", "a", "new", "todo", "list", "item" ]
b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c
https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/ux.py#L52-L58
train
Frzk/Ellis
ellis_actions/nftables.py
NFTables.chose_blacklist
def chose_blacklist(self, ip): """ Given an IP address, figure out the set we have to use. If the address is an IPv4, we have to use *ellis_blacklist4*. If the address is an IPv6, we have to use *ellis_blacklist6*. Raises ipaddress.AddressValueError if the address is neither an IPv4 nor an IPv6. """ blacklist = 'ellis_blacklist{0}' try: address = ipaddress.ip_address(ip) except ipaddress.AddressValueError: raise else: if address.version is 6: # We don't ban private IPv6: if address.is_private: msg = "We don't ban private addresses ({0} given)." \ .format(address) raise ipaddress.AddressValueError(msg) else: # Do we have an embedded IPv4 ? if address.ipv4_mapped is not None: address = address.ipv4_mapped elif address.sixtofour is not None: address = address.sixtofour blacklist = blacklist.format(address.version) return (address, blacklist)
python
def chose_blacklist(self, ip): """ Given an IP address, figure out the set we have to use. If the address is an IPv4, we have to use *ellis_blacklist4*. If the address is an IPv6, we have to use *ellis_blacklist6*. Raises ipaddress.AddressValueError if the address is neither an IPv4 nor an IPv6. """ blacklist = 'ellis_blacklist{0}' try: address = ipaddress.ip_address(ip) except ipaddress.AddressValueError: raise else: if address.version is 6: # We don't ban private IPv6: if address.is_private: msg = "We don't ban private addresses ({0} given)." \ .format(address) raise ipaddress.AddressValueError(msg) else: # Do we have an embedded IPv4 ? if address.ipv4_mapped is not None: address = address.ipv4_mapped elif address.sixtofour is not None: address = address.sixtofour blacklist = blacklist.format(address.version) return (address, blacklist)
[ "def", "chose_blacklist", "(", "self", ",", "ip", ")", ":", "blacklist", "=", "'ellis_blacklist{0}'", "try", ":", "address", "=", "ipaddress", ".", "ip_address", "(", "ip", ")", "except", "ipaddress", ".", "AddressValueError", ":", "raise", "else", ":", "if", "address", ".", "version", "is", "6", ":", "# We don't ban private IPv6:", "if", "address", ".", "is_private", ":", "msg", "=", "\"We don't ban private addresses ({0} given).\"", ".", "format", "(", "address", ")", "raise", "ipaddress", ".", "AddressValueError", "(", "msg", ")", "else", ":", "# Do we have an embedded IPv4 ?", "if", "address", ".", "ipv4_mapped", "is", "not", "None", ":", "address", "=", "address", ".", "ipv4_mapped", "elif", "address", ".", "sixtofour", "is", "not", "None", ":", "address", "=", "address", ".", "sixtofour", "blacklist", "=", "blacklist", ".", "format", "(", "address", ".", "version", ")", "return", "(", "address", ",", "blacklist", ")" ]
Given an IP address, figure out the set we have to use. If the address is an IPv4, we have to use *ellis_blacklist4*. If the address is an IPv6, we have to use *ellis_blacklist6*. Raises ipaddress.AddressValueError if the address is neither an IPv4 nor an IPv6.
[ "Given", "an", "IP", "address", "figure", "out", "the", "set", "we", "have", "to", "use", "." ]
39ce8987cbc503354cf1f45927344186a8b18363
https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis_actions/nftables.py#L50-L82
train
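For readers skimming the chose_blacklist record above, the interesting part is how an IPv6 address with an embedded IPv4 (IPv4-mapped or 6to4) is unwrapped before the set name is picked. Below is a minimal standalone sketch of that selection logic using only the standard ipaddress module; it mirrors the record but is illustrative, not part of the dataset, and the example addresses are documentation/public ranges chosen just for the demo.

import ipaddress

def choose_blacklist(ip):
    """Pick 'ellis_blacklist4' or 'ellis_blacklist6' for an address, unwrapping
    IPv4-mapped and 6to4 IPv6 addresses back to the embedded IPv4 first."""
    address = ipaddress.ip_address(ip)  # raises ValueError for malformed input
    if address.version == 6:
        if address.is_private:
            raise ValueError("refusing to handle private address {0}".format(address))
        if address.ipv4_mapped is not None:       # e.g. ::ffff:a.b.c.d
            address = address.ipv4_mapped
        elif address.sixtofour is not None:       # e.g. addresses in 2002::/16
            address = address.sixtofour
    return address, 'ellis_blacklist{0}'.format(address.version)

print(choose_blacklist('203.0.113.7'))            # (IPv4Address('203.0.113.7'), 'ellis_blacklist4')
print(choose_blacklist('2606:4700:4700::1111'))   # (IPv6Address('2606:4700:4700::1111'), 'ellis_blacklist6')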
sirfoga/pyhal
hal/maths/problems.py
EightQueen.under_attack
def under_attack(col, queens): """Checks if queen is under attack :param col: Column number :param queens: list of queens :return: True iff queen is under attack """ left = right = col for _, column in reversed(queens): left, right = left - 1, right + 1 if column in (left, col, right): return True return False
python
def under_attack(col, queens): """Checks if queen is under attack :param col: Column number :param queens: list of queens :return: True iff queen is under attack """ left = right = col for _, column in reversed(queens): left, right = left - 1, right + 1 if column in (left, col, right): return True return False
[ "def", "under_attack", "(", "col", ",", "queens", ")", ":", "left", "=", "right", "=", "col", "for", "_", ",", "column", "in", "reversed", "(", "queens", ")", ":", "left", ",", "right", "=", "left", "-", "1", ",", "right", "+", "1", "if", "column", "in", "(", "left", ",", "col", ",", "right", ")", ":", "return", "True", "return", "False" ]
Checks if queen is under attack :param col: Column number :param queens: list of queens :return: True iff queen is under attack
[ "Checks", "if", "queen", "is", "under", "attack" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/maths/problems.py#L13-L25
train
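The under_attack record above is only half of an N-queens search: it checks one candidate column against the queens already placed, walking the two diagonals one step per earlier row. Here is a small self-contained sketch that drops the same helper into a row-by-row backtracking solver; the solve generator is illustrative and not part of the record.

def under_attack(col, queens):
    """True if a queen placed in column `col` on the next row would be attacked
    by `queens`, a list of (row, column) pairs for the rows already filled."""
    left = right = col
    for _, column in reversed(queens):
        left, right = left - 1, right + 1          # diagonals widen by one per row back
        if column in (left, col, right):
            return True
    return False

def solve(n, queens=None):
    """Yield complete n-queens placements as lists of (row, column) pairs."""
    queens = queens or []
    row = len(queens)
    if row == n:
        yield queens
        return
    for col in range(n):
        if not under_attack(col, queens):
            for solution in solve(n, queens + [(row, col)]):
                yield solution

print(next(solve(8)))  # first solution found, e.g. columns 0, 4, 7, 5, 2, 6, 1, 3 by row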
rspivak/crammit
src/crammit/__init__.py
AssetManager._get_bundles_by_type
def _get_bundles_by_type(self, type): """Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css' """ bundles = {} bundle_definitions = self.config.get(type) if bundle_definitions is None: return bundles # bundle name: common for bundle_name, paths in bundle_definitions.items(): bundle_files = [] # path: static/js/vendor/*.js for path in paths: # pattern: /tmp/static/js/vendor/*.js pattern = abspath = os.path.join(self.basedir, path) # assetdir: /tmp/static/js/vendor # assetdir contents: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js # - /tmp/static/js/vendor/index.html assetdir = os.path.dirname(abspath) # expanded_fnames after filtering using the pattern: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js fnames = [os.path.join(assetdir, fname) for fname in os.listdir(assetdir)] expanded_fnames = fnmatch.filter(fnames, pattern) bundle_files.extend(sorted(expanded_fnames)) bundles[bundle_name] = bundle_files return bundles
python
def _get_bundles_by_type(self, type): """Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css' """ bundles = {} bundle_definitions = self.config.get(type) if bundle_definitions is None: return bundles # bundle name: common for bundle_name, paths in bundle_definitions.items(): bundle_files = [] # path: static/js/vendor/*.js for path in paths: # pattern: /tmp/static/js/vendor/*.js pattern = abspath = os.path.join(self.basedir, path) # assetdir: /tmp/static/js/vendor # assetdir contents: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js # - /tmp/static/js/vendor/index.html assetdir = os.path.dirname(abspath) # expanded_fnames after filtering using the pattern: # - /tmp/static/js/vendor/t1.js # - /tmp/static/js/vendor/t2.js fnames = [os.path.join(assetdir, fname) for fname in os.listdir(assetdir)] expanded_fnames = fnmatch.filter(fnames, pattern) bundle_files.extend(sorted(expanded_fnames)) bundles[bundle_name] = bundle_files return bundles
[ "def", "_get_bundles_by_type", "(", "self", ",", "type", ")", ":", "bundles", "=", "{", "}", "bundle_definitions", "=", "self", ".", "config", ".", "get", "(", "type", ")", "if", "bundle_definitions", "is", "None", ":", "return", "bundles", "# bundle name: common", "for", "bundle_name", ",", "paths", "in", "bundle_definitions", ".", "items", "(", ")", ":", "bundle_files", "=", "[", "]", "# path: static/js/vendor/*.js", "for", "path", "in", "paths", ":", "# pattern: /tmp/static/js/vendor/*.js", "pattern", "=", "abspath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "basedir", ",", "path", ")", "# assetdir: /tmp/static/js/vendor", "# assetdir contents:", "# - /tmp/static/js/vendor/t1.js", "# - /tmp/static/js/vendor/t2.js", "# - /tmp/static/js/vendor/index.html", "assetdir", "=", "os", ".", "path", ".", "dirname", "(", "abspath", ")", "# expanded_fnames after filtering using the pattern:", "# - /tmp/static/js/vendor/t1.js", "# - /tmp/static/js/vendor/t2.js", "fnames", "=", "[", "os", ".", "path", ".", "join", "(", "assetdir", ",", "fname", ")", "for", "fname", "in", "os", ".", "listdir", "(", "assetdir", ")", "]", "expanded_fnames", "=", "fnmatch", ".", "filter", "(", "fnames", ",", "pattern", ")", "bundle_files", ".", "extend", "(", "sorted", "(", "expanded_fnames", ")", ")", "bundles", "[", "bundle_name", "]", "=", "bundle_files", "return", "bundles" ]
Get a dictionary of bundles for requested type. Args: type: 'javascript' or 'css'
[ "Get", "a", "dictionary", "of", "bundles", "for", "requested", "type", "." ]
ebd0f8a9b5267e6e1483f8886329ac262ab272d6
https://github.com/rspivak/crammit/blob/ebd0f8a9b5267e6e1483f8886329ac262ab272d6/src/crammit/__init__.py#L75-L107
train
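The core trick in the _get_bundles_by_type record above is that each configured bundle path doubles as a glob-style pattern: the listing of the pattern's directory is filtered with fnmatch and sorted. A self-contained sketch of just that expansion step, using a throwaway temporary directory with made-up file names for the demo:

import fnmatch
import os
import tempfile

def expand_pattern(basedir, path):
    """Expand a bundle path such as 'static/js/vendor/*.js' into the sorted
    list of matching files directly under the pattern's directory."""
    pattern = os.path.join(basedir, path)
    assetdir = os.path.dirname(pattern)
    fnames = [os.path.join(assetdir, fname) for fname in os.listdir(assetdir)]
    return sorted(fnmatch.filter(fnames, pattern))

basedir = tempfile.mkdtemp()
vendor = os.path.join(basedir, 'static', 'js', 'vendor')
os.makedirs(vendor)
for name in ('t2.js', 't1.js', 'index.html'):
    open(os.path.join(vendor, name), 'w').close()

print(expand_pattern(basedir, 'static/js/vendor/*.js'))
# ['<tmpdir>/static/js/vendor/t1.js', '<tmpdir>/static/js/vendor/t2.js']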
lowandrew/OLCTools
spadespipeline/mMLST.py
getmlsthelper
def getmlsthelper(referencefilepath, start, organism, update): """Prepares to run the getmlst.py script provided in SRST2""" from accessoryFunctions.accessoryFunctions import GenObject # Initialise a set to for the organism(s) for which new alleles and profiles are desired organismset = set() # Allow for Shigella to use the Escherichia MLST profile/alleles organism = organism if organism != 'Shigella' else 'Escherichia' # As there are multiple profiles for certain organisms, this dictionary has the schemes I use as values organismdictionary = {'Escherichia': 'Escherichia coli#1', 'Shigella': 'Escherichia coli#1', 'Vibrio': 'Vibrio parahaemolyticus', 'Campylobacter': 'Campylobacter jejuni', 'Listeria': 'Listeria monocytogenes', 'Bacillus': 'Bacillus cereus', 'Klebsiella': 'Klebsiella pneumoniae'} # Allow for a genus not in the dictionary being specified try: organismset.add(organismdictionary[organism]) except KeyError: # Add the organism to the set organismset.add(organism) for scheme in organismset: organismpath = os.path.join(referencefilepath, 'MLST', organism) # Find all folders (with the trailing / in the glob search) and remove the trailing / try: lastfolder = sorted(glob('{}/*/'.format(organismpath)))[-1].rstrip('/') except IndexError: lastfolder = [] # Run the method to determine the most recent folder, and how recently it was updated delta, foldersize, d1 = schemedate(lastfolder) # Set the path/name of the folder to contain the new alleles and profile newfolder = '{}/{}'.format(organismpath, d1) if update: if delta.days > 7 or foldersize < 100: printtime('Downloading {} MLST scheme from pubmlst.org'.format(organism), start) # Create the object to store the argument attributes to feed to getmlst getmlstargs = GenObject() getmlstargs.species = scheme getmlstargs.repository_url = 'http://pubmlst.org/data/dbases.xml' getmlstargs.force_scheme_name = False getmlstargs.path = newfolder # Create the path to store the downloaded make_path(getmlstargs.path) getmlst.main(getmlstargs) # Even if there is an issue contacting the database, files are created, however, they are populated # with XML strings indicating that the download failed # Read the first character in the file try: profilestart = open(glob('{}/*.txt'.format(newfolder))[0]).readline() except IndexError: profilestart = [] # If it is a <, then the download failed if not profilestart or profilestart[0] == '<': # Delete the folder, and use the previous definitions instead shutil.rmtree(newfolder) newfolder = lastfolder # If the profile and alleles are up-to-date, set :newfolder to :lastfolder else: newfolder = lastfolder # If update isn't specified, don't update else: newfolder = lastfolder # Ensure that the profile/alleles updated successfully # Calculate the size of the folder by adding the sizes of all the files within the folder together try: newfoldersize = sum(os.path.getsize('{}/{}'.format(newfolder, f)) for f in os.listdir(newfolder) if os.path.isfile('{}/{}'.format(newfolder, f))) except (OSError, TypeError): newfoldersize = 100 # If the profile/allele failed, remove the folder, and use the most recent update if newfoldersize < 100: shutil.rmtree(newfolder) try: newfolder = sorted(glob('{}/*/'.format(organismpath)))[-1].rstrip('/') except IndexError: newfolder = organismpath # Return the name/path of the allele-containing folder return newfolder
python
def getmlsthelper(referencefilepath, start, organism, update): """Prepares to run the getmlst.py script provided in SRST2""" from accessoryFunctions.accessoryFunctions import GenObject # Initialise a set to for the organism(s) for which new alleles and profiles are desired organismset = set() # Allow for Shigella to use the Escherichia MLST profile/alleles organism = organism if organism != 'Shigella' else 'Escherichia' # As there are multiple profiles for certain organisms, this dictionary has the schemes I use as values organismdictionary = {'Escherichia': 'Escherichia coli#1', 'Shigella': 'Escherichia coli#1', 'Vibrio': 'Vibrio parahaemolyticus', 'Campylobacter': 'Campylobacter jejuni', 'Listeria': 'Listeria monocytogenes', 'Bacillus': 'Bacillus cereus', 'Klebsiella': 'Klebsiella pneumoniae'} # Allow for a genus not in the dictionary being specified try: organismset.add(organismdictionary[organism]) except KeyError: # Add the organism to the set organismset.add(organism) for scheme in organismset: organismpath = os.path.join(referencefilepath, 'MLST', organism) # Find all folders (with the trailing / in the glob search) and remove the trailing / try: lastfolder = sorted(glob('{}/*/'.format(organismpath)))[-1].rstrip('/') except IndexError: lastfolder = [] # Run the method to determine the most recent folder, and how recently it was updated delta, foldersize, d1 = schemedate(lastfolder) # Set the path/name of the folder to contain the new alleles and profile newfolder = '{}/{}'.format(organismpath, d1) if update: if delta.days > 7 or foldersize < 100: printtime('Downloading {} MLST scheme from pubmlst.org'.format(organism), start) # Create the object to store the argument attributes to feed to getmlst getmlstargs = GenObject() getmlstargs.species = scheme getmlstargs.repository_url = 'http://pubmlst.org/data/dbases.xml' getmlstargs.force_scheme_name = False getmlstargs.path = newfolder # Create the path to store the downloaded make_path(getmlstargs.path) getmlst.main(getmlstargs) # Even if there is an issue contacting the database, files are created, however, they are populated # with XML strings indicating that the download failed # Read the first character in the file try: profilestart = open(glob('{}/*.txt'.format(newfolder))[0]).readline() except IndexError: profilestart = [] # If it is a <, then the download failed if not profilestart or profilestart[0] == '<': # Delete the folder, and use the previous definitions instead shutil.rmtree(newfolder) newfolder = lastfolder # If the profile and alleles are up-to-date, set :newfolder to :lastfolder else: newfolder = lastfolder # If update isn't specified, don't update else: newfolder = lastfolder # Ensure that the profile/alleles updated successfully # Calculate the size of the folder by adding the sizes of all the files within the folder together try: newfoldersize = sum(os.path.getsize('{}/{}'.format(newfolder, f)) for f in os.listdir(newfolder) if os.path.isfile('{}/{}'.format(newfolder, f))) except (OSError, TypeError): newfoldersize = 100 # If the profile/allele failed, remove the folder, and use the most recent update if newfoldersize < 100: shutil.rmtree(newfolder) try: newfolder = sorted(glob('{}/*/'.format(organismpath)))[-1].rstrip('/') except IndexError: newfolder = organismpath # Return the name/path of the allele-containing folder return newfolder
[ "def", "getmlsthelper", "(", "referencefilepath", ",", "start", ",", "organism", ",", "update", ")", ":", "from", "accessoryFunctions", ".", "accessoryFunctions", "import", "GenObject", "# Initialise a set to for the organism(s) for which new alleles and profiles are desired", "organismset", "=", "set", "(", ")", "# Allow for Shigella to use the Escherichia MLST profile/alleles", "organism", "=", "organism", "if", "organism", "!=", "'Shigella'", "else", "'Escherichia'", "# As there are multiple profiles for certain organisms, this dictionary has the schemes I use as values", "organismdictionary", "=", "{", "'Escherichia'", ":", "'Escherichia coli#1'", ",", "'Shigella'", ":", "'Escherichia coli#1'", ",", "'Vibrio'", ":", "'Vibrio parahaemolyticus'", ",", "'Campylobacter'", ":", "'Campylobacter jejuni'", ",", "'Listeria'", ":", "'Listeria monocytogenes'", ",", "'Bacillus'", ":", "'Bacillus cereus'", ",", "'Klebsiella'", ":", "'Klebsiella pneumoniae'", "}", "# Allow for a genus not in the dictionary being specified", "try", ":", "organismset", ".", "add", "(", "organismdictionary", "[", "organism", "]", ")", "except", "KeyError", ":", "# Add the organism to the set", "organismset", ".", "add", "(", "organism", ")", "for", "scheme", "in", "organismset", ":", "organismpath", "=", "os", ".", "path", ".", "join", "(", "referencefilepath", ",", "'MLST'", ",", "organism", ")", "# Find all folders (with the trailing / in the glob search) and remove the trailing /", "try", ":", "lastfolder", "=", "sorted", "(", "glob", "(", "'{}/*/'", ".", "format", "(", "organismpath", ")", ")", ")", "[", "-", "1", "]", ".", "rstrip", "(", "'/'", ")", "except", "IndexError", ":", "lastfolder", "=", "[", "]", "# Run the method to determine the most recent folder, and how recently it was updated", "delta", ",", "foldersize", ",", "d1", "=", "schemedate", "(", "lastfolder", ")", "# Set the path/name of the folder to contain the new alleles and profile", "newfolder", "=", "'{}/{}'", ".", "format", "(", "organismpath", ",", "d1", ")", "if", "update", ":", "if", "delta", ".", "days", ">", "7", "or", "foldersize", "<", "100", ":", "printtime", "(", "'Downloading {} MLST scheme from pubmlst.org'", ".", "format", "(", "organism", ")", ",", "start", ")", "# Create the object to store the argument attributes to feed to getmlst", "getmlstargs", "=", "GenObject", "(", ")", "getmlstargs", ".", "species", "=", "scheme", "getmlstargs", ".", "repository_url", "=", "'http://pubmlst.org/data/dbases.xml'", "getmlstargs", ".", "force_scheme_name", "=", "False", "getmlstargs", ".", "path", "=", "newfolder", "# Create the path to store the downloaded", "make_path", "(", "getmlstargs", ".", "path", ")", "getmlst", ".", "main", "(", "getmlstargs", ")", "# Even if there is an issue contacting the database, files are created, however, they are populated", "# with XML strings indicating that the download failed", "# Read the first character in the file", "try", ":", "profilestart", "=", "open", "(", "glob", "(", "'{}/*.txt'", ".", "format", "(", "newfolder", ")", ")", "[", "0", "]", ")", ".", "readline", "(", ")", "except", "IndexError", ":", "profilestart", "=", "[", "]", "# If it is a <, then the download failed", "if", "not", "profilestart", "or", "profilestart", "[", "0", "]", "==", "'<'", ":", "# Delete the folder, and use the previous definitions instead", "shutil", ".", "rmtree", "(", "newfolder", ")", "newfolder", "=", "lastfolder", "# If the profile and alleles are up-to-date, set :newfolder to :lastfolder", "else", ":", 
"newfolder", "=", "lastfolder", "# If update isn't specified, don't update", "else", ":", "newfolder", "=", "lastfolder", "# Ensure that the profile/alleles updated successfully", "# Calculate the size of the folder by adding the sizes of all the files within the folder together", "try", ":", "newfoldersize", "=", "sum", "(", "os", ".", "path", ".", "getsize", "(", "'{}/{}'", ".", "format", "(", "newfolder", ",", "f", ")", ")", "for", "f", "in", "os", ".", "listdir", "(", "newfolder", ")", "if", "os", ".", "path", ".", "isfile", "(", "'{}/{}'", ".", "format", "(", "newfolder", ",", "f", ")", ")", ")", "except", "(", "OSError", ",", "TypeError", ")", ":", "newfoldersize", "=", "100", "# If the profile/allele failed, remove the folder, and use the most recent update", "if", "newfoldersize", "<", "100", ":", "shutil", ".", "rmtree", "(", "newfolder", ")", "try", ":", "newfolder", "=", "sorted", "(", "glob", "(", "'{}/*/'", ".", "format", "(", "organismpath", ")", ")", ")", "[", "-", "1", "]", ".", "rstrip", "(", "'/'", ")", "except", "IndexError", ":", "newfolder", "=", "organismpath", "# Return the name/path of the allele-containing folder", "return", "newfolder" ]
Prepares to run the getmlst.py script provided in SRST2
[ "Prepares", "to", "run", "the", "getmlst", ".", "py", "script", "provided", "in", "SRST2" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mMLST.py#L1229-L1306
train
lowandrew/OLCTools
spadespipeline/mMLST.py
MLST.blastnprep
def blastnprep(self): """Setup blastn analyses""" # Populate threads for each gene, genome combination for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # # sample[self.analysistype].alleleresults = GenObject() sample[self.analysistype].closealleles = dict() sample[self.analysistype].mismatches = dict() sample[self.analysistype].alignmentlength = dict() sample[self.analysistype].subjectlength = dict() sample[self.analysistype].queryid = dict() sample[self.analysistype].start = dict() sample[self.analysistype].end = dict() sample[self.analysistype].queryseq = dict() if type(sample[self.analysistype].allelenames) == list: for allele in sample[self.analysistype].combinedalleles: # Add each fasta/allele file combination to the threads self.runblast(sample.general.bestassemblyfile, allele, sample)
python
def blastnprep(self): """Setup blastn analyses""" # Populate threads for each gene, genome combination for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # # sample[self.analysistype].alleleresults = GenObject() sample[self.analysistype].closealleles = dict() sample[self.analysistype].mismatches = dict() sample[self.analysistype].alignmentlength = dict() sample[self.analysistype].subjectlength = dict() sample[self.analysistype].queryid = dict() sample[self.analysistype].start = dict() sample[self.analysistype].end = dict() sample[self.analysistype].queryseq = dict() if type(sample[self.analysistype].allelenames) == list: for allele in sample[self.analysistype].combinedalleles: # Add each fasta/allele file combination to the threads self.runblast(sample.general.bestassemblyfile, allele, sample)
[ "def", "blastnprep", "(", "self", ")", ":", "# Populate threads for each gene, genome combination", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "#", "# sample[self.analysistype].alleleresults = GenObject()", "sample", "[", "self", ".", "analysistype", "]", ".", "closealleles", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "mismatches", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "alignmentlength", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "subjectlength", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "queryid", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "start", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "end", "=", "dict", "(", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "queryseq", "=", "dict", "(", ")", "if", "type", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "allelenames", ")", "==", "list", ":", "for", "allele", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "combinedalleles", ":", "# Add each fasta/allele file combination to the threads", "self", ".", "runblast", "(", "sample", ".", "general", ".", "bestassemblyfile", ",", "allele", ",", "sample", ")" ]
Setup blastn analyses
[ "Setup", "blastn", "analyses" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mMLST.py#L170-L188
train
lowandrew/OLCTools
spadespipeline/mMLST.py
PipelineInit.strainer
def strainer(self): """ Determine whether it is required to run the MLST analyses """ # Initialise a variable to store whether the analyses need to be performed analyse = list() for sample in self.runmetadata.samples: if sample.general.bestassemblyfile != 'NA': try: # Try to open the final report from the analyses. If it exists, then the analyses don't need to be # performed again. if os.path.isfile('{}{}_{}.csv'.format(sample[self.analysistype].reportdir, sample.name, self.analysistype)): if self.analysistype == 'rmlst': # Run the allele updater method updatecall, allelefolder = getrmlsthelper(self.referencefilepath, self.updatedatabases, self.start) else: # referencefilepath, start, organism, update allelefolder = getmlsthelper(self.referencefilepath, self.start, sample.general.referencegenus, self.updatedatabases) # Alleles have a .tfa extension self.alleles = glob('{}/*.tfa'.format(allelefolder)) sample[self.analysistype].alleles = self.alleles sample[self.analysistype].allelenames = [os.path.split(x)[1].split('.')[0] for x in self.alleles] # The analyses have already been successfully completed analyse.append(False) # Otherwise run the analyses else: self.populator(sample) analyse.append(True) # If the attribute doesn't exist, then the analyses haven't been performed yet. except (KeyError, AttributeError): self.populator(sample) analyse.append(True) else: self.populator(sample) analyse.append(False) # Only run the analyses if they have not completed successfully before # if any(analyse): # Run the MLST analyses MLST(self)
python
def strainer(self): """ Determine whether it is required to run the MLST analyses """ # Initialise a variable to store whether the analyses need to be performed analyse = list() for sample in self.runmetadata.samples: if sample.general.bestassemblyfile != 'NA': try: # Try to open the final report from the analyses. If it exists, then the analyses don't need to be # performed again. if os.path.isfile('{}{}_{}.csv'.format(sample[self.analysistype].reportdir, sample.name, self.analysistype)): if self.analysistype == 'rmlst': # Run the allele updater method updatecall, allelefolder = getrmlsthelper(self.referencefilepath, self.updatedatabases, self.start) else: # referencefilepath, start, organism, update allelefolder = getmlsthelper(self.referencefilepath, self.start, sample.general.referencegenus, self.updatedatabases) # Alleles have a .tfa extension self.alleles = glob('{}/*.tfa'.format(allelefolder)) sample[self.analysistype].alleles = self.alleles sample[self.analysistype].allelenames = [os.path.split(x)[1].split('.')[0] for x in self.alleles] # The analyses have already been successfully completed analyse.append(False) # Otherwise run the analyses else: self.populator(sample) analyse.append(True) # If the attribute doesn't exist, then the analyses haven't been performed yet. except (KeyError, AttributeError): self.populator(sample) analyse.append(True) else: self.populator(sample) analyse.append(False) # Only run the analyses if they have not completed successfully before # if any(analyse): # Run the MLST analyses MLST(self)
[ "def", "strainer", "(", "self", ")", ":", "# Initialise a variable to store whether the analyses need to be performed", "analyse", "=", "list", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "try", ":", "# Try to open the final report from the analyses. If it exists, then the analyses don't need to be", "# performed again.", "if", "os", ".", "path", ".", "isfile", "(", "'{}{}_{}.csv'", ".", "format", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "sample", ".", "name", ",", "self", ".", "analysistype", ")", ")", ":", "if", "self", ".", "analysistype", "==", "'rmlst'", ":", "# Run the allele updater method", "updatecall", ",", "allelefolder", "=", "getrmlsthelper", "(", "self", ".", "referencefilepath", ",", "self", ".", "updatedatabases", ",", "self", ".", "start", ")", "else", ":", "# referencefilepath, start, organism, update", "allelefolder", "=", "getmlsthelper", "(", "self", ".", "referencefilepath", ",", "self", ".", "start", ",", "sample", ".", "general", ".", "referencegenus", ",", "self", ".", "updatedatabases", ")", "# Alleles have a .tfa extension", "self", ".", "alleles", "=", "glob", "(", "'{}/*.tfa'", ".", "format", "(", "allelefolder", ")", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "alleles", "=", "self", ".", "alleles", "sample", "[", "self", ".", "analysistype", "]", ".", "allelenames", "=", "[", "os", ".", "path", ".", "split", "(", "x", ")", "[", "1", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "for", "x", "in", "self", ".", "alleles", "]", "# The analyses have already been successfully completed", "analyse", ".", "append", "(", "False", ")", "# Otherwise run the analyses", "else", ":", "self", ".", "populator", "(", "sample", ")", "analyse", ".", "append", "(", "True", ")", "# If the attribute doesn't exist, then the analyses haven't been performed yet.", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "self", ".", "populator", "(", "sample", ")", "analyse", ".", "append", "(", "True", ")", "else", ":", "self", ".", "populator", "(", "sample", ")", "analyse", ".", "append", "(", "False", ")", "# Only run the analyses if they have not completed successfully before", "# if any(analyse):", "# Run the MLST analyses", "MLST", "(", "self", ")" ]
Determine whether it is required to run the MLST analyses
[ "Determine", "whether", "it", "is", "required", "to", "run", "the", "MLST", "analyses" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/mMLST.py#L1542-L1584
train
sirfoga/pyhal
hal/times/utils.py
Timing.get_seconds
def get_seconds(self): """Gets seconds from raw time :return: Seconds in time """ parsed = self.parse_hh_mm_ss() # get times total_seconds = parsed.second total_seconds += parsed.minute * 60.0 total_seconds += parsed.hour * 60.0 * 60.0 return total_seconds
python
def get_seconds(self): """Gets seconds from raw time :return: Seconds in time """ parsed = self.parse_hh_mm_ss() # get times total_seconds = parsed.second total_seconds += parsed.minute * 60.0 total_seconds += parsed.hour * 60.0 * 60.0 return total_seconds
[ "def", "get_seconds", "(", "self", ")", ":", "parsed", "=", "self", ".", "parse_hh_mm_ss", "(", ")", "# get times", "total_seconds", "=", "parsed", ".", "second", "total_seconds", "+=", "parsed", ".", "minute", "*", "60.0", "total_seconds", "+=", "parsed", ".", "hour", "*", "60.0", "*", "60.0", "return", "total_seconds" ]
Gets seconds from raw time :return: Seconds in time
[ "Gets", "seconds", "from", "raw", "time" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/times/utils.py#L39-L48
train
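The arithmetic in the get_seconds record above is just HH:MM:SS to seconds (hours contribute 3600, minutes 60). The record relies on a parse_hh_mm_ss helper defined elsewhere in the class; the sketch below assumes an equivalent strptime-based parse so it can run on its own.

from datetime import datetime

def hhmmss_to_seconds(raw):
    """Convert an 'HH:MM:SS' string into a float number of seconds."""
    parsed = datetime.strptime(raw, "%H:%M:%S")   # stand-in for parse_hh_mm_ss()
    return parsed.second + parsed.minute * 60.0 + parsed.hour * 3600.0

print(hhmmss_to_seconds("01:02:03"))  # 3723.0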
sirfoga/pyhal
hal/internet/email/utils.py
get_email_content
def get_email_content(file_path): """Email content in file :param file_path: Path to file with email text :return: Email text (html formatted) """ with open(file_path, "r") as in_file: text = str(in_file.read()) return text.replace("\n\n", "<br>")
python
def get_email_content(file_path): """Email content in file :param file_path: Path to file with email text :return: Email text (html formatted) """ with open(file_path, "r") as in_file: text = str(in_file.read()) return text.replace("\n\n", "<br>")
[ "def", "get_email_content", "(", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "\"r\"", ")", "as", "in_file", ":", "text", "=", "str", "(", "in_file", ".", "read", "(", ")", ")", "return", "text", ".", "replace", "(", "\"\\n\\n\"", ",", "\"<br>\"", ")" ]
Email content in file :param file_path: Path to file with email text :return: Email text (html formatted)
[ "Email", "content", "in", "file" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/email/utils.py#L6-L14
train
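To make the replacement step in the get_email_content record above concrete: blank lines (paragraph breaks) become <br> tags while single newlines are left untouched. A runnable round trip with a temporary file and made-up sample text:

import tempfile

def get_email_content(file_path):
    """Read a plain-text email body and turn paragraph breaks into <br> tags."""
    with open(file_path, "r") as in_file:
        text = str(in_file.read())
    return text.replace("\n\n", "<br>")

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("Hello,\n\nFirst paragraph line one.\nLine two.\n\nBye")
    path = tmp.name

print(get_email_content(path))
# Hello,<br>First paragraph line one.
# Line two.<br>Bye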
portfors-lab/sparkle
sparkle/run/abstract_acquisition.py
AbstractAcquisitionRunner.set
def set(self, **kwargs): """Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float """ self.player_lock.acquire() if 'acqtime' in kwargs: self.player.set_aidur(kwargs['acqtime']) if 'aifs' in kwargs: self.player.set_aifs(kwargs['aifs']) self.aifs = kwargs['aifs'] if 'aifs' in kwargs or 'acqtime' in kwargs: t = kwargs.get('acqtime', self.player.get_aidur()) npoints = t*float(kwargs.get('aifs', self.player.get_aifs())) self.aitimes = np.linspace(0, t, npoints) if 'trigger' in kwargs: self.player.set_trigger(kwargs['trigger']) self.player_lock.release() if 'aochan' in kwargs: self.aochan = kwargs['aochan'] if 'aichan' in kwargs: self.aichan = kwargs['aichan'] if 'binsz' in kwargs: self.binsz = kwargs['binsz'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'caldb' in kwargs: self.caldb = kwargs['caldb'] if 'calv' in kwargs: self.calv = kwargs['calv'] if 'calf' in kwargs: self.calf = kwargs['calf'] if 'caldb' in kwargs or 'calv' in kwargs: self.update_reference_voltage() if 'datafile' in kwargs: self.datafile = kwargs['datafile'] if 'reprate' in kwargs: self.reprate = kwargs['reprate'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'average' in kwargs: self.average = kwargs['average'] if 'reject' in kwargs: self.reject = kwargs['reject'] if 'rejectrate' in kwargs: self.rejectrate = kwargs['rejectrate']
python
def set(self, **kwargs): """Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float """ self.player_lock.acquire() if 'acqtime' in kwargs: self.player.set_aidur(kwargs['acqtime']) if 'aifs' in kwargs: self.player.set_aifs(kwargs['aifs']) self.aifs = kwargs['aifs'] if 'aifs' in kwargs or 'acqtime' in kwargs: t = kwargs.get('acqtime', self.player.get_aidur()) npoints = t*float(kwargs.get('aifs', self.player.get_aifs())) self.aitimes = np.linspace(0, t, npoints) if 'trigger' in kwargs: self.player.set_trigger(kwargs['trigger']) self.player_lock.release() if 'aochan' in kwargs: self.aochan = kwargs['aochan'] if 'aichan' in kwargs: self.aichan = kwargs['aichan'] if 'binsz' in kwargs: self.binsz = kwargs['binsz'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'caldb' in kwargs: self.caldb = kwargs['caldb'] if 'calv' in kwargs: self.calv = kwargs['calv'] if 'calf' in kwargs: self.calf = kwargs['calf'] if 'caldb' in kwargs or 'calv' in kwargs: self.update_reference_voltage() if 'datafile' in kwargs: self.datafile = kwargs['datafile'] if 'reprate' in kwargs: self.reprate = kwargs['reprate'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'average' in kwargs: self.average = kwargs['average'] if 'reject' in kwargs: self.reject = kwargs['reject'] if 'rejectrate' in kwargs: self.rejectrate = kwargs['rejectrate']
[ "def", "set", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "player_lock", ".", "acquire", "(", ")", "if", "'acqtime'", "in", "kwargs", ":", "self", ".", "player", ".", "set_aidur", "(", "kwargs", "[", "'acqtime'", "]", ")", "if", "'aifs'", "in", "kwargs", ":", "self", ".", "player", ".", "set_aifs", "(", "kwargs", "[", "'aifs'", "]", ")", "self", ".", "aifs", "=", "kwargs", "[", "'aifs'", "]", "if", "'aifs'", "in", "kwargs", "or", "'acqtime'", "in", "kwargs", ":", "t", "=", "kwargs", ".", "get", "(", "'acqtime'", ",", "self", ".", "player", ".", "get_aidur", "(", ")", ")", "npoints", "=", "t", "*", "float", "(", "kwargs", ".", "get", "(", "'aifs'", ",", "self", ".", "player", ".", "get_aifs", "(", ")", ")", ")", "self", ".", "aitimes", "=", "np", ".", "linspace", "(", "0", ",", "t", ",", "npoints", ")", "if", "'trigger'", "in", "kwargs", ":", "self", ".", "player", ".", "set_trigger", "(", "kwargs", "[", "'trigger'", "]", ")", "self", ".", "player_lock", ".", "release", "(", ")", "if", "'aochan'", "in", "kwargs", ":", "self", ".", "aochan", "=", "kwargs", "[", "'aochan'", "]", "if", "'aichan'", "in", "kwargs", ":", "self", ".", "aichan", "=", "kwargs", "[", "'aichan'", "]", "if", "'binsz'", "in", "kwargs", ":", "self", ".", "binsz", "=", "kwargs", "[", "'binsz'", "]", "if", "'save'", "in", "kwargs", ":", "self", ".", "save_data", "=", "kwargs", "[", "'save'", "]", "if", "'caldb'", "in", "kwargs", ":", "self", ".", "caldb", "=", "kwargs", "[", "'caldb'", "]", "if", "'calv'", "in", "kwargs", ":", "self", ".", "calv", "=", "kwargs", "[", "'calv'", "]", "if", "'calf'", "in", "kwargs", ":", "self", ".", "calf", "=", "kwargs", "[", "'calf'", "]", "if", "'caldb'", "in", "kwargs", "or", "'calv'", "in", "kwargs", ":", "self", ".", "update_reference_voltage", "(", ")", "if", "'datafile'", "in", "kwargs", ":", "self", ".", "datafile", "=", "kwargs", "[", "'datafile'", "]", "if", "'reprate'", "in", "kwargs", ":", "self", ".", "reprate", "=", "kwargs", "[", "'reprate'", "]", "if", "'save'", "in", "kwargs", ":", "self", ".", "save_data", "=", "kwargs", "[", "'save'", "]", "if", "'average'", "in", "kwargs", ":", "self", ".", "average", "=", "kwargs", "[", "'average'", "]", "if", "'reject'", "in", "kwargs", ":", "self", ".", "reject", "=", "kwargs", "[", "'reject'", "]", "if", "'rejectrate'", "in", "kwargs", ":", "self", ".", "rejectrate", "=", "kwargs", "[", "'rejectrate'", "]" ]
Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float
[ "Sets", "an", "internal", "setting", "for", "acquistion", "using", "keywords", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/abstract_acquisition.py#L64-L135
train
portfors-lab/sparkle
sparkle/run/abstract_acquisition.py
AbstractAcquisitionRunner.interval_wait
def interval_wait(self): """Pauses the correct amount of time according to this acquisition object's interval setting, and the last time this function was called""" # calculate time since last interation and wait to acheive desired interval now = time.time() elapsed = (now - self.last_tick)*1000 # print("interval %d, time from start %d \n" % (elapsed, (now - self.start_time)*1000)) if elapsed < self.interval: # print('sleep ', (self.interval-elapsed)) # self.signals.warning.emit('') # clear previous warning time.sleep((self.interval-elapsed)/1000) now = time.time() elif elapsed > self.interval: pass # self.signals.warning.emit("WARNING: PROVIDED INTERVAL EXCEEDED, ELAPSED TIME %d" % (elapsed)) self.last_tick = now
python
def interval_wait(self): """Pauses the correct amount of time according to this acquisition object's interval setting, and the last time this function was called""" # calculate time since last interation and wait to acheive desired interval now = time.time() elapsed = (now - self.last_tick)*1000 # print("interval %d, time from start %d \n" % (elapsed, (now - self.start_time)*1000)) if elapsed < self.interval: # print('sleep ', (self.interval-elapsed)) # self.signals.warning.emit('') # clear previous warning time.sleep((self.interval-elapsed)/1000) now = time.time() elif elapsed > self.interval: pass # self.signals.warning.emit("WARNING: PROVIDED INTERVAL EXCEEDED, ELAPSED TIME %d" % (elapsed)) self.last_tick = now
[ "def", "interval_wait", "(", "self", ")", ":", "# calculate time since last interation and wait to acheive desired interval", "now", "=", "time", ".", "time", "(", ")", "elapsed", "=", "(", "now", "-", "self", ".", "last_tick", ")", "*", "1000", "# print(\"interval %d, time from start %d \\n\" % (elapsed, (now - self.start_time)*1000))", "if", "elapsed", "<", "self", ".", "interval", ":", "# print('sleep ', (self.interval-elapsed))", "# self.signals.warning.emit('') # clear previous warning", "time", ".", "sleep", "(", "(", "self", ".", "interval", "-", "elapsed", ")", "/", "1000", ")", "now", "=", "time", ".", "time", "(", ")", "elif", "elapsed", ">", "self", ".", "interval", ":", "pass", "# self.signals.warning.emit(\"WARNING: PROVIDED INTERVAL EXCEEDED, ELAPSED TIME %d\" % (elapsed))", "self", ".", "last_tick", "=", "now" ]
Pauses the correct amount of time according to this acquisition object's interval setting, and the last time this function was called
[ "Pauses", "the", "correct", "amount", "of", "time", "according", "to", "this", "acquisition", "object", "s", "interval", "setting", "and", "the", "last", "time", "this", "function", "was", "called" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/abstract_acquisition.py#L150-L166
train
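The pacing logic in the interval_wait record above (measure milliseconds since the last tick, sleep off whatever is left of the configured interval, then move the tick forward) can be exercised on its own. A stripped-down sketch with a plain stand-in class rather than the acquisition runner:

import time

class Pacer(object):
    """Keeps successive calls at least `interval_ms` milliseconds apart."""

    def __init__(self, interval_ms):
        self.interval = interval_ms
        self.last_tick = time.time()

    def interval_wait(self):
        now = time.time()
        elapsed = (now - self.last_tick) * 1000   # ms since the previous tick
        if elapsed < self.interval:
            time.sleep((self.interval - elapsed) / 1000)
            now = time.time()
        self.last_tick = now

pacer = Pacer(interval_ms=50)
start = time.time()
for _ in range(3):
    pacer.interval_wait()
print("elapsed ms:", round((time.time() - start) * 1000))  # roughly 150 (three ~50 ms waits)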
portfors-lab/sparkle
sparkle/run/abstract_acquisition.py
AbstractAcquisitionRunner.putnotify
def putnotify(self, name, *args): """Puts data into queue and alerts listeners""" # self.signals[name][0].send(*args) self.queues[name][0].put(*args) self.queues[name][1].set()
python
def putnotify(self, name, *args): """Puts data into queue and alerts listeners""" # self.signals[name][0].send(*args) self.queues[name][0].put(*args) self.queues[name][1].set()
[ "def", "putnotify", "(", "self", ",", "name", ",", "*", "args", ")", ":", "# self.signals[name][0].send(*args)", "self", ".", "queues", "[", "name", "]", "[", "0", "]", ".", "put", "(", "*", "args", ")", "self", ".", "queues", "[", "name", "]", "[", "1", "]", ".", "set", "(", ")" ]
Puts data into queue and alerts listeners
[ "Puts", "data", "into", "queue", "and", "alerts", "listeners" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/abstract_acquisition.py#L168-L172
train
lowandrew/OLCTools
metagenomefilter/filtermetagenome.py
FilterGenome.loadassignment
def loadassignment(self): """Load the taxonomic assignment for each read""" printtime('Finding taxonomic assignments', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.assignmentload, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.loadqueue.put(sample) self.loadqueue.join() # Filter the .fastq files self.readlist()
python
def loadassignment(self): """Load the taxonomic assignment for each read""" printtime('Finding taxonomic assignments', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.assignmentload, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.loadqueue.put(sample) self.loadqueue.join() # Filter the .fastq files self.readlist()
[ "def", "loadassignment", "(", "self", ")", ":", "printtime", "(", "'Finding taxonomic assignments'", ",", "self", ".", "start", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "assignmentload", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "self", ".", "loadqueue", ".", "put", "(", "sample", ")", "self", ".", "loadqueue", ".", "join", "(", ")", "# Filter the .fastq files", "self", ".", "readlist", "(", ")" ]
Load the taxonomic assignment for each read
[ "Load", "the", "taxonomic", "assignment", "for", "each", "read" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/metagenomefilter/filtermetagenome.py#L51-L66
train
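The loadassignment record above, and the readlist/fastqfilter records that follow, all use the same idiom: start one daemon worker per CPU, feed samples into a queue, and block on join(). A generic sketch of that idiom, with worker and process_sample as assumed placeholder names:

from queue import Queue
from threading import Thread

def process_sample(sample):
    # stand-in for the per-sample work done by assignmentload/listread/filterfastq
    return sample * 2

work_queue = Queue()

def worker():
    while True:
        sample = work_queue.get()
        try:
            process_sample(sample)
        finally:
            work_queue.task_done()   # join() only unblocks once every task is marked done

for _ in range(4):                   # one worker per CPU in the original code
    t = Thread(target=worker)
    t.daemon = True                  # newer spelling of setDaemon(True)
    t.start()

for sample in range(10):
    work_queue.put(sample)
work_queue.join()                    # block until all queued samples are processed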
lowandrew/OLCTools
metagenomefilter/filtermetagenome.py
FilterGenome.readlist
def readlist(self): """Sort the reads, and create lists to be used in creating sorted .fastq files""" printtime('Sorting reads', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.listread, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.listqueue.put(sample) self.listqueue.join() # Create self.fastqfilter()
python
def readlist(self): """Sort the reads, and create lists to be used in creating sorted .fastq files""" printtime('Sorting reads', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.listread, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.listqueue.put(sample) self.listqueue.join() # Create self.fastqfilter()
[ "def", "readlist", "(", "self", ")", ":", "printtime", "(", "'Sorting reads'", ",", "self", ".", "start", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "listread", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "self", ".", "listqueue", ".", "put", "(", "sample", ")", "self", ".", "listqueue", ".", "join", "(", ")", "# Create", "self", ".", "fastqfilter", "(", ")" ]
Sort the reads, and create lists to be used in creating sorted .fastq files
[ "Sort", "the", "reads", "and", "create", "lists", "to", "be", "used", "in", "creating", "sorted", ".", "fastq", "files" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/metagenomefilter/filtermetagenome.py#L86-L101
train
lowandrew/OLCTools
metagenomefilter/filtermetagenome.py
FilterGenome.fastqfilter
def fastqfilter(self): """Filter the reads into separate files based on taxonomic assignment""" printtime('Creating filtered .fastqfiles', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.filterfastq, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.filterqueue.put(sample) self.filterqueue.join() # Print the metadata to file metadataprinter.MetadataPrinter(self)
python
def fastqfilter(self): """Filter the reads into separate files based on taxonomic assignment""" printtime('Creating filtered .fastqfiles', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.filterfastq, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.filterqueue.put(sample) self.filterqueue.join() # Print the metadata to file metadataprinter.MetadataPrinter(self)
[ "def", "fastqfilter", "(", "self", ")", ":", "printtime", "(", "'Creating filtered .fastqfiles'", ",", "self", ".", "start", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "filterfastq", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "self", ".", "filterqueue", ".", "put", "(", "sample", ")", "self", ".", "filterqueue", ".", "join", "(", ")", "# Print the metadata to file", "metadataprinter", ".", "MetadataPrinter", "(", "self", ")" ]
Filter the reads into separate files based on taxonomic assignment
[ "Filter", "the", "reads", "into", "separate", "files", "based", "on", "taxonomic", "assignment" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/metagenomefilter/filtermetagenome.py#L125-L140
train
sirfoga/pyhal
hal/strings/models.py
String.remove_escapes
def remove_escapes(self): """Removes everything except number and letters from string :return: All numbers and letters in string """ chars = [] i = 0 while i < len(self.string): char = self.string[i] if char == "\\": i += 1 else: chars.append(char) i += 1 return "".join(chars)
python
def remove_escapes(self): """Removes everything except number and letters from string :return: All numbers and letters in string """ chars = [] i = 0 while i < len(self.string): char = self.string[i] if char == "\\": i += 1 else: chars.append(char) i += 1 return "".join(chars)
[ "def", "remove_escapes", "(", "self", ")", ":", "chars", "=", "[", "]", "i", "=", "0", "while", "i", "<", "len", "(", "self", ".", "string", ")", ":", "char", "=", "self", ".", "string", "[", "i", "]", "if", "char", "==", "\"\\\\\"", ":", "i", "+=", "1", "else", ":", "chars", ".", "append", "(", "char", ")", "i", "+=", "1", "return", "\"\"", ".", "join", "(", "chars", ")" ]
Removes everything except numbers and letters from string :return: All numbers and letters in string
[ "Removes", "everything", "except", "numbers", "and", "letters", "from", "string" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/strings/models.py#L17-L34
train
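Tracing the loop in remove_escapes shows it drops only the backslash characters themselves and keeps every other character. A standalone re-trace of the same logic, using a hypothetical helper name rather than the String class:

def remove_escapes(text):
    chars = []
    i = 0
    while i < len(text):
        if text[i] == "\\":   # skip the backslash itself; keep the next character
            i += 1
        else:
            chars.append(text[i])
            i += 1
    return "".join(chars)

print(remove_escapes(r"C:\temp\new"))   # -> 'C:tempnew'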
sirfoga/pyhal
hal/strings/models.py
String.convert_accents
def convert_accents(self): """Removes accents from text :return: input with converted accents chars """ nkfd_form = unicodedata.normalize('NFKD', self.string) return "".join([ char for char in nkfd_form if not unicodedata.combining(char) ])
python
def convert_accents(self): """Removes accents from text :return: input with converted accents chars """ nkfd_form = unicodedata.normalize('NFKD', self.string) return "".join([ char for char in nkfd_form if not unicodedata.combining(char) ])
[ "def", "convert_accents", "(", "self", ")", ":", "nkfd_form", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "self", ".", "string", ")", "return", "\"\"", ".", "join", "(", "[", "char", "for", "char", "in", "nkfd_form", "if", "not", "unicodedata", ".", "combining", "(", "char", ")", "]", ")" ]
Removes accents from text :return: input with converted accents chars
[ "Removes", "accents", "from", "text" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/strings/models.py#L43-L53
train
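The NFKD trick in convert_accents works independently of the String wrapper; a minimal sketch (strip_accents is an assumed name):

import unicodedata

def strip_accents(text):
    # NFKD splits each accented character into base character + combining mark,
    # then the combining marks are filtered out
    nkfd = unicodedata.normalize('NFKD', text)
    return "".join(c for c in nkfd if not unicodedata.combining(c))

print(strip_accents("Crème brûlée"))   # -> 'Creme brulee'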
sirfoga/pyhal
hal/strings/models.py
String.remove_all
def remove_all(self, token): """Removes all occurrences of token :param token: string to remove :return: input without token """ out = self.string.replace(" ", token) # replace tokens while out.find(token + token) >= 0: # while there are tokens out = out.replace(token + token, token) return out
python
def remove_all(self, token): """Removes all occurrences of token :param token: string to remove :return: input without token """ out = self.string.replace(" ", token) # replace tokens while out.find(token + token) >= 0: # while there are tokens out = out.replace(token + token, token) return out
[ "def", "remove_all", "(", "self", ",", "token", ")", ":", "out", "=", "self", ".", "string", ".", "replace", "(", "\" \"", ",", "token", ")", "# replace tokens", "while", "out", ".", "find", "(", "token", "+", "token", ")", ">=", "0", ":", "# while there are tokens", "out", "=", "out", ".", "replace", "(", "token", "+", "token", ",", "token", ")", "return", "out" ]
Removes all occurrences of token :param token: string to remove :return: input without token
[ "Removes", "all", "occurrences", "of", "token" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/strings/models.py#L105-L115
train
portfors-lab/sparkle
sparkle/tools/log.py
init_logging
def init_logging(): """Initialize a logger from a configuration file to use throughout the project""" with open(os.path.join(os.path.dirname(__file__),'logging.conf'), 'r') as yf: config = yaml.load(yf) logging.config.dictConfig(config)
python
def init_logging(): """Initialize a logger from a configuration file to use throughout the project""" with open(os.path.join(os.path.dirname(__file__),'logging.conf'), 'r') as yf: config = yaml.load(yf) logging.config.dictConfig(config)
[ "def", "init_logging", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'logging.conf'", ")", ",", "'r'", ")", "as", "yf", ":", "config", "=", "yaml", ".", "load", "(", "yf", ")", "logging", ".", "config", ".", "dictConfig", "(", "config", ")" ]
Initialize a logger from a configuration file to use throughout the project
[ "Initialize", "a", "logger", "from", "a", "configuration", "file", "to", "use", "throughout", "the", "project" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/log.py#L8-L12
train
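A minimal sketch of the same load-YAML-then-dictConfig pattern shown in init_logging; the inline console-only config is an assumption, and yaml.safe_load is used because a bare yaml.load() needs an explicit Loader in newer PyYAML versions:

import logging
import logging.config
import yaml

CONFIG_YAML = """
version: 1
formatters:
  plain:
    format: '%(levelname)s %(name)s: %(message)s'
handlers:
  console:
    class: logging.StreamHandler
    formatter: plain
root:
  level: INFO
  handlers: [console]
"""

config = yaml.safe_load(CONFIG_YAML)   # safer than a bare yaml.load()
logging.config.dictConfig(config)
logging.getLogger(__name__).info("logging configured")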
ArabellaTech/django-basic-cms
basic_cms/admin/forms.py
SlugFormMixin._clean_page_unique_slug_required
def _clean_page_unique_slug_required(self, slug): """See if this slug exists already""" if hasattr(self, 'instance') and self.instance.id: if Content.objects.exclude(page=self.instance).filter( body=slug, type="slug").count(): raise forms.ValidationError(self.err_dict['another_page_error']) elif Content.objects.filter(body=slug, type="slug").count(): raise forms.ValidationError(self.err_dict['another_page_error']) return slug
python
def _clean_page_unique_slug_required(self, slug): """See if this slug exists already""" if hasattr(self, 'instance') and self.instance.id: if Content.objects.exclude(page=self.instance).filter( body=slug, type="slug").count(): raise forms.ValidationError(self.err_dict['another_page_error']) elif Content.objects.filter(body=slug, type="slug").count(): raise forms.ValidationError(self.err_dict['another_page_error']) return slug
[ "def", "_clean_page_unique_slug_required", "(", "self", ",", "slug", ")", ":", "if", "hasattr", "(", "self", ",", "'instance'", ")", "and", "self", ".", "instance", ".", "id", ":", "if", "Content", ".", "objects", ".", "exclude", "(", "page", "=", "self", ".", "instance", ")", ".", "filter", "(", "body", "=", "slug", ",", "type", "=", "\"slug\"", ")", ".", "count", "(", ")", ":", "raise", "forms", ".", "ValidationError", "(", "self", ".", "err_dict", "[", "'another_page_error'", "]", ")", "elif", "Content", ".", "objects", ".", "filter", "(", "body", "=", "slug", ",", "type", "=", "\"slug\"", ")", ".", "count", "(", ")", ":", "raise", "forms", ".", "ValidationError", "(", "self", ".", "err_dict", "[", "'another_page_error'", "]", ")", "return", "slug" ]
See if this slug exists already
[ "See", "if", "this", "slug", "exists", "already" ]
863f3c6098606f663994930cd8e7723ad0c07caf
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/admin/forms.py#L53-L62
train
klahnakoski/mo-logs
mo_logs/exceptions.py
extract_stack
def extract_stack(start=0): """ SNAGGED FROM traceback.py Altered to return Data Extract the raw traceback from the current stack frame. Each item in the returned list is a quadruple (filename, line number, function name, text), and the entries are in order from newest to oldest """ try: raise ZeroDivisionError except ZeroDivisionError: trace = sys.exc_info()[2] f = trace.tb_frame.f_back for i in range(start): f = f.f_back stack = [] while f is not None: stack.append({ "line": f.f_lineno, "file": f.f_code.co_filename, "method": f.f_code.co_name }) f = f.f_back return stack
python
def extract_stack(start=0): """ SNAGGED FROM traceback.py Altered to return Data Extract the raw traceback from the current stack frame. Each item in the returned list is a quadruple (filename, line number, function name, text), and the entries are in order from newest to oldest """ try: raise ZeroDivisionError except ZeroDivisionError: trace = sys.exc_info()[2] f = trace.tb_frame.f_back for i in range(start): f = f.f_back stack = [] while f is not None: stack.append({ "line": f.f_lineno, "file": f.f_code.co_filename, "method": f.f_code.co_name }) f = f.f_back return stack
[ "def", "extract_stack", "(", "start", "=", "0", ")", ":", "try", ":", "raise", "ZeroDivisionError", "except", "ZeroDivisionError", ":", "trace", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "f", "=", "trace", ".", "tb_frame", ".", "f_back", "for", "i", "in", "range", "(", "start", ")", ":", "f", "=", "f", ".", "f_back", "stack", "=", "[", "]", "while", "f", "is", "not", "None", ":", "stack", ".", "append", "(", "{", "\"line\"", ":", "f", ".", "f_lineno", ",", "\"file\"", ":", "f", ".", "f_code", ".", "co_filename", ",", "\"method\"", ":", "f", ".", "f_code", ".", "co_name", "}", ")", "f", "=", "f", ".", "f_back", "return", "stack" ]
SNAGGED FROM traceback.py Altered to return Data Extract the raw traceback from the current stack frame. Each item in the returned list is a quadruple (filename, line number, function name, text), and the entries are in order from newest to oldest
[ "SNAGGED", "FROM", "traceback", ".", "py", "Altered", "to", "return", "Data" ]
0971277ac9caf28a755b766b70621916957d4fea
https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/exceptions.py#L155-L183
train
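A hedged usage sketch for extract_stack, assuming mo-logs is installed; despite the "quadruple" wording in the docstring, the code returns plain dicts with line/file/method keys, newest frame first:

from mo_logs.exceptions import extract_stack   # assumes mo-logs is installed

def inner():
    return extract_stack()    # stack as seen from the caller of extract_stack

def outer():
    return inner()

for frame in outer():
    # each entry is a dict with 'line', 'file' and 'method' keys
    print(frame["method"], frame["file"], frame["line"])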
klahnakoski/mo-logs
mo_logs/exceptions.py
_extract_traceback
def _extract_traceback(start): """ SNAGGED FROM traceback.py RETURN list OF dicts DESCRIBING THE STACK TRACE """ tb = sys.exc_info()[2] for i in range(start): tb = tb.tb_next return _parse_traceback(tb)
python
def _extract_traceback(start): """ SNAGGED FROM traceback.py RETURN list OF dicts DESCRIBING THE STACK TRACE """ tb = sys.exc_info()[2] for i in range(start): tb = tb.tb_next return _parse_traceback(tb)
[ "def", "_extract_traceback", "(", "start", ")", ":", "tb", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "for", "i", "in", "range", "(", "start", ")", ":", "tb", "=", "tb", ".", "tb_next", "return", "_parse_traceback", "(", "tb", ")" ]
SNAGGED FROM traceback.py RETURN list OF dicts DESCRIBING THE STACK TRACE
[ "SNAGGED", "FROM", "traceback", ".", "py" ]
0971277ac9caf28a755b766b70621916957d4fea
https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/exceptions.py#L186-L195
train
klahnakoski/mo-logs
mo_logs/exceptions.py
Except.wrap
def wrap(cls, e, stack_depth=0): """ ENSURE THE STACKTRACE AND CAUSAL CHAIN IS CAPTURED, PLUS ADD FEATURES OF Except :param e: AN EXCEPTION OF ANY TYPE :param stack_depth: HOW MANY CALLS TO TAKE OFF THE TOP OF THE STACK TRACE :return: A Except OBJECT OF THE SAME """ if e == None: return Null elif isinstance(e, (list, Except)): return e elif is_data(e): e.cause = unwraplist([Except.wrap(c) for c in listwrap(e.cause)]) return Except(**e) else: tb = getattr(e, '__traceback__', None) if tb is not None: trace = _parse_traceback(tb) else: trace = _extract_traceback(0) cause = Except.wrap(getattr(e, '__cause__', None)) if hasattr(e, "message") and e.message: output = Except(context=ERROR, template=text_type(e.message), trace=trace, cause=cause) else: output = Except(context=ERROR, template=text_type(e), trace=trace, cause=cause) trace = extract_stack(stack_depth + 2) # +2 = to remove the caller, and it's call to this' Except.wrap() output.trace.extend(trace) return output
python
def wrap(cls, e, stack_depth=0): """ ENSURE THE STACKTRACE AND CAUSAL CHAIN IS CAPTURED, PLUS ADD FEATURES OF Except :param e: AN EXCEPTION OF ANY TYPE :param stack_depth: HOW MANY CALLS TO TAKE OFF THE TOP OF THE STACK TRACE :return: A Except OBJECT OF THE SAME """ if e == None: return Null elif isinstance(e, (list, Except)): return e elif is_data(e): e.cause = unwraplist([Except.wrap(c) for c in listwrap(e.cause)]) return Except(**e) else: tb = getattr(e, '__traceback__', None) if tb is not None: trace = _parse_traceback(tb) else: trace = _extract_traceback(0) cause = Except.wrap(getattr(e, '__cause__', None)) if hasattr(e, "message") and e.message: output = Except(context=ERROR, template=text_type(e.message), trace=trace, cause=cause) else: output = Except(context=ERROR, template=text_type(e), trace=trace, cause=cause) trace = extract_stack(stack_depth + 2) # +2 = to remove the caller, and it's call to this' Except.wrap() output.trace.extend(trace) return output
[ "def", "wrap", "(", "cls", ",", "e", ",", "stack_depth", "=", "0", ")", ":", "if", "e", "==", "None", ":", "return", "Null", "elif", "isinstance", "(", "e", ",", "(", "list", ",", "Except", ")", ")", ":", "return", "e", "elif", "is_data", "(", "e", ")", ":", "e", ".", "cause", "=", "unwraplist", "(", "[", "Except", ".", "wrap", "(", "c", ")", "for", "c", "in", "listwrap", "(", "e", ".", "cause", ")", "]", ")", "return", "Except", "(", "*", "*", "e", ")", "else", ":", "tb", "=", "getattr", "(", "e", ",", "'__traceback__'", ",", "None", ")", "if", "tb", "is", "not", "None", ":", "trace", "=", "_parse_traceback", "(", "tb", ")", "else", ":", "trace", "=", "_extract_traceback", "(", "0", ")", "cause", "=", "Except", ".", "wrap", "(", "getattr", "(", "e", ",", "'__cause__'", ",", "None", ")", ")", "if", "hasattr", "(", "e", ",", "\"message\"", ")", "and", "e", ".", "message", ":", "output", "=", "Except", "(", "context", "=", "ERROR", ",", "template", "=", "text_type", "(", "e", ".", "message", ")", ",", "trace", "=", "trace", ",", "cause", "=", "cause", ")", "else", ":", "output", "=", "Except", "(", "context", "=", "ERROR", ",", "template", "=", "text_type", "(", "e", ")", ",", "trace", "=", "trace", ",", "cause", "=", "cause", ")", "trace", "=", "extract_stack", "(", "stack_depth", "+", "2", ")", "# +2 = to remove the caller, and it's call to this' Except.wrap()", "output", ".", "trace", ".", "extend", "(", "trace", ")", "return", "output" ]
ENSURE THE STACKTRACE AND CAUSAL CHAIN IS CAPTURED, PLUS ADD FEATURES OF Except :param e: AN EXCEPTION OF ANY TYPE :param stack_depth: HOW MANY CALLS TO TAKE OFF THE TOP OF THE STACK TRACE :return: A Except OBJECT OF THE SAME
[ "ENSURE", "THE", "STACKTRACE", "AND", "CAUSAL", "CHAIN", "IS", "CAPTURED", "PLUS", "ADD", "FEATURES", "OF", "Except" ]
0971277ac9caf28a755b766b70621916957d4fea
https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/exceptions.py#L74-L104
train
grahame/dividebatur
dividebatur/counter.py
SenateCounter.determine_elected_candidates_in_order
def determine_elected_candidates_in_order(self, candidate_votes): """ determine all candidates with at least a quota of votes in `candidate_votes'. returns results in order of decreasing vote count. Any ties are resolved within this method. """ eligible_by_vote = defaultdict(list) for candidate_id, votes in candidate_votes.candidate_votes_iter(): if candidate_id in self.candidates_elected: continue if votes < self.quota: continue eligible_by_vote[votes].append(candidate_id) elected = [] for votes in reversed(sorted(eligible_by_vote)): candidate_ids = eligible_by_vote[votes] # we sort here to ensure stability, so external callers can hard-coded their response candidate_ids.sort(key=self.candidate_order_fn) if len(candidate_ids) == 1: elected.append(candidate_ids[0]) else: tie_breaker_round = self.find_tie_breaker(candidate_ids) if tie_breaker_round is not None: self.results.provision_used( ActProvision("Multiple candidates elected with %d votes. Tie broken from previous totals." % (votes))) for candidate_id in reversed(sorted(candidate_ids, key=tie_breaker_round.get_vote_count)): elected.append(candidate_id) else: self.results.provision_used( ActProvision("Multiple candidates elected with %d votes. Input required from Australian Electoral Officer." % (votes))) permutations = list(itertools.permutations(candidate_ids)) permutations.sort() choice = self.resolve_election_order(permutations) for candidate_id in permutations[choice]: elected.append(candidate_id) return elected
python
def determine_elected_candidates_in_order(self, candidate_votes): """ determine all candidates with at least a quota of votes in `candidate_votes'. returns results in order of decreasing vote count. Any ties are resolved within this method. """ eligible_by_vote = defaultdict(list) for candidate_id, votes in candidate_votes.candidate_votes_iter(): if candidate_id in self.candidates_elected: continue if votes < self.quota: continue eligible_by_vote[votes].append(candidate_id) elected = [] for votes in reversed(sorted(eligible_by_vote)): candidate_ids = eligible_by_vote[votes] # we sort here to ensure stability, so external callers can hard-coded their response candidate_ids.sort(key=self.candidate_order_fn) if len(candidate_ids) == 1: elected.append(candidate_ids[0]) else: tie_breaker_round = self.find_tie_breaker(candidate_ids) if tie_breaker_round is not None: self.results.provision_used( ActProvision("Multiple candidates elected with %d votes. Tie broken from previous totals." % (votes))) for candidate_id in reversed(sorted(candidate_ids, key=tie_breaker_round.get_vote_count)): elected.append(candidate_id) else: self.results.provision_used( ActProvision("Multiple candidates elected with %d votes. Input required from Australian Electoral Officer." % (votes))) permutations = list(itertools.permutations(candidate_ids)) permutations.sort() choice = self.resolve_election_order(permutations) for candidate_id in permutations[choice]: elected.append(candidate_id) return elected
[ "def", "determine_elected_candidates_in_order", "(", "self", ",", "candidate_votes", ")", ":", "eligible_by_vote", "=", "defaultdict", "(", "list", ")", "for", "candidate_id", ",", "votes", "in", "candidate_votes", ".", "candidate_votes_iter", "(", ")", ":", "if", "candidate_id", "in", "self", ".", "candidates_elected", ":", "continue", "if", "votes", "<", "self", ".", "quota", ":", "continue", "eligible_by_vote", "[", "votes", "]", ".", "append", "(", "candidate_id", ")", "elected", "=", "[", "]", "for", "votes", "in", "reversed", "(", "sorted", "(", "eligible_by_vote", ")", ")", ":", "candidate_ids", "=", "eligible_by_vote", "[", "votes", "]", "# we sort here to ensure stability, so external callers can hard-coded their response", "candidate_ids", ".", "sort", "(", "key", "=", "self", ".", "candidate_order_fn", ")", "if", "len", "(", "candidate_ids", ")", "==", "1", ":", "elected", ".", "append", "(", "candidate_ids", "[", "0", "]", ")", "else", ":", "tie_breaker_round", "=", "self", ".", "find_tie_breaker", "(", "candidate_ids", ")", "if", "tie_breaker_round", "is", "not", "None", ":", "self", ".", "results", ".", "provision_used", "(", "ActProvision", "(", "\"Multiple candidates elected with %d votes. Tie broken from previous totals.\"", "%", "(", "votes", ")", ")", ")", "for", "candidate_id", "in", "reversed", "(", "sorted", "(", "candidate_ids", ",", "key", "=", "tie_breaker_round", ".", "get_vote_count", ")", ")", ":", "elected", ".", "append", "(", "candidate_id", ")", "else", ":", "self", ".", "results", ".", "provision_used", "(", "ActProvision", "(", "\"Multiple candidates elected with %d votes. Input required from Australian Electoral Officer.\"", "%", "(", "votes", ")", ")", ")", "permutations", "=", "list", "(", "itertools", ".", "permutations", "(", "candidate_ids", ")", ")", "permutations", ".", "sort", "(", ")", "choice", "=", "self", ".", "resolve_election_order", "(", "permutations", ")", "for", "candidate_id", "in", "permutations", "[", "choice", "]", ":", "elected", ".", "append", "(", "candidate_id", ")", "return", "elected" ]
determine all candidates with at least a quota of votes in `candidate_votes'. returns results in order of decreasing vote count. Any ties are resolved within this method.
[ "determine", "all", "candidates", "with", "at", "least", "a", "quota", "of", "votes", "in", "candidate_votes", ".", "returns", "results", "in", "order", "of", "decreasing", "vote", "count", ".", "Any", "ties", "are", "resolved", "within", "this", "method", "." ]
adc1f6e8013943471f1679e3c94f9448a1e4a472
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L264-L299
train
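The grouping step in determine_elected_candidates_in_order (bucket candidates by vote count, then walk the buckets in decreasing order) in isolation; the candidate names, vote totals and quota below are made up:

from collections import defaultdict

candidate_votes = {"A": 120000, "B": 95000, "C": 120000, "D": 80000}
quota = 90000

eligible_by_vote = defaultdict(list)
for candidate, votes in candidate_votes.items():
    if votes >= quota:                       # only candidates at or above the quota
        eligible_by_vote[votes].append(candidate)

for votes in sorted(eligible_by_vote, reverse=True):
    tied = sorted(eligible_by_vote[votes])   # stable order before any tie-breaking
    print(votes, tied)
# 120000 ['A', 'C']   <- a tie the counter must resolve
# 95000 ['B']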
grahame/dividebatur
dividebatur/counter.py
SenateCounter.get_initial_totals
def get_initial_totals(self): "determine the initial total for each candidate. only call this at the start of round 1" candidate_votes = {} # initialise to zero for every individual candidate for candidate_id in self.candidate_ids: candidate_votes[candidate_id] = 0 for candidate_id in self.candidate_ids: candidate_votes[candidate_id] = self.candidate_bundle_transactions.get_paper_count(candidate_id) for candidate_id in candidate_votes: candidate_votes[candidate_id] = int(candidate_votes[candidate_id]) return candidate_votes, 0, 0
python
def get_initial_totals(self): "determine the initial total for each candidate. only call this at the start of round 1" candidate_votes = {} # initialise to zero for every individual candidate for candidate_id in self.candidate_ids: candidate_votes[candidate_id] = 0 for candidate_id in self.candidate_ids: candidate_votes[candidate_id] = self.candidate_bundle_transactions.get_paper_count(candidate_id) for candidate_id in candidate_votes: candidate_votes[candidate_id] = int(candidate_votes[candidate_id]) return candidate_votes, 0, 0
[ "def", "get_initial_totals", "(", "self", ")", ":", "candidate_votes", "=", "{", "}", "# initialise to zero for every individual candidate", "for", "candidate_id", "in", "self", ".", "candidate_ids", ":", "candidate_votes", "[", "candidate_id", "]", "=", "0", "for", "candidate_id", "in", "self", ".", "candidate_ids", ":", "candidate_votes", "[", "candidate_id", "]", "=", "self", ".", "candidate_bundle_transactions", ".", "get_paper_count", "(", "candidate_id", ")", "for", "candidate_id", "in", "candidate_votes", ":", "candidate_votes", "[", "candidate_id", "]", "=", "int", "(", "candidate_votes", "[", "candidate_id", "]", ")", "return", "candidate_votes", ",", "0", ",", "0" ]
determine the initial total for each candidate. only call this at the start of round 1
[ "determine", "the", "initial", "total", "for", "each", "candidate", ".", "only", "call", "this", "at", "the", "start", "of", "round", "1" ]
adc1f6e8013943471f1679e3c94f9448a1e4a472
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L301-L311
train
grahame/dividebatur
dividebatur/counter.py
SenateCounter.bundle_to_next_candidate
def bundle_to_next_candidate(self, bundle): """ returns the next candidate_it of the next preference expressed in the ticket for this bundle, and the next ticket_state after preferences are moved along if the vote exhausts, candidate_id will be None """ ticket_state = bundle.ticket_state while True: ticket_state = TicketState(ticket_state.preferences, ticket_state.up_to + 1) candidate_id = get_preference(ticket_state) # if the preference passes through an elected or excluded candidate, we # skip over it if candidate_id in self.candidates_elected or candidate_id in self.candidates_excluded: continue return candidate_id, ticket_state
python
def bundle_to_next_candidate(self, bundle): """ returns the next candidate_it of the next preference expressed in the ticket for this bundle, and the next ticket_state after preferences are moved along if the vote exhausts, candidate_id will be None """ ticket_state = bundle.ticket_state while True: ticket_state = TicketState(ticket_state.preferences, ticket_state.up_to + 1) candidate_id = get_preference(ticket_state) # if the preference passes through an elected or excluded candidate, we # skip over it if candidate_id in self.candidates_elected or candidate_id in self.candidates_excluded: continue return candidate_id, ticket_state
[ "def", "bundle_to_next_candidate", "(", "self", ",", "bundle", ")", ":", "ticket_state", "=", "bundle", ".", "ticket_state", "while", "True", ":", "ticket_state", "=", "TicketState", "(", "ticket_state", ".", "preferences", ",", "ticket_state", ".", "up_to", "+", "1", ")", "candidate_id", "=", "get_preference", "(", "ticket_state", ")", "# if the preference passes through an elected or excluded candidate, we", "# skip over it", "if", "candidate_id", "in", "self", ".", "candidates_elected", "or", "candidate_id", "in", "self", ".", "candidates_excluded", ":", "continue", "return", "candidate_id", ",", "ticket_state" ]
returns the candidate_id of the next preference expressed in the ticket for this bundle, and the next ticket_state after preferences are moved along; if the vote exhausts, candidate_id will be None
[ "returns", "the", "candidate_id", "of", "the", "next", "preference", "expressed", "in", "the", "ticket", "for", "this", "bundle", "and", "the", "next", "ticket_state", "after", "preferences", "are", "moved", "along", "if", "the", "vote", "exhausts", "candidate_id", "will", "be", "None" ]
adc1f6e8013943471f1679e3c94f9448a1e4a472
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L313-L327
train
grahame/dividebatur
dividebatur/counter.py
SenateCounter.elect
def elect(self, candidate_aggregates, candidate_id): """ Elect a candidate, updating internal state to track this. Calculate the paper count to be transferred on to other candidates, and if required schedule a distribution fo papers. """ # somewhat paranoid cross-check, but we've had this bug before.. assert(candidate_id not in self.candidates_elected) elected_no = len(self.candidates_elected) + 1 self.candidates_elected[candidate_id] = True transfer_value = 0 excess_votes = paper_count = None if len(self.candidates_elected) != self.vacancies: excess_votes = max(candidate_aggregates.get_vote_count(candidate_id) - self.quota, 0) assert(excess_votes >= 0) paper_count = self.candidate_bundle_transactions.get_paper_count(candidate_id) if paper_count > 0: transfer_value = fractions.Fraction(excess_votes, paper_count) assert(transfer_value >= 0) self.election_distributions_pending.append((candidate_id, transfer_value, excess_votes)) self.results.candidate_elected( CandidateElected( candidate_id=candidate_id, order=elected_no, excess_votes=excess_votes, paper_count=paper_count, transfer_value=transfer_value))
python
def elect(self, candidate_aggregates, candidate_id): """ Elect a candidate, updating internal state to track this. Calculate the paper count to be transferred on to other candidates, and if required schedule a distribution fo papers. """ # somewhat paranoid cross-check, but we've had this bug before.. assert(candidate_id not in self.candidates_elected) elected_no = len(self.candidates_elected) + 1 self.candidates_elected[candidate_id] = True transfer_value = 0 excess_votes = paper_count = None if len(self.candidates_elected) != self.vacancies: excess_votes = max(candidate_aggregates.get_vote_count(candidate_id) - self.quota, 0) assert(excess_votes >= 0) paper_count = self.candidate_bundle_transactions.get_paper_count(candidate_id) if paper_count > 0: transfer_value = fractions.Fraction(excess_votes, paper_count) assert(transfer_value >= 0) self.election_distributions_pending.append((candidate_id, transfer_value, excess_votes)) self.results.candidate_elected( CandidateElected( candidate_id=candidate_id, order=elected_no, excess_votes=excess_votes, paper_count=paper_count, transfer_value=transfer_value))
[ "def", "elect", "(", "self", ",", "candidate_aggregates", ",", "candidate_id", ")", ":", "# somewhat paranoid cross-check, but we've had this bug before..", "assert", "(", "candidate_id", "not", "in", "self", ".", "candidates_elected", ")", "elected_no", "=", "len", "(", "self", ".", "candidates_elected", ")", "+", "1", "self", ".", "candidates_elected", "[", "candidate_id", "]", "=", "True", "transfer_value", "=", "0", "excess_votes", "=", "paper_count", "=", "None", "if", "len", "(", "self", ".", "candidates_elected", ")", "!=", "self", ".", "vacancies", ":", "excess_votes", "=", "max", "(", "candidate_aggregates", ".", "get_vote_count", "(", "candidate_id", ")", "-", "self", ".", "quota", ",", "0", ")", "assert", "(", "excess_votes", ">=", "0", ")", "paper_count", "=", "self", ".", "candidate_bundle_transactions", ".", "get_paper_count", "(", "candidate_id", ")", "if", "paper_count", ">", "0", ":", "transfer_value", "=", "fractions", ".", "Fraction", "(", "excess_votes", ",", "paper_count", ")", "assert", "(", "transfer_value", ">=", "0", ")", "self", ".", "election_distributions_pending", ".", "append", "(", "(", "candidate_id", ",", "transfer_value", ",", "excess_votes", ")", ")", "self", ".", "results", ".", "candidate_elected", "(", "CandidateElected", "(", "candidate_id", "=", "candidate_id", ",", "order", "=", "elected_no", ",", "excess_votes", "=", "excess_votes", ",", "paper_count", "=", "paper_count", ",", "transfer_value", "=", "transfer_value", ")", ")" ]
Elect a candidate, updating internal state to track this. Calculate the paper count to be transferred on to other candidates, and if required schedule a distribution of papers.
[ "Elect", "a", "candidate", "updating", "internal", "state", "to", "track", "this", ".", "Calculate", "the", "paper", "count", "to", "be", "transferred", "on", "to", "other", "candidates", "and", "if", "required", "schedule", "a", "distribution", "of", "papers", "." ]
adc1f6e8013943471f1679e3c94f9448a1e4a472
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L363-L392
train
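A worked example of the surplus transfer value computed in elect, using exact fractions as the counter does; the quota, vote total and paper count are invented numbers:

import fractions

quota = 90_001
candidate_votes = 120_000
paper_count = 150_000                                   # papers held by the elected candidate

excess_votes = max(candidate_votes - quota, 0)          # 29_999 surplus votes
transfer_value = fractions.Fraction(excess_votes, paper_count)
print(transfer_value)                                   # 29999/150000
print(int(paper_count * transfer_value))                # 29999 votes redistributed exactly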
grahame/dividebatur
dividebatur/counter.py
SenateCounter.find_tie_breaker
def find_tie_breaker(self, candidate_ids): """ finds a round in the count history in which the candidate_ids each had different vote counts if no such round exists, returns None """ for candidate_aggregates in reversed(self.round_candidate_aggregates): candidates_on_vote = defaultdict(int) for candidate_id in candidate_ids: votes = candidate_aggregates.get_vote_count(candidate_id) candidates_on_vote[votes] += 1 if max(candidates_on_vote.values()) == 1: return candidate_aggregates
python
def find_tie_breaker(self, candidate_ids): """ finds a round in the count history in which the candidate_ids each had different vote counts if no such round exists, returns None """ for candidate_aggregates in reversed(self.round_candidate_aggregates): candidates_on_vote = defaultdict(int) for candidate_id in candidate_ids: votes = candidate_aggregates.get_vote_count(candidate_id) candidates_on_vote[votes] += 1 if max(candidates_on_vote.values()) == 1: return candidate_aggregates
[ "def", "find_tie_breaker", "(", "self", ",", "candidate_ids", ")", ":", "for", "candidate_aggregates", "in", "reversed", "(", "self", ".", "round_candidate_aggregates", ")", ":", "candidates_on_vote", "=", "defaultdict", "(", "int", ")", "for", "candidate_id", "in", "candidate_ids", ":", "votes", "=", "candidate_aggregates", ".", "get_vote_count", "(", "candidate_id", ")", "candidates_on_vote", "[", "votes", "]", "+=", "1", "if", "max", "(", "candidates_on_vote", ".", "values", "(", ")", ")", "==", "1", ":", "return", "candidate_aggregates" ]
finds a round in the count history in which the candidate_ids each had different vote counts if no such round exists, returns None
[ "finds", "a", "round", "in", "the", "count", "history", "in", "which", "the", "candidate_ids", "each", "had", "different", "vote", "counts", "if", "no", "such", "round", "exists", "returns", "None" ]
adc1f6e8013943471f1679e3c94f9448a1e4a472
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L480-L491
train
grahame/dividebatur
dividebatur/counter.py
SenateCounter.get_candidate_notional_votes
def get_candidate_notional_votes(self, candidate_aggregates, adjustment): "aggregate of vote received by each candidate, and the votes received by any candidate lower in the poll" continuing = self.get_continuing_candidates(candidate_aggregates) candidates_notional = {} by_votes = self.get_votes_to_candidates(continuing, candidate_aggregates) total = adjustment for votes, candidates in sorted(by_votes.items(), key=lambda x: x[0]): for candidate_id in candidates: candidates_notional[candidate_id] = total + votes total += votes * len(candidates) return candidates_notional
python
def get_candidate_notional_votes(self, candidate_aggregates, adjustment): "aggregate of vote received by each candidate, and the votes received by any candidate lower in the poll" continuing = self.get_continuing_candidates(candidate_aggregates) candidates_notional = {} by_votes = self.get_votes_to_candidates(continuing, candidate_aggregates) total = adjustment for votes, candidates in sorted(by_votes.items(), key=lambda x: x[0]): for candidate_id in candidates: candidates_notional[candidate_id] = total + votes total += votes * len(candidates) return candidates_notional
[ "def", "get_candidate_notional_votes", "(", "self", ",", "candidate_aggregates", ",", "adjustment", ")", ":", "continuing", "=", "self", ".", "get_continuing_candidates", "(", "candidate_aggregates", ")", "candidates_notional", "=", "{", "}", "by_votes", "=", "self", ".", "get_votes_to_candidates", "(", "continuing", ",", "candidate_aggregates", ")", "total", "=", "adjustment", "for", "votes", ",", "candidates", "in", "sorted", "(", "by_votes", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ":", "for", "candidate_id", "in", "candidates", ":", "candidates_notional", "[", "candidate_id", "]", "=", "total", "+", "votes", "total", "+=", "votes", "*", "len", "(", "candidates", ")", "return", "candidates_notional" ]
aggregate of votes received by each candidate, and the votes received by any candidate lower in the poll
[ "aggregate", "of", "votes", "received", "by", "each", "candidate", "and", "the", "votes", "received", "by", "any", "candidate", "lower", "in", "the", "poll" ]
adc1f6e8013943471f1679e3c94f9448a1e4a472
https://github.com/grahame/dividebatur/blob/adc1f6e8013943471f1679e3c94f9448a1e4a472/dividebatur/counter.py#L544-L554
train
ArabellaTech/django-basic-cms
basic_cms/permissions.py
PagePermission.check
def check(self, action, page=None, lang=None, method=None): """Return ``True`` if the current user has permission on the page.""" if self.user.is_superuser: return True if action == 'change': return self.has_change_permission(page, lang, method) if action == 'delete': if not self.delete_page(): return False return True if action == 'add': if not self.add_page(): return False return True if action == 'freeze': perm = self.user.has_perm('pages.can_freeze') if perm: return True return False if action == 'publish': perm = self.user.has_perm('pages.can_publish') if perm: return True return False return False
python
def check(self, action, page=None, lang=None, method=None): """Return ``True`` if the current user has permission on the page.""" if self.user.is_superuser: return True if action == 'change': return self.has_change_permission(page, lang, method) if action == 'delete': if not self.delete_page(): return False return True if action == 'add': if not self.add_page(): return False return True if action == 'freeze': perm = self.user.has_perm('pages.can_freeze') if perm: return True return False if action == 'publish': perm = self.user.has_perm('pages.can_publish') if perm: return True return False return False
[ "def", "check", "(", "self", ",", "action", ",", "page", "=", "None", ",", "lang", "=", "None", ",", "method", "=", "None", ")", ":", "if", "self", ".", "user", ".", "is_superuser", ":", "return", "True", "if", "action", "==", "'change'", ":", "return", "self", ".", "has_change_permission", "(", "page", ",", "lang", ",", "method", ")", "if", "action", "==", "'delete'", ":", "if", "not", "self", ".", "delete_page", "(", ")", ":", "return", "False", "return", "True", "if", "action", "==", "'add'", ":", "if", "not", "self", ".", "add_page", "(", ")", ":", "return", "False", "return", "True", "if", "action", "==", "'freeze'", ":", "perm", "=", "self", ".", "user", ".", "has_perm", "(", "'pages.can_freeze'", ")", "if", "perm", ":", "return", "True", "return", "False", "if", "action", "==", "'publish'", ":", "perm", "=", "self", ".", "user", ".", "has_perm", "(", "'pages.can_publish'", ")", "if", "perm", ":", "return", "True", "return", "False", "return", "False" ]
Return ``True`` if the current user has permission on the page.
[ "Return", "True", "if", "the", "current", "user", "has", "permission", "on", "the", "page", "." ]
863f3c6098606f663994930cd8e7723ad0c07caf
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/permissions.py#L20-L47
train
ArabellaTech/django-basic-cms
basic_cms/permissions.py
PagePermission.has_change_permission
def has_change_permission(self, page, lang, method=None): """Return ``True`` if the current user has permission to change the page.""" # the user has always the right to look at a page content # if he doesn't try to modify it. if method != 'POST': return True # right to change all the pages if self.change_page(): return True if lang: # try the global language permission first perm = self.user.has_perm( 'pages.can_manage_%s' % lang.replace('-', '_') ) if perm: return True # then per object permission perm_func = getattr(self, 'manage (%s)_page' % lang) if perm_func(page): return True # last hierarchic permissions because it's more expensive perm_func = getattr(self, 'manage hierarchy_page') if perm_func(page): return True else: for ancestor in page.get_ancestors(): if perm_func(ancestor): return True # everything else failed, no permissions return False
python
def has_change_permission(self, page, lang, method=None): """Return ``True`` if the current user has permission to change the page.""" # the user has always the right to look at a page content # if he doesn't try to modify it. if method != 'POST': return True # right to change all the pages if self.change_page(): return True if lang: # try the global language permission first perm = self.user.has_perm( 'pages.can_manage_%s' % lang.replace('-', '_') ) if perm: return True # then per object permission perm_func = getattr(self, 'manage (%s)_page' % lang) if perm_func(page): return True # last hierarchic permissions because it's more expensive perm_func = getattr(self, 'manage hierarchy_page') if perm_func(page): return True else: for ancestor in page.get_ancestors(): if perm_func(ancestor): return True # everything else failed, no permissions return False
[ "def", "has_change_permission", "(", "self", ",", "page", ",", "lang", ",", "method", "=", "None", ")", ":", "# the user has always the right to look at a page content", "# if he doesn't try to modify it.", "if", "method", "!=", "'POST'", ":", "return", "True", "# right to change all the pages", "if", "self", ".", "change_page", "(", ")", ":", "return", "True", "if", "lang", ":", "# try the global language permission first", "perm", "=", "self", ".", "user", ".", "has_perm", "(", "'pages.can_manage_%s'", "%", "lang", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "if", "perm", ":", "return", "True", "# then per object permission", "perm_func", "=", "getattr", "(", "self", ",", "'manage (%s)_page'", "%", "lang", ")", "if", "perm_func", "(", "page", ")", ":", "return", "True", "# last hierarchic permissions because it's more expensive", "perm_func", "=", "getattr", "(", "self", ",", "'manage hierarchy_page'", ")", "if", "perm_func", "(", "page", ")", ":", "return", "True", "else", ":", "for", "ancestor", "in", "page", ".", "get_ancestors", "(", ")", ":", "if", "perm_func", "(", "ancestor", ")", ":", "return", "True", "# everything else failed, no permissions", "return", "False" ]
Return ``True`` if the current user has permission to change the page.
[ "Return", "True", "if", "the", "current", "user", "has", "permission", "to", "change", "the", "page", "." ]
863f3c6098606f663994930cd8e7723ad0c07caf
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/permissions.py#L49-L82
train
hughsie/python-appstream
appstream/utils.py
_join_lines
def _join_lines(txt): """ Remove whitespace from XML input """ txt = txt or '' # Handle NoneType input values val = '' lines = txt.split('\n') for line in lines: stripped = line.strip() if len(stripped) == 0: continue val += stripped + ' ' return val.strip()
python
def _join_lines(txt): """ Remove whitespace from XML input """ txt = txt or '' # Handle NoneType input values val = '' lines = txt.split('\n') for line in lines: stripped = line.strip() if len(stripped) == 0: continue val += stripped + ' ' return val.strip()
[ "def", "_join_lines", "(", "txt", ")", ":", "txt", "=", "txt", "or", "''", "# Handle NoneType input values", "val", "=", "''", "lines", "=", "txt", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "stripped", "=", "line", ".", "strip", "(", ")", "if", "len", "(", "stripped", ")", "==", "0", ":", "continue", "val", "+=", "stripped", "+", "' '", "return", "val", ".", "strip", "(", ")" ]
Remove whitespace from XML input
[ "Remove", "whitespace", "from", "XML", "input" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/utils.py#L32-L42
train
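A quick usage check of the _join_lines helper; the import path follows the file path shown above and assumes python-appstream is installed, and the sample text is made up:

from appstream.utils import _join_lines   # assumes python-appstream is installed

text = """
  GNOME Software lets you install
  and update applications

  and system extensions.
"""
print(_join_lines(text))
# -> 'GNOME Software lets you install and update applications and system extensions.'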
hughsie/python-appstream
appstream/utils.py
_parse_desc
def _parse_desc(node): """ A quick'n'dirty description parser """ desc = '' if len(node) == 0: return '<p>' + node.text + '</p>' for n in node: if n.tag == 'p': desc += '<p>' + _join_lines(n.text) + '</p>' elif n.tag == 'ol' or n.tag == 'ul': desc += '<ul>' for c in n: if c.tag == 'li': desc += '<li>' + _join_lines(c.text) + '</li>' else: raise ParseError('Expected <li> in <%s>, got <%s>' % (n.tag, c.tag)) desc += '</ul>' else: raise ParseError('Expected <p>, <ul>, <ol> in <%s>, got <%s>' % (node.tag, n.tag)) return desc
python
def _parse_desc(node): """ A quick'n'dirty description parser """ desc = '' if len(node) == 0: return '<p>' + node.text + '</p>' for n in node: if n.tag == 'p': desc += '<p>' + _join_lines(n.text) + '</p>' elif n.tag == 'ol' or n.tag == 'ul': desc += '<ul>' for c in n: if c.tag == 'li': desc += '<li>' + _join_lines(c.text) + '</li>' else: raise ParseError('Expected <li> in <%s>, got <%s>' % (n.tag, c.tag)) desc += '</ul>' else: raise ParseError('Expected <p>, <ul>, <ol> in <%s>, got <%s>' % (node.tag, n.tag)) return desc
[ "def", "_parse_desc", "(", "node", ")", ":", "desc", "=", "''", "if", "len", "(", "node", ")", "==", "0", ":", "return", "'<p>'", "+", "node", ".", "text", "+", "'</p>'", "for", "n", "in", "node", ":", "if", "n", ".", "tag", "==", "'p'", ":", "desc", "+=", "'<p>'", "+", "_join_lines", "(", "n", ".", "text", ")", "+", "'</p>'", "elif", "n", ".", "tag", "==", "'ol'", "or", "n", ".", "tag", "==", "'ul'", ":", "desc", "+=", "'<ul>'", "for", "c", "in", "n", ":", "if", "c", ".", "tag", "==", "'li'", ":", "desc", "+=", "'<li>'", "+", "_join_lines", "(", "c", ".", "text", ")", "+", "'</li>'", "else", ":", "raise", "ParseError", "(", "'Expected <li> in <%s>, got <%s>'", "%", "(", "n", ".", "tag", ",", "c", ".", "tag", ")", ")", "desc", "+=", "'</ul>'", "else", ":", "raise", "ParseError", "(", "'Expected <p>, <ul>, <ol> in <%s>, got <%s>'", "%", "(", "node", ".", "tag", ",", "n", ".", "tag", ")", ")", "return", "desc" ]
A quick'n'dirty description parser
[ "A", "quick", "n", "dirty", "description", "parser" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/utils.py#L44-L62
train
hughsie/python-appstream
appstream/utils.py
validate_description
def validate_description(xml_data): """ Validate the description for validity """ try: root = ET.fromstring('<document>' + xml_data + '</document>') except StdlibParseError as e: raise ParseError(str(e)) return _parse_desc(root)
python
def validate_description(xml_data): """ Validate the description for validity """ try: root = ET.fromstring('<document>' + xml_data + '</document>') except StdlibParseError as e: raise ParseError(str(e)) return _parse_desc(root)
[ "def", "validate_description", "(", "xml_data", ")", ":", "try", ":", "root", "=", "ET", ".", "fromstring", "(", "'<document>'", "+", "xml_data", "+", "'</document>'", ")", "except", "StdlibParseError", "as", "e", ":", "raise", "ParseError", "(", "str", "(", "e", ")", ")", "return", "_parse_desc", "(", "root", ")" ]
Validate the description for validity
[ "Validate", "the", "description", "for", "validity" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/utils.py#L64-L70
train
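A hedged round-trip example for validate_description: markup the parser understands is returned re-serialised, while unexpected tags raise ParseError (caught generically here, since the exact import location of ParseError is not shown in this record):

from appstream.utils import validate_description   # assumes python-appstream is installed

good = "<p>First release.</p><ul><li>Fixes a crash</li></ul>"
print(validate_description(good))
# -> '<p>First release.</p><ul><li>Fixes a crash</li></ul>'

try:
    validate_description("<b>bold is not allowed</b>")
except Exception as exc:    # ParseError in appstream; exact import path not shown here
    print("rejected:", exc)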
hughsie/python-appstream
appstream/utils.py
import_description
def import_description(text): """ Convert ASCII text to AppStream markup format """ xml = '' is_in_ul = False for line in text.split('\n'): # don't include whitespace line = line.strip() if len(line) == 0: continue # detected as a list element? line_li = _import_description_to_list_element(line) if line_li: # first list element if not is_in_ul: xml += '<ul>\n' is_in_ul = True xml += '<li>' + _import_description_sentence_case(line_li) + '</li>\n' continue # done with the list if is_in_ul: xml += '</ul>\n' is_in_ul = False # regular paragraph xml += '<p>' + _import_description_sentence_case(line) + '</p>\n' # no trailing paragraph if is_in_ul: xml += '</ul>\n' return xml
python
def import_description(text): """ Convert ASCII text to AppStream markup format """ xml = '' is_in_ul = False for line in text.split('\n'): # don't include whitespace line = line.strip() if len(line) == 0: continue # detected as a list element? line_li = _import_description_to_list_element(line) if line_li: # first list element if not is_in_ul: xml += '<ul>\n' is_in_ul = True xml += '<li>' + _import_description_sentence_case(line_li) + '</li>\n' continue # done with the list if is_in_ul: xml += '</ul>\n' is_in_ul = False # regular paragraph xml += '<p>' + _import_description_sentence_case(line) + '</p>\n' # no trailing paragraph if is_in_ul: xml += '</ul>\n' return xml
[ "def", "import_description", "(", "text", ")", ":", "xml", "=", "''", "is_in_ul", "=", "False", "for", "line", "in", "text", ".", "split", "(", "'\\n'", ")", ":", "# don't include whitespace", "line", "=", "line", ".", "strip", "(", ")", "if", "len", "(", "line", ")", "==", "0", ":", "continue", "# detected as a list element?", "line_li", "=", "_import_description_to_list_element", "(", "line", ")", "if", "line_li", ":", "# first list element", "if", "not", "is_in_ul", ":", "xml", "+=", "'<ul>\\n'", "is_in_ul", "=", "True", "xml", "+=", "'<li>'", "+", "_import_description_sentence_case", "(", "line_li", ")", "+", "'</li>\\n'", "continue", "# done with the list", "if", "is_in_ul", ":", "xml", "+=", "'</ul>\\n'", "is_in_ul", "=", "False", "# regular paragraph", "xml", "+=", "'<p>'", "+", "_import_description_sentence_case", "(", "line", ")", "+", "'</p>\\n'", "# no trailing paragraph", "if", "is_in_ul", ":", "xml", "+=", "'</ul>\\n'", "return", "xml" ]
Convert ASCII text to AppStream markup format
[ "Convert", "ASCII", "text", "to", "AppStream", "markup", "format" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/utils.py#L84-L117
train
sirfoga/pyhal
hal/internet/selenium/forms.py
SeleniumFormFiller.fill_form_field
def fill_form_field(self, field_name, field_value): """Fills given field with given value :param field_name: name of field to fill :param field_value: value with which to fill field """ self.browser.execute_script( "document.getElementsByName(\"" + str( field_name) + "\")[0].value = \"" + str(field_value) + "\"")
python
def fill_form_field(self, field_name, field_value): """Fills given field with given value :param field_name: name of field to fill :param field_value: value with which to fill field """ self.browser.execute_script( "document.getElementsByName(\"" + str( field_name) + "\")[0].value = \"" + str(field_value) + "\"")
[ "def", "fill_form_field", "(", "self", ",", "field_name", ",", "field_value", ")", ":", "self", ".", "browser", ".", "execute_script", "(", "\"document.getElementsByName(\\\"\"", "+", "str", "(", "field_name", ")", "+", "\"\\\")[0].value = \\\"\"", "+", "str", "(", "field_value", ")", "+", "\"\\\"\"", ")" ]
Fills given field with given value :param field_name: name of field to fill :param field_value: value with which to fill field
[ "Fills", "given", "field", "with", "given", "value" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/selenium/forms.py#L15-L23
train
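The execute_script call in fill_form_field builds JavaScript by string concatenation; Selenium also accepts script arguments, which sidesteps quoting and injection problems. A hedged sketch (the inline demo page and field name are made up, and any WebDriver would do in place of Firefox):

from selenium import webdriver   # assumes selenium and a geckodriver are available

def fill_form_field(driver, field_name, field_value):
    # pass the values as script arguments instead of splicing them into the JS string
    driver.execute_script(
        "document.getElementsByName(arguments[0])[0].value = arguments[1];",
        field_name, field_value)

driver = webdriver.Firefox()
driver.get("data:text/html,<form><input name='username'></form>")   # inline demo page
fill_form_field(driver, "username", "alice")
print(driver.execute_script(
    "return document.getElementsByName('username')[0].value;"))     # -> alice
driver.quit()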
sirfoga/pyhal
hal/internet/selenium/forms.py
SeleniumFormFiller.fill_login_form
def fill_login_form(self, username, username_field, user_password, user_password_field): """Fills form with login info :param username: user login :param username_field: name of field to fill with username :param user_password: login password :param user_password_field: name of field to fill with user password """ self.fill_form_field(username_field, username) # set username self.fill_form_field(user_password_field, user_password)
python
def fill_login_form(self, username, username_field, user_password, user_password_field): """Fills form with login info :param username: user login :param username_field: name of field to fill with username :param user_password: login password :param user_password_field: name of field to fill with user password """ self.fill_form_field(username_field, username) # set username self.fill_form_field(user_password_field, user_password)
[ "def", "fill_login_form", "(", "self", ",", "username", ",", "username_field", ",", "user_password", ",", "user_password_field", ")", ":", "self", ".", "fill_form_field", "(", "username_field", ",", "username", ")", "# set username", "self", ".", "fill_form_field", "(", "user_password_field", ",", "user_password", ")" ]
Fills form with login info :param username: user login :param username_field: name of field to fill with username :param user_password: login password :param user_password_field: name of field to fill with user password
[ "Fills", "form", "with", "login", "info" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/selenium/forms.py#L25-L35
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/commands.py
open_scene
def open_scene(f, kwargs=None): """Opens the given JB_File :param f: the file to open :type f: :class:`jukeboxcore.filesys.JB_File` :param kwargs: keyword arguments for the command maya.cmds file. defaultflags that are always used: :open: ``True`` e.g. to force the open command use ``{'force'=True}``. :type kwargs: dict|None :returns: An action status. The returnvalue of the actionstatus is the opened mayafile :rtype: :class:`ActionStatus` :raises: None """ defaultkwargs = {'open':True} if kwargs is None: kwargs = {} kwargs.update(defaultkwargs) fp = f.get_fullpath() mayafile = cmds.file(fp, **kwargs) msg = "Successfully opened file %s with arguments: %s" % (fp, kwargs) return ActionStatus(ActionStatus.SUCCESS, msg, returnvalue=mayafile)
python
def open_scene(f, kwargs=None): """Opens the given JB_File :param f: the file to open :type f: :class:`jukeboxcore.filesys.JB_File` :param kwargs: keyword arguments for the command maya.cmds file. defaultflags that are always used: :open: ``True`` e.g. to force the open command use ``{'force'=True}``. :type kwargs: dict|None :returns: An action status. The returnvalue of the actionstatus is the opened mayafile :rtype: :class:`ActionStatus` :raises: None """ defaultkwargs = {'open':True} if kwargs is None: kwargs = {} kwargs.update(defaultkwargs) fp = f.get_fullpath() mayafile = cmds.file(fp, **kwargs) msg = "Successfully opened file %s with arguments: %s" % (fp, kwargs) return ActionStatus(ActionStatus.SUCCESS, msg, returnvalue=mayafile)
[ "def", "open_scene", "(", "f", ",", "kwargs", "=", "None", ")", ":", "defaultkwargs", "=", "{", "'open'", ":", "True", "}", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "kwargs", ".", "update", "(", "defaultkwargs", ")", "fp", "=", "f", ".", "get_fullpath", "(", ")", "mayafile", "=", "cmds", ".", "file", "(", "fp", ",", "*", "*", "kwargs", ")", "msg", "=", "\"Successfully opened file %s with arguments: %s\"", "%", "(", "fp", ",", "kwargs", ")", "return", "ActionStatus", "(", "ActionStatus", ".", "SUCCESS", ",", "msg", ",", "returnvalue", "=", "mayafile", ")" ]
Opens the given JB_File :param f: the file to open :type f: :class:`jukeboxcore.filesys.JB_File` :param kwargs: keyword arguments for the command maya.cmds file. defaultflags that are always used: :open: ``True`` e.g. to force the open command use ``{'force'=True}``. :type kwargs: dict|None :returns: An action status. The returnvalue of the actionstatus is the opened mayafile :rtype: :class:`ActionStatus` :raises: None
[ "Opens", "the", "given", "JB_File" ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/commands.py#L11-L34
train
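A hedged usage sketch for open_scene above. How a jukeboxcore JB_File instance is built is not part of this record, so jb_file below is assumed to already exist; note the force flag is written with a colon here, which is the valid dict syntax for the ``force`` example mentioned in the docstring.

from jukeboxmaya.commands import open_scene

# Assumed: jb_file is an existing jukeboxcore.filesys.JB_File pointing at a
# saved Maya scene; constructing one is outside the scope of this record.
status = open_scene(jb_file, kwargs={'force': True})  # force the open, per the docstring
print(status)  # an ActionStatus whose return value is the opened Maya file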
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/commands.py
import_all_references
def import_all_references(arg, kwargs=None): """Import all references in the currently open scene :param arg: this argument is ignored. But thisway you can use this function in an ActionUnit more easily. :param kwargs: keyword arguments for the command maya.cmds file. defaultflags that are always used: :importReference: ``True`` :type kwargs: dict|None :returns: An action status. The returnvalue of the actionstatus are the imported references. :rtype: :class:`ActionStatus` :raises: None """ defaultkwargs = {'importReference':True} if kwargs is None: kwargs = {} kwargs.update(defaultkwargs) imported = [] # list all reference files refs = cmds.file(query=True, reference=True) while refs: for rfile in refs: cmds.file(rfile, **kwargs) imported.append(rfile) refs = cmds.file(query=True, reference=True) msg = "Successfully imported references %s with arguments: %s" % (imported, kwargs) return ActionStatus(ActionStatus.SUCCESS, msg, returnvalue=imported)
python
def import_all_references(arg, kwargs=None): """Import all references in the currently open scene :param arg: this argument is ignored. But thisway you can use this function in an ActionUnit more easily. :param kwargs: keyword arguments for the command maya.cmds file. defaultflags that are always used: :importReference: ``True`` :type kwargs: dict|None :returns: An action status. The returnvalue of the actionstatus are the imported references. :rtype: :class:`ActionStatus` :raises: None """ defaultkwargs = {'importReference':True} if kwargs is None: kwargs = {} kwargs.update(defaultkwargs) imported = [] # list all reference files refs = cmds.file(query=True, reference=True) while refs: for rfile in refs: cmds.file(rfile, **kwargs) imported.append(rfile) refs = cmds.file(query=True, reference=True) msg = "Successfully imported references %s with arguments: %s" % (imported, kwargs) return ActionStatus(ActionStatus.SUCCESS, msg, returnvalue=imported)
[ "def", "import_all_references", "(", "arg", ",", "kwargs", "=", "None", ")", ":", "defaultkwargs", "=", "{", "'importReference'", ":", "True", "}", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "kwargs", ".", "update", "(", "defaultkwargs", ")", "imported", "=", "[", "]", "# list all reference files", "refs", "=", "cmds", ".", "file", "(", "query", "=", "True", ",", "reference", "=", "True", ")", "while", "refs", ":", "for", "rfile", "in", "refs", ":", "cmds", ".", "file", "(", "rfile", ",", "*", "*", "kwargs", ")", "imported", ".", "append", "(", "rfile", ")", "refs", "=", "cmds", ".", "file", "(", "query", "=", "True", ",", "reference", "=", "True", ")", "msg", "=", "\"Successfully imported references %s with arguments: %s\"", "%", "(", "imported", ",", "kwargs", ")", "return", "ActionStatus", "(", "ActionStatus", ".", "SUCCESS", ",", "msg", ",", "returnvalue", "=", "imported", ")" ]
Import all references in the currently open scene :param arg: this argument is ignored. But thisway you can use this function in an ActionUnit more easily. :param kwargs: keyword arguments for the command maya.cmds file. defaultflags that are always used: :importReference: ``True`` :type kwargs: dict|None :returns: An action status. The returnvalue of the actionstatus are the imported references. :rtype: :class:`ActionStatus` :raises: None
[ "Import", "all", "references", "in", "the", "currently", "open", "scene" ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/commands.py#L70-L98
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/commands.py
update_scenenode
def update_scenenode(f): """Set the id of the current scene node to the id for the given file :param f: the file to save the current scene to :type f: :class:`jukeboxcore.filesys.JB_File` :returns: None :rtype: None :raises: None """ n = get_current_scene_node() if not n: msg = "Could not find a scene node." return ActionStatus(ActionStatus.FAILURE, msg) # get dbentry for for the given jbfile tfi = f.get_obj() assert tfi tf = dj.taskfiles.get(task=tfi.task, releasetype=tfi.releasetype, version=tfi.version, descriptor=tfi.descriptor, typ=tfi.typ) cmds.setAttr('%s.taskfile_id' % n, lock=False) cmds.setAttr('%s.taskfile_id' % n, tf.pk) cmds.setAttr('%s.taskfile_id' % n, lock=True) msg = "Successfully updated scene node to %s" % tf.id return ActionStatus(ActionStatus.SUCCESS, msg)
python
def update_scenenode(f): """Set the id of the current scene node to the id for the given file :param f: the file to save the current scene to :type f: :class:`jukeboxcore.filesys.JB_File` :returns: None :rtype: None :raises: None """ n = get_current_scene_node() if not n: msg = "Could not find a scene node." return ActionStatus(ActionStatus.FAILURE, msg) # get dbentry for for the given jbfile tfi = f.get_obj() assert tfi tf = dj.taskfiles.get(task=tfi.task, releasetype=tfi.releasetype, version=tfi.version, descriptor=tfi.descriptor, typ=tfi.typ) cmds.setAttr('%s.taskfile_id' % n, lock=False) cmds.setAttr('%s.taskfile_id' % n, tf.pk) cmds.setAttr('%s.taskfile_id' % n, lock=True) msg = "Successfully updated scene node to %s" % tf.id return ActionStatus(ActionStatus.SUCCESS, msg)
[ "def", "update_scenenode", "(", "f", ")", ":", "n", "=", "get_current_scene_node", "(", ")", "if", "not", "n", ":", "msg", "=", "\"Could not find a scene node.\"", "return", "ActionStatus", "(", "ActionStatus", ".", "FAILURE", ",", "msg", ")", "# get dbentry for for the given jbfile", "tfi", "=", "f", ".", "get_obj", "(", ")", "assert", "tfi", "tf", "=", "dj", ".", "taskfiles", ".", "get", "(", "task", "=", "tfi", ".", "task", ",", "releasetype", "=", "tfi", ".", "releasetype", ",", "version", "=", "tfi", ".", "version", ",", "descriptor", "=", "tfi", ".", "descriptor", ",", "typ", "=", "tfi", ".", "typ", ")", "cmds", ".", "setAttr", "(", "'%s.taskfile_id'", "%", "n", ",", "lock", "=", "False", ")", "cmds", ".", "setAttr", "(", "'%s.taskfile_id'", "%", "n", ",", "tf", ".", "pk", ")", "cmds", ".", "setAttr", "(", "'%s.taskfile_id'", "%", "n", ",", "lock", "=", "True", ")", "msg", "=", "\"Successfully updated scene node to %s\"", "%", "tf", ".", "id", "return", "ActionStatus", "(", "ActionStatus", ".", "SUCCESS", ",", "msg", ")" ]
Set the id of the current scene node to the id for the given file :param f: the file to save the current scene to :type f: :class:`jukeboxcore.filesys.JB_File` :returns: None :rtype: None :raises: None
[ "Set", "the", "id", "of", "the", "current", "scene", "node", "to", "the", "id", "for", "the", "given", "file" ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/commands.py#L101-L127
train
TorkamaniLab/metapipe
metapipe/models/job.py
call
def call(args, stdout=PIPE, stderr=PIPE): """ Calls the given arguments in a seperate process and returns the contents of standard out. """ p = Popen(args, stdout=stdout, stderr=stderr) out, err = p.communicate() try: return out.decode(sys.stdout.encoding), err.decode(sys.stdout.encoding) except Exception: return out, err
python
def call(args, stdout=PIPE, stderr=PIPE): """ Calls the given arguments in a seperate process and returns the contents of standard out. """ p = Popen(args, stdout=stdout, stderr=stderr) out, err = p.communicate() try: return out.decode(sys.stdout.encoding), err.decode(sys.stdout.encoding) except Exception: return out, err
[ "def", "call", "(", "args", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", ":", "p", "=", "Popen", "(", "args", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "try", ":", "return", "out", ".", "decode", "(", "sys", ".", "stdout", ".", "encoding", ")", ",", "err", ".", "decode", "(", "sys", ".", "stdout", ".", "encoding", ")", "except", "Exception", ":", "return", "out", ",", "err" ]
Calls the given arguments in a seperate process and returns the contents of standard out.
[ "Calls", "the", "given", "arguments", "in", "a", "seperate", "process", "and", "returns", "the", "contents", "of", "standard", "out", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/job.py#L11-L21
train
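A small usage example for call() above, assuming it is importable from metapipe.models.job as the path field suggests and that an ``echo`` executable is on PATH (POSIX).

from metapipe.models.job import call

# Run a command and capture stdout/stderr; decoding falls back to raw bytes if
# sys.stdout has no usable encoding (see the except branch in the record above).
out, err = call(['echo', 'hello'])
print(out.strip())  # hello
print(repr(err))    # '' -- nothing was written to stderr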
TorkamaniLab/metapipe
metapipe/models/job.py
Job.make
def make(self): """ Evaluate the command, and write it to a file. """ eval = self.command.eval() with open(self.filename, 'w') as f: f.write(eval)
python
def make(self): """ Evaluate the command, and write it to a file. """ eval = self.command.eval() with open(self.filename, 'w') as f: f.write(eval)
[ "def", "make", "(", "self", ")", ":", "eval", "=", "self", ".", "command", ".", "eval", "(", ")", "with", "open", "(", "self", ".", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "eval", ")" ]
Evaluate the command, and write it to a file.
[ "Evaluate", "the", "command", "and", "write", "it", "to", "a", "file", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/job.py#L49-L53
train
wylee/runcommands
runcommands/collection.py
Collection.set_default_args
def set_default_args(self, default_args): """Set default args for commands in collection. Default args are used when the corresponding args aren't passed on the command line or in a direct call. """ for name, args in default_args.items(): command = self[name] command.default_args = default_args.get(command.name) or {}
python
def set_default_args(self, default_args): """Set default args for commands in collection. Default args are used when the corresponding args aren't passed on the command line or in a direct call. """ for name, args in default_args.items(): command = self[name] command.default_args = default_args.get(command.name) or {}
[ "def", "set_default_args", "(", "self", ",", "default_args", ")", ":", "for", "name", ",", "args", "in", "default_args", ".", "items", "(", ")", ":", "command", "=", "self", "[", "name", "]", "command", ".", "default_args", "=", "default_args", ".", "get", "(", "command", ".", "name", ")", "or", "{", "}" ]
Set default args for commands in collection. Default args are used when the corresponding args aren't passed on the command line or in a direct call.
[ "Set", "default", "args", "for", "commands", "in", "collection", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/collection.py#L60-L69
train
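A usage sketch for Collection.set_default_args above. Building the collection itself is not shown in this record, so ``commands`` below is assumed to be an already-populated runcommands Collection containing a command registered as "deploy".

from runcommands.collection import Collection

# Assumed: `commands` is a Collection that already holds a command named
# "deploy"; only the default-args call itself is taken from the record above.
commands.set_default_args({
    'deploy': {'host': 'staging.example.com', 'version': '1.2.3'},
})
# commands['deploy'].default_args is now
# {'host': 'staging.example.com', 'version': '1.2.3'}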
ClearcodeHQ/matchbox
src/matchbox/box.py
MatchBox.extract_traits
def extract_traits(self, entity): """ Extract data required to classify entity. :param object entity: :return: namedtuple consisting of characteristic traits and match flag :rtype: matchbox.box.Trait """ traits = getattr(entity, self._characteristic) if traits is not None and isinstance(traits, Hashable): traits = [traits] return Trait( traits, getattr(entity, self._characteristic + '_match', True) )
python
def extract_traits(self, entity): """ Extract data required to classify entity. :param object entity: :return: namedtuple consisting of characteristic traits and match flag :rtype: matchbox.box.Trait """ traits = getattr(entity, self._characteristic) if traits is not None and isinstance(traits, Hashable): traits = [traits] return Trait( traits, getattr(entity, self._characteristic + '_match', True) )
[ "def", "extract_traits", "(", "self", ",", "entity", ")", ":", "traits", "=", "getattr", "(", "entity", ",", "self", ".", "_characteristic", ")", "if", "traits", "is", "not", "None", "and", "isinstance", "(", "traits", ",", "Hashable", ")", ":", "traits", "=", "[", "traits", "]", "return", "Trait", "(", "traits", ",", "getattr", "(", "entity", ",", "self", ".", "_characteristic", "+", "'_match'", ",", "True", ")", ")" ]
Extract data required to classify entity. :param object entity: :return: namedtuple consisting of characteristic traits and match flag :rtype: matchbox.box.Trait
[ "Extract", "data", "required", "to", "classify", "entity", "." ]
22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4
https://github.com/ClearcodeHQ/matchbox/blob/22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4/src/matchbox/box.py#L62-L76
train
ClearcodeHQ/matchbox
src/matchbox/box.py
MatchBox.add
def add(self, entity): """ Add entity to index. :param object entity: single object to add to box's index """ characteristic = self.extract_traits(entity) if not characteristic.traits: return if characteristic.is_matching: self.add_match(entity, *characteristic.traits) else: self.add_mismatch(entity, *characteristic.traits)
python
def add(self, entity): """ Add entity to index. :param object entity: single object to add to box's index """ characteristic = self.extract_traits(entity) if not characteristic.traits: return if characteristic.is_matching: self.add_match(entity, *characteristic.traits) else: self.add_mismatch(entity, *characteristic.traits)
[ "def", "add", "(", "self", ",", "entity", ")", ":", "characteristic", "=", "self", ".", "extract_traits", "(", "entity", ")", "if", "not", "characteristic", ".", "traits", ":", "return", "if", "characteristic", ".", "is_matching", ":", "self", ".", "add_match", "(", "entity", ",", "*", "characteristic", ".", "traits", ")", "else", ":", "self", ".", "add_mismatch", "(", "entity", ",", "*", "characteristic", ".", "traits", ")" ]
Add entity to index. :param object entity: single object to add to box's index
[ "Add", "entity", "to", "index", "." ]
22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4
https://github.com/ClearcodeHQ/matchbox/blob/22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4/src/matchbox/box.py#L78-L91
train
ClearcodeHQ/matchbox
src/matchbox/box.py
MatchBox.remove
def remove(self, entity): """ Remove entity from the MatchBox. :param object entity: """ empty_traits = set() self.mismatch_unknown.discard(entity) for trait, entities in self.index.items(): entities.discard(entity) if not entities: empty_traits.add(trait) for empty_trait in empty_traits: del self.index[empty_trait]
python
def remove(self, entity): """ Remove entity from the MatchBox. :param object entity: """ empty_traits = set() self.mismatch_unknown.discard(entity) for trait, entities in self.index.items(): entities.discard(entity) if not entities: empty_traits.add(trait) for empty_trait in empty_traits: del self.index[empty_trait]
[ "def", "remove", "(", "self", ",", "entity", ")", ":", "empty_traits", "=", "set", "(", ")", "self", ".", "mismatch_unknown", ".", "discard", "(", "entity", ")", "for", "trait", ",", "entities", "in", "self", ".", "index", ".", "items", "(", ")", ":", "entities", ".", "discard", "(", "entity", ")", "if", "not", "entities", ":", "empty_traits", ".", "add", "(", "trait", ")", "for", "empty_trait", "in", "empty_traits", ":", "del", "self", ".", "index", "[", "empty_trait", "]" ]
Remove entity from the MatchBox. :param object entity:
[ "Remove", "entity", "from", "the", "MatchBox", "." ]
22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4
https://github.com/ClearcodeHQ/matchbox/blob/22f5bd163ad22ceacb0fcd5d4ddae9069d1a94f4/src/matchbox/box.py#L93-L107
train
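A combined usage sketch for the three MatchBox records above (extract_traits, add, remove). The MatchBox constructor is not shown in these records, so passing the characteristic name to it is an assumption, as is the shape of the entity.

from collections import namedtuple

from matchbox.box import MatchBox

# Assumed: MatchBox is built around the name of the attribute it indexes on,
# and entities expose that attribute plus an optional `<attribute>_match` flag.
Book = namedtuple('Book', ['language', 'language_match'])
box = MatchBox('language')

novel = Book(language='en', language_match=True)
box.add(novel)     # extract_traits() wraps the hashable 'en' trait in a list, add_match() indexes it
box.remove(novel)  # discards the entity everywhere and prunes traits left empty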
sholsapp/py509
py509/client.py
get_host_certificate
def get_host_certificate(host, port=443): """Get a host's certificate. :param str host: The hostname from which to fetch the certificate. :param int port: The port from which to fetch the certificate, if different than ``443``. :return: The host's X.509 certificate. :rtype: :class:`OpenSSL.crypto.X509` """ ip_addr = socket.gethostbyname(host) sock = socket.socket() context = SSL.Context(SSL.TLSv1_METHOD) context.set_options(SSL.OP_NO_SSLv2) context.load_verify_locations(certifi.where(), None) ssl_sock = SSL.Connection(context, sock) ssl_sock.connect((ip_addr, port)) ssl_sock.do_handshake() return ssl_sock.get_peer_certificate()
python
def get_host_certificate(host, port=443): """Get a host's certificate. :param str host: The hostname from which to fetch the certificate. :param int port: The port from which to fetch the certificate, if different than ``443``. :return: The host's X.509 certificate. :rtype: :class:`OpenSSL.crypto.X509` """ ip_addr = socket.gethostbyname(host) sock = socket.socket() context = SSL.Context(SSL.TLSv1_METHOD) context.set_options(SSL.OP_NO_SSLv2) context.load_verify_locations(certifi.where(), None) ssl_sock = SSL.Connection(context, sock) ssl_sock.connect((ip_addr, port)) ssl_sock.do_handshake() return ssl_sock.get_peer_certificate()
[ "def", "get_host_certificate", "(", "host", ",", "port", "=", "443", ")", ":", "ip_addr", "=", "socket", ".", "gethostbyname", "(", "host", ")", "sock", "=", "socket", ".", "socket", "(", ")", "context", "=", "SSL", ".", "Context", "(", "SSL", ".", "TLSv1_METHOD", ")", "context", ".", "set_options", "(", "SSL", ".", "OP_NO_SSLv2", ")", "context", ".", "load_verify_locations", "(", "certifi", ".", "where", "(", ")", ",", "None", ")", "ssl_sock", "=", "SSL", ".", "Connection", "(", "context", ",", "sock", ")", "ssl_sock", ".", "connect", "(", "(", "ip_addr", ",", "port", ")", ")", "ssl_sock", ".", "do_handshake", "(", ")", "return", "ssl_sock", ".", "get_peer_certificate", "(", ")" ]
Get a host's certificate. :param str host: The hostname from which to fetch the certificate. :param int port: The port from which to fetch the certificate, if different than ``443``. :return: The host's X.509 certificate. :rtype: :class:`OpenSSL.crypto.X509`
[ "Get", "a", "host", "s", "certificate", "." ]
83bd6786a8ec1543b66c42ea5523e611c3e8dc5a
https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/client.py#L11-L29
train
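A short example for get_host_certificate above; it needs network access and a pyOpenSSL build that still exposes TLSv1_METHOD (newer releases deprecate it), so treat it as a sketch.

from py509.client import get_host_certificate

cert = get_host_certificate('example.com')  # defaults to port 443
print(cert.get_subject().CN)                # the host's common name
print(cert.get_notAfter())                  # expiry timestamp as bytes, e.g. b'20260101000000Z'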
sirfoga/pyhal
hal/data/dicts.py
get_inner_keys
def get_inner_keys(dictionary): """Gets 2nd-level dictionary keys :param dictionary: dict :return: inner keys """ keys = [] for key in dictionary.keys(): inner_keys = dictionary[key].keys() keys += [ key + " " + inner_key # concatenate for inner_key in inner_keys ] return keys
python
def get_inner_keys(dictionary): """Gets 2nd-level dictionary keys :param dictionary: dict :return: inner keys """ keys = [] for key in dictionary.keys(): inner_keys = dictionary[key].keys() keys += [ key + " " + inner_key # concatenate for inner_key in inner_keys ] return keys
[ "def", "get_inner_keys", "(", "dictionary", ")", ":", "keys", "=", "[", "]", "for", "key", "in", "dictionary", ".", "keys", "(", ")", ":", "inner_keys", "=", "dictionary", "[", "key", "]", ".", "keys", "(", ")", "keys", "+=", "[", "key", "+", "\" \"", "+", "inner_key", "# concatenate", "for", "inner_key", "in", "inner_keys", "]", "return", "keys" ]
Gets 2nd-level dictionary keys :param dictionary: dict :return: inner keys
[ "Gets", "2nd", "-", "level", "dictionary", "keys" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/dicts.py#L26-L42
train
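A quick input/output example for get_inner_keys above; it is pure dictionary manipulation, so it runs anywhere hal.data.dicts is importable.

from hal.data.dicts import get_inner_keys

stats = {
    'cpu': {'user': 12.5, 'system': 3.1},
    'mem': {'used': 2048},
}
print(get_inner_keys(stats))
# ['cpu user', 'cpu system', 'mem used']  -- order follows dict insertion order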
sirfoga/pyhal
hal/data/dicts.py
get_inner_data
def get_inner_data(dictionary): """Gets 2nd-level data into 1st-level dictionary :param dictionary: dict :return: with 2nd-level data """ out = {} for key in dictionary.keys(): inner_keys = dictionary[key].keys() for inner_key in inner_keys: new_key = key + " " + inner_key # concatenate out[new_key] = dictionary[key][inner_key] return out
python
def get_inner_data(dictionary): """Gets 2nd-level data into 1st-level dictionary :param dictionary: dict :return: with 2nd-level data """ out = {} for key in dictionary.keys(): inner_keys = dictionary[key].keys() for inner_key in inner_keys: new_key = key + " " + inner_key # concatenate out[new_key] = dictionary[key][inner_key] return out
[ "def", "get_inner_data", "(", "dictionary", ")", ":", "out", "=", "{", "}", "for", "key", "in", "dictionary", ".", "keys", "(", ")", ":", "inner_keys", "=", "dictionary", "[", "key", "]", ".", "keys", "(", ")", "for", "inner_key", "in", "inner_keys", ":", "new_key", "=", "key", "+", "\" \"", "+", "inner_key", "# concatenate", "out", "[", "new_key", "]", "=", "dictionary", "[", "key", "]", "[", "inner_key", "]", "return", "out" ]
Gets 2nd-level data into 1st-level dictionary :param dictionary: dict :return: with 2nd-level data
[ "Gets", "2nd", "-", "level", "data", "into", "1st", "-", "level", "dictionary" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/dicts.py#L45-L60
train
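The companion example for get_inner_data above, flattening the same nested dictionary one level into concatenated keys.

from hal.data.dicts import get_inner_data

stats = {
    'cpu': {'user': 12.5, 'system': 3.1},
    'mem': {'used': 2048},
}
print(get_inner_data(stats))
# {'cpu user': 12.5, 'cpu system': 3.1, 'mem used': 2048}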
yamcs/yamcs-python
yamcs-cli/yamcs/cli/dbshell.py
DbShell.do_use
def do_use(self, args): """Use another instance, provided as argument.""" self.instance = args self.prompt = self.instance + '> ' archive = self._client.get_archive(self.instance) self.streams = [s.name for s in archive.list_streams()] self.tables = [t.name for t in archive.list_tables()]
python
def do_use(self, args): """Use another instance, provided as argument.""" self.instance = args self.prompt = self.instance + '> ' archive = self._client.get_archive(self.instance) self.streams = [s.name for s in archive.list_streams()] self.tables = [t.name for t in archive.list_tables()]
[ "def", "do_use", "(", "self", ",", "args", ")", ":", "self", ".", "instance", "=", "args", "self", ".", "prompt", "=", "self", ".", "instance", "+", "'> '", "archive", "=", "self", ".", "_client", ".", "get_archive", "(", "self", ".", "instance", ")", "self", ".", "streams", "=", "[", "s", ".", "name", "for", "s", "in", "archive", ".", "list_streams", "(", ")", "]", "self", ".", "tables", "=", "[", "t", ".", "name", "for", "t", "in", "archive", ".", "list_tables", "(", ")", "]" ]
Use another instance, provided as argument.
[ "Use", "another", "instance", "provided", "as", "argument", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-cli/yamcs/cli/dbshell.py#L72-L79
train
portfors-lab/sparkle
sparkle/gui/dialogs/saving_dlg.py
SavingDialog.update_label
def update_label(self): """Updates the text on the accept button, to reflect if the name of the data file will result in opening an existing file, or creating a new one""" current_file = str(self.selectedFiles()[0]) if not '.' in current_file.split(os.path.sep)[-1]: # add hdf5 extention if none given current_file += '.hdf5' if os.path.isfile(current_file): self.setLabelText(QtGui.QFileDialog.Accept, 'Reload') elif os.path.isdir(current_file): self.setLabelText(QtGui.QFileDialog.Accept, 'Open') else: self.setLabelText(QtGui.QFileDialog.Accept, 'Create')
python
def update_label(self): """Updates the text on the accept button, to reflect if the name of the data file will result in opening an existing file, or creating a new one""" current_file = str(self.selectedFiles()[0]) if not '.' in current_file.split(os.path.sep)[-1]: # add hdf5 extention if none given current_file += '.hdf5' if os.path.isfile(current_file): self.setLabelText(QtGui.QFileDialog.Accept, 'Reload') elif os.path.isdir(current_file): self.setLabelText(QtGui.QFileDialog.Accept, 'Open') else: self.setLabelText(QtGui.QFileDialog.Accept, 'Create')
[ "def", "update_label", "(", "self", ")", ":", "current_file", "=", "str", "(", "self", ".", "selectedFiles", "(", ")", "[", "0", "]", ")", "if", "not", "'.'", "in", "current_file", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "1", "]", ":", "# add hdf5 extention if none given", "current_file", "+=", "'.hdf5'", "if", "os", ".", "path", ".", "isfile", "(", "current_file", ")", ":", "self", ".", "setLabelText", "(", "QtGui", ".", "QFileDialog", ".", "Accept", ",", "'Reload'", ")", "elif", "os", ".", "path", ".", "isdir", "(", "current_file", ")", ":", "self", ".", "setLabelText", "(", "QtGui", ".", "QFileDialog", ".", "Accept", ",", "'Open'", ")", "else", ":", "self", ".", "setLabelText", "(", "QtGui", ".", "QFileDialog", ".", "Accept", ",", "'Create'", ")" ]
Updates the text on the accept button, to reflect if the name of the data file will result in opening an existing file, or creating a new one
[ "Updates", "the", "text", "on", "the", "accept", "button", "to", "reflect", "if", "the", "name", "of", "the", "data", "file", "will", "result", "in", "opening", "an", "existing", "file", "or", "creating", "a", "new", "one" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/dialogs/saving_dlg.py#L28-L41
train
wylee/runcommands
runcommands/util/path.py
abs_path
def abs_path(path, format_kwargs={}, relative_to=None, keep_slash=False): """Get abs. path for ``path``. ``path`` may be a relative or absolute file system path or an asset path. If ``path`` is already an abs. path, it will be returned as is. Otherwise, it will be converted into a normalized abs. path. If ``relative_to`` is passed *and* ``path`` is not absolute, the path will be joined to the specified prefix before it's made absolute. If ``path`` ends with a slash, it will be stripped unless ``keep_slash`` is set (for use with ``rsync``, for example). >>> file_path = os.path.normpath(__file__) >>> dir_name = os.path.dirname(file_path) >>> file_name = os.path.basename(file_path) >>> os.chdir(dir_name) >>> >>> abs_path(file_name) == file_path True >>> abs_path('runcommands.util:') == dir_name True >>> abs_path('runcommands.util:path.py') == file_path True >>> abs_path('/{xyz}', format_kwargs={'xyz': 'abc'}) '/abc' >>> abs_path('banana', relative_to='/usr') '/usr/banana' >>> abs_path('/usr/banana/') '/usr/banana' >>> abs_path('banana/', relative_to='/usr', keep_slash=True) '/usr/banana/' >>> abs_path('runcommands.util:banana/', keep_slash=True) == (dir_name + '/banana/') True """ if format_kwargs: path = path.format_map(format_kwargs) has_slash = path.endswith(os.sep) if os.path.isabs(path): path = os.path.normpath(path) elif ':' in path: path = asset_path(path, keep_slash=False) else: path = os.path.expanduser(path) if relative_to: path = os.path.join(relative_to, path) path = os.path.abspath(path) path = os.path.normpath(path) if has_slash and keep_slash: path = '{path}{slash}'.format(path=path, slash=os.sep) return path
python
def abs_path(path, format_kwargs={}, relative_to=None, keep_slash=False): """Get abs. path for ``path``. ``path`` may be a relative or absolute file system path or an asset path. If ``path`` is already an abs. path, it will be returned as is. Otherwise, it will be converted into a normalized abs. path. If ``relative_to`` is passed *and* ``path`` is not absolute, the path will be joined to the specified prefix before it's made absolute. If ``path`` ends with a slash, it will be stripped unless ``keep_slash`` is set (for use with ``rsync``, for example). >>> file_path = os.path.normpath(__file__) >>> dir_name = os.path.dirname(file_path) >>> file_name = os.path.basename(file_path) >>> os.chdir(dir_name) >>> >>> abs_path(file_name) == file_path True >>> abs_path('runcommands.util:') == dir_name True >>> abs_path('runcommands.util:path.py') == file_path True >>> abs_path('/{xyz}', format_kwargs={'xyz': 'abc'}) '/abc' >>> abs_path('banana', relative_to='/usr') '/usr/banana' >>> abs_path('/usr/banana/') '/usr/banana' >>> abs_path('banana/', relative_to='/usr', keep_slash=True) '/usr/banana/' >>> abs_path('runcommands.util:banana/', keep_slash=True) == (dir_name + '/banana/') True """ if format_kwargs: path = path.format_map(format_kwargs) has_slash = path.endswith(os.sep) if os.path.isabs(path): path = os.path.normpath(path) elif ':' in path: path = asset_path(path, keep_slash=False) else: path = os.path.expanduser(path) if relative_to: path = os.path.join(relative_to, path) path = os.path.abspath(path) path = os.path.normpath(path) if has_slash and keep_slash: path = '{path}{slash}'.format(path=path, slash=os.sep) return path
[ "def", "abs_path", "(", "path", ",", "format_kwargs", "=", "{", "}", ",", "relative_to", "=", "None", ",", "keep_slash", "=", "False", ")", ":", "if", "format_kwargs", ":", "path", "=", "path", ".", "format_map", "(", "format_kwargs", ")", "has_slash", "=", "path", ".", "endswith", "(", "os", ".", "sep", ")", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "elif", "':'", "in", "path", ":", "path", "=", "asset_path", "(", "path", ",", "keep_slash", "=", "False", ")", "else", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "relative_to", ":", "path", "=", "os", ".", "path", ".", "join", "(", "relative_to", ",", "path", ")", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "if", "has_slash", "and", "keep_slash", ":", "path", "=", "'{path}{slash}'", ".", "format", "(", "path", "=", "path", ",", "slash", "=", "os", ".", "sep", ")", "return", "path" ]
Get abs. path for ``path``. ``path`` may be a relative or absolute file system path or an asset path. If ``path`` is already an abs. path, it will be returned as is. Otherwise, it will be converted into a normalized abs. path. If ``relative_to`` is passed *and* ``path`` is not absolute, the path will be joined to the specified prefix before it's made absolute. If ``path`` ends with a slash, it will be stripped unless ``keep_slash`` is set (for use with ``rsync``, for example). >>> file_path = os.path.normpath(__file__) >>> dir_name = os.path.dirname(file_path) >>> file_name = os.path.basename(file_path) >>> os.chdir(dir_name) >>> >>> abs_path(file_name) == file_path True >>> abs_path('runcommands.util:') == dir_name True >>> abs_path('runcommands.util:path.py') == file_path True >>> abs_path('/{xyz}', format_kwargs={'xyz': 'abc'}) '/abc' >>> abs_path('banana', relative_to='/usr') '/usr/banana' >>> abs_path('/usr/banana/') '/usr/banana' >>> abs_path('banana/', relative_to='/usr', keep_slash=True) '/usr/banana/' >>> abs_path('runcommands.util:banana/', keep_slash=True) == (dir_name + '/banana/') True
[ "Get", "abs", ".", "path", "for", "path", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/util/path.py#L7-L63
train
wylee/runcommands
runcommands/util/path.py
paths_to_str
def paths_to_str(paths, format_kwargs={}, delimiter=os.pathsep, asset_paths=False, check_paths=False): """Convert ``paths`` to a single string. Args: paths (str|list): A string like "/a/path:/another/path" or a list of paths; may include absolute paths and/or asset paths; paths that are relative will be left relative format_kwargs (dict): Will be injected into each path delimiter (str): The string used to separate paths asset_paths (bool): Whether paths that look like asset paths will be converted to absolute paths check_paths (bool): Whether paths should be checked to ensure they exist """ if not paths: return '' if isinstance(paths, str): paths = paths.split(delimiter) processed_paths = [] for path in paths: original = path path = path.format_map(format_kwargs) if not os.path.isabs(path): if asset_paths and ':' in path: try: path = asset_path(path) except ValueError: path = None if path is not None and os.path.isdir(path): processed_paths.append(path) elif check_paths: f = locals() printer.warning('Path does not exist: {path} (from {original})'.format_map(f)) return delimiter.join(processed_paths)
python
def paths_to_str(paths, format_kwargs={}, delimiter=os.pathsep, asset_paths=False, check_paths=False): """Convert ``paths`` to a single string. Args: paths (str|list): A string like "/a/path:/another/path" or a list of paths; may include absolute paths and/or asset paths; paths that are relative will be left relative format_kwargs (dict): Will be injected into each path delimiter (str): The string used to separate paths asset_paths (bool): Whether paths that look like asset paths will be converted to absolute paths check_paths (bool): Whether paths should be checked to ensure they exist """ if not paths: return '' if isinstance(paths, str): paths = paths.split(delimiter) processed_paths = [] for path in paths: original = path path = path.format_map(format_kwargs) if not os.path.isabs(path): if asset_paths and ':' in path: try: path = asset_path(path) except ValueError: path = None if path is not None and os.path.isdir(path): processed_paths.append(path) elif check_paths: f = locals() printer.warning('Path does not exist: {path} (from {original})'.format_map(f)) return delimiter.join(processed_paths)
[ "def", "paths_to_str", "(", "paths", ",", "format_kwargs", "=", "{", "}", ",", "delimiter", "=", "os", ".", "pathsep", ",", "asset_paths", "=", "False", ",", "check_paths", "=", "False", ")", ":", "if", "not", "paths", ":", "return", "''", "if", "isinstance", "(", "paths", ",", "str", ")", ":", "paths", "=", "paths", ".", "split", "(", "delimiter", ")", "processed_paths", "=", "[", "]", "for", "path", "in", "paths", ":", "original", "=", "path", "path", "=", "path", ".", "format_map", "(", "format_kwargs", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "if", "asset_paths", "and", "':'", "in", "path", ":", "try", ":", "path", "=", "asset_path", "(", "path", ")", "except", "ValueError", ":", "path", "=", "None", "if", "path", "is", "not", "None", "and", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "processed_paths", ".", "append", "(", "path", ")", "elif", "check_paths", ":", "f", "=", "locals", "(", ")", "printer", ".", "warning", "(", "'Path does not exist: {path} (from {original})'", ".", "format_map", "(", "f", ")", ")", "return", "delimiter", ".", "join", "(", "processed_paths", ")" ]
Convert ``paths`` to a single string. Args: paths (str|list): A string like "/a/path:/another/path" or a list of paths; may include absolute paths and/or asset paths; paths that are relative will be left relative format_kwargs (dict): Will be injected into each path delimiter (str): The string used to separate paths asset_paths (bool): Whether paths that look like asset paths will be converted to absolute paths check_paths (bool): Whether paths should be checked to ensure they exist
[ "Convert", "paths", "to", "a", "single", "string", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/util/path.py#L122-L157
train
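A usage sketch for paths_to_str above. The directories listed are typical Linux locations, not values from the source; entries that do not resolve to existing directories are dropped (with a warning when check_paths=True).

from runcommands.util.path import paths_to_str

path_str = paths_to_str(
    ['/usr/local/bin', '/usr/bin', '{venv}/bin'],   # '{venv}/bin' is filled from format_kwargs
    format_kwargs={'venv': '/opt/venv'},
    check_paths=True,                               # warn about entries that don't exist
)
print(path_str)  # e.g. '/usr/local/bin:/usr/bin' if /opt/venv/bin does not exist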
Naresh1318/crystal
crystal/app.py
index
def index(): """ Renders the dashboard when the server is initially run. Usage description: The rendered HTML allows the user to select a project and the desired run. :return: Template to render, Object that is taken care by flask. """ # Reset current index values when the page is refreshed for k, v in current_index.items(): current_index[k] = 0 logging.info("Dashboard refreshed") # render the template (below) that will use JavaScript to read the stream return render_template("crystal_dashboard.html")
python
def index(): """ Renders the dashboard when the server is initially run. Usage description: The rendered HTML allows the user to select a project and the desired run. :return: Template to render, Object that is taken care by flask. """ # Reset current index values when the page is refreshed for k, v in current_index.items(): current_index[k] = 0 logging.info("Dashboard refreshed") # render the template (below) that will use JavaScript to read the stream return render_template("crystal_dashboard.html")
[ "def", "index", "(", ")", ":", "# Reset current index values when the page is refreshed", "for", "k", ",", "v", "in", "current_index", ".", "items", "(", ")", ":", "current_index", "[", "k", "]", "=", "0", "logging", ".", "info", "(", "\"Dashboard refreshed\"", ")", "# render the template (below) that will use JavaScript to read the stream", "return", "render_template", "(", "\"crystal_dashboard.html\"", ")" ]
Renders the dashboard when the server is initially run. Usage description: The rendered HTML allows the user to select a project and the desired run. :return: Template to render, Object that is taken care by flask.
[ "Renders", "the", "dashboard", "when", "the", "server", "is", "initially", "run", "." ]
6bb43fd1128296cc59b8ed3bc03064cc61c6bd88
https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L58-L74
train
Naresh1318/crystal
crystal/app.py
update
def update(): """ Called by XMLHTTPrequest function periodically to get new graph data. Usage description: This function queries the database and returns all the newly added values. :return: JSON Object, passed on to the JS script. """ assert request.method == "POST", "POST request expected received {}".format(request.method) if request.method == 'POST': # Get figure stats selected_run = request.form['selected_run'] variable_names = utils.get_variables(selected_run).items() if len(current_index) < 1: for _, v_n in variable_names: current_index[v_n] = 0 logging.info("Current index: {}".format(current_index)) data = utils.get_variable_update_dicts(current_index, variable_names, selected_run) return jsonify(data)
python
def update(): """ Called by XMLHTTPrequest function periodically to get new graph data. Usage description: This function queries the database and returns all the newly added values. :return: JSON Object, passed on to the JS script. """ assert request.method == "POST", "POST request expected received {}".format(request.method) if request.method == 'POST': # Get figure stats selected_run = request.form['selected_run'] variable_names = utils.get_variables(selected_run).items() if len(current_index) < 1: for _, v_n in variable_names: current_index[v_n] = 0 logging.info("Current index: {}".format(current_index)) data = utils.get_variable_update_dicts(current_index, variable_names, selected_run) return jsonify(data)
[ "def", "update", "(", ")", ":", "assert", "request", ".", "method", "==", "\"POST\"", ",", "\"POST request expected received {}\"", ".", "format", "(", "request", ".", "method", ")", "if", "request", ".", "method", "==", "'POST'", ":", "# Get figure stats", "selected_run", "=", "request", ".", "form", "[", "'selected_run'", "]", "variable_names", "=", "utils", ".", "get_variables", "(", "selected_run", ")", ".", "items", "(", ")", "if", "len", "(", "current_index", ")", "<", "1", ":", "for", "_", ",", "v_n", "in", "variable_names", ":", "current_index", "[", "v_n", "]", "=", "0", "logging", ".", "info", "(", "\"Current index: {}\"", ".", "format", "(", "current_index", ")", ")", "data", "=", "utils", ".", "get_variable_update_dicts", "(", "current_index", ",", "variable_names", ",", "selected_run", ")", "return", "jsonify", "(", "data", ")" ]
Called by XMLHTTPrequest function periodically to get new graph data. Usage description: This function queries the database and returns all the newly added values. :return: JSON Object, passed on to the JS script.
[ "Called", "by", "XMLHTTPrequest", "function", "periodically", "to", "get", "new", "graph", "data", "." ]
6bb43fd1128296cc59b8ed3bc03064cc61c6bd88
https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L78-L100
train
Naresh1318/crystal
crystal/app.py
get_projects
def get_projects(): """ Send a dictionary of projects that are available on the database. Usage description: This function is usually called to get and display the list of projects available in the database. :return: JSON, {<int_keys>: <project_name>} """ assert request.method == "GET", "GET request expected received {}".format(request.method) try: if request.method == 'GET': projects = utils.get_projects() return jsonify(projects) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
python
def get_projects(): """ Send a dictionary of projects that are available on the database. Usage description: This function is usually called to get and display the list of projects available in the database. :return: JSON, {<int_keys>: <project_name>} """ assert request.method == "GET", "GET request expected received {}".format(request.method) try: if request.method == 'GET': projects = utils.get_projects() return jsonify(projects) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
[ "def", "get_projects", "(", ")", ":", "assert", "request", ".", "method", "==", "\"GET\"", ",", "\"GET request expected received {}\"", ".", "format", "(", "request", ".", "method", ")", "try", ":", "if", "request", ".", "method", "==", "'GET'", ":", "projects", "=", "utils", ".", "get_projects", "(", ")", "return", "jsonify", "(", "projects", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "return", "jsonify", "(", "{", "\"0\"", ":", "\"__EMPTY\"", "}", ")" ]
Send a dictionary of projects that are available on the database. Usage description: This function is usually called to get and display the list of projects available in the database. :return: JSON, {<int_keys>: <project_name>}
[ "Send", "a", "dictionary", "of", "projects", "that", "are", "available", "on", "the", "database", "." ]
6bb43fd1128296cc59b8ed3bc03064cc61c6bd88
https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L104-L121
train
Naresh1318/crystal
crystal/app.py
get_runs
def get_runs(): """ Send a dictionary of runs associated with the selected project. Usage description: This function is usually called to get and display the list of runs associated with a selected project available in the database. :return: JSON, {<int_keys>: <run_name>} """ assert request.method == "POST", "POST request expected received {}".format(request.method) if request.method == "POST": try: selected_project = request.form["selected_project"] runs = utils.get_runs(selected_project) return jsonify(runs) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
python
def get_runs(): """ Send a dictionary of runs associated with the selected project. Usage description: This function is usually called to get and display the list of runs associated with a selected project available in the database. :return: JSON, {<int_keys>: <run_name>} """ assert request.method == "POST", "POST request expected received {}".format(request.method) if request.method == "POST": try: selected_project = request.form["selected_project"] runs = utils.get_runs(selected_project) return jsonify(runs) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
[ "def", "get_runs", "(", ")", ":", "assert", "request", ".", "method", "==", "\"POST\"", ",", "\"POST request expected received {}\"", ".", "format", "(", "request", ".", "method", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "try", ":", "selected_project", "=", "request", ".", "form", "[", "\"selected_project\"", "]", "runs", "=", "utils", ".", "get_runs", "(", "selected_project", ")", "return", "jsonify", "(", "runs", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "return", "jsonify", "(", "{", "\"0\"", ":", "\"__EMPTY\"", "}", ")" ]
Send a dictionary of runs associated with the selected project. Usage description: This function is usually called to get and display the list of runs associated with a selected project available in the database. :return: JSON, {<int_keys>: <run_name>}
[ "Send", "a", "dictionary", "of", "runs", "associated", "with", "the", "selected", "project", "." ]
6bb43fd1128296cc59b8ed3bc03064cc61c6bd88
https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L125-L144
train
Naresh1318/crystal
crystal/app.py
get_variables
def get_variables(): """ Send a dictionary of variables associated with the selected run. Usage description: This function is usually called to get and display the list of runs associated with a selected project available in the database for the user to view. :return: JSON, {<int_keys>: <run_name>} """ assert request.method == "POST", "POST request expected received {}".format(request.method) if request.method == "POST": try: selected_run = request.form["selected_run"] variables = utils.get_variables(selected_run) # Reset current_index when you select a new run variable_names = variables.items() global current_index current_index = {} if len(current_index) < 1: for _, v_n in variable_names: current_index["{}".format(v_n)] = 0 return jsonify(variables) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
python
def get_variables(): """ Send a dictionary of variables associated with the selected run. Usage description: This function is usually called to get and display the list of runs associated with a selected project available in the database for the user to view. :return: JSON, {<int_keys>: <run_name>} """ assert request.method == "POST", "POST request expected received {}".format(request.method) if request.method == "POST": try: selected_run = request.form["selected_run"] variables = utils.get_variables(selected_run) # Reset current_index when you select a new run variable_names = variables.items() global current_index current_index = {} if len(current_index) < 1: for _, v_n in variable_names: current_index["{}".format(v_n)] = 0 return jsonify(variables) except Exception as e: logging.error(e) return jsonify({"0": "__EMPTY"})
[ "def", "get_variables", "(", ")", ":", "assert", "request", ".", "method", "==", "\"POST\"", ",", "\"POST request expected received {}\"", ".", "format", "(", "request", ".", "method", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "try", ":", "selected_run", "=", "request", ".", "form", "[", "\"selected_run\"", "]", "variables", "=", "utils", ".", "get_variables", "(", "selected_run", ")", "# Reset current_index when you select a new run", "variable_names", "=", "variables", ".", "items", "(", ")", "global", "current_index", "current_index", "=", "{", "}", "if", "len", "(", "current_index", ")", "<", "1", ":", "for", "_", ",", "v_n", "in", "variable_names", ":", "current_index", "[", "\"{}\"", ".", "format", "(", "v_n", ")", "]", "=", "0", "return", "jsonify", "(", "variables", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "e", ")", "return", "jsonify", "(", "{", "\"0\"", ":", "\"__EMPTY\"", "}", ")" ]
Send a dictionary of variables associated with the selected run. Usage description: This function is usually called to get and display the list of runs associated with a selected project available in the database for the user to view. :return: JSON, {<int_keys>: <run_name>}
[ "Send", "a", "dictionary", "of", "variables", "associated", "with", "the", "selected", "run", "." ]
6bb43fd1128296cc59b8ed3bc03064cc61c6bd88
https://github.com/Naresh1318/crystal/blob/6bb43fd1128296cc59b8ed3bc03064cc61c6bd88/crystal/app.py#L148-L175
train
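A hypothetical client for the dashboard endpoints above (index, update, get_projects, get_runs, get_variables). The route decorators are not included in these records, so the URL paths and the server address below are assumptions; only the form field names selected_project and selected_run come from the source.

import requests

BASE = 'http://localhost:5000'  # assumed dev-server address

projects = requests.get(BASE + '/get_projects').json()                 # assumed route path
runs = requests.post(BASE + '/get_runs',
                     data={'selected_project': projects['0']}).json()  # assumed route path
variables = requests.post(BASE + '/get_variables',
                          data={'selected_run': runs['0']}).json()     # assumed route path
new_points = requests.post(BASE + '/update',
                           data={'selected_run': runs['0']}).json()    # assumed route path
print(variables, new_points)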
wylee/runcommands
commands.py
install_completion
def install_completion( shell: arg(choices=('bash', 'fish'), help='Shell to install completion for'), to: arg(help='~/.bashrc.d/runcommands.rc or ~/.config/fish/runcommands.fish') = None, overwrite: 'Overwrite if exists' = False): """Install command line completion script. Currently, bash and fish are supported. The corresponding script will be copied to an appropriate directory. If the script already exists at that location, it will be overwritten by default. """ if shell == 'bash': source = 'runcommands:completion/bash/runcommands.rc' to = to or '~/.bashrc.d' elif shell == 'fish': source = 'runcommands:completion/fish/runcommands.fish' to = to or '~/.config/fish/runcommands.fish' source = asset_path(source) destination = os.path.expanduser(to) if os.path.isdir(destination): destination = os.path.join(destination, os.path.basename(source)) printer.info('Installing', shell, 'completion script to:\n ', destination) if os.path.exists(destination): if overwrite: printer.info('Overwriting:\n {destination}'.format_map(locals())) else: message = 'File exists. Overwrite?'.format_map(locals()) overwrite = confirm(message, abort_on_unconfirmed=True) copy_file(source, destination) printer.info('Installed; remember to:\n source {destination}'.format_map(locals()))
python
def install_completion( shell: arg(choices=('bash', 'fish'), help='Shell to install completion for'), to: arg(help='~/.bashrc.d/runcommands.rc or ~/.config/fish/runcommands.fish') = None, overwrite: 'Overwrite if exists' = False): """Install command line completion script. Currently, bash and fish are supported. The corresponding script will be copied to an appropriate directory. If the script already exists at that location, it will be overwritten by default. """ if shell == 'bash': source = 'runcommands:completion/bash/runcommands.rc' to = to or '~/.bashrc.d' elif shell == 'fish': source = 'runcommands:completion/fish/runcommands.fish' to = to or '~/.config/fish/runcommands.fish' source = asset_path(source) destination = os.path.expanduser(to) if os.path.isdir(destination): destination = os.path.join(destination, os.path.basename(source)) printer.info('Installing', shell, 'completion script to:\n ', destination) if os.path.exists(destination): if overwrite: printer.info('Overwriting:\n {destination}'.format_map(locals())) else: message = 'File exists. Overwrite?'.format_map(locals()) overwrite = confirm(message, abort_on_unconfirmed=True) copy_file(source, destination) printer.info('Installed; remember to:\n source {destination}'.format_map(locals()))
[ "def", "install_completion", "(", "shell", ":", "arg", "(", "choices", "=", "(", "'bash'", ",", "'fish'", ")", ",", "help", "=", "'Shell to install completion for'", ")", ",", "to", ":", "arg", "(", "help", "=", "'~/.bashrc.d/runcommands.rc or ~/.config/fish/runcommands.fish'", ")", "=", "None", ",", "overwrite", ":", "'Overwrite if exists'", "=", "False", ")", ":", "if", "shell", "==", "'bash'", ":", "source", "=", "'runcommands:completion/bash/runcommands.rc'", "to", "=", "to", "or", "'~/.bashrc.d'", "elif", "shell", "==", "'fish'", ":", "source", "=", "'runcommands:completion/fish/runcommands.fish'", "to", "=", "to", "or", "'~/.config/fish/runcommands.fish'", "source", "=", "asset_path", "(", "source", ")", "destination", "=", "os", ".", "path", ".", "expanduser", "(", "to", ")", "if", "os", ".", "path", ".", "isdir", "(", "destination", ")", ":", "destination", "=", "os", ".", "path", ".", "join", "(", "destination", ",", "os", ".", "path", ".", "basename", "(", "source", ")", ")", "printer", ".", "info", "(", "'Installing'", ",", "shell", ",", "'completion script to:\\n '", ",", "destination", ")", "if", "os", ".", "path", ".", "exists", "(", "destination", ")", ":", "if", "overwrite", ":", "printer", ".", "info", "(", "'Overwriting:\\n {destination}'", ".", "format_map", "(", "locals", "(", ")", ")", ")", "else", ":", "message", "=", "'File exists. Overwrite?'", ".", "format_map", "(", "locals", "(", ")", ")", "overwrite", "=", "confirm", "(", "message", ",", "abort_on_unconfirmed", "=", "True", ")", "copy_file", "(", "source", ",", "destination", ")", "printer", ".", "info", "(", "'Installed; remember to:\\n source {destination}'", ".", "format_map", "(", "locals", "(", ")", ")", ")" ]
Install command line completion script. Currently, bash and fish are supported. The corresponding script will be copied to an appropriate directory. If the script already exists at that location, it will be overwritten by default.
[ "Install", "command", "line", "completion", "script", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/commands.py#L48-L82
train
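A hedged example of calling install_completion above directly as a plain function (outside the runcommands CLI wrapper); the top-level ``commands`` module import assumes you are running from the repository root.

from commands import install_completion

# Copies the bash completion script to ~/.bashrc.d (assuming that directory
# already exists), overwriting an existing copy without prompting because
# overwrite=True skips the confirm() branch.
install_completion('bash', overwrite=True)
# afterwards: source ~/.bashrc.d/runcommands.rc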
NoviceLive/intellicoder
intellicoder/synthesizers.py
Synthesizer.synthesize
def synthesize(self, modules, use_string, x64, native): """Transform sources.""" # code_opts = CodeOpts( # str.lower, None if use_string else hash_func, # 'reloc_delta', '->', # True) # gen_opts = GenOpts('defs', transformed) print(hash_func) groups = group_by(modules, ends_with_punctuation) sources = self.make_source(groups, self.database) if sources: return stylify_files( {'defs.h': sources[0], 'init.c': sources[1]} ) else: return ''
python
def synthesize(self, modules, use_string, x64, native): """Transform sources.""" # code_opts = CodeOpts( # str.lower, None if use_string else hash_func, # 'reloc_delta', '->', # True) # gen_opts = GenOpts('defs', transformed) print(hash_func) groups = group_by(modules, ends_with_punctuation) sources = self.make_source(groups, self.database) if sources: return stylify_files( {'defs.h': sources[0], 'init.c': sources[1]} ) else: return ''
[ "def", "synthesize", "(", "self", ",", "modules", ",", "use_string", ",", "x64", ",", "native", ")", ":", "# code_opts = CodeOpts(", "# str.lower, None if use_string else hash_func,", "# 'reloc_delta', '->',", "# True)", "# gen_opts = GenOpts('defs', transformed)", "print", "(", "hash_func", ")", "groups", "=", "group_by", "(", "modules", ",", "ends_with_punctuation", ")", "sources", "=", "self", ".", "make_source", "(", "groups", ",", "self", ".", "database", ")", "if", "sources", ":", "return", "stylify_files", "(", "{", "'defs.h'", ":", "sources", "[", "0", "]", ",", "'init.c'", ":", "sources", "[", "1", "]", "}", ")", "else", ":", "return", "''" ]
Transform sources.
[ "Transform", "sources", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L46-L61
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
Synthesizer.make_source
def make_source(self, groups, code_opts, gen_opts): """Build the final source code for all modules.""" modules = self.make_modules(groups, code_opts) var_decls = modules.var_decls relocs = AttrsGetter(modules.relocs) x86, x64 = relocs.get_attrs('x86', 'x64') if code_opts.windll: structs, x86_reloc, x64_reloc = make_windll( modules.structs ) x86 += x86_reloc x64 += x64_reloc else: structs = ''.join(modules.structs) c_relocs = reloc_both(relocs.strings + x86, x64) data = var_decls.strip() c_header = make_c_header( gen_opts.filename, 'NOTICE', modules.typedefs + structs + data ) c_source = make_init( modules.hashes + c_relocs + modules.libprocs, callable(code_opts.hash_func) ) return [c_header, c_source]
python
def make_source(self, groups, code_opts, gen_opts): """Build the final source code for all modules.""" modules = self.make_modules(groups, code_opts) var_decls = modules.var_decls relocs = AttrsGetter(modules.relocs) x86, x64 = relocs.get_attrs('x86', 'x64') if code_opts.windll: structs, x86_reloc, x64_reloc = make_windll( modules.structs ) x86 += x86_reloc x64 += x64_reloc else: structs = ''.join(modules.structs) c_relocs = reloc_both(relocs.strings + x86, x64) data = var_decls.strip() c_header = make_c_header( gen_opts.filename, 'NOTICE', modules.typedefs + structs + data ) c_source = make_init( modules.hashes + c_relocs + modules.libprocs, callable(code_opts.hash_func) ) return [c_header, c_source]
[ "def", "make_source", "(", "self", ",", "groups", ",", "code_opts", ",", "gen_opts", ")", ":", "modules", "=", "self", ".", "make_modules", "(", "groups", ",", "code_opts", ")", "var_decls", "=", "modules", ".", "var_decls", "relocs", "=", "AttrsGetter", "(", "modules", ".", "relocs", ")", "x86", ",", "x64", "=", "relocs", ".", "get_attrs", "(", "'x86'", ",", "'x64'", ")", "if", "code_opts", ".", "windll", ":", "structs", ",", "x86_reloc", ",", "x64_reloc", "=", "make_windll", "(", "modules", ".", "structs", ")", "x86", "+=", "x86_reloc", "x64", "+=", "x64_reloc", "else", ":", "structs", "=", "''", ".", "join", "(", "modules", ".", "structs", ")", "c_relocs", "=", "reloc_both", "(", "relocs", ".", "strings", "+", "x86", ",", "x64", ")", "data", "=", "var_decls", ".", "strip", "(", ")", "c_header", "=", "make_c_header", "(", "gen_opts", ".", "filename", ",", "'NOTICE'", ",", "modules", ".", "typedefs", "+", "structs", "+", "data", ")", "c_source", "=", "make_init", "(", "modules", ".", "hashes", "+", "c_relocs", "+", "modules", ".", "libprocs", ",", "callable", "(", "code_opts", ".", "hash_func", ")", ")", "return", "[", "c_header", ",", "c_source", "]" ]
Build the final source code for all modules.
[ "Build", "the", "final", "source", "code", "for", "all", "modules", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L63-L87
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
Synthesizer.make_modules
def make_modules(self, groups, code_opts): """Build shellcoding files for the module.""" modules = [] for raw_module, raw_funcs in groups: module = raw_module[0].strip().strip(string.punctuation) funcs = [func.strip() for func in raw_funcs] args = [self.database.query_args(func, raw=True) for func in funcs] if self.generic: args = [arg if arg else ('VOID *', []) for arg in args] else: args = [arg for arg in args if arg] if not args: logging.info(_('%s not found.'), module) continue logging.debug(module) module = ModuleSource(module, zip(funcs, args), code_opts) modules.append(module.c_source()) return AttrsGetter(modules)
python
def make_modules(self, groups, code_opts): """Build shellcoding files for the module.""" modules = [] for raw_module, raw_funcs in groups: module = raw_module[0].strip().strip(string.punctuation) funcs = [func.strip() for func in raw_funcs] args = [self.database.query_args(func, raw=True) for func in funcs] if self.generic: args = [arg if arg else ('VOID *', []) for arg in args] else: args = [arg for arg in args if arg] if not args: logging.info(_('%s not found.'), module) continue logging.debug(module) module = ModuleSource(module, zip(funcs, args), code_opts) modules.append(module.c_source()) return AttrsGetter(modules)
[ "def", "make_modules", "(", "self", ",", "groups", ",", "code_opts", ")", ":", "modules", "=", "[", "]", "for", "raw_module", ",", "raw_funcs", "in", "groups", ":", "module", "=", "raw_module", "[", "0", "]", ".", "strip", "(", ")", ".", "strip", "(", "string", ".", "punctuation", ")", "funcs", "=", "[", "func", ".", "strip", "(", ")", "for", "func", "in", "raw_funcs", "]", "args", "=", "[", "self", ".", "database", ".", "query_args", "(", "func", ",", "raw", "=", "True", ")", "for", "func", "in", "funcs", "]", "if", "self", ".", "generic", ":", "args", "=", "[", "arg", "if", "arg", "else", "(", "'VOID *'", ",", "[", "]", ")", "for", "arg", "in", "args", "]", "else", ":", "args", "=", "[", "arg", "for", "arg", "in", "args", "if", "arg", "]", "if", "not", "args", ":", "logging", ".", "info", "(", "_", "(", "'%s not found.'", ")", ",", "module", ")", "continue", "logging", ".", "debug", "(", "module", ")", "module", "=", "ModuleSource", "(", "module", ",", "zip", "(", "funcs", ",", "args", ")", ",", "code_opts", ")", "modules", ".", "append", "(", "module", ".", "c_source", "(", ")", ")", "return", "AttrsGetter", "(", "modules", ")" ]
Build shellcoding files for the module.
[ "Build", "shellcoding", "files", "for", "the", "module", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L89-L109
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_source
def c_source(self): """Return strings.""" relocs = Relocs( ''.join(self.c_self_relocs()), *self.c_module_relocs() ) return Source( ''.join(self.c_typedefs()), '' if self.opts.no_structs else self.c_struct(), ''.join(self.c_hashes()), ''.join(self.c_var_decls()), relocs, self.c_loadlib() + ''.join(self.c_getprocs()) )
python
def c_source(self): """Return strings.""" relocs = Relocs( ''.join(self.c_self_relocs()), *self.c_module_relocs() ) return Source( ''.join(self.c_typedefs()), '' if self.opts.no_structs else self.c_struct(), ''.join(self.c_hashes()), ''.join(self.c_var_decls()), relocs, self.c_loadlib() + ''.join(self.c_getprocs()) )
[ "def", "c_source", "(", "self", ")", ":", "relocs", "=", "Relocs", "(", "''", ".", "join", "(", "self", ".", "c_self_relocs", "(", ")", ")", ",", "*", "self", ".", "c_module_relocs", "(", ")", ")", "return", "Source", "(", "''", ".", "join", "(", "self", ".", "c_typedefs", "(", ")", ")", ",", "''", "if", "self", ".", "opts", ".", "no_structs", "else", "self", ".", "c_struct", "(", ")", ",", "''", ".", "join", "(", "self", ".", "c_hashes", "(", ")", ")", ",", "''", ".", "join", "(", "self", ".", "c_var_decls", "(", ")", ")", ",", "relocs", ",", "self", ".", "c_loadlib", "(", ")", "+", "''", ".", "join", "(", "self", ".", "c_getprocs", "(", ")", ")", ")" ]
Return strings.
[ "Return", "strings", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L134-L146
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_typedefs
def c_typedefs(self): """Get the typedefs of the module.""" defs = [] attrs = self.opts.attrs + '\n' if self.opts.attrs else '' for name, args in self.funcs: logging.debug('name: %s args: %s', name, args) defs.append( 'typedef\n{}\n{}{}({});\n'.format( args[0], attrs, self._c_type_name(name), make_c_args(args[2]) ) ) return defs
python
def c_typedefs(self): """Get the typedefs of the module.""" defs = [] attrs = self.opts.attrs + '\n' if self.opts.attrs else '' for name, args in self.funcs: logging.debug('name: %s args: %s', name, args) defs.append( 'typedef\n{}\n{}{}({});\n'.format( args[0], attrs, self._c_type_name(name), make_c_args(args[2]) ) ) return defs
[ "def", "c_typedefs", "(", "self", ")", ":", "defs", "=", "[", "]", "attrs", "=", "self", ".", "opts", ".", "attrs", "+", "'\\n'", "if", "self", ".", "opts", ".", "attrs", "else", "''", "for", "name", ",", "args", "in", "self", ".", "funcs", ":", "logging", ".", "debug", "(", "'name: %s args: %s'", ",", "name", ",", "args", ")", "defs", ".", "append", "(", "'typedef\\n{}\\n{}{}({});\\n'", ".", "format", "(", "args", "[", "0", "]", ",", "attrs", ",", "self", ".", "_c_type_name", "(", "name", ")", ",", "make_c_args", "(", "args", "[", "2", "]", ")", ")", ")", "return", "defs" ]
Get the typedefs of the module.
[ "Get", "the", "typedefs", "of", "the", "module", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L148-L160
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_struct
def c_struct(self): """Get the struct of the module.""" member = '\n'.join(self.c_member_funcs(True)) if self.opts.windll: return 'struct {{\n{}{} }} {};\n'.format( self._c_dll_base(), member, self.name ) return 'typedef\nstruct {2} {{\n{0}\n{1}}}\n{3};\n'.format( self._c_dll_base(), member, *self._c_struct_names() )
python
def c_struct(self): """Get the struct of the module.""" member = '\n'.join(self.c_member_funcs(True)) if self.opts.windll: return 'struct {{\n{}{} }} {};\n'.format( self._c_dll_base(), member, self.name ) return 'typedef\nstruct {2} {{\n{0}\n{1}}}\n{3};\n'.format( self._c_dll_base(), member, *self._c_struct_names() )
[ "def", "c_struct", "(", "self", ")", ":", "member", "=", "'\\n'", ".", "join", "(", "self", ".", "c_member_funcs", "(", "True", ")", ")", "if", "self", ".", "opts", ".", "windll", ":", "return", "'struct {{\\n{}{} }} {};\\n'", ".", "format", "(", "self", ".", "_c_dll_base", "(", ")", ",", "member", ",", "self", ".", "name", ")", "return", "'typedef\\nstruct {2} {{\\n{0}\\n{1}}}\\n{3};\\n'", ".", "format", "(", "self", ".", "_c_dll_base", "(", ")", ",", "member", ",", "*", "self", ".", "_c_struct_names", "(", ")", ")" ]
Get the struct of the module.
[ "Get", "the", "struct", "of", "the", "module", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L162-L171
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_hashes
def c_hashes(self): """Get the hashes of the module including functions and DLLs. """ if callable(self.opts.hash_func): hashes = [ '# define {}{} {}\n'.format( self.opts.prefix, name, self.opts.hash_func(name) ) for name, dummy_args in self.funcs ] else: hashes = [ make_c_str(self.opts.prefix + name, name) for name, dummy_args in self.funcs ] if self.name != 'kernel32': hashes = [ make_c_str(self.opts.prefix + self.name, self.name) ] + hashes return hashes
python
def c_hashes(self): """Get the hashes of the module including functions and DLLs. """ if callable(self.opts.hash_func): hashes = [ '# define {}{} {}\n'.format( self.opts.prefix, name, self.opts.hash_func(name) ) for name, dummy_args in self.funcs ] else: hashes = [ make_c_str(self.opts.prefix + name, name) for name, dummy_args in self.funcs ] if self.name != 'kernel32': hashes = [ make_c_str(self.opts.prefix + self.name, self.name) ] + hashes return hashes
[ "def", "c_hashes", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "opts", ".", "hash_func", ")", ":", "hashes", "=", "[", "'# define {}{} {}\\n'", ".", "format", "(", "self", ".", "opts", ".", "prefix", ",", "name", ",", "self", ".", "opts", ".", "hash_func", "(", "name", ")", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "else", ":", "hashes", "=", "[", "make_c_str", "(", "self", ".", "opts", ".", "prefix", "+", "name", ",", "name", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "self", ".", "name", "!=", "'kernel32'", ":", "hashes", "=", "[", "make_c_str", "(", "self", ".", "opts", ".", "prefix", "+", "self", ".", "name", ",", "self", ".", "name", ")", "]", "+", "hashes", "return", "hashes" ]
Get the hashes of the module including functions and DLLs.
[ "Get", "the", "hashes", "of", "the", "module", "including", "functions", "and", "DLLs", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L173-L191
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_self_relocs
def c_self_relocs(self): """Build relocation for strings.""" relocs = [] if not callable(self.opts.hash_func): relocs = [ reloc_ptr( self.opts.prefix + name, self.opts.reloc_delta, 'char *' ) for name, dummy_args in self.funcs ] if self.name != 'kernel32': relocs = [ reloc_ptr( self.opts.prefix + self.name, self.opts.reloc_delta, 'char *' ) ] + relocs return relocs
python
def c_self_relocs(self): """Build relocation for strings.""" relocs = [] if not callable(self.opts.hash_func): relocs = [ reloc_ptr( self.opts.prefix + name, self.opts.reloc_delta, 'char *' ) for name, dummy_args in self.funcs ] if self.name != 'kernel32': relocs = [ reloc_ptr( self.opts.prefix + self.name, self.opts.reloc_delta, 'char *' ) ] + relocs return relocs
[ "def", "c_self_relocs", "(", "self", ")", ":", "relocs", "=", "[", "]", "if", "not", "callable", "(", "self", ".", "opts", ".", "hash_func", ")", ":", "relocs", "=", "[", "reloc_ptr", "(", "self", ".", "opts", ".", "prefix", "+", "name", ",", "self", ".", "opts", ".", "reloc_delta", ",", "'char *'", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "self", ".", "name", "!=", "'kernel32'", ":", "relocs", "=", "[", "reloc_ptr", "(", "self", ".", "opts", ".", "prefix", "+", "self", ".", "name", ",", "self", ".", "opts", ".", "reloc_delta", ",", "'char *'", ")", "]", "+", "relocs", "return", "relocs" ]
Build relocation for strings.
[ "Build", "relocation", "for", "strings", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L193-L211
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_var_decls
def c_var_decls(self): """Get the needed variable definitions.""" if self.opts.no_structs: mod_decl = 'HMODULE {} = NULL;\n'.format(self.name) return [mod_decl] + [ '{} *{} = NULL;\n'.format( self._c_type_name(name), name ) for name, dummy_args in self.funcs ] if self.opts.windll: return '' return [ '{} _{} = {{ 0 }};\n'.format( self._c_struct_names()[1], self.name ) ]
python
def c_var_decls(self): """Get the needed variable definitions.""" if self.opts.no_structs: mod_decl = 'HMODULE {} = NULL;\n'.format(self.name) return [mod_decl] + [ '{} *{} = NULL;\n'.format( self._c_type_name(name), name ) for name, dummy_args in self.funcs ] if self.opts.windll: return '' return [ '{} _{} = {{ 0 }};\n'.format( self._c_struct_names()[1], self.name ) ]
[ "def", "c_var_decls", "(", "self", ")", ":", "if", "self", ".", "opts", ".", "no_structs", ":", "mod_decl", "=", "'HMODULE {} = NULL;\\n'", ".", "format", "(", "self", ".", "name", ")", "return", "[", "mod_decl", "]", "+", "[", "'{} *{} = NULL;\\n'", ".", "format", "(", "self", ".", "_c_type_name", "(", "name", ")", ",", "name", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "self", ".", "opts", ".", "windll", ":", "return", "''", "return", "[", "'{} _{} = {{ 0 }};\\n'", ".", "format", "(", "self", ".", "_c_struct_names", "(", ")", "[", "1", "]", ",", "self", ".", "name", ")", "]" ]
Get the needed variable definitions.
[ "Get", "the", "needed", "variable", "definitions", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L213-L229
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_module_relocs
def c_module_relocs(self): """Build relocation for the module variable.""" if self.opts.no_structs or self.opts.windll: return '', '' x86 = reloc_var( self.name, self._c_struct_names()[1], self.opts.reloc_delta, self._c_uses_pointer() ) x64 = '{0} *{1} = &_{1};\n'.format( self._c_struct_names()[1], self.name ) if self._c_uses_pointer() else '' return x86, x64
python
def c_module_relocs(self): """Build relocation for the module variable.""" if self.opts.no_structs or self.opts.windll: return '', '' x86 = reloc_var( self.name, self._c_struct_names()[1], self.opts.reloc_delta, self._c_uses_pointer() ) x64 = '{0} *{1} = &_{1};\n'.format( self._c_struct_names()[1], self.name ) if self._c_uses_pointer() else '' return x86, x64
[ "def", "c_module_relocs", "(", "self", ")", ":", "if", "self", ".", "opts", ".", "no_structs", "or", "self", ".", "opts", ".", "windll", ":", "return", "''", ",", "''", "x86", "=", "reloc_var", "(", "self", ".", "name", ",", "self", ".", "_c_struct_names", "(", ")", "[", "1", "]", ",", "self", ".", "opts", ".", "reloc_delta", ",", "self", ".", "_c_uses_pointer", "(", ")", ")", "x64", "=", "'{0} *{1} = &_{1};\\n'", ".", "format", "(", "self", ".", "_c_struct_names", "(", ")", "[", "1", "]", ",", "self", ".", "name", ")", "if", "self", ".", "_c_uses_pointer", "(", ")", "else", "''", "return", "x86", ",", "x64" ]
Build relocation for the module variable.
[ "Build", "relocation", "for", "the", "module", "variable", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L231-L243
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_loadlib
def c_loadlib(self): """Get the loadlib of the module.""" name = self._c_base_var() kernel32 = 'windll->kernel32.' if self.name == 'kernel32': loadlib = '{} = get_kernel32_base();\n'.format( 'kernel32' if self.opts.no_structs else kernel32 + self.opts.base ) else: loadlib = '{} = {}LoadLibraryA({}{});\n'.format( name, '' if self.opts.no_structs else kernel32, self.opts.prefix, self.name ) return loadlib + self._c_null_check(name)
python
def c_loadlib(self): """Get the loadlib of the module.""" name = self._c_base_var() kernel32 = 'windll->kernel32.' if self.name == 'kernel32': loadlib = '{} = get_kernel32_base();\n'.format( 'kernel32' if self.opts.no_structs else kernel32 + self.opts.base ) else: loadlib = '{} = {}LoadLibraryA({}{});\n'.format( name, '' if self.opts.no_structs else kernel32, self.opts.prefix, self.name ) return loadlib + self._c_null_check(name)
[ "def", "c_loadlib", "(", "self", ")", ":", "name", "=", "self", ".", "_c_base_var", "(", ")", "kernel32", "=", "'windll->kernel32.'", "if", "self", ".", "name", "==", "'kernel32'", ":", "loadlib", "=", "'{} = get_kernel32_base();\\n'", ".", "format", "(", "'kernel32'", "if", "self", ".", "opts", ".", "no_structs", "else", "kernel32", "+", "self", ".", "opts", ".", "base", ")", "else", ":", "loadlib", "=", "'{} = {}LoadLibraryA({}{});\\n'", ".", "format", "(", "name", ",", "''", "if", "self", ".", "opts", ".", "no_structs", "else", "kernel32", ",", "self", ".", "opts", ".", "prefix", ",", "self", ".", "name", ")", "return", "loadlib", "+", "self", ".", "_c_null_check", "(", "name", ")" ]
Get the loadlib of the module.
[ "Get", "the", "loadlib", "of", "the", "module", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L245-L260
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_getprocs
def c_getprocs(self): """Get the getprocs of the module.""" getprocs = [] for name, dummy_args in self.funcs: if name == 'GetProcAddress': if callable(self.opts.hash_func): continue getter = 'get_proc_by_string' elif self.opts.no_structs: getter = 'GetProcAddress' else: getter = 'windll->kernel32.GetProcAddress' if callable(self.opts.hash_func): getter = 'get_proc_by_hash' if self.opts.no_structs: var = name else: var = 'windll->{}.{}'.format(self.name, name) getproc = '{} = ({} *){}({}, {}{});\n'.format( var, self._c_type_name(name), getter, self._c_base_var(), self.opts.prefix, name ) getprocs.append(getproc + self._c_null_check(var)) return getprocs
python
def c_getprocs(self): """Get the getprocs of the module.""" getprocs = [] for name, dummy_args in self.funcs: if name == 'GetProcAddress': if callable(self.opts.hash_func): continue getter = 'get_proc_by_string' elif self.opts.no_structs: getter = 'GetProcAddress' else: getter = 'windll->kernel32.GetProcAddress' if callable(self.opts.hash_func): getter = 'get_proc_by_hash' if self.opts.no_structs: var = name else: var = 'windll->{}.{}'.format(self.name, name) getproc = '{} = ({} *){}({}, {}{});\n'.format( var, self._c_type_name(name), getter, self._c_base_var(), self.opts.prefix, name ) getprocs.append(getproc + self._c_null_check(var)) return getprocs
[ "def", "c_getprocs", "(", "self", ")", ":", "getprocs", "=", "[", "]", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", ":", "if", "name", "==", "'GetProcAddress'", ":", "if", "callable", "(", "self", ".", "opts", ".", "hash_func", ")", ":", "continue", "getter", "=", "'get_proc_by_string'", "elif", "self", ".", "opts", ".", "no_structs", ":", "getter", "=", "'GetProcAddress'", "else", ":", "getter", "=", "'windll->kernel32.GetProcAddress'", "if", "callable", "(", "self", ".", "opts", ".", "hash_func", ")", ":", "getter", "=", "'get_proc_by_hash'", "if", "self", ".", "opts", ".", "no_structs", ":", "var", "=", "name", "else", ":", "var", "=", "'windll->{}.{}'", ".", "format", "(", "self", ".", "name", ",", "name", ")", "getproc", "=", "'{} = ({} *){}({}, {}{});\\n'", ".", "format", "(", "var", ",", "self", ".", "_c_type_name", "(", "name", ")", ",", "getter", ",", "self", ".", "_c_base_var", "(", ")", ",", "self", ".", "opts", ".", "prefix", ",", "name", ")", "getprocs", ".", "append", "(", "getproc", "+", "self", ".", "_c_null_check", "(", "var", ")", ")", "return", "getprocs" ]
Get the getprocs of the module.
[ "Get", "the", "getprocs", "of", "the", "module", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L262-L288
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource.c_member_funcs
def c_member_funcs(self, for_struct=False): """Get the decls of the module.""" decls = [ '{} *{};'.format(self._c_type_name(name), name) for name, dummy_args in self.funcs ] if for_struct: return decls return [self._c_mod_decl()] + decls
python
def c_member_funcs(self, for_struct=False): """Get the decls of the module.""" decls = [ '{} *{};'.format(self._c_type_name(name), name) for name, dummy_args in self.funcs ] if for_struct: return decls return [self._c_mod_decl()] + decls
[ "def", "c_member_funcs", "(", "self", ",", "for_struct", "=", "False", ")", ":", "decls", "=", "[", "'{} *{};'", ".", "format", "(", "self", ".", "_c_type_name", "(", "name", ")", ",", "name", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "for_struct", ":", "return", "decls", "return", "[", "self", ".", "_c_mod_decl", "(", ")", "]", "+", "decls" ]
Get the decls of the module.
[ "Get", "the", "decls", "of", "the", "module", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L290-L298
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
ModuleSource._c_base_var
def _c_base_var(self): """Return the name of the module base variable.""" if self.opts.no_structs: return self.name return 'windll->{}.{}'.format( self.name, self.opts.base )
python
def _c_base_var(self): """Return the name of the module base variable.""" if self.opts.no_structs: return self.name return 'windll->{}.{}'.format( self.name, self.opts.base )
[ "def", "_c_base_var", "(", "self", ")", ":", "if", "self", ".", "opts", ".", "no_structs", ":", "return", "self", ".", "name", "return", "'windll->{}.{}'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "opts", ".", "base", ")" ]
Return the name of the module base variable.
[ "Return", "the", "name", "of", "the", "module", "base", "variable", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L330-L336
train
computational-metabolomics/msp2db
msp2db/utils.py
get_precursor_mz
def get_precursor_mz(exact_mass, precursor_type): """ Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+' Return: neutral mass of compound """ # these are just taken from what was present in the massbank .msp file for those missing the exact mass d = {'[M-H]-': -1.007276, '[M+H]+': 1.007276, '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949) } try: return exact_mass + d[precursor_type] except KeyError as e: print(e) return False
python
def get_precursor_mz(exact_mass, precursor_type): """ Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+' Return: neutral mass of compound """ # these are just taken from what was present in the massbank .msp file for those missing the exact mass d = {'[M-H]-': -1.007276, '[M+H]+': 1.007276, '[M+H-H2O]+': 1.007276 - ((1.007276 * 2) + 15.9949) } try: return exact_mass + d[precursor_type] except KeyError as e: print(e) return False
[ "def", "get_precursor_mz", "(", "exact_mass", ",", "precursor_type", ")", ":", "# these are just taken from what was present in the massbank .msp file for those missing the exact mass", "d", "=", "{", "'[M-H]-'", ":", "-", "1.007276", ",", "'[M+H]+'", ":", "1.007276", ",", "'[M+H-H2O]+'", ":", "1.007276", "-", "(", "(", "1.007276", "*", "2", ")", "+", "15.9949", ")", "}", "try", ":", "return", "exact_mass", "+", "d", "[", "precursor_type", "]", "except", "KeyError", "as", "e", ":", "print", "(", "e", ")", "return", "False" ]
Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+' Return: neutral mass of compound
[ "Calculate", "precursor", "mz", "based", "on", "exact", "mass", "and", "precursor", "type" ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/utils.py#L6-L28
train
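A minimal, self-contained sketch of the adduct arithmetic described in the get_precursor_mz docstring above. The proton mass and water-loss delta mirror the constants hard-coded in that function; the glucose input value is purely illustrative and not part of the original record.

# Illustrative re-implementation of the adduct shifts used by get_precursor_mz.
# 1.007276 Da is the proton mass; the water-loss adduct subtracts H2O (2H + O).
PROTON = 1.007276
ADDUCT_SHIFTS = {
    '[M-H]-': -PROTON,
    '[M+H]+': PROTON,
    '[M+H-H2O]+': PROTON - ((PROTON * 2) + 15.9949),
}

def precursor_mz(exact_mass, precursor_type):
    return exact_mass + ADDUCT_SHIFTS[precursor_type]

# Glucose (exact mass ~180.0634 Da) as an example compound:
print(round(precursor_mz(180.0634, '[M+H]+'), 4))  # 181.0707
print(round(precursor_mz(180.0634, '[M-H]-'), 4))  # 179.0561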
computational-metabolomics/msp2db
msp2db/utils.py
line_count
def line_count(fn): """ Get line count of file Args: fn (str): Path to file Return: Number of lines in file (int) """ with open(fn) as f: for i, l in enumerate(f): pass return i + 1
python
def line_count(fn): """ Get line count of file Args: fn (str): Path to file Return: Number of lines in file (int) """ with open(fn) as f: for i, l in enumerate(f): pass return i + 1
[ "def", "line_count", "(", "fn", ")", ":", "with", "open", "(", "fn", ")", "as", "f", ":", "for", "i", ",", "l", "in", "enumerate", "(", "f", ")", ":", "pass", "return", "i", "+", "1" ]
Get line count of file Args: fn (str): Path to file Return: Number of lines in file (int)
[ "Get", "line", "count", "of", "file" ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/utils.py#L31-L44
train
portfors-lab/sparkle
sparkle/stim/abstract_component.py
AbstractStimulusComponent.amplitude
def amplitude(self, caldb, calv, atten=0): """Calculates the voltage amplitude for this stimulus, using internal intensity value and the given reference intensity & voltage :param caldb: calibration intensity in dbSPL :type caldb: float :param calv: calibration voltage that was used to record the intensity provided :type calv: float """ amp = (10 ** (float(self._intensity+atten-caldb)/20)*calv) return amp
python
def amplitude(self, caldb, calv, atten=0): """Calculates the voltage amplitude for this stimulus, using internal intensity value and the given reference intensity & voltage :param caldb: calibration intensity in dbSPL :type caldb: float :param calv: calibration voltage that was used to record the intensity provided :type calv: float """ amp = (10 ** (float(self._intensity+atten-caldb)/20)*calv) return amp
[ "def", "amplitude", "(", "self", ",", "caldb", ",", "calv", ",", "atten", "=", "0", ")", ":", "amp", "=", "(", "10", "**", "(", "float", "(", "self", ".", "_intensity", "+", "atten", "-", "caldb", ")", "/", "20", ")", "*", "calv", ")", "return", "amp" ]
Calculates the voltage amplitude for this stimulus, using internal intensity value and the given reference intensity & voltage :param caldb: calibration intensity in dbSPL :type caldb: float :param calv: calibration voltage that was used to record the intensity provided :type calv: float
[ "Calculates", "the", "voltage", "amplitude", "for", "this", "stimulus", "using", "internal", "intensity", "value", "and", "the", "given", "reference", "intensity", "&", "voltage" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/abstract_component.py#L38-L48
train
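A worked example of the dB-to-voltage relation in AbstractStimulusComponent.amplitude above; the calibration numbers below are hypothetical and chosen only to make the exponent easy to follow.

# Stand-alone illustration of amp = 10 ** ((intensity + atten - caldb) / 20) * calv.
# The calibration values are made up for the example, not Sparkle defaults.
def amplitude(intensity, caldb, calv, atten=0):
    return (10 ** (float(intensity + atten - caldb) / 20)) * calv

# If 1.0 V was recorded at 100 dB SPL, an 80 dB stimulus sits 20 dB lower,
# i.e. a factor of 10 ** (-20 / 20) = 0.1, so it needs 0.1 V.
print(amplitude(intensity=80, caldb=100, calv=1.0))  # 0.1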
portfors-lab/sparkle
sparkle/stim/abstract_component.py
AbstractStimulusComponent.verify
def verify(self, **kwargs): """Checks this component for invalidating conditions :returns: str -- message if error, 0 otherwise """ if 'duration' in kwargs: if kwargs['duration'] < self._duration: return "Window size must equal or exceed stimulus length" if self._risefall > self._duration: return "Rise and fall times exceed component duration" return 0
python
def verify(self, **kwargs): """Checks this component for invalidating conditions :returns: str -- message if error, 0 otherwise """ if 'duration' in kwargs: if kwargs['duration'] < self._duration: return "Window size must equal or exceed stimulus length" if self._risefall > self._duration: return "Rise and fall times exceed component duration" return 0
[ "def", "verify", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "'duration'", "in", "kwargs", ":", "if", "kwargs", "[", "'duration'", "]", "<", "self", ".", "_duration", ":", "return", "\"Window size must equal or exceed stimulus length\"", "if", "self", ".", "_risefall", ">", "self", ".", "_duration", ":", "return", "\"Rise and fall times exceed component duration\"", "return", "0" ]
Checks this component for invalidating conditions :returns: str -- message if error, 0 otherwise
[ "Checks", "this", "component", "for", "invalidating", "conditions" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/abstract_component.py#L68-L78
train
portfors-lab/sparkle
sparkle/stim/abstract_component.py
AbstractStimulusComponent.stateDict
def stateDict(self): """Saves internal values to be loaded later :returns: dict -- {'parametername': value, ...} """ state = { 'duration' : self._duration, 'intensity' : self._intensity, 'risefall' : self._risefall, 'stim_type' : self.name } return state
python
def stateDict(self): """Saves internal values to be loaded later :returns: dict -- {'parametername': value, ...} """ state = { 'duration' : self._duration, 'intensity' : self._intensity, 'risefall' : self._risefall, 'stim_type' : self.name } return state
[ "def", "stateDict", "(", "self", ")", ":", "state", "=", "{", "'duration'", ":", "self", ".", "_duration", ",", "'intensity'", ":", "self", ".", "_intensity", ",", "'risefall'", ":", "self", ".", "_risefall", ",", "'stim_type'", ":", "self", ".", "name", "}", "return", "state" ]
Saves internal values to be loaded later :returns: dict -- {'parametername': value, ...}
[ "Saves", "internal", "values", "to", "be", "loaded", "later" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/abstract_component.py#L92-L103
train
portfors-lab/sparkle
sparkle/stim/abstract_component.py
AbstractStimulusComponent.loadState
def loadState(self, state): """Loads previously saved values to this component. :param state: return value from `stateDict` :type state: dict """ self._duration = state['duration'] self._intensity = state['intensity'] self._risefall = state['risefall']
python
def loadState(self, state): """Loads previously saved values to this component. :param state: return value from `stateDict` :type state: dict """ self._duration = state['duration'] self._intensity = state['intensity'] self._risefall = state['risefall']
[ "def", "loadState", "(", "self", ",", "state", ")", ":", "self", ".", "_duration", "=", "state", "[", "'duration'", "]", "self", ".", "_intensity", "=", "state", "[", "'intensity'", "]", "self", ".", "_risefall", "=", "state", "[", "'risefall'", "]" ]
Loads previously saved values to this component. :param state: return value from `stateDict` :type state: dict
[ "Loads", "previously", "saved", "values", "to", "this", "component", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/abstract_component.py#L105-L113
train
mediawiki-utilities/python-mwoauth
mwoauth/handshaker.py
Handshaker.initiate
def initiate(self, callback=None): """ Initiate an OAuth handshake with MediaWiki. :Parameters: callback : `str` Callback URL. Defaults to 'oob'. :Returns: A `tuple` of two values: * a MediaWiki URL to direct the user to * a :class:`~mwoauth.RequestToken` representing an access request """ return initiate(self.mw_uri, self.consumer_token, callback=callback or self.callback, user_agent=self.user_agent)
python
def initiate(self, callback=None): """ Initiate an OAuth handshake with MediaWiki. :Parameters: callback : `str` Callback URL. Defaults to 'oob'. :Returns: A `tuple` of two values: * a MediaWiki URL to direct the user to * a :class:`~mwoauth.RequestToken` representing an access request """ return initiate(self.mw_uri, self.consumer_token, callback=callback or self.callback, user_agent=self.user_agent)
[ "def", "initiate", "(", "self", ",", "callback", "=", "None", ")", ":", "return", "initiate", "(", "self", ".", "mw_uri", ",", "self", ".", "consumer_token", ",", "callback", "=", "callback", "or", "self", ".", "callback", ",", "user_agent", "=", "self", ".", "user_agent", ")" ]
Initiate an OAuth handshake with MediaWiki. :Parameters: callback : `str` Callback URL. Defaults to 'oob'. :Returns: A `tuple` of two values: * a MediaWiki URL to direct the user to * a :class:`~mwoauth.RequestToken` representing an access request
[ "Initiate", "an", "OAuth", "handshake", "with", "MediaWiki", "." ]
cd6990753ec3d59b7cfd96a76459f71ef4790cd3
https://github.com/mediawiki-utilities/python-mwoauth/blob/cd6990753ec3d59b7cfd96a76459f71ef4790cd3/mwoauth/handshaker.py#L71-L89
train
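A hedged usage sketch around Handshaker.initiate; the wiki URL and consumer credentials are placeholders (a registered OAuth consumer is required for the call to succeed), and the completion step is assumed from the usual mwoauth flow rather than shown in this record.

# Hypothetical end-to-end flow; only initiate() is documented in the record above.
from mwoauth import ConsumerToken, Handshaker

consumer_token = ConsumerToken("<consumer key>", "<consumer secret>")  # placeholders
handshaker = Handshaker("https://en.wikipedia.org/w/index.php", consumer_token)

# Step 1: get a URL to direct the user to, plus a request token to keep around.
redirect_url, request_token = handshaker.initiate()
print("Send the user to:", redirect_url)

# Step 2 (assumed API): after the user authorizes, trade the callback query
# string for an access token.
# access_token = handshaker.complete(request_token, response_query_string)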
AllTheWayDown/turgles
turgles/gl/uniform.py
_load_variable
def _load_variable(func, program_id, index): """Loads the meta data for a uniform or attribute""" n = 64 # max name length TODO: read from card bufsize = GLsizei(n) length = pointer(GLsizei(0)) size = pointer(GLint(0)) type = pointer(GLenum(0)) uname = create_string_buffer(n) func(program_id, index, bufsize, length, size, type, uname) return size[0], type[0], uname.value.decode('utf8')
python
def _load_variable(func, program_id, index): """Loads the meta data for a uniform or attribute""" n = 64 # max name length TODO: read from card bufsize = GLsizei(n) length = pointer(GLsizei(0)) size = pointer(GLint(0)) type = pointer(GLenum(0)) uname = create_string_buffer(n) func(program_id, index, bufsize, length, size, type, uname) return size[0], type[0], uname.value.decode('utf8')
[ "def", "_load_variable", "(", "func", ",", "program_id", ",", "index", ")", ":", "n", "=", "64", "# max name length TODO: read from card", "bufsize", "=", "GLsizei", "(", "n", ")", "length", "=", "pointer", "(", "GLsizei", "(", "0", ")", ")", "size", "=", "pointer", "(", "GLint", "(", "0", ")", ")", "type", "=", "pointer", "(", "GLenum", "(", "0", ")", ")", "uname", "=", "create_string_buffer", "(", "n", ")", "func", "(", "program_id", ",", "index", ",", "bufsize", ",", "length", ",", "size", ",", "type", ",", "uname", ")", "return", "size", "[", "0", "]", ",", "type", "[", "0", "]", ",", "uname", ".", "value", ".", "decode", "(", "'utf8'", ")" ]
Loads the meta data for a uniform or attribute
[ "Loads", "the", "meta", "data", "for", "a", "uniform", "or", "attribute" ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/gl/uniform.py#L49-L58
train
portfors-lab/sparkle
sparkle/gui/stim/explore_component_editor.py
ExploreComponentEditor.addWidget
def addWidget(self, widget, name): """Add a component editor widget""" self.exploreStimTypeCmbbx.addItem(name) self.componentStack.addWidget(widget) widget.valueChanged.connect(self.valueChanged.emit)
python
def addWidget(self, widget, name): """Add a component editor widget""" self.exploreStimTypeCmbbx.addItem(name) self.componentStack.addWidget(widget) widget.valueChanged.connect(self.valueChanged.emit)
[ "def", "addWidget", "(", "self", ",", "widget", ",", "name", ")", ":", "self", ".", "exploreStimTypeCmbbx", ".", "addItem", "(", "name", ")", "self", ".", "componentStack", ".", "addWidget", "(", "widget", ")", "widget", ".", "valueChanged", ".", "connect", "(", "self", ".", "valueChanged", ".", "emit", ")" ]
Add a component editor widget
[ "Add", "a", "component", "editor", "widget" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/explore_component_editor.py#L56-L60
train
portfors-lab/sparkle
sparkle/gui/stim/explore_component_editor.py
ExploreComponentEditor.saveTemplate
def saveTemplate(self): """Get a json structure of the current inputs, to be able to load later""" savedict = {} for comp_editor in self.widgets(): stim = comp_editor.component() comp_editor.saveToObject() savedict[stim.name] = stim.stateDict() savedict['delay'] = self.delaySpnbx.value() return savedict
python
def saveTemplate(self): """Get a json structure of the current inputs, to be able to load later""" savedict = {} for comp_editor in self.widgets(): stim = comp_editor.component() comp_editor.saveToObject() savedict[stim.name] = stim.stateDict() savedict['delay'] = self.delaySpnbx.value() return savedict
[ "def", "saveTemplate", "(", "self", ")", ":", "savedict", "=", "{", "}", "for", "comp_editor", "in", "self", ".", "widgets", "(", ")", ":", "stim", "=", "comp_editor", ".", "component", "(", ")", "comp_editor", ".", "saveToObject", "(", ")", "savedict", "[", "stim", ".", "name", "]", "=", "stim", ".", "stateDict", "(", ")", "savedict", "[", "'delay'", "]", "=", "self", ".", "delaySpnbx", ".", "value", "(", ")", "return", "savedict" ]
Get a json structure of the current inputs, to be able to load later
[ "Get", "a", "json", "structure", "of", "the", "current", "inputs", "to", "be", "able", "to", "load", "later" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/explore_component_editor.py#L73-L82
train
wylee/runcommands
runcommands/command.py
Command.expand_short_options
def expand_short_options(self, argv): """Convert grouped short options like `-abc` to `-a, -b, -c`. This is necessary because we set ``allow_abbrev=False`` on the ``ArgumentParser`` in :prop:`self.arg_parser`. The argparse docs say ``allow_abbrev`` applies only to long options, but it also affects whether short options grouped behind a single dash will be parsed into multiple short options. """ new_argv = [] for arg in argv: result = self.parse_multi_short_option(arg) new_argv.extend(result) return new_argv
python
def expand_short_options(self, argv): """Convert grouped short options like `-abc` to `-a, -b, -c`. This is necessary because we set ``allow_abbrev=False`` on the ``ArgumentParser`` in :prop:`self.arg_parser`. The argparse docs say ``allow_abbrev`` applies only to long options, but it also affects whether short options grouped behind a single dash will be parsed into multiple short options. """ new_argv = [] for arg in argv: result = self.parse_multi_short_option(arg) new_argv.extend(result) return new_argv
[ "def", "expand_short_options", "(", "self", ",", "argv", ")", ":", "new_argv", "=", "[", "]", "for", "arg", "in", "argv", ":", "result", "=", "self", ".", "parse_multi_short_option", "(", "arg", ")", "new_argv", ".", "extend", "(", "result", ")", "return", "new_argv" ]
Convert grouped short options like `-abc` to `-a, -b, -c`. This is necessary because we set ``allow_abbrev=False`` on the ``ArgumentParser`` in :prop:`self.arg_parser`. The argparse docs say ``allow_abbrev`` applies only to long options, but it also affects whether short options grouped behind a single dash will be parsed into multiple short options.
[ "Convert", "grouped", "short", "options", "like", "-", "abc", "to", "-", "a", "-", "b", "-", "c", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/command.py#L270-L284
train
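A simplified sketch of the grouped-short-option expansion the expand_short_options docstring describes; the real work is delegated to parse_multi_short_option, whose handling of attached values is not shown in this record, so the plain character split below is an assumption.

# Hypothetical, simplified expansion: '-abc' -> '-a', '-b', '-c'.
# Long options and positional arguments pass through unchanged.
def expand_short_options(argv):
    new_argv = []
    for arg in argv:
        if arg.startswith('-') and not arg.startswith('--') and len(arg) > 2:
            new_argv.extend('-' + ch for ch in arg[1:])
        else:
            new_argv.append(arg)
    return new_argv

print(expand_short_options(['-abc', '--verbose', 'build']))
# ['-a', '-b', '-c', '--verbose', 'build']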
wylee/runcommands
runcommands/command.py
Command.find_arg
def find_arg(self, name): """Find arg by normalized arg name or parameter name.""" name = self.normalize_name(name) return self.args.get(name)
python
def find_arg(self, name): """Find arg by normalized arg name or parameter name.""" name = self.normalize_name(name) return self.args.get(name)
[ "def", "find_arg", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "normalize_name", "(", "name", ")", "return", "self", ".", "args", ".", "get", "(", "name", ")" ]
Find arg by normalized arg name or parameter name.
[ "Find", "arg", "by", "normalized", "arg", "name", "or", "parameter", "name", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/command.py#L305-L308
train
wylee/runcommands
runcommands/command.py
Command.find_parameter
def find_parameter(self, name): """Find parameter by name or normalized arg name.""" name = self.normalize_name(name) arg = self.args.get(name) return None if arg is None else arg.parameter
python
def find_parameter(self, name): """Find parameter by name or normalized arg name.""" name = self.normalize_name(name) arg = self.args.get(name) return None if arg is None else arg.parameter
[ "def", "find_parameter", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "normalize_name", "(", "name", ")", "arg", "=", "self", ".", "args", ".", "get", "(", "name", ")", "return", "None", "if", "arg", "is", "None", "else", "arg", ".", "parameter" ]
Find parameter by name or normalized arg name.
[ "Find", "parameter", "by", "name", "or", "normalized", "arg", "name", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/command.py#L310-L314
train
wylee/runcommands
runcommands/command.py
Command.args
def args(self): """Create args from function parameters.""" params = self.parameters args = OrderedDict() # This will be overridden if the command explicitly defines an # arg named help. args['help'] = HelpArg(command=self) normalize_name = self.normalize_name get_arg_config = self.get_arg_config get_short_option = self.get_short_option_for_arg get_long_option = self.get_long_option_for_arg get_inverse_option = self.get_inverse_option_for_arg names = {normalize_name(name) for name in params} used_short_options = set() for param in params.values(): annotation = get_arg_config(param) short_option = annotation.short_option if short_option: used_short_options.add(short_option) for name, param in params.items(): name = normalize_name(name) skip = ( name.startswith('_') or param.kind is param.VAR_KEYWORD or param.kind is param.KEYWORD_ONLY) if skip: continue annotation = get_arg_config(param) container = annotation.container type = annotation.type choices = annotation.choices help = annotation.help inverse_help = annotation.inverse_help short_option = annotation.short_option long_option = annotation.long_option inverse_option = annotation.inverse_option action = annotation.action nargs = annotation.nargs default = param.default if default is not param.empty: if not short_option: short_option = get_short_option(name, names, used_short_options) used_short_options.add(short_option) if not long_option: long_option = get_long_option(name) if not inverse_option: # NOTE: The DISABLE marker evaluates as True inverse_option = get_inverse_option(long_option) args[name] = Arg( command=self, parameter=param, name=name, container=container, type=type, default=default, choices=choices, help=help, inverse_help=inverse_help, short_option=short_option, long_option=long_option, inverse_option=inverse_option, action=action, nargs=nargs, ) option_map = OrderedDict() for arg in args.values(): for option in arg.options: option_map.setdefault(option, []) option_map[option].append(arg) for option, option_args in option_map.items(): if len(option_args) > 1: names = ', '.join(a.parameter.name for a in option_args) message = ( 'Option {option} of command {self.name} maps to multiple parameters: {names}') message = message.format_map(locals()) raise CommandError(message) return args
python
def args(self): """Create args from function parameters.""" params = self.parameters args = OrderedDict() # This will be overridden if the command explicitly defines an # arg named help. args['help'] = HelpArg(command=self) normalize_name = self.normalize_name get_arg_config = self.get_arg_config get_short_option = self.get_short_option_for_arg get_long_option = self.get_long_option_for_arg get_inverse_option = self.get_inverse_option_for_arg names = {normalize_name(name) for name in params} used_short_options = set() for param in params.values(): annotation = get_arg_config(param) short_option = annotation.short_option if short_option: used_short_options.add(short_option) for name, param in params.items(): name = normalize_name(name) skip = ( name.startswith('_') or param.kind is param.VAR_KEYWORD or param.kind is param.KEYWORD_ONLY) if skip: continue annotation = get_arg_config(param) container = annotation.container type = annotation.type choices = annotation.choices help = annotation.help inverse_help = annotation.inverse_help short_option = annotation.short_option long_option = annotation.long_option inverse_option = annotation.inverse_option action = annotation.action nargs = annotation.nargs default = param.default if default is not param.empty: if not short_option: short_option = get_short_option(name, names, used_short_options) used_short_options.add(short_option) if not long_option: long_option = get_long_option(name) if not inverse_option: # NOTE: The DISABLE marker evaluates as True inverse_option = get_inverse_option(long_option) args[name] = Arg( command=self, parameter=param, name=name, container=container, type=type, default=default, choices=choices, help=help, inverse_help=inverse_help, short_option=short_option, long_option=long_option, inverse_option=inverse_option, action=action, nargs=nargs, ) option_map = OrderedDict() for arg in args.values(): for option in arg.options: option_map.setdefault(option, []) option_map[option].append(arg) for option, option_args in option_map.items(): if len(option_args) > 1: names = ', '.join(a.parameter.name for a in option_args) message = ( 'Option {option} of command {self.name} maps to multiple parameters: {names}') message = message.format_map(locals()) raise CommandError(message) return args
[ "def", "args", "(", "self", ")", ":", "params", "=", "self", ".", "parameters", "args", "=", "OrderedDict", "(", ")", "# This will be overridden if the command explicitly defines an", "# arg named help.", "args", "[", "'help'", "]", "=", "HelpArg", "(", "command", "=", "self", ")", "normalize_name", "=", "self", ".", "normalize_name", "get_arg_config", "=", "self", ".", "get_arg_config", "get_short_option", "=", "self", ".", "get_short_option_for_arg", "get_long_option", "=", "self", ".", "get_long_option_for_arg", "get_inverse_option", "=", "self", ".", "get_inverse_option_for_arg", "names", "=", "{", "normalize_name", "(", "name", ")", "for", "name", "in", "params", "}", "used_short_options", "=", "set", "(", ")", "for", "param", "in", "params", ".", "values", "(", ")", ":", "annotation", "=", "get_arg_config", "(", "param", ")", "short_option", "=", "annotation", ".", "short_option", "if", "short_option", ":", "used_short_options", ".", "add", "(", "short_option", ")", "for", "name", ",", "param", "in", "params", ".", "items", "(", ")", ":", "name", "=", "normalize_name", "(", "name", ")", "skip", "=", "(", "name", ".", "startswith", "(", "'_'", ")", "or", "param", ".", "kind", "is", "param", ".", "VAR_KEYWORD", "or", "param", ".", "kind", "is", "param", ".", "KEYWORD_ONLY", ")", "if", "skip", ":", "continue", "annotation", "=", "get_arg_config", "(", "param", ")", "container", "=", "annotation", ".", "container", "type", "=", "annotation", ".", "type", "choices", "=", "annotation", ".", "choices", "help", "=", "annotation", ".", "help", "inverse_help", "=", "annotation", ".", "inverse_help", "short_option", "=", "annotation", ".", "short_option", "long_option", "=", "annotation", ".", "long_option", "inverse_option", "=", "annotation", ".", "inverse_option", "action", "=", "annotation", ".", "action", "nargs", "=", "annotation", ".", "nargs", "default", "=", "param", ".", "default", "if", "default", "is", "not", "param", ".", "empty", ":", "if", "not", "short_option", ":", "short_option", "=", "get_short_option", "(", "name", ",", "names", ",", "used_short_options", ")", "used_short_options", ".", "add", "(", "short_option", ")", "if", "not", "long_option", ":", "long_option", "=", "get_long_option", "(", "name", ")", "if", "not", "inverse_option", ":", "# NOTE: The DISABLE marker evaluates as True", "inverse_option", "=", "get_inverse_option", "(", "long_option", ")", "args", "[", "name", "]", "=", "Arg", "(", "command", "=", "self", ",", "parameter", "=", "param", ",", "name", "=", "name", ",", "container", "=", "container", ",", "type", "=", "type", ",", "default", "=", "default", ",", "choices", "=", "choices", ",", "help", "=", "help", ",", "inverse_help", "=", "inverse_help", ",", "short_option", "=", "short_option", ",", "long_option", "=", "long_option", ",", "inverse_option", "=", "inverse_option", ",", "action", "=", "action", ",", "nargs", "=", "nargs", ",", ")", "option_map", "=", "OrderedDict", "(", ")", "for", "arg", "in", "args", ".", "values", "(", ")", ":", "for", "option", "in", "arg", ".", "options", ":", "option_map", ".", "setdefault", "(", "option", ",", "[", "]", ")", "option_map", "[", "option", "]", ".", "append", "(", "arg", ")", "for", "option", ",", "option_args", "in", "option_map", ".", "items", "(", ")", ":", "if", "len", "(", "option_args", ")", ">", "1", ":", "names", "=", "', '", ".", "join", "(", "a", ".", "parameter", ".", "name", "for", "a", "in", "option_args", ")", "message", "=", "(", "'Option {option} of command {self.name} maps to multiple 
parameters: {names}'", ")", "message", "=", "message", ".", "format_map", "(", "locals", "(", ")", ")", "raise", "CommandError", "(", "message", ")", "return", "args" ]
Create args from function parameters.
[ "Create", "args", "from", "function", "parameters", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/command.py#L377-L466
train
wylee/runcommands
runcommands/command.py
Command.option_map
def option_map(self): """Map command-line options to args.""" option_map = OrderedDict() for arg in self.args.values(): for option in arg.options: option_map[option] = arg return option_map
python
def option_map(self): """Map command-line options to args.""" option_map = OrderedDict() for arg in self.args.values(): for option in arg.options: option_map[option] = arg return option_map
[ "def", "option_map", "(", "self", ")", ":", "option_map", "=", "OrderedDict", "(", ")", "for", "arg", "in", "self", ".", "args", ".", "values", "(", ")", ":", "for", "option", "in", "arg", ".", "options", ":", "option_map", "[", "option", "]", "=", "arg", "return", "option_map" ]
Map command-line options to args.
[ "Map", "command", "-", "line", "options", "to", "args", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/command.py#L524-L530
train
lowandrew/OLCTools
sipprCommon/objectprep.py
Objectprep.objectprep
def objectprep(self): """ Creates fastq files from an in-progress Illumina MiSeq run or create an object and moves files appropriately """ # Create .fastq files if necessary. Otherwise create the metadata object if self.bcltofastq: if self.customsamplesheet: assert os.path.isfile(self.customsamplesheet), 'Cannot find custom sample sheet as specified {}' \ .format(self.customsamplesheet) # Create the FASTQ files self.samples = fastqCreator.CreateFastq(self) # Create a dictionary of the object samples_dict = vars(self.samples) # Extract the required information from the dictionary self.index = samples_dict['index'] self.index_length = samples_dict['indexlength'] self.forward = samples_dict['forwardlength'] self.reverse = samples_dict['reverselength'] self.forwardlength = samples_dict['forward'] self.reverselength = samples_dict['reverse'] self.header = samples_dict['header'] else: self.samples = createObject.ObjectCreation(self)
python
def objectprep(self): """ Creates fastq files from an in-progress Illumina MiSeq run or create an object and moves files appropriately """ # Create .fastq files if necessary. Otherwise create the metadata object if self.bcltofastq: if self.customsamplesheet: assert os.path.isfile(self.customsamplesheet), 'Cannot find custom sample sheet as specified {}' \ .format(self.customsamplesheet) # Create the FASTQ files self.samples = fastqCreator.CreateFastq(self) # Create a dictionary of the object samples_dict = vars(self.samples) # Extract the required information from the dictionary self.index = samples_dict['index'] self.index_length = samples_dict['indexlength'] self.forward = samples_dict['forwardlength'] self.reverse = samples_dict['reverselength'] self.forwardlength = samples_dict['forward'] self.reverselength = samples_dict['reverse'] self.header = samples_dict['header'] else: self.samples = createObject.ObjectCreation(self)
[ "def", "objectprep", "(", "self", ")", ":", "# Create .fastq files if necessary. Otherwise create the metadata object", "if", "self", ".", "bcltofastq", ":", "if", "self", ".", "customsamplesheet", ":", "assert", "os", ".", "path", ".", "isfile", "(", "self", ".", "customsamplesheet", ")", ",", "'Cannot find custom sample sheet as specified {}'", ".", "format", "(", "self", ".", "customsamplesheet", ")", "# Create the FASTQ files", "self", ".", "samples", "=", "fastqCreator", ".", "CreateFastq", "(", "self", ")", "# Create a dictionary of the object", "samples_dict", "=", "vars", "(", "self", ".", "samples", ")", "# Extract the required information from the dictionary", "self", ".", "index", "=", "samples_dict", "[", "'index'", "]", "self", ".", "index_length", "=", "samples_dict", "[", "'indexlength'", "]", "self", ".", "forward", "=", "samples_dict", "[", "'forwardlength'", "]", "self", ".", "reverse", "=", "samples_dict", "[", "'reverselength'", "]", "self", ".", "forwardlength", "=", "samples_dict", "[", "'forward'", "]", "self", ".", "reverselength", "=", "samples_dict", "[", "'reverse'", "]", "self", ".", "header", "=", "samples_dict", "[", "'header'", "]", "else", ":", "self", ".", "samples", "=", "createObject", ".", "ObjectCreation", "(", "self", ")" ]
Creates fastq files from an in-progress Illumina MiSeq run or create an object and moves files appropriately
[ "Creates", "fastq", "files", "from", "an", "in", "-", "progress", "Illumina", "MiSeq", "run", "or", "create", "an", "object", "and", "moves", "files", "appropriately" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/sipprCommon/objectprep.py#L11-L33
train
lowandrew/OLCTools
spadespipeline/fileprep.py
Fileprep.fileprep
def fileprep(self): """Decompress and concatenate .fastq files""" # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.prep, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.metadata: # Set the name of the decompressed, combined .fastq file sample.general.combined = os.path.join(sample.general.outputdirectory, '{sample_name}_combined.fastq' .format(sample_name=sample.name)) self.queue.put(sample) self.queue.join()
python
def fileprep(self): """Decompress and concatenate .fastq files""" # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.prep, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.metadata: # Set the name of the decompressed, combined .fastq file sample.general.combined = os.path.join(sample.general.outputdirectory, '{sample_name}_combined.fastq' .format(sample_name=sample.name)) self.queue.put(sample) self.queue.join()
[ "def", "fileprep", "(", "self", ")", ":", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "prep", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "metadata", ":", "# Set the name of the decompressed, combined .fastq file", "sample", ".", "general", ".", "combined", "=", "os", ".", "path", ".", "join", "(", "sample", ".", "general", ".", "outputdirectory", ",", "'{sample_name}_combined.fastq'", ".", "format", "(", "sample_name", "=", "sample", ".", "name", ")", ")", "self", ".", "queue", ".", "put", "(", "sample", ")", "self", ".", "queue", ".", "join", "(", ")" ]
Decompress and concatenate .fastq files
[ "Decompress", "and", "concatenate", ".", "fastq", "files" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/fileprep.py#L11-L26
train
NoviceLive/intellicoder
intellicoder/converters.py
chunked_join
def chunked_join(iterable, int1, int2, str1, str2, func): """Chunk and join.""" chunks = list(chunked(iterable, int1)) logging.debug(chunks) groups = [list(chunked(chunk, int2)) for chunk in chunks] logging.debug(groups) return str1.join([ str2.join([func(''.join(chunk)) for chunk in chunks]) for chunks in groups ])
python
def chunked_join(iterable, int1, int2, str1, str2, func):
    """Chunk and join."""
    chunks = list(chunked(iterable, int1))
    logging.debug(chunks)
    groups = [list(chunked(chunk, int2)) for chunk in chunks]
    logging.debug(groups)
    return str1.join([
        str2.join([func(''.join(chunk)) for chunk in chunks])
        for chunks in groups
    ])
[ "def", "chunked_join", "(", "iterable", ",", "int1", ",", "int2", ",", "str1", ",", "str2", ",", "func", ")", ":", "chunks", "=", "list", "(", "chunked", "(", "iterable", ",", "int1", ")", ")", "logging", ".", "debug", "(", "chunks", ")", "groups", "=", "[", "list", "(", "chunked", "(", "chunk", ",", "int2", ")", ")", "for", "chunk", "in", "chunks", "]", "logging", ".", "debug", "(", "groups", ")", "return", "str1", ".", "join", "(", "[", "str2", ".", "join", "(", "[", "func", "(", "''", ".", "join", "(", "chunk", ")", ")", "for", "chunk", "in", "chunks", "]", ")", "for", "chunks", "in", "groups", "]", ")" ]
Chunk and join.
[ "Chunk", "and", "join", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L100-L109
train
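A hedged usage sketch of chunked_join: it assumes chunked() comes from more_itertools (consistent with the call pattern above, but not confirmed by this record) and drops the debug logging.
from more_itertools import chunked   # assumed source of chunked(); pip install more-itertools

def chunked_join(iterable, int1, int2, str1, str2, func):
    # Copy of the function above without the logging calls.
    chunks = list(chunked(iterable, int1))
    groups = [list(chunked(chunk, int2)) for chunk in chunks]
    return str1.join([
        str2.join([func(''.join(chunk)) for chunk in chunks])
        for chunks in groups
    ])

# 16 hex digits -> rows of 8 digits, grouped in pairs, upper-cased
print(chunked_join('deadbeefcafebabe', 8, 2, '\n', ' ', str.upper))
# DE AD BE EF
# CA FE BA BE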
NoviceLive/intellicoder
intellicoder/converters.py
bytes_to_c_string
def bytes_to_c_string(data):
    """
    Convert the hexadecimal string in to C-style string.
    """
    rows = chunked_join(data, 20, 2, '"\n "', '', r'\x' + X)
    logging.debug(_('Returning rows: %s'), rows)
    return '"{}";'.format(rows)
python
def bytes_to_c_string(data):
    """
    Convert the hexadecimal string in to C-style string.
    """
    rows = chunked_join(data, 20, 2, '"\n "', '', r'\x' + X)
    logging.debug(_('Returning rows: %s'), rows)
    return '"{}";'.format(rows)
[ "def", "bytes_to_c_string", "(", "data", ")", ":", "rows", "=", "chunked_join", "(", "data", ",", "20", ",", "2", ",", "'\"\\n \"'", ",", "''", ",", "r'\\x'", "+", "X", ")", "logging", ".", "debug", "(", "_", "(", "'Returning rows: %s'", ")", ",", "rows", ")", "return", "'\"{}\";'", ".", "format", "(", "rows", ")" ]
Convert the hexadecimal string in to C-style string.
[ "Convert", "the", "hexadecimal", "string", "in", "to", "C", "-", "style", "string", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L112-L118
train
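X is a module-level helper of converters.py that is not shown in these records, so its exact behaviour is unknown here; judging from the call, r'\x' + X must end up formatting each two-digit chunk as an escaped byte. The standalone approximation below only illustrates the expected output shape and is an assumption, not intellicoder code.
from more_itertools import chunked   # assumed, as in the chunked_join sketch above

def bytes_to_c_string_approx(data):
    # Wrap every 20 hex digits onto a new quoted line, escaping each byte as \xNN.
    rows = '"\n "'.join(
        ''.join(r'\x' + ''.join(pair) for pair in chunked(row, 2))
        for row in (''.join(chunk) for chunk in chunked(data, 20))
    )
    return '"{}";'.format(rows)

print(bytes_to_c_string_approx('fc4883e4f0e8c000000041514150'))
# "\xfc\x48\x83\xe4\xf0\xe8\xc0\x00\x00\x00"
#  "\x41\x51\x41\x50";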
NoviceLive/intellicoder
intellicoder/converters.py
bytes_to_c_array
def bytes_to_c_array(data):
    """
    Make a C array using the given string.
    """
    chars = [
        "'{}'".format(encode_escape(i))
        for i in decode_escape(data)
    ]
    return ', '.join(chars) + ', 0'
python
def bytes_to_c_array(data):
    """
    Make a C array using the given string.
    """
    chars = [
        "'{}'".format(encode_escape(i))
        for i in decode_escape(data)
    ]
    return ', '.join(chars) + ', 0'
[ "def", "bytes_to_c_array", "(", "data", ")", ":", "chars", "=", "[", "\"'{}'\"", ".", "format", "(", "encode_escape", "(", "i", ")", ")", "for", "i", "in", "decode_escape", "(", "data", ")", "]", "return", "', '", ".", "join", "(", "chars", ")", "+", "', 0'" ]
Make a C array using the given string.
[ "Make", "a", "C", "array", "using", "the", "given", "string", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L121-L129
train
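encode_escape and decode_escape are intellicoder helpers not included in these records, so the sketch below uses plain characters instead of escape handling; it is a rough illustration of the output shape only, not the library's behaviour for escaped input.
def bytes_to_c_array_plain(data):
    # Emit one single-quoted C char literal per character, NUL-terminated.
    chars = ["'{}'".format(char) for char in data]
    return ', '.join(chars) + ', 0'

print(bytes_to_c_array_plain('hi'))
# 'h', 'i', 0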
NoviceLive/intellicoder
intellicoder/converters.py
Converter.uni_from
def uni_from(cls, source, *args, **kwargs):
    """Unified from."""
    logging.debug(_('source: %s, args: %s, kwargs: %s'),
                  source, args, kwargs)
    return getattr(cls, cls.cons_dict[source])(*args, **kwargs)
python
def uni_from(cls, source, *args, **kwargs):
    """Unified from."""
    logging.debug(_('source: %s, args: %s, kwargs: %s'),
                  source, args, kwargs)
    return getattr(cls, cls.cons_dict[source])(*args, **kwargs)
[ "def", "uni_from", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "debug", "(", "_", "(", "'source: %s, args: %s, kwargs: %s'", ")", ",", "source", ",", "args", ",", "kwargs", ")", "return", "getattr", "(", "cls", ",", "cls", ".", "cons_dict", "[", "source", "]", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Unified from.
[ "Unified", "from", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L46-L50
train
NoviceLive/intellicoder
intellicoder/converters.py
Converter.uni_to
def uni_to(self, target, *args, **kwargs):
    """Unified to."""
    logging.debug(_('target: %s, args: %s, kwargs: %s'),
                  target, args, kwargs)
    return getattr(self, self.func_dict[target])(*args, **kwargs)
python
def uni_to(self, target, *args, **kwargs):
    """Unified to."""
    logging.debug(_('target: %s, args: %s, kwargs: %s'),
                  target, args, kwargs)
    return getattr(self, self.func_dict[target])(*args, **kwargs)
[ "def", "uni_to", "(", "self", ",", "target", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "debug", "(", "_", "(", "'target: %s, args: %s, kwargs: %s'", ")", ",", "target", ",", "args", ",", "kwargs", ")", "return", "getattr", "(", "self", ",", "self", ".", "func_dict", "[", "target", "]", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Unified to.
[ "Unified", "to", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L52-L56
train
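uni_from and uni_to (the two records above) both dispatch through getattr using name-to-method lookup tables, cons_dict and func_dict, whose contents are not shown here; the self-contained sketch below reproduces that dispatch pattern with an invented Demo class and invented table entries, minus the logging.
class Demo:
    cons_dict = {'hex': 'from_hex'}        # source name -> constructor method name (hypothetical)
    func_dict = {'c_array': 'to_c_array'}  # target name -> conversion method name (hypothetical)

    def __init__(self, raw):
        self.raw = raw

    @classmethod
    def from_hex(cls, text):
        return cls(bytes.fromhex(text))

    def to_c_array(self):
        return ', '.join(str(byte) for byte in self.raw) + ', 0'

    @classmethod
    def uni_from(cls, source, *args, **kwargs):
        # Same getattr dispatch as Converter.uni_from above.
        return getattr(cls, cls.cons_dict[source])(*args, **kwargs)

    def uni_to(self, target, *args, **kwargs):
        # Same getattr dispatch as Converter.uni_to above.
        return getattr(self, self.func_dict[target])(*args, **kwargs)

demo = Demo.uni_from('hex', 'deadbeef')
print(demo.uni_to('c_array'))   # 222, 173, 190, 239, 0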