column            type           lengths / classes
repo              stringlengths  7–55
path              stringlengths  4–127
func_name         stringlengths  1–88
original_string   stringlengths  75–19.8k
language          stringclasses  1 value
code              stringlengths  75–19.8k
code_tokens       sequence       –
docstring         stringlengths  3–17.3k
docstring_tokens  sequence       –
sha               stringlengths  40–40
url               stringlengths  87–242
partition         stringclasses  1 value
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/core/probe.py
Probe.value
def value(self):
    """ reads the value from the instrument """
    value = getattr(self.instrument, self.probe_name)
    self.buffer.append(value)

    return value
python
def value(self):
    """ reads the value from the instrument """
    value = getattr(self.instrument, self.probe_name)
    self.buffer.append(value)

    return value
[ "def", "value", "(", "self", ")", ":", "value", "=", "getattr", "(", "self", ".", "instrument", ",", "self", ".", "probe_name", ")", "self", ".", "buffer", ".", "append", "(", "value", ")", "return", "value" ]
reads the value from the instrument
[ "reads", "the", "value", "from", "the", "instrument" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/probe.py#L58-L66
train
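A runnable sketch of the pattern this record captures: the probe proxies an instrument attribute via getattr() and buffers each reading. The dummy instrument, the deque buffer, and exposing value as a property are assumptions for illustration, not pylabcontrol's actual API surface.

from collections import deque

class DummyInstrument(object):
    # stands in for a pylabcontrol Instrument; probes read plain attributes via getattr()
    internal = 42.0

class DummyProbe(object):
    def __init__(self, instrument, probe_name, buffer_length=100):
        self.instrument = instrument
        self.probe_name = probe_name
        self.buffer = deque(maxlen=buffer_length)

    @property
    def value(self):
        """ reads the value from the instrument """
        value = getattr(self.instrument, self.probe_name)
        self.buffer.append(value)
        return value

probe = DummyProbe(DummyInstrument(), 'internal')
print(probe.value)    # 42.0; the reading is also appended to probe.buffer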
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/core/probe.py
Probe.load_and_append
def load_and_append(probe_dict, probes, instruments={}):
    """
    load probes from probe_dict and append to probes; if additional instruments are required,
    create them and add them to instruments

    Args:
        probe_dict: dictionary of the form

            probe_dict = {
                instrument1_name: probe1_of_instrument1, probe2_of_instrument1, ...
                instrument2_name: probe1_of_instrument2, probe2_of_instrument2, ...
            }

            where probe1_of_instrument1 is a valid name of a probe in the instrument of class instrument1_name

            # optional arguments (as key value pairs):
            #     probe_name
            #     instrument_name
            #     probe_info
            #     buffer_length
            #
            # or
            #
            # probe_dict = {
            #     name_of_probe_1: instrument_class_1
            #     name_of_probe_2: instrument_class_2
            #     ...
            # }

        probes: dictionary of the form

            probes = {
                instrument1_name: {name_of_probe_1_of_instrument1: probe_1_instance,
                                   name_of_probe_2_instrument1: probe_2_instance},
                ...
            }

        instruments: dictionary of the form

            instruments = {
                name_of_instrument_1: instance_of_instrument_1,
                name_of_instrument_2: instance_of_instrument_2,
                ...
            }

    Returns:
        updated_probes = {name_of_probe_1: probe_1_instance, name_of_probe_2: probe_2_instance, ...}
        loaded_failed = {name_of_probe_1: exception_1, name_of_probe_2: exception_2, ...}
        updated_instruments
    """
    loaded_failed = {}
    updated_probes = {}
    updated_probes.update(probes)
    updated_instruments = {}
    updated_instruments.update(instruments)

    # ===== load new instruments =======
    new_instruments = list(set(probe_dict.keys()) - set(probes.keys()))
    if new_instruments != []:
        updated_instruments, failed = Instrument.load_and_append(
            {instrument_name: instrument_name for instrument_name in new_instruments}, instruments)

        if failed != []:
            # if loading an instrument fails, all the probes that depend on that instrument also fail;
            # ignore failed instruments that existed already, because they failed precisely because they existed
            for failed_instrument in set(failed) - set(instruments.keys()):
                # NOTE: probe_dict values are comma-separated strings, so this iterates
                # over single characters rather than whole probe names
                for probe_name in probe_dict[failed_instrument]:
                    loaded_failed[probe_name] = ValueError(
                        'failed to load instrument {:s}: already exists. Did not load!'.format(failed_instrument))
                del probe_dict[failed_instrument]

    # ===== now we are sure that all the instruments needed for the probes exist
    for instrument_name, probe_names in probe_dict.items():
        if instrument_name not in updated_probes:
            updated_probes.update({instrument_name: {}})
        for probe_name in probe_names.split(','):
            if probe_name in updated_probes[instrument_name]:
                loaded_failed[probe_name] = ValueError(
                    'failed to load probe {:s}: already exists. Did not load!'.format(probe_name))
            else:
                probe_instance = Probe(updated_instruments[instrument_name], probe_name)
                updated_probes[instrument_name].update({probe_name: probe_instance})

    return updated_probes, loaded_failed, updated_instruments
python
def load_and_append(probe_dict, probes, instruments={}):
    """
    load probes from probe_dict and append to probes; if additional instruments are required,
    create them and add them to instruments

    Args:
        probe_dict: dictionary of the form

            probe_dict = {
                instrument1_name: probe1_of_instrument1, probe2_of_instrument1, ...
                instrument2_name: probe1_of_instrument2, probe2_of_instrument2, ...
            }

            where probe1_of_instrument1 is a valid name of a probe in the instrument of class instrument1_name

            # optional arguments (as key value pairs):
            #     probe_name
            #     instrument_name
            #     probe_info
            #     buffer_length
            #
            # or
            #
            # probe_dict = {
            #     name_of_probe_1: instrument_class_1
            #     name_of_probe_2: instrument_class_2
            #     ...
            # }

        probes: dictionary of the form

            probes = {
                instrument1_name: {name_of_probe_1_of_instrument1: probe_1_instance,
                                   name_of_probe_2_instrument1: probe_2_instance},
                ...
            }

        instruments: dictionary of the form

            instruments = {
                name_of_instrument_1: instance_of_instrument_1,
                name_of_instrument_2: instance_of_instrument_2,
                ...
            }

    Returns:
        updated_probes = {name_of_probe_1: probe_1_instance, name_of_probe_2: probe_2_instance, ...}
        loaded_failed = {name_of_probe_1: exception_1, name_of_probe_2: exception_2, ...}
        updated_instruments
    """
    loaded_failed = {}
    updated_probes = {}
    updated_probes.update(probes)
    updated_instruments = {}
    updated_instruments.update(instruments)

    # ===== load new instruments =======
    new_instruments = list(set(probe_dict.keys()) - set(probes.keys()))
    if new_instruments != []:
        updated_instruments, failed = Instrument.load_and_append(
            {instrument_name: instrument_name for instrument_name in new_instruments}, instruments)

        if failed != []:
            # if loading an instrument fails, all the probes that depend on that instrument also fail;
            # ignore failed instruments that existed already, because they failed precisely because they existed
            for failed_instrument in set(failed) - set(instruments.keys()):
                # NOTE: probe_dict values are comma-separated strings, so this iterates
                # over single characters rather than whole probe names
                for probe_name in probe_dict[failed_instrument]:
                    loaded_failed[probe_name] = ValueError(
                        'failed to load instrument {:s}: already exists. Did not load!'.format(failed_instrument))
                del probe_dict[failed_instrument]

    # ===== now we are sure that all the instruments needed for the probes exist
    for instrument_name, probe_names in probe_dict.items():
        if instrument_name not in updated_probes:
            updated_probes.update({instrument_name: {}})
        for probe_name in probe_names.split(','):
            if probe_name in updated_probes[instrument_name]:
                loaded_failed[probe_name] = ValueError(
                    'failed to load probe {:s}: already exists. Did not load!'.format(probe_name))
            else:
                probe_instance = Probe(updated_instruments[instrument_name], probe_name)
                updated_probes[instrument_name].update({probe_name: probe_instance})

    return updated_probes, loaded_failed, updated_instruments
[ "def", "load_and_append", "(", "probe_dict", ",", "probes", ",", "instruments", "=", "{", "}", ")", ":", "loaded_failed", "=", "{", "}", "updated_probes", "=", "{", "}", "updated_probes", ".", "update", "(", "probes", ")", "updated_instruments", "=", "{", "}", "updated_instruments", ".", "update", "(", "instruments", ")", "# ===== load new instruments =======", "new_instruments", "=", "list", "(", "set", "(", "probe_dict", ".", "keys", "(", ")", ")", "-", "set", "(", "probes", ".", "keys", "(", ")", ")", ")", "if", "new_instruments", "!=", "[", "]", ":", "updated_instruments", ",", "failed", "=", "Instrument", ".", "load_and_append", "(", "{", "instrument_name", ":", "instrument_name", "for", "instrument_name", "in", "new_instruments", "}", ",", "instruments", ")", "if", "failed", "!=", "[", "]", ":", "# if loading an instrument fails all the probes that depend on that instrument also fail", "# ignore the failed instrument that did exist already because they failed because they did exist", "for", "failed_instrument", "in", "set", "(", "failed", ")", "-", "set", "(", "instruments", ".", "keys", "(", ")", ")", ":", "for", "probe_name", "in", "probe_dict", "[", "failed_instrument", "]", ":", "loaded_failed", "[", "probe_name", "]", "=", "ValueError", "(", "'failed to load instrument {:s} already exists. Did not load!'", ".", "format", "(", "failed_instrument", ")", ")", "del", "probe_dict", "[", "failed_instrument", "]", "# ===== now we are sure that all the instruments that we need for the probes already exist", "for", "instrument_name", ",", "probe_names", "in", "probe_dict", ".", "items", "(", ")", ":", "if", "not", "instrument_name", "in", "updated_probes", ":", "updated_probes", ".", "update", "(", "{", "instrument_name", ":", "{", "}", "}", ")", "for", "probe_name", "in", "probe_names", ".", "split", "(", "','", ")", ":", "if", "probe_name", "in", "updated_probes", "[", "instrument_name", "]", ":", "loaded_failed", "[", "probe_name", "]", "=", "ValueError", "(", "'failed to load probe {:s} already exists. Did not load!'", ".", "format", "(", "probe_name", ")", ")", "else", ":", "probe_instance", "=", "Probe", "(", "updated_instruments", "[", "instrument_name", "]", ",", "probe_name", ")", "updated_probes", "[", "instrument_name", "]", ".", "update", "(", "{", "probe_name", ":", "probe_instance", "}", ")", "return", "updated_probes", ",", "loaded_failed", ",", "updated_instruments" ]
load probes from probe_dict and append to probes; if additional instruments are required,
create them and add them to instruments

Args:
    probe_dict: dictionary of the form

        probe_dict = {
            instrument1_name: probe1_of_instrument1, probe2_of_instrument1, ...
            instrument2_name: probe1_of_instrument2, probe2_of_instrument2, ...
        }

        where probe1_of_instrument1 is a valid name of a probe in the instrument of class instrument1_name

        # optional arguments (as key value pairs):
        #     probe_name
        #     instrument_name
        #     probe_info
        #     buffer_length
        #
        # or
        #
        # probe_dict = {
        #     name_of_probe_1: instrument_class_1
        #     name_of_probe_2: instrument_class_2
        #     ...
        # }

    probes: dictionary of the form

        probes = {
            instrument1_name: {name_of_probe_1_of_instrument1: probe_1_instance,
                               name_of_probe_2_instrument1: probe_2_instance},
            ...
        }

    instruments: dictionary of the form

        instruments = {
            name_of_instrument_1: instance_of_instrument_1,
            name_of_instrument_2: instance_of_instrument_2,
            ...
        }

Returns:
    updated_probes = {name_of_probe_1: probe_1_instance, name_of_probe_2: probe_2_instance, ...}
    loaded_failed = {name_of_probe_1: exception_1, name_of_probe_2: exception_2, ...}
    updated_instruments
[ "load", "probes", "from", "probe_dict", "and", "append", "to", "probes", "if", "additional", "instruments", "are", "required", "create", "them", "and", "add", "them", "to", "instruments" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/probe.py#L106-L191
train
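A sketch of the calling convention for the record above, assuming pylabcontrol is installed and that an instrument class named DummyInstrument can be resolved by Instrument.load_and_append; the instrument and probe names are invented.

from pylabcontrol.src.core.probe import Probe

# Values are comma-separated probe-name strings, matching the
# probe_names.split(',') parsing inside load_and_append.
probe_dict = {'DummyInstrument': 'internal,value1'}

probes, failed, instruments = Probe.load_and_append(probe_dict, probes={}, instruments={})
# probes      -> {'DummyInstrument': {'internal': <Probe>, 'value1': <Probe>}}
# failed      -> {} on success, or {probe_name: ValueError(...)} per failure
# instruments -> includes any instrument instances created along the way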
bennylope/smartystreets.py
smartystreets/data.py
AddressCollection.get
def get(self, key):
    """
    Returns an address by user controlled input ID

    :param key: an input_id used to tag a lookup address
    :return: a matching Address
    """
    try:
        return self[self.id_lookup.get(key)]
    except TypeError:
        raise KeyError
python
def get(self, key):
    """
    Returns an address by user controlled input ID

    :param key: an input_id used to tag a lookup address
    :return: a matching Address
    """
    try:
        return self[self.id_lookup.get(key)]
    except TypeError:
        raise KeyError
[ "def", "get", "(", "self", ",", "key", ")", ":", "try", ":", "return", "self", "[", "self", ".", "id_lookup", ".", "get", "(", "key", ")", "]", "except", "TypeError", ":", "raise", "KeyError" ]
Returns an address by user controlled input ID

:param key: an input_id used to tag a lookup address
:return: a matching Address
[ "Returns", "an", "address", "by", "user", "controlled", "input", "ID" ]
f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/data.py#L87-L98
train
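A usage sketch, assuming an AddressCollection can be built straight from a list of lookup-result dicts as the field names above suggest; the rows are invented.

from smartystreets.data import AddressCollection

addresses = AddressCollection([
    {'input_id': 'home', 'input_index': 0, 'delivery_line_1': '100 Main St'},
    {'input_id': 'work', 'input_index': 1, 'delivery_line_1': '200 Oak Ave'},
])

print(addresses.get('home'))   # the Address tagged 'home'
addresses.get('nope')          # raises KeyError: the None from id_lookup.get()
                               # makes indexing fail with TypeError, translated to KeyError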
bennylope/smartystreets.py
smartystreets/data.py
AddressCollection.get_index
def get_index(self, key):
    """
    Returns an address by input index, a value that matches the list index
    of the provided lookup value, not necessarily the result.

    :param key: an input_index matching the index of the provided address
    :return: a matching Address
    """
    try:
        return self[self.index_lookup.get(key)]
    except TypeError:
        raise KeyError
python
def get_index(self, key):
    """
    Returns an address by input index, a value that matches the list index
    of the provided lookup value, not necessarily the result.

    :param key: an input_index matching the index of the provided address
    :return: a matching Address
    """
    try:
        return self[self.index_lookup.get(key)]
    except TypeError:
        raise KeyError
[ "def", "get_index", "(", "self", ",", "key", ")", ":", "try", ":", "return", "self", "[", "self", ".", "index_lookup", ".", "get", "(", "key", ")", "]", "except", "TypeError", ":", "raise", "KeyError" ]
Returns an address by input index, a value that matches the list index
of the provided lookup value, not necessarily the result.

:param key: an input_index matching the index of the provided address
:return: a matching Address
[ "Returns", "an", "address", "by", "input", "index", "a", "value", "that", "matches", "the", "list", "index", "of", "the", "provided", "lookup", "value", "not", "necessarily", "the", "result", "." ]
f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/data.py#L100-L112
train
etal/biofrills
biofrills/stats/chisq.py
_igamc
def _igamc(a, x):
    """Complemented incomplete Gamma integral.

    SYNOPSIS:

        double a, x, y, igamc();
        y = igamc( a, x );

    DESCRIPTION:

    The function is defined by::

        igamc(a,x)   =   1 - igam(a,x)

                                   inf.
                                     -
                            1       | |  -t  a-1
                      =   -----     |   e   t   dt.
                           -      | |
                          | (a)    -
                                    x

    In this implementation both arguments must be positive.
    The integral is evaluated by either a power series or
    continued fraction expansion, depending on the relative
    values of a and x.
    """
    # Compute x**a * exp(-x) / Gamma(a)
    ax = math.exp(a * math.log(x) - x - math.lgamma(a))

    # Continued fraction
    y = 1.0 - a
    z = x + y + 1.0
    c = 0.0
    pkm2 = 1.0
    qkm2 = x
    pkm1 = x + 1.0
    qkm1 = z * x
    ans = pkm1 / qkm1
    while True:
        c += 1.0
        y += 1.0
        z += 2.0
        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        if qk != 0:
            r = pk / qk
            t = abs((ans - r) / r)
            ans = r
        else:
            t = 1.0
        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk
        if abs(pk) > BIG:
            pkm2 *= BIGINV
            pkm1 *= BIGINV
            qkm2 *= BIGINV
            qkm1 *= BIGINV
        if t <= MACHEP:
            return ans * ax
python
def _igamc(a, x):
    """Complemented incomplete Gamma integral.

    SYNOPSIS:

        double a, x, y, igamc();
        y = igamc( a, x );

    DESCRIPTION:

    The function is defined by::

        igamc(a,x)   =   1 - igam(a,x)

                                   inf.
                                     -
                            1       | |  -t  a-1
                      =   -----     |   e   t   dt.
                           -      | |
                          | (a)    -
                                    x

    In this implementation both arguments must be positive.
    The integral is evaluated by either a power series or
    continued fraction expansion, depending on the relative
    values of a and x.
    """
    # Compute x**a * exp(-x) / Gamma(a)
    ax = math.exp(a * math.log(x) - x - math.lgamma(a))

    # Continued fraction
    y = 1.0 - a
    z = x + y + 1.0
    c = 0.0
    pkm2 = 1.0
    qkm2 = x
    pkm1 = x + 1.0
    qkm1 = z * x
    ans = pkm1 / qkm1
    while True:
        c += 1.0
        y += 1.0
        z += 2.0
        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        if qk != 0:
            r = pk / qk
            t = abs((ans - r) / r)
            ans = r
        else:
            t = 1.0
        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk
        if abs(pk) > BIG:
            pkm2 *= BIGINV
            pkm1 *= BIGINV
            qkm2 *= BIGINV
            qkm1 *= BIGINV
        if t <= MACHEP:
            return ans * ax
[ "def", "_igamc", "(", "a", ",", "x", ")", ":", "# Compute x**a * exp(-x) / Gamma(a)", "ax", "=", "math", ".", "exp", "(", "a", "*", "math", ".", "log", "(", "x", ")", "-", "x", "-", "math", ".", "lgamma", "(", "a", ")", ")", "# Continued fraction", "y", "=", "1.0", "-", "a", "z", "=", "x", "+", "y", "+", "1.0", "c", "=", "0.0", "pkm2", "=", "1.0", "qkm2", "=", "x", "pkm1", "=", "x", "+", "1.0", "qkm1", "=", "z", "*", "x", "ans", "=", "pkm1", "/", "qkm1", "while", "True", ":", "c", "+=", "1.0", "y", "+=", "1.0", "z", "+=", "2.0", "yc", "=", "y", "*", "c", "pk", "=", "pkm1", "*", "z", "-", "pkm2", "*", "yc", "qk", "=", "qkm1", "*", "z", "-", "qkm2", "*", "yc", "if", "qk", "!=", "0", ":", "r", "=", "pk", "/", "qk", "t", "=", "abs", "(", "(", "ans", "-", "r", ")", "/", "r", ")", "ans", "=", "r", "else", ":", "t", "=", "1.0", "pkm2", "=", "pkm1", "pkm1", "=", "pk", "qkm2", "=", "qkm1", "qkm1", "=", "qk", "if", "abs", "(", "pk", ")", ">", "BIG", ":", "pkm2", "*=", "BIGINV", "pkm1", "*=", "BIGINV", "qkm2", "*=", "BIGINV", "qkm1", "*=", "BIGINV", "if", "t", "<=", "MACHEP", ":", "return", "ans", "*", "ax" ]
Complemented incomplete Gamma integral.

SYNOPSIS:

    double a, x, y, igamc();
    y = igamc( a, x );

DESCRIPTION:

The function is defined by::

    igamc(a,x)   =   1 - igam(a,x)

                               inf.
                                 -
                        1       | |  -t  a-1
                  =   -----     |   e   t   dt.
                       -      | |
                      | (a)    -
                                x

In this implementation both arguments must be positive.
The integral is evaluated by either a power series or
continued fraction expansion, depending on the relative
values of a and x.
[ "Complemented", "incomplete", "Gamma", "integral", "." ]
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/stats/chisq.py#L47-L110
train
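The constants BIG, BIGINV, and MACHEP come from the surrounding module; the values below are the standard Cephes ones and are an assumption here. With them, the continued-fraction branch can be sanity-checked against the closed form igamc(1, x) = exp(-x):

import math

# Cephes-style constants assumed to be defined at module level (standard values).
BIG = 4.503599627370496e15
BIGINV = 2.22044604925031308085e-16
MACHEP = 1.11022302462515654042e-16

def igamc_cf(a, x):
    """Continued-fraction branch of _igamc, trimmed from the record above."""
    ax = math.exp(a * math.log(x) - x - math.lgamma(a))  # x**a * exp(-x) / Gamma(a)
    y = 1.0 - a
    z = x + y + 1.0
    c = 0.0
    pkm2, qkm2 = 1.0, x
    pkm1, qkm1 = x + 1.0, z * x
    ans = pkm1 / qkm1
    while True:
        c += 1.0
        y += 1.0
        z += 2.0
        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        if qk != 0:
            r = pk / qk
            t = abs((ans - r) / r)
            ans = r
        else:
            t = 1.0
        pkm2, pkm1 = pkm1, pk
        qkm2, qkm1 = qkm1, qk
        if abs(pk) > BIG:
            pkm2 *= BIGINV
            pkm1 *= BIGINV
            qkm2 *= BIGINV
            qkm1 *= BIGINV
        if t <= MACHEP:
            return ans * ax

# For a = 1 the integral collapses to exp(-x), a quick correctness check.
for x in (1.0, 2.5, 10.0):
    assert abs(igamc_cf(1.0, x) - math.exp(-x)) < 1e-10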
lreis2415/PyGeoC
examples/ex04_watershed_delineation.py
main
def main():
    """The simplest usage of watershed delineation based on TauDEM."""
    dem = '../tests/data/Jamaica_dem.tif'
    num_proc = 2
    wp = '../tests/data/tmp_results/wtsd_delineation'

    TauDEMWorkflow.watershed_delineation(num_proc, dem, workingdir=wp)
python
def main():
    """The simplest usage of watershed delineation based on TauDEM."""
    dem = '../tests/data/Jamaica_dem.tif'
    num_proc = 2
    wp = '../tests/data/tmp_results/wtsd_delineation'

    TauDEMWorkflow.watershed_delineation(num_proc, dem, workingdir=wp)
[ "def", "main", "(", ")", ":", "dem", "=", "'../tests/data/Jamaica_dem.tif'", "num_proc", "=", "2", "wp", "=", "'../tests/data/tmp_results/wtsd_delineation'", "TauDEMWorkflow", ".", "watershed_delineation", "(", "num_proc", ",", "dem", ",", "workingdir", "=", "wp", ")" ]
The simplest usage of watershed delineation based on TauDEM.
[ "The", "simplest", "usage", "of", "watershed", "delineation", "based", "on", "TauDEM", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/examples/ex04_watershed_delineation.py#L8-L14
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser._get_line
def _get_line(self, search_string, search_file, return_string=True, case_sens=True):
    '''Return the first line containing a set of strings in a file.

    If return_string is False, we just return whether such a line
    was found. If case_sens is False, the search is case insensitive.
    '''
    if os.path.isfile(search_file):
        # if single search string
        if type(search_string) == type(''):
            search_string = [search_string]
        # if case insensitive, convert everything to lowercase
        if not case_sens:
            search_string = [i.lower() for i in search_string]
        with open(search_file) as fp:
            # search for the strings line by line
            for line in fp:
                query_line = line if case_sens else line.lower()
                if all([i in query_line for i in search_string]):
                    return line if return_string else True
            if return_string:
                raise Exception('%s not found in %s' % (' & '.join(search_string), search_file))
            else:
                return False
    else:
        raise Exception('%s file does not exist' % search_file)
python
def _get_line(self, search_string, search_file, return_string=True, case_sens=True):
    '''Return the first line containing a set of strings in a file.

    If return_string is False, we just return whether such a line
    was found. If case_sens is False, the search is case insensitive.
    '''
    if os.path.isfile(search_file):
        # if single search string
        if type(search_string) == type(''):
            search_string = [search_string]
        # if case insensitive, convert everything to lowercase
        if not case_sens:
            search_string = [i.lower() for i in search_string]
        with open(search_file) as fp:
            # search for the strings line by line
            for line in fp:
                query_line = line if case_sens else line.lower()
                if all([i in query_line for i in search_string]):
                    return line if return_string else True
            if return_string:
                raise Exception('%s not found in %s' % (' & '.join(search_string), search_file))
            else:
                return False
    else:
        raise Exception('%s file does not exist' % search_file)
[ "def", "_get_line", "(", "self", ",", "search_string", ",", "search_file", ",", "return_string", "=", "True", ",", "case_sens", "=", "True", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "search_file", ")", ":", "# if single search string", "if", "type", "(", "search_string", ")", "==", "type", "(", "''", ")", ":", "search_string", "=", "[", "search_string", "]", "# if case insensitive, convert everything to lowercase", "if", "not", "case_sens", ":", "search_string", "=", "[", "i", ".", "lower", "(", ")", "for", "i", "in", "search_string", "]", "with", "open", "(", "search_file", ")", "as", "fp", ":", "# search for the strings line by line", "for", "line", "in", "fp", ":", "query_line", "=", "line", "if", "case_sens", "else", "line", ".", "lower", "(", ")", "if", "all", "(", "[", "i", "in", "query_line", "for", "i", "in", "search_string", "]", ")", ":", "return", "line", "if", "return_string", "else", "True", "if", "return_string", ":", "raise", "Exception", "(", "'%s not found in %s'", "%", "(", "' & '", ".", "join", "(", "search_string", ")", ",", "search_file", ")", ")", "else", ":", "return", "False", "else", ":", "raise", "Exception", "(", "'%s file does not exist'", "%", "search_file", ")" ]
Return the first line containing a set of strings in a file. If return_string is False, we just return whether such a line was found. If case_sens is False, the search is case insensitive.
[ "Return", "the", "first", "line", "containing", "a", "set", "of", "strings", "in", "a", "file", "." ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L66-L88
train
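A quick demonstration of the three behaviours above. Since _get_line never touches self, it can be exercised unbound for testing (a convenience, not the intended API); the file name and contents are invented but mimic a pw.x header line.

from dfttopif.parsers.pwscf import PwscfParser

with open('pw.out', 'w') as f:
    f.write('Program PWSCF v.6.1\n     number of atomic types    =            2\n')

line = PwscfParser._get_line(None, 'number of atomic types', 'pw.out')
print(int(line.split()[5]))                                                   # 2
print(PwscfParser._get_line(None, 'missing', 'pw.out', return_string=False))  # False
print(PwscfParser._get_line(None, 'NUMBER OF ATOMIC', 'pw.out',
                            return_string=False, case_sens=False))            # True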
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_cutoff_energy
def get_cutoff_energy(self):
    '''Determine the cutoff energy from the output'''
    return Value(
        scalars=[Scalar(value=self.settings["kinetic-energy cutoff"])],
        units=self.settings['kinetic-energy cutoff units']
    )
python
def get_cutoff_energy(self):
    '''Determine the cutoff energy from the output'''
    return Value(
        scalars=[Scalar(value=self.settings["kinetic-energy cutoff"])],
        units=self.settings['kinetic-energy cutoff units']
    )
[ "def", "get_cutoff_energy", "(", "self", ")", ":", "return", "Value", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "self", ".", "settings", "[", "\"kinetic-energy cutoff\"", "]", ")", "]", ",", "units", "=", "self", ".", "settings", "[", "'kinetic-energy cutoff units'", "]", ")" ]
Determine the cutoff energy from the output
[ "Determine", "the", "cutoff", "energy", "from", "the", "output" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L98-L103
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_pp_name
def get_pp_name(self):
    '''Determine the pseudopotential names from the output'''
    ppnames = []
    # Find the number of atom types
    natomtypes = int(self._get_line('number of atomic types', self.outputf).split()[5])
    # Find the pseudopotential names
    with open(self.outputf) as fp:
        for line in fp:
            if "PseudoPot. #" in line:
                ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))
                if len(ppnames) == natomtypes:
                    return Value(scalars=ppnames)
    raise Exception('Could not find %i pseudopotential names' % natomtypes)
python
def get_pp_name(self):
    '''Determine the pseudopotential names from the output'''
    ppnames = []
    # Find the number of atom types
    natomtypes = int(self._get_line('number of atomic types', self.outputf).split()[5])
    # Find the pseudopotential names
    with open(self.outputf) as fp:
        for line in fp:
            if "PseudoPot. #" in line:
                ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))
                if len(ppnames) == natomtypes:
                    return Value(scalars=ppnames)
    raise Exception('Could not find %i pseudopotential names' % natomtypes)
[ "def", "get_pp_name", "(", "self", ")", ":", "ppnames", "=", "[", "]", "# Find the number of atom types", "natomtypes", "=", "int", "(", "self", ".", "_get_line", "(", "'number of atomic types'", ",", "self", ".", "outputf", ")", ".", "split", "(", ")", "[", "5", "]", ")", "# Find the pseudopotential names", "with", "open", "(", "self", ".", "outputf", ")", "as", "fp", ":", "for", "line", "in", "fp", ":", "if", "\"PseudoPot. #\"", "in", "line", ":", "ppnames", ".", "append", "(", "Scalar", "(", "value", "=", "next", "(", "fp", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ".", "rstrip", "(", ")", ")", ")", "if", "len", "(", "ppnames", ")", "==", "natomtypes", ":", "return", "Value", "(", "scalars", "=", "ppnames", ")", "raise", "Exception", "(", "'Could not find %i pseudopotential names'", "%", "natomtypes", ")" ]
Determine the pseudopotential names from the output
[ "Determine", "the", "pseudopotential", "names", "from", "the", "output" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L171-L183
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_U_settings
def get_U_settings(self):
    '''Determine the DFT+U type and parameters from the output'''
    with open(self.outputf) as fp:
        for line in fp:
            if "LDA+U calculation" in line:
                U_param = {}
                U_param['Type'] = line.split()[0]
                U_param['Values'] = {}
                # look through next several lines
                for nl in range(15):
                    line2 = next(fp).split()
                    if len(line2) > 1 and line2[0] == "atomic":
                        pass  # column titles
                    elif len(line2) == 6:
                        U_param['Values'][line2[0]] = {}
                        U_param['Values'][line2[0]]['L'] = float(line2[1])
                        U_param['Values'][line2[0]]['U'] = float(line2[2])
                        U_param['Values'][line2[0]]['J'] = float(line2[4])
                    else:
                        break  # end of data block
                return Value(**U_param)
    return None
python
def get_U_settings(self):
    '''Determine the DFT+U type and parameters from the output'''
    with open(self.outputf) as fp:
        for line in fp:
            if "LDA+U calculation" in line:
                U_param = {}
                U_param['Type'] = line.split()[0]
                U_param['Values'] = {}
                # look through next several lines
                for nl in range(15):
                    line2 = next(fp).split()
                    if len(line2) > 1 and line2[0] == "atomic":
                        pass  # column titles
                    elif len(line2) == 6:
                        U_param['Values'][line2[0]] = {}
                        U_param['Values'][line2[0]]['L'] = float(line2[1])
                        U_param['Values'][line2[0]]['U'] = float(line2[2])
                        U_param['Values'][line2[0]]['J'] = float(line2[4])
                    else:
                        break  # end of data block
                return Value(**U_param)
    return None
[ "def", "get_U_settings", "(", "self", ")", ":", "with", "open", "(", "self", ".", "outputf", ")", "as", "fp", ":", "for", "line", "in", "fp", ":", "if", "\"LDA+U calculation\"", "in", "line", ":", "U_param", "=", "{", "}", "U_param", "[", "'Type'", "]", "=", "line", ".", "split", "(", ")", "[", "0", "]", "U_param", "[", "'Values'", "]", "=", "{", "}", "# look through next several lines", "for", "nl", "in", "range", "(", "15", ")", ":", "line2", "=", "next", "(", "fp", ")", ".", "split", "(", ")", "if", "len", "(", "line2", ")", ">", "1", "and", "line2", "[", "0", "]", "==", "\"atomic\"", ":", "pass", "# column titles", "elif", "len", "(", "line2", ")", "==", "6", ":", "U_param", "[", "'Values'", "]", "[", "line2", "[", "0", "]", "]", "=", "{", "}", "U_param", "[", "'Values'", "]", "[", "line2", "[", "0", "]", "]", "[", "'L'", "]", "=", "float", "(", "line2", "[", "1", "]", ")", "U_param", "[", "'Values'", "]", "[", "line2", "[", "0", "]", "]", "[", "'U'", "]", "=", "float", "(", "line2", "[", "2", "]", ")", "U_param", "[", "'Values'", "]", "[", "line2", "[", "0", "]", "]", "[", "'J'", "]", "=", "float", "(", "line2", "[", "4", "]", ")", "else", ":", "break", "# end of data block", "return", "Value", "(", "*", "*", "U_param", ")", "return", "None" ]
Determine the DFT+U type and parameters from the output
[ "Determine", "the", "DFT", "+", "U", "type", "and", "parameters", "from", "the", "output" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L185-L205
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_vdW_settings
def get_vdW_settings(self):
    '''Determine the vdW type if using vdW xc functional or correction
    scheme from the input otherwise'''
    xc = self.get_xc_functional().scalars[0].value
    if 'vdw' in xc.lower():  # vdW xc functional
        return Value(scalars=[Scalar(value=xc)])
    else:
        # look for vdw_corr in input
        vdW_dict = {
            'xdm': 'Becke-Johnson XDM',
            'ts': 'Tkatchenko-Scheffler',
            'ts-vdw': 'Tkatchenko-Scheffler',
            'tkatchenko-scheffler': 'Tkatchenko-Scheffler',
            'grimme-d2': 'Grimme D2',
            'dft-d': 'Grimme D2',
        }
        if self._get_line('vdw_corr', self.inputf, return_string=False, case_sens=False):
            line = self._get_line('vdw_corr', self.inputf, return_string=True, case_sens=False)
            vdwkey = str(line.split('=')[-1].replace("'", "").replace(',', '').lower().rstrip())
            return Value(scalars=[Scalar(value=vdW_dict[vdwkey])])
        return None
python
def get_vdW_settings(self):
    '''Determine the vdW type if using vdW xc functional or correction
    scheme from the input otherwise'''
    xc = self.get_xc_functional().scalars[0].value
    if 'vdw' in xc.lower():  # vdW xc functional
        return Value(scalars=[Scalar(value=xc)])
    else:
        # look for vdw_corr in input
        vdW_dict = {
            'xdm': 'Becke-Johnson XDM',
            'ts': 'Tkatchenko-Scheffler',
            'ts-vdw': 'Tkatchenko-Scheffler',
            'tkatchenko-scheffler': 'Tkatchenko-Scheffler',
            'grimme-d2': 'Grimme D2',
            'dft-d': 'Grimme D2',
        }
        if self._get_line('vdw_corr', self.inputf, return_string=False, case_sens=False):
            line = self._get_line('vdw_corr', self.inputf, return_string=True, case_sens=False)
            vdwkey = str(line.split('=')[-1].replace("'", "").replace(',', '').lower().rstrip())
            return Value(scalars=[Scalar(value=vdW_dict[vdwkey])])
        return None
[ "def", "get_vdW_settings", "(", "self", ")", ":", "xc", "=", "self", ".", "get_xc_functional", "(", ")", ".", "scalars", "[", "0", "]", ".", "value", "if", "'vdw'", "in", "xc", ".", "lower", "(", ")", ":", "# vdW xc functional", "return", "Value", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "xc", ")", "]", ")", "else", ":", "# look for vdw_corr in input", "vdW_dict", "=", "{", "'xdm'", ":", "'Becke-Johnson XDM'", ",", "'ts'", ":", "'Tkatchenko-Scheffler'", ",", "'ts-vdw'", ":", "'Tkatchenko-Scheffler'", ",", "'tkatchenko-scheffler'", ":", "'Tkatchenko-Scheffler'", ",", "'grimme-d2'", ":", "'Grimme D2'", ",", "'dft-d'", ":", "'Grimme D2'", "}", "if", "self", ".", "_get_line", "(", "'vdw_corr'", ",", "self", ".", "inputf", ",", "return_string", "=", "False", ",", "case_sens", "=", "False", ")", ":", "line", "=", "self", ".", "_get_line", "(", "'vdw_corr'", ",", "self", ".", "inputf", ",", "return_string", "=", "True", ",", "case_sens", "=", "False", ")", "vdwkey", "=", "str", "(", "line", ".", "split", "(", "'='", ")", "[", "-", "1", "]", ".", "replace", "(", "\"'\"", ",", "\"\"", ")", ".", "replace", "(", "','", ",", "''", ")", ".", "lower", "(", ")", ".", "rstrip", "(", ")", ")", "return", "Value", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "vdW_dict", "[", "vdwkey", "]", ")", "]", ")", "return", "None" ]
Determine the vdW type if using vdW xc functional or correction scheme from the input otherwise
[ "Determine", "the", "vdW", "type", "if", "using", "vdW", "xc", "functional", "or", "correction", "scheme", "from", "the", "input", "otherwise" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L207-L224
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_stresses
def get_stresses(self):
    '''Determine the stress tensor from the output'''
    if "stress" not in self.settings:
        return None
    wrapped = [[Scalar(value=x) for x in y] for y in self.settings["stress"]]
    return Property(matrices=[wrapped], units=self.settings["stress units"])
python
def get_stresses(self):
    '''Determine the stress tensor from the output'''
    if "stress" not in self.settings:
        return None
    wrapped = [[Scalar(value=x) for x in y] for y in self.settings["stress"]]
    return Property(matrices=[wrapped], units=self.settings["stress units"])
[ "def", "get_stresses", "(", "self", ")", ":", "if", "\"stress\"", "not", "in", "self", ".", "settings", ":", "return", "None", "wrapped", "=", "[", "[", "Scalar", "(", "value", "=", "x", ")", "for", "x", "in", "y", "]", "for", "y", "in", "self", ".", "settings", "[", "\"stress\"", "]", "]", "return", "Property", "(", "matrices", "=", "[", "wrapped", "]", ",", "units", "=", "self", ".", "settings", "[", "\"stress units\"", "]", ")" ]
Determine the stress tensor from the output
[ "Determine", "the", "stress", "tensor", "from", "the", "output" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L230-L235
train
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_dos
def get_dos(self):
    '''Find the total DOS shifted by the Fermi energy'''
    # find the dos file
    fildos = ''
    for f in self._files:
        with open(f, 'r') as fp:
            first_line = next(fp)
            if "E (eV)" in first_line and "Int dos(E)" in first_line:
                fildos = f
                ndoscol = len(next(fp).split()) - 2  # number of spin channels
                fp.close()
                break
            fp.close()
    if not fildos:
        return None  # cannot find DOS

    # get the Fermi energy
    line = self._get_line('the Fermi energy is', self.outputf)
    efermi = float(line.split('is')[-1].split()[0])

    # grab the DOS
    energy, dos = [], []
    fp = open(fildos, 'r')
    next(fp)  # comment line
    for line in fp:
        ls = line.split()
        energy.append(Scalar(value=float(ls[0]) - efermi))
        dos.append(Scalar(value=sum([float(i) for i in ls[1:1 + ndoscol]])))
    return Property(scalars=dos, units='number of states per unit cell',
                    conditions=Value(name='energy', scalars=energy, units='eV'))
python
def get_dos(self):
    '''Find the total DOS shifted by the Fermi energy'''
    # find the dos file
    fildos = ''
    for f in self._files:
        with open(f, 'r') as fp:
            first_line = next(fp)
            if "E (eV)" in first_line and "Int dos(E)" in first_line:
                fildos = f
                ndoscol = len(next(fp).split()) - 2  # number of spin channels
                fp.close()
                break
            fp.close()
    if not fildos:
        return None  # cannot find DOS

    # get the Fermi energy
    line = self._get_line('the Fermi energy is', self.outputf)
    efermi = float(line.split('is')[-1].split()[0])

    # grab the DOS
    energy, dos = [], []
    fp = open(fildos, 'r')
    next(fp)  # comment line
    for line in fp:
        ls = line.split()
        energy.append(Scalar(value=float(ls[0]) - efermi))
        dos.append(Scalar(value=sum([float(i) for i in ls[1:1 + ndoscol]])))
    return Property(scalars=dos, units='number of states per unit cell',
                    conditions=Value(name='energy', scalars=energy, units='eV'))
[ "def", "get_dos", "(", "self", ")", ":", "# find the dos file", "fildos", "=", "''", "for", "f", "in", "self", ".", "_files", ":", "with", "open", "(", "f", ",", "'r'", ")", "as", "fp", ":", "first_line", "=", "next", "(", "fp", ")", "if", "\"E (eV)\"", "in", "first_line", "and", "\"Int dos(E)\"", "in", "first_line", ":", "fildos", "=", "f", "ndoscol", "=", "len", "(", "next", "(", "fp", ")", ".", "split", "(", ")", ")", "-", "2", "# number of spin channels", "fp", ".", "close", "(", ")", "break", "fp", ".", "close", "(", ")", "if", "not", "fildos", ":", "return", "None", "# cannot find DOS", "# get the Fermi energy", "line", "=", "self", ".", "_get_line", "(", "'the Fermi energy is'", ",", "self", ".", "outputf", ")", "efermi", "=", "float", "(", "line", ".", "split", "(", "'is'", ")", "[", "-", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "# grab the DOS", "energy", "=", "[", "]", "dos", "=", "[", "]", "fp", "=", "open", "(", "fildos", ",", "'r'", ")", "next", "(", "fp", ")", "# comment line", "for", "line", "in", "fp", ":", "ls", "=", "line", ".", "split", "(", ")", "energy", ".", "append", "(", "Scalar", "(", "value", "=", "float", "(", "ls", "[", "0", "]", ")", "-", "efermi", ")", ")", "dos", ".", "append", "(", "Scalar", "(", "value", "=", "sum", "(", "[", "float", "(", "i", ")", "for", "i", "in", "ls", "[", "1", ":", "1", "+", "ndoscol", "]", "]", ")", ")", ")", "return", "Property", "(", "scalars", "=", "dos", ",", "units", "=", "'number of states per unit cell'", ",", "conditions", "=", "Value", "(", "name", "=", "'energy'", ",", "scalars", "=", "energy", ",", "units", "=", "'eV'", ")", ")" ]
Find the total DOS shifted by the Fermi energy
[ "Find", "the", "total", "DOS", "shifted", "by", "the", "Fermi", "energy" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L319-L347
train
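Assuming a PwscfParser has already been constructed over a calculation directory containing both the pw.x output and a dos.x data file (construction details omitted; `parser` stands for the assumed instance), the returned pif Property unpacks like this:

dosdata = parser.get_dos()
if dosdata is not None:
    energies = [s.value for s in dosdata.conditions.scalars]  # eV, shifted so E_F = 0
    dos = [s.value for s in dosdata.scalars]                  # states per unit cell
    # crude integrated count of states below the Fermi level:
    step = energies[1] - energies[0]
    occupied = step * sum(d for e, d in zip(energies, dos) if e < 0)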
CitrineInformatics/pif-dft
dfttopif/parsers/pwscf.py
PwscfParser.get_band_gap
def get_band_gap(self):
    '''Compute the band gap from the DOS'''
    dosdata = self.get_dos()
    if type(dosdata) == type(None):
        return None  # cannot find DOS
    else:
        energy = dosdata.conditions.scalars
        dos = dosdata.scalars
        step_size = energy[1].value - energy[0].value
        not_found = True
        l = 0
        bot = 10**3
        top = -10**3
        while not_found and l < len(dos):
            # iterate through the data
            e = float(energy[l].value)
            dens = float(dos[l].value)
            # note: dos already shifted by efermi
            if e < 0 and dens > 1e-3:
                bot = e
            elif e > 0 and dens > 1e-3:
                top = e
                not_found = False
            l += 1
        if top < bot:
            raise Exception('Algorithm failed to find the band gap')
        elif top - bot < step_size * 2:
            return Property(scalars=[Scalar(value=0)], units='eV')
        else:
            bandgap = float(top - bot)
            return Property(scalars=[Scalar(value=round(bandgap, 3))], units='eV')
python
def get_band_gap(self):
    '''Compute the band gap from the DOS'''
    dosdata = self.get_dos()
    if type(dosdata) == type(None):
        return None  # cannot find DOS
    else:
        energy = dosdata.conditions.scalars
        dos = dosdata.scalars
        step_size = energy[1].value - energy[0].value
        not_found = True
        l = 0
        bot = 10**3
        top = -10**3
        while not_found and l < len(dos):
            # iterate through the data
            e = float(energy[l].value)
            dens = float(dos[l].value)
            # note: dos already shifted by efermi
            if e < 0 and dens > 1e-3:
                bot = e
            elif e > 0 and dens > 1e-3:
                top = e
                not_found = False
            l += 1
        if top < bot:
            raise Exception('Algorithm failed to find the band gap')
        elif top - bot < step_size * 2:
            return Property(scalars=[Scalar(value=0)], units='eV')
        else:
            bandgap = float(top - bot)
            return Property(scalars=[Scalar(value=round(bandgap, 3))], units='eV')
[ "def", "get_band_gap", "(", "self", ")", ":", "dosdata", "=", "self", ".", "get_dos", "(", ")", "if", "type", "(", "dosdata", ")", "==", "type", "(", "None", ")", ":", "return", "None", "# cannot find DOS", "else", ":", "energy", "=", "dosdata", ".", "conditions", ".", "scalars", "dos", "=", "dosdata", ".", "scalars", "step_size", "=", "energy", "[", "1", "]", ".", "value", "-", "energy", "[", "0", "]", ".", "value", "not_found", "=", "True", "l", "=", "0", "bot", "=", "10", "**", "3", "top", "=", "-", "10", "**", "3", "while", "not_found", "and", "l", "<", "len", "(", "dos", ")", ":", "# iterate through the data", "e", "=", "float", "(", "energy", "[", "l", "]", ".", "value", ")", "dens", "=", "float", "(", "dos", "[", "l", "]", ".", "value", ")", "# note: dos already shifted by efermi", "if", "e", "<", "0", "and", "dens", ">", "1e-3", ":", "bot", "=", "e", "elif", "e", ">", "0", "and", "dens", ">", "1e-3", ":", "top", "=", "e", "not_found", "=", "False", "l", "+=", "1", "if", "top", "<", "bot", ":", "raise", "Exception", "(", "'Algorithm failed to find the band gap'", ")", "elif", "top", "-", "bot", "<", "step_size", "*", "2", ":", "return", "Property", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "0", ")", "]", ",", "units", "=", "'eV'", ")", "else", ":", "bandgap", "=", "float", "(", "top", "-", "bot", ")", "return", "Property", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "round", "(", "bandgap", ",", "3", ")", ")", "]", ",", "units", "=", "'eV'", ")" ]
Compute the band gap from the DOS
[ "Compute", "the", "band", "gap", "from", "the", "DOS" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L369-L396
train
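The scan above is easy to verify in isolation. Below is a standalone replica with plain floats in place of pif Scalars, run on a synthetic DOS that has states only below -0.5 eV and above +0.7 eV, so the expected gap is 1.2 eV.

def band_gap_from_dos(energy, dos, thresh=1e-3):
    """Standalone replica of the scan in get_band_gap, floats instead of Scalars."""
    step_size = energy[1] - energy[0]
    bot, top = 1e3, -1e3
    for e, dens in zip(energy, dos):
        if e < 0 and dens > thresh:
            bot = e            # highest occupied state below E_F
        elif e > 0 and dens > thresh:
            top = e            # lowest state above E_F
            break
    if top < bot:
        raise Exception('Algorithm failed to find the band gap')
    if top - bot < step_size * 2:
        return 0.0             # metallic: states straddle E_F within grid resolution
    return round(top - bot, 3)

# Synthetic DOS on a 0.1 eV grid with states only below -0.5 eV and above +0.7 eV.
energy = [round(-5 + 0.1 * i, 10) for i in range(101)]
dos = [1.0 if (e <= -0.5 or e >= 0.7) else 0.0 for e in energy]
assert band_gap_from_dos(energy, dos) == 1.2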
idlesign/django-sitecats
sitecats/toolbox.py
get_category_aliases_under
def get_category_aliases_under(parent_alias=None):
    """Returns a list of category aliases under the given parent.

    Could be useful to pass to `ModelWithCategory.enable_category_lists_editor`
    in `additional_parents_aliases` parameter.

    :param str|None parent_alias: Parent alias or None to categories under root
    :rtype: list
    :return: a list of category aliases
    """
    return [ch.alias for ch in get_cache().get_children_for(parent_alias, only_with_aliases=True)]
python
def get_category_aliases_under(parent_alias=None):
    """Returns a list of category aliases under the given parent.

    Could be useful to pass to `ModelWithCategory.enable_category_lists_editor`
    in `additional_parents_aliases` parameter.

    :param str|None parent_alias: Parent alias or None to categories under root
    :rtype: list
    :return: a list of category aliases
    """
    return [ch.alias for ch in get_cache().get_children_for(parent_alias, only_with_aliases=True)]
[ "def", "get_category_aliases_under", "(", "parent_alias", "=", "None", ")", ":", "return", "[", "ch", ".", "alias", "for", "ch", "in", "get_cache", "(", ")", ".", "get_children_for", "(", "parent_alias", ",", "only_with_aliases", "=", "True", ")", "]" ]
Returns a list of category aliases under the given parent.

Could be useful to pass to `ModelWithCategory.enable_category_lists_editor`
in `additional_parents_aliases` parameter.

:param str|None parent_alias: Parent alias or None to categories under root
:rtype: list
:return: a list of category aliases
[ "Returns", "a", "list", "of", "category", "aliases", "under", "the", "given", "parent", "." ]
9b45e91fc0dcb63a0011780437fe28145e3ecce9
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L16-L26
train
idlesign/django-sitecats
sitecats/toolbox.py
get_category_lists
def get_category_lists(init_kwargs=None, additional_parents_aliases=None, obj=None):
    """Returns a list of CategoryList objects, optionally associated with
    a given model instance.

    :param dict|None init_kwargs:
    :param list|None additional_parents_aliases:
    :param Model|None obj: Model instance to get categories for
    :rtype: list
    :return:
    """
    init_kwargs = init_kwargs or {}
    additional_parents_aliases = additional_parents_aliases or []

    parent_aliases = additional_parents_aliases

    if obj is not None:
        ctype = ContentType.objects.get_for_model(obj)
        cat_ids = [
            item[0] for item in
            get_tie_model().objects.filter(content_type=ctype, object_id=obj.id).values_list('category_id').all()
        ]
        parent_aliases = list(get_cache().get_parents_for(cat_ids).union(additional_parents_aliases))

    lists = []
    aliases = get_cache().sort_aliases(parent_aliases)
    categories_cache = get_cache().get_categories(aliases, obj)

    for parent_alias in aliases:
        catlist = CategoryList(parent_alias, **init_kwargs)  # TODO Burned in class name. Make more customizable.
        if obj is not None:
            catlist.set_obj(obj)

        # Optimization. To get DB hits down.
        cache = []
        try:
            cache = categories_cache[parent_alias]
        except KeyError:
            pass
        catlist.set_get_categories_cache(cache)

        lists.append(catlist)

    return lists
python
def get_category_lists(init_kwargs=None, additional_parents_aliases=None, obj=None):
    """Returns a list of CategoryList objects, optionally associated with
    a given model instance.

    :param dict|None init_kwargs:
    :param list|None additional_parents_aliases:
    :param Model|None obj: Model instance to get categories for
    :rtype: list
    :return:
    """
    init_kwargs = init_kwargs or {}
    additional_parents_aliases = additional_parents_aliases or []

    parent_aliases = additional_parents_aliases

    if obj is not None:
        ctype = ContentType.objects.get_for_model(obj)
        cat_ids = [
            item[0] for item in
            get_tie_model().objects.filter(content_type=ctype, object_id=obj.id).values_list('category_id').all()
        ]
        parent_aliases = list(get_cache().get_parents_for(cat_ids).union(additional_parents_aliases))

    lists = []
    aliases = get_cache().sort_aliases(parent_aliases)
    categories_cache = get_cache().get_categories(aliases, obj)

    for parent_alias in aliases:
        catlist = CategoryList(parent_alias, **init_kwargs)  # TODO Burned in class name. Make more customizable.
        if obj is not None:
            catlist.set_obj(obj)

        # Optimization. To get DB hits down.
        cache = []
        try:
            cache = categories_cache[parent_alias]
        except KeyError:
            pass
        catlist.set_get_categories_cache(cache)

        lists.append(catlist)

    return lists
[ "def", "get_category_lists", "(", "init_kwargs", "=", "None", ",", "additional_parents_aliases", "=", "None", ",", "obj", "=", "None", ")", ":", "init_kwargs", "=", "init_kwargs", "or", "{", "}", "additional_parents_aliases", "=", "additional_parents_aliases", "or", "[", "]", "parent_aliases", "=", "additional_parents_aliases", "if", "obj", "is", "not", "None", ":", "ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", "cat_ids", "=", "[", "item", "[", "0", "]", "for", "item", "in", "get_tie_model", "(", ")", ".", "objects", ".", "filter", "(", "content_type", "=", "ctype", ",", "object_id", "=", "obj", ".", "id", ")", ".", "values_list", "(", "'category_id'", ")", ".", "all", "(", ")", "]", "parent_aliases", "=", "list", "(", "get_cache", "(", ")", ".", "get_parents_for", "(", "cat_ids", ")", ".", "union", "(", "additional_parents_aliases", ")", ")", "lists", "=", "[", "]", "aliases", "=", "get_cache", "(", ")", ".", "sort_aliases", "(", "parent_aliases", ")", "categories_cache", "=", "get_cache", "(", ")", ".", "get_categories", "(", "aliases", ",", "obj", ")", "for", "parent_alias", "in", "aliases", ":", "catlist", "=", "CategoryList", "(", "parent_alias", ",", "*", "*", "init_kwargs", ")", "# TODO Burned in class name. Make more customizable.", "if", "obj", "is", "not", "None", ":", "catlist", ".", "set_obj", "(", "obj", ")", "# Optimization. To get DB hits down.", "cache", "=", "[", "]", "try", ":", "cache", "=", "categories_cache", "[", "parent_alias", "]", "except", "KeyError", ":", "pass", "catlist", ".", "set_get_categories_cache", "(", "cache", ")", "lists", ".", "append", "(", "catlist", ")", "return", "lists" ]
Returns a list of CategoryList objects, optionally associated with
a given model instance.

:param dict|None init_kwargs:
:param list|None additional_parents_aliases:
:param Model|None obj: Model instance to get categories for
:rtype: list
:return:
[ "Returns", "a", "list", "of", "CategoryList", "objects", "optionally", "associated", "with", "a", "given", "model", "instance", "." ]
9b45e91fc0dcb63a0011780437fe28145e3ecce9
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L29-L68
train
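In a Django view these toolbox helpers compose as below. The Article model, template path, and the show_title init kwarg are invented for illustration; only get_category_lists and get_category_aliases_under come from the records here.

from django.shortcuts import get_object_or_404, render
from sitecats.toolbox import get_category_aliases_under, get_category_lists

from myapp.models import Article  # hypothetical model categorized via sitecats

def article_detail(request, article_id):
    article = get_object_or_404(Article, pk=article_id)
    category_lists = get_category_lists(
        init_kwargs={'show_title': True},  # assumed CategoryList attribute, for illustration
        additional_parents_aliases=get_category_aliases_under(),  # aliases under root
        obj=article,
    )
    return render(request, 'myapp/article_detail.html', {
        'article': article,
        'category_lists': category_lists,
    })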
idlesign/django-sitecats
sitecats/toolbox.py
CategoryRequestHandler.register_lists
def register_lists(self, category_lists, lists_init_kwargs=None, editor_init_kwargs=None):
    """Registers CategoryList objects to handle their requests.

    :param list category_lists: CategoryList objects
    :param dict lists_init_kwargs: Attributes to apply to each of CategoryList objects
    """
    lists_init_kwargs = lists_init_kwargs or {}
    editor_init_kwargs = editor_init_kwargs or {}

    for lst in category_lists:
        if isinstance(lst, string_types):  # Spawn CategoryList object from base category alias.
            lst = self.list_cls(lst, **lists_init_kwargs)
        elif not isinstance(lst, CategoryList):
            raise SitecatsConfigurationError(
                '`CategoryRequestHandler.register_lists()` accepts only '
                '`CategoryList` objects or category aliases.'
            )

        if self._obj:
            lst.set_obj(self._obj)

        for name, val in lists_init_kwargs.items():  # Setting CategoryList attributes from kwargs.
            setattr(lst, name, val)

        lst.enable_editor(**editor_init_kwargs)
        self._lists[lst.get_id()] = lst
python
def register_lists(self, category_lists, lists_init_kwargs=None, editor_init_kwargs=None):
    """Registers CategoryList objects to handle their requests.

    :param list category_lists: CategoryList objects
    :param dict lists_init_kwargs: Attributes to apply to each of CategoryList objects
    """
    lists_init_kwargs = lists_init_kwargs or {}
    editor_init_kwargs = editor_init_kwargs or {}

    for lst in category_lists:
        if isinstance(lst, string_types):  # Spawn CategoryList object from base category alias.
            lst = self.list_cls(lst, **lists_init_kwargs)
        elif not isinstance(lst, CategoryList):
            raise SitecatsConfigurationError(
                '`CategoryRequestHandler.register_lists()` accepts only '
                '`CategoryList` objects or category aliases.'
            )

        if self._obj:
            lst.set_obj(self._obj)

        for name, val in lists_init_kwargs.items():  # Setting CategoryList attributes from kwargs.
            setattr(lst, name, val)

        lst.enable_editor(**editor_init_kwargs)
        self._lists[lst.get_id()] = lst
[ "def", "register_lists", "(", "self", ",", "category_lists", ",", "lists_init_kwargs", "=", "None", ",", "editor_init_kwargs", "=", "None", ")", ":", "lists_init_kwargs", "=", "lists_init_kwargs", "or", "{", "}", "editor_init_kwargs", "=", "editor_init_kwargs", "or", "{", "}", "for", "lst", "in", "category_lists", ":", "if", "isinstance", "(", "lst", ",", "string_types", ")", ":", "# Spawn CategoryList object from base category alias.", "lst", "=", "self", ".", "list_cls", "(", "lst", ",", "*", "*", "lists_init_kwargs", ")", "elif", "not", "isinstance", "(", "lst", ",", "CategoryList", ")", ":", "raise", "SitecatsConfigurationError", "(", "'`CategoryRequestHandler.register_lists()` accepts only '", "'`CategoryList` objects or category aliases.'", ")", "if", "self", ".", "_obj", ":", "lst", ".", "set_obj", "(", "self", ".", "_obj", ")", "for", "name", ",", "val", "in", "lists_init_kwargs", ".", "items", "(", ")", ":", "# Setting CategoryList attributes from kwargs.", "setattr", "(", "lst", ",", "name", ",", "val", ")", "lst", ".", "enable_editor", "(", "*", "*", "editor_init_kwargs", ")", "self", ".", "_lists", "[", "lst", ".", "get_id", "(", ")", "]", "=", "lst" ]
Registers CategoryList objects to handle their requests.

:param list category_lists: CategoryList objects
:param dict lists_init_kwargs: Attributes to apply to each of CategoryList objects
[ "Registers", "CategoryList", "objects", "to", "handle", "their", "requests", "." ]
9b45e91fc0dcb63a0011780437fe28145e3ecce9
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L254-L280
train
idlesign/django-sitecats
sitecats/toolbox.py
CategoryRequestHandler.action_remove
def action_remove(cls, request, category_list):
    """Handles `remove` action from CategoryList editor.

    Removes an actual category if a target object is not set for the list.
    Removes a tie-to-category object if a target object is set for the list.

    :param Request request: Django request object
    :param CategoryList category_list: CategoryList object to operate upon.
    :return: True on success, otherwise an exception from the SitecatsException family is raised.
    """
    if not category_list.editor.allow_remove:
        raise SitecatsSecurityException(
            '`action_remove()` is not supported by parent `%s` category.' % category_list.alias)

    category_id = int(request.POST.get('category_id', 0))
    if not category_id:
        raise SitecatsSecurityException(
            'Unsupported `category_id` value - `%s` - is passed to `action_remove()`.' % category_id)

    category = get_cache().get_category_by_id(category_id)
    if not category:
        raise SitecatsSecurityException('Unable to get `%s` category in `action_remove()`.' % category_id)

    cat_ident = category.alias or category.id

    if category.is_locked:
        raise SitecatsSecurityException('`action_remove()` is not supported by `%s` category.' % cat_ident)

    if category.parent_id != category_list.get_id():
        raise SitecatsSecurityException(
            '`action_remove()` is unable to remove `%s`: '
            'not a child of parent `%s` category.' % (cat_ident, category_list.alias)
        )

    min_num = category_list.editor.min_num

    def check_min_num(num):
        if min_num is not None and num - 1 < min_num:
            subcats_str = ungettext_lazy('subcategory', 'subcategories', min_num)
            error_msg = _(
                'Unable to remove "%(target_category)s" category from "%(parent_category)s": '
                'parent category requires at least %(num)s %(subcats_str)s.'
            ) % {
                'target_category': category.title,
                'parent_category': category_list.get_title(),
                'num': min_num,
                'subcats_str': subcats_str
            }
            raise SitecatsValidationError(error_msg)

    child_ids = get_cache().get_child_ids(category_list.alias)
    check_min_num(len(child_ids))

    if category_list.obj is None:  # Remove category itself and children.
        category.delete()
    else:  # Remove just a category-to-object tie.
        # TODO filter user/status
        check_min_num(category_list.obj.get_ties_for_categories_qs(child_ids).count())
        category_list.obj.remove_from_category(category)

    return True
python
def action_remove(cls, request, category_list):
    """Handles `remove` action from CategoryList editor.

    Removes an actual category if a target object is not set for the list.
    Removes a tie-to-category object if a target object is set for the list.

    :param Request request: Django request object
    :param CategoryList category_list: CategoryList object to operate upon.
    :return: True on success, otherwise an exception from the SitecatsException family is raised.
    """
    if not category_list.editor.allow_remove:
        raise SitecatsSecurityException(
            '`action_remove()` is not supported by parent `%s` category.' % category_list.alias)

    category_id = int(request.POST.get('category_id', 0))
    if not category_id:
        raise SitecatsSecurityException(
            'Unsupported `category_id` value - `%s` - is passed to `action_remove()`.' % category_id)

    category = get_cache().get_category_by_id(category_id)
    if not category:
        raise SitecatsSecurityException('Unable to get `%s` category in `action_remove()`.' % category_id)

    cat_ident = category.alias or category.id

    if category.is_locked:
        raise SitecatsSecurityException('`action_remove()` is not supported by `%s` category.' % cat_ident)

    if category.parent_id != category_list.get_id():
        raise SitecatsSecurityException(
            '`action_remove()` is unable to remove `%s`: '
            'not a child of parent `%s` category.' % (cat_ident, category_list.alias)
        )

    min_num = category_list.editor.min_num

    def check_min_num(num):
        if min_num is not None and num - 1 < min_num:
            subcats_str = ungettext_lazy('subcategory', 'subcategories', min_num)
            error_msg = _(
                'Unable to remove "%(target_category)s" category from "%(parent_category)s": '
                'parent category requires at least %(num)s %(subcats_str)s.'
            ) % {
                'target_category': category.title,
                'parent_category': category_list.get_title(),
                'num': min_num,
                'subcats_str': subcats_str
            }
            raise SitecatsValidationError(error_msg)

    child_ids = get_cache().get_child_ids(category_list.alias)
    check_min_num(len(child_ids))

    if category_list.obj is None:  # Remove category itself and children.
        category.delete()
    else:  # Remove just a category-to-object tie.
        # TODO filter user/status
        check_min_num(category_list.obj.get_ties_for_categories_qs(child_ids).count())
        category_list.obj.remove_from_category(category)

    return True
[ "def", "action_remove", "(", "cls", ",", "request", ",", "category_list", ")", ":", "if", "not", "category_list", ".", "editor", ".", "allow_remove", ":", "raise", "SitecatsSecurityException", "(", "'`action_remove()` is not supported by parent `%s`category.'", "%", "category_list", ".", "alias", ")", "category_id", "=", "int", "(", "request", ".", "POST", ".", "get", "(", "'category_id'", ",", "0", ")", ")", "if", "not", "category_id", ":", "raise", "SitecatsSecurityException", "(", "'Unsupported `category_id` value - `%s` - is passed to `action_remove()`.'", "%", "category_id", ")", "category", "=", "get_cache", "(", ")", ".", "get_category_by_id", "(", "category_id", ")", "if", "not", "category", ":", "raise", "SitecatsSecurityException", "(", "'Unable to get `%s` category in `action_remove()`.'", "%", "category_id", ")", "cat_ident", "=", "category", ".", "alias", "or", "category", ".", "id", "if", "category", ".", "is_locked", ":", "raise", "SitecatsSecurityException", "(", "'`action_remove()` is not supported by `%s` category.'", "%", "cat_ident", ")", "if", "category", ".", "parent_id", "!=", "category_list", ".", "get_id", "(", ")", ":", "raise", "SitecatsSecurityException", "(", "'`action_remove()` is unable to remove `%s`: '", "'not a child of parent `%s` category.'", "%", "(", "cat_ident", ",", "category_list", ".", "alias", ")", ")", "min_num", "=", "category_list", ".", "editor", ".", "min_num", "def", "check_min_num", "(", "num", ")", ":", "if", "min_num", "is", "not", "None", "and", "num", "-", "1", "<", "min_num", ":", "subcats_str", "=", "ungettext_lazy", "(", "'subcategory'", ",", "'subcategories'", ",", "min_num", ")", "error_msg", "=", "_", "(", "'Unable to remove \"%(target_category)s\" category from \"%(parent_category)s\": '", "'parent category requires at least %(num)s %(subcats_str)s.'", ")", "%", "{", "'target_category'", ":", "category", ".", "title", ",", "'parent_category'", ":", "category_list", ".", "get_title", "(", ")", ",", "'num'", ":", "min_num", ",", "'subcats_str'", ":", "subcats_str", "}", "raise", "SitecatsValidationError", "(", "error_msg", ")", "child_ids", "=", "get_cache", "(", ")", ".", "get_child_ids", "(", "category_list", ".", "alias", ")", "check_min_num", "(", "len", "(", "child_ids", ")", ")", "if", "category_list", ".", "obj", "is", "None", ":", "# Remove category itself and children.", "category", ".", "delete", "(", ")", "else", ":", "# Remove just a category-to-object tie.", "# TODO filter user/status", "check_min_num", "(", "category_list", ".", "obj", ".", "get_ties_for_categories_qs", "(", "child_ids", ")", ".", "count", "(", ")", ")", "category_list", ".", "obj", ".", "remove_from_category", "(", "category", ")", "return", "True" ]
Handles `remove` action from CategoryList editor.

Removes an actual category if a target object is not set for the list.
Removes a tie-to-category object if a target object is set for the list.

:param Request request: Django request object
:param CategoryList category_list: CategoryList object to operate upon.
:return: True on success; otherwise an exception from the SitecatsException family is raised.
[ "Handles", "remove", "action", "from", "CategoryList", "editor", "." ]
9b45e91fc0dcb63a0011780437fe28145e3ecce9
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L283-L342
train
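A hedged usage sketch for the `action_remove` record above: how a Django view might delegate to it. Only the call signature and exception names come from the record; the import locations of the sitecats names and the way `category_list` is obtained are assumptions.

```python
# Sketch only: assumes CategoryRequestHandler, SitecatsSecurityException and
# SitecatsValidationError are importable from sitecats (paths not confirmed),
# and that a configured `category_list` is passed in by the caller.
from django.http import HttpResponse, HttpResponseBadRequest

def remove_category_view(request, category_list):
    try:
        CategoryRequestHandler.action_remove(request, category_list)
    except SitecatsValidationError as e:
        # min_num constraint violated: surface the validation message
        return HttpResponseBadRequest(str(e))
    except SitecatsSecurityException:
        # bad category_id, locked category, or removal not allowed here
        return HttpResponseBadRequest('Removal is not permitted here.')
    return HttpResponse('Removed.')
```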
idlesign/django-sitecats
sitecats/toolbox.py
CategoryRequestHandler.action_add
def action_add(cls, request, category_list): """Handles `add` action from CategoryList editor. Adds an actual category if a target object is not set for the list. Adds a tie-to-category object if a target object is set for the list. :param Request request: Django request object :param CategoryList category_list: CategoryList object to operate upon. :return: CategoryModel object on success otherwise and exception from SitecatsException family is raised. """ if not category_list.editor.allow_add: raise SitecatsSecurityException('`action_add()` is not supported by `%s` category.' % category_list.alias) titles = request.POST.get('category_title', '').strip() if not titles: raise SitecatsSecurityException( 'Unsupported `category_title` value - `%s` - is passed to `action_add()`.' % titles) if category_list.editor.category_separator is None: titles = [titles] else: titles = [ title.strip() for title in titles.split(category_list.editor.category_separator) if title.strip() ] def check_max_num(num, max_num, category_title): if max_num is not None and num+1 > max_num: subcats_str = ungettext_lazy('subcategory', 'subcategories', max_num) error_msg = _( 'Unable to add "%(target_category)s" category into "%(parent_category)s": ' 'parent category can have at most %(num)s %(subcats_str)s.' ) % { 'target_category': category_title, 'parent_category': category_list.get_title(), 'num': max_num, 'subcats_str': subcats_str } raise SitecatsValidationError(error_msg) target_category = None for category_title in titles: exists = get_cache().find_category(category_list.alias, category_title) if exists and category_list.obj is None: # Already exists. return exists if not exists and not category_list.editor.allow_new: error_msg = _( 'Unable to create a new "%(new_category)s" category inside of "%(parent_category)s": ' 'parent category does not support this action.' ) % { 'new_category': category_title, 'parent_category': category_list.get_title() } raise SitecatsNewCategoryException(error_msg) max_num = category_list.editor.max_num child_ids = get_cache().get_child_ids(category_list.alias) if not exists: # Add new category. if category_list.obj is None: check_max_num(len(child_ids), max_num, category_title) # TODO status target_category = get_category_model().add( category_title, request.user, parent=category_list.get_category_model() ) else: target_category = exists # Use existing one for a tie. if category_list.obj is not None: # TODO status check_max_num(category_list.obj.get_ties_for_categories_qs(child_ids).count(), max_num, category_title) category_list.obj.add_to_category(target_category, request.user) return target_category
python
def action_add(cls, request, category_list): """Handles `add` action from CategoryList editor. Adds an actual category if a target object is not set for the list. Adds a tie-to-category object if a target object is set for the list. :param Request request: Django request object :param CategoryList category_list: CategoryList object to operate upon. :return: CategoryModel object on success otherwise and exception from SitecatsException family is raised. """ if not category_list.editor.allow_add: raise SitecatsSecurityException('`action_add()` is not supported by `%s` category.' % category_list.alias) titles = request.POST.get('category_title', '').strip() if not titles: raise SitecatsSecurityException( 'Unsupported `category_title` value - `%s` - is passed to `action_add()`.' % titles) if category_list.editor.category_separator is None: titles = [titles] else: titles = [ title.strip() for title in titles.split(category_list.editor.category_separator) if title.strip() ] def check_max_num(num, max_num, category_title): if max_num is not None and num+1 > max_num: subcats_str = ungettext_lazy('subcategory', 'subcategories', max_num) error_msg = _( 'Unable to add "%(target_category)s" category into "%(parent_category)s": ' 'parent category can have at most %(num)s %(subcats_str)s.' ) % { 'target_category': category_title, 'parent_category': category_list.get_title(), 'num': max_num, 'subcats_str': subcats_str } raise SitecatsValidationError(error_msg) target_category = None for category_title in titles: exists = get_cache().find_category(category_list.alias, category_title) if exists and category_list.obj is None: # Already exists. return exists if not exists and not category_list.editor.allow_new: error_msg = _( 'Unable to create a new "%(new_category)s" category inside of "%(parent_category)s": ' 'parent category does not support this action.' ) % { 'new_category': category_title, 'parent_category': category_list.get_title() } raise SitecatsNewCategoryException(error_msg) max_num = category_list.editor.max_num child_ids = get_cache().get_child_ids(category_list.alias) if not exists: # Add new category. if category_list.obj is None: check_max_num(len(child_ids), max_num, category_title) # TODO status target_category = get_category_model().add( category_title, request.user, parent=category_list.get_category_model() ) else: target_category = exists # Use existing one for a tie. if category_list.obj is not None: # TODO status check_max_num(category_list.obj.get_ties_for_categories_qs(child_ids).count(), max_num, category_title) category_list.obj.add_to_category(target_category, request.user) return target_category
[ "def", "action_add", "(", "cls", ",", "request", ",", "category_list", ")", ":", "if", "not", "category_list", ".", "editor", ".", "allow_add", ":", "raise", "SitecatsSecurityException", "(", "'`action_add()` is not supported by `%s` category.'", "%", "category_list", ".", "alias", ")", "titles", "=", "request", ".", "POST", ".", "get", "(", "'category_title'", ",", "''", ")", ".", "strip", "(", ")", "if", "not", "titles", ":", "raise", "SitecatsSecurityException", "(", "'Unsupported `category_title` value - `%s` - is passed to `action_add()`.'", "%", "titles", ")", "if", "category_list", ".", "editor", ".", "category_separator", "is", "None", ":", "titles", "=", "[", "titles", "]", "else", ":", "titles", "=", "[", "title", ".", "strip", "(", ")", "for", "title", "in", "titles", ".", "split", "(", "category_list", ".", "editor", ".", "category_separator", ")", "if", "title", ".", "strip", "(", ")", "]", "def", "check_max_num", "(", "num", ",", "max_num", ",", "category_title", ")", ":", "if", "max_num", "is", "not", "None", "and", "num", "+", "1", ">", "max_num", ":", "subcats_str", "=", "ungettext_lazy", "(", "'subcategory'", ",", "'subcategories'", ",", "max_num", ")", "error_msg", "=", "_", "(", "'Unable to add \"%(target_category)s\" category into \"%(parent_category)s\": '", "'parent category can have at most %(num)s %(subcats_str)s.'", ")", "%", "{", "'target_category'", ":", "category_title", ",", "'parent_category'", ":", "category_list", ".", "get_title", "(", ")", ",", "'num'", ":", "max_num", ",", "'subcats_str'", ":", "subcats_str", "}", "raise", "SitecatsValidationError", "(", "error_msg", ")", "target_category", "=", "None", "for", "category_title", "in", "titles", ":", "exists", "=", "get_cache", "(", ")", ".", "find_category", "(", "category_list", ".", "alias", ",", "category_title", ")", "if", "exists", "and", "category_list", ".", "obj", "is", "None", ":", "# Already exists.", "return", "exists", "if", "not", "exists", "and", "not", "category_list", ".", "editor", ".", "allow_new", ":", "error_msg", "=", "_", "(", "'Unable to create a new \"%(new_category)s\" category inside of \"%(parent_category)s\": '", "'parent category does not support this action.'", ")", "%", "{", "'new_category'", ":", "category_title", ",", "'parent_category'", ":", "category_list", ".", "get_title", "(", ")", "}", "raise", "SitecatsNewCategoryException", "(", "error_msg", ")", "max_num", "=", "category_list", ".", "editor", ".", "max_num", "child_ids", "=", "get_cache", "(", ")", ".", "get_child_ids", "(", "category_list", ".", "alias", ")", "if", "not", "exists", ":", "# Add new category.", "if", "category_list", ".", "obj", "is", "None", ":", "check_max_num", "(", "len", "(", "child_ids", ")", ",", "max_num", ",", "category_title", ")", "# TODO status", "target_category", "=", "get_category_model", "(", ")", ".", "add", "(", "category_title", ",", "request", ".", "user", ",", "parent", "=", "category_list", ".", "get_category_model", "(", ")", ")", "else", ":", "target_category", "=", "exists", "# Use existing one for a tie.", "if", "category_list", ".", "obj", "is", "not", "None", ":", "# TODO status", "check_max_num", "(", "category_list", ".", "obj", ".", "get_ties_for_categories_qs", "(", "child_ids", ")", ".", "count", "(", ")", ",", "max_num", ",", "category_title", ")", "category_list", ".", "obj", ".", "add_to_category", "(", "target_category", ",", "request", ".", "user", ")", "return", "target_category" ]
Handles `add` action from CategoryList editor.

Adds an actual category if a target object is not set for the list.
Adds a tie-to-category object if a target object is set for the list.

:param Request request: Django request object
:param CategoryList category_list: CategoryList object to operate upon.
:return: CategoryModel object on success; otherwise an exception from the SitecatsException family is raised.
[ "Handles", "add", "action", "from", "CategoryList", "editor", "." ]
9b45e91fc0dcb63a0011780437fe28145e3ecce9
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L345-L418
train
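A small self-contained illustration of the title-splitting step inside `action_add` above: with a separator configured, the raw POST value is split, stripped, and empty chunks are dropped.

```python
# Mirrors the list comprehension in action_add (separator configured).
raw = ' books, music , ,films '
separator = ','
titles = [title.strip() for title in raw.split(separator) if title.strip()]
assert titles == ['books', 'music', 'films']

# With no separator configured, the whole stripped value is a single title.
raw_single = ' jazz '
assert [raw_single.strip()] == ['jazz']
```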
vasilcovsky/pytinypng
pytinypng/api.py
shrink
def shrink(image, apikey): """To shrink a PNG image, post the data to the API service. The response is a JSON message. The initial request must be authorized with HTTP Basic authorization. @param image: PNG image bytes sequence @param apikey: TinyPNG API key @param filename: filename of input file """ def _handle_response(response): body = json.loads(response.read()) if response.code == TinyPNGResponse.SUCCESS_CODE: body['location'] = response.headers.getheader("Location") try: body['bytes'] = urlopen(body['location']).read() except: body['bytes'] = None return response.code, body auth = b64encode(bytes("api:" + apikey)).decode("ascii") request = Request(TINYPNG_SHRINK_URL, image) request.add_header("Authorization", "Basic %s" % auth) try: response = urlopen(request) (code, response_dict) = _handle_response(response) except HTTPError as e: (code, response_dict) = _handle_response(e) return TinyPNGResponse(code, **response_dict)
python
def shrink(image, apikey): """To shrink a PNG image, post the data to the API service. The response is a JSON message. The initial request must be authorized with HTTP Basic authorization. @param image: PNG image bytes sequence @param apikey: TinyPNG API key @param filename: filename of input file """ def _handle_response(response): body = json.loads(response.read()) if response.code == TinyPNGResponse.SUCCESS_CODE: body['location'] = response.headers.getheader("Location") try: body['bytes'] = urlopen(body['location']).read() except: body['bytes'] = None return response.code, body auth = b64encode(bytes("api:" + apikey)).decode("ascii") request = Request(TINYPNG_SHRINK_URL, image) request.add_header("Authorization", "Basic %s" % auth) try: response = urlopen(request) (code, response_dict) = _handle_response(response) except HTTPError as e: (code, response_dict) = _handle_response(e) return TinyPNGResponse(code, **response_dict)
[ "def", "shrink", "(", "image", ",", "apikey", ")", ":", "def", "_handle_response", "(", "response", ")", ":", "body", "=", "json", ".", "loads", "(", "response", ".", "read", "(", ")", ")", "if", "response", ".", "code", "==", "TinyPNGResponse", ".", "SUCCESS_CODE", ":", "body", "[", "'location'", "]", "=", "response", ".", "headers", ".", "getheader", "(", "\"Location\"", ")", "try", ":", "body", "[", "'bytes'", "]", "=", "urlopen", "(", "body", "[", "'location'", "]", ")", ".", "read", "(", ")", "except", ":", "body", "[", "'bytes'", "]", "=", "None", "return", "response", ".", "code", ",", "body", "auth", "=", "b64encode", "(", "bytes", "(", "\"api:\"", "+", "apikey", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "request", "=", "Request", "(", "TINYPNG_SHRINK_URL", ",", "image", ")", "request", ".", "add_header", "(", "\"Authorization\"", ",", "\"Basic %s\"", "%", "auth", ")", "try", ":", "response", "=", "urlopen", "(", "request", ")", "(", "code", ",", "response_dict", ")", "=", "_handle_response", "(", "response", ")", "except", "HTTPError", "as", "e", ":", "(", "code", ",", "response_dict", ")", "=", "_handle_response", "(", "e", ")", "return", "TinyPNGResponse", "(", "code", ",", "*", "*", "response_dict", ")" ]
To shrink a PNG image, post the data to the API service. The response is a JSON message. The initial request must be authorized with HTTP Basic authorization.

@param image: PNG image bytes sequence
@param apikey: TinyPNG API key
[ "To", "shrink", "a", "PNG", "image", "post", "the", "data", "to", "the", "API", "service", ".", "The", "response", "is", "a", "JSON", "message", ".", "The", "initial", "request", "must", "be", "authorized", "with", "HTTP", "Basic", "authorization", "." ]
ac633e4aa41122c49a806f411e43a76d8f73058e
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/api.py#L10-L41
train
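A hedged usage sketch for `shrink`: the API key and file names are placeholders, and reading the result back as attributes (`response.code`, `response.bytes`) is an assumption, since the record only shows `TinyPNGResponse(code, **body)` being constructed.

```python
# Placeholders: replace the key and paths with real values.
with open('input.png', 'rb') as fh:
    image = fh.read()

response = shrink(image, 'YOUR_TINYPNG_API_KEY')

# Assumed attribute access on TinyPNGResponse (not shown in this record).
if response.code == TinyPNGResponse.SUCCESS_CODE and response.bytes:
    with open('output.png', 'wb') as out:
        out.write(response.bytes)
```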
koszullab/metaTOR
metator/metator.py
download_and_install_dependencies
def download_and_install_dependencies(): """Setup URLS and download dependencies for Python 3.6+ """ try: import requests except ImportError: raise ValueError("Python 3.6+ is required.") dependencies = {"hmm_databases": HMM_URL} if sys.platform.startswith("linux") or "bsd" in sys.platform: dependencies["prodigal"] = "{}.linux".format(BASE_PRODIGAL) dependencies["louvain"] = ( "https://lip6.github.io/Louvain-BinaryBuild/" "louvain_linux.tar.gz" ) elif sys.platform == "darwin": dependencies["prodigal"] = "{}.osx.10.9.5".format(BASE_PRODIGAL) dependencies["louvain"] = ( "https://github.com/lip6/Louvain-BinaryBuilds/raw/osx/" "louvain_osx.tar.gz" ) elif sys.platform.startswith("win") or sys.platform == "cygwin": dependencies["prodigal"] = "{}.windows.exe" dependencies["louvain"] = ( "https://ci.appveyor.com/api/projects/yanntm/" "Louvain-BinaryBuild/artifacts/website/" "louvain_windows.tar.gz" ) else: raise NotImplementedError( "Your platform is not supported: {}".format(sys.platform) ) cache_dir = pathlib.Path.cwd() / pathlib.Path("cache") try: print("Downloading dependencies...") cache_dir.mkdir() for dependency_name, url in dependencies.items(): print("Downloading {} at {}".format(dependency_name, url)) request = requests.get(url) basename = url.split("/")[-1] with open(cache_dir / basename, "wb") as handle: print(dependency_name, basename, cache_dir / basename) handle.write(request.content) except FileExistsError: print("Using cached dependencies...") share_dir = pathlib.Path.cwd() tools_dir = share_dir / "tools" louvain_dir = tools_dir / "louvain" louvain_dir.mkdir(parents=True, exist_ok=True) louvain_basename = dependencies["louvain"].split("/")[-1] louvain_path = louvain_dir / louvain_basename (cache_dir / louvain_basename).replace(louvain_path) with tarfile.open(louvain_path, "r:gz") as tar: tar.extractall() hmm_basename = dependencies["hmm_databases"].split("/")[-1] hmm_path = share_dir / hmm_basename (cache_dir / hmm_basename).replace(hmm_path) prodigal_basename = dependencies["prodigal"].split("/")[-1] prodigal_path = tools_dir / "prodigal" (cache_dir / prodigal_basename).replace(prodigal_path)
python
def download_and_install_dependencies(): """Setup URLS and download dependencies for Python 3.6+ """ try: import requests except ImportError: raise ValueError("Python 3.6+ is required.") dependencies = {"hmm_databases": HMM_URL} if sys.platform.startswith("linux") or "bsd" in sys.platform: dependencies["prodigal"] = "{}.linux".format(BASE_PRODIGAL) dependencies["louvain"] = ( "https://lip6.github.io/Louvain-BinaryBuild/" "louvain_linux.tar.gz" ) elif sys.platform == "darwin": dependencies["prodigal"] = "{}.osx.10.9.5".format(BASE_PRODIGAL) dependencies["louvain"] = ( "https://github.com/lip6/Louvain-BinaryBuilds/raw/osx/" "louvain_osx.tar.gz" ) elif sys.platform.startswith("win") or sys.platform == "cygwin": dependencies["prodigal"] = "{}.windows.exe" dependencies["louvain"] = ( "https://ci.appveyor.com/api/projects/yanntm/" "Louvain-BinaryBuild/artifacts/website/" "louvain_windows.tar.gz" ) else: raise NotImplementedError( "Your platform is not supported: {}".format(sys.platform) ) cache_dir = pathlib.Path.cwd() / pathlib.Path("cache") try: print("Downloading dependencies...") cache_dir.mkdir() for dependency_name, url in dependencies.items(): print("Downloading {} at {}".format(dependency_name, url)) request = requests.get(url) basename = url.split("/")[-1] with open(cache_dir / basename, "wb") as handle: print(dependency_name, basename, cache_dir / basename) handle.write(request.content) except FileExistsError: print("Using cached dependencies...") share_dir = pathlib.Path.cwd() tools_dir = share_dir / "tools" louvain_dir = tools_dir / "louvain" louvain_dir.mkdir(parents=True, exist_ok=True) louvain_basename = dependencies["louvain"].split("/")[-1] louvain_path = louvain_dir / louvain_basename (cache_dir / louvain_basename).replace(louvain_path) with tarfile.open(louvain_path, "r:gz") as tar: tar.extractall() hmm_basename = dependencies["hmm_databases"].split("/")[-1] hmm_path = share_dir / hmm_basename (cache_dir / hmm_basename).replace(hmm_path) prodigal_basename = dependencies["prodigal"].split("/")[-1] prodigal_path = tools_dir / "prodigal" (cache_dir / prodigal_basename).replace(prodigal_path)
[ "def", "download_and_install_dependencies", "(", ")", ":", "try", ":", "import", "requests", "except", "ImportError", ":", "raise", "ValueError", "(", "\"Python 3.6+ is required.\"", ")", "dependencies", "=", "{", "\"hmm_databases\"", ":", "HMM_URL", "}", "if", "sys", ".", "platform", ".", "startswith", "(", "\"linux\"", ")", "or", "\"bsd\"", "in", "sys", ".", "platform", ":", "dependencies", "[", "\"prodigal\"", "]", "=", "\"{}.linux\"", ".", "format", "(", "BASE_PRODIGAL", ")", "dependencies", "[", "\"louvain\"", "]", "=", "(", "\"https://lip6.github.io/Louvain-BinaryBuild/\"", "\"louvain_linux.tar.gz\"", ")", "elif", "sys", ".", "platform", "==", "\"darwin\"", ":", "dependencies", "[", "\"prodigal\"", "]", "=", "\"{}.osx.10.9.5\"", ".", "format", "(", "BASE_PRODIGAL", ")", "dependencies", "[", "\"louvain\"", "]", "=", "(", "\"https://github.com/lip6/Louvain-BinaryBuilds/raw/osx/\"", "\"louvain_osx.tar.gz\"", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", "or", "sys", ".", "platform", "==", "\"cygwin\"", ":", "dependencies", "[", "\"prodigal\"", "]", "=", "\"{}.windows.exe\"", "dependencies", "[", "\"louvain\"", "]", "=", "(", "\"https://ci.appveyor.com/api/projects/yanntm/\"", "\"Louvain-BinaryBuild/artifacts/website/\"", "\"louvain_windows.tar.gz\"", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Your platform is not supported: {}\"", ".", "format", "(", "sys", ".", "platform", ")", ")", "cache_dir", "=", "pathlib", ".", "Path", ".", "cwd", "(", ")", "/", "pathlib", ".", "Path", "(", "\"cache\"", ")", "try", ":", "print", "(", "\"Downloading dependencies...\"", ")", "cache_dir", ".", "mkdir", "(", ")", "for", "dependency_name", ",", "url", "in", "dependencies", ".", "items", "(", ")", ":", "print", "(", "\"Downloading {} at {}\"", ".", "format", "(", "dependency_name", ",", "url", ")", ")", "request", "=", "requests", ".", "get", "(", "url", ")", "basename", "=", "url", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "with", "open", "(", "cache_dir", "/", "basename", ",", "\"wb\"", ")", "as", "handle", ":", "print", "(", "dependency_name", ",", "basename", ",", "cache_dir", "/", "basename", ")", "handle", ".", "write", "(", "request", ".", "content", ")", "except", "FileExistsError", ":", "print", "(", "\"Using cached dependencies...\"", ")", "share_dir", "=", "pathlib", ".", "Path", ".", "cwd", "(", ")", "tools_dir", "=", "share_dir", "/", "\"tools\"", "louvain_dir", "=", "tools_dir", "/", "\"louvain\"", "louvain_dir", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "louvain_basename", "=", "dependencies", "[", "\"louvain\"", "]", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "louvain_path", "=", "louvain_dir", "/", "louvain_basename", "(", "cache_dir", "/", "louvain_basename", ")", ".", "replace", "(", "louvain_path", ")", "with", "tarfile", ".", "open", "(", "louvain_path", ",", "\"r:gz\"", ")", "as", "tar", ":", "tar", ".", "extractall", "(", ")", "hmm_basename", "=", "dependencies", "[", "\"hmm_databases\"", "]", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "hmm_path", "=", "share_dir", "/", "hmm_basename", "(", "cache_dir", "/", "hmm_basename", ")", ".", "replace", "(", "hmm_path", ")", "prodigal_basename", "=", "dependencies", "[", "\"prodigal\"", "]", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "prodigal_path", "=", "tools_dir", "/", "\"prodigal\"", "(", "cache_dir", "/", "prodigal_basename", ")", ".", "replace", "(", "prodigal_path", ")" ]
Set up URLs and download dependencies for Python 3.6+
[ "Setup", "URLS", "and", "download", "dependencies", "for", "Python", "3", ".", "6", "+" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/metator.py#L20-L94
train
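A runnable distillation of two patterns in the function above: platform dispatch via `sys.platform`, and the `mkdir()`/`FileExistsError` idiom that doubles as a download cache. (The record's Windows branch appears to omit the `.format(BASE_PRODIGAL)` call the other branches apply, so this sketch keeps only the dispatch shape.)

```python
import pathlib
import sys

# Platform dispatch, as in the record (download URLs elided).
if sys.platform.startswith('linux') or 'bsd' in sys.platform:
    flavor = 'linux'
elif sys.platform == 'darwin':
    flavor = 'osx'
elif sys.platform.startswith('win') or sys.platform == 'cygwin':
    flavor = 'windows'
else:
    raise NotImplementedError('Your platform is not supported: %s' % sys.platform)

# mkdir() raises FileExistsError on the second run -> reuse the cache.
cache_dir = pathlib.Path.cwd() / 'cache'
try:
    cache_dir.mkdir()
    print('fresh cache for', flavor, '- downloading...')
except FileExistsError:
    print('using cached dependencies...')
```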
Godley/MuseParse
MuseParse/classes/ObjectHierarchy/ItemClasses/Directions.py
Text.get
def get(self): """ method to fetch all contents as a list :return: list """ ret_list = [] if hasattr(self, "font"): ret_list.append(self.font) if hasattr(self, "size"): ret_list.append(self.size) if hasattr(self, "text"): ret_list.append(self.text) return ret_list
python
def get(self): """ method to fetch all contents as a list :return: list """ ret_list = [] if hasattr(self, "font"): ret_list.append(self.font) if hasattr(self, "size"): ret_list.append(self.size) if hasattr(self, "text"): ret_list.append(self.text) return ret_list
[ "def", "get", "(", "self", ")", ":", "ret_list", "=", "[", "]", "if", "hasattr", "(", "self", ",", "\"font\"", ")", ":", "ret_list", ".", "append", "(", "self", ".", "font", ")", "if", "hasattr", "(", "self", ",", "\"size\"", ")", ":", "ret_list", ".", "append", "(", "self", ".", "size", ")", "if", "hasattr", "(", "self", ",", "\"text\"", ")", ":", "ret_list", ".", "append", "(", "self", ".", "text", ")", "return", "ret_list" ]
method to fetch all contents as a list :return: list
[ "method", "to", "fetch", "all", "contents", "as", "a", "list" ]
23cecafa1fdc0f2d6a87760553572b459f3c9904
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/ItemClasses/Directions.py#L33-L46
train
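A self-contained sketch of the optional-attribute pattern `get` implements: only attributes that were actually set appear in the returned list, always in font/size/text order. `TextDemo` is a stand-in, not the real class.

```python
class TextDemo:
    """Stand-in with the same get() contract as the record above."""
    def get(self):
        ret_list = []
        for name in ('font', 'size', 'text'):
            if hasattr(self, name):
                ret_list.append(getattr(self, name))
        return ret_list

t = TextDemo()
t.text = 'hello'
assert t.get() == ['hello']
t.font = 'Times'
assert t.get() == ['Times', 'hello']  # font precedes text regardless of set order
```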
MacHu-GWU/single_file_module-project
sfm/rerecipe.py
extract_by_prefix_surfix
def extract_by_prefix_surfix(text, prefix, surfix, minlen=None, maxlen=None, include=False): """Extract the text in between a prefix and surfix. It use non-greedy match. :param text: text body :type text: str :param prefix: the prefix :type prefix: str :param surfix: the surfix :type surfix: str :param minlen: the min matched string length :type minlen: int :param maxlen: the max matched string length :type maxlen: int :param include: whether if include prefix and surfix :type include: bool """ if minlen is None: minlen = 0 if maxlen is None: maxlen = 2 ** 30 pattern = r"""(?<=%s)[\s\S]{%s,%s}?(?=%s)""" % ( prefix, minlen, maxlen, surfix) if include: return [prefix + s + surfix for s in re.findall(pattern, text)] else: return re.findall(pattern, text)
python
def extract_by_prefix_surfix(text, prefix, surfix, minlen=None, maxlen=None, include=False): """Extract the text in between a prefix and surfix. It use non-greedy match. :param text: text body :type text: str :param prefix: the prefix :type prefix: str :param surfix: the surfix :type surfix: str :param minlen: the min matched string length :type minlen: int :param maxlen: the max matched string length :type maxlen: int :param include: whether if include prefix and surfix :type include: bool """ if minlen is None: minlen = 0 if maxlen is None: maxlen = 2 ** 30 pattern = r"""(?<=%s)[\s\S]{%s,%s}?(?=%s)""" % ( prefix, minlen, maxlen, surfix) if include: return [prefix + s + surfix for s in re.findall(pattern, text)] else: return re.findall(pattern, text)
[ "def", "extract_by_prefix_surfix", "(", "text", ",", "prefix", ",", "surfix", ",", "minlen", "=", "None", ",", "maxlen", "=", "None", ",", "include", "=", "False", ")", ":", "if", "minlen", "is", "None", ":", "minlen", "=", "0", "if", "maxlen", "is", "None", ":", "maxlen", "=", "2", "**", "30", "pattern", "=", "r\"\"\"(?<=%s)[\\s\\S]{%s,%s}?(?=%s)\"\"\"", "%", "(", "prefix", ",", "minlen", ",", "maxlen", ",", "surfix", ")", "if", "include", ":", "return", "[", "prefix", "+", "s", "+", "surfix", "for", "s", "in", "re", ".", "findall", "(", "pattern", ",", "text", ")", "]", "else", ":", "return", "re", ".", "findall", "(", "pattern", ",", "text", ")" ]
Extract the text in between a prefix and a suffix. It uses a non-greedy match.

:param text: text body
:type text: str
:param prefix: the prefix
:type prefix: str
:param surfix: the suffix
:type surfix: str
:param minlen: the min matched string length
:type minlen: int
:param maxlen: the max matched string length
:type maxlen: int
:param include: whether to include the prefix and suffix
:type include: bool
[ "Extract", "the", "text", "in", "between", "a", "prefix", "and", "surfix", ".", "It", "use", "non", "-", "greedy", "match", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rerecipe.py#L13-L48
train
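Example calls for the function above (it relies on the stdlib `re` module being imported at module level, as the record's other functions suggest). Note the prefix and suffix are interpolated into the regex unescaped, so metacharacters such as `(` must be passed pre-escaped (e.g. via `re.escape`).

```python
text = '<b>bold</b> and <b>also bold</b>'

# Non-greedy: each match stops at the nearest closing tag.
assert extract_by_prefix_surfix(text, '<b>', '</b>') == ['bold', 'also bold']

# include=True keeps the prefix/suffix around each match.
assert extract_by_prefix_surfix(text, '<b>', '</b>', include=True) == \
    ['<b>bold</b>', '<b>also bold</b>']
```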
MacHu-GWU/single_file_module-project
sfm/rerecipe.py
extract_number
def extract_number(text): """Extract digit character from text. """ result = list() chunk = list() valid_char = set(".1234567890") for char in text: if char in valid_char: chunk.append(char) else: result.append("".join(chunk)) chunk = list() result.append("".join(chunk)) result_new = list() for number in result: if "." in number: try: result_new.append(float(number)) except: pass else: try: result_new.append(int(number)) except: pass return result_new
python
def extract_number(text): """Extract digit character from text. """ result = list() chunk = list() valid_char = set(".1234567890") for char in text: if char in valid_char: chunk.append(char) else: result.append("".join(chunk)) chunk = list() result.append("".join(chunk)) result_new = list() for number in result: if "." in number: try: result_new.append(float(number)) except: pass else: try: result_new.append(int(number)) except: pass return result_new
[ "def", "extract_number", "(", "text", ")", ":", "result", "=", "list", "(", ")", "chunk", "=", "list", "(", ")", "valid_char", "=", "set", "(", "\".1234567890\"", ")", "for", "char", "in", "text", ":", "if", "char", "in", "valid_char", ":", "chunk", ".", "append", "(", "char", ")", "else", ":", "result", ".", "append", "(", "\"\"", ".", "join", "(", "chunk", ")", ")", "chunk", "=", "list", "(", ")", "result", ".", "append", "(", "\"\"", ".", "join", "(", "chunk", ")", ")", "result_new", "=", "list", "(", ")", "for", "number", "in", "result", ":", "if", "\".\"", "in", "number", ":", "try", ":", "result_new", ".", "append", "(", "float", "(", "number", ")", ")", "except", ":", "pass", "else", ":", "try", ":", "result_new", ".", "append", "(", "int", "(", "number", ")", ")", "except", ":", "pass", "return", "result_new" ]
Extract digit characters from text and parse them into ints and floats.
[ "Extract", "digit", "character", "from", "text", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rerecipe.py#L51-L78
train
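Example behavior of `extract_number`: digit runs are buffered, flushed on any other character, and then parsed; chunks that fail both `float()` and `int()` (such as `1.2.3`) are silently dropped by the bare excepts.

```python
assert extract_number('5 apples cost 7.5 dollars') == [5, 7.5]

# A chunk with two dots fails float() and is dropped.
assert extract_number('version 1.2.3') == []

# Ints and floats keep their original order of appearance.
assert extract_number('ids: 10, 2.5, 7') == [10, 2.5, 7]
```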
MacHu-GWU/single_file_module-project
sfm/rerecipe.py
extract_email
def extract_email(text): """Extract email from text. """ result = list() for tp in re.findall(_regex_extract_email, text.lower()): for email in tp: if re.match(_regex_validate_email, email): result.append(email) return result
python
def extract_email(text): """Extract email from text. """ result = list() for tp in re.findall(_regex_extract_email, text.lower()): for email in tp: if re.match(_regex_validate_email, email): result.append(email) return result
[ "def", "extract_email", "(", "text", ")", ":", "result", "=", "list", "(", ")", "for", "tp", "in", "re", ".", "findall", "(", "_regex_extract_email", ",", "text", ".", "lower", "(", ")", ")", ":", "for", "email", "in", "tp", ":", "if", "re", ".", "match", "(", "_regex_validate_email", ",", "email", ")", ":", "result", ".", "append", "(", "email", ")", "return", "result" ]
Extract email addresses from text.
[ "Extract", "email", "from", "text", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rerecipe.py#L87-L95
train
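A hedged example call; the module-level `_regex_extract_email` and `_regex_validate_email` patterns this function depends on live elsewhere in `sfm/rerecipe.py` and are not shown, so the expected output is an assumption about typical behavior (note the input is lowercased before matching).

```python
emails = extract_email('Contact John@Example.com or sales@example.org!')
print(emails)  # expected (assumed): ['john@example.com', 'sales@example.org']
```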
PSPC-SPAC-buyandsell/didauth
didauth/headers.py
HeaderSigner.sign
def sign(self, headers: Mapping, method=None, path=None): """ Add Signature Authorization header to case-insensitive header dict. `headers` is a case-insensitive dict of mutable headers. `host` is a override for the 'host' header (defaults to value in headers). `method` is the HTTP method (required when using '(request-target)'). `path` is the HTTP path (required when using '(request-target)'). """ required_headers = self.header_list message = generate_message(required_headers, headers, method, path) signature = encode_string(self._signer.sign(message), 'base64') ret_headers = multidict.CIMultiDict(headers) ret_headers['Authorization'] = self._signature_tpl % signature.decode('ascii') return ret_headers
python
def sign(self, headers: Mapping, method=None, path=None): """ Add Signature Authorization header to case-insensitive header dict. `headers` is a case-insensitive dict of mutable headers. `host` is a override for the 'host' header (defaults to value in headers). `method` is the HTTP method (required when using '(request-target)'). `path` is the HTTP path (required when using '(request-target)'). """ required_headers = self.header_list message = generate_message(required_headers, headers, method, path) signature = encode_string(self._signer.sign(message), 'base64') ret_headers = multidict.CIMultiDict(headers) ret_headers['Authorization'] = self._signature_tpl % signature.decode('ascii') return ret_headers
[ "def", "sign", "(", "self", ",", "headers", ":", "Mapping", ",", "method", "=", "None", ",", "path", "=", "None", ")", ":", "required_headers", "=", "self", ".", "header_list", "message", "=", "generate_message", "(", "required_headers", ",", "headers", ",", "method", ",", "path", ")", "signature", "=", "encode_string", "(", "self", ".", "_signer", ".", "sign", "(", "message", ")", ",", "'base64'", ")", "ret_headers", "=", "multidict", ".", "CIMultiDict", "(", "headers", ")", "ret_headers", "[", "'Authorization'", "]", "=", "self", ".", "_signature_tpl", "%", "signature", ".", "decode", "(", "'ascii'", ")", "return", "ret_headers" ]
Add Signature Authorization header to case-insensitive header dict.

`headers` is a case-insensitive dict of mutable headers.
`method` is the HTTP method (required when using '(request-target)').
`path` is the HTTP path (required when using '(request-target)').
[ "Add", "Signature", "Authorization", "header", "to", "case", "-", "insensitive", "header", "dict", "." ]
e242fff8eddebf6ed52a65b161a229cdfbf5226e
https://github.com/PSPC-SPAC-buyandsell/didauth/blob/e242fff8eddebf6ed52a65b161a229cdfbf5226e/didauth/headers.py#L60-L77
train
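A hedged usage sketch for `HeaderSigner.sign`. The constructor arguments are not shown in this record, so `signer` is assumed to be an already-configured instance whose `header_list` includes `(request-target)` (hence the `method`/`path` arguments).

```python
headers = {
    'Host': 'example.com',
    'Date': 'Tue, 07 Jun 2016 20:51:35 GMT',
}
signed = signer.sign(headers, method='POST', path='/api/resource')

# sign() returns a CIMultiDict copy with the Authorization header added.
print(signed['Authorization'])  # e.g. 'Signature keyId=...,signature=...'
```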
PSPC-SPAC-buyandsell/didauth
didauth/headers.py
HeaderVerifier.verify
async def verify(self, headers: Mapping, method=None, path=None): """ Parse Signature Authorization header and verify signature `headers` is a dict or multidict of headers `host` is a override for the 'host' header (defaults to value in headers). `method` is the HTTP method (required when using '(request-target)'). `path` is the HTTP path (required when using '(request-target)'). """ if not 'authorization' in headers: return False auth_type, auth_params = parse_authorization_header(headers['authorization']) if auth_type.lower() != 'signature': return False for param in ('algorithm', 'keyId', 'signature'): if param not in auth_params: raise VerifierException("Unsupported HTTP signature, missing '{}'".format(param)) auth_headers = (auth_params.get('headers') or 'date').lower().strip().split() missing_reqd = set(self._required_headers) - set(auth_headers) if missing_reqd: error_headers = ', '.join(missing_reqd) raise VerifierException( 'One or more required headers not provided: {}'.format(error_headers)) key_id, algo = auth_params['keyId'], auth_params['algorithm'] if not self._handlers.supports(algo): raise VerifierException("Unsupported HTTP signature algorithm '{}'".format(algo)) pubkey = await self._key_finder.find_key(key_id, algo) if not pubkey: raise VerifierException("Cannot locate public key for '{}'".format(key_id)) LOGGER.debug("Got %s public key for '%s': %s", algo, key_id, pubkey) handler = self._handlers.create_verifier(algo, pubkey) message = generate_message(auth_headers, headers, method, path) signature = auth_params['signature'] raw_signature = decode_string(signature, 'base64') if handler.verify(message, raw_signature): return { 'verified': True, 'algorithm': algo, 'headers': auth_headers, 'keyId': key_id, 'key': pubkey, 'signature': signature } raise VerifierException("Signature could not be verified for keyId '{}'".format(key_id))
python
async def verify(self, headers: Mapping, method=None, path=None): """ Parse Signature Authorization header and verify signature `headers` is a dict or multidict of headers `host` is a override for the 'host' header (defaults to value in headers). `method` is the HTTP method (required when using '(request-target)'). `path` is the HTTP path (required when using '(request-target)'). """ if not 'authorization' in headers: return False auth_type, auth_params = parse_authorization_header(headers['authorization']) if auth_type.lower() != 'signature': return False for param in ('algorithm', 'keyId', 'signature'): if param not in auth_params: raise VerifierException("Unsupported HTTP signature, missing '{}'".format(param)) auth_headers = (auth_params.get('headers') or 'date').lower().strip().split() missing_reqd = set(self._required_headers) - set(auth_headers) if missing_reqd: error_headers = ', '.join(missing_reqd) raise VerifierException( 'One or more required headers not provided: {}'.format(error_headers)) key_id, algo = auth_params['keyId'], auth_params['algorithm'] if not self._handlers.supports(algo): raise VerifierException("Unsupported HTTP signature algorithm '{}'".format(algo)) pubkey = await self._key_finder.find_key(key_id, algo) if not pubkey: raise VerifierException("Cannot locate public key for '{}'".format(key_id)) LOGGER.debug("Got %s public key for '%s': %s", algo, key_id, pubkey) handler = self._handlers.create_verifier(algo, pubkey) message = generate_message(auth_headers, headers, method, path) signature = auth_params['signature'] raw_signature = decode_string(signature, 'base64') if handler.verify(message, raw_signature): return { 'verified': True, 'algorithm': algo, 'headers': auth_headers, 'keyId': key_id, 'key': pubkey, 'signature': signature } raise VerifierException("Signature could not be verified for keyId '{}'".format(key_id))
[ "async", "def", "verify", "(", "self", ",", "headers", ":", "Mapping", ",", "method", "=", "None", ",", "path", "=", "None", ")", ":", "if", "not", "'authorization'", "in", "headers", ":", "return", "False", "auth_type", ",", "auth_params", "=", "parse_authorization_header", "(", "headers", "[", "'authorization'", "]", ")", "if", "auth_type", ".", "lower", "(", ")", "!=", "'signature'", ":", "return", "False", "for", "param", "in", "(", "'algorithm'", ",", "'keyId'", ",", "'signature'", ")", ":", "if", "param", "not", "in", "auth_params", ":", "raise", "VerifierException", "(", "\"Unsupported HTTP signature, missing '{}'\"", ".", "format", "(", "param", ")", ")", "auth_headers", "=", "(", "auth_params", ".", "get", "(", "'headers'", ")", "or", "'date'", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "missing_reqd", "=", "set", "(", "self", ".", "_required_headers", ")", "-", "set", "(", "auth_headers", ")", "if", "missing_reqd", ":", "error_headers", "=", "', '", ".", "join", "(", "missing_reqd", ")", "raise", "VerifierException", "(", "'One or more required headers not provided: {}'", ".", "format", "(", "error_headers", ")", ")", "key_id", ",", "algo", "=", "auth_params", "[", "'keyId'", "]", ",", "auth_params", "[", "'algorithm'", "]", "if", "not", "self", ".", "_handlers", ".", "supports", "(", "algo", ")", ":", "raise", "VerifierException", "(", "\"Unsupported HTTP signature algorithm '{}'\"", ".", "format", "(", "algo", ")", ")", "pubkey", "=", "await", "self", ".", "_key_finder", ".", "find_key", "(", "key_id", ",", "algo", ")", "if", "not", "pubkey", ":", "raise", "VerifierException", "(", "\"Cannot locate public key for '{}'\"", ".", "format", "(", "key_id", ")", ")", "LOGGER", ".", "debug", "(", "\"Got %s public key for '%s': %s\"", ",", "algo", ",", "key_id", ",", "pubkey", ")", "handler", "=", "self", ".", "_handlers", ".", "create_verifier", "(", "algo", ",", "pubkey", ")", "message", "=", "generate_message", "(", "auth_headers", ",", "headers", ",", "method", ",", "path", ")", "signature", "=", "auth_params", "[", "'signature'", "]", "raw_signature", "=", "decode_string", "(", "signature", ",", "'base64'", ")", "if", "handler", ".", "verify", "(", "message", ",", "raw_signature", ")", ":", "return", "{", "'verified'", ":", "True", ",", "'algorithm'", ":", "algo", ",", "'headers'", ":", "auth_headers", ",", "'keyId'", ":", "key_id", ",", "'key'", ":", "pubkey", ",", "'signature'", ":", "signature", "}", "raise", "VerifierException", "(", "\"Signature could not be verified for keyId '{}'\"", ".", "format", "(", "key_id", ")", ")" ]
Parse Signature Authorization header and verify signature

`headers` is a dict or multidict of headers.
`method` is the HTTP method (required when using '(request-target)').
`path` is the HTTP path (required when using '(request-target)').
[ "Parse", "Signature", "Authorization", "header", "and", "verify", "signature" ]
e242fff8eddebf6ed52a65b161a229cdfbf5226e
https://github.com/PSPC-SPAC-buyandsell/didauth/blob/e242fff8eddebf6ed52a65b161a229cdfbf5226e/didauth/headers.py#L92-L147
train
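A hedged async sketch for `HeaderVerifier.verify`; `verifier` is assumed to be a configured instance and `VerifierException` importable alongside it. Per the record: a missing or non-Signature Authorization header returns False, an unknown key or bad signature raises, and success returns a dict.

```python
async def check(verifier, headers):
    try:
        result = await verifier.verify(headers, method='POST', path='/api/resource')
    except VerifierException as e:
        print('rejected:', e)
        return False
    if result is False:
        print('no usable Signature header')
        return False
    print('verified keyId:', result['keyId'])
    return True
```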
TylerTemp/docpie
docpie/pie.py
Docpie.docpie
def docpie(self, argv=None): """match the argv for each usages, return dict. if argv is None, it will use sys.argv instead. if argv is str, it will call argv.split() first. this function will check the options in self.extra and handle it first. Which means it may not try to match any usages because of the checking. """ token = self._prepare_token(argv) # check first, raise after # so `-hwhatever` can trigger `-h` first self.check_flag_and_handler(token) if token.error is not None: # raise DocpieExit('%s\n\n%s' % (token.error, help_msg)) self.exception_handler(token.error) try: result, dashed = self._match(token) except DocpieExit as e: self.exception_handler(e) # if error is not None: # self.exception_handler(error) value = result.get_value(self.appeared_only, False) self.clear() self.update(value) if self.appeared_only: self._drop_non_appeared() logger.debug('get all matched value %s', self) rest = list(self.usages) # a copy rest.remove(result) self._add_rest_value(rest) logger.debug('merged rest values, now %s', self) self._add_option_value() self._dashes_value(dashed) return dict(self)
python
def docpie(self, argv=None): """match the argv for each usages, return dict. if argv is None, it will use sys.argv instead. if argv is str, it will call argv.split() first. this function will check the options in self.extra and handle it first. Which means it may not try to match any usages because of the checking. """ token = self._prepare_token(argv) # check first, raise after # so `-hwhatever` can trigger `-h` first self.check_flag_and_handler(token) if token.error is not None: # raise DocpieExit('%s\n\n%s' % (token.error, help_msg)) self.exception_handler(token.error) try: result, dashed = self._match(token) except DocpieExit as e: self.exception_handler(e) # if error is not None: # self.exception_handler(error) value = result.get_value(self.appeared_only, False) self.clear() self.update(value) if self.appeared_only: self._drop_non_appeared() logger.debug('get all matched value %s', self) rest = list(self.usages) # a copy rest.remove(result) self._add_rest_value(rest) logger.debug('merged rest values, now %s', self) self._add_option_value() self._dashes_value(dashed) return dict(self)
[ "def", "docpie", "(", "self", ",", "argv", "=", "None", ")", ":", "token", "=", "self", ".", "_prepare_token", "(", "argv", ")", "# check first, raise after", "# so `-hwhatever` can trigger `-h` first", "self", ".", "check_flag_and_handler", "(", "token", ")", "if", "token", ".", "error", "is", "not", "None", ":", "# raise DocpieExit('%s\\n\\n%s' % (token.error, help_msg))", "self", ".", "exception_handler", "(", "token", ".", "error", ")", "try", ":", "result", ",", "dashed", "=", "self", ".", "_match", "(", "token", ")", "except", "DocpieExit", "as", "e", ":", "self", ".", "exception_handler", "(", "e", ")", "# if error is not None:", "# self.exception_handler(error)", "value", "=", "result", ".", "get_value", "(", "self", ".", "appeared_only", ",", "False", ")", "self", ".", "clear", "(", ")", "self", ".", "update", "(", "value", ")", "if", "self", ".", "appeared_only", ":", "self", ".", "_drop_non_appeared", "(", ")", "logger", ".", "debug", "(", "'get all matched value %s'", ",", "self", ")", "rest", "=", "list", "(", "self", ".", "usages", ")", "# a copy", "rest", ".", "remove", "(", "result", ")", "self", ".", "_add_rest_value", "(", "rest", ")", "logger", ".", "debug", "(", "'merged rest values, now %s'", ",", "self", ")", "self", ".", "_add_option_value", "(", ")", "self", ".", "_dashes_value", "(", "dashed", ")", "return", "dict", "(", "self", ")" ]
Match the argv against each usage and return a dict.

If argv is None, sys.argv is used instead.
If argv is a str, argv.split() is called first.

This function checks the options in self.extra and handles them first,
which means it may not try to match any usages at all because of
that checking.
[ "match", "the", "argv", "for", "each", "usages", "return", "dict", "." ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L128-L168
train
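A minimal usage sketch, following docpie's documented conventions; the help text is a placeholder and the exact result keys depend on the doc string.

```python
pie = Docpie("""
Usage: prog [options] <file>

Options:
    -v, --verbose    be noisy
""")

args = pie.docpie('prog --verbose data.txt')  # str argv is split() first
print(args['--verbose'], args['<file>'])      # expected: True data.txt
```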
TylerTemp/docpie
docpie/pie.py
Docpie.clone_exception
def clone_exception(error, args): """ return a new cloned error when do: ``` try: do_sth() except BaseException as e: handle(e) def handle(error): # do sth with error raise e # <- won't work! This can generate a new cloned error of the same class Parameters ---------- error: the caught error args: the new args to init the cloned error Returns ------- new error of the same class """ new_error = error.__class__(*args) new_error.__dict__ = error.__dict__ return new_error
python
def clone_exception(error, args): """ return a new cloned error when do: ``` try: do_sth() except BaseException as e: handle(e) def handle(error): # do sth with error raise e # <- won't work! This can generate a new cloned error of the same class Parameters ---------- error: the caught error args: the new args to init the cloned error Returns ------- new error of the same class """ new_error = error.__class__(*args) new_error.__dict__ = error.__dict__ return new_error
[ "def", "clone_exception", "(", "error", ",", "args", ")", ":", "new_error", "=", "error", ".", "__class__", "(", "*", "args", ")", "new_error", ".", "__dict__", "=", "error", ".", "__dict__", "return", "new_error" ]
return a new cloned error

When you do:

```
try:
    do_sth()
except BaseException as e:
    handle(e)

def handle(error):
    # do sth with error
    raise e   # <- won't work: `e` is not in scope here!
```

this can generate a new cloned error of the same class to raise instead.

Parameters
----------
error: the caught error
args: the new args to init the cloned error

Returns
-------
new error of the same class
[ "return", "a", "new", "cloned", "error" ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L425-L454
train
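The pattern shown standalone (this is exactly what `clone_exception` packages up): build a new instance of the same exception class with different args and carry over the attribute dict so instance state survives.

```python
try:
    raise ValueError('original message')
except ValueError as caught:
    caught.hint = 'extra state'            # some attribute worth preserving
    clone = caught.__class__('annotated: ' + str(caught))
    clone.__dict__ = caught.__dict__       # instance state survives the clone
    print(type(clone).__name__, clone, clone.hint)
    # -> ValueError annotated: original message extra state
```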
TylerTemp/docpie
docpie/pie.py
Docpie.to_dict
def to_dict(self): # cls, self): """Convert Docpie into a JSONlizable dict. Use it in this way: pie = Docpie(__doc__) json.dumps(pie.convert_2_dict()) Note the `extra` info will be lost if you costomize that, because a function is not JSONlizable. You can use `set_config(extra={...})` to set it back. """ config = { 'stdopt': self.stdopt, 'attachopt': self.attachopt, 'attachvalue': self.attachvalue, 'auto2dashes': self.auto2dashes, 'case_sensitive': self.case_sensitive, 'namedoptions': self.namedoptions, 'appearedonly': self.appeared_only, 'optionsfirst': self.options_first, 'option_name': self.option_name, 'usage_name': self.usage_name, 'name': self.name, 'help': self.help, 'version': self.version } text = { 'doc': self.doc, 'usage_text': self.usage_text, 'option_sections': self.option_sections, } # option = [convert_2_dict(x) for x in self.options] option = {} for title, options in self.options.items(): option[title] = [convert_2_dict(x) for x in options] usage = [convert_2_dict(x) for x in self.usages] return { '__version__': self._version, '__class__': 'Docpie', '__config__': config, '__text__': text, 'option': option, 'usage': usage, 'option_names': [list(x) for x in self.opt_names], 'opt_names_required_max_args': self.opt_names_required_max_args }
python
def to_dict(self): # cls, self): """Convert Docpie into a JSONlizable dict. Use it in this way: pie = Docpie(__doc__) json.dumps(pie.convert_2_dict()) Note the `extra` info will be lost if you costomize that, because a function is not JSONlizable. You can use `set_config(extra={...})` to set it back. """ config = { 'stdopt': self.stdopt, 'attachopt': self.attachopt, 'attachvalue': self.attachvalue, 'auto2dashes': self.auto2dashes, 'case_sensitive': self.case_sensitive, 'namedoptions': self.namedoptions, 'appearedonly': self.appeared_only, 'optionsfirst': self.options_first, 'option_name': self.option_name, 'usage_name': self.usage_name, 'name': self.name, 'help': self.help, 'version': self.version } text = { 'doc': self.doc, 'usage_text': self.usage_text, 'option_sections': self.option_sections, } # option = [convert_2_dict(x) for x in self.options] option = {} for title, options in self.options.items(): option[title] = [convert_2_dict(x) for x in options] usage = [convert_2_dict(x) for x in self.usages] return { '__version__': self._version, '__class__': 'Docpie', '__config__': config, '__text__': text, 'option': option, 'usage': usage, 'option_names': [list(x) for x in self.opt_names], 'opt_names_required_max_args': self.opt_names_required_max_args }
[ "def", "to_dict", "(", "self", ")", ":", "# cls, self):", "config", "=", "{", "'stdopt'", ":", "self", ".", "stdopt", ",", "'attachopt'", ":", "self", ".", "attachopt", ",", "'attachvalue'", ":", "self", ".", "attachvalue", ",", "'auto2dashes'", ":", "self", ".", "auto2dashes", ",", "'case_sensitive'", ":", "self", ".", "case_sensitive", ",", "'namedoptions'", ":", "self", ".", "namedoptions", ",", "'appearedonly'", ":", "self", ".", "appeared_only", ",", "'optionsfirst'", ":", "self", ".", "options_first", ",", "'option_name'", ":", "self", ".", "option_name", ",", "'usage_name'", ":", "self", ".", "usage_name", ",", "'name'", ":", "self", ".", "name", ",", "'help'", ":", "self", ".", "help", ",", "'version'", ":", "self", ".", "version", "}", "text", "=", "{", "'doc'", ":", "self", ".", "doc", ",", "'usage_text'", ":", "self", ".", "usage_text", ",", "'option_sections'", ":", "self", ".", "option_sections", ",", "}", "# option = [convert_2_dict(x) for x in self.options]", "option", "=", "{", "}", "for", "title", ",", "options", "in", "self", ".", "options", ".", "items", "(", ")", ":", "option", "[", "title", "]", "=", "[", "convert_2_dict", "(", "x", ")", "for", "x", "in", "options", "]", "usage", "=", "[", "convert_2_dict", "(", "x", ")", "for", "x", "in", "self", ".", "usages", "]", "return", "{", "'__version__'", ":", "self", ".", "_version", ",", "'__class__'", ":", "'Docpie'", ",", "'__config__'", ":", "config", ",", "'__text__'", ":", "text", ",", "'option'", ":", "option", ",", "'usage'", ":", "usage", ",", "'option_names'", ":", "[", "list", "(", "x", ")", "for", "x", "in", "self", ".", "opt_names", "]", ",", "'opt_names_required_max_args'", ":", "self", ".", "opt_names_required_max_args", "}" ]
Convert Docpie into a JSON-serializable dict.

Use it in this way:

pie = Docpie(__doc__)
json.dumps(pie.to_dict())

Note the `extra` info will be lost if you customize it,
because a function is not JSON-serializable. You can use
`set_config(extra={...})` to set it back.
[ "Convert", "Docpie", "into", "a", "JSONlizable", "dict", "." ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L547-L597
train
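A minimal sketch using `to_dict` (the tiny usage string is a placeholder):

```python
import json

pie = Docpie('Usage: prog [-v]')
data = pie.to_dict()
blob = json.dumps(data)   # the dict is JSON-serializable by design
print(sorted(data)[:4])   # ['__class__', '__config__', '__text__', '__version__']
```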
TylerTemp/docpie
docpie/pie.py
Docpie.from_dict
def from_dict(cls, dic): """Convert dict generated by `convert_2_dict` into Docpie instance You can do this: pie = Docpie(__doc__) clone_pie = json.loads(pie.convert_2_docpie( json.dumps(pie.convert_2_dict()) )) Note if you changed `extra`, it will be lost. You can use `set_config(extra={...})` to set it back. """ if '__version__' not in dic: raise ValueError('Not support old docpie data') data_version = int(dic['__version__'].replace('.', '')) this_version = int(cls._version.replace('.', '')) logger.debug('this: %s, old: %s', this_version, data_version) if data_version < this_version: raise ValueError('Not support old docpie data') assert dic['__class__'] == 'Docpie' config = dic['__config__'] help = config.pop('help') version = config.pop('version') option_name = config.pop('option_name') usage_name = config.pop('usage_name') self = cls(None, **config) self.option_name = option_name self.usage_name = usage_name text = dic['__text__'] self.doc = text['doc'] self.usage_text = text['usage_text'] self.option_sections = text['option_sections'] self.opt_names = [set(x) for x in dic['option_names']] self.opt_names_required_max_args = dic['opt_names_required_max_args'] self.set_config(help=help, version=version) self.options = o = {} for title, options in dic['option'].items(): opt_ins = [convert_2_object(x, {}, self.namedoptions) for x in options] o[title] = opt_ins self.usages = [convert_2_object(x, self.options, self.namedoptions) for x in dic['usage']] return self
python
def from_dict(cls, dic): """Convert dict generated by `convert_2_dict` into Docpie instance You can do this: pie = Docpie(__doc__) clone_pie = json.loads(pie.convert_2_docpie( json.dumps(pie.convert_2_dict()) )) Note if you changed `extra`, it will be lost. You can use `set_config(extra={...})` to set it back. """ if '__version__' not in dic: raise ValueError('Not support old docpie data') data_version = int(dic['__version__'].replace('.', '')) this_version = int(cls._version.replace('.', '')) logger.debug('this: %s, old: %s', this_version, data_version) if data_version < this_version: raise ValueError('Not support old docpie data') assert dic['__class__'] == 'Docpie' config = dic['__config__'] help = config.pop('help') version = config.pop('version') option_name = config.pop('option_name') usage_name = config.pop('usage_name') self = cls(None, **config) self.option_name = option_name self.usage_name = usage_name text = dic['__text__'] self.doc = text['doc'] self.usage_text = text['usage_text'] self.option_sections = text['option_sections'] self.opt_names = [set(x) for x in dic['option_names']] self.opt_names_required_max_args = dic['opt_names_required_max_args'] self.set_config(help=help, version=version) self.options = o = {} for title, options in dic['option'].items(): opt_ins = [convert_2_object(x, {}, self.namedoptions) for x in options] o[title] = opt_ins self.usages = [convert_2_object(x, self.options, self.namedoptions) for x in dic['usage']] return self
[ "def", "from_dict", "(", "cls", ",", "dic", ")", ":", "if", "'__version__'", "not", "in", "dic", ":", "raise", "ValueError", "(", "'Not support old docpie data'", ")", "data_version", "=", "int", "(", "dic", "[", "'__version__'", "]", ".", "replace", "(", "'.'", ",", "''", ")", ")", "this_version", "=", "int", "(", "cls", ".", "_version", ".", "replace", "(", "'.'", ",", "''", ")", ")", "logger", ".", "debug", "(", "'this: %s, old: %s'", ",", "this_version", ",", "data_version", ")", "if", "data_version", "<", "this_version", ":", "raise", "ValueError", "(", "'Not support old docpie data'", ")", "assert", "dic", "[", "'__class__'", "]", "==", "'Docpie'", "config", "=", "dic", "[", "'__config__'", "]", "help", "=", "config", ".", "pop", "(", "'help'", ")", "version", "=", "config", ".", "pop", "(", "'version'", ")", "option_name", "=", "config", ".", "pop", "(", "'option_name'", ")", "usage_name", "=", "config", ".", "pop", "(", "'usage_name'", ")", "self", "=", "cls", "(", "None", ",", "*", "*", "config", ")", "self", ".", "option_name", "=", "option_name", "self", ".", "usage_name", "=", "usage_name", "text", "=", "dic", "[", "'__text__'", "]", "self", ".", "doc", "=", "text", "[", "'doc'", "]", "self", ".", "usage_text", "=", "text", "[", "'usage_text'", "]", "self", ".", "option_sections", "=", "text", "[", "'option_sections'", "]", "self", ".", "opt_names", "=", "[", "set", "(", "x", ")", "for", "x", "in", "dic", "[", "'option_names'", "]", "]", "self", ".", "opt_names_required_max_args", "=", "dic", "[", "'opt_names_required_max_args'", "]", "self", ".", "set_config", "(", "help", "=", "help", ",", "version", "=", "version", ")", "self", ".", "options", "=", "o", "=", "{", "}", "for", "title", ",", "options", "in", "dic", "[", "'option'", "]", ".", "items", "(", ")", ":", "opt_ins", "=", "[", "convert_2_object", "(", "x", ",", "{", "}", ",", "self", ".", "namedoptions", ")", "for", "x", "in", "options", "]", "o", "[", "title", "]", "=", "opt_ins", "self", ".", "usages", "=", "[", "convert_2_object", "(", "x", ",", "self", ".", "options", ",", "self", ".", "namedoptions", ")", "for", "x", "in", "dic", "[", "'usage'", "]", "]", "return", "self" ]
Convert dict generated by `to_dict` into a Docpie instance

You can do this:

pie = Docpie(__doc__)
clone_pie = Docpie.from_dict(json.loads(json.dumps(pie.to_dict())))

Note if you changed `extra`, it will be lost.
You can use `set_config(extra={...})` to set it back.
[ "Convert", "dict", "generated", "by", "convert_2_dict", "into", "Docpie", "instance" ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L602-L651
train
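And the inverse round trip, per the docstring above; note custom `extra` handlers do not survive serialization and must be restored with `set_config(extra={...})`.

```python
import json

pie = Docpie('Usage: prog [-v]')
clone = Docpie.from_dict(json.loads(json.dumps(pie.to_dict())))
assert isinstance(clone, Docpie)
assert clone.usage_text == pie.usage_text   # parsed state survives the trip
```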
TylerTemp/docpie
docpie/pie.py
Docpie.set_config
def set_config(self, **config): """Shadow all the current config.""" reinit = False if 'stdopt' in config: stdopt = config.pop('stdopt') reinit = (stdopt != self.stdopt) self.stdopt = stdopt if 'attachopt' in config: attachopt = config.pop('attachopt') reinit = reinit or (attachopt != self.attachopt) self.attachopt = attachopt if 'attachvalue' in config: attachvalue = config.pop('attachvalue') reinit = reinit or (attachvalue != self.attachvalue) self.attachvalue = attachvalue if 'auto2dashes' in config: self.auto2dashes = config.pop('auto2dashes') if 'name' in config: name = config.pop('name') reinit = reinit or (name != self.name) self.name = name if 'help' in config: self.help = config.pop('help') self._set_or_remove_extra_handler( self.help, ('--help', '-h'), self.help_handler) if 'version' in config: self.version = config.pop('version') self._set_or_remove_extra_handler( self.version is not None, ('--version', '-v'), self.version_handler) if 'case_sensitive' in config: case_sensitive = config.pop('case_sensitive') reinit = reinit or (case_sensitive != self.case_sensitive) self.case_sensitive = case_sensitive if 'optionsfirst' in config: self.options_first = config.pop('optionsfirst') if 'appearedonly' in config: self.appeared_only = config.pop('appearedonly') if 'namedoptions' in config: namedoptions = config.pop('namedoptions') reinit = reinit or (namedoptions != self.namedoptions) self.namedoptions = namedoptions if 'extra' in config: self.extra.update(self._formal_extra(config.pop('extra'))) if config: # should be empty raise ValueError( '`%s` %s not accepted key argument%s' % ( '`, `'.join(config), 'is' if len(config) == 1 else 'are', '' if len(config) == 1 else 's' )) if self.doc is not None and reinit: logger.warning( 'You changed the config that requires re-initialized' ' `Docpie` object. Create a new one instead' ) self._init()
python
def set_config(self, **config): """Shadow all the current config.""" reinit = False if 'stdopt' in config: stdopt = config.pop('stdopt') reinit = (stdopt != self.stdopt) self.stdopt = stdopt if 'attachopt' in config: attachopt = config.pop('attachopt') reinit = reinit or (attachopt != self.attachopt) self.attachopt = attachopt if 'attachvalue' in config: attachvalue = config.pop('attachvalue') reinit = reinit or (attachvalue != self.attachvalue) self.attachvalue = attachvalue if 'auto2dashes' in config: self.auto2dashes = config.pop('auto2dashes') if 'name' in config: name = config.pop('name') reinit = reinit or (name != self.name) self.name = name if 'help' in config: self.help = config.pop('help') self._set_or_remove_extra_handler( self.help, ('--help', '-h'), self.help_handler) if 'version' in config: self.version = config.pop('version') self._set_or_remove_extra_handler( self.version is not None, ('--version', '-v'), self.version_handler) if 'case_sensitive' in config: case_sensitive = config.pop('case_sensitive') reinit = reinit or (case_sensitive != self.case_sensitive) self.case_sensitive = case_sensitive if 'optionsfirst' in config: self.options_first = config.pop('optionsfirst') if 'appearedonly' in config: self.appeared_only = config.pop('appearedonly') if 'namedoptions' in config: namedoptions = config.pop('namedoptions') reinit = reinit or (namedoptions != self.namedoptions) self.namedoptions = namedoptions if 'extra' in config: self.extra.update(self._formal_extra(config.pop('extra'))) if config: # should be empty raise ValueError( '`%s` %s not accepted key argument%s' % ( '`, `'.join(config), 'is' if len(config) == 1 else 'are', '' if len(config) == 1 else 's' )) if self.doc is not None and reinit: logger.warning( 'You changed the config that requires re-initialized' ' `Docpie` object. Create a new one instead' ) self._init()
[ "def", "set_config", "(", "self", ",", "*", "*", "config", ")", ":", "reinit", "=", "False", "if", "'stdopt'", "in", "config", ":", "stdopt", "=", "config", ".", "pop", "(", "'stdopt'", ")", "reinit", "=", "(", "stdopt", "!=", "self", ".", "stdopt", ")", "self", ".", "stdopt", "=", "stdopt", "if", "'attachopt'", "in", "config", ":", "attachopt", "=", "config", ".", "pop", "(", "'attachopt'", ")", "reinit", "=", "reinit", "or", "(", "attachopt", "!=", "self", ".", "attachopt", ")", "self", ".", "attachopt", "=", "attachopt", "if", "'attachvalue'", "in", "config", ":", "attachvalue", "=", "config", ".", "pop", "(", "'attachvalue'", ")", "reinit", "=", "reinit", "or", "(", "attachvalue", "!=", "self", ".", "attachvalue", ")", "self", ".", "attachvalue", "=", "attachvalue", "if", "'auto2dashes'", "in", "config", ":", "self", ".", "auto2dashes", "=", "config", ".", "pop", "(", "'auto2dashes'", ")", "if", "'name'", "in", "config", ":", "name", "=", "config", ".", "pop", "(", "'name'", ")", "reinit", "=", "reinit", "or", "(", "name", "!=", "self", ".", "name", ")", "self", ".", "name", "=", "name", "if", "'help'", "in", "config", ":", "self", ".", "help", "=", "config", ".", "pop", "(", "'help'", ")", "self", ".", "_set_or_remove_extra_handler", "(", "self", ".", "help", ",", "(", "'--help'", ",", "'-h'", ")", ",", "self", ".", "help_handler", ")", "if", "'version'", "in", "config", ":", "self", ".", "version", "=", "config", ".", "pop", "(", "'version'", ")", "self", ".", "_set_or_remove_extra_handler", "(", "self", ".", "version", "is", "not", "None", ",", "(", "'--version'", ",", "'-v'", ")", ",", "self", ".", "version_handler", ")", "if", "'case_sensitive'", "in", "config", ":", "case_sensitive", "=", "config", ".", "pop", "(", "'case_sensitive'", ")", "reinit", "=", "reinit", "or", "(", "case_sensitive", "!=", "self", ".", "case_sensitive", ")", "self", ".", "case_sensitive", "=", "case_sensitive", "if", "'optionsfirst'", "in", "config", ":", "self", ".", "options_first", "=", "config", ".", "pop", "(", "'optionsfirst'", ")", "if", "'appearedonly'", "in", "config", ":", "self", ".", "appeared_only", "=", "config", ".", "pop", "(", "'appearedonly'", ")", "if", "'namedoptions'", "in", "config", ":", "namedoptions", "=", "config", ".", "pop", "(", "'namedoptions'", ")", "reinit", "=", "reinit", "or", "(", "namedoptions", "!=", "self", ".", "namedoptions", ")", "self", ".", "namedoptions", "=", "namedoptions", "if", "'extra'", "in", "config", ":", "self", ".", "extra", ".", "update", "(", "self", ".", "_formal_extra", "(", "config", ".", "pop", "(", "'extra'", ")", ")", ")", "if", "config", ":", "# should be empty", "raise", "ValueError", "(", "'`%s` %s not accepted key argument%s'", "%", "(", "'`, `'", ".", "join", "(", "config", ")", ",", "'is'", "if", "len", "(", "config", ")", "==", "1", "else", "'are'", ",", "''", "if", "len", "(", "config", ")", "==", "1", "else", "'s'", ")", ")", "if", "self", ".", "doc", "is", "not", "None", "and", "reinit", ":", "logger", ".", "warning", "(", "'You changed the config that requires re-initialized'", "' `Docpie` object. Create a new one instead'", ")", "self", ".", "_init", "(", ")" ]
Shadow all the current config.
[ "Shadow", "all", "the", "current", "config", "." ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L655-L714
train
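A short usage sketch of `set_config`, using only keys handled in the code above; unknown keys raise `ValueError`, and changing parser-affecting keys (`stdopt`, `attachopt`, `name`, ...) after a doc has been parsed triggers the re-initialization warning:

pie = Docpie(doc)                            # doc as in the earlier sketch
pie.set_config(help=True, version='0.1.0')   # wires up the -h/--help and -v/--version handlers
pie.set_config(optionsfirst=True)            # safe: does not require re-parsing

try:
    pie.set_config(no_such_key=1)
except ValueError as err:
    print(err)    # "`no_such_key` is not accepted key argument"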
TylerTemp/docpie
docpie/pie.py
Docpie.find_flag_alias
def find_flag_alias(self, flag): """Return alias set of a flag; return None if flag is not defined in "Options". """ for each in self.opt_names: if flag in each: result = set(each) # a copy result.remove(flag) return result return None
python
def find_flag_alias(self, flag): """Return alias set of a flag; return None if flag is not defined in "Options". """ for each in self.opt_names: if flag in each: result = set(each) # a copy result.remove(flag) return result return None
[ "def", "find_flag_alias", "(", "self", ",", "flag", ")", ":", "for", "each", "in", "self", ".", "opt_names", ":", "if", "flag", "in", "each", ":", "result", "=", "set", "(", "each", ")", "# a copy", "result", ".", "remove", "(", "flag", ")", "return", "result", "return", "None" ]
Return alias set of a flag; return None if flag is not defined in "Options".
[ "Return", "alias", "set", "of", "a", "flag", ";", "return", "None", "if", "flag", "is", "not", "defined", "in", "Options", "." ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L749-L758
train
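Per the docstring, `find_flag_alias` returns the other names of a flag declared in the `Options` section, or `None` when the flag is unknown; with a doc declaring "-h, --help" as in the earlier sketch:

aliases = pie.find_flag_alias('--help')
if aliases is None:
    print('--help is not declared in Options')
else:
    print(aliases)    # e.g. {'-h'} -- the queried flag itself is removed from the copy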
TylerTemp/docpie
docpie/pie.py
Docpie.set_auto_handler
def set_auto_handler(self, flag, handler):
    """Set pre-auto-handler for a flag.

    The handler must accept two arguments: first the `pie`, which
    refers to the current `Docpie` instance; second, the `flag`,
    which is the flag found in `argv`.

    Different from the `extra` argument, this also sets the same
    handler for every alias of the flag defined in the `Options`
    section.
    """
    assert flag.startswith('-') and flag not in ('-', '--')
    alias = self.find_flag_alias(flag) or []
    self.extra[flag] = handler
    for each in alias:
        self.extra[each] = handler
python
def set_auto_handler(self, flag, handler):
    """Set pre-auto-handler for a flag.

    The handler must accept two arguments: first the `pie`, which
    refers to the current `Docpie` instance; second, the `flag`,
    which is the flag found in `argv`.

    Different from the `extra` argument, this also sets the same
    handler for every alias of the flag defined in the `Options`
    section.
    """
    assert flag.startswith('-') and flag not in ('-', '--')
    alias = self.find_flag_alias(flag) or []
    self.extra[flag] = handler
    for each in alias:
        self.extra[each] = handler
[ "def", "set_auto_handler", "(", "self", ",", "flag", ",", "handler", ")", ":", "assert", "flag", ".", "startswith", "(", "'-'", ")", "and", "flag", "not", "in", "(", "'-'", ",", "'--'", ")", "alias", "=", "self", ".", "find_flag_alias", "(", "flag", ")", "or", "[", "]", "self", ".", "extra", "[", "flag", "]", "=", "handler", "for", "each", "in", "alias", ":", "self", ".", "extra", "[", "each", "]", "=", "handler" ]
Set pre-auto-handler for a flag.

    The handler must accept two arguments: first the `pie`, which
    refers to the current `Docpie` instance; second, the `flag`,
    which is the flag found in `argv`.

    Different from the `extra` argument, this also sets the same
    handler for every alias of the flag defined in the `Options`
    section.
[ "Set", "pre", "-", "auto", "-", "handler", "for", "a", "flag", "." ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L760-L775
train
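A sketch of a custom pre-auto-handler; the two-argument signature comes straight from the docstring above, and the flag name here is hypothetical:

def on_debug(pie, flag):
    # receives the Docpie instance and the exact flag that appeared in argv
    print('%s triggered on program %s' % (flag, pie.name))

pie.set_auto_handler('--debug', on_debug)
# any alias of --debug declared in the Options section now shares this handler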
TylerTemp/docpie
docpie/pie.py
Docpie.preview
def preview(self, stream=sys.stdout):
    """A quick preview of docpie. Print all the parsed objects"""
    write = stream.write
    write(('[Quick preview of Docpie %s]' % self._version).center(80, '='))
    write('\n')
    write(' sections '.center(80, '-'))
    write('\n')
    write(self.usage_text)
    write('\n')
    option_sections = self.option_sections
    if option_sections:
        write('\n')
        write('\n'.join(option_sections.values()))
    write('\n')
    write(' str '.center(80, '-'))
    write('\n[%s]\n' % self.usage_name)
    for each in self.usages:
        write(' %s\n' % each)
    write('\n[Options:]\n\n')
    for title, sections in self.options.items():
        if title:
            full_title = '%s %s' % (title, self.option_name)
        else:
            full_title = self.option_name
        write(full_title)
        write('\n')
        for each in sections:
            write(' %s\n' % each)
        write('\n')
    write(' repr '.center(80, '-'))
    write('\n[%s]\n' % self.usage_name)
    for each in self.usages:
        write(' %r\n' % each)
    write('\n[Options:]\n\n')
    for title, sections in self.options.items():
        if title:
            full_title = '%s %s' % (title, self.option_name)
        else:
            full_title = self.option_name
        write(full_title)
        write('\n')
        for each in sections:
            write(' %r\n' % each)
        write('\n')
    write(' auto handlers '.center(80, '-'))
    write('\n')
    for key, value in self.extra.items():
        write('%s %s\n' % (key, value))
python
def preview(self, stream=sys.stdout):
    """A quick preview of docpie. Print all the parsed objects"""
    write = stream.write
    write(('[Quick preview of Docpie %s]' % self._version).center(80, '='))
    write('\n')
    write(' sections '.center(80, '-'))
    write('\n')
    write(self.usage_text)
    write('\n')
    option_sections = self.option_sections
    if option_sections:
        write('\n')
        write('\n'.join(option_sections.values()))
    write('\n')
    write(' str '.center(80, '-'))
    write('\n[%s]\n' % self.usage_name)
    for each in self.usages:
        write(' %s\n' % each)
    write('\n[Options:]\n\n')
    for title, sections in self.options.items():
        if title:
            full_title = '%s %s' % (title, self.option_name)
        else:
            full_title = self.option_name
        write(full_title)
        write('\n')
        for each in sections:
            write(' %s\n' % each)
        write('\n')
    write(' repr '.center(80, '-'))
    write('\n[%s]\n' % self.usage_name)
    for each in self.usages:
        write(' %r\n' % each)
    write('\n[Options:]\n\n')
    for title, sections in self.options.items():
        if title:
            full_title = '%s %s' % (title, self.option_name)
        else:
            full_title = self.option_name
        write(full_title)
        write('\n')
        for each in sections:
            write(' %r\n' % each)
        write('\n')
    write(' auto handlers '.center(80, '-'))
    write('\n')
    for key, value in self.extra.items():
        write('%s %s\n' % (key, value))
[ "def", "preview", "(", "self", ",", "stream", "=", "sys", ".", "stdout", ")", ":", "write", "=", "stream", ".", "write", "write", "(", "(", "'[Quick preview of Docpie %s]'", "%", "self", ".", "_version", ")", ".", "center", "(", "80", ",", "'='", ")", ")", "write", "(", "'\\n'", ")", "write", "(", "' sections '", ".", "center", "(", "80", ",", "'-'", ")", ")", "write", "(", "'\\n'", ")", "write", "(", "self", ".", "usage_text", ")", "write", "(", "'\\n'", ")", "option_sections", "=", "self", ".", "option_sections", "if", "option_sections", ":", "write", "(", "'\\n'", ")", "write", "(", "'\\n'", ".", "join", "(", "option_sections", ".", "values", "(", ")", ")", ")", "write", "(", "'\\n'", ")", "write", "(", "' str '", ".", "center", "(", "80", ",", "'-'", ")", ")", "write", "(", "'\\n[%s]\\n'", "%", "self", ".", "usage_name", ")", "for", "each", "in", "self", ".", "usages", ":", "write", "(", "' %s\\n'", "%", "each", ")", "write", "(", "'\\n[Options:]\\n\\n'", ")", "for", "title", ",", "sections", "in", "self", ".", "options", ".", "items", "(", ")", ":", "if", "title", ":", "full_title", "=", "'%s %s'", "%", "(", "title", ",", "self", ".", "option_name", ")", "else", ":", "full_title", "=", "self", ".", "option_name", "write", "(", "full_title", ")", "write", "(", "'\\n'", ")", "for", "each", "in", "sections", ":", "write", "(", "' %s\\n'", "%", "each", ")", "write", "(", "'\\n'", ")", "write", "(", "' repr '", ".", "center", "(", "80", ",", "'-'", ")", ")", "write", "(", "'\\n[%s]\\n'", "%", "self", ".", "usage_name", ")", "for", "each", "in", "self", ".", "usages", ":", "write", "(", "' %r\\n'", "%", "each", ")", "write", "(", "'\\n[Options:]\\n\\n'", ")", "for", "title", ",", "sections", "in", "self", ".", "options", ".", "items", "(", ")", ":", "if", "title", ":", "full_title", "=", "'%s %s'", "%", "(", "title", ",", "self", ".", "option_name", ")", "else", ":", "full_title", "=", "self", ".", "option_name", "write", "(", "full_title", ")", "write", "(", "'\\n'", ")", "for", "each", "in", "sections", ":", "write", "(", "' %r\\n'", "%", "each", ")", "write", "(", "'\\n'", ")", "write", "(", "' auto handlers '", ".", "center", "(", "80", ",", "'-'", ")", ")", "write", "(", "'\\n'", ")", "for", "key", ",", "value", "in", "self", ".", "extra", ".", "items", "(", ")", ":", "write", "(", "'%s %s\\n'", "%", "(", "key", ",", "value", ")", ")" ]
A quick preview of docpie. Print all the parsed objects
[ "A", "quick", "preview", "of", "docpie", ".", "Print", "all", "the", "parsed", "object" ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L777-L837
train
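`preview` writes to any object exposing a `write` method and defaults to `sys.stdout`, so the report can also be captured in memory:

import io

pie.preview()               # dump sections, str/repr forms and auto handlers to stdout
buf = io.StringIO()
pie.preview(stream=buf)     # or capture the same report for logging
report = buf.getvalue()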
nugget/python-anthemav
anthemav/protocol.py
AVR.refresh_core
def refresh_core(self): """Query device for all attributes that exist regardless of power state. This will force a refresh for all device queries that are valid to request at any time. It's the only safe suite of queries that we can make if we do not know the current state (on or off+standby). This does not return any data, it just issues the queries. """ self.log.info('Sending out mass query for all attributes') for key in ATTR_CORE: self.query(key)
python
def refresh_core(self): """Query device for all attributes that exist regardless of power state. This will force a refresh for all device queries that are valid to request at any time. It's the only safe suite of queries that we can make if we do not know the current state (on or off+standby). This does not return any data, it just issues the queries. """ self.log.info('Sending out mass query for all attributes') for key in ATTR_CORE: self.query(key)
[ "def", "refresh_core", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'Sending out mass query for all attributes'", ")", "for", "key", "in", "ATTR_CORE", ":", "self", ".", "query", "(", "key", ")" ]
Query device for all attributes that exist regardless of power state. This will force a refresh for all device queries that are valid to request at any time. It's the only safe suite of queries that we can make if we do not know the current state (on or off+standby). This does not return any data, it just issues the queries.
[ "Query", "device", "for", "all", "attributes", "that", "exist", "regardless", "of", "power", "state", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L109-L120
train
nugget/python-anthemav
anthemav/protocol.py
AVR.poweron_refresh
def poweron_refresh(self): """Keep requesting all attributes until it works. Immediately after a power on event (POW1) the AVR is inconsistent with which attributes can be successfully queried. When we detect that power has just been turned on, we loop every second making a bulk query for every known attribute. This continues until we detect that values have been returned for at least one input name (this seems to be the laggiest of all the attributes) """ if self._poweron_refresh_successful: return else: self.refresh_all() self._loop.call_later(2, self.poweron_refresh)
python
def poweron_refresh(self): """Keep requesting all attributes until it works. Immediately after a power on event (POW1) the AVR is inconsistent with which attributes can be successfully queried. When we detect that power has just been turned on, we loop every second making a bulk query for every known attribute. This continues until we detect that values have been returned for at least one input name (this seems to be the laggiest of all the attributes) """ if self._poweron_refresh_successful: return else: self.refresh_all() self._loop.call_later(2, self.poweron_refresh)
[ "def", "poweron_refresh", "(", "self", ")", ":", "if", "self", ".", "_poweron_refresh_successful", ":", "return", "else", ":", "self", ".", "refresh_all", "(", ")", "self", ".", "_loop", ".", "call_later", "(", "2", ",", "self", ".", "poweron_refresh", ")" ]
Keep requesting all attributes until it works. Immediately after a power on event (POW1) the AVR is inconsistent with which attributes can be successfully queried. When we detect that power has just been turned on, we loop every second making a bulk query for every known attribute. This continues until we detect that values have been returned for at least one input name (this seems to be the laggiest of all the attributes)
[ "Keep", "requesting", "all", "attributes", "until", "it", "works", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L122-L136
train
nugget/python-anthemav
anthemav/protocol.py
AVR.refresh_all
def refresh_all(self): """Query device for all attributes that are known. This will force a refresh for all device queries that the module is aware of. In theory, this will completely populate the internal state table for all attributes. This does not return any data, it just issues the queries. """ self.log.info('refresh_all') for key in LOOKUP: self.query(key)
python
def refresh_all(self): """Query device for all attributes that are known. This will force a refresh for all device queries that the module is aware of. In theory, this will completely populate the internal state table for all attributes. This does not return any data, it just issues the queries. """ self.log.info('refresh_all') for key in LOOKUP: self.query(key)
[ "def", "refresh_all", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'refresh_all'", ")", "for", "key", "in", "LOOKUP", ":", "self", ".", "query", "(", "key", ")" ]
Query device for all attributes that are known. This will force a refresh for all device queries that the module is aware of. In theory, this will completely populate the internal state table for all attributes. This does not return any data, it just issues the queries.
[ "Query", "device", "for", "all", "attributes", "that", "are", "known", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L139-L150
train
nugget/python-anthemav
anthemav/protocol.py
AVR.connection_made
def connection_made(self, transport): """Called when asyncio.Protocol establishes the network connection.""" self.log.info('Connection established to AVR') self.transport = transport #self.transport.set_write_buffer_limits(0) limit_low, limit_high = self.transport.get_write_buffer_limits() self.log.debug('Write buffer limits %d to %d', limit_low, limit_high) self.command('ECH1') self.refresh_core()
python
def connection_made(self, transport): """Called when asyncio.Protocol establishes the network connection.""" self.log.info('Connection established to AVR') self.transport = transport #self.transport.set_write_buffer_limits(0) limit_low, limit_high = self.transport.get_write_buffer_limits() self.log.debug('Write buffer limits %d to %d', limit_low, limit_high) self.command('ECH1') self.refresh_core()
[ "def", "connection_made", "(", "self", ",", "transport", ")", ":", "self", ".", "log", ".", "info", "(", "'Connection established to AVR'", ")", "self", ".", "transport", "=", "transport", "#self.transport.set_write_buffer_limits(0)", "limit_low", ",", "limit_high", "=", "self", ".", "transport", ".", "get_write_buffer_limits", "(", ")", "self", ".", "log", ".", "debug", "(", "'Write buffer limits %d to %d'", ",", "limit_low", ",", "limit_high", ")", "self", ".", "command", "(", "'ECH1'", ")", "self", ".", "refresh_core", "(", ")" ]
Called when asyncio.Protocol establishes the network connection.
[ "Called", "when", "asyncio", ".", "Protocol", "establishes", "the", "network", "connection", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L157-L167
train
nugget/python-anthemav
anthemav/protocol.py
AVR.data_received
def data_received(self, data): """Called when asyncio.Protocol detects received data from network.""" self.buffer += data.decode() self.log.debug('Received %d bytes from AVR: %s', len(self.buffer), self.buffer) self._assemble_buffer()
python
def data_received(self, data): """Called when asyncio.Protocol detects received data from network.""" self.buffer += data.decode() self.log.debug('Received %d bytes from AVR: %s', len(self.buffer), self.buffer) self._assemble_buffer()
[ "def", "data_received", "(", "self", ",", "data", ")", ":", "self", ".", "buffer", "+=", "data", ".", "decode", "(", ")", "self", ".", "log", ".", "debug", "(", "'Received %d bytes from AVR: %s'", ",", "len", "(", "self", ".", "buffer", ")", ",", "self", ".", "buffer", ")", "self", ".", "_assemble_buffer", "(", ")" ]
Called when asyncio.Protocol detects received data from network.
[ "Called", "when", "asyncio", ".", "Protocol", "detects", "received", "data", "from", "network", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L169-L173
train
nugget/python-anthemav
anthemav/protocol.py
AVR.connection_lost
def connection_lost(self, exc): """Called when asyncio.Protocol loses the network connection.""" if exc is None: self.log.warning('eof from receiver?') else: self.log.warning('Lost connection to receiver: %s', exc) self.transport = None if self._connection_lost_callback: self._loop.call_soon(self._connection_lost_callback)
python
def connection_lost(self, exc): """Called when asyncio.Protocol loses the network connection.""" if exc is None: self.log.warning('eof from receiver?') else: self.log.warning('Lost connection to receiver: %s', exc) self.transport = None if self._connection_lost_callback: self._loop.call_soon(self._connection_lost_callback)
[ "def", "connection_lost", "(", "self", ",", "exc", ")", ":", "if", "exc", "is", "None", ":", "self", ".", "log", ".", "warning", "(", "'eof from receiver?'", ")", "else", ":", "self", ".", "log", ".", "warning", "(", "'Lost connection to receiver: %s'", ",", "exc", ")", "self", ".", "transport", "=", "None", "if", "self", ".", "_connection_lost_callback", ":", "self", ".", "_loop", ".", "call_soon", "(", "self", ".", "_connection_lost_callback", ")" ]
Called when asyncio.Protocol loses the network connection.
[ "Called", "when", "asyncio", ".", "Protocol", "loses", "the", "network", "connection", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L175-L185
train
nugget/python-anthemav
anthemav/protocol.py
AVR._assemble_buffer
def _assemble_buffer(self): """Split up received data from device into individual commands. Data sent by the device is a sequence of datagrams separated by semicolons. It's common to receive a burst of them all in one submission when there's a lot of device activity. This function disassembles the chain of datagrams into individual messages which are then passed on for interpretation. """ self.transport.pause_reading() for message in self.buffer.split(';'): if message != '': self.log.debug('assembled message '+message) self._parse_message(message) self.buffer = "" self.transport.resume_reading() return
python
def _assemble_buffer(self): """Split up received data from device into individual commands. Data sent by the device is a sequence of datagrams separated by semicolons. It's common to receive a burst of them all in one submission when there's a lot of device activity. This function disassembles the chain of datagrams into individual messages which are then passed on for interpretation. """ self.transport.pause_reading() for message in self.buffer.split(';'): if message != '': self.log.debug('assembled message '+message) self._parse_message(message) self.buffer = "" self.transport.resume_reading() return
[ "def", "_assemble_buffer", "(", "self", ")", ":", "self", ".", "transport", ".", "pause_reading", "(", ")", "for", "message", "in", "self", ".", "buffer", ".", "split", "(", "';'", ")", ":", "if", "message", "!=", "''", ":", "self", ".", "log", ".", "debug", "(", "'assembled message '", "+", "message", ")", "self", ".", "_parse_message", "(", "message", ")", "self", ".", "buffer", "=", "\"\"", "self", ".", "transport", ".", "resume_reading", "(", ")", "return" ]
Split up received data from device into individual commands. Data sent by the device is a sequence of datagrams separated by semicolons. It's common to receive a burst of them all in one submission when there's a lot of device activity. This function disassembles the chain of datagrams into individual messages which are then passed on for interpretation.
[ "Split", "up", "received", "data", "from", "device", "into", "individual", "commands", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L187-L206
train
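The disassembly step in `_assemble_buffer` is a plain split on the semicolon separator with empty fragments dropped; a standalone illustration (the datagram values here are made up):

buffer = 'Z1POW1;Z1VOL-50;;'
messages = [m for m in buffer.split(';') if m != '']
print(messages)    # ['Z1POW1', 'Z1VOL-50'] -- each would be handed to _parse_message()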
nugget/python-anthemav
anthemav/protocol.py
AVR._populate_inputs
def _populate_inputs(self, total): """Request the names for all active, configured inputs on the device. Once we learn how many inputs are configured, this function is called which will ask for the name of each active input. """ total = total + 1 for input_number in range(1, total): self.query('ISN'+str(input_number).zfill(2))
python
def _populate_inputs(self, total): """Request the names for all active, configured inputs on the device. Once we learn how many inputs are configured, this function is called which will ask for the name of each active input. """ total = total + 1 for input_number in range(1, total): self.query('ISN'+str(input_number).zfill(2))
[ "def", "_populate_inputs", "(", "self", ",", "total", ")", ":", "total", "=", "total", "+", "1", "for", "input_number", "in", "range", "(", "1", ",", "total", ")", ":", "self", ".", "query", "(", "'ISN'", "+", "str", "(", "input_number", ")", ".", "zfill", "(", "2", ")", ")" ]
Request the names for all active, configured inputs on the device. Once we learn how many inputs are configured, this function is called which will ask for the name of each active input.
[ "Request", "the", "names", "for", "all", "active", "configured", "inputs", "on", "the", "device", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L208-L216
train
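The query names built by `_populate_inputs` are simply `ISN` plus a zero-padded input number, equivalent to:

total = 3
queries = ['ISN' + str(n).zfill(2) for n in range(1, total + 1)]
print(queries)    # ['ISN01', 'ISN02', 'ISN03']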
nugget/python-anthemav
anthemav/protocol.py
AVR.formatted_command
def formatted_command(self, command):
    """Issue a raw, formatted command to the device. This function is invoked by both query and command and is the point where we actually send bytes out over the network. This function does the wrapping and formatting required by the Anthem API so that the higher-level function can just operate with regular strings without the burden of byte encoding and terminating device requests. :param command: Any command as documented in the Anthem API :type command: str :Example: >>> formatted_command('Z1VOL-50') """ command = command.encode() self.log.debug('> %s', command) try: self.transport.write(command) time.sleep(0.01) except AttributeError: self.log.warning('No transport found, unable to send command')
python
def formatted_command(self, command):
    """Issue a raw, formatted command to the device. This function is invoked by both query and command and is the point where we actually send bytes out over the network. This function does the wrapping and formatting required by the Anthem API so that the higher-level function can just operate with regular strings without the burden of byte encoding and terminating device requests. :param command: Any command as documented in the Anthem API :type command: str :Example: >>> formatted_command('Z1VOL-50') """ command = command.encode() self.log.debug('> %s', command) try: self.transport.write(command) time.sleep(0.01) except AttributeError: self.log.warning('No transport found, unable to send command')
[ "def", "formatted_command", "(", "self", ",", "command", ")", ":", "command", "=", "command", "command", "=", "command", ".", "encode", "(", ")", "self", ".", "log", ".", "debug", "(", "'> %s'", ",", "command", ")", "try", ":", "self", ".", "transport", ".", "write", "(", "command", ")", "time", ".", "sleep", "(", "0.01", ")", "except", ":", "self", ".", "log", ".", "warning", "(", "'No transport found, unable to send command'", ")" ]
Issue a raw, formatted command to the device. This function is invoked by both query and command and is the point where we actually send bytes out over the network. This function does the wrapping and formatting required by the Anthem API so that the higher-level function can just operate with regular strings without the burden of byte encoding and terminating device requests. :param command: Any command as documented in the Anthem API :type command: str :Example: >>> formatted_command('Z1VOL-50')
[ "Issue", "a", "raw", "formatted", "command", "to", "the", "device", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L354-L378
train
nugget/python-anthemav
anthemav/protocol.py
AVR.dump_rawdata
def dump_rawdata(self): """Return contents of transport object for debugging forensics.""" if hasattr(self, 'transport'): attrs = vars(self.transport) return ', '.join("%s: %s" % item for item in attrs.items())
python
def dump_rawdata(self): """Return contents of transport object for debugging forensics.""" if hasattr(self, 'transport'): attrs = vars(self.transport) return ', '.join("%s: %s" % item for item in attrs.items())
[ "def", "dump_rawdata", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'transport'", ")", ":", "attrs", "=", "vars", "(", "self", ".", "transport", ")", "return", "', '", ".", "join", "(", "\"%s: %s\"", "%", "item", "for", "item", "in", "attrs", ".", "items", "(", ")", ")" ]
Return contents of transport object for debugging forensics.
[ "Return", "contents", "of", "transport", "object", "for", "debugging", "forensics", "." ]
c3cee38f2d452c1ab1335d9885e0769ec24d5f90
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L852-L856
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Batch.add_upsert
def add_upsert(self, value, criteria): """Add a tag or populator to the batch by value and criteria""" value = value.strip() v = value.lower() self.lower_val_to_val[v] = value criteria_array = self.upserts.get(v) if criteria_array is None: criteria_array = [] # start with # '{"value": "some_value", "criteria": []}, ' self.upserts_size[v] = 31 + len(value) criteria_array.append(criteria.to_dict()) self.upserts[v] = criteria_array self.upserts_size[v] += criteria.json_size()
python
def add_upsert(self, value, criteria): """Add a tag or populator to the batch by value and criteria""" value = value.strip() v = value.lower() self.lower_val_to_val[v] = value criteria_array = self.upserts.get(v) if criteria_array is None: criteria_array = [] # start with # '{"value": "some_value", "criteria": []}, ' self.upserts_size[v] = 31 + len(value) criteria_array.append(criteria.to_dict()) self.upserts[v] = criteria_array self.upserts_size[v] += criteria.json_size()
[ "def", "add_upsert", "(", "self", ",", "value", ",", "criteria", ")", ":", "value", "=", "value", ".", "strip", "(", ")", "v", "=", "value", ".", "lower", "(", ")", "self", ".", "lower_val_to_val", "[", "v", "]", "=", "value", "criteria_array", "=", "self", ".", "upserts", ".", "get", "(", "v", ")", "if", "criteria_array", "is", "None", ":", "criteria_array", "=", "[", "]", "# start with # '{\"value\": \"some_value\", \"criteria\": []}, '", "self", ".", "upserts_size", "[", "v", "]", "=", "31", "+", "len", "(", "value", ")", "criteria_array", ".", "append", "(", "criteria", ".", "to_dict", "(", ")", ")", "self", ".", "upserts", "[", "v", "]", "=", "criteria_array", "self", ".", "upserts_size", "[", "v", "]", "+=", "criteria", ".", "json_size", "(", ")" ]
Add a tag or populator to the batch by value and criteria
[ "Add", "a", "tag", "or", "populator", "to", "the", "batch", "by", "value", "and", "criteria" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L25-L38
train
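A sketch of assembling a batch with the methods shown in this file; the `Batch` and `Criteria` constructor signatures are assumptions (a replace-all flag and a traffic direction, respectively) and should be checked against the package:

from kentikapi.v5 import tagging

batch = tagging.Batch(True)          # assumption: True means replace_all
crit = tagging.Criteria('src')       # assumption: constructor takes a direction
crit.add_tcp_flag(2)                 # SYN, OR'd into the bitmask (see add_tcp_flag below)
batch.add_upsert('Web', crit)        # values are stripped and matched case-insensitively
batch.add_delete('stale-tag')        # deletes are processed before upserts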
kentik/kentikapi-py
kentikapi/v5/tagging.py
Batch.add_delete
def add_delete(self, value): """Delete a tag or populator by value - these are processed before upserts""" value = value.strip() v = value.lower() self.lower_val_to_val[v] = value if len(v) == 0: raise ValueError("Invalid value for delete. Value is empty.") self.deletes.add(v)
python
def add_delete(self, value): """Delete a tag or populator by value - these are processed before upserts""" value = value.strip() v = value.lower() self.lower_val_to_val[v] = value if len(v) == 0: raise ValueError("Invalid value for delete. Value is empty.") self.deletes.add(v)
[ "def", "add_delete", "(", "self", ",", "value", ")", ":", "value", "=", "value", ".", "strip", "(", ")", "v", "=", "value", ".", "lower", "(", ")", "self", ".", "lower_val_to_val", "[", "v", "]", "=", "value", "if", "len", "(", "v", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Invalid value for delete. Value is empty.\"", ")", "self", ".", "deletes", ".", "add", "(", "v", ")" ]
Delete a tag or populator by value - these are processed before upserts
[ "Delete", "a", "tag", "or", "populator", "by", "value", "-", "these", "are", "processed", "before", "upserts" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L40-L49
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Batch.parts
def parts(self): """Return an array of batch parts to submit""" parts = [] upserts = dict() deletes = [] # we keep track of the batch size as we go (pretty close approximation!) so we can chunk it small enough # to limit the HTTP posts to under 700KB - server limits to 750KB, so play it safe max_upload_size = 700000 # loop upserts first - fit the deletes in afterward # '{"replace_all": true, "complete": false, "guid": "6659fbfc-3f08-42ee-998c-9109f650f4b7", "upserts": [], "deletes": []}' base_part_size = 118 if not self.replace_all: base_part_size += 1 # yeah, this is totally overkill :) part_size = base_part_size for value in self.upserts: if (part_size + self.upserts_size[value]) >= max_upload_size: # this record would put us over the limit - close out the batch part and start a new one parts.append(BatchPart(self.replace_all, upserts, deletes)) upserts = dict() deletes = [] part_size = base_part_size # for the new upserts dict, drop the lower-casing of value upserts[self.lower_val_to_val[value]] = self.upserts[value] part_size += self.upserts_size[value] # updating the approximate size of the batch for value in self.deletes: # delete adds length of string plus quotes, comma and space if (part_size + len(value) + 4) >= max_upload_size: parts.append(BatchPart(self.replace_all, upserts, deletes)) upserts = dict() deletes = [] part_size = base_part_size # for the new deletes set, drop the lower-casing of value deletes.append({'value': self.lower_val_to_val[value]}) part_size += len(value) + 4 if len(upserts) + len(deletes) > 0: # finish the batch parts.append(BatchPart(self.replace_all, upserts, deletes)) if len(parts) == 0: if not self.replace_all: raise ValueError("Batch has no data, and 'replace_all' is False") parts.append(BatchPart(self.replace_all, dict(), [])) # last part finishes the batch parts[-1].set_last_part() return parts
python
def parts(self): """Return an array of batch parts to submit""" parts = [] upserts = dict() deletes = [] # we keep track of the batch size as we go (pretty close approximation!) so we can chunk it small enough # to limit the HTTP posts to under 700KB - server limits to 750KB, so play it safe max_upload_size = 700000 # loop upserts first - fit the deletes in afterward # '{"replace_all": true, "complete": false, "guid": "6659fbfc-3f08-42ee-998c-9109f650f4b7", "upserts": [], "deletes": []}' base_part_size = 118 if not self.replace_all: base_part_size += 1 # yeah, this is totally overkill :) part_size = base_part_size for value in self.upserts: if (part_size + self.upserts_size[value]) >= max_upload_size: # this record would put us over the limit - close out the batch part and start a new one parts.append(BatchPart(self.replace_all, upserts, deletes)) upserts = dict() deletes = [] part_size = base_part_size # for the new upserts dict, drop the lower-casing of value upserts[self.lower_val_to_val[value]] = self.upserts[value] part_size += self.upserts_size[value] # updating the approximate size of the batch for value in self.deletes: # delete adds length of string plus quotes, comma and space if (part_size + len(value) + 4) >= max_upload_size: parts.append(BatchPart(self.replace_all, upserts, deletes)) upserts = dict() deletes = [] part_size = base_part_size # for the new deletes set, drop the lower-casing of value deletes.append({'value': self.lower_val_to_val[value]}) part_size += len(value) + 4 if len(upserts) + len(deletes) > 0: # finish the batch parts.append(BatchPart(self.replace_all, upserts, deletes)) if len(parts) == 0: if not self.replace_all: raise ValueError("Batch has no data, and 'replace_all' is False") parts.append(BatchPart(self.replace_all, dict(), [])) # last part finishes the batch parts[-1].set_last_part() return parts
[ "def", "parts", "(", "self", ")", ":", "parts", "=", "[", "]", "upserts", "=", "dict", "(", ")", "deletes", "=", "[", "]", "# we keep track of the batch size as we go (pretty close approximation!) so we can chunk it small enough", "# to limit the HTTP posts to under 700KB - server limits to 750KB, so play it safe", "max_upload_size", "=", "700000", "# loop upserts first - fit the deletes in afterward", "# '{\"replace_all\": true, \"complete\": false, \"guid\": \"6659fbfc-3f08-42ee-998c-9109f650f4b7\", \"upserts\": [], \"deletes\": []}'", "base_part_size", "=", "118", "if", "not", "self", ".", "replace_all", ":", "base_part_size", "+=", "1", "# yeah, this is totally overkill :)", "part_size", "=", "base_part_size", "for", "value", "in", "self", ".", "upserts", ":", "if", "(", "part_size", "+", "self", ".", "upserts_size", "[", "value", "]", ")", ">=", "max_upload_size", ":", "# this record would put us over the limit - close out the batch part and start a new one", "parts", ".", "append", "(", "BatchPart", "(", "self", ".", "replace_all", ",", "upserts", ",", "deletes", ")", ")", "upserts", "=", "dict", "(", ")", "deletes", "=", "[", "]", "part_size", "=", "base_part_size", "# for the new upserts dict, drop the lower-casing of value", "upserts", "[", "self", ".", "lower_val_to_val", "[", "value", "]", "]", "=", "self", ".", "upserts", "[", "value", "]", "part_size", "+=", "self", ".", "upserts_size", "[", "value", "]", "# updating the approximate size of the batch", "for", "value", "in", "self", ".", "deletes", ":", "# delete adds length of string plus quotes, comma and space", "if", "(", "part_size", "+", "len", "(", "value", ")", "+", "4", ")", ">=", "max_upload_size", ":", "parts", ".", "append", "(", "BatchPart", "(", "self", ".", "replace_all", ",", "upserts", ",", "deletes", ")", ")", "upserts", "=", "dict", "(", ")", "deletes", "=", "[", "]", "part_size", "=", "base_part_size", "# for the new deletes set, drop the lower-casing of value", "deletes", ".", "append", "(", "{", "'value'", ":", "self", ".", "lower_val_to_val", "[", "value", "]", "}", ")", "part_size", "+=", "len", "(", "value", ")", "+", "4", "if", "len", "(", "upserts", ")", "+", "len", "(", "deletes", ")", ">", "0", ":", "# finish the batch", "parts", ".", "append", "(", "BatchPart", "(", "self", ".", "replace_all", ",", "upserts", ",", "deletes", ")", ")", "if", "len", "(", "parts", ")", "==", "0", ":", "if", "not", "self", ".", "replace_all", ":", "raise", "ValueError", "(", "\"Batch has no data, and 'replace_all' is False\"", ")", "parts", ".", "append", "(", "BatchPart", "(", "self", ".", "replace_all", ",", "dict", "(", ")", ",", "[", "]", ")", ")", "# last part finishes the batch", "parts", "[", "-", "1", "]", ".", "set_last_part", "(", ")", "return", "parts" ]
Return an array of batch parts to submit
[ "Return", "an", "array", "of", "batch", "parts", "to", "submit" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L51-L105
train
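The size bookkeeping in `parts` is a close approximation of the serialized JSON; the delete estimate of `len(value) + 4`, for instance, accounts for the two quotes around the string plus the comma and space between array entries:

value = 'some-tag'
estimate = len(value) + 4
print(estimate, len('"some-tag", '))    # 12 12 -- the estimate matches the serialized form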
kentik/kentikapi-py
kentikapi/v5/tagging.py
BatchPart.build_json
def build_json(self, guid): """Build JSON with the input guid""" upserts = [] for value in self.upserts: upserts.append({"value": value, "criteria": self.upserts[value]}) return json.dumps({'replace_all': self.replace_all, 'guid': guid, 'complete': self.complete, 'upserts': upserts, 'deletes': self.deletes})
python
def build_json(self, guid): """Build JSON with the input guid""" upserts = [] for value in self.upserts: upserts.append({"value": value, "criteria": self.upserts[value]}) return json.dumps({'replace_all': self.replace_all, 'guid': guid, 'complete': self.complete, 'upserts': upserts, 'deletes': self.deletes})
[ "def", "build_json", "(", "self", ",", "guid", ")", ":", "upserts", "=", "[", "]", "for", "value", "in", "self", ".", "upserts", ":", "upserts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"criteria\"", ":", "self", ".", "upserts", "[", "value", "]", "}", ")", "return", "json", ".", "dumps", "(", "{", "'replace_all'", ":", "self", ".", "replace_all", ",", "'guid'", ":", "guid", ",", "'complete'", ":", "self", ".", "complete", ",", "'upserts'", ":", "upserts", ",", "'deletes'", ":", "self", ".", "deletes", "}", ")" ]
Build JSON with the input guid
[ "Build", "JSON", "with", "the", "input", "guid" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L124-L130
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Criteria._ensure_field
def _ensure_field(self, key): """Ensure a non-array field""" if self._has_field: self._size += 2 # comma, space self._has_field = True self._size += len(key) + 4
python
def _ensure_field(self, key): """Ensure a non-array field""" if self._has_field: self._size += 2 # comma, space self._has_field = True self._size += len(key) + 4
[ "def", "_ensure_field", "(", "self", ",", "key", ")", ":", "if", "self", ".", "_has_field", ":", "self", ".", "_size", "+=", "2", "# comma, space", "self", ".", "_has_field", "=", "True", "self", ".", "_size", "+=", "len", "(", "key", ")", "+", "4" ]
Ensure a non-array field
[ "Ensure", "a", "non", "-", "array", "field" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L156-L162
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Criteria._ensure_array
def _ensure_array(self, key, value): """Ensure an array field""" if key not in self._json_dict: self._json_dict[key] = [] self._size += 2 # brackets self._ensure_field(key) if len(self._json_dict[key]) > 0: # this array already has an entry, so add comma and space self._size += 2 if isinstance(value, str): self._size += 2 # quotes self._size += len(str(value)) self._json_dict[key].append(value)
python
def _ensure_array(self, key, value): """Ensure an array field""" if key not in self._json_dict: self._json_dict[key] = [] self._size += 2 # brackets self._ensure_field(key) if len(self._json_dict[key]) > 0: # this array already has an entry, so add comma and space self._size += 2 if isinstance(value, str): self._size += 2 # quotes self._size += len(str(value)) self._json_dict[key].append(value)
[ "def", "_ensure_array", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "not", "in", "self", ".", "_json_dict", ":", "self", ".", "_json_dict", "[", "key", "]", "=", "[", "]", "self", ".", "_size", "+=", "2", "# brackets", "self", ".", "_ensure_field", "(", "key", ")", "if", "len", "(", "self", ".", "_json_dict", "[", "key", "]", ")", ">", "0", ":", "# this array already has an entry, so add comma and space", "self", ".", "_size", "+=", "2", "if", "isinstance", "(", "value", ",", "str", ")", ":", "self", ".", "_size", "+=", "2", "# quotes", "self", ".", "_size", "+=", "len", "(", "str", "(", "value", ")", ")", "self", ".", "_json_dict", "[", "key", "]", ".", "append", "(", "value", ")" ]
Ensure an array field
[ "Ensure", "an", "array", "field" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L164-L180
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Criteria.add_tcp_flag
def add_tcp_flag(self, tcp_flag):
    """Add a single TCP flag - will be OR'd into the existing bitmask"""
    if tcp_flag not in [1, 2, 4, 8, 16, 32, 64, 128]:
        raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16, 32, 64, 128]")

    prev_size = 0
    if self._json_dict.get('tcp_flags') is None:
        self._json_dict['tcp_flags'] = 0
    else:
        prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon

    self._json_dict['tcp_flags'] |= tcp_flag

    # update size
    new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon
    self._size += new_size - prev_size
    if prev_size == 0 and self._has_field:
        # add the comma and space
        self._size += 2
    self._has_field = True
python
def add_tcp_flag(self, tcp_flag):
    """Add a single TCP flag - will be OR'd into the existing bitmask"""
    if tcp_flag not in [1, 2, 4, 8, 16, 32, 64, 128]:
        raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16, 32, 64, 128]")

    prev_size = 0
    if self._json_dict.get('tcp_flags') is None:
        self._json_dict['tcp_flags'] = 0
    else:
        prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon

    self._json_dict['tcp_flags'] |= tcp_flag

    # update size
    new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon
    self._size += new_size - prev_size
    if prev_size == 0 and self._has_field:
        # add the comma and space
        self._size += 2
    self._has_field = True
[ "def", "add_tcp_flag", "(", "self", ",", "tcp_flag", ")", ":", "if", "tcp_flag", "not", "in", "[", "1", ",", "2", ",", "4", ",", "8", ",", "16", ",", "32", ",", "64", ",", "128", "]", ":", "raise", "ValueError", "(", "\"Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]\"", ")", "prev_size", "=", "0", "if", "self", ".", "_json_dict", ".", "get", "(", "'tcp_flags'", ")", "is", "None", ":", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", "=", "0", "else", ":", "prev_size", "=", "len", "(", "str", "(", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", ")", ")", "+", "len", "(", "'tcp_flags'", ")", "+", "3", "# str, key, key quotes, colon", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", "|=", "tcp_flag", "# update size", "new_size", "=", "len", "(", "str", "(", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", ")", ")", "+", "len", "(", "'tcp_flags'", ")", "+", "3", "# str, key, key quotes, colon", "self", ".", "_size", "+=", "new_size", "-", "prev_size", "if", "prev_size", "==", "0", "and", "self", ".", "_has_field", ":", "# add the comma and space", "self", ".", "_size", "+=", "2", "self", ".", "_has_field", "=", "True" ]
Add a single TCP flag - will be OR'd into the existing bitmask
[ "Add", "a", "single", "TCP", "flag", "-", "will", "be", "OR", "d", "into", "the", "existing", "bitmask" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L283-L304
train
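Successive `add_tcp_flag` calls accumulate bits with bitwise OR, so the stored bitmask is order-independent; under the standard TCP flag values (FIN=1, SYN=2, RST=4, PSH=8, ACK=16, URG=32):

flags = 0
for bit in (2, 16):    # SYN, then ACK
    flags |= bit
print(flags)           # 18, the same mask that set_tcp_flags(18) would store directly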
kentik/kentikapi-py
kentikapi/v5/tagging.py
Criteria.set_tcp_flags
def set_tcp_flags(self, tcp_flags): """Set the complete tcp flag bitmask""" if tcp_flags < 0 or tcp_flags > 255: raise ValueError("Invalid tcp_flags. Valid: 0-255.") prev_size = 0 if self._json_dict.get('tcp_flags') is not None: prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon self._json_dict['tcp_flags'] = tcp_flags # update size new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon self._size += new_size - prev_size if prev_size == 0 and self._has_field: # add the comma and space self._size += 2 self._has_field = True
python
def set_tcp_flags(self, tcp_flags): """Set the complete tcp flag bitmask""" if tcp_flags < 0 or tcp_flags > 255: raise ValueError("Invalid tcp_flags. Valid: 0-255.") prev_size = 0 if self._json_dict.get('tcp_flags') is not None: prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon self._json_dict['tcp_flags'] = tcp_flags # update size new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon self._size += new_size - prev_size if prev_size == 0 and self._has_field: # add the comma and space self._size += 2 self._has_field = True
[ "def", "set_tcp_flags", "(", "self", ",", "tcp_flags", ")", ":", "if", "tcp_flags", "<", "0", "or", "tcp_flags", ">", "255", ":", "raise", "ValueError", "(", "\"Invalid tcp_flags. Valid: 0-255.\"", ")", "prev_size", "=", "0", "if", "self", ".", "_json_dict", ".", "get", "(", "'tcp_flags'", ")", "is", "not", "None", ":", "prev_size", "=", "len", "(", "str", "(", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", ")", ")", "+", "len", "(", "'tcp_flags'", ")", "+", "3", "# str, key, key quotes, colon", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", "=", "tcp_flags", "# update size", "new_size", "=", "len", "(", "str", "(", "self", ".", "_json_dict", "[", "'tcp_flags'", "]", ")", ")", "+", "len", "(", "'tcp_flags'", ")", "+", "3", "# str, key, key quotes, colon", "self", ".", "_size", "+=", "new_size", "-", "prev_size", "if", "prev_size", "==", "0", "and", "self", ".", "_has_field", ":", "# add the comma and space", "self", ".", "_size", "+=", "2", "self", ".", "_has_field", "=", "True" ]
Set the complete tcp flag bitmask
[ "Set", "the", "complete", "tcp", "flag", "bitmask" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L306-L325
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Client._submit_batch
def _submit_batch(self, url, batch): """Submit the batch, returning the JSON->dict from the last HTTP response""" # TODO: validate column_name batch_parts = batch.parts() guid = "" headers = { 'User-Agent': 'kentik-python-api/0.1', 'Content-Type': 'application/json', 'X-CH-Auth-Email': self.api_email, 'X-CH-Auth-API-Token': self.api_token } # submit each part last_part = dict() for batch_part in batch_parts: # submit resp = requests.post(url, headers=headers, data=batch_part.build_json(guid)) # print the HTTP response to help debug print(resp.text) # break out at first sign of trouble resp.raise_for_status() last_part = resp.json() guid = last_part['guid'] if guid is None or len(guid) == 0: raise RuntimeError('guid not found in batch response') return last_part
python
def _submit_batch(self, url, batch): """Submit the batch, returning the JSON->dict from the last HTTP response""" # TODO: validate column_name batch_parts = batch.parts() guid = "" headers = { 'User-Agent': 'kentik-python-api/0.1', 'Content-Type': 'application/json', 'X-CH-Auth-Email': self.api_email, 'X-CH-Auth-API-Token': self.api_token } # submit each part last_part = dict() for batch_part in batch_parts: # submit resp = requests.post(url, headers=headers, data=batch_part.build_json(guid)) # print the HTTP response to help debug print(resp.text) # break out at first sign of trouble resp.raise_for_status() last_part = resp.json() guid = last_part['guid'] if guid is None or len(guid) == 0: raise RuntimeError('guid not found in batch response') return last_part
[ "def", "_submit_batch", "(", "self", ",", "url", ",", "batch", ")", ":", "# TODO: validate column_name", "batch_parts", "=", "batch", ".", "parts", "(", ")", "guid", "=", "\"\"", "headers", "=", "{", "'User-Agent'", ":", "'kentik-python-api/0.1'", ",", "'Content-Type'", ":", "'application/json'", ",", "'X-CH-Auth-Email'", ":", "self", ".", "api_email", ",", "'X-CH-Auth-API-Token'", ":", "self", ".", "api_token", "}", "# submit each part", "last_part", "=", "dict", "(", ")", "for", "batch_part", "in", "batch_parts", ":", "# submit", "resp", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "batch_part", ".", "build_json", "(", "guid", ")", ")", "# print the HTTP response to help debug", "print", "(", "resp", ".", "text", ")", "# break out at first sign of trouble", "resp", ".", "raise_for_status", "(", ")", "last_part", "=", "resp", ".", "json", "(", ")", "guid", "=", "last_part", "[", "'guid'", "]", "if", "guid", "is", "None", "or", "len", "(", "guid", ")", "==", "0", ":", "raise", "RuntimeError", "(", "'guid not found in batch response'", ")", "return", "last_part" ]
Submit the batch, returning the JSON->dict from the last HTTP response
[ "Submit", "the", "batch", "returning", "the", "JSON", "-", ">", "dict", "from", "the", "last", "HTTP", "response" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L400-L429
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Client.submit_populator_batch
def submit_populator_batch(self, column_name, batch): """Submit a populator batch Submit a populator batch as a series of HTTP requests in small chunks, returning the batch GUID, or raising exception on error.""" if not set(column_name).issubset(_allowedCustomDimensionChars): raise ValueError('Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name) if len(column_name) < 3 or len(column_name) > 20: raise ValueError('Invalid value "%s": must be between 3-20 characters' % column_name) url = '%s/api/v5/batch/customdimensions/%s/populators' % (self.base_url, column_name) resp_json_dict = self._submit_batch(url, batch) if resp_json_dict.get('error') is not None: raise RuntimeError('Error received from server: %s' % resp_json_dict['error']) return resp_json_dict['guid']
python
def submit_populator_batch(self, column_name, batch): """Submit a populator batch Submit a populator batch as a series of HTTP requests in small chunks, returning the batch GUID, or raising exception on error.""" if not set(column_name).issubset(_allowedCustomDimensionChars): raise ValueError('Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name) if len(column_name) < 3 or len(column_name) > 20: raise ValueError('Invalid value "%s": must be between 3-20 characters' % column_name) url = '%s/api/v5/batch/customdimensions/%s/populators' % (self.base_url, column_name) resp_json_dict = self._submit_batch(url, batch) if resp_json_dict.get('error') is not None: raise RuntimeError('Error received from server: %s' % resp_json_dict['error']) return resp_json_dict['guid']
[ "def", "submit_populator_batch", "(", "self", ",", "column_name", ",", "batch", ")", ":", "if", "not", "set", "(", "column_name", ")", ".", "issubset", "(", "_allowedCustomDimensionChars", ")", ":", "raise", "ValueError", "(", "'Invalid custom dimension name \"%s\": must only contain letters, digits, and underscores'", "%", "column_name", ")", "if", "len", "(", "column_name", ")", "<", "3", "or", "len", "(", "column_name", ")", ">", "20", ":", "raise", "ValueError", "(", "'Invalid value \"%s\": must be between 3-20 characters'", "%", "column_name", ")", "url", "=", "'%s/api/v5/batch/customdimensions/%s/populators'", "%", "(", "self", ".", "base_url", ",", "column_name", ")", "resp_json_dict", "=", "self", ".", "_submit_batch", "(", "url", ",", "batch", ")", "if", "resp_json_dict", ".", "get", "(", "'error'", ")", "is", "not", "None", ":", "raise", "RuntimeError", "(", "'Error received from server: %s'", "%", "resp_json_dict", "[", "'error'", "]", ")", "return", "resp_json_dict", "[", "'guid'", "]" ]
Submit a populator batch Submit a populator batch as a series of HTTP requests in small chunks, returning the batch GUID, or raising an exception on error.
[ "Submit", "a", "populator", "batch" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L431-L446
train
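The two validation rules above (charset plus 3-20 length) can be checked without a client. A small standalone sketch, with the allowed-character set assumed to be letters, digits, and underscores, exactly as the error message states:

```python
import string

_allowed = set(string.ascii_letters + string.digits + '_')  # assumed charset

def valid_dimension_name(name):
    """Mirrors the two checks: allowed characters and 3-20 length."""
    return set(name).issubset(_allowed) and 3 <= len(name) <= 20

print(valid_dimension_name('src_region'))  # True
print(valid_dimension_name('no'))          # False: too short
print(valid_dimension_name('bad-name'))    # False: hyphen not allowed
```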
kentik/kentikapi-py
kentikapi/v5/tagging.py
Client.submit_tag_batch
def submit_tag_batch(self, batch): """Submit a tag batch""" url = '%s/api/v5/batch/tags' % self.base_url self._submit_batch(url, batch)
python
def submit_tag_batch(self, batch): """Submit a tag batch""" url = '%s/api/v5/batch/tags' % self.base_url self._submit_batch(url, batch)
[ "def", "submit_tag_batch", "(", "self", ",", "batch", ")", ":", "url", "=", "'%s/api/v5/batch/tags'", "%", "self", ".", "base_url", "self", ".", "_submit_batch", "(", "url", ",", "batch", ")" ]
Submit a tag batch
[ "Submit", "a", "tag", "batch" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L448-L451
train
kentik/kentikapi-py
kentikapi/v5/tagging.py
Client.fetch_batch_status
def fetch_batch_status(self, guid): """Fetch the status of a batch, given the guid""" url = '%s/api/v5/batch/%s/status' % (self.base_url, guid) headers = { 'User-Agent': 'kentik-python-api/0.1', 'Content-Type': 'application/json', 'X-CH-Auth-Email': self.api_email, 'X-CH-Auth-API-Token': self.api_token } resp = requests.get(url, headers=headers) # break out at first sign of trouble resp.raise_for_status() return BatchResponse(guid, resp.json())
python
def fetch_batch_status(self, guid): """Fetch the status of a batch, given the guid""" url = '%s/api/v5/batch/%s/status' % (self.base_url, guid) headers = { 'User-Agent': 'kentik-python-api/0.1', 'Content-Type': 'application/json', 'X-CH-Auth-Email': self.api_email, 'X-CH-Auth-API-Token': self.api_token } resp = requests.get(url, headers=headers) # break out at first sign of trouble resp.raise_for_status() return BatchResponse(guid, resp.json())
[ "def", "fetch_batch_status", "(", "self", ",", "guid", ")", ":", "url", "=", "'%s/api/v5/batch/%s/status'", "%", "(", "self", ".", "base_url", ",", "guid", ")", "headers", "=", "{", "'User-Agent'", ":", "'kentik-python-api/0.1'", ",", "'Content-Type'", ":", "'application/json'", ",", "'X-CH-Auth-Email'", ":", "self", ".", "api_email", ",", "'X-CH-Auth-API-Token'", ":", "self", ".", "api_token", "}", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "# break out at first sign of trouble", "resp", ".", "raise_for_status", "(", ")", "return", "BatchResponse", "(", "guid", ",", "resp", ".", "json", "(", ")", ")" ]
Fetch the status of a batch, given the guid
[ "Fetch", "the", "status", "of", "a", "batch", "given", "the", "guid" ]
aa94c0b7eaf88409818b97967d7293e309e11bab
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L453-L467
train
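A batch is processed asynchronously, so `fetch_batch_status` is naturally used in a polling loop. A hedged sketch: `is_finished()` is a hypothetical accessor on `BatchResponse` (its real attributes depend on the JSON the status endpoint returns, which this record does not show):

```python
import time

def wait_for_batch(client, guid, interval=5, attempts=60):
    """Polls fetch_batch_status until the batch reports completion."""
    for _ in range(attempts):
        status = client.fetch_batch_status(guid)
        if status.is_finished():  # hypothetical accessor, for illustration
            return status
        time.sleep(interval)
    raise RuntimeError('batch %s did not finish in time' % guid)
```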
SMAPPNYU/pysmap
pysmap/mltools/crowd_model.py
CrowdModel.predict_files
def predict_files(self, files): ''' reads files off disk, resizes them and then predicts them, files should be a list or iterable of file paths that lead to images, they are then loaded with opencv, resized, and predicted ''' imgs = [0]*len(files) for i, file in enumerate(files): img = cv2.imread(file) if img is None: print('failed to open: {}, continuing...'.format(file)) continue img = cv2.resize(img, (224,224)).astype('float64') img = preprocess_input(img) imgs[i] = img return self.model.predict(np.array(imgs))
python
def predict_files(self, files): ''' reads files off disk, resizes them and then predicts them, files should be a list or iterable of file paths that lead to images, they are then loaded with opencv, resized, and predicted ''' imgs = [0]*len(files) for i, file in enumerate(files): img = cv2.imread(file) if img is None: print('failed to open: {}, continuing...'.format(file)) continue img = cv2.resize(img, (224,224)).astype('float64') img = preprocess_input(img) imgs[i] = img return self.model.predict(np.array(imgs))
[ "def", "predict_files", "(", "self", ",", "files", ")", ":", "imgs", "=", "[", "0", "]", "*", "len", "(", "files", ")", "for", "i", ",", "file", "in", "enumerate", "(", "files", ")", ":", "img", "=", "cv2", ".", "imread", "(", "file", ")", ".", "astype", "(", "'float64'", ")", "img", "=", "cv2", ".", "resize", "(", "img", ",", "(", "224", ",", "224", ")", ")", "img", "=", "preprocess_input", "(", "img", ")", "if", "img", "is", "None", ":", "print", "(", "'failed to open: {}, continuing...'", ".", "format", "(", "file", ")", ")", "imgs", "[", "i", "]", "=", "img", "return", "self", ".", "model", ".", "predict", "(", "np", ".", "array", "(", "imgs", ")", ")" ]
reads files off disk, resizes them and then predicts them, files should be a list or iterable of file paths that lead to images, they are then loaded with opencv, resized, and predicted
[ "reads", "files", "off", "disk", "resizes", "them", "and", "then", "predicts", "them", "files", "should", "be", "a", "list", "or", "itrerable", "of", "file", "paths", "that", "lead", "to", "images", "they", "are", "then", "loaded", "with", "opencv", "resized", "and", "predicted" ]
eb871992f40c53125129535e871525d5623c8c2d
https://github.com/SMAPPNYU/pysmap/blob/eb871992f40c53125129535e871525d5623c8c2d/pysmap/mltools/crowd_model.py#L44-L60
train
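The load/resize/cast pipeline from `predict_files` is easy to exercise on a single image. A standalone sketch using only OpenCV and NumPy; `preprocess_input` in the real class is a Keras helper imported elsewhere in crowd_model.py, so it is stubbed out here, and the file names are illustrative:

```python
import cv2
import numpy as np

def load_for_model(path, size=(224, 224)):
    """Same per-image pipeline as predict_files: read with OpenCV,
    bail out on a failed read, resize, and cast to float64."""
    img = cv2.imread(path)
    if img is None:  # cv2.imread returns None instead of raising
        return None
    # the real class then applies preprocess_input; stubbed as identity here
    return cv2.resize(img, size).astype('float64')

paths = ['a.jpg', 'b.jpg']  # illustrative
batch = [x for x in (load_for_model(p) for p in paths) if x is not None]
if batch:
    print(np.stack(batch).shape)  # (n, 224, 224, 3), ready for model.predict
```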
koszullab/metaTOR
metator/scripts/fasta_utils.py
rename_genome
def rename_genome(genome_in, genome_out=None): """Rename genome and slugify headers Rename genomes according to a simple naming scheme; this is mainly done to avoid special character weirdness. Parameters ---------- genome_in : file, str or pathlib.Path The input genome to be renamed and slugified. genome_out : file, str or pathlib.Path The output genome to be written into. Default is <base>_renamed.fa, where <base> is genome_in without its extension. """ if genome_out is None: genome_out = "{}_renamed.fa".format(genome_in.split(".")[0]) with open(genome_out, "w") as output_handle: for record in SeqIO.parse(genome_in, "fasta"): # Replace hyphens, tabs and whitespace with underscores new_record_id = record.id.replace(" ", "_") new_record_id = new_record_id.replace("-", "_") new_record_id = new_record_id.replace("\t", "_") # Remove anything that's weird, i.e. not alphanumeric # or an underscore new_record_id = re.sub("[^_A-Za-z0-9]+", "", new_record_id) header = ">{}\n".format(new_record_id) output_handle.write(header) output_handle.write("{}\n".format(str(record.seq)))
python
def rename_genome(genome_in, genome_out=None): """Rename genome and slugify headers Rename genomes according to a simple naming scheme; this is mainly done to avoid special character weirdness. Parameters ---------- genome_in : file, str or pathlib.Path The input genome to be renamed and slugified. genome_out : file, str or pathlib.Path The output genome to be written into. Default is <base>_renamed.fa, where <base> is genome_in without its extension. """ if genome_out is None: genome_out = "{}_renamed.fa".format(genome_in.split(".")[0]) with open(genome_out, "w") as output_handle: for record in SeqIO.parse(genome_in, "fasta"): # Replace hyphens, tabs and whitespace with underscores new_record_id = record.id.replace(" ", "_") new_record_id = new_record_id.replace("-", "_") new_record_id = new_record_id.replace("\t", "_") # Remove anything that's weird, i.e. not alphanumeric # or an underscore new_record_id = re.sub("[^_A-Za-z0-9]+", "", new_record_id) header = ">{}\n".format(new_record_id) output_handle.write(header) output_handle.write("{}\n".format(str(record.seq)))
[ "def", "rename_genome", "(", "genome_in", ",", "genome_out", "=", "None", ")", ":", "if", "genome_out", "is", "None", ":", "genome_out", "=", "\"{}_renamed.fa\"", ".", "format", "(", "genome_in", ".", "split", "(", "\".\"", ")", "[", "0", "]", ")", "with", "open", "(", "genome_out", ",", "\"w\"", ")", "as", "output_handle", ":", "for", "record", "in", "SeqIO", ".", "parse", "(", "genome_in", ",", "\"fasta\"", ")", ":", "# Replace hyphens, tabs and whitespace with underscores", "new_record_id", "=", "record", ".", "id", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "new_record_id", "=", "new_record_id", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "new_record_id", "=", "new_record_id", ".", "replace", "(", "\"\\t\"", ",", "\"_\"", ")", "# Remove anything that's weird, i.e. not alphanumeric", "# or an underscore", "new_record_id", "=", "re", ".", "sub", "(", "\"[^_A-Za-z0-9]+\"", ",", "\"\"", ",", "new_record_id", ")", "header", "=", "\">{}\\n\"", ".", "format", "(", "new_record_id", ")", "output_handle", ".", "write", "(", "header", ")", "output_handle", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "str", "(", "record", ".", "seq", ")", ")", ")" ]
Rename genome and slugify headers Rename genomes according to a simple naming scheme; this is mainly done to avoid special character weirdness. Parameters ---------- genome_in : file, str or pathlib.Path The input genome to be renamed and slugified. genome_out : file, str or pathlib.Path The output genome to be written into. Default is <base>_renamed.fa, where <base> is genome_in without its extension.
[ "Rename", "genome", "and", "slugify", "headers" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L18-L50
train
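The header slugification above is three targeted replaces plus one regex sweep. A quick standalone check of what it does to a messy FASTA id:

```python
import re

def slugify_id(record_id):
    """Replicates rename_genome's header cleanup."""
    new_id = record_id.replace(" ", "_").replace("-", "_").replace("\t", "_")
    # strip anything that is not alphanumeric or an underscore
    return re.sub("[^_A-Za-z0-9]+", "", new_id)

print(slugify_id("scaffold-12 length=4.5kb"))  # scaffold_12_length45kb
```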
koszullab/metaTOR
metator/scripts/fasta_utils.py
filter_genome
def filter_genome(genome_in, threshold=500, list_records=None): """Filter fasta file according to various parameters. Filter a fasta file according to size and/or an explicit list of records to keep. Parameters ---------- genome_in: file, str or pathlib.Path The input genome in FASTA format. threshold: int, optional The size below which genome records are discarded. Default is the default minimum chunk size, i.e. 500. list_records: array_like, optional A list of record ids to keep. If not None, records that don't belong to that list are discarded. Default is None, i.e. all records are kept. Returns ------- records_to_write: generator Filtered records that were kept. """ if list_records is None: def truth(*args): del args return True is_a_record_to_keep = truth else: try: with open(list_records) as records_handle: records_to_keep = [line.strip() for line in records_handle] except (OSError, TypeError): if not hasattr(list_records, "__contains__"): raise else: records_to_keep = list_records is_a_record_to_keep = records_to_keep.__contains__ records_to_write = ( record for record in SeqIO.parse(genome_in, "fasta") if (len(record.seq) >= threshold and is_a_record_to_keep(record.id)) ) return records_to_write
python
def filter_genome(genome_in, threshold=500, list_records=None): """Filter fasta file according to various parameters. Filter a fasta file according to size and/or an explicit list of records to keep. Parameters ---------- genome_in: file, str or pathlib.Path The input genome in FASTA format. threshold: int, optional The size below which genome records are discarded. Default is the default minimum chunk size, i.e. 500. list_records: array_like, optional A list of record ids to keep. If not None, records that don't belong to that list are discarded. Default is None, i.e. all records are kept. Returns ------- records_to_write: generator Filtered records that were kept. """ if list_records is None: def truth(*args): del args return True is_a_record_to_keep = truth else: try: with open(list_records) as records_handle: records_to_keep = [line.strip() for line in records_handle] except (OSError, TypeError): if not hasattr(list_records, "__contains__"): raise else: records_to_keep = list_records is_a_record_to_keep = records_to_keep.__contains__ records_to_write = ( record for record in SeqIO.parse(genome_in, "fasta") if (len(record.seq) >= threshold and is_a_record_to_keep(record.id)) ) return records_to_write
[ "def", "filter_genome", "(", "genome_in", ",", "threshold", "=", "500", ",", "list_records", "=", "None", ")", ":", "if", "list_records", "is", "None", ":", "def", "truth", "(", "*", "args", ")", ":", "del", "args", "return", "True", "is_a_record_to_keep", "=", "truth", "else", ":", "try", ":", "with", "open", "(", "list_records", ")", "as", "records_handle", ":", "records_to_keep", "=", "records_handle", ".", "readlines", "(", ")", "except", "OSError", ":", "if", "not", "hasattr", "(", "list_records", ",", "\"__contains__\"", ")", ":", "raise", "else", ":", "records_to_keep", "=", "list_records", "is_a_record_to_keep", "=", "records_to_keep", ".", "__contains__", "records_to_write", "=", "(", "record", "for", "record", "in", "SeqIO", ".", "parse", "(", "genome_in", ",", "\"fasta\"", ")", "if", "(", "len", "(", "record", ".", "seq", ")", ">=", "threshold", "and", "is_a_record_to_keep", "(", "record", ".", "id", ")", ")", ")", "return", "records_to_write" ]
Filter fasta file according to various parameters. Filter a fasta file according to size and/or an explicit list of records to keep. Parameters ---------- genome_in: file, str or pathlib.Path The input genome in FASTA format. threshold: int, optional The size below which genome records are discarded. Default is the default minimum chunk size, i.e. 500. list_records: array_like, optional A list of record ids to keep. If not None, records that don't belong to that list are discarded. Default is None, i.e. all records are kept. Returns ------- records_to_write: generator Filtered records that were kept.
[ "Filter", "fasta", "file", "according", "to", "various", "parameters", "." ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L53-L103
train
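Because `filter_genome` returns a lazy generator, nothing is parsed until the records are consumed. A typical pairing with this module's `write_records` (the record a little further down); the file names and ids here are illustrative:

```python
from metator.scripts.fasta_utils import filter_genome, write_records

# keep only records >= 1 kb that appear in an explicit id list,
# then write them out; parsing happens only when write_records iterates
kept = filter_genome("assembly.fa", threshold=1000,
                     list_records=["contig_1", "contig_7"])
write_records(kept, "assembly_filtered.fa")
```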
koszullab/metaTOR
metator/scripts/fasta_utils.py
rename_proteins
def rename_proteins(prot_in, prot_out=None, chunk_size=DEFAULT_CHUNK_SIZE): """Rename prodigal output files Rename output files from prodigal according to the following naming scheme: >contigX_chunkY__geneZ Chunk numbering starts at 0 and gene identification is taken from prodigal. Parameters ---------- prot_in : file, str or pathlib.Path The input protein file in FASTA format to be renamed. prot_out : file, str or pathlib.Path The output protein file to be renamed into. chunk_size : int, optional The size of the chunks (in bp) used in the pipeline. Default is 1000. """ if prot_out is None: prot_out = "{}_renamed.fa".format(prot_in.split(".")[0]) with open(prot_out, "w") as prot_out_handle: for record in SeqIO.parse(prot_in, "fasta"): header = record.description name, pos_start, _, _, _ = header.split("#") chunk_start = int(pos_start) // chunk_size name_split = name.split("_") contig_name = "_".join(name_split[:-1]) gene_id = name_split[-1] new_record_id = "{}_{}__gene{}".format( contig_name, chunk_start, gene_id ) prot_out_handle.write(">{}\n".format(new_record_id)) prot_out_handle.write("{}\n".format(str(record.seq)))
python
def rename_proteins(prot_in, prot_out=None, chunk_size=DEFAULT_CHUNK_SIZE): """Rename prodigal output files Rename output files from prodigal according to the following naming scheme: >contigX_chunkY__geneZ Chunk numbering starts at 0 and gene identification is taken from prodigal. Parameters ---------- prot_in : file, str or pathlib.Path The input protein file in FASTA format to be renamed. prot_out : file, str or pathlib.Path The output protein file to be renamed into. chunk_size : int, optional The size of the chunks (in bp) used in the pipeline. Default is 1000. """ if prot_out is None: prot_out = "{}_renamed.fa".format(prot_in.split(".")[0]) with open(prot_out, "w") as prot_out_handle: for record in SeqIO.parse(prot_in, "fasta"): header = record.description name, pos_start, _, _, _ = header.split("#") chunk_start = int(pos_start) // chunk_size name_split = name.split("_") contig_name = "_".join(name_split[:-1]) gene_id = name_split[-1] new_record_id = "{}_{}__gene{}".format( contig_name, chunk_start, gene_id ) prot_out_handle.write(">{}\n".format(new_record_id)) prot_out_handle.write("{}\n".format(str(record.seq)))
[ "def", "rename_proteins", "(", "prot_in", ",", "prot_out", "=", "None", ",", "chunk_size", "=", "DEFAULT_CHUNK_SIZE", ")", ":", "if", "prot_out", "is", "None", ":", "prot_out", "=", "\"{}_renamed.fa\"", ".", "format", "(", "prot_in", ".", "split", "(", "\".\"", ")", "[", "0", "]", ")", "with", "open", "(", "prot_out", ",", "\"w\"", ")", "as", "prot_out_handle", ":", "for", "record", "in", "SeqIO", ".", "parse", "(", "prot_in", ",", "\"fasta\"", ")", ":", "header", "=", "record", ".", "description", "name", ",", "pos_start", ",", "_", ",", "_", ",", "_", "=", "header", ".", "split", "(", "\"#\"", ")", "chunk_start", "=", "int", "(", "pos_start", ")", "//", "chunk_size", "name_split", "=", "name", ".", "split", "(", "\"_\"", ")", "contig_name", "=", "\"_\"", ".", "join", "(", "name_split", "[", ":", "-", "1", "]", ")", "gene_id", "=", "name_split", "[", "-", "1", "]", "new_record_id", "=", "\"{}_{}__gene{}\"", ".", "format", "(", "contig_name", ",", "chunk_start", ",", "gene_id", ")", "prot_out_handle", ".", "write", "(", "\">{}\\n\"", ".", "format", "(", "new_record_id", ")", ")", "prot_out_handle", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "str", "(", "record", ".", "seq", ")", ")", ")" ]
Rename prodigal output files Rename output files from prodigal according to the following naming scheme: >contigX_chunkY__geneZ Chunk numbering starts at 0 and gene identification is taken from prodigal. Parameters ---------- prot_in : file, str or pathlib.Path The input protein file in FASTA format to be renamed. prot_out : file, str or pathlib.Path The output protein file to be renamed into. chunk_size : int, optional The size of the chunks (in bp) used in the pipeline. Default is 1000.
[ "Rename", "prodigal", "output", "files" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L106-L144
train
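The renaming in `rename_proteins` hinges on integer division of the prodigal start coordinate by the chunk size. A worked check against a typical prodigal-style header (the header text is illustrative, and a `.strip()` is added here that the original relies on `int()` to absorb):

```python
header = "contig_4_12 # 3021 # 3902 # 1 # ID=4_12"  # illustrative header
name, pos_start, _, _, _ = header.split("#")
chunk_size = 1000
chunk_start = int(pos_start) // chunk_size   # 3021 // 1000 == 3
name_split = name.strip().split("_")
contig_name = "_".join(name_split[:-1])      # 'contig_4'
gene_id = name_split[-1]                     # '12'
print("{}_{}__gene{}".format(contig_name, chunk_start, gene_id))
# contig_4_3__gene12
```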
koszullab/metaTOR
metator/scripts/fasta_utils.py
write_records
def write_records(records, output_file, split=False): """Write FASTA records Write a FASTA file from an iterable of records. Parameters ---------- records : iterable Input records to write. output_file : file, str or pathlib.Path Output FASTA file to be written into. split : bool, optional If True, each record is written into its own separate file. Default is False. """ if split: for record in records: with open( "{}{}.fa".format(output_file, record.id), "w" ) as record_handle: SeqIO.write(record, record_handle, "fasta") else: SeqIO.write(records, output_file, "fasta")
python
def write_records(records, output_file, split=False): """Write FASTA records Write a FASTA file from an iterable of records. Parameters ---------- records : iterable Input records to write. output_file : file, str or pathlib.Path Output FASTA file to be written into. split : bool, optional If True, each record is written into its own separate file. Default is False. """ if split: for record in records: with open( "{}{}.fa".format(output_file, record.id), "w" ) as record_handle: SeqIO.write(record, record_handle, "fasta") else: SeqIO.write(records, output_file, "fasta")
[ "def", "write_records", "(", "records", ",", "output_file", ",", "split", "=", "False", ")", ":", "if", "split", ":", "for", "record", "in", "records", ":", "with", "open", "(", "\"{}{}.fa\"", ".", "format", "(", "output_file", ",", "record", ".", "id", ")", ",", "\"w\"", ")", "as", "record_handle", ":", "SeqIO", ".", "write", "(", "record", ",", "record_handle", ",", "\"fasta\"", ")", "else", ":", "SeqIO", ".", "write", "(", "records", ",", "output_file", ",", "\"fasta\"", ")" ]
Write FASTA records Write a FASTA file from an iterable of records. Parameters ---------- records : iterable Input records to write. output_file : file, str or pathlib.Path Output FASTA file to be written into. split : bool, optional If True, each record is written into its own separate file. Default is False.
[ "Write", "FASTA", "records" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L147-L171
train
eclipse/unide.python
src/unide/process.py
Series.add_sample
def add_sample(self, **data): """Add a sample to this series.""" missing_dimensions = set(data).difference(self.dimensions) if missing_dimensions: raise KeyError('Dimensions not defined in this series: %s' % ', '.join(missing_dimensions)) for dim in self.dimensions: getattr(self, dim).append(data.get(dim))
python
def add_sample(self, **data): """Add a sample to this series.""" missing_dimensions = set(data).difference(self.dimensions) if missing_dimensions: raise KeyError('Dimensions not defined in this series: %s' % ', '.join(missing_dimensions)) for dim in self.dimensions: getattr(self, dim).append(data.get(dim))
[ "def", "add_sample", "(", "self", ",", "*", "*", "data", ")", ":", "missing_dimensions", "=", "set", "(", "data", ")", ".", "difference", "(", "self", ".", "dimensions", ")", "if", "missing_dimensions", ":", "raise", "KeyError", "(", "'Dimensions not defined in this series: %s'", "%", "', '", ".", "join", "(", "missing_dimensions", ")", ")", "for", "dim", "in", "self", ".", "dimensions", ":", "getattr", "(", "self", ",", "dim", ")", ".", "append", "(", "data", ".", "get", "(", "dim", ")", ")" ]
Add a sample to this series.
[ "Add", "a", "sample", "to", "this", "series", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/process.py#L158-L167
train
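The dimension check in `add_sample` rejects unknown keys but tolerates missing ones: absent dimensions are appended as None via `data.get`. A minimal standalone mimic of that behavior, with a plain class standing in for the real Series:

```python
class TinySeries:
    """Stand-in for unide's Series: parallel per-dimension lists."""
    def __init__(self, dimensions):
        self.dimensions = list(dimensions)
        for dim in self.dimensions:
            setattr(self, dim, [])

    def add_sample(self, **data):
        missing = set(data).difference(self.dimensions)
        if missing:
            raise KeyError('Dimensions not defined in this series: %s'
                           % ', '.join(missing))
        for dim in self.dimensions:
            getattr(self, dim).append(data.get(dim))  # None if absent

s = TinySeries(['temperature', 'pressure'])
s.add_sample(temperature=36.7)    # pressure is recorded as None
print(s.temperature, s.pressure)  # [36.7] [None]
```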
eclipse/unide.python
src/unide/process.py
Measurement.samples
def samples(self): """Yield the samples as dicts, keyed by dimensions.""" names = self.series.dimensions for values in zip(*(getattr(self.series, name) for name in names)): yield dict(zip(names, values))
python
def samples(self): """Yield the samples as dicts, keyed by dimensions.""" names = self.series.dimensions for values in zip(*(getattr(self.series, name) for name in names)): yield dict(zip(names, values))
[ "def", "samples", "(", "self", ")", ":", "names", "=", "self", ".", "series", ".", "dimensions", "for", "values", "in", "zip", "(", "*", "(", "getattr", "(", "self", ".", "series", ",", "name", ")", "for", "name", "in", "names", ")", ")", ":", "yield", "dict", "(", "zip", "(", "names", ",", "values", ")", ")" ]
Yield the samples as dicts, keyed by dimensions.
[ "Yield", "the", "samples", "as", "dicts", "keyed", "by", "dimensions", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/process.py#L298-L302
train
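The `samples` generator is a transpose: parallel per-dimension lists become per-sample dicts. The core zip trick in isolation, with illustrative data:

```python
names = ('temperature', 'pressure')
columns = ([36.7, 36.9], [1.01, 1.02])  # one list per dimension

# zip(*columns) walks the lists in lockstep, yielding one row at a time
for values in zip(*columns):
    print(dict(zip(names, values)))
# {'temperature': 36.7, 'pressure': 1.01}
# {'temperature': 36.9, 'pressure': 1.02}
```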
vasilcovsky/pytinypng
pytinypng/utils.py
write_binary
def write_binary(filename, data): """Creates the path to filename and saves binary data""" dir = os.path.dirname(filename) if dir and not os.path.exists(dir): os.makedirs(dir) with open(filename, 'wb') as f: f.write(data)
python
def write_binary(filename, data): """Creates the path to filename and saves binary data""" dir = os.path.dirname(filename) if dir and not os.path.exists(dir): os.makedirs(dir) with open(filename, 'wb') as f: f.write(data)
[ "def", "write_binary", "(", "filename", ",", "data", ")", ":", "dir", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dir", ")", ":", "os", ".", "makedirs", "(", "dir", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")" ]
Creates the path to filename and saves binary data
[ "Create", "path", "to", "filename", "and", "saves", "binary", "data" ]
ac633e4aa41122c49a806f411e43a76d8f73058e
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L22-L28
train
vasilcovsky/pytinypng
pytinypng/utils.py
files_with_exts
def files_with_exts(root='.', suffix=''): """Returns a generator of filenames under the root directory that end with suffix """ return (os.path.join(rootdir, filename) for rootdir, dirnames, filenames in os.walk(root) for filename in filenames if filename.endswith(suffix))
python
def files_with_exts(root='.', suffix=''): """Returns a generator of filenames under the root directory that end with suffix """ return (os.path.join(rootdir, filename) for rootdir, dirnames, filenames in os.walk(root) for filename in filenames if filename.endswith(suffix))
[ "def", "files_with_exts", "(", "root", "=", "'.'", ",", "suffix", "=", "''", ")", ":", "return", "(", "os", ".", "path", ".", "join", "(", "rootdir", ",", "filename", ")", "for", "rootdir", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "root", ")", "for", "filename", "in", "filenames", "if", "filename", ".", "endswith", "(", "suffix", ")", ")" ]
Returns a generator of filenames under the root directory that end with suffix
[ "Returns", "generator", "that", "contains", "filenames", "from", "root", "directory", "and", "ends", "with", "suffix" ]
ac633e4aa41122c49a806f411e43a76d8f73058e
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L37-L44
train
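Because the double comprehension wraps `os.walk`, the search is recursive and lazy: paths are produced one at a time as directories are visited. A quick usage sketch (the directory and extension are illustrative):

```python
from pytinypng.utils import files_with_exts  # import path per this record

# find every PNG under the current tree without building a full list
for path in files_with_exts(root='.', suffix='.png'):
    print(path)
```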
vasilcovsky/pytinypng
pytinypng/utils.py
find_apikey
def find_apikey(): """Finds TinyPNG API key Searches for the API key in the following order: - environment variable TINYPNG_APIKEY - environment variable TINYPNG_API_KEY - file in local directory tinypng.key - file in home directory ~/.tinypng.key If no key is found, returns None """ env_keys = ['TINYPNG_APIKEY', 'TINYPNG_API_KEY'] paths = [] paths.append(os.path.join(os.path.abspath("."), "tinypng.key")) # local directory paths.append(os.path.expanduser("~/.tinypng.key")) # home directory for env_key in env_keys: if os.environ.get(env_key): return os.environ.get(env_key) for path in paths: if os.path.exists(path): with open(path, 'rt') as key_file: return key_file.read().strip() return None
python
def find_apikey(): """Finds TinyPNG API key Searches for the API key in the following order: - environment variable TINYPNG_APIKEY - environment variable TINYPNG_API_KEY - file in local directory tinypng.key - file in home directory ~/.tinypng.key If no key is found, returns None """ env_keys = ['TINYPNG_APIKEY', 'TINYPNG_API_KEY'] paths = [] paths.append(os.path.join(os.path.abspath("."), "tinypng.key")) # local directory paths.append(os.path.expanduser("~/.tinypng.key")) # home directory for env_key in env_keys: if os.environ.get(env_key): return os.environ.get(env_key) for path in paths: if os.path.exists(path): with open(path, 'rt') as key_file: return key_file.read().strip() return None
[ "def", "find_apikey", "(", ")", ":", "env_keys", "=", "[", "'TINYPNG_APIKEY'", ",", "'TINYPNG_API_KEY'", "]", "paths", "=", "[", "]", "paths", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "\".\"", ")", ",", "\"tinypng.key\"", ")", ")", "# local directory", "paths", ".", "append", "(", "os", ".", "path", ".", "expanduser", "(", "\"~/.tinypng.key\"", ")", ")", "# home directory", "for", "env_key", "in", "env_keys", ":", "if", "os", ".", "environ", ".", "get", "(", "env_key", ")", ":", "return", "os", ".", "environ", ".", "get", "(", "env_key", ")", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "open", "(", "path", ",", "'rt'", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "return", "None" ]
Finds TinyPNG API key Searches for the API key in the following order: - environment variable TINYPNG_APIKEY - environment variable TINYPNG_API_KEY - file in local directory tinypng.key - file in home directory ~/.tinypng.key If no key is found, returns None
[ "Finds", "TinyPNG", "API", "key" ]
ac633e4aa41122c49a806f411e43a76d8f73058e
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L67-L91
train
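The lookup order means environment variables always win over key files, which is handy in tests. A small sketch (the key value is illustrative):

```python
import os
from pytinypng.utils import find_apikey  # import path per this record

os.environ['TINYPNG_APIKEY'] = 'dummy-key-for-testing'  # illustrative value
print(find_apikey())  # 'dummy-key-for-testing': env vars beat key files
```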
ihiji/version_utils
version_utils/rpm.py
compare_packages
def compare_packages(rpm_str_a, rpm_str_b, arch_provided=True): """Compare two RPM strings to determine which is newer Parses version information out of RPM package strings of the form returned by the ``rpm -q`` command and compares their versions to determine which is newer. Provided strings *do not* require an architecture at the end, although if providing strings without architecture, the ``arch_provided`` parameter should be set to False. Note that the packages do not have to be the same package (i.e. they do not require the same name or architecture). :param str rpm_str_a: an rpm package string :param str rpm_str_b: an rpm package string :param bool arch_provided: whether package strings contain architecture information :return: 1 (``a`` is newer), 0 (versions are equivalent), or -1 (``b`` is newer) :rtype: int """ logger.debug('compare_packages(%s, %s)', rpm_str_a, rpm_str_b) evr_a = parse_package(rpm_str_a, arch_provided)['EVR'] evr_b = parse_package(rpm_str_b, arch_provided)['EVR'] return labelCompare(evr_a, evr_b)
python
def compare_packages(rpm_str_a, rpm_str_b, arch_provided=True): """Compare two RPM strings to determine which is newer Parses version information out of RPM package strings of the form returned by the ``rpm -q`` command and compares their versions to determine which is newer. Provided strings *do not* require an architecture at the end, although if providing strings without architecture, the ``arch_provided`` parameter should be set to False. Note that the packages do not have to be the same package (i.e. they do not require the same name or architecture). :param str rpm_str_a: an rpm package string :param str rpm_str_b: an rpm package string :param bool arch_provided: whether package strings contain architecture information :return: 1 (``a`` is newer), 0 (versions are equivalent), or -1 (``b`` is newer) :rtype: int """ logger.debug('compare_packages(%s, %s)', rpm_str_a, rpm_str_b) evr_a = parse_package(rpm_str_a, arch_provided)['EVR'] evr_b = parse_package(rpm_str_b, arch_provided)['EVR'] return labelCompare(evr_a, evr_b)
[ "def", "compare_packages", "(", "rpm_str_a", ",", "rpm_str_b", ",", "arch_provided", "=", "True", ")", ":", "logger", ".", "debug", "(", "'resolve_versions(%s, %s)'", ",", "rpm_str_a", ",", "rpm_str_b", ")", "evr_a", "=", "parse_package", "(", "rpm_str_a", ",", "arch_provided", ")", "[", "'EVR'", "]", "evr_b", "=", "parse_package", "(", "rpm_str_b", ",", "arch_provided", ")", "[", "'EVR'", "]", "return", "labelCompare", "(", "evr_a", ",", "evr_b", ")" ]
Compare two RPM strings to determine which is newer Parses version information out of RPM package strings of the form returned by the ``rpm -q`` command and compares their versions to determine which is newer. Provided strings *do not* require an architecture at the end, although if providing strings without architecture, the ``arch_provided`` parameter should be set to False. Note that the packages do not have to be the same package (i.e. they do not require the same name or architecture). :param str rpm_str_a: an rpm package string :param str rpm_str_b: an rpm package string :param bool arch_provided: whether package strings contain architecture information :return: 1 (``a`` is newer), 0 (versions are equivalent), or -1 (``b`` is newer) :rtype: int
[ "Compare", "two", "RPM", "strings", "to", "determine", "which", "is", "newer" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L41-L65
train
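A hedged usage sketch: the package strings are illustrative, and the 1/0/-1 return convention comes straight from the docstring above.

```python
from version_utils.rpm import compare_packages  # path per this record

newer = compare_packages('bash-4.2.46-30.el7.x86_64',
                         'bash-4.2.46-28.el7.x86_64')
print(newer)  # 1: the first string has the higher release (30 > 28)

# strings without the trailing arch need arch_provided=False
print(compare_packages('httpd-2.4.6-88.el7', 'httpd-2.4.6-88.el7',
                       arch_provided=False))  # 0: equivalent
```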
ihiji/version_utils
version_utils/rpm.py
compare_evrs
def compare_evrs(evr_a, evr_b): """Compare two EVR tuples to determine which is newer This method compares the epoch, version, and release of the provided package strings, assuming that epoch is 0 if not provided. Comparison is performed on the epoch, then the version, and then the release. If at any point a non-equality is found, the result is returned without any remaining comparisons being performed (e.g. if the epochs of the packages differ, the versions and releases are not compared). :param tuple evr_a: an EVR tuple :param tuple evr_b: an EVR tuple """ a_epoch, a_ver, a_rel = evr_a b_epoch, b_ver, b_rel = evr_b if a_epoch != b_epoch: return a_newer if a_epoch > b_epoch else b_newer ver_comp = compare_versions(a_ver, b_ver) if ver_comp != a_eq_b: return ver_comp rel_comp = compare_versions(a_rel, b_rel) return rel_comp
python
def compare_evrs(evr_a, evr_b): """Compare two EVR tuples to determine which is newer This method compares the epoch, version, and release of the provided package strings, assuming that epoch is 0 if not provided. Comparison is performed on the epoch, then the version, and then the release. If at any point a non-equality is found, the result is returned without any remaining comparisons being performed (e.g. if the epochs of the packages differ, the versions and releases are not compared). :param tuple evr_a: an EVR tuple :param tuple evr_b: an EVR tuple """ a_epoch, a_ver, a_rel = evr_a b_epoch, b_ver, b_rel = evr_b if a_epoch != b_epoch: return a_newer if a_epoch > b_epoch else b_newer ver_comp = compare_versions(a_ver, b_ver) if ver_comp != a_eq_b: return ver_comp rel_comp = compare_versions(a_rel, b_rel) return rel_comp
[ "def", "compare_evrs", "(", "evr_a", ",", "evr_b", ")", ":", "a_epoch", ",", "a_ver", ",", "a_rel", "=", "evr_a", "b_epoch", ",", "b_ver", ",", "b_rel", "=", "evr_b", "if", "a_epoch", "!=", "b_epoch", ":", "return", "a_newer", "if", "a_epoch", ">", "b_epoch", "else", "b_newer", "ver_comp", "=", "compare_versions", "(", "a_ver", ",", "b_ver", ")", "if", "ver_comp", "!=", "a_eq_b", ":", "return", "ver_comp", "rel_comp", "=", "compare_versions", "(", "a_rel", ",", "b_rel", ")", "return", "rel_comp" ]
Compare two EVR tuples to determine which is newer This method compares the epoch, version, and release of the provided package strings, assuming that epoch is 0 if not provided. Comparison is performed on the epoch, then the version, and then the release. If at any point a non-equality is found, the result is returned without any remaining comparisons being performed (e.g. if the epochs of the packages differ, the versions and releases are not compared). :param tuple evr_a: an EVR tuple :param tuple evr_b: an EVR tuple
[ "Compare", "two", "EVR", "tuples", "to", "determine", "which", "is", "newer" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L68-L90
train
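Epoch dominates, as the short-circuit order implies. A worked call with illustrative tuples:

```python
from version_utils.rpm import compare_evrs  # path per this record

# epoch 1 beats epoch 0 even though 1.0 < 9.9 numerically
print(compare_evrs(('1', '1.0', '1.el7'), ('0', '9.9', '99.el7')))  # 1

# equal epochs fall through to the version, then the release
print(compare_evrs(('0', '2.4', '2'), ('0', '2.4', '10')))          # -1
```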
ihiji/version_utils
version_utils/rpm.py
compare_versions
def compare_versions(version_a, version_b): """Compare two RPM version strings Compares two RPM version strings and returns an integer indicating the result of the comparison. The method of comparison mirrors that used by RPM, so results should be the same for any standard RPM package. To perform the comparison, the strings are first checked for equality. If they are equal, the versions are equal. Otherwise, each string is converted to a character list, and a comparison loop is started using these lists. In the comparison loop, first any non-alphanumeric, non-~ characters are trimmed from the front of the list. Then if the first character from both ``a`` and ``b`` is a ~ (tilde), it is trimmed. The ~ (tilde) character indicates that a given package or version should be considered older (even if it is numerically larger), so if ``a`` begins with a tilde, ``b`` is newer, and vice-versa. At this point, if the length of either list has been reduced to 0, the loop is exited. If characters remain in the list, the :any:`_get_block_result` function is used to pop consecutive digits or letters from the front of the list and compare them. The result of the block comparison is returned if the blocks are not equal. The loop then begins again. If the loop exits without returning a value, the lengths of the remaining character lists are compared. If they have the same length (usually 0, since all characters have been popped), they are considered to be equal. Otherwise, whichever is longer is considered to be newer. Generally, unequal length will be due to one character list having been completely consumed while some characters remain on the other, for example when comparing 1.05b to 1.05. :param unicode version_a: An RPM version or release string :param unicode version_b: An RPM version or release string :return: 1 (if ``a`` is newer), 0 (if versions are equal), or -1 (if ``b`` is newer) :rtype: int :raises RpmError: if a type is passed that cannot be converted to a list """ logger.debug('compare_versions(%s, %s)', version_a, version_b) if version_a == version_b: return a_eq_b try: chars_a, chars_b = list(version_a), list(version_b) except TypeError: raise RpmError('Could not compare {0} to ' '{1}'.format(version_a, version_b)) while len(chars_a) != 0 and len(chars_b) != 0: logger.debug('starting loop comparing %s ' 'to %s', chars_a, chars_b) _check_leading(chars_a, chars_b) if chars_a[0] == '~' and chars_b[0] == '~': chars_a.pop(0) chars_b.pop(0) elif chars_a[0] == '~': return b_newer elif chars_b[0] == '~': return a_newer if len(chars_a) == 0 or len(chars_b) == 0: break block_res = _get_block_result(chars_a, chars_b) if block_res != a_eq_b: return block_res if len(chars_a) == len(chars_b): logger.debug('versions are equal') return a_eq_b else: logger.debug('versions not equal') return a_newer if len(chars_a) > len(chars_b) else b_newer
python
def compare_versions(version_a, version_b): """Compare two RPM version strings Compares two RPM version strings and returns an integer indicating the result of the comparison. The method of comparison mirrors that used by RPM, so results should be the same for any standard RPM package. To perform the comparison, the strings are first checked for equality. If they are equal, the versions are equal. Otherwise, each string is converted to a character list, and a comparison loop is started using these lists. In the comparison loop, first any non-alphanumeric, non-~ characters are trimmed from the front of the list. Then if the first character from both ``a`` and ``b`` is a ~ (tilde), it is trimmed. The ~ (tilde) character indicates that a given package or version should be considered older (even if it is numerically larger), so if ``a`` begins with a tilde, ``b`` is newer, and vice-versa. At this point, if the length of either list has been reduced to 0, the loop is exited. If characters remain in the list, the :any:`_get_block_result` function is used to pop consecutive digits or letters from the front of the list and compare them. The result of the block comparison is returned if the blocks are not equal. The loop then begins again. If the loop exits without returning a value, the lengths of the remaining character lists are compared. If they have the same length (usually 0, since all characters have been popped), they are considered to be equal. Otherwise, whichever is longer is considered to be newer. Generally, unequal length will be due to one character list having been completely consumed while some characters remain on the other, for example when comparing 1.05b to 1.05. :param unicode version_a: An RPM version or release string :param unicode version_b: An RPM version or release string :return: 1 (if ``a`` is newer), 0 (if versions are equal), or -1 (if ``b`` is newer) :rtype: int :raises RpmError: if a type is passed that cannot be converted to a list """ logger.debug('compare_versions(%s, %s)', version_a, version_b) if version_a == version_b: return a_eq_b try: chars_a, chars_b = list(version_a), list(version_b) except TypeError: raise RpmError('Could not compare {0} to ' '{1}'.format(version_a, version_b)) while len(chars_a) != 0 and len(chars_b) != 0: logger.debug('starting loop comparing %s ' 'to %s', chars_a, chars_b) _check_leading(chars_a, chars_b) if chars_a[0] == '~' and chars_b[0] == '~': chars_a.pop(0) chars_b.pop(0) elif chars_a[0] == '~': return b_newer elif chars_b[0] == '~': return a_newer if len(chars_a) == 0 or len(chars_b) == 0: break block_res = _get_block_result(chars_a, chars_b) if block_res != a_eq_b: return block_res if len(chars_a) == len(chars_b): logger.debug('versions are equal') return a_eq_b else: logger.debug('versions not equal') return a_newer if len(chars_a) > len(chars_b) else b_newer
[ "def", "compare_versions", "(", "version_a", ",", "version_b", ")", ":", "logger", ".", "debug", "(", "'compare_versions(%s, %s)'", ",", "version_a", ",", "version_b", ")", "if", "version_a", "==", "version_b", ":", "return", "a_eq_b", "try", ":", "chars_a", ",", "chars_b", "=", "list", "(", "version_a", ")", ",", "list", "(", "version_b", ")", "except", "TypeError", ":", "raise", "RpmError", "(", "'Could not compare {0} to '", "'{1}'", ".", "format", "(", "version_a", ",", "version_b", ")", ")", "while", "len", "(", "chars_a", ")", "!=", "0", "and", "len", "(", "chars_b", ")", "!=", "0", ":", "logger", ".", "debug", "(", "'starting loop comparing %s '", "'to %s'", ",", "chars_a", ",", "chars_b", ")", "_check_leading", "(", "chars_a", ",", "chars_b", ")", "if", "chars_a", "[", "0", "]", "==", "'~'", "and", "chars_b", "[", "0", "]", "==", "'~'", ":", "map", "(", "lambda", "x", ":", "x", ".", "pop", "(", "0", ")", ",", "(", "chars_a", ",", "chars_b", ")", ")", "elif", "chars_a", "[", "0", "]", "==", "'~'", ":", "return", "b_newer", "elif", "chars_b", "[", "0", "]", "==", "'~'", ":", "return", "a_newer", "if", "len", "(", "chars_a", ")", "==", "0", "or", "len", "(", "chars_b", ")", "==", "0", ":", "break", "block_res", "=", "_get_block_result", "(", "chars_a", ",", "chars_b", ")", "if", "block_res", "!=", "a_eq_b", ":", "return", "block_res", "if", "len", "(", "chars_a", ")", "==", "len", "(", "chars_b", ")", ":", "logger", ".", "debug", "(", "'versions are equal'", ")", "return", "a_eq_b", "else", ":", "logger", ".", "debug", "(", "'versions not equal'", ")", "return", "a_newer", "if", "len", "(", "chars_a", ")", ">", "len", "(", "chars_b", ")", "else", "b_newer" ]
Compare two RPM version strings Compares two RPM version strings and returns an integer indicating the result of the comparison. The method of comparison mirrors that used by RPM, so results should be the same for any standard RPM package. To perform the comparison, the strings are first checked for equality. If they are equal, the versions are equal. Otherwise, each string is converted to a character list, and a comparison loop is started using these lists. In the comparison loop, first any non-alphanumeric, non-~ characters are trimmed from the front of the list. Then if the first character from both ``a`` and ``b`` is a ~ (tilde), it is trimmed. The ~ (tilde) character indicates that a given package or version should be considered older (even if it is numerically larger), so if ``a`` begins with a tilde, ``b`` is newer, and vice-versa. At this point, if the length of either list has been reduced to 0, the loop is exited. If characters remain in the list, the :any:`_get_block_result` function is used to pop consecutive digits or letters from the front of the list and compare them. The result of the block comparison is returned if the blocks are not equal. The loop then begins again. If the loop exits without returning a value, the lengths of the remaining character lists are compared. If they have the same length (usually 0, since all characters have been popped), they are considered to be equal. Otherwise, whichever is longer is considered to be newer. Generally, unequal length will be due to one character list having been completely consumed while some characters remain on the other, for example when comparing 1.05b to 1.05. :param unicode version_a: An RPM version or release string :param unicode version_b: An RPM version or release string :return: 1 (if ``a`` is newer), 0 (if versions are equal), or -1 (if ``b`` is newer) :rtype: int :raises RpmError: if a type is passed that cannot be converted to a list
[ "Compare", "two", "RPM", "version", "strings" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L115-L185
train
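Two behaviors from the docstring are easy to verify: the longer-remainder rule (1.05b vs 1.05) and numeric digit-block comparison. The third call below exercises the both-tilde branch, which would spin forever under the lazy Python 3 `map()` replaced above; version strings are illustrative:

```python
from version_utils.rpm import compare_versions  # path per this record

print(compare_versions('1.05b', '1.05'))       # 1: 'b' remains after equal blocks
print(compare_versions('2.10', '2.9'))         # 1: digit blocks compare numerically
print(compare_versions('1.0~rc1', '1.0~rc2'))  # -1: tildes cancel, then rc1 < rc2
```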
ihiji/version_utils
version_utils/rpm.py
package
def package(package_string, arch_included=True): """Parse an RPM version string Parses most (all tested) RPM version strings to get their name, epoch, version, release, and architecture information. Epoch (also called serial) is an optional component for RPM versions, and it is also optional when providing a version string to this function. RPM assumes the epoch to be 0 if it is not provided, so that behavior is mirrored here. :param str package_string: an RPM package string of the form returned by the ``rpm -q`` command :param bool arch_included: whether the package string contains architecture information :return: A :any:`common.Package` object containing all parsed information :rtype: common.Package """ logger.debug('package(%s, %s)', package_string, arch_included) pkg_info = parse_package(package_string, arch_included) pkg = Package(pkg_info['name'], pkg_info['EVR'][0], pkg_info['EVR'][1], pkg_info['EVR'][2], pkg_info['arch'], package_str=package_string) return pkg
python
def package(package_string, arch_included=True): """Parse an RPM version string Parses most (all tested) RPM version strings to get their name, epoch, version, release, and architecture information. Epoch (also called serial) is an optional component for RPM versions, and it is also optional when providing a version string to this function. RPM assumes the epoch to be 0 if it is not provided, so that behavior is mirrored here. :param str package_string: an RPM package string of the form returned by the ``rpm -q`` command :param bool arch_included: whether the package string contains architecture information :return: A :any:`common.Package` object containing all parsed information :rtype: common.Package """ logger.debug('package(%s, %s)', package_string, arch_included) pkg_info = parse_package(package_string, arch_included) pkg = Package(pkg_info['name'], pkg_info['EVR'][0], pkg_info['EVR'][1], pkg_info['EVR'][2], pkg_info['arch'], package_str=package_string) return pkg
[ "def", "package", "(", "package_string", ",", "arch_included", "=", "True", ")", ":", "logger", ".", "debug", "(", "'package(%s, %s)'", ",", "package_string", ",", "arch_included", ")", "pkg_info", "=", "parse_package", "(", "package_string", ",", "arch_included", ")", "pkg", "=", "Package", "(", "pkg_info", "[", "'name'", "]", ",", "pkg_info", "[", "'EVR'", "]", "[", "0", "]", ",", "pkg_info", "[", "'EVR'", "]", "[", "1", "]", ",", "pkg_info", "[", "'EVR'", "]", "[", "2", "]", ",", "pkg_info", "[", "'arch'", "]", ",", "package_str", "=", "package_string", ")", "return", "pkg" ]
Parse an RPM version string Parses most (all tested) RPM version strings to get their name, epoch, version, release, and architecture information. Epoch (also called serial) is an optional component for RPM versions, and it is also optional when providing a version string to this function. RPM assumes the epoch to be 0 if it is not provided, so that behavior is mirrored here. :param str package_string: an RPM package string of the form returned by the ``rpm -q`` command :param bool arch_included: whether the package string contains architecture information :return: A :any:`common.Package` object containing all parsed information :rtype: common.Package
[ "Parse", "an", "RPM", "version", "string" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L188-L209
train
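A hedged usage sketch: the package string is illustrative, and the attribute names on `Package` are assumed from the constructor arguments shown in this record, not confirmed by its source:

```python
from version_utils.rpm import package  # path per this record

pkg = package('python-requests-2.6.0-1.el7_1.noarch')
# attribute names below are assumed from the Package(...) call above
print(pkg.name)     # 'python-requests'
print(pkg.version)  # '2.6.0'
print(pkg.arch)     # 'noarch'
```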
ihiji/version_utils
version_utils/rpm.py
parse_package
def parse_package(package_string, arch_included=True): """Parse an RPM version string to get name, version, and arch Splits most (all tested) RPM version strings into name, epoch, version, release, and architecture. Epoch (also called serial) is an optional component of RPM versioning and is also optional in version strings provided to this function. RPM assumes the epoch to be 0 if it is not provided, so that behavior is mirrored here. **Deprecated** since version 0.2.0. Use :any:`rpm.package` instead. :param str package_string: an RPM version string of the form returned by the ``rpm -q`` command :param bool arch_included: default True - version strings may optionally be provided without the trailing architecture. If providing such strings, set this option to False :return: a dictionary with all parsed package information :rtype: dict """ # Yum sets epoch values to 0 if they are not specified logger.debug('parse_package(%s, %s)', package_string, arch_included) default_epoch = '0' arch = None if arch_included: char_list = list(package_string) arch = _pop_arch(char_list) package_string = ''.join(char_list) logger.debug('updated version_string: %s', package_string) try: name, epoch, version, release = _rpm_re.match(package_string).groups() except AttributeError: raise RpmError('Could not parse package string: %s' % package_string) if epoch == '' or epoch is None: epoch = default_epoch info = { 'name': name, 'EVR': (epoch, version, release), 'arch': arch } logger.debug('parsed information: %s', info) return info
python
def parse_package(package_string, arch_included=True): """Parse an RPM version string to get name, version, and arch Splits most (all tested) RPM version strings into name, epoch, version, release, and architecture. Epoch (also called serial) is an optional component of RPM versioning and is also optional in version strings provided to this function. RPM assumes the epoch to be 0 if it is not provided, so that behavior is mirrored here. **Deprecated** since version 0.2.0. Use :any:`rpm.package` instead. :param str package_string: an RPM version string of the form returned by the ``rpm -q`` command :param bool arch_included: default True - version strings may optionally be provided without the trailing architecture. If providing such strings, set this option to False :return: a dictionary with all parsed package information :rtype: dict """ # Yum sets epoch values to 0 if they are not specified logger.debug('parse_package(%s, %s)', package_string, arch_included) default_epoch = '0' arch = None if arch_included: char_list = list(package_string) arch = _pop_arch(char_list) package_string = ''.join(char_list) logger.debug('updated version_string: %s', package_string) try: name, epoch, version, release = _rpm_re.match(package_string).groups() except AttributeError: raise RpmError('Could not parse package string: %s' % package_string) if epoch == '' or epoch is None: epoch = default_epoch info = { 'name': name, 'EVR': (epoch, version, release), 'arch': arch } logger.debug('parsed information: %s', info) return info
[ "def", "parse_package", "(", "package_string", ",", "arch_included", "=", "True", ")", ":", "# Yum sets epoch values to 0 if they are not specified", "logger", ".", "debug", "(", "'parse_package(%s, %s)'", ",", "package_string", ",", "arch_included", ")", "default_epoch", "=", "'0'", "arch", "=", "None", "if", "arch_included", ":", "char_list", "=", "list", "(", "package_string", ")", "arch", "=", "_pop_arch", "(", "char_list", ")", "package_string", "=", "''", ".", "join", "(", "char_list", ")", "logger", ".", "debug", "(", "'updated version_string: %s'", ",", "package_string", ")", "try", ":", "name", ",", "epoch", ",", "version", ",", "release", "=", "_rpm_re", ".", "match", "(", "package_string", ")", ".", "groups", "(", ")", "except", "AttributeError", ":", "raise", "RpmError", "(", "'Could not parse package string: %s'", "%", "package_string", ")", "if", "epoch", "==", "''", "or", "epoch", "is", "None", ":", "epoch", "=", "default_epoch", "info", "=", "{", "'name'", ":", "name", ",", "'EVR'", ":", "(", "epoch", ",", "version", ",", "release", ")", ",", "'arch'", ":", "arch", "}", "logger", ".", "debug", "(", "'parsed information: %s'", ",", "info", ")", "return", "info" ]
Parse an RPM version string to get name, version, and arch Splits most (all tested) RPM version strings into name, epoch, version, release, and architecture. Epoch (also called serial) is an optional component of RPM versioning and is also optional in version strings provided to this function. RPM assumes the epoch to be 0 if it is not provided, so that behavior is mirrored here. **Deprecated** since version 0.2.0. Use :any:`rpm.package` instead. :param str package_string: an RPM version string of the form returned by the ``rpm -q`` command :param bool arch_included: default True - version strings may optionally be provided without the trailing architecture. If providing such strings, set this option to False :return: a dictionary with all parsed package information :rtype: dict
[ "Parse", "an", "RPM", "version", "string", "to", "get", "name", "version", "and", "arch" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L212-L252
train
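A worked call showing the default epoch of '0' being filled in; the package string is illustrative and the split assumes the usual name-epoch:version-release layout matched by `_rpm_re` (not shown in this record):

```python
from version_utils.rpm import parse_package  # path per this record

info = parse_package('epel-release-7-11.noarch')
print(info['name'])  # 'epel-release'
print(info['EVR'])   # ('0', '7', '11'): epoch defaults to '0'
print(info['arch'])  # 'noarch'
```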
ihiji/version_utils
version_utils/rpm.py
_pop_arch
def _pop_arch(char_list): """Pop the architecture from a version string and return it Returns any portion of a string following the final period. In rpm version strings, this corresponds to the package architecture. :param list char_list: an rpm version string in character list form :return: the parsed architecture as a string :rtype: str """ logger.debug('_pop_arch(%s)', char_list) arch_list = [] char = char_list.pop() while char != '.': arch_list.insert(0, char) try: char = char_list.pop() except IndexError: # Raised for a string with no periods raise RpmError('Could not parse an architecture. Did you mean to ' 'set the arch_included flag to False?') logger.debug('arch chars: %s', arch_list) return ''.join(arch_list)
python
def _pop_arch(char_list): """Pop the architecture from a version string and return it Returns any portion of a string following the final period. In rpm version strings, this corresponds to the package architecture. :param list char_list: an rpm version string in character list form :return: the parsed architecture as a string :rtype: str """ logger.debug('_pop_arch(%s)', char_list) arch_list = [] char = char_list.pop() while char != '.': arch_list.insert(0, char) try: char = char_list.pop() except IndexError: # Raised for a string with no periods raise RpmError('Could not parse an architecture. Did you mean to ' 'set the arch_included flag to False?') logger.debug('arch chars: %s', arch_list) return ''.join(arch_list)
[ "def", "_pop_arch", "(", "char_list", ")", ":", "logger", ".", "debug", "(", "'_pop_arch(%s)'", ",", "char_list", ")", "arch_list", "=", "[", "]", "char", "=", "char_list", ".", "pop", "(", ")", "while", "char", "!=", "'.'", ":", "arch_list", ".", "insert", "(", "0", ",", "char", ")", "try", ":", "char", "=", "char_list", ".", "pop", "(", ")", "except", "IndexError", ":", "# Raised for a string with no periods", "raise", "RpmError", "(", "'Could not parse an architecture. Did you mean to '", "'set the arch_included flag to False?'", ")", "logger", ".", "debug", "(", "'arch chars: %s'", ",", "arch_list", ")", "return", "''", ".", "join", "(", "arch_list", ")" ]
Pop the architecture from a version string and return it Returns any portion of a string following the final period. In rpm version strings, this corresponds to the package architecture. :param list char_list: an rpm version string in character list form :return: the parsed architecture as a string :rtype: str
[ "Pop", "the", "architecture", "from", "a", "version", "string", "and", "return", "it" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L255-L276
train
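`_pop_arch` consumes the character list in place, so the caller is left holding only the name-version-release portion. A quick trace, calling the private helper directly for illustration only:

```python
from version_utils.rpm import _pop_arch  # private helper, for illustration

chars = list('tzdata-2018e-3.el7.noarch')
arch = _pop_arch(chars)   # mutates chars in place
print(arch)               # 'noarch'
print(''.join(chars))     # 'tzdata-2018e-3.el7'
```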
ihiji/version_utils
version_utils/rpm.py
_check_leading
def _check_leading(*char_lists): """Remove any non-alphanumeric or non-~ leading characters Checks the beginning of any provided lists for non-alphanumeric or non-~ (tilde) leading characters and removes them if found. Operates on (and possibly alters) the passed list. :param list char_lists: one or more lists of characters :return: None :rtype: None """ logger.debug('_check_leading(%s)', char_lists) for char_list in char_lists: while (len(char_list) != 0 and not char_list[0].isalnum() and not char_list[0] == '~'): char_list.pop(0) logger.debug('updated list: %s', char_list)
python
def _check_leading(*char_lists): """Remove any non-alphanumeric or non-~ leading characters Checks the beginning of any provided lists for non-alphanumeric or non-~ (tilde) leading characters and removes them if found. Operates on (and possibly alters) the passed list. :param list char_lists: one or more lists of characters :return: None :rtype: None """ logger.debug('_check_leading(%s)', char_lists) for char_list in char_lists: while (len(char_list) != 0 and not char_list[0].isalnum() and not char_list[0] == '~'): char_list.pop(0) logger.debug('updated list: %s', char_list)
[ "def", "_check_leading", "(", "*", "char_lists", ")", ":", "logger", ".", "debug", "(", "'_check_leading(%s)'", ",", "char_lists", ")", "for", "char_list", "in", "char_lists", ":", "while", "(", "len", "(", "char_list", ")", "!=", "0", "and", "not", "char_list", "[", "0", "]", ".", "isalnum", "(", ")", "and", "not", "char_list", "[", "0", "]", "==", "'~'", ")", ":", "char_list", ".", "pop", "(", "0", ")", "logger", ".", "debug", "(", "'updated list: %s'", ",", "char_list", ")" ]
Remove any non-alphanumeric or non-~ leading characters

    Checks the beginning of any provided lists for non-alphanumeric or
    non-~ (tilde) leading characters and removes them if found. Operates
    on (and possibly alters) the passed lists.

    :param list char_lists: a list or lists of characters
    :return: None
    :rtype: None
[ "Remove", "any", "non", "-", "alphanumeric", "or", "non", "-", "~", "leading", "characters" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L279-L295
train
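A sketch of the in-place behavior (hedged: hypothetical inputs; the function mutates its arguments and returns None):

from version_utils.rpm import _check_leading

a, b = list('-1.el7'), list('.2.el7')
_check_leading(a, b)
# a == ['1', '.', 'e', 'l', '7'], b == ['2', '.', 'e', 'l', '7']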
ihiji/version_utils
version_utils/rpm.py
_trim_zeros
def _trim_zeros(*char_lists):
    """Trim any zeros from provided character lists

    Checks the beginning of any provided lists for '0's and removes any
    such leading zeros. Operates on (and possibly alters) the passed
    lists.

    :param list char_lists: a list or lists of characters
    :return: None
    :rtype: None
    """
    logger.debug('_trim_zeros(%s)', char_lists)
    for char_list in char_lists:
        while len(char_list) != 0 and char_list[0] == '0':
            char_list.pop(0)
        logger.debug('updated block: %s', char_list)
python
def _trim_zeros(*char_lists):
    """Trim any zeros from provided character lists

    Checks the beginning of any provided lists for '0's and removes any
    such leading zeros. Operates on (and possibly alters) the passed
    lists.

    :param list char_lists: a list or lists of characters
    :return: None
    :rtype: None
    """
    logger.debug('_trim_zeros(%s)', char_lists)
    for char_list in char_lists:
        while len(char_list) != 0 and char_list[0] == '0':
            char_list.pop(0)
        logger.debug('updated block: %s', char_list)
[ "def", "_trim_zeros", "(", "*", "char_lists", ")", ":", "logger", ".", "debug", "(", "'_trim_zeros(%s)'", ",", "char_lists", ")", "for", "char_list", "in", "char_lists", ":", "while", "len", "(", "char_list", ")", "!=", "0", "and", "char_list", "[", "0", "]", "==", "'0'", ":", "char_list", ".", "pop", "(", "0", ")", "logger", ".", "debug", "(", "'updated block: %s'", ",", "char_list", ")" ]
Trim any zeros from provided character lists

    Checks the beginning of any provided lists for '0's and removes any
    such leading zeros. Operates on (and possibly alters) the passed
    lists.

    :param list char_lists: a list or lists of characters
    :return: None
    :rtype: None
[ "Trim", "any", "zeros", "from", "provided", "character", "lists" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L298-L313
train
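A sketch of the zero-trimming step (hedged: hypothetical digit blocks; both lists are edited in place):

from version_utils.rpm import _trim_zeros

a, b = list('007'), list('07')
_trim_zeros(a, b)
# a == ['7'] and b == ['7'], so equal-valued digit blocks now compare equal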
ihiji/version_utils
version_utils/rpm.py
_pop_digits
def _pop_digits(char_list): """Pop consecutive digits from the front of list and return them Pops any and all consecutive digits from the start of the provided character list and returns them as a list of string digits. Operates on (and possibly alters) the passed list. :param list char_list: a list of characters :return: a list of string digits :rtype: list """ logger.debug('_pop_digits(%s)', char_list) digits = [] while len(char_list) != 0 and char_list[0].isdigit(): digits.append(char_list.pop(0)) logger.debug('got digits: %s', digits) logger.debug('updated char list: %s', char_list) return digits
python
def _pop_digits(char_list): """Pop consecutive digits from the front of list and return them Pops any and all consecutive digits from the start of the provided character list and returns them as a list of string digits. Operates on (and possibly alters) the passed list. :param list char_list: a list of characters :return: a list of string digits :rtype: list """ logger.debug('_pop_digits(%s)', char_list) digits = [] while len(char_list) != 0 and char_list[0].isdigit(): digits.append(char_list.pop(0)) logger.debug('got digits: %s', digits) logger.debug('updated char list: %s', char_list) return digits
[ "def", "_pop_digits", "(", "char_list", ")", ":", "logger", ".", "debug", "(", "'_pop_digits(%s)'", ",", "char_list", ")", "digits", "=", "[", "]", "while", "len", "(", "char_list", ")", "!=", "0", "and", "char_list", "[", "0", "]", ".", "isdigit", "(", ")", ":", "digits", ".", "append", "(", "char_list", ".", "pop", "(", "0", ")", ")", "logger", ".", "debug", "(", "'got digits: %s'", ",", "digits", ")", "logger", ".", "debug", "(", "'updated char list: %s'", ",", "char_list", ")", "return", "digits" ]
Pop consecutive digits from the front of list and return them Pops any and all consecutive digits from the start of the provided character list and returns them as a list of string digits. Operates on (and possibly alters) the passed list. :param list char_list: a list of characters :return: a list of string digits :rtype: list
[ "Pop", "consecutive", "digits", "from", "the", "front", "of", "list", "and", "return", "them" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L316-L333
train
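A sketch of popping a digit block (hedged: hypothetical input):

from version_utils.rpm import _pop_digits

chars = list('12a3')
digits = _pop_digits(chars)   # -> ['1', '2']; chars is left as ['a', '3']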
ihiji/version_utils
version_utils/rpm.py
_pop_letters
def _pop_letters(char_list): """Pop consecutive letters from the front of a list and return them Pops any and all consecutive letters from the start of the provided character list and returns them as a list of characters. Operates on (and possibly alters) the passed list :param list char_list: a list of characters :return: a list of characters :rtype: list """ logger.debug('_pop_letters(%s)', char_list) letters = [] while len(char_list) != 0 and char_list[0].isalpha(): letters.append(char_list.pop(0)) logger.debug('got letters: %s', letters) logger.debug('updated char list: %s', char_list) return letters
python
def _pop_letters(char_list): """Pop consecutive letters from the front of a list and return them Pops any and all consecutive letters from the start of the provided character list and returns them as a list of characters. Operates on (and possibly alters) the passed list :param list char_list: a list of characters :return: a list of characters :rtype: list """ logger.debug('_pop_letters(%s)', char_list) letters = [] while len(char_list) != 0 and char_list[0].isalpha(): letters.append(char_list.pop(0)) logger.debug('got letters: %s', letters) logger.debug('updated char list: %s', char_list) return letters
[ "def", "_pop_letters", "(", "char_list", ")", ":", "logger", ".", "debug", "(", "'_pop_letters(%s)'", ",", "char_list", ")", "letters", "=", "[", "]", "while", "len", "(", "char_list", ")", "!=", "0", "and", "char_list", "[", "0", "]", ".", "isalpha", "(", ")", ":", "letters", ".", "append", "(", "char_list", ".", "pop", "(", "0", ")", ")", "logger", ".", "debug", "(", "'got letters: %s'", ",", "letters", ")", "logger", ".", "debug", "(", "'updated char list: %s'", ",", "char_list", ")", "return", "letters" ]
Pop consecutive letters from the front of a list and return them Pops any and all consecutive letters from the start of the provided character list and returns them as a list of characters. Operates on (and possibly alters) the passed list :param list char_list: a list of characters :return: a list of characters :rtype: list
[ "Pop", "consecutive", "letters", "from", "the", "front", "of", "a", "list", "and", "return", "them" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L336-L353
train
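The letter-block counterpart behaves the same way (hedged: hypothetical input):

from version_utils.rpm import _pop_letters

chars = list('el7')
letters = _pop_letters(chars)   # -> ['e', 'l']; chars is left as ['7']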
ihiji/version_utils
version_utils/rpm.py
_compare_blocks
def _compare_blocks(block_a, block_b):
    """Compare two blocks of characters

    Compares two blocks of characters of the form returned by either the
    :any:`_pop_digits` or :any:`_pop_letters` function. Blocks should be
    character lists containing only digits or only letters. Both blocks
    should contain the same character type (digits or letters).

    The method of comparison mirrors the method used by RPM. If the
    blocks are digit blocks, any leading zeros are trimmed, and whichever
    block is longer is assumed to be larger. If the resultant blocks are
    the same length, or if the blocks are non-numeric, they are checked
    for string equality and considered equal if the string equality
    comparison returns True. If not, whichever evaluates as greater than
    the other (again in string comparison) is assumed to be larger.

    :param list block_a: an all numeric or all alphabetic character list
    :param list block_b: an all numeric or all alphabetic character list.
        The character type (alphabetic or numeric) should match that of
        ``block_a``
    :return: 1 (if ``a`` is newer), 0 (if versions are equal) or -1 (if
        ``b`` is newer)
    :rtype: int
    """
    logger.debug('_compare_blocks(%s, %s)', block_a, block_b)
    if block_a[0].isdigit():
        _trim_zeros(block_a, block_b)
    if len(block_a) != len(block_b):
        logger.debug('block lengths are not equal')
        return a_newer if len(block_a) > len(block_b) else b_newer
    if block_a == block_b:
        logger.debug('blocks are equal')
        return a_eq_b
    else:
        logger.debug('blocks are not equal')
        return a_newer if block_a > block_b else b_newer
python
def _compare_blocks(block_a, block_b):
    """Compare two blocks of characters

    Compares two blocks of characters of the form returned by either the
    :any:`_pop_digits` or :any:`_pop_letters` function. Blocks should be
    character lists containing only digits or only letters. Both blocks
    should contain the same character type (digits or letters).

    The method of comparison mirrors the method used by RPM. If the
    blocks are digit blocks, any leading zeros are trimmed, and whichever
    block is longer is assumed to be larger. If the resultant blocks are
    the same length, or if the blocks are non-numeric, they are checked
    for string equality and considered equal if the string equality
    comparison returns True. If not, whichever evaluates as greater than
    the other (again in string comparison) is assumed to be larger.

    :param list block_a: an all numeric or all alphabetic character list
    :param list block_b: an all numeric or all alphabetic character list.
        The character type (alphabetic or numeric) should match that of
        ``block_a``
    :return: 1 (if ``a`` is newer), 0 (if versions are equal) or -1 (if
        ``b`` is newer)
    :rtype: int
    """
    logger.debug('_compare_blocks(%s, %s)', block_a, block_b)
    if block_a[0].isdigit():
        _trim_zeros(block_a, block_b)
    if len(block_a) != len(block_b):
        logger.debug('block lengths are not equal')
        return a_newer if len(block_a) > len(block_b) else b_newer
    if block_a == block_b:
        logger.debug('blocks are equal')
        return a_eq_b
    else:
        logger.debug('blocks are not equal')
        return a_newer if block_a > block_b else b_newer
[ "def", "_compare_blocks", "(", "block_a", ",", "block_b", ")", ":", "logger", ".", "debug", "(", "'_compare_blocks(%s, %s)'", ",", "block_a", ",", "block_b", ")", "if", "block_a", "[", "0", "]", ".", "isdigit", "(", ")", ":", "_trim_zeros", "(", "block_a", ",", "block_b", ")", "if", "len", "(", "block_a", ")", "!=", "len", "(", "block_b", ")", ":", "logger", ".", "debug", "(", "'block lengths are not equal'", ")", "return", "a_newer", "if", "len", "(", "block_a", ")", ">", "len", "(", "block_b", ")", "else", "b_newer", "if", "block_a", "==", "block_b", ":", "logger", ".", "debug", "(", "'blocks are equal'", ")", "return", "a_eq_b", "else", ":", "logger", ".", "debug", "(", "'blocks are not equal'", ")", "return", "a_newer", "if", "block_a", ">", "block_b", "else", "b_newer" ]
Compare two blocks of characters

    Compares two blocks of characters of the form returned by either the
    :any:`_pop_digits` or :any:`_pop_letters` function. Blocks should be
    character lists containing only digits or only letters. Both blocks
    should contain the same character type (digits or letters).

    The method of comparison mirrors the method used by RPM. If the
    blocks are digit blocks, any leading zeros are trimmed, and whichever
    block is longer is assumed to be larger. If the resultant blocks are
    the same length, or if the blocks are non-numeric, they are checked
    for string equality and considered equal if the string equality
    comparison returns True. If not, whichever evaluates as greater than
    the other (again in string comparison) is assumed to be larger.

    :param list block_a: an all numeric or all alphabetic character list
    :param list block_b: an all numeric or all alphabetic character list.
        The character type (alphabetic or numeric) should match that of
        ``block_a``
    :return: 1 (if ``a`` is newer), 0 (if versions are equal) or -1 (if
        ``b`` is newer)
    :rtype: int
[ "Compare", "two", "blocks", "of", "characters" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L356-L393
train
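A sketch of the comparison rules (hedged: it assumes the module constants a_newer, a_eq_b and b_newer are 1, 0 and -1, as the docstring implies):

from version_utils.rpm import _compare_blocks

_compare_blocks(list('10'), list('9'))         # -> 1: longer digit block wins
_compare_blocks(list('007'), list('7'))        # -> 0: leading zeros are trimmed first
_compare_blocks(list('alpha'), list('gamma'))  # -> -1: string comparison for letters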
ihiji/version_utils
version_utils/rpm.py
_get_block_result
def _get_block_result(chars_a, chars_b):
    """Get the first block from two character lists and compare

    If character list ``a`` begins with a digit, the :any:`_pop_digits`
    function is called on both lists to get blocks of all consecutive
    digits at the start of each list. If the length of the block returned
    when popping digits for ``b`` is zero (``b`` started with a letter),
    ``a`` is newer. If ``b`` is of nonzero length, the blocks are
    compared using :any:`_compare_blocks`.

    If character list ``a`` begins with a letter, the :any:`_pop_letters`
    function is called on both lists to get blocks of all consecutive
    letters at the start of each list. If the length of the block
    returned when popping letters for ``b`` is zero (``b`` started with a
    digit), ``b`` is newer. If ``b`` is of nonzero length, blocks ``a``
    and ``b`` are compared using :any:`_compare_blocks`.

    :param list chars_a: a list of characters derived from a version string
    :param list chars_b: a list of characters derived from a version string
    :return: 1 (if ``a`` is newer), 0 (if versions are equal), or -1 (if
        ``b`` is newer)
    :rtype: int
    """
    logger.debug('_get_block_result(%s, %s)', chars_a, chars_b)
    first_is_digit = chars_a[0].isdigit()
    pop_func = _pop_digits if first_is_digit else _pop_letters
    return_if_no_b = a_newer if first_is_digit else b_newer
    block_a, block_b = pop_func(chars_a), pop_func(chars_b)
    if len(block_b) == 0:
        logger.debug('blocks are equal')
        return return_if_no_b
    return _compare_blocks(block_a, block_b)
python
def _get_block_result(chars_a, chars_b):
    """Get the first block from two character lists and compare

    If character list ``a`` begins with a digit, the :any:`_pop_digits`
    function is called on both lists to get blocks of all consecutive
    digits at the start of each list. If the length of the block returned
    when popping digits for ``b`` is zero (``b`` started with a letter),
    ``a`` is newer. If ``b`` is of nonzero length, the blocks are
    compared using :any:`_compare_blocks`.

    If character list ``a`` begins with a letter, the :any:`_pop_letters`
    function is called on both lists to get blocks of all consecutive
    letters at the start of each list. If the length of the block
    returned when popping letters for ``b`` is zero (``b`` started with a
    digit), ``b`` is newer. If ``b`` is of nonzero length, blocks ``a``
    and ``b`` are compared using :any:`_compare_blocks`.

    :param list chars_a: a list of characters derived from a version string
    :param list chars_b: a list of characters derived from a version string
    :return: 1 (if ``a`` is newer), 0 (if versions are equal), or -1 (if
        ``b`` is newer)
    :rtype: int
    """
    logger.debug('_get_block_result(%s, %s)', chars_a, chars_b)
    first_is_digit = chars_a[0].isdigit()
    pop_func = _pop_digits if first_is_digit else _pop_letters
    return_if_no_b = a_newer if first_is_digit else b_newer
    block_a, block_b = pop_func(chars_a), pop_func(chars_b)
    if len(block_b) == 0:
        logger.debug('blocks are equal')
        return return_if_no_b
    return _compare_blocks(block_a, block_b)
[ "def", "_get_block_result", "(", "chars_a", ",", "chars_b", ")", ":", "logger", ".", "debug", "(", "'_get_block_result(%s, %s)'", ",", "chars_a", ",", "chars_b", ")", "first_is_digit", "=", "chars_a", "[", "0", "]", ".", "isdigit", "(", ")", "pop_func", "=", "_pop_digits", "if", "first_is_digit", "else", "_pop_letters", "return_if_no_b", "=", "a_newer", "if", "first_is_digit", "else", "b_newer", "block_a", ",", "block_b", "=", "pop_func", "(", "chars_a", ")", ",", "pop_func", "(", "chars_b", ")", "if", "len", "(", "block_b", ")", "==", "0", ":", "logger", ".", "debug", "(", "'blocks are equal'", ")", "return", "return_if_no_b", "return", "_compare_blocks", "(", "block_a", ",", "block_b", ")" ]
Get the first block from two character lists and compare

    If character list ``a`` begins with a digit, the :any:`_pop_digits`
    function is called on both lists to get blocks of all consecutive
    digits at the start of each list. If the length of the block returned
    when popping digits for ``b`` is zero (``b`` started with a letter),
    ``a`` is newer. If ``b`` is of nonzero length, the blocks are
    compared using :any:`_compare_blocks`.

    If character list ``a`` begins with a letter, the :any:`_pop_letters`
    function is called on both lists to get blocks of all consecutive
    letters at the start of each list. If the length of the block
    returned when popping letters for ``b`` is zero (``b`` started with a
    digit), ``b`` is newer. If ``b`` is of nonzero length, blocks ``a``
    and ``b`` are compared using :any:`_compare_blocks`.

    :param list chars_a: a list of characters derived from a version string
    :param list chars_b: a list of characters derived from a version string
    :return: 1 (if ``a`` is newer), 0 (if versions are equal), or -1 (if
        ``b`` is newer)
    :rtype: int
[ "Get", "the", "first", "block", "from", "two", "character", "lists", "and", "compare" ]
7f63d80faca8e76274b6e8dff7637cc7cb8d848c
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L396-L430
train
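A sketch of one round of block comparison (hedged: hypothetical inputs):

from version_utils.rpm import _get_block_result

a, b = list('2el7'), list('rc1')
_get_block_result(a, b)   # -> 1: a starts with digits, b does not
# a has had its leading digit block consumed in place; b popped an empty block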
ariebovenberg/snug
examples/slack/channels.py
list_
def list_(*, cursor: str=None, exclude_archived: bool=None, exclude_members: bool=None, limit: int=None) -> snug.Query[Page[t.List[Channel]]]: """list all channels""" kwargs = { 'exclude_archived': exclude_archived, 'exclude_members': exclude_members, 'limit': limit } response = yield {'cursor': cursor, **kwargs} try: next_cursor = response['response_metadata']['next_cursor'] except KeyError: next_query = None else: next_query = list_(**kwargs, cursor=next_cursor) return Page( load_channel_list(response['channels']), next_query=next_query, )
python
def list_(*, cursor: str=None, exclude_archived: bool=None, exclude_members: bool=None, limit: int=None) -> snug.Query[Page[t.List[Channel]]]: """list all channels""" kwargs = { 'exclude_archived': exclude_archived, 'exclude_members': exclude_members, 'limit': limit } response = yield {'cursor': cursor, **kwargs} try: next_cursor = response['response_metadata']['next_cursor'] except KeyError: next_query = None else: next_query = list_(**kwargs, cursor=next_cursor) return Page( load_channel_list(response['channels']), next_query=next_query, )
[ "def", "list_", "(", "*", ",", "cursor", ":", "str", "=", "None", ",", "exclude_archived", ":", "bool", "=", "None", ",", "exclude_members", ":", "bool", "=", "None", ",", "limit", ":", "int", "=", "None", ")", "->", "snug", ".", "Query", "[", "Page", "[", "t", ".", "List", "[", "Channel", "]", "]", "]", ":", "kwargs", "=", "{", "'exclude_archived'", ":", "exclude_archived", ",", "'exclude_members'", ":", "exclude_members", ",", "'limit'", ":", "limit", "}", "response", "=", "yield", "{", "'cursor'", ":", "cursor", ",", "*", "*", "kwargs", "}", "try", ":", "next_cursor", "=", "response", "[", "'response_metadata'", "]", "[", "'next_cursor'", "]", "except", "KeyError", ":", "next_query", "=", "None", "else", ":", "next_query", "=", "list_", "(", "*", "*", "kwargs", ",", "cursor", "=", "next_cursor", ")", "return", "Page", "(", "load_channel_list", "(", "response", "[", "'channels'", "]", ")", ",", "next_query", "=", "next_query", ",", ")" ]
list all channels
[ "list", "all", "channels" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/channels.py#L14-L34
train
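A hedged sketch of driving this generator-based query by hand (it assumes the example module's Page and load_channel_list helpers are available; the response dict is fabricated):

query = list_(limit=2)
params = next(query)               # dict of request parameters for channels.list
try:
    query.send({'channels': []})   # no 'response_metadata' -> final page
except StopIteration as stop:
    page = stop.value              # Page(...) with next_query=None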
ariebovenberg/snug
examples/slack/channels.py
create
def create(name: str, *, validate: bool=None) -> snug.Query[Channel]: """create a new channel""" return {'name': name, 'validate': validate}
python
def create(name: str, *, validate: bool=None) -> snug.Query[Channel]: """create a new channel""" return {'name': name, 'validate': validate}
[ "def", "create", "(", "name", ":", "str", ",", "*", ",", "validate", ":", "bool", "=", "None", ")", "->", "snug", ".", "Query", "[", "Channel", "]", ":", "return", "{", "'name'", ":", "name", ",", "'validate'", ":", "validate", "}" ]
create a new channel
[ "create", "a", "new", "channel" ]
4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/channels.py#L38-L41
train
igorcoding/asynctnt-queue
asynctnt_queue/queue.py
Queue.tube
def tube(self, name): """ Returns tube by its name :param name: Tube name :returns: ``self.tube_cls`` instance (by default :class:`asynctnt_queue.Tube`) """ if name in self._tubes: return self._tubes[name] assert name, 'Tube name must be specified' t = self._tube_cls(self, name) self._tubes[name] = t return t
python
def tube(self, name): """ Returns tube by its name :param name: Tube name :returns: ``self.tube_cls`` instance (by default :class:`asynctnt_queue.Tube`) """ if name in self._tubes: return self._tubes[name] assert name, 'Tube name must be specified' t = self._tube_cls(self, name) self._tubes[name] = t return t
[ "def", "tube", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_tubes", ":", "return", "self", ".", "_tubes", "[", "name", "]", "assert", "name", ",", "'Tube name must be specified'", "t", "=", "self", ".", "_tube_cls", "(", "self", ",", "name", ")", "self", ".", "_tubes", "[", "name", "]", "=", "t", "return", "t" ]
Returns tube by its name :param name: Tube name :returns: ``self.tube_cls`` instance (by default :class:`asynctnt_queue.Tube`)
[ "Returns", "tube", "by", "its", "name" ]
75719b2dd27e8314ae924aea6a7a85be8f48ecc5
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/queue.py#L58-L72
train
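A usage sketch (hedged: the Queue constructor signature is assumed; conn stands for an established asynctnt connection):

from asynctnt_queue import Queue

queue = Queue(conn)
jobs = queue.tube('email_jobs')
assert jobs is queue.tube('email_jobs')   # instances are cached per name in _tubes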
eclipse/unide.python
src/unide/measurement.py
device_measurement
def device_measurement(device,
                       ts=None,
                       part=None,
                       result=None,
                       code=None,
                       **kwargs):
    """Returns a JSON MeasurementPayload ready to be sent through a
    transport.

    If `ts` is not given, the current time is used. `part` is an
    optional `Part` object, and `result` and `code` are the respective
    fields of the `Measurement` object. All other arguments are
    interpreted as dimensions.

    Minimal example, using a `Device` object to send two measurements:

    >>> d = Device("12345")
    >>> def publish(msg):
    ...    pass
    >>> publish(d.measurement(temperature=22.8))
    >>> publish(d.measurement(pressure=4.1))
    """
    if ts is None:
        ts = local_now()

    payload = MeasurementPayload(device=device, part=part)
    m = Measurement(ts, result, code, list(kwargs))
    payload.measurements.append(m)
    m.add_sample(ts, **kwargs)
    return dumps(payload)
python
def device_measurement(device,
                       ts=None,
                       part=None,
                       result=None,
                       code=None,
                       **kwargs):
    """Returns a JSON MeasurementPayload ready to be sent through a
    transport.

    If `ts` is not given, the current time is used. `part` is an
    optional `Part` object, and `result` and `code` are the respective
    fields of the `Measurement` object. All other arguments are
    interpreted as dimensions.

    Minimal example, using a `Device` object to send two measurements:

    >>> d = Device("12345")
    >>> def publish(msg):
    ...    pass
    >>> publish(d.measurement(temperature=22.8))
    >>> publish(d.measurement(pressure=4.1))
    """
    if ts is None:
        ts = local_now()

    payload = MeasurementPayload(device=device, part=part)
    m = Measurement(ts, result, code, list(kwargs))
    payload.measurements.append(m)
    m.add_sample(ts, **kwargs)
    return dumps(payload)
[ "def", "device_measurement", "(", "device", ",", "ts", "=", "None", ",", "part", "=", "None", ",", "result", "=", "None", ",", "code", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ts", "is", "None", ":", "ts", "=", "local_now", "(", ")", "payload", "=", "MeasurementPayload", "(", "device", "=", "device", ",", "part", "=", "part", ")", "m", "=", "Measurement", "(", "ts", ",", "result", ",", "code", ",", "list", "(", "kwargs", ")", ")", "payload", ".", "measurements", ".", "append", "(", "m", ")", "m", ".", "add_sample", "(", "ts", ",", "*", "*", "kwargs", ")", "return", "dumps", "(", "payload", ")" ]
Returns a JSON MeasurementPayload ready to be sent through a
    transport.

    If `ts` is not given, the current time is used. `part` is an
    optional `Part` object, and `result` and `code` are the respective
    fields of the `Measurement` object. All other arguments are
    interpreted as dimensions.

    Minimal example, using a `Device` object to send two measurements:

    >>> d = Device("12345")
    >>> def publish(msg):
    ...    pass
    >>> publish(d.measurement(temperature=22.8))
    >>> publish(d.measurement(pressure=4.1))
[ "Returns", "a", "JSON", "MeasurementPayload", "ready", "to", "be", "send", "through", "a", "transport", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L272-L303
train
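A hedged sketch of calling the module-level function directly (it assumes Device is importable from the unide package, as the docstring example implies):

from unide.measurement import device_measurement

# Device's exact import location is an assumption
payload_json = device_measurement(Device("12345"), temperature=22.8, pressure=4.1)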
eclipse/unide.python
src/unide/measurement.py
Measurement.add_sample
def add_sample(self, ts, **kwargs):
    """Add a sample to this measurement."""
    if not self.series.offsets:
        self.ts = ts
        offset = 0
    else:
        dt = ts - self.ts
        offset = (dt.days * 24 * 60 * 60 * 1000
                  + dt.seconds * 1000
                  + dt.microseconds // 1000)
    self.series.add_sample(offset, **kwargs)
python
def add_sample(self, ts, **kwargs):
    """Add a sample to this measurement."""
    if not self.series.offsets:
        self.ts = ts
        offset = 0
    else:
        dt = ts - self.ts
        offset = (dt.days * 24 * 60 * 60 * 1000
                  + dt.seconds * 1000
                  + dt.microseconds // 1000)
    self.series.add_sample(offset, **kwargs)
[ "def", "add_sample", "(", "self", ",", "ts", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "series", ".", "offsets", ":", "self", ".", "ts", "=", "ts", "offset", "=", "0", "else", ":", "dt", "=", "ts", "-", "self", ".", "ts", "offset", "=", "(", "dt", ".", "days", "*", "24", "*", "60", "*", "60", "*", "1000", "+", "dt", ".", "seconds", "*", "1000", "+", "dt", ".", "microseconds", "//", "1000", ")", "self", ".", "series", ".", "add_sample", "(", "offset", ",", "*", "*", "kwargs", ")" ]
Add a sample to this measurement.
[ "Add", "a", "sample", "to", "this", "measurements", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L203-L212
train
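The offset arithmetic above reduces a timedelta to milliseconds since the first sample; a self-contained check:

import datetime

dt = datetime.timedelta(seconds=1, microseconds=500000)
offset = dt.days * 24 * 60 * 60 * 1000 + dt.seconds * 1000 + dt.microseconds // 1000
assert offset == 1500   # 1.5 s after the first sample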
eclipse/unide.python
src/unide/measurement.py
Measurement.samples
def samples(self): """Yield samples as dictionaries, keyed by dimensions.""" names = self.series.dimensions for n, offset in enumerate(self.series.offsets): dt = datetime.timedelta(microseconds=offset * 1000) d = {"ts": self.ts + dt} for name in names: d[name] = getattr(self.series, name)[n] yield d
python
def samples(self): """Yield samples as dictionaries, keyed by dimensions.""" names = self.series.dimensions for n, offset in enumerate(self.series.offsets): dt = datetime.timedelta(microseconds=offset * 1000) d = {"ts": self.ts + dt} for name in names: d[name] = getattr(self.series, name)[n] yield d
[ "def", "samples", "(", "self", ")", ":", "names", "=", "self", ".", "series", ".", "dimensions", "for", "n", ",", "offset", "in", "enumerate", "(", "self", ".", "series", ".", "offsets", ")", ":", "dt", "=", "datetime", ".", "timedelta", "(", "microseconds", "=", "offset", "*", "1000", ")", "d", "=", "{", "\"ts\"", ":", "self", ".", "ts", "+", "dt", "}", "for", "name", "in", "names", ":", "d", "[", "name", "]", "=", "getattr", "(", "self", ".", "series", ",", "name", ")", "[", "n", "]", "yield", "d" ]
Yield samples as dictionaries, keyed by dimensions.
[ "Yield", "samples", "as", "dictionaries", "keyed", "by", "dimensions", "." ]
b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L214-L222
train
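A self-contained sketch of the reconstruction this generator performs (fabricated offsets and a single 'temperature' dimension):

import datetime

ts0 = datetime.datetime(2020, 1, 1)
offsets, temps = [0, 1500], [22.8, 22.9]
rebuilt = [{'ts': ts0 + datetime.timedelta(microseconds=o * 1000), 'temperature': t}
           for o, t in zip(offsets, temps)]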
thebigmunch/audio-metadata
src/audio_metadata/api.py
determine_format
def determine_format(data, extension=None): """Determine the format of an audio file. Parameters: data (bytes-like object, str, os.PathLike, or file-like object): A bytes-like object, filepath, path-like object or file-like object of an audio file. extension (str): The file extension of the file. Used as a tie-breaker for formats that can be used in multiple containers (e.g. ID3). """ if isinstance(data, (os.PathLike, str)): data = open(data, 'rb') data_reader = DataReader(data) data_reader.seek(0, os.SEEK_SET) d = data_reader.read(4) if d.startswith((b'ID3', b'\xFF\xFB')): # TODO: Catch all MP3 possibilities. if extension is None or extension.endswith('.mp3'): return MP3 if d.startswith((b'fLaC', b'ID3')): if extension is None or extension.endswith('.flac'): return FLAC if d.startswith(b'RIFF'): if extension is None or extension.endswith('.wav'): return WAV return None
python
def determine_format(data, extension=None): """Determine the format of an audio file. Parameters: data (bytes-like object, str, os.PathLike, or file-like object): A bytes-like object, filepath, path-like object or file-like object of an audio file. extension (str): The file extension of the file. Used as a tie-breaker for formats that can be used in multiple containers (e.g. ID3). """ if isinstance(data, (os.PathLike, str)): data = open(data, 'rb') data_reader = DataReader(data) data_reader.seek(0, os.SEEK_SET) d = data_reader.read(4) if d.startswith((b'ID3', b'\xFF\xFB')): # TODO: Catch all MP3 possibilities. if extension is None or extension.endswith('.mp3'): return MP3 if d.startswith((b'fLaC', b'ID3')): if extension is None or extension.endswith('.flac'): return FLAC if d.startswith(b'RIFF'): if extension is None or extension.endswith('.wav'): return WAV return None
[ "def", "determine_format", "(", "data", ",", "extension", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "(", "os", ".", "PathLike", ",", "str", ")", ")", ":", "data", "=", "open", "(", "data", ",", "'rb'", ")", "data_reader", "=", "DataReader", "(", "data", ")", "data_reader", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "d", "=", "data_reader", ".", "read", "(", "4", ")", "if", "d", ".", "startswith", "(", "(", "b'ID3'", ",", "b'\\xFF\\xFB'", ")", ")", ":", "# TODO: Catch all MP3 possibilities.", "if", "extension", "is", "None", "or", "extension", ".", "endswith", "(", "'.mp3'", ")", ":", "return", "MP3", "if", "d", ".", "startswith", "(", "(", "b'fLaC'", ",", "b'ID3'", ")", ")", ":", "if", "extension", "is", "None", "or", "extension", ".", "endswith", "(", "'.flac'", ")", ":", "return", "FLAC", "if", "d", ".", "startswith", "(", "b'RIFF'", ")", ":", "if", "extension", "is", "None", "or", "extension", ".", "endswith", "(", "'.wav'", ")", ":", "return", "WAV", "return", "None" ]
Determine the format of an audio file. Parameters: data (bytes-like object, str, os.PathLike, or file-like object): A bytes-like object, filepath, path-like object or file-like object of an audio file. extension (str): The file extension of the file. Used as a tie-breaker for formats that can be used in multiple containers (e.g. ID3).
[ "Determine", "the", "format", "of", "an", "audio", "file", "." ]
d17bdbdb71db79c1568d54438d42dcd940b76074
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L14-L45
train
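A usage sketch (hedged: the import path and file name are assumptions; the return value is a format class, not an instance):

from audio_metadata import determine_format

fmt = determine_format('example.flac', extension='.flac')
if fmt is not None:
    print(fmt.__name__)   # e.g. 'FLAC'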
thebigmunch/audio-metadata
src/audio_metadata/api.py
load
def load(f): """Load audio metadata from filepath or file-like object. Parameters: f (str, os.PathLike, or file-like object): A filepath, path-like object or file-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format. ValueError: If filepath/file-like object is not valid or readable. """ if isinstance(f, (os.PathLike, str)): fileobj = open(f, 'rb') else: try: f.read(0) except AttributeError: raise ValueError("Not a valid file-like object.") except Exception: raise ValueError("Can't read from file-like object.") fileobj = f parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1]) if parser_cls is None: raise UnsupportedFormat("Supported format signature not found.") else: fileobj.seek(0, os.SEEK_SET) return parser_cls.load(fileobj)
python
def load(f): """Load audio metadata from filepath or file-like object. Parameters: f (str, os.PathLike, or file-like object): A filepath, path-like object or file-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format. ValueError: If filepath/file-like object is not valid or readable. """ if isinstance(f, (os.PathLike, str)): fileobj = open(f, 'rb') else: try: f.read(0) except AttributeError: raise ValueError("Not a valid file-like object.") except Exception: raise ValueError("Can't read from file-like object.") fileobj = f parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1]) if parser_cls is None: raise UnsupportedFormat("Supported format signature not found.") else: fileobj.seek(0, os.SEEK_SET) return parser_cls.load(fileobj)
[ "def", "load", "(", "f", ")", ":", "if", "isinstance", "(", "f", ",", "(", "os", ".", "PathLike", ",", "str", ")", ")", ":", "fileobj", "=", "open", "(", "f", ",", "'rb'", ")", "else", ":", "try", ":", "f", ".", "read", "(", "0", ")", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Not a valid file-like object.\"", ")", "except", "Exception", ":", "raise", "ValueError", "(", "\"Can't read from file-like object.\"", ")", "fileobj", "=", "f", "parser_cls", "=", "determine_format", "(", "fileobj", ",", "os", ".", "path", ".", "splitext", "(", "fileobj", ".", "name", ")", "[", "1", "]", ")", "if", "parser_cls", "is", "None", ":", "raise", "UnsupportedFormat", "(", "\"Supported format signature not found.\"", ")", "else", ":", "fileobj", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "return", "parser_cls", ".", "load", "(", "fileobj", ")" ]
Load audio metadata from filepath or file-like object. Parameters: f (str, os.PathLike, or file-like object): A filepath, path-like object or file-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format. ValueError: If filepath/file-like object is not valid or readable.
[ "Load", "audio", "metadata", "from", "filepath", "or", "file", "-", "like", "object", "." ]
d17bdbdb71db79c1568d54438d42dcd940b76074
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L48-L82
train
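A usage sketch with the documented failure mode (hedged: hypothetical path; import names assumed):

from audio_metadata import load, UnsupportedFormat

try:
    metadata = load('example.mp3')
except UnsupportedFormat:
    metadata = None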
thebigmunch/audio-metadata
src/audio_metadata/api.py
loads
def loads(b): """Load audio metadata from a bytes-like object. Parameters: b (bytes-like object): A bytes-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format. """ parser_cls = determine_format(b) if parser_cls is None: raise UnsupportedFormat("Supported format signature not found.") return parser_cls.load(b)
python
def loads(b): """Load audio metadata from a bytes-like object. Parameters: b (bytes-like object): A bytes-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format. """ parser_cls = determine_format(b) if parser_cls is None: raise UnsupportedFormat("Supported format signature not found.") return parser_cls.load(b)
[ "def", "loads", "(", "b", ")", ":", "parser_cls", "=", "determine_format", "(", "b", ")", "if", "parser_cls", "is", "None", ":", "raise", "UnsupportedFormat", "(", "\"Supported format signature not found.\"", ")", "return", "parser_cls", ".", "load", "(", "b", ")" ]
Load audio metadata from a bytes-like object. Parameters: b (bytes-like object): A bytes-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format.
[ "Load", "audio", "metadata", "from", "a", "bytes", "-", "like", "object", "." ]
d17bdbdb71db79c1568d54438d42dcd940b76074
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L85-L103
train
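The bytes-based variant, sketched the same way (hedged: hypothetical path):

from audio_metadata import loads

with open('example.flac', 'rb') as f:
    metadata = loads(f.read())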
Godley/MuseParse
MuseParse/classes/ObjectHierarchy/TreeClasses/NoteNode.py
NoteNode.Find
def Find(self, node_type, item_type): ''' method for finding specific types of notation from nodes. will currently return the first one it encounters because this method's only really intended for some types of notation for which the exact value doesn't really matter. :param node_type: the type of node to look under :param item_type: the type of item (notation) being searched for :return: first item_type object encountered ''' if node_type == OtherNodes.DirectionNode: child = self.GetChild(len(self.children) - 1) while child is not None and not isinstance( child.GetItem(), item_type): if child.GetItem().__class__.__name__ == item_type.__name__: return True child = child.GetChild(0) if node_type == OtherNodes.ExpressionNode: child = self.GetChild(len(self.children) - 2) while child is not None and not isinstance( child.GetItem(), item_type): if child.GetItem().__class__.__name__ == item_type.__name__: return True child = child.GetChild(0)
python
def Find(self, node_type, item_type): ''' method for finding specific types of notation from nodes. will currently return the first one it encounters because this method's only really intended for some types of notation for which the exact value doesn't really matter. :param node_type: the type of node to look under :param item_type: the type of item (notation) being searched for :return: first item_type object encountered ''' if node_type == OtherNodes.DirectionNode: child = self.GetChild(len(self.children) - 1) while child is not None and not isinstance( child.GetItem(), item_type): if child.GetItem().__class__.__name__ == item_type.__name__: return True child = child.GetChild(0) if node_type == OtherNodes.ExpressionNode: child = self.GetChild(len(self.children) - 2) while child is not None and not isinstance( child.GetItem(), item_type): if child.GetItem().__class__.__name__ == item_type.__name__: return True child = child.GetChild(0)
[ "def", "Find", "(", "self", ",", "node_type", ",", "item_type", ")", ":", "if", "node_type", "==", "OtherNodes", ".", "DirectionNode", ":", "child", "=", "self", ".", "GetChild", "(", "len", "(", "self", ".", "children", ")", "-", "1", ")", "while", "child", "is", "not", "None", "and", "not", "isinstance", "(", "child", ".", "GetItem", "(", ")", ",", "item_type", ")", ":", "if", "child", ".", "GetItem", "(", ")", ".", "__class__", ".", "__name__", "==", "item_type", ".", "__name__", ":", "return", "True", "child", "=", "child", ".", "GetChild", "(", "0", ")", "if", "node_type", "==", "OtherNodes", ".", "ExpressionNode", ":", "child", "=", "self", ".", "GetChild", "(", "len", "(", "self", ".", "children", ")", "-", "2", ")", "while", "child", "is", "not", "None", "and", "not", "isinstance", "(", "child", ".", "GetItem", "(", ")", ",", "item_type", ")", ":", "if", "child", ".", "GetItem", "(", ")", ".", "__class__", ".", "__name__", "==", "item_type", ".", "__name__", ":", "return", "True", "child", "=", "child", ".", "GetChild", "(", "0", ")" ]
method for finding specific types of notation from nodes. will currently return the first one it encounters because this method's only really intended for some types of notation for which the exact value doesn't really matter. :param node_type: the type of node to look under :param item_type: the type of item (notation) being searched for :return: first item_type object encountered
[ "method", "for", "finding", "specific", "types", "of", "notation", "from", "nodes", ".", "will", "currently", "return", "the", "first", "one", "it", "encounters", "because", "this", "method", "s", "only", "really", "intended", "for", "some", "types", "of", "notation", "for", "which", "the", "exact", "value", "doesn", "t", "really", "matter", "." ]
23cecafa1fdc0f2d6a87760553572b459f3c9904
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/NoteNode.py#L41-L71
train
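A heavily hedged call sketch (note_node, OtherNodes and a Dynamic notation class are all assumed to be wired up as in MuseParse's tree classes):

found = note_node.Find(OtherNodes.ExpressionNode, Dynamic)   # True if encountered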
MacHu-GWU/single_file_module-project
sfm/lines_count.py
count_lines
def count_lines(abspath):
    """Count how many lines are in a plain text file.
    """
    with open(abspath, "rb") as f:
        i = 0
        for line in f:
            i += 1
    return i
python
def count_lines(abspath):
    """Count how many lines are in a plain text file.
    """
    with open(abspath, "rb") as f:
        i = 0
        for line in f:
            i += 1
    return i
[ "def", "count_lines", "(", "abspath", ")", ":", "with", "open", "(", "abspath", ",", "\"rb\"", ")", "as", "f", ":", "i", "=", "0", "for", "line", "in", "f", ":", "i", "+=", "1", "pass", "return", "i" ]
Count how many lines are in a plain text file.
[ "Count", "how", "many", "lines", "in", "a", "pure", "text", "file", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/lines_count.py#L21-L29
train
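A self-contained check of the line counter (the module path comes from the record above):

from sfm.lines_count import count_lines

import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('a\nb\nc\n')
assert count_lines(f.name) == 3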
MacHu-GWU/single_file_module-project
sfm/lines_count.py
lines_stats
def lines_stats(dir_path, file_filter): """Lines count of selected files under a directory. :return n_files: number of files :return n_lines: number of lines """ n_files = 0 n_lines = 0 for p in Path(dir_path).select_file(file_filter): n_files += 1 n_lines += count_lines(p.abspath) return n_files, n_lines
python
def lines_stats(dir_path, file_filter): """Lines count of selected files under a directory. :return n_files: number of files :return n_lines: number of lines """ n_files = 0 n_lines = 0 for p in Path(dir_path).select_file(file_filter): n_files += 1 n_lines += count_lines(p.abspath) return n_files, n_lines
[ "def", "lines_stats", "(", "dir_path", ",", "file_filter", ")", ":", "n_files", "=", "0", "n_lines", "=", "0", "for", "p", "in", "Path", "(", "dir_path", ")", ".", "select_file", "(", "file_filter", ")", ":", "n_files", "+=", "1", "n_lines", "+=", "count_lines", "(", "p", ".", "abspath", ")", "return", "n_files", ",", "n_lines" ]
Lines count of selected files under a directory. :return n_files: number of files :return n_lines: number of lines
[ "Lines", "count", "of", "selected", "files", "under", "a", "directory", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/lines_count.py#L32-L43
train
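A hedged sketch (the filter signature follows the pathlib_mate-style Path.select_file this module appears to rely on; the directory name is hypothetical):

from sfm.lines_count import lines_stats

n_files, n_lines = lines_stats('my_project', lambda p: p.ext == '.py')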
TylerTemp/docpie
docpie/parser.py
UsageParser.parse_content
def parse_content(self, text):
    """get the Usage section; store the raw match in `raw_content` and a
    version with the title and leading empty lines stripped in
    `formal_content`"""
    match = re.search(
        self.usage_re_str.format(self.usage_name),
        text,
        flags=(re.DOTALL
               if self.case_sensitive
               else (re.DOTALL | re.IGNORECASE)))
    if match is None:
        return

    dic = match.groupdict()
    logger.debug(dic)
    self.raw_content = dic['raw']

    if dic['sep'] in ('\n', '\r\n'):
        self.formal_content = dic['section']
        return

    reallen = len(dic['name'])
    replace = ''.ljust(reallen)
    drop_name = match.expand('%s\\g<sep>\\g<section>' % replace)
    self.formal_content = self.drop_started_empty_lines(drop_name).rstrip()
python
def parse_content(self, text):
    """get the Usage section; store the raw match in `raw_content` and a
    version with the title and leading empty lines stripped in
    `formal_content`"""
    match = re.search(
        self.usage_re_str.format(self.usage_name),
        text,
        flags=(re.DOTALL
               if self.case_sensitive
               else (re.DOTALL | re.IGNORECASE)))
    if match is None:
        return

    dic = match.groupdict()
    logger.debug(dic)
    self.raw_content = dic['raw']

    if dic['sep'] in ('\n', '\r\n'):
        self.formal_content = dic['section']
        return

    reallen = len(dic['name'])
    replace = ''.ljust(reallen)
    drop_name = match.expand('%s\\g<sep>\\g<section>' % replace)
    self.formal_content = self.drop_started_empty_lines(drop_name).rstrip()
[ "def", "parse_content", "(", "self", ",", "text", ")", ":", "match", "=", "re", ".", "search", "(", "self", ".", "usage_re_str", ".", "format", "(", "self", ".", "usage_name", ")", ",", "text", ",", "flags", "=", "(", "re", ".", "DOTALL", "if", "self", ".", "case_sensitive", "else", "(", "re", ".", "DOTALL", "|", "re", ".", "IGNORECASE", ")", ")", ")", "if", "match", "is", "None", ":", "return", "dic", "=", "match", ".", "groupdict", "(", ")", "logger", ".", "debug", "(", "dic", ")", "self", ".", "raw_content", "=", "dic", "[", "'raw'", "]", "if", "dic", "[", "'sep'", "]", "in", "(", "'\\n'", ",", "'\\r\\n'", ")", ":", "self", ".", "formal_content", "=", "dic", "[", "'section'", "]", "return", "reallen", "=", "len", "(", "dic", "[", "'name'", "]", ")", "replace", "=", "''", ".", "ljust", "(", "reallen", ")", "drop_name", "=", "match", ".", "expand", "(", "'%s\\\\g<sep>\\\\g<section>'", "%", "replace", ")", "self", ".", "formal_content", "=", "self", ".", "drop_started_empty_lines", "(", "drop_name", ")", ".", "rstrip", "(", ")" ]
get the Usage section; store the raw match in `raw_content` and a version with the title and leading empty lines stripped in `formal_content`
[ "get", "Usage", "section", "and", "set", "to", "raw_content", "formal_content", "of", "no", "title", "and", "empty", "-", "line", "version" ]
e658454b81b6c79a020d499f12ad73496392c09a
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/parser.py#L772-L795
train
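A hedged sketch (UsageParser construction details are assumed; the doc text is fabricated):

doc = 'Usage: prog [options] <file>\n\nOptions:\n  -h, --help   show help\n'
parser.parse_content(doc)       # parser: a configured UsageParser instance
print(parser.formal_content)    # usage body without the section title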
koszullab/metaTOR
metator/scripts/figures.py
spaceless_pdf_plot_maker
def spaceless_pdf_plot_maker(array, filename, vmax=None, dpi=DEFAULT_DPI): """Draw a pretty plot from an array A function that performs all the tedious matplotlib magic to draw a 2D array with as few parameters and as little whitespace as possible. Parameters ---------- array : array_like The input array to draw. filename : file, str or pathlib.Path The output image to save the array into. vmax : float, optional The default saturation threshold for the array. If set to None, the 80th percentile value of the array is chosen. Default is None. dpi : int, optional Dots per inch (DPI) of the output image. Default is 200. """ if vmax is None: vmax = np.percentile(array, DEFAULT_SATURATION_THRESHOLD) plt.gca().set_axis_off() plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) plt.margins(0, 0) plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator()) plt.figure() if SEABORN: sns.heatmap(array, vmax=vmax, cmap="Reds") else: plt.imshow(array, vmax=vmax, cmap="Reds", interpolation="none") plt.colorbar() plt.savefig(filename, bbox_inches="tight", pad_inches=0.0, dpi=dpi) plt.close()
python
def spaceless_pdf_plot_maker(array, filename, vmax=None, dpi=DEFAULT_DPI): """Draw a pretty plot from an array A function that performs all the tedious matplotlib magic to draw a 2D array with as few parameters and as little whitespace as possible. Parameters ---------- array : array_like The input array to draw. filename : file, str or pathlib.Path The output image to save the array into. vmax : float, optional The default saturation threshold for the array. If set to None, the 80th percentile value of the array is chosen. Default is None. dpi : int, optional Dots per inch (DPI) of the output image. Default is 200. """ if vmax is None: vmax = np.percentile(array, DEFAULT_SATURATION_THRESHOLD) plt.gca().set_axis_off() plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) plt.margins(0, 0) plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator()) plt.figure() if SEABORN: sns.heatmap(array, vmax=vmax, cmap="Reds") else: plt.imshow(array, vmax=vmax, cmap="Reds", interpolation="none") plt.colorbar() plt.savefig(filename, bbox_inches="tight", pad_inches=0.0, dpi=dpi) plt.close()
[ "def", "spaceless_pdf_plot_maker", "(", "array", ",", "filename", ",", "vmax", "=", "None", ",", "dpi", "=", "DEFAULT_DPI", ")", ":", "if", "vmax", "is", "None", ":", "vmax", "=", "np", ".", "percentile", "(", "array", ",", "DEFAULT_SATURATION_THRESHOLD", ")", "plt", ".", "gca", "(", ")", ".", "set_axis_off", "(", ")", "plt", ".", "subplots_adjust", "(", "top", "=", "1", ",", "bottom", "=", "0", ",", "right", "=", "1", ",", "left", "=", "0", ",", "hspace", "=", "0", ",", "wspace", "=", "0", ")", "plt", ".", "margins", "(", "0", ",", "0", ")", "plt", ".", "gca", "(", ")", ".", "xaxis", ".", "set_major_locator", "(", "plt", ".", "NullLocator", "(", ")", ")", "plt", ".", "gca", "(", ")", ".", "yaxis", ".", "set_major_locator", "(", "plt", ".", "NullLocator", "(", ")", ")", "plt", ".", "figure", "(", ")", "if", "SEABORN", ":", "sns", ".", "heatmap", "(", "array", ",", "vmax", "=", "vmax", ",", "cmap", "=", "\"Reds\"", ")", "else", ":", "plt", ".", "imshow", "(", "array", ",", "vmax", "=", "vmax", ",", "cmap", "=", "\"Reds\"", ",", "interpolation", "=", "\"none\"", ")", "plt", ".", "colorbar", "(", ")", "plt", ".", "savefig", "(", "filename", ",", "bbox_inches", "=", "\"tight\"", ",", "pad_inches", "=", "0.0", ",", "dpi", "=", "dpi", ")", "plt", ".", "close", "(", ")" ]
Draw a pretty plot from an array A function that performs all the tedious matplotlib magic to draw a 2D array with as few parameters and as little whitespace as possible. Parameters ---------- array : array_like The input array to draw. filename : file, str or pathlib.Path The output image to save the array into. vmax : float, optional The default saturation threshold for the array. If set to None, the 80th percentile value of the array is chosen. Default is None. dpi : int, optional Dots per inch (DPI) of the output image. Default is 200.
[ "Draw", "a", "pretty", "plot", "from", "an", "array" ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/figures.py#L38-L72
train
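A minimal usage sketch (random data; hypothetical output file; module path from the record above):

from metator.scripts.figures import spaceless_pdf_plot_maker
import numpy as np

array = np.random.random((200, 200))
spaceless_pdf_plot_maker(array, 'preview.pdf')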
koszullab/metaTOR
metator/scripts/figures.py
draw_sparse_matrix
def draw_sparse_matrix( array_filename, output_image, vmax=DEFAULT_SATURATION_THRESHOLD, max_size_matrix=DEFAULT_MAX_SIZE_MATRIX, ): """Draw a quick preview of a sparse matrix with automated binning and normalization. """ matrix = np.loadtxt(array_filename, dtype=np.int32, skiprows=1) try: row, col, data = matrix.T except ValueError: row, col, data = matrix size = max(np.amax(row), np.amax(col)) + 1 S = sparse.coo_matrix((data, (row, col)), shape=(size, size)) if max_size_matrix <= 0: binning = 1 else: binning = (size // max_size_matrix) + 1 binned_S = hcs.bin_sparse(S, subsampling_factor=binning) dense_S = binned_S.todense() dense_S = dense_S + dense_S.T - np.diag(np.diag(dense_S)) normed_S = hcs.normalize_dense(dense_S) spaceless_pdf_plot_maker(normed_S, output_image, vmax=vmax)
python
def draw_sparse_matrix( array_filename, output_image, vmax=DEFAULT_SATURATION_THRESHOLD, max_size_matrix=DEFAULT_MAX_SIZE_MATRIX, ): """Draw a quick preview of a sparse matrix with automated binning and normalization. """ matrix = np.loadtxt(array_filename, dtype=np.int32, skiprows=1) try: row, col, data = matrix.T except ValueError: row, col, data = matrix size = max(np.amax(row), np.amax(col)) + 1 S = sparse.coo_matrix((data, (row, col)), shape=(size, size)) if max_size_matrix <= 0: binning = 1 else: binning = (size // max_size_matrix) + 1 binned_S = hcs.bin_sparse(S, subsampling_factor=binning) dense_S = binned_S.todense() dense_S = dense_S + dense_S.T - np.diag(np.diag(dense_S)) normed_S = hcs.normalize_dense(dense_S) spaceless_pdf_plot_maker(normed_S, output_image, vmax=vmax)
[ "def", "draw_sparse_matrix", "(", "array_filename", ",", "output_image", ",", "vmax", "=", "DEFAULT_SATURATION_THRESHOLD", ",", "max_size_matrix", "=", "DEFAULT_MAX_SIZE_MATRIX", ",", ")", ":", "matrix", "=", "np", ".", "loadtxt", "(", "array_filename", ",", "dtype", "=", "np", ".", "int32", ",", "skiprows", "=", "1", ")", "try", ":", "row", ",", "col", ",", "data", "=", "matrix", ".", "T", "except", "ValueError", ":", "row", ",", "col", ",", "data", "=", "matrix", "size", "=", "max", "(", "np", ".", "amax", "(", "row", ")", ",", "np", ".", "amax", "(", "col", ")", ")", "+", "1", "S", "=", "sparse", ".", "coo_matrix", "(", "(", "data", ",", "(", "row", ",", "col", ")", ")", ",", "shape", "=", "(", "size", ",", "size", ")", ")", "if", "max_size_matrix", "<=", "0", ":", "binning", "=", "1", "else", ":", "binning", "=", "(", "size", "//", "max_size_matrix", ")", "+", "1", "binned_S", "=", "hcs", ".", "bin_sparse", "(", "S", ",", "subsampling_factor", "=", "binning", ")", "dense_S", "=", "binned_S", ".", "todense", "(", ")", "dense_S", "=", "dense_S", "+", "dense_S", ".", "T", "-", "np", ".", "diag", "(", "np", ".", "diag", "(", "dense_S", ")", ")", "normed_S", "=", "hcs", ".", "normalize_dense", "(", "dense_S", ")", "spaceless_pdf_plot_maker", "(", "normed_S", ",", "output_image", ",", "vmax", "=", "vmax", ")" ]
Draw a quick preview of a sparse matrix with automated binning and normalization.
[ "Draw", "a", "quick", "preview", "of", "a", "sparse", "matrix", "with", "automated", "binning", "and", "normalization", "." ]
0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/figures.py#L75-L100
train
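A usage sketch (hypothetical filenames; per the skiprows=1 above, the input is expected to be a whitespace-separated 'row col value' listing with one header row):

from metator.scripts.figures import draw_sparse_matrix

draw_sparse_matrix('abundance_sparse_mat.txt', 'preview.pdf')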
MacHu-GWU/single_file_module-project
sfm/iterable.py
nth
def nth(iterable, n, default=None):
    """Returns the nth item or a default value.

    Example::

        >>> nth([0, 1, 2], 1)
        1
        >>> nth([0, 1, 2], 100)
        None

    **Notes**

    Takes the nth element of an iterable. Equivalent to list(iterable)[n],
    but uses very little memory, since list(iterable) would place every
    element in memory to build a new list. Commonly used with iterables
    whose index access has been overridden.
    """
    return next(itertools.islice(iterable, n, None), default)
python
def nth(iterable, n, default=None):
    """Returns the nth item or a default value.

    Example::

        >>> nth([0, 1, 2], 1)
        1
        >>> nth([0, 1, 2], 100)
        None

    **Notes**

    Takes the nth element of an iterable. Equivalent to list(iterable)[n],
    but uses very little memory, since list(iterable) would place every
    element in memory to build a new list. Commonly used with iterables
    whose index access has been overridden.
    """
    return next(itertools.islice(iterable, n, None), default)
[ "def", "nth", "(", "iterable", ",", "n", ",", "default", "=", "None", ")", ":", "return", "next", "(", "itertools", ".", "islice", "(", "iterable", ",", "n", ",", "None", ")", ",", "default", ")" ]
Returns the nth item or a default value.

    Example::

        >>> nth([0, 1, 2], 1)
        1
        >>> nth([0, 1, 2], 100)
        None

    **Notes**

    Takes the nth element of an iterable. Equivalent to list(iterable)[n],
    but uses very little memory, since list(iterable) would place every
    element in memory to build a new list. Commonly used with iterables
    whose index access has been overridden.
[ "Returns", "the", "nth", "item", "or", "a", "default", "value", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L65-L82
train
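Because only an islice is consumed, this also works on lazy or infinite iterables:

from sfm.iterable import nth
from itertools import count

nth(count(), 10 ** 6)   # -> 1000000, without building a million-item list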
MacHu-GWU/single_file_module-project
sfm/iterable.py
pull
def pull(iterable, n):
    """Return last n items of the iterable as a list.

    Example::

        >>> pull([0, 1, 2], 2)
        [1, 2]

    **Notes**

    Takes the last n elements of an iterable. Equivalent to
    list(iterable)[-n:], but uses very little memory, since
    list(iterable) would place every element in memory to build a new
    list. Commonly used with iterables whose index access has been
    overridden.
    """
    fifo = collections.deque(maxlen=n)
    for i in iterable:
        fifo.append(i)
    return list(fifo)
python
def pull(iterable, n):
    """Return last n items of the iterable as a list.

    Example::

        >>> pull([0, 1, 2], 2)
        [1, 2]

    **Notes**

    Takes the last n elements of an iterable. Equivalent to
    list(iterable)[-n:], but uses very little memory, since
    list(iterable) would place every element in memory to build a new
    list. Commonly used with iterables whose index access has been
    overridden.
    """
    fifo = collections.deque(maxlen=n)
    for i in iterable:
        fifo.append(i)
    return list(fifo)
[ "def", "pull", "(", "iterable", ",", "n", ")", ":", "fifo", "=", "collections", ".", "deque", "(", "maxlen", "=", "n", ")", "for", "i", "in", "iterable", ":", "fifo", ".", "append", "(", "i", ")", "return", "list", "(", "fifo", ")" ]
Return last n items of the iterable as a list.

    Example::

        >>> pull([0, 1, 2], 2)
        [1, 2]

    **Notes**

    Takes the last n elements of an iterable. Equivalent to
    list(iterable)[-n:], but uses very little memory, since
    list(iterable) would place every element in memory to build a new
    list. Commonly used with iterables whose index access has been
    overridden.
[ "Return", "last", "n", "items", "of", "the", "iterable", "as", "a", "list", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L102-L119
train
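The bounded deque keeps memory flat even for large inputs:

from sfm.iterable import pull

pull((i * i for i in range(10 ** 6)), 3)   # last three squares; only 3 items retained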
MacHu-GWU/single_file_module-project
sfm/iterable.py
running_window
def running_window(iterable, size):
    """Generate n-size running window.

    Example::

        >>> for i in running_window([1, 2, 3, 4, 5], size=3):
        ...     print(i)
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]

    **Notes**

    A simple sliding-window function.
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")

    fifo = collections.deque(maxlen=size)
    for i in iterable:
        fifo.append(i)
        if len(fifo) == size:
            yield list(fifo)
python
def running_window(iterable, size):
    """Generate n-size running window.

    Example::

        >>> for i in running_window([1, 2, 3, 4, 5], size=3):
        ...     print(i)
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]

    **Notes**

    A simple sliding-window function.
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")

    fifo = collections.deque(maxlen=size)
    for i in iterable:
        fifo.append(i)
        if len(fifo) == size:
            yield list(fifo)
[ "def", "running_window", "(", "iterable", ",", "size", ")", ":", "if", "size", ">", "len", "(", "iterable", ")", ":", "raise", "ValueError", "(", "\"size can not be greater than length of iterable.\"", ")", "fifo", "=", "collections", ".", "deque", "(", "maxlen", "=", "size", ")", "for", "i", "in", "iterable", ":", "fifo", ".", "append", "(", "i", ")", "if", "len", "(", "fifo", ")", "==", "size", ":", "yield", "list", "(", "fifo", ")" ]
Generate n-size running window. Example::

    >>> for i in running_window([1, 2, 3, 4, 5], size=3):
    ...     print(i)
    [1, 2, 3]
    [2, 3, 4]
    [3, 4, 5]

**Notes**

A simple sliding-window function.
[ "Generate", "n", "-", "size", "running", "window", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L215-L237
train
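Note that running_window calls len(iterable), so it rejects plain generators. Below is a minimal sketch of a generator-friendly variant, assuming it is acceptable to yield nothing when the input is shorter than size; running_window_iter is a hypothetical name, not part of the library:

import collections

def running_window_iter(iterable, size):
    # Hypothetical variant: no len() pre-check, so generators are accepted.
    # If the input yields fewer than `size` items, nothing is yielded.
    fifo = collections.deque(maxlen=size)
    for i in iterable:
        fifo.append(i)
        if len(fifo) == size:
            yield list(fifo)

gen = (x * x for x in range(6))  # 0, 1, 4, 9, 16, 25 -- has no len()
print(list(running_window_iter(gen, 3)))
# [[0, 1, 4], [1, 4, 9], [4, 9, 16], [9, 16, 25]]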
MacHu-GWU/single_file_module-project
sfm/iterable.py
cycle_running_window
def cycle_running_window(iterable, size):
    """Generate n-size cycle running window. Example::

        >>> for i in cycle_running_window([1, 2, 3, 4, 5], size=3):
        ...     print(i)
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]
        [4, 5, 1]
        [5, 1, 2]

    **Notes**

    A cyclic sliding-window function: the window wraps around to the
    beginning of the iterable.
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")

    fifo = collections.deque(maxlen=size)
    cycle = itertools.cycle(iterable)
    counter = itertools.count(1)
    length = len(iterable)
    for i in cycle:
        fifo.append(i)
        if len(fifo) == size:
            yield list(fifo)
            if next(counter) == length:
                break
python
def cycle_running_window(iterable, size):
    """Generate n-size cycle running window. Example::

        >>> for i in cycle_running_window([1, 2, 3, 4, 5], size=3):
        ...     print(i)
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]
        [4, 5, 1]
        [5, 1, 2]

    **Notes**

    A cyclic sliding-window function: the window wraps around to the
    beginning of the iterable.
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")

    fifo = collections.deque(maxlen=size)
    cycle = itertools.cycle(iterable)
    counter = itertools.count(1)
    length = len(iterable)
    for i in cycle:
        fifo.append(i)
        if len(fifo) == size:
            yield list(fifo)
            if next(counter) == length:
                break
[ "def", "cycle_running_window", "(", "iterable", ",", "size", ")", ":", "if", "size", ">", "len", "(", "iterable", ")", ":", "raise", "ValueError", "(", "\"size can not be greater than length of iterable.\"", ")", "fifo", "=", "collections", ".", "deque", "(", "maxlen", "=", "size", ")", "cycle", "=", "itertools", ".", "cycle", "(", "iterable", ")", "counter", "=", "itertools", ".", "count", "(", "1", ")", "length", "=", "len", "(", "iterable", ")", "for", "i", "in", "cycle", ":", "fifo", ".", "append", "(", "i", ")", "if", "len", "(", "fifo", ")", "==", "size", ":", "yield", "list", "(", "fifo", ")", "if", "next", "(", "counter", ")", "==", "length", ":", "break" ]
Generate n-size cycle running window. Example::

    >>> for i in cycle_running_window([1, 2, 3, 4, 5], size=3):
    ...     print(i)
    [1, 2, 3]
    [2, 3, 4]
    [3, 4, 5]
    [4, 5, 1]
    [5, 1, 2]

**Notes**

A cyclic sliding-window function: the window wraps around to the beginning of the iterable.
[ "Generate", "n", "-", "size", "cycle", "running", "window", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L240-L269
train
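A minimal usage sketch for cycle_running_window, redefined inline from the record above; the polygon-edge example is hypothetical but shows the characteristic wrap-around behavior:

import collections
import itertools

def cycle_running_window(iterable, size):
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")
    fifo = collections.deque(maxlen=size)
    counter = itertools.count(1)
    length = len(iterable)
    for i in itertools.cycle(iterable):
        fifo.append(i)
        if len(fifo) == size:
            yield list(fifo)
            if next(counter) == length:  # stop after one full lap
                break

# Every edge of a closed polygon, including the wrap-around edge D -> A.
vertices = ["A", "B", "C", "D"]
print(list(cycle_running_window(vertices, 2)))
# [['A', 'B'], ['B', 'C'], ['C', 'D'], ['D', 'A']]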
MacHu-GWU/single_file_module-project
sfm/iterable.py
shift_and_trim
def shift_and_trim(array, dist):
    """Shift and trim unneeded items.

    :param array: list-like iterable object
    :param dist: int

    Example::

        >>> array = [0, 1, 2]

        >>> shift_and_trim(array, 0)
        [0, 1, 2]

        >>> shift_and_trim(array, 1)
        [0, 1]

        >>> shift_and_trim(array, -1)
        [1, 2]

        >>> shift_and_trim(array, 3)
        []

        >>> shift_and_trim(array, -3)
        []
    """
    length = len(array)
    if length == 0:
        return []

    if (dist >= length) or (dist <= -length):
        return []
    elif dist < 0:
        return array[-dist:]
    elif dist > 0:
        return array[:-dist]
    else:
        return list(array)
python
def shift_and_trim(array, dist):
    """Shift and trim unneeded items.

    :param array: list-like iterable object
    :param dist: int

    Example::

        >>> array = [0, 1, 2]

        >>> shift_and_trim(array, 0)
        [0, 1, 2]

        >>> shift_and_trim(array, 1)
        [0, 1]

        >>> shift_and_trim(array, -1)
        [1, 2]

        >>> shift_and_trim(array, 3)
        []

        >>> shift_and_trim(array, -3)
        []
    """
    length = len(array)
    if length == 0:
        return []

    if (dist >= length) or (dist <= -length):
        return []
    elif dist < 0:
        return array[-dist:]
    elif dist > 0:
        return array[:-dist]
    else:
        return list(array)
[ "def", "shift_and_trim", "(", "array", ",", "dist", ")", ":", "length", "=", "len", "(", "array", ")", "if", "length", "==", "0", ":", "return", "[", "]", "if", "(", "dist", ">=", "length", ")", "or", "(", "dist", "<=", "-", "length", ")", ":", "return", "[", "]", "elif", "dist", "<", "0", ":", "return", "array", "[", "-", "dist", ":", "]", "elif", "dist", ">", "0", ":", "return", "array", "[", ":", "-", "dist", "]", "else", ":", "return", "list", "(", "array", ")" ]
Shift and trim unneeded items.

:param array: list-like iterable object
:param dist: int

Example::

    >>> array = [0, 1, 2]

    >>> shift_and_trim(array, 0)
    [0, 1, 2]

    >>> shift_and_trim(array, 1)
    [0, 1]

    >>> shift_and_trim(array, -1)
    [1, 2]

    >>> shift_and_trim(array, 3)
    []

    >>> shift_and_trim(array, -3)
    []
[ "Shift", "and", "trim", "unneeded", "item", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L351-L386
train
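A minimal sanity-check sketch for shift_and_trim, restated in condensed form (behavior-equivalent, as far as the record's examples show); the asserts mirror the docstring's slicing semantics:

def shift_and_trim(array, dist):
    # Positive dist drops the tail (shift right); negative dist drops
    # the head (shift left); |dist| >= len empties the result.
    length = len(array)
    if length == 0 or abs(dist) >= length:
        return []
    if dist > 0:
        return array[:-dist]
    if dist < 0:
        return array[-dist:]
    return list(array)

data = list(range(5))
assert shift_and_trim(data, 2) == data[:-2] == [0, 1, 2]
assert shift_and_trim(data, -2) == data[2:] == [2, 3, 4]
assert shift_and_trim(data, 5) == []
print("slicing equivalence holds")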
MacHu-GWU/single_file_module-project
sfm/iterable.py
shift_and_pad
def shift_and_pad(array, dist, pad="__null__"):
    """Shift and pad with item.

    :param array: list-like iterable object
    :param dist: int
    :param pad: any value

    Example::

        >>> array = [0, 1, 2]

        >>> shift_and_pad(array, 0)
        [0, 1, 2]

        >>> shift_and_pad(array, 1)
        [0, 0, 1]

        >>> shift_and_pad(array, -1)
        [1, 2, 2]

        >>> shift_and_pad(array, 3)
        [0, 0, 0]

        >>> shift_and_pad(array, -3)
        [2, 2, 2]

        >>> shift_and_pad(array, -1, None)
        [1, 2, None]
    """
    length = len(array)
    if length == 0:
        return []

    if pad == "__null__":
        if dist > 0:
            padding_item = array[0]
        elif dist < 0:
            padding_item = array[-1]
        else:
            padding_item = None
    else:
        padding_item = pad

    if abs(dist) >= length:
        return length * [padding_item, ]
    elif dist == 0:
        return list(array)
    elif dist > 0:
        return [padding_item, ] * dist + array[:-dist]
    elif dist < 0:
        return array[-dist:] + [padding_item, ] * -dist
    else:
        # unreachable: every dist value is handled above
        raise Exception
python
def shift_and_pad(array, dist, pad="__null__"):
    """Shift and pad with item.

    :param array: list-like iterable object
    :param dist: int
    :param pad: any value

    Example::

        >>> array = [0, 1, 2]

        >>> shift_and_pad(array, 0)
        [0, 1, 2]

        >>> shift_and_pad(array, 1)
        [0, 0, 1]

        >>> shift_and_pad(array, -1)
        [1, 2, 2]

        >>> shift_and_pad(array, 3)
        [0, 0, 0]

        >>> shift_and_pad(array, -3)
        [2, 2, 2]

        >>> shift_and_pad(array, -1, None)
        [1, 2, None]
    """
    length = len(array)
    if length == 0:
        return []

    if pad == "__null__":
        if dist > 0:
            padding_item = array[0]
        elif dist < 0:
            padding_item = array[-1]
        else:
            padding_item = None
    else:
        padding_item = pad

    if abs(dist) >= length:
        return length * [padding_item, ]
    elif dist == 0:
        return list(array)
    elif dist > 0:
        return [padding_item, ] * dist + array[:-dist]
    elif dist < 0:
        return array[-dist:] + [padding_item, ] * -dist
    else:
        # unreachable: every dist value is handled above
        raise Exception
[ "def", "shift_and_pad", "(", "array", ",", "dist", ",", "pad", "=", "\"__null__\"", ")", ":", "length", "=", "len", "(", "array", ")", "if", "length", "==", "0", ":", "return", "[", "]", "if", "pad", "==", "\"__null__\"", ":", "if", "dist", ">", "0", ":", "padding_item", "=", "array", "[", "0", "]", "elif", "dist", "<", "0", ":", "padding_item", "=", "array", "[", "-", "1", "]", "else", ":", "padding_item", "=", "None", "else", ":", "padding_item", "=", "pad", "if", "abs", "(", "dist", ")", ">=", "length", ":", "return", "length", "*", "[", "padding_item", ",", "]", "elif", "dist", "==", "0", ":", "return", "list", "(", "array", ")", "elif", "dist", ">", "0", ":", "return", "[", "padding_item", ",", "]", "*", "dist", "+", "array", "[", ":", "-", "dist", "]", "elif", "dist", "<", "0", ":", "return", "array", "[", "-", "dist", ":", "]", "+", "[", "padding_item", ",", "]", "*", "-", "dist", "else", ":", "# Never get in this logic", "raise", "Exception" ]
Shift and pad with item.

:param array: list-like iterable object
:param dist: int
:param pad: any value

Example::

    >>> array = [0, 1, 2]

    >>> shift_and_pad(array, 0)
    [0, 1, 2]

    >>> shift_and_pad(array, 1)
    [0, 0, 1]

    >>> shift_and_pad(array, -1)
    [1, 2, 2]

    >>> shift_and_pad(array, 3)
    [0, 0, 0]

    >>> shift_and_pad(array, -3)
    [2, 2, 2]

    >>> shift_and_pad(array, -1, None)
    [1, 2, None]
[ "Shift", "and", "pad", "with", "item", "." ]
01f7a6b250853bebfd73de275895bf274325cfc1
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L389-L440
train
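A hedged application sketch for shift_and_pad, restated in condensed form; building a one-step "lag" column is a hypothetical use case, not something the library documents:

def shift_and_pad(array, dist, pad="__null__"):
    # Condensed restatement of the record above; "__null__" is the
    # sentinel meaning "repeat the nearest edge value as padding".
    length = len(array)
    if length == 0:
        return []
    if pad == "__null__":
        padding_item = array[0] if dist > 0 else (array[-1] if dist < 0 else None)
    else:
        padding_item = pad
    if abs(dist) >= length:
        return [padding_item] * length
    if dist > 0:
        return [padding_item] * dist + array[:-dist]
    if dist < 0:
        return array[-dist:] + [padding_item] * -dist
    return list(array)

# Hypothetical use: a one-step lag column for a tiny price series.
prices = [10, 11, 13, 12]
lagged = shift_and_pad(prices, 1, None)  # [None, 10, 11, 13]
changes = [None if p is None else c - p for c, p in zip(prices, lagged)]
print(changes)  # [None, 1, 2, -1]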