Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 100-30.3k chars) | language (1 class: python) | func_code_string (string, 100-30.3k chars) | func_code_tokens (string, 138-33.2k chars) | func_documentation_string (string, 1-15k chars) | func_documentation_tokens (string, 5-5.14k chars) | split_name (1 class: train) | func_code_url (string, 91-315 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
2,900 | mjirik/imcut | imcut/models.py | Model.likelihood | def likelihood(self, x, cl):
"""
X = numpy.random.random([2,3,4])
# we have data 2x3 with feature vector with 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0)
"""
# sha = x.shape
# xr = x.reshape(-1, sha[-1])
# outsha = sha[:-1]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
logger.debug("likel " + str(x.shape))
if self.modelparams["type"] == "gmmsame":
px = self.mdl[cl].score_samples(x)
# todo handle multi-dimensional feature vectors (fv)
# px = px.reshape(outsha)
elif self.modelparams["type"] == "kernel":
px = self.mdl[cl].score_samples(x)
elif self.modelparams["type"] == "gaussian_kde":
# print x
# np.log because it is likelihood
# @TODO There is probably a problem with reshape here
# old
# px = np.log(self.mdl[cl](x.reshape(-1)))
# new
px = np.log(self.mdl[cl](x))
# px = px.reshape(outsha)
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
elif self.modelparams["type"] == "dpgmm":
# todo here is a hack
# dpgmm does not work for our data for some reason
# it always trains one component close to zero
# it probably has something to do with the alpha parameter
# if the data is multiplied by a small number, it miraculously works
logger.warning(".score() replaced with .score_samples() . Check it.")
# px = self.mdl[cl].score(x * 0.01)
px = self.mdl[cl].score_samples(x * 0.01)
elif self.modelparams["type"] == "stored":
px = self.mdl[cl].score(x)
return px | python | def likelihood(self, x, cl):
"""
X = numpy.random.random([2,3,4])
# we have data 2x3 with feature vector with 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0)
"""
# sha = x.shape
# xr = x.reshape(-1, sha[-1])
# outsha = sha[:-1]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
logger.debug("likel " + str(x.shape))
if self.modelparams["type"] == "gmmsame":
px = self.mdl[cl].score_samples(x)
# todo handle multi-dimensional feature vectors (fv)
# px = px.reshape(outsha)
elif self.modelparams["type"] == "kernel":
px = self.mdl[cl].score_samples(x)
elif self.modelparams["type"] == "gaussian_kde":
# print x
# np.log because it is likelihood
# @TODO There is probably a problem with reshape here
# old
# px = np.log(self.mdl[cl](x.reshape(-1)))
# new
px = np.log(self.mdl[cl](x))
# px = px.reshape(outsha)
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
elif self.modelparams["type"] == "dpgmm":
# todo here is a hack
# dpgmm does not work for our data for some reason
# it always trains one component close to zero
# it probably has something to do with the alpha parameter
# if the data is multiplied by a small number, it miraculously works
logger.warning(".score() replaced with .score_samples() . Check it.")
# px = self.mdl[cl].score(x * 0.01)
px = self.mdl[cl].score_samples(x * 0.01)
elif self.modelparams["type"] == "stored":
px = self.mdl[cl].score(x)
return px | ['def', 'likelihood', '(', 'self', ',', 'x', ',', 'cl', ')', ':', '# sha = x.shape', '# xr = x.reshape(-1, sha[-1])', '# outsha = sha[:-1]', '# from PyQt4.QtCore import pyqtRemoveInputHook', '# pyqtRemoveInputHook()', 'logger', '.', 'debug', '(', '"likel "', '+', 'str', '(', 'x', '.', 'shape', ')', ')', 'if', 'self', '.', 'modelparams', '[', '"type"', ']', '==', '"gmmsame"', ':', 'px', '=', 'self', '.', 'mdl', '[', 'cl', ']', '.', 'score_samples', '(', 'x', ')', '# todo ošetřit více dimenzionální fv', '# px = px.reshape(outsha)', 'elif', 'self', '.', 'modelparams', '[', '"type"', ']', '==', '"kernel"', ':', 'px', '=', 'self', '.', 'mdl', '[', 'cl', ']', '.', 'score_samples', '(', 'x', ')', 'elif', 'self', '.', 'modelparams', '[', '"type"', ']', '==', '"gaussian_kde"', ':', '# print x', '# np.log because it is likelihood', '# @TODO Zde je patrně problém s reshape', '# old', '# px = np.log(self.mdl[cl](x.reshape(-1)))', '# new', 'px', '=', 'np', '.', 'log', '(', 'self', '.', 'mdl', '[', 'cl', ']', '(', 'x', ')', ')', '# px = px.reshape(outsha)', '# from PyQt4.QtCore import pyqtRemoveInputHook', '# pyqtRemoveInputHook()', 'elif', 'self', '.', 'modelparams', '[', '"type"', ']', '==', '"dpgmm"', ':', '# todo here is a hack', '# dpgmm z nějakého důvodu nefunguje pro naše data', '# vždy natrénuje jednu složku v blízkosti nuly', '# patrně to bude mít něco společného s parametrem alpha', '# přenásobí-li se to malým číslem, zázračně to chodí', 'logger', '.', 'warning', '(', '".score() replaced with .score_samples() . Check it."', ')', '# px = self.mdl[cl].score(x * 0.01)', 'px', '=', 'self', '.', 'mdl', '[', 'cl', ']', '.', 'score_samples', '(', 'x', '*', '0.01', ')', 'elif', 'self', '.', 'modelparams', '[', '"type"', ']', '==', '"stored"', ':', 'px', '=', 'self', '.', 'mdl', '[', 'cl', ']', '.', 'score', '(', 'x', ')', 'return', 'px'] | X = numpy.random.random([2,3,4])
# we have data 2x3 with feature vector with 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0) | ['X', '=', 'numpy', '.', 'random', '.', 'random', '(', '[', '2', '3', '4', ']', ')', '#', 'we', 'have', 'data', '2x3', 'with', 'fature', 'vector', 'with', '4', 'fatures'] | train | https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/models.py#L397-L442 |
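A minimal usage sketch for the `Model.likelihood` method shown in the row above. The import path comes from the row itself, but the constructor call and the commented training step are assumptions about the rest of `imcut.models`, not something confirmed by the source:

```python
import numpy as np
from imcut.models import Model  # path taken from the row above

# 2x3 grid of samples, each carrying a 4-element feature vector (see docstring);
# score_samples() expects a 2D array, so the grid is flattened to shape (6, 4).
X = np.random.random([2 * 3, 4])

# Assumption: the model was built with the "gmmsame" backend and has already
# been fitted for class 0 (e.g. via a hypothetical m.fit(features, labels)).
m = Model(modelparams={"type": "gmmsame"})

px = m.likelihood(X, 0)  # per-sample log-likelihood under class 0
print(px.shape)
```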
2,901 | awslabs/mxboard | python/mxboard/writer.py | SummaryWriter.export_scalars | def export_scalars(self, path):
"""Exports to the given path an ASCII file containing all the scalars written
so far by this instance, with the following format:
{writer_id : [[timestamp, step, value], ...], ...}
"""
if os.path.exists(path) and os.path.isfile(path):
logging.warning('%s already exists and will be overwritten by scalar dict', path)
with open(path, "w") as f:
json.dump(self._scalar_dict, f) | python | def export_scalars(self, path):
"""Exports to the given path an ASCII file containing all the scalars written
so far by this instance, with the following format:
{writer_id : [[timestamp, step, value], ...], ...}
"""
if os.path.exists(path) and os.path.isfile(path):
logging.warning('%s already exists and will be overwritten by scalar dict', path)
with open(path, "w") as f:
json.dump(self._scalar_dict, f) | ['def', 'export_scalars', '(', 'self', ',', 'path', ')', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'logging', '.', 'warning', '(', "'%s already exists and will be overwritten by scalar dict'", ',', 'path', ')', 'with', 'open', '(', 'path', ',', '"w"', ')', 'as', 'f', ':', 'json', '.', 'dump', '(', 'self', '.', '_scalar_dict', ',', 'f', ')'] | Exports to the given path an ASCII file containing all the scalars written
so far by this instance, with the following format:
{writer_id : [[timestamp, step, value], ...], ...} | ['Exports', 'to', 'the', 'given', 'path', 'an', 'ASCII', 'file', 'containing', 'all', 'the', 'scalars', 'written', 'so', 'far', 'by', 'this', 'instance', 'with', 'the', 'following', 'format', ':', '{', 'writer_id', ':', '[[', 'timestamp', 'step', 'value', ']', '...', ']', '...', '}'] | train | https://github.com/awslabs/mxboard/blob/36057ff0f05325c9dc2fe046521325bf9d563a88/python/mxboard/writer.py#L358-L366 |
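A short sketch of how `SummaryWriter.export_scalars` from the row above is typically driven; the `logdir` argument and the `add_scalar` call follow the usual mxboard API, but treat them as assumptions here:

```python
import json
from mxboard import SummaryWriter  # assumed public import for the class above

sw = SummaryWriter(logdir='./logs')
for step in range(10):
    # each add_scalar call also appends [timestamp, step, value] to an internal dict
    sw.add_scalar(tag='loss', value=1.0 / (step + 1), global_step=step)

sw.export_scalars('./logs/scalars.json')  # dumps the accumulated dict as JSON text
sw.close()

with open('./logs/scalars.json') as f:
    print(json.load(f))  # {writer_id: [[timestamp, step, value], ...], ...}
```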
2,902 | vertexproject/synapse | synapse/lib/syntax.py | Parser.stormcmd | def stormcmd(self):
'''
A storm sub-query aware command line splitter.
( not for storm commands, but for commands which may take storm )
'''
argv = []
while self.more():
self.ignore(whitespace)
if self.nextstr('{'):
self.offs += 1
start = self.offs
self.query()
argv.append('{' + self.text[start:self.offs] + '}')
self.nextmust('}')
continue
argv.append(self.cmdvalu(until=whitespace))
return argv | python | def stormcmd(self):
'''
A storm sub-query aware command line splitter.
( not for storm commands, but for commands which may take storm )
'''
argv = []
while self.more():
self.ignore(whitespace)
if self.nextstr('{'):
self.offs += 1
start = self.offs
self.query()
argv.append('{' + self.text[start:self.offs] + '}')
self.nextmust('}')
continue
argv.append(self.cmdvalu(until=whitespace))
return argv | ['def', 'stormcmd', '(', 'self', ')', ':', 'argv', '=', '[', ']', 'while', 'self', '.', 'more', '(', ')', ':', 'self', '.', 'ignore', '(', 'whitespace', ')', 'if', 'self', '.', 'nextstr', '(', "'{'", ')', ':', 'self', '.', 'offs', '+=', '1', 'start', '=', 'self', '.', 'offs', 'self', '.', 'query', '(', ')', 'argv', '.', 'append', '(', "'{'", '+', 'self', '.', 'text', '[', 'start', ':', 'self', '.', 'offs', ']', '+', "'}'", ')', 'self', '.', 'nextmust', '(', "'}'", ')', 'continue', 'argv', '.', 'append', '(', 'self', '.', 'cmdvalu', '(', 'until', '=', 'whitespace', ')', ')', 'return', 'argv'] | A storm sub-query aware command line splitter.
( not for storm commands, but for commands which may take storm ) | ['A', 'storm', 'sub', '-', 'query', 'aware', 'command', 'line', 'splitter', '.', '(', 'not', 'for', 'storm', 'commands', 'but', 'for', 'commands', 'which', 'may', 'take', 'storm', ')'] | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/syntax.py#L443-L460 |
2,903 | vladcalin/gemstone | gemstone/core/decorators.py | event_handler | def event_handler(event_name):
"""
Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice.
"""
def wrapper(func):
func._event_handler = True
func._handled_event = event_name
return func
return wrapper | python | def event_handler(event_name):
"""
Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice.
"""
def wrapper(func):
func._event_handler = True
func._handled_event = event_name
return func
return wrapper | ['def', 'event_handler', '(', 'event_name', ')', ':', 'def', 'wrapper', '(', 'func', ')', ':', 'func', '.', '_event_handler', '=', 'True', 'func', '.', '_handled_event', '=', 'event_name', 'return', 'func', 'return', 'wrapper'] | Decorator for designating a handler for an event type. ``event_name`` must be a string
representing the name of the event type.
The decorated function must accept a parameter: the body of the received event,
which will be a Python object that can be encoded as a JSON (dict, list, str, int,
bool, float or None)
:param event_name: The name of the event that will be handled. Only one handler per
event name is supported by the same microservice. | ['Decorator', 'for', 'designating', 'a', 'handler', 'for', 'an', 'event', 'type', '.', 'event_name', 'must', 'be', 'a', 'string', 'representing', 'the', 'name', 'of', 'the', 'event', 'type', '.'] | train | https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/core/decorators.py#L14-L32 |
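An illustrative sketch of the `event_handler` decorator from the row above. The `MicroService` base class and the top-level `gemstone.event_handler` export are assumptions about the surrounding framework; the two attribute checks at the end rely only on the decorator code shown:

```python
import gemstone  # assumed to re-export MicroService and event_handler


class OrderService(gemstone.MicroService):
    name = "order.service"

    @gemstone.event_handler("order_created")
    def on_order_created(self, body):
        # body is whatever JSON-encodable object was published with the event
        print("received order:", body)


# The decorator only marks the function; the microservice discovers it later.
print(OrderService.on_order_created._event_handler)   # True
print(OrderService.on_order_created._handled_event)   # 'order_created'
```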
2,904 | bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py | Graph.restore_edge | def restore_edge(self, edge):
"""
Restores a previously hidden edge back into the graph.
"""
try:
head_id, tail_id, data = self.hidden_edges[edge]
self.nodes[tail_id][0].append(edge)
self.nodes[head_id][1].append(edge)
self.edges[edge] = head_id, tail_id, data
del self.hidden_edges[edge]
except KeyError:
raise GraphError('Invalid edge %s' % edge) | python | def restore_edge(self, edge):
"""
Restores a previously hidden edge back into the graph.
"""
try:
head_id, tail_id, data = self.hidden_edges[edge]
self.nodes[tail_id][0].append(edge)
self.nodes[head_id][1].append(edge)
self.edges[edge] = head_id, tail_id, data
del self.hidden_edges[edge]
except KeyError:
raise GraphError('Invalid edge %s' % edge) | ['def', 'restore_edge', '(', 'self', ',', 'edge', ')', ':', 'try', ':', 'head_id', ',', 'tail_id', ',', 'data', '=', 'self', '.', 'hidden_edges', '[', 'edge', ']', 'self', '.', 'nodes', '[', 'tail_id', ']', '[', '0', ']', '.', 'append', '(', 'edge', ')', 'self', '.', 'nodes', '[', 'head_id', ']', '[', '1', ']', '.', 'append', '(', 'edge', ')', 'self', '.', 'edges', '[', 'edge', ']', '=', 'head_id', ',', 'tail_id', ',', 'data', 'del', 'self', '.', 'hidden_edges', '[', 'edge', ']', 'except', 'KeyError', ':', 'raise', 'GraphError', '(', "'Invalid edge %s'", '%', 'edge', ')'] | Restores a previously hidden edge back into the graph. | ['Restores', 'a', 'previously', 'hidden', 'edge', 'back', 'into', 'the', 'graph', '.'] | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L161-L172 |
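A small sketch exercising `restore_edge` together with its counterpart `hide_edge`. The import points at the standalone `altgraph` package rather than the PyInstaller-bundled copy in the row, and the `add_edge`/`edge_by_node`/`hide_edge` names are assumptions about the rest of the Graph class:

```python
from altgraph.Graph import Graph  # assumed equivalent of the bundled module above

g = Graph()
g.add_edge('a', 'b')             # creates both nodes and the connecting edge
edge = g.edge_by_node('a', 'b')  # look up the edge id

g.hide_edge(edge)     # stashes head/tail/data in g.hidden_edges
g.restore_edge(edge)  # puts it back, exactly as the method above does
print(g.edge_list())  # the edge is visible again
```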
2,905 | SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite.check_reaction_on_surface | def check_reaction_on_surface(self, chemical_composition, reactants,
products):
"""
Check if entry with same surface and reaction is already written
to database file
Parameters
----------
chemical_composition: str
reactants: dict
products: dict
Returns id or None
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
statement = """SELECT reaction.id FROM reaction WHERE
reaction.chemical_composition='{}' and reaction.reactants='{}'
and reaction.products='{}';""".format(chemical_composition,
json.dumps(reactants),
json.dumps(products))
cur.execute(statement)
rows = cur.fetchall()
if len(rows) > 0:
id = rows[0][0]
else:
id = None
return id | python | def check_reaction_on_surface(self, chemical_composition, reactants,
products):
"""
Check if entry with same surface and reaction is already written
to database file
Parameters
----------
chemical_composition: str
reactants: dict
products: dict
Returns id or None
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
statement = """SELECT reaction.id FROM reaction WHERE
reaction.chemical_composition='{}' and reaction.reactants='{}'
and reaction.products='{}';""".format(chemical_composition,
json.dumps(reactants),
json.dumps(products))
cur.execute(statement)
rows = cur.fetchall()
if len(rows) > 0:
id = rows[0][0]
else:
id = None
return id | ['def', 'check_reaction_on_surface', '(', 'self', ',', 'chemical_composition', ',', 'reactants', ',', 'products', ')', ':', 'con', '=', 'self', '.', 'connection', 'or', 'self', '.', '_connect', '(', ')', 'self', '.', '_initialize', '(', 'con', ')', 'cur', '=', 'con', '.', 'cursor', '(', ')', 'statement', '=', '"""SELECT reaction.id FROM reaction WHERE\n reaction.chemical_composition=\'{}\' and reaction.reactants=\'{}\'\n and reaction.products=\'{}\';"""', '.', 'format', '(', 'chemical_composition', ',', 'json', '.', 'dumps', '(', 'reactants', ')', ',', 'json', '.', 'dumps', '(', 'products', ')', ')', 'cur', '.', 'execute', '(', 'statement', ')', 'rows', '=', 'cur', '.', 'fetchall', '(', ')', 'if', 'len', '(', 'rows', ')', '>', '0', ':', 'id', '=', 'rows', '[', '0', ']', '[', '0', ']', 'else', ':', 'id', '=', 'None', 'return', 'id'] | Check if entry with same surface and reaction is allready written
to database file
Parameters
----------
chemical_composition: str
reactants: dict
products: dict
Returns id or None | ['Check', 'if', 'entry', 'with', 'same', 'surface', 'and', 'reaction', 'is', 'allready', 'written', 'to', 'database', 'file'] | train | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L400-L429 |
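A small sketch of calling `check_reaction_on_surface` above; the constructor argument (a path to the sqlite file) and the exact reactant/product dictionary convention are assumptions based on the code shown, not documented behaviour:

```python
from cathub.cathubsqlite import CathubSQLite  # path from the row above

db = CathubSQLite('cathub.db')  # assumed: filename of the sqlite database
rid = db.check_reaction_on_surface(
    chemical_composition='Pt16',
    reactants={'COgas': 1},      # hypothetical reaction entry
    products={'COstar': 1})
print(rid)  # row id if the same surface+reaction is already stored, else None
```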
2,906 | materialsproject/pymatgen | pymatgen/io/phonopy.py | get_ph_bs_symm_line_from_dict | def get_ph_bs_symm_line_from_dict(bands_dict, has_nac=False, labels_dict=None):
"""
Creates a pymatgen PhononBandStructure object from the dictionary
extracted by the band.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v
and added to the object.
Args:
bands_dict: the dictionary extracted from the band.yaml file
has_nac: True if the data have been obtained with the option
--nac option. Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml.
"""
structure = get_structure_from_dict(bands_dict)
qpts = []
frequencies = []
eigendisplacements = []
phonopy_labels_dict = {}
for p in bands_dict['phonon']:
q = p['q-position']
qpts.append(q)
bands = []
eig_q = []
for b in p['band']:
bands.append(b['frequency'])
if 'eigenvector' in b:
eig_b = []
for i, eig_a in enumerate(b['eigenvector']):
v = np.zeros(3, np.complex)
for x in range(3):
v[x] = eig_a[x][0] + eig_a[x][1]*1j
eig_b.append(eigvec_to_eigdispl(
v, q, structure[i].frac_coords,
structure.site_properties['phonopy_masses'][i]))
eig_q.append(eig_b)
frequencies.append(bands)
if 'label' in p:
phonopy_labels_dict[p['label']] = p['q-position']
if eig_q:
eigendisplacements.append(eig_q)
qpts = np.array(qpts)
# transpose to match the convention in PhononBandStructure
frequencies = np.transpose(frequencies)
if eigendisplacements:
eigendisplacements = np.transpose(eigendisplacements, (1, 0, 2, 3))
rec_latt = Lattice(bands_dict['reciprocal_lattice'])
labels_dict = labels_dict or phonopy_labels_dict
ph_bs = PhononBandStructureSymmLine(
qpts, frequencies, rec_latt, has_nac=has_nac, labels_dict=labels_dict,
structure=structure, eigendisplacements=eigendisplacements)
return ph_bs | python | def get_ph_bs_symm_line_from_dict(bands_dict, has_nac=False, labels_dict=None):
"""
Creates a pymatgen PhononBandStructure object from the dictionary
extracted by the band.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v
and added to the object.
Args:
bands_dict: the dictionary extracted from the band.yaml file
has_nac: True if the data have been obtained with the option
--nac option. Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml.
"""
structure = get_structure_from_dict(bands_dict)
qpts = []
frequencies = []
eigendisplacements = []
phonopy_labels_dict = {}
for p in bands_dict['phonon']:
q = p['q-position']
qpts.append(q)
bands = []
eig_q = []
for b in p['band']:
bands.append(b['frequency'])
if 'eigenvector' in b:
eig_b = []
for i, eig_a in enumerate(b['eigenvector']):
v = np.zeros(3, np.complex)
for x in range(3):
v[x] = eig_a[x][0] + eig_a[x][1]*1j
eig_b.append(eigvec_to_eigdispl(
v, q, structure[i].frac_coords,
structure.site_properties['phonopy_masses'][i]))
eig_q.append(eig_b)
frequencies.append(bands)
if 'label' in p:
phonopy_labels_dict[p['label']] = p['q-position']
if eig_q:
eigendisplacements.append(eig_q)
qpts = np.array(qpts)
# transpose to match the convention in PhononBandStructure
frequencies = np.transpose(frequencies)
if eigendisplacements:
eigendisplacements = np.transpose(eigendisplacements, (1, 0, 2, 3))
rec_latt = Lattice(bands_dict['reciprocal_lattice'])
labels_dict = labels_dict or phonopy_labels_dict
ph_bs = PhononBandStructureSymmLine(
qpts, frequencies, rec_latt, has_nac=has_nac, labels_dict=labels_dict,
structure=structure, eigendisplacements=eigendisplacements)
return ph_bs | ['def', 'get_ph_bs_symm_line_from_dict', '(', 'bands_dict', ',', 'has_nac', '=', 'False', ',', 'labels_dict', '=', 'None', ')', ':', 'structure', '=', 'get_structure_from_dict', '(', 'bands_dict', ')', 'qpts', '=', '[', ']', 'frequencies', '=', '[', ']', 'eigendisplacements', '=', '[', ']', 'phonopy_labels_dict', '=', '{', '}', 'for', 'p', 'in', 'bands_dict', '[', "'phonon'", ']', ':', 'q', '=', 'p', '[', "'q-position'", ']', 'qpts', '.', 'append', '(', 'q', ')', 'bands', '=', '[', ']', 'eig_q', '=', '[', ']', 'for', 'b', 'in', 'p', '[', "'band'", ']', ':', 'bands', '.', 'append', '(', 'b', '[', "'frequency'", ']', ')', 'if', "'eigenvector'", 'in', 'b', ':', 'eig_b', '=', '[', ']', 'for', 'i', ',', 'eig_a', 'in', 'enumerate', '(', 'b', '[', "'eigenvector'", ']', ')', ':', 'v', '=', 'np', '.', 'zeros', '(', '3', ',', 'np', '.', 'complex', ')', 'for', 'x', 'in', 'range', '(', '3', ')', ':', 'v', '[', 'x', ']', '=', 'eig_a', '[', 'x', ']', '[', '0', ']', '+', 'eig_a', '[', 'x', ']', '[', '1', ']', '*', '1j', 'eig_b', '.', 'append', '(', 'eigvec_to_eigdispl', '(', 'v', ',', 'q', ',', 'structure', '[', 'i', ']', '.', 'frac_coords', ',', 'structure', '.', 'site_properties', '[', "'phonopy_masses'", ']', '[', 'i', ']', ')', ')', 'eig_q', '.', 'append', '(', 'eig_b', ')', 'frequencies', '.', 'append', '(', 'bands', ')', 'if', "'label'", 'in', 'p', ':', 'phonopy_labels_dict', '[', 'p', '[', "'label'", ']', ']', '=', 'p', '[', "'q-position'", ']', 'if', 'eig_q', ':', 'eigendisplacements', '.', 'append', '(', 'eig_q', ')', 'qpts', '=', 'np', '.', 'array', '(', 'qpts', ')', '# transpose to match the convention in PhononBandStructure', 'frequencies', '=', 'np', '.', 'transpose', '(', 'frequencies', ')', 'if', 'eigendisplacements', ':', 'eigendisplacements', '=', 'np', '.', 'transpose', '(', 'eigendisplacements', ',', '(', '1', ',', '0', ',', '2', ',', '3', ')', ')', 'rec_latt', '=', 'Lattice', '(', 'bands_dict', '[', "'reciprocal_lattice'", ']', ')', 'labels_dict', '=', 'labels_dict', 'or', 'phonopy_labels_dict', 'ph_bs', '=', 'PhononBandStructureSymmLine', '(', 'qpts', ',', 'frequencies', ',', 'rec_latt', ',', 'has_nac', '=', 'has_nac', ',', 'labels_dict', '=', 'labels_dict', ',', 'structure', '=', 'structure', ',', 'eigendisplacements', '=', 'eigendisplacements', ')', 'return', 'ph_bs'] | Creates a pymatgen PhononBandStructure object from the dictionary
extracted by the band.yaml file produced by phonopy. The labels
will be extracted from the dictionary, if present. If the 'eigenvector'
key is found the eigendisplacements will be calculated according to the
formula::
exp(2*pi*i*(frac_coords \\dot q) / sqrt(mass) * v
and added to the object.
Args:
bands_dict: the dictionary extracted from the band.yaml file
has_nac: True if the data have been obtained with the option
--nac option. Default False.
labels_dict: dict that links a qpoint in frac coords to a label.
Its value will replace the data contained in the band.yaml. | ['Creates', 'a', 'pymatgen', 'PhononBandStructure', 'object', 'from', 'the', 'dictionary', 'extracted', 'by', 'the', 'band', '.', 'yaml', 'file', 'produced', 'by', 'phonopy', '.', 'The', 'labels', 'will', 'be', 'extracted', 'from', 'the', 'dictionary', 'if', 'present', '.', 'If', 'the', 'eigenvector', 'key', 'is', 'found', 'the', 'eigendisplacements', 'will', 'be', 'calculated', 'according', 'to', 'the', 'formula', '::', 'exp', '(', '2', '*', 'pi', '*', 'i', '*', '(', 'frac_coords', '\\\\', 'dot', 'q', ')', '/', 'sqrt', '(', 'mass', ')', '*', 'v', 'and', 'added', 'to', 'the', 'object', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/phonopy.py#L108-L171 |
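A brief sketch of feeding a phonopy `band.yaml` into the helper above. The yaml-loading step is an assumption; pymatgen also ships a file-based wrapper (typically `get_ph_bs_symm_line`) that performs it for you:

```python
import yaml
from pymatgen.io.phonopy import get_ph_bs_symm_line_from_dict

with open('band.yaml') as f:          # file produced by `phonopy --band ...`
    bands_dict = yaml.safe_load(f)

# Optional: override the labels stored in band.yaml with your own mapping
labels = {"$\\Gamma$": [0.0, 0.0, 0.0]}

ph_bs = get_ph_bs_symm_line_from_dict(bands_dict, has_nac=False, labels_dict=labels)
print(ph_bs.nb_bands, len(ph_bs.qpoints))
```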
2,907 | StackStorm/pybind | pybind/slxos/v17s_1_02/qos_mpls/map_/traffic_class_exp/__init__.py | traffic_class_exp._set_priority | def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /qos_mpls/map/traffic_class_exp/priority (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("priority_in_values",priority.priority, yang_name="priority", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='priority-in-values', extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}), is_container='list', yang_name="priority", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("priority_in_values",priority.priority, yang_name="priority", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='priority-in-values', extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}), is_container='list', yang_name="priority", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
})
self.__priority = t
if hasattr(self, '_set'):
self._set() | python | def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /qos_mpls/map/traffic_class_exp/priority (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("priority_in_values",priority.priority, yang_name="priority", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='priority-in-values', extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}), is_container='list', yang_name="priority", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("priority_in_values",priority.priority, yang_name="priority", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='priority-in-values', extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}), is_container='list', yang_name="priority", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map Traffic class value to Exp value', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'QosMplsTrafficClassExpCallpoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'traffic-class'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
})
self.__priority = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_priority', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"priority_in_values"', ',', 'priority', '.', 'priority', ',', 'yang_name', '=', '"priority"', ',', 'rest_name', '=', '"traffic-class"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'priority-in-values'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Map Traffic class value to Exp value'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-incomplete-no'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'callpoint'", ':', "u'QosMplsTrafficClassExpCallpoint'", ',', "u'cli-compact-syntax'", ':', 'None', ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'alt-name'", ':', "u'traffic-class'", '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"priority"', ',', 'rest_name', '=', '"traffic-class"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Map Traffic class value to Exp value'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-incomplete-no'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'callpoint'", ':', "u'QosMplsTrafficClassExpCallpoint'", ',', "u'cli-compact-syntax'", ':', 'None', ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-suppress-key-abbreviation'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'alt-name'", ':', "u'traffic-class'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-qos-mpls'", ',', 'defining_module', '=', "'brocade-qos-mpls'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""priority must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("priority_in_values",priority.priority, yang_name="priority", rest_name="traffic-class", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'priority-in-values\', extensions={u\'tailf-common\': {u\'info\': u\'Map Traffic class value to Exp value\', u\'cli-suppress-mode\': None, u\'cli-incomplete-no\': None, u\'cli-suppress-list-no\': None, u\'callpoint\': u\'QosMplsTrafficClassExpCallpoint\', u\'cli-compact-syntax\': None, u\'cli-sequence-commands\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None, u\'alt-name\': u\'traffic-class\'}}), is_container=\'list\', yang_name="priority", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Map Traffic class value to Exp value\', u\'cli-suppress-mode\': None, u\'cli-incomplete-no\': None, u\'cli-suppress-list-no\': None, u\'callpoint\': u\'QosMplsTrafficClassExpCallpoint\', u\'cli-compact-syntax\': None, 
u\'cli-sequence-commands\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-incomplete-command\': None, u\'alt-name\': u\'traffic-class\'}}, namespace=\'urn:brocade.com:mgmt:brocade-qos-mpls\', defining_module=\'brocade-qos-mpls\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__priority', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for priority, mapped from YANG variable /qos_mpls/map/traffic_class_exp/priority (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly. | ['Setter', 'method', 'for', 'priority', 'mapped', 'from', 'YANG', 'variable', '/', 'qos_mpls', '/', 'map', '/', 'traffic_class_exp', '/', 'priority', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_priority', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_priority', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos_mpls/map_/traffic_class_exp/__init__.py#L131-L152 |
2,908 | koordinates/python-client | koordinates/exports.py | Export.download | def download(self, path, progress_callback=None, chunk_size=1024**2):
"""
Download the export archive.
.. warning::
If you pass this function an open file-like object as the ``path``
parameter, the function will not close that file for you.
If a ``path`` parameter is a directory, this function will use the
Export name to determine the name of the file (returned). If the
calculated download file path already exists, this function will raise
a DownloadError.
You can also specify the filename as a string. This will be passed to
the built-in :func:`open` and we will read the content into the file.
Instead, if you want to manage the file object yourself, you need to
provide either a :class:`io.BytesIO` object or a file opened with the
`'b'` flag. See the two examples below for more details.
:param path: Either a string with the path to the location
to save the response content, or a file-like object expecting bytes.
:param function progress_callback: An optional callback
function which receives upload progress notifications. The function should take two
arguments: the number of bytes received, and the total number of bytes to receive.
:param int chunk_size: Chunk size in bytes for streaming large downloads and progress reporting. 1MB by default
:returns The name of the automatic filename that would be used.
:rtype: str
"""
if not self.download_url or self.state != 'complete':
raise DownloadError("Download not available")
# ignore parsing the Content-Disposition header, since we know the name
download_filename = "{}.zip".format(self.name)
fd = None
if isinstance(getattr(path, 'write', None), collections.Callable):
# already open file-like object
fd = path
elif os.path.isdir(path):
# directory to download to, using the export name
path = os.path.join(path, download_filename)
# do not allow overwriting
if os.path.exists(path):
raise DownloadError("Download file already exists: %s" % path)
elif path:
# fully qualified file path
# allow overwriting
pass
elif not path:
raise DownloadError("Empty download file path")
with contextlib.ExitStack() as stack:
if not fd:
fd = open(path, 'wb')
# only close a file we open
stack.callback(fd.close)
r = self._manager.client.request('GET', self.download_url, stream=True)
stack.callback(r.close)
bytes_written = 0
try:
bytes_total = int(r.headers.get('content-length', None))
except TypeError:
bytes_total = None
if progress_callback:
# initial callback (0%)
progress_callback(bytes_written, bytes_total)
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
bytes_written += len(chunk)
if progress_callback:
progress_callback(bytes_written, bytes_total)
return download_filename | python | def download(self, path, progress_callback=None, chunk_size=1024**2):
"""
Download the export archive.
.. warning::
If you pass this function an open file-like object as the ``path``
parameter, the function will not close that file for you.
If a ``path`` parameter is a directory, this function will use the
Export name to determine the name of the file (returned). If the
calculated download file path already exists, this function will raise
a DownloadError.
You can also specify the filename as a string. This will be passed to
the built-in :func:`open` and we will read the content into the file.
Instead, if you want to manage the file object yourself, you need to
provide either a :class:`io.BytesIO` object or a file opened with the
`'b'` flag. See the two examples below for more details.
:param path: Either a string with the path to the location
to save the response content, or a file-like object expecting bytes.
:param function progress_callback: An optional callback
function which receives upload progress notifications. The function should take two
arguments: the number of bytes received, and the total number of bytes to receive.
:param int chunk_size: Chunk size in bytes for streaming large downloads and progress reporting. 1MB by default
:returns The name of the automatic filename that would be used.
:rtype: str
"""
if not self.download_url or self.state != 'complete':
raise DownloadError("Download not available")
# ignore parsing the Content-Disposition header, since we know the name
download_filename = "{}.zip".format(self.name)
fd = None
if isinstance(getattr(path, 'write', None), collections.Callable):
# already open file-like object
fd = path
elif os.path.isdir(path):
# directory to download to, using the export name
path = os.path.join(path, download_filename)
# do not allow overwriting
if os.path.exists(path):
raise DownloadError("Download file already exists: %s" % path)
elif path:
# fully qualified file path
# allow overwriting
pass
elif not path:
raise DownloadError("Empty download file path")
with contextlib.ExitStack() as stack:
if not fd:
fd = open(path, 'wb')
# only close a file we open
stack.callback(fd.close)
r = self._manager.client.request('GET', self.download_url, stream=True)
stack.callback(r.close)
bytes_written = 0
try:
bytes_total = int(r.headers.get('content-length', None))
except TypeError:
bytes_total = None
if progress_callback:
# initial callback (0%)
progress_callback(bytes_written, bytes_total)
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
bytes_written += len(chunk)
if progress_callback:
progress_callback(bytes_written, bytes_total)
return download_filename | ['def', 'download', '(', 'self', ',', 'path', ',', 'progress_callback', '=', 'None', ',', 'chunk_size', '=', '1024', '**', '2', ')', ':', 'if', 'not', 'self', '.', 'download_url', 'or', 'self', '.', 'state', '!=', "'complete'", ':', 'raise', 'DownloadError', '(', '"Download not available"', ')', '# ignore parsing the Content-Disposition header, since we know the name', 'download_filename', '=', '"{}.zip"', '.', 'format', '(', 'self', '.', 'name', ')', 'fd', '=', 'None', 'if', 'isinstance', '(', 'getattr', '(', 'path', ',', "'write'", ',', 'None', ')', ',', 'collections', '.', 'Callable', ')', ':', '# already open file-like object', 'fd', '=', 'path', 'elif', 'os', '.', 'path', '.', 'isdir', '(', 'path', ')', ':', '# directory to download to, using the export name', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'download_filename', ')', '# do not allow overwriting', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'raise', 'DownloadError', '(', '"Download file already exists: %s"', '%', 'path', ')', 'elif', 'path', ':', '# fully qualified file path', '# allow overwriting', 'pass', 'elif', 'not', 'path', ':', 'raise', 'DownloadError', '(', '"Empty download file path"', ')', 'with', 'contextlib', '.', 'ExitStack', '(', ')', 'as', 'stack', ':', 'if', 'not', 'fd', ':', 'fd', '=', 'open', '(', 'path', ',', "'wb'", ')', '# only close a file we open', 'stack', '.', 'callback', '(', 'fd', '.', 'close', ')', 'r', '=', 'self', '.', '_manager', '.', 'client', '.', 'request', '(', "'GET'", ',', 'self', '.', 'download_url', ',', 'stream', '=', 'True', ')', 'stack', '.', 'callback', '(', 'r', '.', 'close', ')', 'bytes_written', '=', '0', 'try', ':', 'bytes_total', '=', 'int', '(', 'r', '.', 'headers', '.', 'get', '(', "'content-length'", ',', 'None', ')', ')', 'except', 'TypeError', ':', 'bytes_total', '=', 'None', 'if', 'progress_callback', ':', '# initial callback (0%)', 'progress_callback', '(', 'bytes_written', ',', 'bytes_total', ')', 'for', 'chunk', 'in', 'r', '.', 'iter_content', '(', 'chunk_size', '=', 'chunk_size', ')', ':', 'fd', '.', 'write', '(', 'chunk', ')', 'bytes_written', '+=', 'len', '(', 'chunk', ')', 'if', 'progress_callback', ':', 'progress_callback', '(', 'bytes_written', ',', 'bytes_total', ')', 'return', 'download_filename'] | Download the export archive.
.. warning::
If you pass this function an open file-like object as the ``path``
parameter, the function will not close that file for you.
If a ``path`` parameter is a directory, this function will use the
Export name to determine the name of the file (returned). If the
calculated download file path already exists, this function will raise
a DownloadError.
You can also specify the filename as a string. This will be passed to
the built-in :func:`open` and we will read the content into the file.
Instead, if you want to manage the file object yourself, you need to
provide either a :class:`io.BytesIO` object or a file opened with the
`'b'` flag. See the two examples below for more details.
:param path: Either a string with the path to the location
to save the response content, or a file-like object expecting bytes.
:param function progress_callback: An optional callback
function which receives upload progress notifications. The function should take two
arguments: the number of bytes received, and the total number of bytes to receive.
:param int chunk_size: Chunk size in bytes for streaming large downloads and progress reporting. 1MB by default
:returns The name of the automatic filename that would be used.
:rtype: str | ['Download', 'the', 'export', 'archive', '.'] | train | https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/exports.py#L273-L351 |
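A sketch of driving `Export.download` above with a progress callback. The client construction, the `exports.get` lookup, and the host/token values are assumptions (hypothetical values) about the rest of the koordinates client:

```python
import koordinates  # assumed top-level package for the Export class above

client = koordinates.Client(host='labs.koordinates.com', token='MY_API_TOKEN')
export = client.exports.get(123)  # hypothetical export id; must be in state 'complete'

def progress(received, total):
    if total:
        print('%.1f%% downloaded' % (100.0 * received / total))

# Passing a directory makes the method name the file "<export name>.zip"
# and refuse to overwrite an existing file, as the docstring describes.
filename = export.download('/tmp', progress_callback=progress)
print('saved as', filename)
```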
2,909 | iotile/coretools | iotilecore/iotile/core/hw/virtual/common_types.py | unpack_rpc_payload | def unpack_rpc_payload(resp_format, payload):
"""Unpack an RPC payload according to resp_format.
Args:
resp_format (str): a struct format code (without the <) for the
parameter format for this RPC. This format code may include the final
character V, which means that it expects a variable length bytearray.
payload (bytes): The binary payload that should be unpacked.
Returns:
list: A list of the unpacked payload items.
"""
code = _create_argcode(resp_format, payload)
return struct.unpack(code, payload) | python | def unpack_rpc_payload(resp_format, payload):
"""Unpack an RPC payload according to resp_format.
Args:
resp_format (str): a struct format code (without the <) for the
parameter format for this RPC. This format code may include the final
character V, which means that it expects a variable length bytearray.
payload (bytes): The binary payload that should be unpacked.
Returns:
list: A list of the unpacked payload items.
"""
code = _create_argcode(resp_format, payload)
return struct.unpack(code, payload) | ['def', 'unpack_rpc_payload', '(', 'resp_format', ',', 'payload', ')', ':', 'code', '=', '_create_argcode', '(', 'resp_format', ',', 'payload', ')', 'return', 'struct', '.', 'unpack', '(', 'code', ',', 'payload', ')'] | Unpack an RPC payload according to resp_format.
Args:
resp_format (str): a struct format code (without the <) for the
parameter format for this RPC. This format code may include the final
character V, which means that it expects a variable length bytearray.
payload (bytes): The binary payload that should be unpacked.
Returns:
list: A list of the unpacked payload items. | ['Unpack', 'an', 'RPC', 'payload', 'according', 'to', 'resp_format', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/virtual/common_types.py#L121-L135 |
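A self-contained sketch of the `unpack_rpc_payload` helper above; the packing side uses plain `struct`, and the trailing comment about the `V` code paraphrases the docstring since `_create_argcode` itself is not shown in the row:

```python
import struct
from iotile.core.hw.virtual.common_types import unpack_rpc_payload  # path from the row above

# An RPC response carrying two uint32 values followed by a uint16
payload = struct.pack('<LLH', 42, 7, 1000)
print(unpack_rpc_payload('LLH', payload))  # -> (42, 7, 1000)

# A format ending in 'V' would instead treat the remaining bytes as a
# variable-length bytearray, sized from the payload by _create_argcode.
```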
2,910 | blockstack/blockstack-core | blockstack/lib/subdomains.py | SubdomainIndex.reindex | def reindex(cls, lastblock, firstblock=None, opts=None):
"""
Generate a subdomains db from scratch, using the names db and the atlas db and zone file collection.
Best to do this in a one-off command (i.e. *not* in the blockstackd process)
"""
if opts is None:
opts = get_blockstack_opts()
if not is_atlas_enabled(opts):
raise Exception("Atlas is not enabled")
if not is_subdomains_enabled(opts):
raise Exception("Subdomain support is not enabled")
subdomaindb_path = opts['subdomaindb_path']
atlasdb_path = opts['atlasdb_path']
if not os.path.exists(atlasdb_path):
raise Exception("No Atlas database at {}".format(opts['atlasdb_path']))
subdomain_indexer = SubdomainIndex(subdomaindb_path, blockstack_opts=opts)
subdomain_indexer.subdomain_db.wipe()
if firstblock is None:
start_block = SUBDOMAINS_FIRST_BLOCK
else:
start_block = firstblock
for i in range(start_block, lastblock, 100):
log.debug("Processing all subdomains in blocks {}-{}...".format(i, i+99))
subdomain_indexer.index_blockchain(i, i+100)
log.debug("Finished indexing subdomains in blocks {}-{}".format(start_block, lastblock)) | python | def reindex(cls, lastblock, firstblock=None, opts=None):
"""
Generate a subdomains db from scratch, using the names db and the atlas db and zone file collection.
Best to do this in a one-off command (i.e. *not* in the blockstackd process)
"""
if opts is None:
opts = get_blockstack_opts()
if not is_atlas_enabled(opts):
raise Exception("Atlas is not enabled")
if not is_subdomains_enabled(opts):
raise Exception("Subdomain support is not enabled")
subdomaindb_path = opts['subdomaindb_path']
atlasdb_path = opts['atlasdb_path']
if not os.path.exists(atlasdb_path):
raise Exception("No Atlas database at {}".format(opts['atlasdb_path']))
subdomain_indexer = SubdomainIndex(subdomaindb_path, blockstack_opts=opts)
subdomain_indexer.subdomain_db.wipe()
if firstblock is None:
start_block = SUBDOMAINS_FIRST_BLOCK
else:
start_block = firstblock
for i in range(start_block, lastblock, 100):
log.debug("Processing all subdomains in blocks {}-{}...".format(i, i+99))
subdomain_indexer.index_blockchain(i, i+100)
log.debug("Finished indexing subdomains in blocks {}-{}".format(start_block, lastblock)) | ['def', 'reindex', '(', 'cls', ',', 'lastblock', ',', 'firstblock', '=', 'None', ',', 'opts', '=', 'None', ')', ':', 'if', 'opts', 'is', 'None', ':', 'opts', '=', 'get_blockstack_opts', '(', ')', 'if', 'not', 'is_atlas_enabled', '(', 'opts', ')', ':', 'raise', 'Exception', '(', '"Atlas is not enabled"', ')', 'if', 'not', 'is_subdomains_enabled', '(', 'opts', ')', ':', 'raise', 'Exception', '(', '"Subdomain support is not enabled"', ')', 'subdomaindb_path', '=', 'opts', '[', "'subdomaindb_path'", ']', 'atlasdb_path', '=', 'opts', '[', "'atlasdb_path'", ']', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'atlasdb_path', ')', ':', 'raise', 'Exception', '(', '"No Atlas database at {}"', '.', 'format', '(', 'opts', '[', "'atlasdb_path'", ']', ')', ')', 'subdomain_indexer', '=', 'SubdomainIndex', '(', 'subdomaindb_path', ',', 'blockstack_opts', '=', 'opts', ')', 'subdomain_indexer', '.', 'subdomain_db', '.', 'wipe', '(', ')', 'if', 'firstblock', 'is', 'None', ':', 'start_block', '=', 'SUBDOMAINS_FIRST_BLOCK', 'else', ':', 'start_block', '=', 'firstblock', 'for', 'i', 'in', 'range', '(', 'start_block', ',', 'lastblock', ',', '100', ')', ':', 'log', '.', 'debug', '(', '"Processing all subdomains in blocks {}-{}..."', '.', 'format', '(', 'i', ',', 'i', '+', '99', ')', ')', 'subdomain_indexer', '.', 'index_blockchain', '(', 'i', ',', 'i', '+', '100', ')', 'log', '.', 'debug', '(', '"Finished indexing subdomains in blocks {}-{}"', '.', 'format', '(', 'start_block', ',', 'lastblock', ')', ')'] | Generate a subdomains db from scratch, using the names db and the atlas db and zone file collection.
Best to do this in a one-off command (i.e. *not* in the blockstackd process) | ['Generate', 'a', 'subdomains', 'db', 'from', 'scratch', 'using', 'the', 'names', 'db', 'and', 'the', 'atlas', 'db', 'and', 'zone', 'file', 'collection', '.', 'Best', 'to', 'do', 'this', 'in', 'a', 'one', '-', 'off', 'command', '(', 'i', '.', 'e', '.', '*', 'not', '*', 'in', 'the', 'blockstackd', 'process', ')'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L946-L978 |
2,911 | saltstack/salt | salt/modules/dracr.py | list_users | def list_users(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
List all DRAC users
CLI Example:
.. code-block:: bash
salt dell dracr.list_users
'''
users = {}
_username = ''
for idx in range(1, 17):
cmd = __execute_ret('getconfig -g '
'cfgUserAdmin -i {0}'.format(idx),
host=host, admin_username=admin_username,
admin_password=admin_password)
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
for user in cmd['stdout'].splitlines():
if not user.startswith('cfg'):
continue
(key, val) = user.split('=')
if key.startswith('cfgUserAdminUserName'):
_username = val.strip()
if val:
users[_username] = {'index': idx}
else:
break
else:
if _username:
users[_username].update({key: val})
return users | python | def list_users(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
List all DRAC users
CLI Example:
.. code-block:: bash
salt dell dracr.list_users
'''
users = {}
_username = ''
for idx in range(1, 17):
cmd = __execute_ret('getconfig -g '
'cfgUserAdmin -i {0}'.format(idx),
host=host, admin_username=admin_username,
admin_password=admin_password)
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
for user in cmd['stdout'].splitlines():
if not user.startswith('cfg'):
continue
(key, val) = user.split('=')
if key.startswith('cfgUserAdminUserName'):
_username = val.strip()
if val:
users[_username] = {'index': idx}
else:
break
else:
if _username:
users[_username].update({key: val})
return users | ['def', 'list_users', '(', 'host', '=', 'None', ',', 'admin_username', '=', 'None', ',', 'admin_password', '=', 'None', ',', 'module', '=', 'None', ')', ':', 'users', '=', '{', '}', '_username', '=', "''", 'for', 'idx', 'in', 'range', '(', '1', ',', '17', ')', ':', 'cmd', '=', '__execute_ret', '(', "'getconfig -g '", "'cfgUserAdmin -i {0}'", '.', 'format', '(', 'idx', ')', ',', 'host', '=', 'host', ',', 'admin_username', '=', 'admin_username', ',', 'admin_password', '=', 'admin_password', ')', 'if', 'cmd', '[', "'retcode'", ']', '!=', '0', ':', 'log', '.', 'warning', '(', "'racadm returned an exit code of %s'", ',', 'cmd', '[', "'retcode'", ']', ')', 'for', 'user', 'in', 'cmd', '[', "'stdout'", ']', '.', 'splitlines', '(', ')', ':', 'if', 'not', 'user', '.', 'startswith', '(', "'cfg'", ')', ':', 'continue', '(', 'key', ',', 'val', ')', '=', 'user', '.', 'split', '(', "'='", ')', 'if', 'key', '.', 'startswith', '(', "'cfgUserAdminUserName'", ')', ':', '_username', '=', 'val', '.', 'strip', '(', ')', 'if', 'val', ':', 'users', '[', '_username', ']', '=', '{', "'index'", ':', 'idx', '}', 'else', ':', 'break', 'else', ':', 'if', '_username', ':', 'users', '[', '_username', ']', '.', 'update', '(', '{', 'key', ':', 'val', '}', ')', 'return', 'users'] | List all DRAC users
CLI Example:
.. code-block:: bash
salt dell dracr.list_users | ['List', 'all', 'DRAC', 'users'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dracr.py#L475-L517 |
2,912 | jochym/Elastic | parcalc/parcalc.py | ClusterVasp.prepare_calc_dir | def prepare_calc_dir(self):
'''
Prepare the calculation directory for VASP execution.
This needs to be re-implemented for each local setup.
The following code reflects just my particular setup.
'''
with open("vasprun.conf","w") as f:
f.write('NODES="nodes=%s:ppn=%d"\n' % (self.nodes, self.ppn))
f.write('BLOCK=%d\n' % (self.block,))
if self.ncl :
f.write('NCL=%d\n' % (1,)) | python | def prepare_calc_dir(self):
'''
Prepare the calculation directory for VASP execution.
This needs to be re-implemented for each local setup.
The following code reflects just my particular setup.
'''
with open("vasprun.conf","w") as f:
f.write('NODES="nodes=%s:ppn=%d"\n' % (self.nodes, self.ppn))
f.write('BLOCK=%d\n' % (self.block,))
if self.ncl :
f.write('NCL=%d\n' % (1,)) | ['def', 'prepare_calc_dir', '(', 'self', ')', ':', 'with', 'open', '(', '"vasprun.conf"', ',', '"w"', ')', 'as', 'f', ':', 'f', '.', 'write', '(', '\'NODES="nodes=%s:ppn=%d"\\n\'', '%', '(', 'self', '.', 'nodes', ',', 'self', '.', 'ppn', ')', ')', 'f', '.', 'write', '(', "'BLOCK=%d\\n'", '%', '(', 'self', '.', 'block', ',', ')', ')', 'if', 'self', '.', 'ncl', ':', 'f', '.', 'write', '(', "'NCL=%d\\n'", '%', '(', '1', ',', ')', ')'] | Prepare the calculation directory for VASP execution.
This needs to be re-implemented for each local setup.
The following code reflects just my particular setup. | ['Prepare', 'the', 'calculation', 'directory', 'for', 'VASP', 'execution', '.', 'This', 'needs', 'to', 'be', 're', '-', 'implemented', 'for', 'each', 'local', 'setup', '.', 'The', 'following', 'code', 'reflects', 'just', 'my', 'particular', 'setup', '.'] | train | https://github.com/jochym/Elastic/blob/8daae37d0c48aab8dfb1de2839dab02314817f95/parcalc/parcalc.py#L114-L124 |
2,913 | guaix-ucm/numina | numina/drps/drpsystem.py | DrpSystem.load | def load(self):
"""Load all available DRPs in 'entry_point'."""
for drpins in self.iload(self.entry):
self.drps[drpins.name] = drpins
return self | python | def load(self):
"""Load all available DRPs in 'entry_point'."""
for drpins in self.iload(self.entry):
self.drps[drpins.name] = drpins
return self | ['def', 'load', '(', 'self', ')', ':', 'for', 'drpins', 'in', 'self', '.', 'iload', '(', 'self', '.', 'entry', ')', ':', 'self', '.', 'drps', '[', 'drpins', '.', 'name', ']', '=', 'drpins', 'return', 'self'] | Load all available DRPs in 'entry_point'. | ['Load', 'all', 'available', 'DRPs', 'in', 'entry_point', '.'] | train | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/drps/drpsystem.py#L27-L33 |
2,914 | gabstopper/smc-python | smc/administration/certificates/vpn.py | VPNCertificateCA.create | def create(cls, name, certificate):
"""
Create a new external VPN CA for signing internal gateway
certificates.
:param str name: Name of VPN CA
:param str certificate: file name, path or certificate string.
:raises CreateElementFailed: Failed creating cert with reason
:rtype: VPNCertificateCA
"""
json = {'name': name,
'certificate': certificate}
return ElementCreator(cls, json) | python | def create(cls, name, certificate):
"""
Create a new external VPN CA for signing internal gateway
certificates.
:param str name: Name of VPN CA
:param str certificate: file name, path or certificate string.
:raises CreateElementFailed: Failed creating cert with reason
:rtype: VPNCertificateCA
"""
json = {'name': name,
'certificate': certificate}
return ElementCreator(cls, json) | ['def', 'create', '(', 'cls', ',', 'name', ',', 'certificate', ')', ':', 'json', '=', '{', "'name'", ':', 'name', ',', "'certificate'", ':', 'certificate', '}', 'return', 'ElementCreator', '(', 'cls', ',', 'json', ')'] | Create a new external VPN CA for signing internal gateway
certificates.
:param str name: Name of VPN CA
:param str certificate: file name, path or certificate string.
:raises CreateElementFailed: Failed creating cert with reason
:rtype: VPNCertificateCA | ['Create', 'a', 'new', 'external', 'VPN', 'CA', 'for', 'signing', 'internal', 'gateway', 'certificates', '.', ':', 'param', 'str', 'name', ':', 'Name', 'of', 'VPN', 'CA', ':', 'param', 'str', 'certificate', ':', 'file', 'name', 'path', 'or', 'certificate', 'string', '.', ':', 'raises', 'CreateElementFailed', ':', 'Failed', 'creating', 'cert', 'with', 'reason', ':', 'rtype', ':', 'VPNCertificateCA'] | train | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/administration/certificates/vpn.py#L28-L41 |
2,915 | CartoDB/cartoframes | cartoframes/credentials.py | Credentials.save | def save(self, config_loc=None):
"""Saves current user credentials to user directory.
Args:
config_loc (str, optional): Location where credentials are to be
stored. If no argument is provided, it will be sent to the
default location.
Example:
.. code::
from cartoframes import Credentials
creds = Credentials(username='eschbacher', key='abcdefg')
creds.save() # save to default location
"""
if not os.path.exists(_USER_CONFIG_DIR):
"""create directory if not exists"""
os.makedirs(_USER_CONFIG_DIR)
with open(_DEFAULT_PATH, 'w') as f:
json.dump({'key': self._key, 'base_url': self._base_url,
'username': self._username}, f) | python | def save(self, config_loc=None):
"""Saves current user credentials to user directory.
Args:
config_loc (str, optional): Location where credentials are to be
stored. If no argument is provided, it will be sent to the
default location.
Example:
.. code::
from cartoframes import Credentials
creds = Credentials(username='eschbacher', key='abcdefg')
creds.save() # save to default location
"""
if not os.path.exists(_USER_CONFIG_DIR):
"""create directory if not exists"""
os.makedirs(_USER_CONFIG_DIR)
with open(_DEFAULT_PATH, 'w') as f:
json.dump({'key': self._key, 'base_url': self._base_url,
'username': self._username}, f) | ['def', 'save', '(', 'self', ',', 'config_loc', '=', 'None', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', '_USER_CONFIG_DIR', ')', ':', '"""create directory if not exists"""', 'os', '.', 'makedirs', '(', '_USER_CONFIG_DIR', ')', 'with', 'open', '(', '_DEFAULT_PATH', ',', "'w'", ')', 'as', 'f', ':', 'json', '.', 'dump', '(', '{', "'key'", ':', 'self', '.', '_key', ',', "'base_url'", ':', 'self', '.', '_base_url', ',', "'username'", ':', 'self', '.', '_username', '}', ',', 'f', ')'] | Saves current user credentials to user directory.
Args:
config_loc (str, optional): Location where credentials are to be
stored. If no argument is provided, it will be sent to the
default location.
Example:
.. code::
from cartoframes import Credentials
creds = Credentials(username='eschbacher', key='abcdefg')
creds.save() # save to default location | ['Saves', 'current', 'user', 'credentials', 'to', 'user', 'directory', '.'] | train | https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/credentials.py#L94-L116 |
2,916 | sentinel-hub/sentinelhub-py | sentinelhub/geo_utils.py | get_utm_crs | def get_utm_crs(lng, lat, source_crs=CRS.WGS84):
""" Get CRS for UTM zone in which (lat, lng) is contained.
:param lng: longitude
:type lng: float
:param lat: latitude
:type lat: float
:param source_crs: source CRS
:type source_crs: constants.CRS
:return: CRS of the zone containing the lat,lon point
:rtype: constants.CRS
"""
if source_crs is not CRS.WGS84:
lng, lat = transform_point((lng, lat), source_crs, CRS.WGS84)
return CRS.get_utm_from_wgs84(lng, lat) | python | def get_utm_crs(lng, lat, source_crs=CRS.WGS84):
""" Get CRS for UTM zone in which (lat, lng) is contained.
:param lng: longitude
:type lng: float
:param lat: latitude
:type lat: float
:param source_crs: source CRS
:type source_crs: constants.CRS
:return: CRS of the zone containing the lat,lon point
:rtype: constants.CRS
"""
if source_crs is not CRS.WGS84:
lng, lat = transform_point((lng, lat), source_crs, CRS.WGS84)
return CRS.get_utm_from_wgs84(lng, lat) | ['def', 'get_utm_crs', '(', 'lng', ',', 'lat', ',', 'source_crs', '=', 'CRS', '.', 'WGS84', ')', ':', 'if', 'source_crs', 'is', 'not', 'CRS', '.', 'WGS84', ':', 'lng', ',', 'lat', '=', 'transform_point', '(', '(', 'lng', ',', 'lat', ')', ',', 'source_crs', ',', 'CRS', '.', 'WGS84', ')', 'return', 'CRS', '.', 'get_utm_from_wgs84', '(', 'lng', ',', 'lat', ')'] | Get CRS for UTM zone in which (lat, lng) is contained.
:param lng: longitude
:type lng: float
:param lat: latitude
:type lat: float
:param source_crs: source CRS
:type source_crs: constants.CRS
:return: CRS of the zone containing the lat,lon point
:rtype: constants.CRS | ['Get', 'CRS', 'for', 'UTM', 'zone', 'in', 'which', '(', 'lat', 'lng', ')', 'is', 'contained', '.'] | train | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geo_utils.py#L200-L214 |
2,917 | spyder-ide/spyder | spyder/utils/introspection/fallback_plugin.py | FallbackPlugin.get_completions | def get_completions(self, info):
"""Return a list of (completion, type) tuples
Simple completion based on python-like identifiers and whitespace
"""
if not info['obj']:
return
items = []
obj = info['obj']
if info['context']:
lexer = find_lexer_for_filename(info['filename'])
# get a list of token matches for the current object
tokens = lexer.get_tokens(info['source_code'])
for (context, token) in tokens:
token = token.strip()
if (context in info['context'] and
token.startswith(obj) and
obj != token):
items.append(token)
# add in keywords if not in a string
if context not in Token.Literal.String:
try:
keywords = get_keywords(lexer)
items.extend(k for k in keywords if k.startswith(obj))
except Exception:
pass
else:
tokens = set(re.findall(info['id_regex'], info['source_code']))
items = [item for item in tokens if
item.startswith(obj) and len(item) > len(obj)]
if '.' in obj:
start = obj.rfind('.') + 1
else:
start = 0
items = [i[start:len(obj)] + i[len(obj):].split('.')[0]
for i in items]
# get path completions
# get last word back to a space or a quote character
match = re.search(r'''[ "\']([\w\.\\\\/]+)\Z''', info['line'])
if match:
items += _complete_path(match.groups()[0])
return [(i, '') for i in sorted(items)] | python | def get_completions(self, info):
"""Return a list of (completion, type) tuples
Simple completion based on python-like identifiers and whitespace
"""
if not info['obj']:
return
items = []
obj = info['obj']
if info['context']:
lexer = find_lexer_for_filename(info['filename'])
# get a list of token matches for the current object
tokens = lexer.get_tokens(info['source_code'])
for (context, token) in tokens:
token = token.strip()
if (context in info['context'] and
token.startswith(obj) and
obj != token):
items.append(token)
# add in keywords if not in a string
if context not in Token.Literal.String:
try:
keywords = get_keywords(lexer)
items.extend(k for k in keywords if k.startswith(obj))
except Exception:
pass
else:
tokens = set(re.findall(info['id_regex'], info['source_code']))
items = [item for item in tokens if
item.startswith(obj) and len(item) > len(obj)]
if '.' in obj:
start = obj.rfind('.') + 1
else:
start = 0
items = [i[start:len(obj)] + i[len(obj):].split('.')[0]
for i in items]
# get path completions
# get last word back to a space or a quote character
match = re.search(r'''[ "\']([\w\.\\\\/]+)\Z''', info['line'])
if match:
items += _complete_path(match.groups()[0])
return [(i, '') for i in sorted(items)] | ['def', 'get_completions', '(', 'self', ',', 'info', ')', ':', 'if', 'not', 'info', '[', "'obj'", ']', ':', 'return', 'items', '=', '[', ']', 'obj', '=', 'info', '[', "'obj'", ']', 'if', 'info', '[', "'context'", ']', ':', 'lexer', '=', 'find_lexer_for_filename', '(', 'info', '[', "'filename'", ']', ')', '# get a list of token matches for the current object\r', 'tokens', '=', 'lexer', '.', 'get_tokens', '(', 'info', '[', "'source_code'", ']', ')', 'for', '(', 'context', ',', 'token', ')', 'in', 'tokens', ':', 'token', '=', 'token', '.', 'strip', '(', ')', 'if', '(', 'context', 'in', 'info', '[', "'context'", ']', 'and', 'token', '.', 'startswith', '(', 'obj', ')', 'and', 'obj', '!=', 'token', ')', ':', 'items', '.', 'append', '(', 'token', ')', '# add in keywords if not in a string\r', 'if', 'context', 'not', 'in', 'Token', '.', 'Literal', '.', 'String', ':', 'try', ':', 'keywords', '=', 'get_keywords', '(', 'lexer', ')', 'items', '.', 'extend', '(', 'k', 'for', 'k', 'in', 'keywords', 'if', 'k', '.', 'startswith', '(', 'obj', ')', ')', 'except', 'Exception', ':', 'pass', 'else', ':', 'tokens', '=', 'set', '(', 're', '.', 'findall', '(', 'info', '[', "'id_regex'", ']', ',', 'info', '[', "'source_code'", ']', ')', ')', 'items', '=', '[', 'item', 'for', 'item', 'in', 'tokens', 'if', 'item', '.', 'startswith', '(', 'obj', ')', 'and', 'len', '(', 'item', ')', '>', 'len', '(', 'obj', ')', ']', 'if', "'.'", 'in', 'obj', ':', 'start', '=', 'obj', '.', 'rfind', '(', "'.'", ')', '+', '1', 'else', ':', 'start', '=', '0', 'items', '=', '[', 'i', '[', 'start', ':', 'len', '(', 'obj', ')', ']', '+', 'i', '[', 'len', '(', 'obj', ')', ':', ']', '.', 'split', '(', "'.'", ')', '[', '0', ']', 'for', 'i', 'in', 'items', ']', '# get path completions\r', '# get last word back to a space or a quote character\r', 'match', '=', 're', '.', 'search', '(', 'r\'\'\'[ "\\\']([\\w\\.\\\\\\\\/]+)\\Z\'\'\'', ',', 'info', '[', "'line'", ']', ')', 'if', 'match', ':', 'items', '+=', '_complete_path', '(', 'match', '.', 'groups', '(', ')', '[', '0', ']', ')', 'return', '[', '(', 'i', ',', "''", ')', 'for', 'i', 'in', 'sorted', '(', 'items', ')', ']'] | Return a list of (completion, type) tuples
Simple completion based on python-like identifiers and whitespace | ['Return', 'a', 'list', 'of', '(', 'completion', 'type', ')', 'tuples', 'Simple', 'completion', 'based', 'on', 'python', '-', 'like', 'identifiers', 'and', 'whitespace'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/introspection/fallback_plugin.py#L37-L79 |
2,918 | opendatateam/udata | udata/core/spatial/geoids.py | build | def build(level, code, validity=None):
'''Serialize a GeoID from its parts'''
spatial = ':'.join((level, code))
if not validity:
return spatial
elif isinstance(validity, basestring):
return '@'.join((spatial, validity))
elif isinstance(validity, datetime):
return '@'.join((spatial, validity.date().isoformat()))
elif isinstance(validity, date):
return '@'.join((spatial, validity.isoformat()))
else:
msg = 'Unknown GeoID validity type: {0}'
raise GeoIDError(msg.format(type(validity).__name__)) | python | def build(level, code, validity=None):
'''Serialize a GeoID from its parts'''
spatial = ':'.join((level, code))
if not validity:
return spatial
elif isinstance(validity, basestring):
return '@'.join((spatial, validity))
elif isinstance(validity, datetime):
return '@'.join((spatial, validity.date().isoformat()))
elif isinstance(validity, date):
return '@'.join((spatial, validity.isoformat()))
else:
msg = 'Unknown GeoID validity type: {0}'
raise GeoIDError(msg.format(type(validity).__name__)) | ['def', 'build', '(', 'level', ',', 'code', ',', 'validity', '=', 'None', ')', ':', 'spatial', '=', "':'", '.', 'join', '(', '(', 'level', ',', 'code', ')', ')', 'if', 'not', 'validity', ':', 'return', 'spatial', 'elif', 'isinstance', '(', 'validity', ',', 'basestring', ')', ':', 'return', "'@'", '.', 'join', '(', '(', 'spatial', ',', 'validity', ')', ')', 'elif', 'isinstance', '(', 'validity', ',', 'datetime', ')', ':', 'return', "'@'", '.', 'join', '(', '(', 'spatial', ',', 'validity', '.', 'date', '(', ')', '.', 'isoformat', '(', ')', ')', ')', 'elif', 'isinstance', '(', 'validity', ',', 'date', ')', ':', 'return', "'@'", '.', 'join', '(', '(', 'spatial', ',', 'validity', '.', 'isoformat', '(', ')', ')', ')', 'else', ':', 'msg', '=', "'Unknown GeoID validity type: {0}'", 'raise', 'GeoIDError', '(', 'msg', '.', 'format', '(', 'type', '(', 'validity', ')', '.', '__name__', ')', ')'] | Serialize a GeoID from its parts | ['Serialize', 'a', 'GeoID', 'from', 'its', 'parts'] | train | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/geoids.py#L41-L54 |
2,919 | projectatomic/osbs-client | osbs/api.py | OSBS.get_pod_for_build | def get_pod_for_build(self, build_id):
"""
:return: PodResponse object for pod relating to the build
"""
pods = self.os.list_pods(label='openshift.io/build.name=%s' % build_id)
serialized_response = pods.json()
pod_list = [PodResponse(pod) for pod in serialized_response["items"]]
if not pod_list:
raise OsbsException("No pod for build")
elif len(pod_list) != 1:
raise OsbsException("Only one pod expected but %d returned",
len(pod_list))
return pod_list[0] | python | def get_pod_for_build(self, build_id):
"""
:return: PodResponse object for pod relating to the build
"""
pods = self.os.list_pods(label='openshift.io/build.name=%s' % build_id)
serialized_response = pods.json()
pod_list = [PodResponse(pod) for pod in serialized_response["items"]]
if not pod_list:
raise OsbsException("No pod for build")
elif len(pod_list) != 1:
raise OsbsException("Only one pod expected but %d returned",
len(pod_list))
return pod_list[0] | ['def', 'get_pod_for_build', '(', 'self', ',', 'build_id', ')', ':', 'pods', '=', 'self', '.', 'os', '.', 'list_pods', '(', 'label', '=', "'openshift.io/build.name=%s'", '%', 'build_id', ')', 'serialized_response', '=', 'pods', '.', 'json', '(', ')', 'pod_list', '=', '[', 'PodResponse', '(', 'pod', ')', 'for', 'pod', 'in', 'serialized_response', '[', '"items"', ']', ']', 'if', 'not', 'pod_list', ':', 'raise', 'OsbsException', '(', '"No pod for build"', ')', 'elif', 'len', '(', 'pod_list', ')', '!=', '1', ':', 'raise', 'OsbsException', '(', '"Only one pod expected but %d returned"', ',', 'len', '(', 'pod_list', ')', ')', 'return', 'pod_list', '[', '0', ']'] | :return: PodResponse object for pod relating to the build | [':', 'return', ':', 'PodResponse', 'object', 'for', 'pod', 'relating', 'to', 'the', 'build'] | train | https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/api.py#L182-L194 |
2,920 | Illumina/interop | src/examples/python/summary.py | main | def main():
""" Retrieve run folder paths from the command line
Ensure only metrics required for summary are loaded
Load the run metrics
Calculate the summary metrics
Display error by lane, read
"""
logging.basicConfig(level=logging.INFO)
run_metrics = py_interop_run_metrics.run_metrics()
summary = py_interop_summary.run_summary()
valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)
py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load)
for run_folder_path in sys.argv[1:]:
run_folder = os.path.basename(run_folder_path)
try:
run_metrics.read(run_folder_path, valid_to_load)
except Exception, ex:
logging.warn("Skipping - cannot read RunInfo.xml: %s - %s"%(run_folder, str(ex)))
continue
py_interop_summary.summarize_run_metrics(run_metrics, summary)
error_rate_read_lane_surface = numpy.zeros((summary.size(), summary.lane_count(), summary.surface_count()))
for read_index in xrange(summary.size()):
for lane_index in xrange(summary.lane_count()):
for surface_index in xrange(summary.surface_count()):
error_rate_read_lane_surface[read_index, lane_index, surface_index] = \
summary.at(read_index).at(lane_index).at(surface_index).error_rate().mean()
logging.info("Run Folder: "+run_folder)
for read_index in xrange(summary.size()):
read_summary = summary.at(read_index)
logging.info("Read "+str(read_summary.read().number())+" - Top Surface Mean Error: "+str(error_rate_read_lane_surface[read_index, :, 0].mean())) | python | def main():
""" Retrieve run folder paths from the command line
Ensure only metrics required for summary are loaded
Load the run metrics
Calculate the summary metrics
Display error by lane, read
"""
logging.basicConfig(level=logging.INFO)
run_metrics = py_interop_run_metrics.run_metrics()
summary = py_interop_summary.run_summary()
valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)
py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load)
for run_folder_path in sys.argv[1:]:
run_folder = os.path.basename(run_folder_path)
try:
run_metrics.read(run_folder_path, valid_to_load)
except Exception, ex:
logging.warn("Skipping - cannot read RunInfo.xml: %s - %s"%(run_folder, str(ex)))
continue
py_interop_summary.summarize_run_metrics(run_metrics, summary)
error_rate_read_lane_surface = numpy.zeros((summary.size(), summary.lane_count(), summary.surface_count()))
for read_index in xrange(summary.size()):
for lane_index in xrange(summary.lane_count()):
for surface_index in xrange(summary.surface_count()):
error_rate_read_lane_surface[read_index, lane_index, surface_index] = \
summary.at(read_index).at(lane_index).at(surface_index).error_rate().mean()
logging.info("Run Folder: "+run_folder)
for read_index in xrange(summary.size()):
read_summary = summary.at(read_index)
logging.info("Read "+str(read_summary.read().number())+" - Top Surface Mean Error: "+str(error_rate_read_lane_surface[read_index, :, 0].mean())) | ['def', 'main', '(', ')', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ')', 'run_metrics', '=', 'py_interop_run_metrics', '.', 'run_metrics', '(', ')', 'summary', '=', 'py_interop_summary', '.', 'run_summary', '(', ')', 'valid_to_load', '=', 'py_interop_run', '.', 'uchar_vector', '(', 'py_interop_run', '.', 'MetricCount', ',', '0', ')', 'py_interop_run_metrics', '.', 'list_summary_metrics_to_load', '(', 'valid_to_load', ')', 'for', 'run_folder_path', 'in', 'sys', '.', 'argv', '[', '1', ':', ']', ':', 'run_folder', '=', 'os', '.', 'path', '.', 'basename', '(', 'run_folder_path', ')', 'try', ':', 'run_metrics', '.', 'read', '(', 'run_folder_path', ',', 'valid_to_load', ')', 'except', 'Exception', ',', 'ex', ':', 'logging', '.', 'warn', '(', '"Skipping - cannot read RunInfo.xml: %s - %s"', '%', '(', 'run_folder', ',', 'str', '(', 'ex', ')', ')', ')', 'continue', 'py_interop_summary', '.', 'summarize_run_metrics', '(', 'run_metrics', ',', 'summary', ')', 'error_rate_read_lane_surface', '=', 'numpy', '.', 'zeros', '(', '(', 'summary', '.', 'size', '(', ')', ',', 'summary', '.', 'lane_count', '(', ')', ',', 'summary', '.', 'surface_count', '(', ')', ')', ')', 'for', 'read_index', 'in', 'xrange', '(', 'summary', '.', 'size', '(', ')', ')', ':', 'for', 'lane_index', 'in', 'xrange', '(', 'summary', '.', 'lane_count', '(', ')', ')', ':', 'for', 'surface_index', 'in', 'xrange', '(', 'summary', '.', 'surface_count', '(', ')', ')', ':', 'error_rate_read_lane_surface', '[', 'read_index', ',', 'lane_index', ',', 'surface_index', ']', '=', 'summary', '.', 'at', '(', 'read_index', ')', '.', 'at', '(', 'lane_index', ')', '.', 'at', '(', 'surface_index', ')', '.', 'error_rate', '(', ')', '.', 'mean', '(', ')', 'logging', '.', 'info', '(', '"Run Folder: "', '+', 'run_folder', ')', 'for', 'read_index', 'in', 'xrange', '(', 'summary', '.', 'size', '(', ')', ')', ':', 'read_summary', '=', 'summary', '.', 'at', '(', 'read_index', ')', 'logging', '.', 'info', '(', '"Read "', '+', 'str', '(', 'read_summary', '.', 'read', '(', ')', '.', 'number', '(', ')', ')', '+', '" - Top Surface Mean Error: "', '+', 'str', '(', 'error_rate_read_lane_surface', '[', 'read_index', ',', ':', ',', '0', ']', '.', 'mean', '(', ')', ')', ')'] | Retrieve run folder paths from the command line
Ensure only metrics required for summary are loaded
Load the run metrics
Calculate the summary metrics
Display error by lane, read | ['Retrieve', 'run', 'folder', 'paths', 'from', 'the', 'command', 'line', 'Ensure', 'only', 'metrics', 'required', 'for', 'summary', 'are', 'loaded', 'Load', 'the', 'run', 'metrics', 'Calculate', 'the', 'summary', 'metrics', 'Display', 'error', 'by', 'lane', 'read'] | train | https://github.com/Illumina/interop/blob/a55b40bde4b764e3652758f6cdf72aef5f473370/src/examples/python/summary.py#L17-L49 |
2,921 | influxdata/influxdb-python | influxdb/influxdb08/client.py | InfluxDBClient.delete_cluster_admin | def delete_cluster_admin(self, username):
"""Delete cluster admin."""
url = "cluster_admins/{0}".format(username)
self.request(
url=url,
method='DELETE',
expected_response_code=200
)
return True | python | def delete_cluster_admin(self, username):
"""Delete cluster admin."""
url = "cluster_admins/{0}".format(username)
self.request(
url=url,
method='DELETE',
expected_response_code=200
)
return True | ['def', 'delete_cluster_admin', '(', 'self', ',', 'username', ')', ':', 'url', '=', '"cluster_admins/{0}"', '.', 'format', '(', 'username', ')', 'self', '.', 'request', '(', 'url', '=', 'url', ',', 'method', '=', "'DELETE'", ',', 'expected_response_code', '=', '200', ')', 'return', 'True'] | Delete cluster admin. | ['Delete', 'cluster', 'admin', '.'] | train | https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/client.py#L640-L650 |
2,922 | Erotemic/utool | utool/util_list.py | isetdiff_flags | def isetdiff_flags(list1, list2):
"""
move to util_iter
"""
set2 = set(list2)
return (item not in set2 for item in list1) | python | def isetdiff_flags(list1, list2):
"""
move to util_iter
"""
set2 = set(list2)
return (item not in set2 for item in list1) | ['def', 'isetdiff_flags', '(', 'list1', ',', 'list2', ')', ':', 'set2', '=', 'set', '(', 'list2', ')', 'return', '(', 'item', 'not', 'in', 'set2', 'for', 'item', 'in', 'list1', ')'] | move to util_iter | ['move', 'to', 'util_iter'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L1437-L1442 |
2,923 | sdispater/eloquent | eloquent/orm/relations/has_many_through.py | HasManyThrough._set_join | def _set_join(self, query=None):
"""
Set the join clause for the query.
"""
if not query:
query = self._query
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key) | python | def _set_join(self, query=None):
"""
Set the join clause for the query.
"""
if not query:
query = self._query
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key) | ['def', '_set_join', '(', 'self', ',', 'query', '=', 'None', ')', ':', 'if', 'not', 'query', ':', 'query', '=', 'self', '.', '_query', 'foreign_key', '=', "'%s.%s'", '%', '(', 'self', '.', '_related', '.', 'get_table', '(', ')', ',', 'self', '.', '_second_key', ')', 'query', '.', 'join', '(', 'self', '.', '_parent', '.', 'get_table', '(', ')', ',', 'self', '.', 'get_qualified_parent_key_name', '(', ')', ',', "'='", ',', 'foreign_key', ')'] | Set the join clause for the query. | ['Set', 'the', 'join', 'clause', 'for', 'the', 'query', '.'] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/has_many_through.py#L61-L70 |
2,924 | aquatix/python-utilkit | utilkit/stringutil.py | safe_unicode | def safe_unicode(obj, *args):
""" return the unicode representation of obj """
try:
return unicode(obj, *args) # noqa for undefined-variable
except UnicodeDecodeError:
# obj is byte string
ascii_text = str(obj).encode('string_escape')
try:
return unicode(ascii_text) # noqa for undefined-variable
except NameError:
# This is Python 3, just return the obj as it's already unicode
return obj
except NameError:
# This is Python 3, just return the obj as it's already unicode
return obj | python | def safe_unicode(obj, *args):
""" return the unicode representation of obj """
try:
return unicode(obj, *args) # noqa for undefined-variable
except UnicodeDecodeError:
# obj is byte string
ascii_text = str(obj).encode('string_escape')
try:
return unicode(ascii_text) # noqa for undefined-variable
except NameError:
# This is Python 3, just return the obj as it's already unicode
return obj
except NameError:
# This is Python 3, just return the obj as it's already unicode
return obj | ['def', 'safe_unicode', '(', 'obj', ',', '*', 'args', ')', ':', 'try', ':', 'return', 'unicode', '(', 'obj', ',', '*', 'args', ')', '# noqa for undefined-variable', 'except', 'UnicodeDecodeError', ':', '# obj is byte string', 'ascii_text', '=', 'str', '(', 'obj', ')', '.', 'encode', '(', "'string_escape'", ')', 'try', ':', 'return', 'unicode', '(', 'ascii_text', ')', '# noqa for undefined-variable', 'except', 'NameError', ':', "# This is Python 3, just return the obj as it's already unicode", 'return', 'obj', 'except', 'NameError', ':', "# This is Python 3, just return the obj as it's already unicode", 'return', 'obj'] | return the unicode representation of obj | ['return', 'the', 'unicode', 'representation', 'of', 'obj'] | train | https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/stringutil.py#L6-L20 |
2,925 | twoolie/NBT | nbt/chunk.py | BlockArray.get_blocks_byte_array | def get_blocks_byte_array(self, buffer=False):
"""Return a list of all blocks in this chunk."""
if buffer:
length = len(self.blocksList)
return BytesIO(pack(">i", length)+self.get_blocks_byte_array())
else:
return array.array('B', self.blocksList).tostring() | python | def get_blocks_byte_array(self, buffer=False):
"""Return a list of all blocks in this chunk."""
if buffer:
length = len(self.blocksList)
return BytesIO(pack(">i", length)+self.get_blocks_byte_array())
else:
return array.array('B', self.blocksList).tostring() | ['def', 'get_blocks_byte_array', '(', 'self', ',', 'buffer', '=', 'False', ')', ':', 'if', 'buffer', ':', 'length', '=', 'len', '(', 'self', '.', 'blocksList', ')', 'return', 'BytesIO', '(', 'pack', '(', '">i"', ',', 'length', ')', '+', 'self', '.', 'get_blocks_byte_array', '(', ')', ')', 'else', ':', 'return', 'array', '.', 'array', '(', "'B'", ',', 'self', '.', 'blocksList', ')', '.', 'tostring', '(', ')'] | Return a list of all blocks in this chunk. | ['Return', 'a', 'list', 'of', 'all', 'blocks', 'in', 'this', 'chunk', '.'] | train | https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/chunk.py#L329-L335 |
2,926 | tensorpack/tensorpack | tensorpack/models/registry.py | layer_register | def layer_register(
log_shape=False,
use_scope=True):
"""
Args:
log_shape (bool): log input/output shape of this layer
use_scope (bool or None):
Whether to call this layer with an extra first argument as variable scope.
When set to None, it can be called either with or without
the scope name argument, depending on whether the first argument
is string or not.
Returns:
A decorator used to register a layer.
Example:
.. code-block:: python
@layer_register(use_scope=True)
def add10(x):
return x + tf.get_variable('W', shape=[10])
# use it:
output = add10('layer_name', input) # the function will be called under variable scope "layer_name".
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
assert args[0] is not None, args
if use_scope:
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
assert isinstance(name, six.string_types), "First argument for \"{}\" should be a string. ".format(
func.__name__) + "Did you forget to specify the name of the layer?"
else:
assert not log_shape
if isinstance(args[0], six.string_types):
if use_scope is False:
logger.warn(
"Please call layer {} without the first scope name argument, "
"or register the layer with use_scope=None to allow "
"two calling methods.".format(func.__name__))
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
else:
inputs = args[0]
name = None
if not (isinstance(inputs, (tf.Tensor, tf.Variable)) or
(isinstance(inputs, (list, tuple)) and
isinstance(inputs[0], (tf.Tensor, tf.Variable)))):
raise ValueError("Invalid inputs to layer: " + str(inputs))
# use kwargs from current argument scope
actual_args = copy.copy(get_arg_scope()[func.__name__])
# explicit kwargs overwrite argscope
actual_args.update(kwargs)
# if six.PY3:
# # explicit positional args also override argscope. only work in PY3
# posargmap = inspect.signature(func).bind_partial(*args).arguments
# for k in six.iterkeys(posargmap):
# if k in actual_args:
# del actual_args[k]
if name is not None: # use scope
with tfv1.variable_scope(name) as scope:
# this name is only used to suppress logging, doesn't hurt to do some heuristics
scope_name = re.sub('tower[0-9]+/', '', scope.name)
do_log_shape = log_shape and scope_name not in _LAYER_LOGGED
if do_log_shape:
logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))
# run the actual function
outputs = func(*args, **actual_args)
if do_log_shape:
# log shape info and add activation
logger.info("{} output: {}".format(
scope.name, get_shape_str(outputs)))
_LAYER_LOGGED.add(scope_name)
else:
# run the actual function
outputs = func(*args, **actual_args)
return outputs
wrapped_func.symbolic_function = func # attribute to access the underlying function object
wrapped_func.use_scope = use_scope
_register(func.__name__, wrapped_func)
return wrapped_func
return wrapper | python | def layer_register(
log_shape=False,
use_scope=True):
"""
Args:
log_shape (bool): log input/output shape of this layer
use_scope (bool or None):
Whether to call this layer with an extra first argument as variable scope.
When set to None, it can be called either with or without
the scope name argument, depending on whether the first argument
is string or not.
Returns:
A decorator used to register a layer.
Example:
.. code-block:: python
@layer_register(use_scope=True)
def add10(x):
return x + tf.get_variable('W', shape=[10])
# use it:
output = add10('layer_name', input) # the function will be called under variable scope "layer_name".
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
assert args[0] is not None, args
if use_scope:
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
assert isinstance(name, six.string_types), "First argument for \"{}\" should be a string. ".format(
func.__name__) + "Did you forget to specify the name of the layer?"
else:
assert not log_shape
if isinstance(args[0], six.string_types):
if use_scope is False:
logger.warn(
"Please call layer {} without the first scope name argument, "
"or register the layer with use_scope=None to allow "
"two calling methods.".format(func.__name__))
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
else:
inputs = args[0]
name = None
if not (isinstance(inputs, (tf.Tensor, tf.Variable)) or
(isinstance(inputs, (list, tuple)) and
isinstance(inputs[0], (tf.Tensor, tf.Variable)))):
raise ValueError("Invalid inputs to layer: " + str(inputs))
# use kwargs from current argument scope
actual_args = copy.copy(get_arg_scope()[func.__name__])
# explicit kwargs overwrite argscope
actual_args.update(kwargs)
# if six.PY3:
# # explicit positional args also override argscope. only work in PY3
# posargmap = inspect.signature(func).bind_partial(*args).arguments
# for k in six.iterkeys(posargmap):
# if k in actual_args:
# del actual_args[k]
if name is not None: # use scope
with tfv1.variable_scope(name) as scope:
# this name is only used to suppress logging, doesn't hurt to do some heuristics
scope_name = re.sub('tower[0-9]+/', '', scope.name)
do_log_shape = log_shape and scope_name not in _LAYER_LOGGED
if do_log_shape:
logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))
# run the actual function
outputs = func(*args, **actual_args)
if do_log_shape:
# log shape info and add activation
logger.info("{} output: {}".format(
scope.name, get_shape_str(outputs)))
_LAYER_LOGGED.add(scope_name)
else:
# run the actual function
outputs = func(*args, **actual_args)
return outputs
wrapped_func.symbolic_function = func # attribute to access the underlying function object
wrapped_func.use_scope = use_scope
_register(func.__name__, wrapped_func)
return wrapped_func
return wrapper | ['def', 'layer_register', '(', 'log_shape', '=', 'False', ',', 'use_scope', '=', 'True', ')', ':', 'def', 'wrapper', '(', 'func', ')', ':', '@', 'wraps', '(', 'func', ')', 'def', 'wrapped_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'assert', 'args', '[', '0', ']', 'is', 'not', 'None', ',', 'args', 'if', 'use_scope', ':', 'name', ',', 'inputs', '=', 'args', '[', '0', ']', ',', 'args', '[', '1', ']', 'args', '=', 'args', '[', '1', ':', ']', '# actual positional args used to call func', 'assert', 'isinstance', '(', 'name', ',', 'six', '.', 'string_types', ')', ',', '"First argument for \\"{}\\" should be a string. "', '.', 'format', '(', 'func', '.', '__name__', ')', '+', '"Did you forget to specify the name of the layer?"', 'else', ':', 'assert', 'not', 'log_shape', 'if', 'isinstance', '(', 'args', '[', '0', ']', ',', 'six', '.', 'string_types', ')', ':', 'if', 'use_scope', 'is', 'False', ':', 'logger', '.', 'warn', '(', '"Please call layer {} without the first scope name argument, "', '"or register the layer with use_scope=None to allow "', '"two calling methods."', '.', 'format', '(', 'func', '.', '__name__', ')', ')', 'name', ',', 'inputs', '=', 'args', '[', '0', ']', ',', 'args', '[', '1', ']', 'args', '=', 'args', '[', '1', ':', ']', '# actual positional args used to call func', 'else', ':', 'inputs', '=', 'args', '[', '0', ']', 'name', '=', 'None', 'if', 'not', '(', 'isinstance', '(', 'inputs', ',', '(', 'tf', '.', 'Tensor', ',', 'tf', '.', 'Variable', ')', ')', 'or', '(', 'isinstance', '(', 'inputs', ',', '(', 'list', ',', 'tuple', ')', ')', 'and', 'isinstance', '(', 'inputs', '[', '0', ']', ',', '(', 'tf', '.', 'Tensor', ',', 'tf', '.', 'Variable', ')', ')', ')', ')', ':', 'raise', 'ValueError', '(', '"Invalid inputs to layer: "', '+', 'str', '(', 'inputs', ')', ')', '# use kwargs from current argument scope', 'actual_args', '=', 'copy', '.', 'copy', '(', 'get_arg_scope', '(', ')', '[', 'func', '.', '__name__', ']', ')', '# explicit kwargs overwrite argscope', 'actual_args', '.', 'update', '(', 'kwargs', ')', '# if six.PY3:', '# # explicit positional args also override argscope. 
only work in PY3', '# posargmap = inspect.signature(func).bind_partial(*args).arguments', '# for k in six.iterkeys(posargmap):', '# if k in actual_args:', '# del actual_args[k]', 'if', 'name', 'is', 'not', 'None', ':', '# use scope', 'with', 'tfv1', '.', 'variable_scope', '(', 'name', ')', 'as', 'scope', ':', "# this name is only used to surpress logging, doesn't hurt to do some heuristics", 'scope_name', '=', 're', '.', 'sub', '(', "'tower[0-9]+/'", ',', "''", ',', 'scope', '.', 'name', ')', 'do_log_shape', '=', 'log_shape', 'and', 'scope_name', 'not', 'in', '_LAYER_LOGGED', 'if', 'do_log_shape', ':', 'logger', '.', 'info', '(', '"{} input: {}"', '.', 'format', '(', 'scope', '.', 'name', ',', 'get_shape_str', '(', 'inputs', ')', ')', ')', '# run the actual function', 'outputs', '=', 'func', '(', '*', 'args', ',', '*', '*', 'actual_args', ')', 'if', 'do_log_shape', ':', '# log shape info and add activation', 'logger', '.', 'info', '(', '"{} output: {}"', '.', 'format', '(', 'scope', '.', 'name', ',', 'get_shape_str', '(', 'outputs', ')', ')', ')', '_LAYER_LOGGED', '.', 'add', '(', 'scope_name', ')', 'else', ':', '# run the actual function', 'outputs', '=', 'func', '(', '*', 'args', ',', '*', '*', 'actual_args', ')', 'return', 'outputs', 'wrapped_func', '.', 'symbolic_function', '=', 'func', '# attribute to access the underlying function object', 'wrapped_func', '.', 'use_scope', '=', 'use_scope', '_register', '(', 'func', '.', '__name__', ',', 'wrapped_func', ')', 'return', 'wrapped_func', 'return', 'wrapper'] | Args:
log_shape (bool): log input/output shape of this layer
use_scope (bool or None):
Whether to call this layer with an extra first argument as variable scope.
When set to None, it can be called either with or without
the scope name argument, depending on whether the first argument
is string or not.
Returns:
A decorator used to register a layer.
Example:
.. code-block:: python
@layer_register(use_scope=True)
def add10(x):
return x + tf.get_variable('W', shape=[10])
# use it:
output = add10('layer_name', input) # the function will be called under variable scope "layer_name". | ['Args', ':', 'log_shape', '(', 'bool', ')', ':', 'log', 'input', '/', 'output', 'shape', 'of', 'this', 'layer', 'use_scope', '(', 'bool', 'or', 'None', ')', ':', 'Whether', 'to', 'call', 'this', 'layer', 'with', 'an', 'extra', 'first', 'argument', 'as', 'variable', 'scope', '.', 'When', 'set', 'to', 'None', 'it', 'can', 'be', 'called', 'either', 'with', 'or', 'without', 'the', 'scope', 'name', 'argument', 'depend', 'on', 'whether', 'the', 'first', 'argument', 'is', 'string', 'or', 'not', '.'] | train | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/registry.py#L64-L155 |
2,927 | SCIP-Interfaces/PySCIPOpt | examples/finished/read_tsplib.py | distATT | def distATT(x1,y1,x2,y2):
"""Compute the ATT distance between two points (see TSPLIB documentation)"""
xd = x2 - x1
yd = y2 - y1
rij = math.sqrt((xd*xd + yd*yd) /10.)
tij = int(rij + .5)
if tij < rij:
return tij + 1
else:
return tij | python | def distATT(x1,y1,x2,y2):
"""Compute the ATT distance between two points (see TSPLIB documentation)"""
xd = x2 - x1
yd = y2 - y1
rij = math.sqrt((xd*xd + yd*yd) /10.)
tij = int(rij + .5)
if tij < rij:
return tij + 1
else:
return tij | ['def', 'distATT', '(', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', ')', ':', 'xd', '=', 'x2', '-', 'x1', 'yd', '=', 'y2', '-', 'y1', 'rij', '=', 'math', '.', 'sqrt', '(', '(', 'xd', '*', 'xd', '+', 'yd', '*', 'yd', ')', '/', '10.', ')', 'tij', '=', 'int', '(', 'rij', '+', '.5', ')', 'if', 'tij', '<', 'rij', ':', 'return', 'tij', '+', '1', 'else', ':', 'return', 'tij'] | Compute the ATT distance between two points (see TSPLIB documentation) | ['Compute', 'the', 'ATT', 'distance', 'between', 'two', 'points', '(', 'see', 'TSPLIB', 'documentation', ')'] | train | https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/read_tsplib.py#L42-L51 |
2,928 | JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgetdelegate.py | WidgetDelegateViewMixin.index_at_event | def index_at_event(self, event):
"""Get the index under the position of the given MouseEvent
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:returns: the index
:rtype: :class:`QtCore.QModelIndex`
:raises: None
"""
# find index at mouse position
globalpos = event.globalPos()
viewport = self.viewport()
pos = viewport.mapFromGlobal(globalpos)
return self.indexAt(pos) | python | def index_at_event(self, event):
"""Get the index under the position of the given MouseEvent
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:returns: the index
:rtype: :class:`QtCore.QModelIndex`
:raises: None
"""
# find index at mouse position
globalpos = event.globalPos()
viewport = self.viewport()
pos = viewport.mapFromGlobal(globalpos)
return self.indexAt(pos) | ['def', 'index_at_event', '(', 'self', ',', 'event', ')', ':', '# find index at mouse position', 'globalpos', '=', 'event', '.', 'globalPos', '(', ')', 'viewport', '=', 'self', '.', 'viewport', '(', ')', 'pos', '=', 'viewport', '.', 'mapFromGlobal', '(', 'globalpos', ')', 'return', 'self', '.', 'indexAt', '(', 'pos', ')'] | Get the index under the position of the given MouseEvent
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:returns: the index
:rtype: :class:`QtCore.QModelIndex`
:raises: None | ['Get', 'the', 'index', 'under', 'the', 'position', 'of', 'the', 'given', 'MouseEvent'] | train | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgetdelegate.py#L362-L375 |
2,929 | dmlc/gluon-nlp | src/gluonnlp/data/batchify/batchify.py | _pad_arrs_to_max_length | def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, use_shared_mem, dtype):
"""Inner Implementation of the Pad batchify
Parameters
----------
arrs : list
pad_axis : int
pad_val : number
use_shared_mem : bool, default False
Returns
-------
ret : NDArray
original_length : NDArray
"""
if isinstance(arrs[0], mx.nd.NDArray):
dtype = arrs[0].dtype if dtype is None else dtype
arrs = [arr.asnumpy() for arr in arrs]
elif not isinstance(arrs[0], np.ndarray):
arrs = [np.asarray(ele) for ele in arrs]
else:
dtype = arrs[0].dtype if dtype is None else dtype
original_length = [ele.shape[pad_axis] for ele in arrs]
max_size = max(original_length)
ret_shape = list(arrs[0].shape)
ret_shape[pad_axis] = max_size
ret_shape = (len(arrs), ) + tuple(ret_shape)
ret = np.full(shape=ret_shape, fill_value=pad_val, dtype=dtype)
for i, arr in enumerate(arrs):
if arr.shape[pad_axis] == max_size:
ret[i] = arr
else:
slices = [slice(None) for _ in range(arr.ndim)]
slices[pad_axis] = slice(0, arr.shape[pad_axis])
if slices[pad_axis].start != slices[pad_axis].stop:
slices = [slice(i, i + 1)] + slices
ret[tuple(slices)] = arr
ctx = mx.Context('cpu_shared', 0) if use_shared_mem else mx.cpu()
ret = mx.nd.array(ret, ctx=ctx, dtype=dtype)
original_length = mx.nd.array(original_length, ctx=ctx, dtype=np.int32)
return ret, original_length | python | def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, use_shared_mem, dtype):
"""Inner Implementation of the Pad batchify
Parameters
----------
arrs : list
pad_axis : int
pad_val : number
use_shared_mem : bool, default False
Returns
-------
ret : NDArray
original_length : NDArray
"""
if isinstance(arrs[0], mx.nd.NDArray):
dtype = arrs[0].dtype if dtype is None else dtype
arrs = [arr.asnumpy() for arr in arrs]
elif not isinstance(arrs[0], np.ndarray):
arrs = [np.asarray(ele) for ele in arrs]
else:
dtype = arrs[0].dtype if dtype is None else dtype
original_length = [ele.shape[pad_axis] for ele in arrs]
max_size = max(original_length)
ret_shape = list(arrs[0].shape)
ret_shape[pad_axis] = max_size
ret_shape = (len(arrs), ) + tuple(ret_shape)
ret = np.full(shape=ret_shape, fill_value=pad_val, dtype=dtype)
for i, arr in enumerate(arrs):
if arr.shape[pad_axis] == max_size:
ret[i] = arr
else:
slices = [slice(None) for _ in range(arr.ndim)]
slices[pad_axis] = slice(0, arr.shape[pad_axis])
if slices[pad_axis].start != slices[pad_axis].stop:
slices = [slice(i, i + 1)] + slices
ret[tuple(slices)] = arr
ctx = mx.Context('cpu_shared', 0) if use_shared_mem else mx.cpu()
ret = mx.nd.array(ret, ctx=ctx, dtype=dtype)
original_length = mx.nd.array(original_length, ctx=ctx, dtype=np.int32)
return ret, original_length | ['def', '_pad_arrs_to_max_length', '(', 'arrs', ',', 'pad_axis', ',', 'pad_val', ',', 'use_shared_mem', ',', 'dtype', ')', ':', 'if', 'isinstance', '(', 'arrs', '[', '0', ']', ',', 'mx', '.', 'nd', '.', 'NDArray', ')', ':', 'dtype', '=', 'arrs', '[', '0', ']', '.', 'dtype', 'if', 'dtype', 'is', 'None', 'else', 'dtype', 'arrs', '=', '[', 'arr', '.', 'asnumpy', '(', ')', 'for', 'arr', 'in', 'arrs', ']', 'elif', 'not', 'isinstance', '(', 'arrs', '[', '0', ']', ',', 'np', '.', 'ndarray', ')', ':', 'arrs', '=', '[', 'np', '.', 'asarray', '(', 'ele', ')', 'for', 'ele', 'in', 'arrs', ']', 'else', ':', 'dtype', '=', 'arrs', '[', '0', ']', '.', 'dtype', 'if', 'dtype', 'is', 'None', 'else', 'dtype', 'original_length', '=', '[', 'ele', '.', 'shape', '[', 'pad_axis', ']', 'for', 'ele', 'in', 'arrs', ']', 'max_size', '=', 'max', '(', 'original_length', ')', 'ret_shape', '=', 'list', '(', 'arrs', '[', '0', ']', '.', 'shape', ')', 'ret_shape', '[', 'pad_axis', ']', '=', 'max_size', 'ret_shape', '=', '(', 'len', '(', 'arrs', ')', ',', ')', '+', 'tuple', '(', 'ret_shape', ')', 'ret', '=', 'np', '.', 'full', '(', 'shape', '=', 'ret_shape', ',', 'fill_value', '=', 'pad_val', ',', 'dtype', '=', 'dtype', ')', 'for', 'i', ',', 'arr', 'in', 'enumerate', '(', 'arrs', ')', ':', 'if', 'arr', '.', 'shape', '[', 'pad_axis', ']', '==', 'max_size', ':', 'ret', '[', 'i', ']', '=', 'arr', 'else', ':', 'slices', '=', '[', 'slice', '(', 'None', ')', 'for', '_', 'in', 'range', '(', 'arr', '.', 'ndim', ')', ']', 'slices', '[', 'pad_axis', ']', '=', 'slice', '(', '0', ',', 'arr', '.', 'shape', '[', 'pad_axis', ']', ')', 'if', 'slices', '[', 'pad_axis', ']', '.', 'start', '!=', 'slices', '[', 'pad_axis', ']', '.', 'stop', ':', 'slices', '=', '[', 'slice', '(', 'i', ',', 'i', '+', '1', ')', ']', '+', 'slices', 'ret', '[', 'tuple', '(', 'slices', ')', ']', '=', 'arr', 'ctx', '=', 'mx', '.', 'Context', '(', "'cpu_shared'", ',', '0', ')', 'if', 'use_shared_mem', 'else', 'mx', '.', 'cpu', '(', ')', 'ret', '=', 'mx', '.', 'nd', '.', 'array', '(', 'ret', ',', 'ctx', '=', 'ctx', ',', 'dtype', '=', 'dtype', ')', 'original_length', '=', 'mx', '.', 'nd', '.', 'array', '(', 'original_length', ',', 'ctx', '=', 'ctx', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'return', 'ret', ',', 'original_length'] | Inner Implementation of the Pad batchify
Parameters
----------
arrs : list
pad_axis : int
pad_val : number
use_shared_mem : bool, default False
Returns
-------
ret : NDArray
original_length : NDArray | ['Inner', 'Implementation', 'of', 'the', 'Pad', 'batchify'] | train | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/batchify/batchify.py#L29-L75 |
2,930 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/textio.py | Color.bk_default | def bk_default(cls):
"Make the current background color the default."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
#wAttributes |= win32.BACKGROUND_BLACK
wAttributes &= ~win32.BACKGROUND_INTENSITY
cls._set_text_attributes(wAttributes) | python | def bk_default(cls):
"Make the current background color the default."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
#wAttributes |= win32.BACKGROUND_BLACK
wAttributes &= ~win32.BACKGROUND_INTENSITY
cls._set_text_attributes(wAttributes) | ['def', 'bk_default', '(', 'cls', ')', ':', 'wAttributes', '=', 'cls', '.', '_get_text_attributes', '(', ')', 'wAttributes', '&=', '~', 'win32', '.', 'BACKGROUND_MASK', '#wAttributes |= win32.BACKGROUND_BLACK', 'wAttributes', '&=', '~', 'win32', '.', 'BACKGROUND_INTENSITY', 'cls', '.', '_set_text_attributes', '(', 'wAttributes', ')'] | Make the current background color the default. | ['Make', 'the', 'current', 'background', 'color', 'the', 'default', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1009-L1015 |
2,931 | Duke-GCB/DukeDSClient | ddsc/config.py | Config.add_properties | def add_properties(self, filename):
"""
Add properties to config based on filename replacing previous values.
:param filename: str path to YAML file to pull top level properties from
"""
filename = os.path.expanduser(filename)
if os.path.exists(filename):
with open(filename, 'r') as yaml_file:
self.update_properties(yaml.safe_load(yaml_file)) | python | def add_properties(self, filename):
"""
Add properties to config based on filename replacing previous values.
:param filename: str path to YAML file to pull top level properties from
"""
filename = os.path.expanduser(filename)
if os.path.exists(filename):
with open(filename, 'r') as yaml_file:
self.update_properties(yaml.safe_load(yaml_file)) | ['def', 'add_properties', '(', 'self', ',', 'filename', ')', ':', 'filename', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'filename', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'with', 'open', '(', 'filename', ',', "'r'", ')', 'as', 'yaml_file', ':', 'self', '.', 'update_properties', '(', 'yaml', '.', 'safe_load', '(', 'yaml_file', ')', ')'] | Add properties to config based on filename replacing previous values.
:param filename: str path to YAML file to pull top level properties from | ['Add', 'properties', 'to', 'config', 'based', 'on', 'filename', 'replacing', 'previous', 'values', '.', ':', 'param', 'filename', ':', 'str', 'path', 'to', 'YAML', 'file', 'to', 'pull', 'top', 'level', 'properties', 'from'] | train | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/config.py#L81-L89 |
2,932 | pkgw/pwkit | pwkit/msmt.py | Uval.from_pcount | def from_pcount(nevents):
"""We assume a Poisson process. nevents is the number of events in
some interval. The distribution of values is the distribution of the
Poisson rate parameter given this observed number of events, where the
"rate" is in units of events per interval of the same duration. The
max-likelihood value is nevents, but the mean value is nevents + 1.
The gamma distribution is obtained by assuming an improper, uniform
prior for the rate between 0 and infinity."""
if nevents < 0:
raise ValueError('Poisson parameter `nevents` must be nonnegative')
return Uval(np.random.gamma(nevents + 1, size=uval_nsamples)) | python | def from_pcount(nevents):
"""We assume a Poisson process. nevents is the number of events in
some interval. The distribution of values is the distribution of the
Poisson rate parameter given this observed number of events, where the
"rate" is in units of events per interval of the same duration. The
max-likelihood value is nevents, but the mean value is nevents + 1.
The gamma distribution is obtained by assuming an improper, uniform
prior for the rate between 0 and infinity."""
if nevents < 0:
raise ValueError('Poisson parameter `nevents` must be nonnegative')
return Uval(np.random.gamma(nevents + 1, size=uval_nsamples)) | ['def', 'from_pcount', '(', 'nevents', ')', ':', 'if', 'nevents', '<', '0', ':', 'raise', 'ValueError', '(', "'Poisson parameter `nevents` must be nonnegative'", ')', 'return', 'Uval', '(', 'np', '.', 'random', '.', 'gamma', '(', 'nevents', '+', '1', ',', 'size', '=', 'uval_nsamples', ')', ')'] | We assume a Poisson process. nevents is the number of events in
some interval. The distribution of values is the distribution of the
Poisson rate parameter given this observed number of events, where the
"rate" is in units of events per interval of the same duration. The
max-likelihood value is nevents, but the mean value is nevents + 1.
The gamma distribution is obtained by assuming an improper, uniform
prior for the rate between 0 and infinity. | ['We', 'assume', 'a', 'Poisson', 'process', '.', 'nevents', 'is', 'the', 'number', 'of', 'events', 'in', 'some', 'interval', '.', 'The', 'distribution', 'of', 'values', 'is', 'the', 'distribution', 'of', 'the', 'Poisson', 'rate', 'parameter', 'given', 'this', 'observed', 'number', 'of', 'events', 'where', 'the', 'rate', 'is', 'in', 'units', 'of', 'events', 'per', 'interval', 'of', 'the', 'same', 'duration', '.', 'The', 'max', '-', 'likelihood', 'value', 'is', 'nevents', 'but', 'the', 'mean', 'value', 'is', 'nevents', '+', '1', '.', 'The', 'gamma', 'distribution', 'is', 'obtained', 'by', 'assuming', 'an', 'improper', 'uniform', 'prior', 'for', 'the', 'rate', 'between', '0', 'and', 'infinity', '.'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/msmt.py#L353-L363 |
2,933 | readbeyond/aeneas | aeneas/textfile.py | TextFile.children_not_empty | def children_not_empty(self):
"""
Return the direct not empty children of the root of the fragments tree,
as ``TextFile`` objects.
:rtype: list of :class:`~aeneas.textfile.TextFile`
"""
children = []
for child_node in self.fragments_tree.children_not_empty:
child_text_file = self.get_subtree(child_node)
child_text_file.set_language(child_node.value.language)
children.append(child_text_file)
return children | python | def children_not_empty(self):
"""
Return the direct not empty children of the root of the fragments tree,
as ``TextFile`` objects.
:rtype: list of :class:`~aeneas.textfile.TextFile`
"""
children = []
for child_node in self.fragments_tree.children_not_empty:
child_text_file = self.get_subtree(child_node)
child_text_file.set_language(child_node.value.language)
children.append(child_text_file)
return children | ['def', 'children_not_empty', '(', 'self', ')', ':', 'children', '=', '[', ']', 'for', 'child_node', 'in', 'self', '.', 'fragments_tree', '.', 'children_not_empty', ':', 'child_text_file', '=', 'self', '.', 'get_subtree', '(', 'child_node', ')', 'child_text_file', '.', 'set_language', '(', 'child_node', '.', 'value', '.', 'language', ')', 'children', '.', 'append', '(', 'child_text_file', ')', 'return', 'children'] | Return the direct not empty children of the root of the fragments tree,
as ``TextFile`` objects.
:rtype: list of :class:`~aeneas.textfile.TextFile` | ['Return', 'the', 'direct', 'not', 'empty', 'children', 'of', 'the', 'root', 'of', 'the', 'fragments', 'tree', 'as', 'TextFile', 'objects', '.'] | train | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/textfile.py#L455-L467 |
2,934 | saltstack/salt | salt/states/boto_apigateway.py | _Swagger.deploy_models | def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need update with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret | python | def deploy_models(self, ret):
'''
Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack
'''
for model, schema in self.models():
# add in a few attributes into the model schema that AWS expects
# _schema = schema.copy()
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
'title': '{0} Schema'.format(model)})
# check to see if model already exists, aws has 2 default models [Empty, Error]
# which may need update with data from swagger file
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
modelName=model,
**self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = (
__salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
modelName=model,
schema=_dict_to_json_pretty(_schema),
**self._common_aws_args))
if not update_model_schema_response.get('updated'):
ret['result'] = False
ret['abort'] = True
if 'error' in update_model_schema_response:
ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
update_model_schema_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = (
__salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model,
modelDescription=model,
schema=_dict_to_json_pretty(_schema),
contentType='application/json',
**self._common_aws_args))
if not create_model_response.get('created'):
ret['result'] = False
ret['abort'] = True
if 'error' in create_model_response:
ret['comment'] = ('Failed to create model {0}, schema {1}, '
'error: {2}'.format(model, _dict_to_json_pretty(schema),
create_model_response['error']['message']))
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret | ['def', 'deploy_models', '(', 'self', ',', 'ret', ')', ':', 'for', 'model', ',', 'schema', 'in', 'self', '.', 'models', '(', ')', ':', '# add in a few attributes into the model schema that AWS expects', '# _schema = schema.copy()', '_schema', '=', 'self', '.', '_update_schema_to_aws_notation', '(', 'schema', ')', '_schema', '.', 'update', '(', '{', "'$schema'", ':', '_Swagger', '.', 'JSON_SCHEMA_DRAFT_4', ',', "'title'", ':', "'{0} Schema'", '.', 'format', '(', 'model', ')', '}', ')', '# check to see if model already exists, aws has 2 default models [Empty, Error]', '# which may need upate with data from swagger file', 'model_exists_response', '=', '__salt__', '[', "'boto_apigateway.api_model_exists'", ']', '(', 'restApiId', '=', 'self', '.', 'restApiId', ',', 'modelName', '=', 'model', ',', '*', '*', 'self', '.', '_common_aws_args', ')', 'if', 'model_exists_response', '.', 'get', '(', "'exists'", ')', ':', 'update_model_schema_response', '=', '(', '__salt__', '[', "'boto_apigateway.update_api_model_schema'", ']', '(', 'restApiId', '=', 'self', '.', 'restApiId', ',', 'modelName', '=', 'model', ',', 'schema', '=', '_dict_to_json_pretty', '(', '_schema', ')', ',', '*', '*', 'self', '.', '_common_aws_args', ')', ')', 'if', 'not', 'update_model_schema_response', '.', 'get', '(', "'updated'", ')', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'abort'", ']', '=', 'True', 'if', "'error'", 'in', 'update_model_schema_response', ':', 'ret', '[', "'comment'", ']', '=', '(', "'Failed to update existing model {0} with schema {1}, '", "'error: {2}'", '.', 'format', '(', 'model', ',', '_dict_to_json_pretty', '(', 'schema', ')', ',', 'update_model_schema_response', '[', "'error'", ']', '[', "'message'", ']', ')', ')', 'return', 'ret', 'ret', '=', '_log_changes', '(', 'ret', ',', "'deploy_models'", ',', 'update_model_schema_response', ')', 'else', ':', 'create_model_response', '=', '(', '__salt__', '[', "'boto_apigateway.create_api_model'", ']', '(', 'restApiId', '=', 'self', '.', 'restApiId', ',', 'modelName', '=', 'model', ',', 'modelDescription', '=', 'model', ',', 'schema', '=', '_dict_to_json_pretty', '(', '_schema', ')', ',', 'contentType', '=', "'application/json'", ',', '*', '*', 'self', '.', '_common_aws_args', ')', ')', 'if', 'not', 'create_model_response', '.', 'get', '(', "'created'", ')', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'abort'", ']', '=', 'True', 'if', "'error'", 'in', 'create_model_response', ':', 'ret', '[', "'comment'", ']', '=', '(', "'Failed to create model {0}, schema {1}, '", "'error: {2}'", '.', 'format', '(', 'model', ',', '_dict_to_json_pretty', '(', 'schema', ')', ',', 'create_model_response', '[', "'error'", ']', '[', "'message'", ']', ')', ')', 'return', 'ret', 'ret', '=', '_log_changes', '(', 'ret', ',', "'deploy_models'", ',', 'create_model_response', ')', 'return', 'ret'] | Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack | ['Method', 'to', 'deploy', 'swagger', 'file', 's', 'definition', 'objects', 'and', 'associated', 'schema', 'to', 'AWS', 'Apigateway', 'as', 'Models'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1360-L1416 |
2,935 | saltstack/salt | salt/modules/selinux.py | getenforce | def getenforce():
'''
Return the mode selinux is running in
CLI Example:
.. code-block:: bash
salt '*' selinux.getenforce
'''
_selinux_fs_path = selinux_fs_path()
if _selinux_fs_path is None:
return 'Disabled'
try:
enforce = os.path.join(_selinux_fs_path, 'enforce')
with salt.utils.files.fopen(enforce, 'r') as _fp:
if salt.utils.stringutils.to_unicode(_fp.readline()).strip() == '0':
return 'Permissive'
else:
return 'Enforcing'
except (IOError, OSError, AttributeError):
return 'Disabled' | python | def getenforce():
'''
Return the mode selinux is running in
CLI Example:
.. code-block:: bash
salt '*' selinux.getenforce
'''
_selinux_fs_path = selinux_fs_path()
if _selinux_fs_path is None:
return 'Disabled'
try:
enforce = os.path.join(_selinux_fs_path, 'enforce')
with salt.utils.files.fopen(enforce, 'r') as _fp:
if salt.utils.stringutils.to_unicode(_fp.readline()).strip() == '0':
return 'Permissive'
else:
return 'Enforcing'
except (IOError, OSError, AttributeError):
return 'Disabled' | ['def', 'getenforce', '(', ')', ':', '_selinux_fs_path', '=', 'selinux_fs_path', '(', ')', 'if', '_selinux_fs_path', 'is', 'None', ':', 'return', "'Disabled'", 'try', ':', 'enforce', '=', 'os', '.', 'path', '.', 'join', '(', '_selinux_fs_path', ',', "'enforce'", ')', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'enforce', ',', "'r'", ')', 'as', '_fp', ':', 'if', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', '_fp', '.', 'readline', '(', ')', ')', '.', 'strip', '(', ')', '==', "'0'", ':', 'return', "'Permissive'", 'else', ':', 'return', "'Enforcing'", 'except', '(', 'IOError', ',', 'OSError', ',', 'AttributeError', ')', ':', 'return', "'Disabled'"] | Return the mode selinux is running in
CLI Example:
.. code-block:: bash
salt '*' selinux.getenforce | ['Return', 'the', 'mode', 'selinux', 'is', 'running', 'in'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/selinux.py#L85-L106 |
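A minimal standalone sketch of the check in the record above, assuming the usual `/sys/fs/selinux` mount point and a plain `open()` call in place of the salt utility wrappers:

```python
import os

def getenforce(selinux_fs="/sys/fs/selinux"):
    # Mirrors the record above: '0' in the enforce pseudo-file means Permissive,
    # anything else Enforcing, and a missing/unreadable file means Disabled.
    try:
        with open(os.path.join(selinux_fs, "enforce")) as fp:
            return "Permissive" if fp.readline().strip() == "0" else "Enforcing"
    except OSError:
        return "Disabled"

print(getenforce())
```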
2,936 | nicolargo/glances | glances/stats.py | GlancesStats.load_plugins | def load_plugins(self, args=None):
"""Load all plugins in the 'plugins' folder."""
for item in os.listdir(plugins_path):
if (item.startswith(self.header) and
item.endswith(".py") and
item != (self.header + "plugin.py")):
# Load the plugin
self._load_plugin(os.path.basename(item),
args=args, config=self.config)
# Log plugins list
logger.debug("Active plugins list: {}".format(self.getPluginsList())) | python | def load_plugins(self, args=None):
"""Load all plugins in the 'plugins' folder."""
for item in os.listdir(plugins_path):
if (item.startswith(self.header) and
item.endswith(".py") and
item != (self.header + "plugin.py")):
# Load the plugin
self._load_plugin(os.path.basename(item),
args=args, config=self.config)
# Log plugins list
logger.debug("Active plugins list: {}".format(self.getPluginsList())) | ['def', 'load_plugins', '(', 'self', ',', 'args', '=', 'None', ')', ':', 'for', 'item', 'in', 'os', '.', 'listdir', '(', 'plugins_path', ')', ':', 'if', '(', 'item', '.', 'startswith', '(', 'self', '.', 'header', ')', 'and', 'item', '.', 'endswith', '(', '".py"', ')', 'and', 'item', '!=', '(', 'self', '.', 'header', '+', '"plugin.py"', ')', ')', ':', '# Load the plugin', 'self', '.', '_load_plugin', '(', 'os', '.', 'path', '.', 'basename', '(', 'item', ')', ',', 'args', '=', 'args', ',', 'config', '=', 'self', '.', 'config', ')', '# Log plugins list', 'logger', '.', 'debug', '(', '"Active plugins list: {}"', '.', 'format', '(', 'self', '.', 'getPluginsList', '(', ')', ')', ')'] | Load all plugins in the 'plugins' folder. | ['Load', 'all', 'plugins', 'in', 'the', 'plugins', 'folder', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats.py#L131-L142 |
2,937 | krukas/Trionyx | trionyx/navigation.py | TabRegister.update | def update(self, model_alias, code='general', name=None, order=None, display_filter=None):
"""
Update given tab
:param model_alias:
:param code:
:param name:
:param order:
:param display_filter:
:return:
"""
model_alias = self.get_model_alias(model_alias)
for item in self.tabs[model_alias]:
if item.code != code:
continue
if name:
item.name = name
if order:
item.order = order
if display_filter:
item.display_filter = display_filter
break
self.tabs[model_alias] = sorted(self.tabs[model_alias], key=lambda item: item.code if item.code else 999) | python | def update(self, model_alias, code='general', name=None, order=None, display_filter=None):
"""
Update given tab
:param model_alias:
:param code:
:param name:
:param order:
:param display_filter:
:return:
"""
model_alias = self.get_model_alias(model_alias)
for item in self.tabs[model_alias]:
if item.code != code:
continue
if name:
item.name = name
if order:
item.order = order
if display_filter:
item.display_filter = display_filter
break
self.tabs[model_alias] = sorted(self.tabs[model_alias], key=lambda item: item.code if item.code else 999) | ['def', 'update', '(', 'self', ',', 'model_alias', ',', 'code', '=', "'general'", ',', 'name', '=', 'None', ',', 'order', '=', 'None', ',', 'display_filter', '=', 'None', ')', ':', 'model_alias', '=', 'self', '.', 'get_model_alias', '(', 'model_alias', ')', 'for', 'item', 'in', 'self', '.', 'tabs', '[', 'model_alias', ']', ':', 'if', 'item', '.', 'code', '!=', 'code', ':', 'continue', 'if', 'name', ':', 'item', '.', 'name', '=', 'name', 'if', 'order', ':', 'item', '.', 'order', '=', 'order', 'if', 'display_filter', ':', 'item', '.', 'display_filter', '=', 'display_filter', 'break', 'self', '.', 'tabs', '[', 'model_alias', ']', '=', 'sorted', '(', 'self', '.', 'tabs', '[', 'model_alias', ']', ',', 'key', '=', 'lambda', 'item', ':', 'item', '.', 'code', 'if', 'item', '.', 'code', 'else', '999', ')'] | Update given tab
:param model_alias:
:param code:
:param name:
:param order:
:param display_filter:
:return: | ['Update', 'given', 'tab'] | train | https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/navigation.py#L285-L307 |
2,938 | pandas-dev/pandas | pandas/io/excel/_openpyxl.py | _OpenpyxlWriter._convert_to_color | def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec) | python | def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec) | ['def', '_convert_to_color', '(', 'cls', ',', 'color_spec', ')', ':', 'from', 'openpyxl', '.', 'styles', 'import', 'Color', 'if', 'isinstance', '(', 'color_spec', ',', 'str', ')', ':', 'return', 'Color', '(', 'color_spec', ')', 'else', ':', 'return', 'Color', '(', '*', '*', 'color_spec', ')'] | Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color | ['Convert', 'color_spec', 'to', 'an', 'openpyxl', 'v2', 'Color', 'object', 'Parameters', '----------', 'color_spec', ':', 'str', 'dict', 'A', '32', '-', 'bit', 'ARGB', 'hex', 'string', 'or', 'a', 'dict', 'with', 'zero', 'or', 'more', 'of', 'the', 'following', 'keys', '.', 'rgb', 'indexed', 'auto', 'theme', 'tint', 'index', 'type', 'Returns', '-------', 'color', ':', 'openpyxl', '.', 'styles', '.', 'Color'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_openpyxl.py#L98-L123 |
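A hedged usage sketch of the two accepted `color_spec` forms, assuming `openpyxl` is installed; the ARGB value and theme/tint numbers are made up:

```python
from openpyxl.styles import Color

def to_color(color_spec):
    # String form: a 32-bit ARGB hex value; dict form: keyword arguments for Color.
    if isinstance(color_spec, str):
        return Color(color_spec)
    return Color(**color_spec)

print(to_color("FF00B050"))                 # opaque green, ARGB hex
print(to_color({"theme": 1, "tint": 0.4}))  # theme colour with a tint
```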
2,939 | wonambi-python/wonambi | wonambi/utils/simulate.py | create_channels | def create_channels(chan_name=None, n_chan=None):
"""Create instance of Channels with random xyz coordinates
Parameters
----------
chan_name : list of str
names of the channels
n_chan : int
if chan_name is not specified, this defines the number of channels
Returns
-------
instance of Channels
where the location of the channels is random
"""
if chan_name is not None:
n_chan = len(chan_name)
elif n_chan is not None:
chan_name = _make_chan_name(n_chan)
else:
raise TypeError('You need to specify either the channel names (chan_name) or the number of channels (n_chan)')
xyz = round(random.randn(n_chan, 3) * 10, decimals=2)
return Channels(chan_name, xyz) | python | def create_channels(chan_name=None, n_chan=None):
"""Create instance of Channels with random xyz coordinates
Parameters
----------
chan_name : list of str
names of the channels
n_chan : int
if chan_name is not specified, this defines the number of channels
Returns
-------
instance of Channels
where the location of the channels is random
"""
if chan_name is not None:
n_chan = len(chan_name)
elif n_chan is not None:
chan_name = _make_chan_name(n_chan)
else:
raise TypeError('You need to specify either the channel names (chan_name) or the number of channels (n_chan)')
xyz = round(random.randn(n_chan, 3) * 10, decimals=2)
return Channels(chan_name, xyz) | ['def', 'create_channels', '(', 'chan_name', '=', 'None', ',', 'n_chan', '=', 'None', ')', ':', 'if', 'chan_name', 'is', 'not', 'None', ':', 'n_chan', '=', 'len', '(', 'chan_name', ')', 'elif', 'n_chan', 'is', 'not', 'None', ':', 'chan_name', '=', '_make_chan_name', '(', 'n_chan', ')', 'else', ':', 'raise', 'TypeError', '(', "'You need to specify either the channel names (chan_name) or the number of channels (n_chan)'", ')', 'xyz', '=', 'round', '(', 'random', '.', 'randn', '(', 'n_chan', ',', '3', ')', '*', '10', ',', 'decimals', '=', '2', ')', 'return', 'Channels', '(', 'chan_name', ',', 'xyz', ')'] | Create instance of Channels with random xyz coordinates
Parameters
----------
chan_name : list of str
names of the channels
n_chan : int
if chan_name is not specified, this defines the number of channels
Returns
-------
instance of Channels
where the location of the channels is random | ['Create', 'instance', 'of', 'Channels', 'with', 'random', 'xyz', 'coordinates'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/utils/simulate.py#L145-L170 |
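A minimal usage sketch, assuming `wonambi` is installed; the channel labels are placeholders:

```python
from wonambi.utils.simulate import create_channels

chan = create_channels(chan_name=["Fz", "Cz", "Pz"])  # explicit labels, random xyz
chan_auto = create_channels(n_chan=4)                  # labels generated internally
```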
2,940 | thomasballinger/trellocardupdate | trellocardupdate/trelloupdate.py | TrelloUpdater.card_names_and_ids | def card_names_and_ids(self):
"""Returns [(name, id), ...] pairs of cards from current board"""
b = Board(self.client, self.board_id)
cards = b.getCards()
card_names_and_ids = [(unidecode(c.name), c.id) for c in cards]
return card_names_and_ids | python | def card_names_and_ids(self):
"""Returns [(name, id), ...] pairs of cards from current board"""
b = Board(self.client, self.board_id)
cards = b.getCards()
card_names_and_ids = [(unidecode(c.name), c.id) for c in cards]
return card_names_and_ids | ['def', 'card_names_and_ids', '(', 'self', ')', ':', 'b', '=', 'Board', '(', 'self', '.', 'client', ',', 'self', '.', 'board_id', ')', 'cards', '=', 'b', '.', 'getCards', '(', ')', 'card_names_and_ids', '=', '[', '(', 'unidecode', '(', 'c', '.', 'name', ')', ',', 'c', '.', 'id', ')', 'for', 'c', 'in', 'cards', ']', 'return', 'card_names_and_ids'] | Returns [(name, id), ...] pairs of cards from current board | ['Returns', '[', '(', 'name', 'id', ')', '...', ']', 'pairs', 'of', 'cards', 'from', 'current', 'board'] | train | https://github.com/thomasballinger/trellocardupdate/blob/16a648fa15efef144c07cd56fcdb1d8920fac889/trellocardupdate/trelloupdate.py#L128-L133 |
2,941 | pypa/bandersnatch | src/bandersnatch/utils.py | update_safe | def update_safe(filename: str, **kw: Any) -> Generator[IO, None, None]:
"""Rewrite a file atomically.
Clients are allowed to delete the tmpfile to signal that they don't
want to have it updated.
"""
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(filename),
delete=False,
prefix=f"{os.path.basename(filename)}.",
**kw,
) as tf:
if os.path.exists(filename):
os.chmod(tf.name, os.stat(filename).st_mode & 0o7777)
tf.has_changed = False # type: ignore
yield tf
if not os.path.exists(tf.name):
return
filename_tmp = tf.name
if os.path.exists(filename) and filecmp.cmp(filename, filename_tmp, shallow=False):
os.unlink(filename_tmp)
else:
os.rename(filename_tmp, filename)
tf.has_changed = True | python | def update_safe(filename: str, **kw: Any) -> Generator[IO, None, None]:
"""Rewrite a file atomically.
Clients are allowed to delete the tmpfile to signal that they don't
want to have it updated.
"""
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(filename),
delete=False,
prefix=f"{os.path.basename(filename)}.",
**kw,
) as tf:
if os.path.exists(filename):
os.chmod(tf.name, os.stat(filename).st_mode & 0o7777)
tf.has_changed = False # type: ignore
yield tf
if not os.path.exists(tf.name):
return
filename_tmp = tf.name
if os.path.exists(filename) and filecmp.cmp(filename, filename_tmp, shallow=False):
os.unlink(filename_tmp)
else:
os.rename(filename_tmp, filename)
tf.has_changed = True | ['def', 'update_safe', '(', 'filename', ':', 'str', ',', '*', '*', 'kw', ':', 'Any', ')', '->', 'Generator', '[', 'IO', ',', 'None', ',', 'None', ']', ':', 'with', 'tempfile', '.', 'NamedTemporaryFile', '(', 'dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', ',', 'delete', '=', 'False', ',', 'prefix', '=', 'f"{os.path.basename(filename)}."', ',', '*', '*', 'kw', ',', ')', 'as', 'tf', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'os', '.', 'chmod', '(', 'tf', '.', 'name', ',', 'os', '.', 'stat', '(', 'filename', ')', '.', 'st_mode', '&', '0o7777', ')', 'tf', '.', 'has_changed', '=', 'False', '# type: ignore', 'yield', 'tf', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'tf', '.', 'name', ')', ':', 'return', 'filename_tmp', '=', 'tf', '.', 'name', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', 'and', 'filecmp', '.', 'cmp', '(', 'filename', ',', 'filename_tmp', ',', 'shallow', '=', 'False', ')', ':', 'os', '.', 'unlink', '(', 'filename_tmp', ')', 'else', ':', 'os', '.', 'rename', '(', 'filename_tmp', ',', 'filename', ')', 'tf', '.', 'has_changed', '=', 'True'] | Rewrite a file atomically.
Clients are allowed to delete the tmpfile to signal that they don't
want to have it updated. | ['Rewrite', 'a', 'file', 'atomically', '.'] | train | https://github.com/pypa/bandersnatch/blob/8b702c3bc128c5a1cbdd18890adede2f7f17fad4/src/bandersnatch/utils.py#L121-L145 |
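A usage sketch for the atomic-rewrite helper above; the target path and the text-mode keyword arguments are assumptions (extra keywords are forwarded to `NamedTemporaryFile`):

```python
from bandersnatch.utils import update_safe

# The temp file replaces /tmp/example.txt only when the new content differs;
# otherwise it is discarded and the original file is left untouched.
with update_safe("/tmp/example.txt", mode="w", encoding="utf-8") as tf:
    tf.write("new contents\n")
```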
2,942 | mikedh/trimesh | trimesh/exchange/stl.py | load_stl_ascii | def load_stl_ascii(file_obj):
"""
Load an ASCII STL file from a file object.
Parameters
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
"""
# the first line is the header
header = file_obj.readline()
# make sure header is a string, not bytes
if hasattr(header, 'decode'):
try:
header = header.decode('utf-8')
except BaseException:
header = ''
# save header to metadata
metadata = {'header': header}
# read all text into one string
text = file_obj.read()
# convert bytes to string
if hasattr(text, 'decode'):
text = text.decode('utf-8')
# split by endsolid keyword
text = text.lower().split('endsolid')[0]
# create array of splits
blob = np.array(text.strip().split())
# there are 21 'words' in each face
face_len = 21
# length of blob should be multiple of face_len
if (len(blob) % face_len) != 0:
raise HeaderError('Incorrect length STL file!')
face_count = int(len(blob) / face_len)
# this offset is to be added to a fixed set of tiled indices
offset = face_len * np.arange(face_count).reshape((-1, 1))
normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
vertex_index = np.tile([8, 9, 10,
12, 13, 14,
16, 17, 18], (face_count, 1)) + offset
# faces are groups of three sequential vertices
faces = np.arange(face_count * 3).reshape((-1, 3))
face_normals = blob[normal_index].astype('<f8')
vertices = blob[vertex_index.reshape((-1, 3))].astype('<f8')
return {'vertices': vertices,
'faces': faces,
'metadata': metadata,
'face_normals': face_normals} | python | def load_stl_ascii(file_obj):
"""
Load an ASCII STL file from a file object.
Parameters
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
"""
# the first line is the header
header = file_obj.readline()
# make sure header is a string, not bytes
if hasattr(header, 'decode'):
try:
header = header.decode('utf-8')
except BaseException:
header = ''
# save header to metadata
metadata = {'header': header}
# read all text into one string
text = file_obj.read()
# convert bytes to string
if hasattr(text, 'decode'):
text = text.decode('utf-8')
# split by endsolid keyword
text = text.lower().split('endsolid')[0]
# create array of splits
blob = np.array(text.strip().split())
# there are 21 'words' in each face
face_len = 21
# length of blob should be multiple of face_len
if (len(blob) % face_len) != 0:
raise HeaderError('Incorrect length STL file!')
face_count = int(len(blob) / face_len)
# this offset is to be added to a fixed set of tiled indices
offset = face_len * np.arange(face_count).reshape((-1, 1))
normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
vertex_index = np.tile([8, 9, 10,
12, 13, 14,
16, 17, 18], (face_count, 1)) + offset
# faces are groups of three sequential vertices
faces = np.arange(face_count * 3).reshape((-1, 3))
face_normals = blob[normal_index].astype('<f8')
vertices = blob[vertex_index.reshape((-1, 3))].astype('<f8')
return {'vertices': vertices,
'faces': faces,
'metadata': metadata,
'face_normals': face_normals} | ['def', 'load_stl_ascii', '(', 'file_obj', ')', ':', '# the first line is the header', 'header', '=', 'file_obj', '.', 'readline', '(', ')', '# make sure header is a string, not bytes', 'if', 'hasattr', '(', 'header', ',', "'decode'", ')', ':', 'try', ':', 'header', '=', 'header', '.', 'decode', '(', "'utf-8'", ')', 'except', 'BaseException', ':', 'header', '=', "''", '# save header to metadata', 'metadata', '=', '{', "'header'", ':', 'header', '}', '# read all text into one string', 'text', '=', 'file_obj', '.', 'read', '(', ')', '# convert bytes to string', 'if', 'hasattr', '(', 'text', ',', "'decode'", ')', ':', 'text', '=', 'text', '.', 'decode', '(', "'utf-8'", ')', '# split by endsolid keyword', 'text', '=', 'text', '.', 'lower', '(', ')', '.', 'split', '(', "'endsolid'", ')', '[', '0', ']', '# create array of splits', 'blob', '=', 'np', '.', 'array', '(', 'text', '.', 'strip', '(', ')', '.', 'split', '(', ')', ')', "# there are 21 'words' in each face", 'face_len', '=', '21', '# length of blob should be multiple of face_len', 'if', '(', 'len', '(', 'blob', ')', '%', 'face_len', ')', '!=', '0', ':', 'raise', 'HeaderError', '(', "'Incorrect length STL file!'", ')', 'face_count', '=', 'int', '(', 'len', '(', 'blob', ')', '/', 'face_len', ')', '# this offset is to be added to a fixed set of tiled indices', 'offset', '=', 'face_len', '*', 'np', '.', 'arange', '(', 'face_count', ')', '.', 'reshape', '(', '(', '-', '1', ',', '1', ')', ')', 'normal_index', '=', 'np', '.', 'tile', '(', '[', '2', ',', '3', ',', '4', ']', ',', '(', 'face_count', ',', '1', ')', ')', '+', 'offset', 'vertex_index', '=', 'np', '.', 'tile', '(', '[', '8', ',', '9', ',', '10', ',', '12', ',', '13', ',', '14', ',', '16', ',', '17', ',', '18', ']', ',', '(', 'face_count', ',', '1', ')', ')', '+', 'offset', '# faces are groups of three sequential vertices', 'faces', '=', 'np', '.', 'arange', '(', 'face_count', '*', '3', ')', '.', 'reshape', '(', '(', '-', '1', ',', '3', ')', ')', 'face_normals', '=', 'blob', '[', 'normal_index', ']', '.', 'astype', '(', "'<f8'", ')', 'vertices', '=', 'blob', '[', 'vertex_index', '.', 'reshape', '(', '(', '-', '1', ',', '3', ')', ')', ']', '.', 'astype', '(', "'<f8'", ')', 'return', '{', "'vertices'", ':', 'vertices', ',', "'faces'", ':', 'faces', ',', "'metadata'", ':', 'metadata', ',', "'face_normals'", ':', 'face_normals', '}'] | Load an ASCII STL file from a file object.
Parameters
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face | ['Load', 'an', 'ASCII', 'STL', 'file', 'from', 'a', 'file', 'object', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/stl.py#L126-L186 |
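A hedged usage sketch: parse an ASCII STL from an open file object into Trimesh constructor keyword arguments. `model.stl` is a placeholder path and `trimesh` is assumed to be installed at the version shown in the record:

```python
from trimesh.exchange.stl import load_stl_ascii

with open("model.stl", "r") as f:   # any ASCII STL file
    kwargs = load_stl_ascii(f)

print(kwargs["vertices"].shape, kwargs["faces"].shape)
```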
2,943 | openego/eTraGo | etrago/tools/utilities.py | add_missing_components | def add_missing_components(network):
# Munich
"""Add missing transformer at Heizkraftwerk Nord in Munich and missing
transformer in Stuttgart
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA
"""
"""https://www.swm.de/privatkunden/unternehmen/energieerzeugung/heizkraftwerke.html?utm_medium=301
to bus 25096:
25369 (86)
28232 (24)
25353 to 25356 (79)
to bus 23822: (110kV bus of 380/110-kV-transformer)
25355 (90)
28212 (98)
25357 to 665 (85)
25354 to 27414 (30)
27414 to 28212 (33)
25354 to 28294 (32/63)
28335 to 28294 (64)
28335 to 28139 (28)
Overhead lines:
16573 to 24182 (part of 4)
"""
"""
Installierte Leistung der Umspannungsebene Höchst- zu Hochspannung
(380 kV / 110 kV): 2.750.000 kVA
https://www.swm-infrastruktur.de/strom/netzstrukturdaten/strukturmerkmale.html
"""
new_trafo = str(network.transformers.index.astype(int).max() + 1)
network.add("Transformer", new_trafo, bus0="16573", bus1="23648",
x=0.135 / (2750 / 2),
r=0.0, tap_ratio=1, s_nom=2750 / 2)
def add_110kv_line(bus0, bus1, overhead=False):
new_line = str(network.lines.index.astype(int).max() + 1)
if not overhead:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=280)
else:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=260)
network.lines.loc[new_line, "scn_name"] = "Status Quo"
network.lines.loc[new_line, "v_nom"] = 110
network.lines.loc[new_line, "version"] = "added_manually"
network.lines.loc[new_line, "frequency"] = 50
network.lines.loc[new_line, "cables"] = 3.0
network.lines.loc[new_line, "country"] = 'DE'
network.lines.loc[new_line, "length"] = (
pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]],
network.buses.loc[bus1, ["x", "y"]])
[0][0] * 1.2)
if not overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.0177)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*78e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
0.3e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
250e-9)
elif overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.05475)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*40e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
1.2e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
9.5e-9)
add_110kv_line("16573", "28353")
add_110kv_line("16573", "28092")
add_110kv_line("25096", "25369")
add_110kv_line("25096", "28232")
add_110kv_line("25353", "25356")
add_110kv_line("23822", "25355")
add_110kv_line("23822", "28212")
add_110kv_line("25357", "665")
add_110kv_line("25354", "27414")
add_110kv_line("27414", "28212")
add_110kv_line("25354", "28294")
add_110kv_line("28335", "28294")
add_110kv_line("28335", "28139")
add_110kv_line("16573", "24182", overhead=True)
# Stuttgart
"""
Stuttgart:
Missing transformer, because 110-kV-bus is situated outside
Heizkraftwerk Heilbronn:
"""
# new_trafo = str(network.transformers.index.astype(int).max()1)
network.add("Transformer", '99999', bus0="18967", bus1="25766",
x=0.135 / 300, r=0.0, tap_ratio=1, s_nom=300)
"""
According to:
https://assets.ctfassets.net/xytfb1vrn7of/NZO8x4rKesAcYGGcG4SQg/b780d6a3ca4c2600ab51a30b70950bb1/netzschemaplan-110-kv.pdf
the following lines are missing:
"""
add_110kv_line("18967", "22449", overhead=True) # visible in OSM & DSO map
add_110kv_line("21165", "24068", overhead=True) # visible in OSM & DSO map
add_110kv_line("23782", "24089", overhead=True)
# visible in DSO map & OSM till 1 km from bus1
"""
Umspannwerk Möhringen (bus 23697)
https://de.wikipedia.org/wiki/Umspannwerk_M%C3%B6hringen
there should be two connections:
to Sindelfingen (2*110kV)
to Wendingen (former 220kV, now 2*110kV)
the line to Sindelfingen is connected, but the connection of Sindelfingen
itself to 380kV is missing:
"""
add_110kv_line("19962", "27671", overhead=True) # visible in OSM & DSO map
add_110kv_line("19962", "27671", overhead=True)
"""
line to Wendingen is missing, probably because it ends shortly before the
way of the substation and is connected via cables:
"""
add_110kv_line("23697", "24090", overhead=True) # visible in OSM & DSO map
add_110kv_line("23697", "24090", overhead=True)
# Lehrte
"""
Lehrte: 220kV Bus located outsinde way of Betriebszentrtum Lehrte and
therefore not connected:
"""
def add_220kv_line(bus0, bus1, overhead=False):
new_line = str(network.lines.index.astype(int).max() + 1)
if not overhead:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=550)
else:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=520)
network.lines.loc[new_line, "scn_name"] = "Status Quo"
network.lines.loc[new_line, "v_nom"] = 220
network.lines.loc[new_line, "version"] = "added_manually"
network.lines.loc[new_line, "frequency"] = 50
network.lines.loc[new_line, "cables"] = 3.0
network.lines.loc[new_line, "country"] = 'DE'
network.lines.loc[new_line, "length"] = (
pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]],
network.buses.loc[bus1, ["x", "y"]])[0][0] *
1.2)
if not overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.0176)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*67e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
0.3e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
210e-9)
elif overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.05475)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*30e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] * 1e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] * 11e-9
)
add_220kv_line("266", "24633", overhead=True)
# temporary turn buses of transformers
network.transformers["v_nom0"] = network.transformers.bus0.map(
network.buses.v_nom)
network.transformers["v_nom1"] = network.transformers.bus1.map(
network.buses.v_nom)
new_bus0 = network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1]
new_bus1 = network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1]
network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus0.values
network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus1.values
return network | python | def add_missing_components(network):
# Munich
"""Add missing transformer at Heizkraftwerk Nord in Munich and missing
transformer in Stuttgart
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA
"""
"""https://www.swm.de/privatkunden/unternehmen/energieerzeugung/heizkraftwerke.html?utm_medium=301
to bus 25096:
25369 (86)
28232 (24)
25353 to 25356 (79)
to bus 23822: (110kV bus of 380/110-kV-transformer)
25355 (90)
28212 (98)
25357 to 665 (85)
25354 to 27414 (30)
27414 to 28212 (33)
25354 to 28294 (32/63)
28335 to 28294 (64)
28335 to 28139 (28)
Overhead lines:
16573 to 24182 (part of 4)
"""
"""
Installierte Leistung der Umspannungsebene Höchst- zu Hochspannung
(380 kV / 110 kV): 2.750.000 kVA
https://www.swm-infrastruktur.de/strom/netzstrukturdaten/strukturmerkmale.html
"""
new_trafo = str(network.transformers.index.astype(int).max() + 1)
network.add("Transformer", new_trafo, bus0="16573", bus1="23648",
x=0.135 / (2750 / 2),
r=0.0, tap_ratio=1, s_nom=2750 / 2)
def add_110kv_line(bus0, bus1, overhead=False):
new_line = str(network.lines.index.astype(int).max() + 1)
if not overhead:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=280)
else:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=260)
network.lines.loc[new_line, "scn_name"] = "Status Quo"
network.lines.loc[new_line, "v_nom"] = 110
network.lines.loc[new_line, "version"] = "added_manually"
network.lines.loc[new_line, "frequency"] = 50
network.lines.loc[new_line, "cables"] = 3.0
network.lines.loc[new_line, "country"] = 'DE'
network.lines.loc[new_line, "length"] = (
pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]],
network.buses.loc[bus1, ["x", "y"]])
[0][0] * 1.2)
if not overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.0177)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*78e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
0.3e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
250e-9)
elif overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.05475)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*40e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
1.2e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
9.5e-9)
add_110kv_line("16573", "28353")
add_110kv_line("16573", "28092")
add_110kv_line("25096", "25369")
add_110kv_line("25096", "28232")
add_110kv_line("25353", "25356")
add_110kv_line("23822", "25355")
add_110kv_line("23822", "28212")
add_110kv_line("25357", "665")
add_110kv_line("25354", "27414")
add_110kv_line("27414", "28212")
add_110kv_line("25354", "28294")
add_110kv_line("28335", "28294")
add_110kv_line("28335", "28139")
add_110kv_line("16573", "24182", overhead=True)
# Stuttgart
"""
Stuttgart:
Missing transformer, because 110-kV-bus is situated outside
Heizkraftwerk Heilbronn:
"""
# new_trafo = str(network.transformers.index.astype(int).max()1)
network.add("Transformer", '99999', bus0="18967", bus1="25766",
x=0.135 / 300, r=0.0, tap_ratio=1, s_nom=300)
"""
According to:
https://assets.ctfassets.net/xytfb1vrn7of/NZO8x4rKesAcYGGcG4SQg/b780d6a3ca4c2600ab51a30b70950bb1/netzschemaplan-110-kv.pdf
the following lines are missing:
"""
add_110kv_line("18967", "22449", overhead=True) # visible in OSM & DSO map
add_110kv_line("21165", "24068", overhead=True) # visible in OSM & DSO map
add_110kv_line("23782", "24089", overhead=True)
# visible in DSO map & OSM till 1 km from bus1
"""
Umspannwerk Möhringen (bus 23697)
https://de.wikipedia.org/wiki/Umspannwerk_M%C3%B6hringen
there should be two connections:
to Sindelfingen (2*110kV)
to Wendingen (former 220kV, now 2*110kV)
the line to Sindelfingen is connected, but the connection of Sindelfingen
itself to 380kV is missing:
"""
add_110kv_line("19962", "27671", overhead=True) # visible in OSM & DSO map
add_110kv_line("19962", "27671", overhead=True)
"""
line to Wendingen is missing, probably because it ends shortly before the
way of the substation and is connected via cables:
"""
add_110kv_line("23697", "24090", overhead=True) # visible in OSM & DSO map
add_110kv_line("23697", "24090", overhead=True)
# Lehrte
"""
Lehrte: 220kV Bus located outsinde way of Betriebszentrtum Lehrte and
therefore not connected:
"""
def add_220kv_line(bus0, bus1, overhead=False):
new_line = str(network.lines.index.astype(int).max() + 1)
if not overhead:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=550)
else:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=520)
network.lines.loc[new_line, "scn_name"] = "Status Quo"
network.lines.loc[new_line, "v_nom"] = 220
network.lines.loc[new_line, "version"] = "added_manually"
network.lines.loc[new_line, "frequency"] = 50
network.lines.loc[new_line, "cables"] = 3.0
network.lines.loc[new_line, "country"] = 'DE'
network.lines.loc[new_line, "length"] = (
pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]],
network.buses.loc[bus1, ["x", "y"]])[0][0] *
1.2)
if not overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.0176)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*67e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
0.3e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
210e-9)
elif overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.05475)
network.lines.loc[new_line, "g"] = 0
# or: (network.lines.loc[new_line, "length"]*30e-9)
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] * 1e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] * 11e-9
)
add_220kv_line("266", "24633", overhead=True)
# temporary turn buses of transformers
network.transformers["v_nom0"] = network.transformers.bus0.map(
network.buses.v_nom)
network.transformers["v_nom1"] = network.transformers.bus1.map(
network.buses.v_nom)
new_bus0 = network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1]
new_bus1 = network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1]
network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus0.values
network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus1.values
return network | ['def', 'add_missing_components', '(', 'network', ')', ':', '# Munich', '"""https://www.swm.de/privatkunden/unternehmen/energieerzeugung/heizkraftwerke.html?utm_medium=301\n\n to bus 25096:\n 25369 (86)\n 28232 (24)\n 25353 to 25356 (79)\n to bus 23822: (110kV bus of 380/110-kV-transformer)\n 25355 (90)\n 28212 (98)\n\n 25357 to 665 (85)\n 25354 to 27414 (30)\n 27414 to 28212 (33)\n 25354 to 28294 (32/63)\n 28335 to 28294 (64)\n 28335 to 28139 (28)\n Overhead lines:\n 16573 to 24182 (part of 4)\n """', '"""\n Installierte Leistung der Umspannungsebene Höchst- zu Hochspannung\n (380 kV / 110 kV): 2.750.000 kVA\n https://www.swm-infrastruktur.de/strom/netzstrukturdaten/strukturmerkmale.html\n """', 'new_trafo', '=', 'str', '(', 'network', '.', 'transformers', '.', 'index', '.', 'astype', '(', 'int', ')', '.', 'max', '(', ')', '+', '1', ')', 'network', '.', 'add', '(', '"Transformer"', ',', 'new_trafo', ',', 'bus0', '=', '"16573"', ',', 'bus1', '=', '"23648"', ',', 'x', '=', '0.135', '/', '(', '2750', '/', '2', ')', ',', 'r', '=', '0.0', ',', 'tap_ratio', '=', '1', ',', 's_nom', '=', '2750', '/', '2', ')', 'def', 'add_110kv_line', '(', 'bus0', ',', 'bus1', ',', 'overhead', '=', 'False', ')', ':', 'new_line', '=', 'str', '(', 'network', '.', 'lines', '.', 'index', '.', 'astype', '(', 'int', ')', '.', 'max', '(', ')', '+', '1', ')', 'if', 'not', 'overhead', ':', 'network', '.', 'add', '(', '"Line"', ',', 'new_line', ',', 'bus0', '=', 'bus0', ',', 'bus1', '=', 'bus1', ',', 's_nom', '=', '280', ')', 'else', ':', 'network', '.', 'add', '(', '"Line"', ',', 'new_line', ',', 'bus0', '=', 'bus0', ',', 'bus1', '=', 'bus1', ',', 's_nom', '=', '260', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"scn_name"', ']', '=', '"Status Quo"', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"v_nom"', ']', '=', '110', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"version"', ']', '=', '"added_manually"', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"frequency"', ']', '=', '50', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"cables"', ']', '=', '3.0', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"country"', ']', '=', "'DE'", 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '=', '(', 'pypsa', '.', 'geo', '.', 'haversine', '(', 'network', '.', 'buses', '.', 'loc', '[', 'bus0', ',', '[', '"x"', ',', '"y"', ']', ']', ',', 'network', '.', 'buses', '.', 'loc', '[', 'bus1', ',', '[', '"x"', ',', '"y"', ']', ']', ')', '[', '0', ']', '[', '0', ']', '*', '1.2', ')', 'if', 'not', 'overhead', ':', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"r"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '0.0177', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"g"', ']', '=', '0', '# or: (network.lines.loc[new_line, "length"]*78e-9)', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"x"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '0.3e-3', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"b"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '250e-9', ')', 'elif', 'overhead', ':', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"r"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '0.05475', ')', 'network', '.', 'lines', '.', 'loc', '[', 
'new_line', ',', '"g"', ']', '=', '0', '# or: (network.lines.loc[new_line, "length"]*40e-9)', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"x"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '1.2e-3', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"b"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '9.5e-9', ')', 'add_110kv_line', '(', '"16573"', ',', '"28353"', ')', 'add_110kv_line', '(', '"16573"', ',', '"28092"', ')', 'add_110kv_line', '(', '"25096"', ',', '"25369"', ')', 'add_110kv_line', '(', '"25096"', ',', '"28232"', ')', 'add_110kv_line', '(', '"25353"', ',', '"25356"', ')', 'add_110kv_line', '(', '"23822"', ',', '"25355"', ')', 'add_110kv_line', '(', '"23822"', ',', '"28212"', ')', 'add_110kv_line', '(', '"25357"', ',', '"665"', ')', 'add_110kv_line', '(', '"25354"', ',', '"27414"', ')', 'add_110kv_line', '(', '"27414"', ',', '"28212"', ')', 'add_110kv_line', '(', '"25354"', ',', '"28294"', ')', 'add_110kv_line', '(', '"28335"', ',', '"28294"', ')', 'add_110kv_line', '(', '"28335"', ',', '"28139"', ')', 'add_110kv_line', '(', '"16573"', ',', '"24182"', ',', 'overhead', '=', 'True', ')', '# Stuttgart', '"""\n Stuttgart:\n Missing transformer, because 110-kV-bus is situated outside\n Heizkraftwerk Heilbronn:\n """', '# new_trafo = str(network.transformers.index.astype(int).max()1)', 'network', '.', 'add', '(', '"Transformer"', ',', "'99999'", ',', 'bus0', '=', '"18967"', ',', 'bus1', '=', '"25766"', ',', 'x', '=', '0.135', '/', '300', ',', 'r', '=', '0.0', ',', 'tap_ratio', '=', '1', ',', 's_nom', '=', '300', ')', '"""\n According to:\n https://assets.ctfassets.net/xytfb1vrn7of/NZO8x4rKesAcYGGcG4SQg/b780d6a3ca4c2600ab51a30b70950bb1/netzschemaplan-110-kv.pdf\n the following lines are missing:\n """', 'add_110kv_line', '(', '"18967"', ',', '"22449"', ',', 'overhead', '=', 'True', ')', '# visible in OSM & DSO map', 'add_110kv_line', '(', '"21165"', ',', '"24068"', ',', 'overhead', '=', 'True', ')', '# visible in OSM & DSO map', 'add_110kv_line', '(', '"23782"', ',', '"24089"', ',', 'overhead', '=', 'True', ')', '# visible in DSO map & OSM till 1 km from bus1', '"""\n Umspannwerk Möhringen (bus 23697)\n https://de.wikipedia.org/wiki/Umspannwerk_M%C3%B6hringen\n there should be two connections:\n to Sindelfingen (2*110kV)\n to Wendingen (former 220kV, now 2*110kV)\n the line to Sindelfingen is connected, but the connection of Sindelfingen\n itself to 380kV is missing:\n """', 'add_110kv_line', '(', '"19962"', ',', '"27671"', ',', 'overhead', '=', 'True', ')', '# visible in OSM & DSO map', 'add_110kv_line', '(', '"19962"', ',', '"27671"', ',', 'overhead', '=', 'True', ')', '"""\n line to Wendingen is missing, probably because it ends shortly before the\n way of the substation and is connected via cables:\n """', 'add_110kv_line', '(', '"23697"', ',', '"24090"', ',', 'overhead', '=', 'True', ')', '# visible in OSM & DSO map', 'add_110kv_line', '(', '"23697"', ',', '"24090"', ',', 'overhead', '=', 'True', ')', '# Lehrte', '"""\n Lehrte: 220kV Bus located outsinde way of Betriebszentrtum Lehrte and\n therefore not connected:\n """', 'def', 'add_220kv_line', '(', 'bus0', ',', 'bus1', ',', 'overhead', '=', 'False', ')', ':', 'new_line', '=', 'str', '(', 'network', '.', 'lines', '.', 'index', '.', 'astype', '(', 'int', ')', '.', 'max', '(', ')', '+', '1', ')', 'if', 'not', 'overhead', ':', 'network', '.', 'add', '(', '"Line"', ',', 'new_line', ',', 'bus0', '=', 
'bus0', ',', 'bus1', '=', 'bus1', ',', 's_nom', '=', '550', ')', 'else', ':', 'network', '.', 'add', '(', '"Line"', ',', 'new_line', ',', 'bus0', '=', 'bus0', ',', 'bus1', '=', 'bus1', ',', 's_nom', '=', '520', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"scn_name"', ']', '=', '"Status Quo"', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"v_nom"', ']', '=', '220', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"version"', ']', '=', '"added_manually"', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"frequency"', ']', '=', '50', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"cables"', ']', '=', '3.0', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"country"', ']', '=', "'DE'", 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '=', '(', 'pypsa', '.', 'geo', '.', 'haversine', '(', 'network', '.', 'buses', '.', 'loc', '[', 'bus0', ',', '[', '"x"', ',', '"y"', ']', ']', ',', 'network', '.', 'buses', '.', 'loc', '[', 'bus1', ',', '[', '"x"', ',', '"y"', ']', ']', ')', '[', '0', ']', '[', '0', ']', '*', '1.2', ')', 'if', 'not', 'overhead', ':', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"r"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '0.0176', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"g"', ']', '=', '0', '# or: (network.lines.loc[new_line, "length"]*67e-9)', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"x"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '0.3e-3', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"b"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '210e-9', ')', 'elif', 'overhead', ':', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"r"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '0.05475', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"g"', ']', '=', '0', '# or: (network.lines.loc[new_line, "length"]*30e-9)', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"x"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '1e-3', ')', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"b"', ']', '=', '(', 'network', '.', 'lines', '.', 'loc', '[', 'new_line', ',', '"length"', ']', '*', '11e-9', ')', 'add_220kv_line', '(', '"266"', ',', '"24633"', ',', 'overhead', '=', 'True', ')', '# temporary turn buses of transformers', 'network', '.', 'transformers', '[', '"v_nom0"', ']', '=', 'network', '.', 'transformers', '.', 'bus0', '.', 'map', '(', 'network', '.', 'buses', '.', 'v_nom', ')', 'network', '.', 'transformers', '[', '"v_nom1"', ']', '=', 'network', '.', 'transformers', '.', 'bus1', '.', 'map', '(', 'network', '.', 'buses', '.', 'v_nom', ')', 'new_bus0', '=', 'network', '.', 'transformers', '.', 'bus1', '[', 'network', '.', 'transformers', '.', 'v_nom0', '>', 'network', '.', 'transformers', '.', 'v_nom1', ']', 'new_bus1', '=', 'network', '.', 'transformers', '.', 'bus0', '[', 'network', '.', 'transformers', '.', 'v_nom0', '>', 'network', '.', 'transformers', '.', 'v_nom1', ']', 'network', '.', 'transformers', '.', 'bus0', '[', 'network', '.', 'transformers', '.', 'v_nom0', '>', 'network', '.', 'transformers', '.', 'v_nom1', ']', '=', 'new_bus0', '.', 'values', 'network', '.', 'transformers', '.', 'bus1', 
'[', 'network', '.', 'transformers', '.', 'v_nom0', '>', 'network', '.', 'transformers', '.', 'v_nom1', ']', '=', 'new_bus1', '.', 'values', 'return', 'network'] | Add missing transformer at Heizkraftwerk Nord in Munich and missing
transformer in Stuttgart
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA | ['Add', 'missing', 'transformer', 'at', 'Heizkraftwerk', 'Nord', 'in', 'Munich', 'and', 'missing', 'transformer', 'in', 'Stuttgart', 'Parameters', '----------', 'network', ':', ':', 'class', ':', 'pypsa', '.', 'Network', 'Overall', 'container', 'of', 'PyPSA'] | train | https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L1238-L1438 |
2,944 | bitesofcode/projexui | projexui/widgets/xcalendarwidget/xcalendarwidget.py | XCalendarWidget.mouseDoubleClickEvent | def mouseDoubleClickEvent( self, event ):
"""
Handles the mouse double click event.
:param event | <QMouseEvent>
"""
scene_point = self.mapToScene(event.pos())
date = self.scene().dateAt(scene_point)
date_time = self.scene().dateTimeAt(scene_point)
item = self.scene().itemAt(scene_point)
if ( not isinstance(item, XCalendarItem) ):
item = None
# checks to see if the signals are blocked
if ( not self.signalsBlocked() ):
if ( item ):
self.calendarItemDoubleClicked.emit(item)
elif ( date_time.isValid() ):
self.dateTimeDoubleClicked.emit(date_time)
elif ( date.isValid() ):
self.dateDoubleClicked.emit(date)
return super(XCalendarWidget, self).mouseDoubleClickEvent(event) | python | def mouseDoubleClickEvent( self, event ):
"""
Handles the mouse double click event.
:param event | <QMouseEvent>
"""
scene_point = self.mapToScene(event.pos())
date = self.scene().dateAt(scene_point)
date_time = self.scene().dateTimeAt(scene_point)
item = self.scene().itemAt(scene_point)
if ( not isinstance(item, XCalendarItem) ):
item = None
# checks to see if the signals are blocked
if ( not self.signalsBlocked() ):
if ( item ):
self.calendarItemDoubleClicked.emit(item)
elif ( date_time.isValid() ):
self.dateTimeDoubleClicked.emit(date_time)
elif ( date.isValid() ):
self.dateDoubleClicked.emit(date)
return super(XCalendarWidget, self).mouseDoubleClickEvent(event) | ['def', 'mouseDoubleClickEvent', '(', 'self', ',', 'event', ')', ':', 'scene_point', '=', 'self', '.', 'mapToScene', '(', 'event', '.', 'pos', '(', ')', ')', 'date', '=', 'self', '.', 'scene', '(', ')', '.', 'dateAt', '(', 'scene_point', ')', 'date_time', '=', 'self', '.', 'scene', '(', ')', '.', 'dateTimeAt', '(', 'scene_point', ')', 'item', '=', 'self', '.', 'scene', '(', ')', '.', 'itemAt', '(', 'scene_point', ')', 'if', '(', 'not', 'isinstance', '(', 'item', ',', 'XCalendarItem', ')', ')', ':', 'item', '=', 'None', '# checks to see if the signals are blocked\r', 'if', '(', 'not', 'self', '.', 'signalsBlocked', '(', ')', ')', ':', 'if', '(', 'item', ')', ':', 'self', '.', 'calendarItemDoubleClicked', '.', 'emit', '(', 'item', ')', 'elif', '(', 'date_time', '.', 'isValid', '(', ')', ')', ':', 'self', '.', 'dateTimeDoubleClicked', '.', 'emit', '(', 'date_time', ')', 'elif', '(', 'date', '.', 'isValid', '(', ')', ')', ':', 'self', '.', 'dateDoubleClicked', '.', 'emit', '(', 'date', ')', 'return', 'super', '(', 'XCalendarWidget', ',', 'self', ')', '.', 'mouseDoubleClickEvent', '(', 'event', ')'] | Handles the mouse double click event.
:param event | <QMouseEvent> | ['Handles', 'the', 'mouse', 'double', 'click', 'event', '.', ':', 'param', 'event', '|', '<QMouseEvent', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarwidget.py#L261-L286 |
2,945 | ramses-tech/ramses | ramses/utils.py | resource_view_attrs | def resource_view_attrs(raml_resource, singular=False):
""" Generate view method names needed for `raml_resource` view.
Collects HTTP method names from resource siblings and dynamic children
if exist. Collected methods are then translated to
`nefertari.view.BaseView` method names, each of which is used to
process a particular HTTP method request.
Maps of {HTTP_method: view_method} `collection_methods` and
`item_methods` are used to convert collection and item methods
respectively.
:param raml_resource: Instance of ramlfications.raml.ResourceNode
:param singular: Boolean indicating if resource is singular or not
"""
from .views import collection_methods, item_methods
# Singular resource doesn't have collection methods though
# it looks like a collection
if singular:
collection_methods = item_methods
siblings = get_resource_siblings(raml_resource)
http_methods = [sibl.method.lower() for sibl in siblings]
attrs = [collection_methods.get(method) for method in http_methods]
# Check if resource has dynamic child resource like collection/{id}
# If dynamic child resource exists, add its siblings' methods to attrs,
# as both resources are handled by a single view
children = get_resource_children(raml_resource)
http_submethods = [child.method.lower() for child in children
if is_dynamic_uri(child.path)]
attrs += [item_methods.get(method) for method in http_submethods]
return set(filter(bool, attrs)) | python | def resource_view_attrs(raml_resource, singular=False):
""" Generate view method names needed for `raml_resource` view.
Collects HTTP method names from resource siblings and dynamic children
if exist. Collected methods are then translated to
`nefertari.view.BaseView` method names, each of which is used to
process a particular HTTP method request.
Maps of {HTTP_method: view_method} `collection_methods` and
`item_methods` are used to convert collection and item methods
respectively.
:param raml_resource: Instance of ramlfications.raml.ResourceNode
:param singular: Boolean indicating if resource is singular or not
"""
from .views import collection_methods, item_methods
# Singular resource doesn't have collection methods though
# it looks like a collection
if singular:
collection_methods = item_methods
siblings = get_resource_siblings(raml_resource)
http_methods = [sibl.method.lower() for sibl in siblings]
attrs = [collection_methods.get(method) for method in http_methods]
# Check if resource has dynamic child resource like collection/{id}
# If dynamic child resource exists, add its siblings' methods to attrs,
# as both resources are handled by a single view
children = get_resource_children(raml_resource)
http_submethods = [child.method.lower() for child in children
if is_dynamic_uri(child.path)]
attrs += [item_methods.get(method) for method in http_submethods]
return set(filter(bool, attrs)) | ['def', 'resource_view_attrs', '(', 'raml_resource', ',', 'singular', '=', 'False', ')', ':', 'from', '.', 'views', 'import', 'collection_methods', ',', 'item_methods', "# Singular resource doesn't have collection methods though", '# it looks like a collection', 'if', 'singular', ':', 'collection_methods', '=', 'item_methods', 'siblings', '=', 'get_resource_siblings', '(', 'raml_resource', ')', 'http_methods', '=', '[', 'sibl', '.', 'method', '.', 'lower', '(', ')', 'for', 'sibl', 'in', 'siblings', ']', 'attrs', '=', '[', 'collection_methods', '.', 'get', '(', 'method', ')', 'for', 'method', 'in', 'http_methods', ']', '# Check if resource has dynamic child resource like collection/{id}', "# If dynamic child resource exists, add its siblings' methods to attrs,", '# as both resources are handled by a single view', 'children', '=', 'get_resource_children', '(', 'raml_resource', ')', 'http_submethods', '=', '[', 'child', '.', 'method', '.', 'lower', '(', ')', 'for', 'child', 'in', 'children', 'if', 'is_dynamic_uri', '(', 'child', '.', 'path', ')', ']', 'attrs', '+=', '[', 'item_methods', '.', 'get', '(', 'method', ')', 'for', 'method', 'in', 'http_submethods', ']', 'return', 'set', '(', 'filter', '(', 'bool', ',', 'attrs', ')', ')'] | Generate view method names needed for `raml_resource` view.
Collects HTTP method names from resource siblings and dynamic children
if exist. Collected methods are then translated to
`nefertari.view.BaseView` method names, each of which is used to
process a particular HTTP method request.
Maps of {HTTP_method: view_method} `collection_methods` and
`item_methods` are used to convert collection and item methods
respectively.
:param raml_resource: Instance of ramlfications.raml.ResourceNode
:param singular: Boolean indicating if resource is singular or not | ['Generate', 'view', 'method', 'names', 'needed', 'for', 'raml_resource', 'view', '.'] | train | https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L123-L156 |
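A minimal usage sketch for resource_view_attrs above; the RAML file name and the choice of resource are placeholders, and it assumes ramlfications (the library that produces the ResourceNode objects) is installed.
import ramlfications
from ramses.utils import resource_view_attrs

api = ramlfications.parse('api.raml')        # hypothetical RAML spec
resource = api.resources[0]                  # any ResourceNode from the spec
print(resource_view_attrs(resource, singular=False))
# e.g. {'index', 'create', 'show'} depending on the methods declared in the spec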
2,946 | consbio/ncdjango | ncdjango/interfaces/arcgis/views.py | GetImageView.format_image | def format_image(self, image, image_format, **kwargs):
"""Returns an image in the request format"""
image_format = image_format.lower()
accept = self.request.META['HTTP_ACCEPT'].split(',')
if FORCE_WEBP and 'image/webp' in accept:
image_format = 'webp'
elif image_format == 'png8':
alpha = image.split()[-1]
image = image.convert('RGB')
image = image.convert('P', palette=Image.ADAPTIVE, colors=255)
image.paste(255, Image.eval(alpha, lambda x: 255 if x <= 128 else 0))
image_format = 'png'
kwargs['transparency'] = 255
elif image_format in ('png32', 'png24'):
image_format = 'png'
return super(GetImageView, self).format_image(image, image_format, **kwargs) | python | def format_image(self, image, image_format, **kwargs):
"""Returns an image in the request format"""
image_format = image_format.lower()
accept = self.request.META['HTTP_ACCEPT'].split(',')
if FORCE_WEBP and 'image/webp' in accept:
image_format = 'webp'
elif image_format == 'png8':
alpha = image.split()[-1]
image = image.convert('RGB')
image = image.convert('P', palette=Image.ADAPTIVE, colors=255)
image.paste(255, Image.eval(alpha, lambda x: 255 if x <= 128 else 0))
image_format = 'png'
kwargs['transparency'] = 255
elif image_format in ('png32', 'png24'):
image_format = 'png'
return super(GetImageView, self).format_image(image, image_format, **kwargs) | ['def', 'format_image', '(', 'self', ',', 'image', ',', 'image_format', ',', '*', '*', 'kwargs', ')', ':', 'image_format', '=', 'image_format', '.', 'lower', '(', ')', 'accept', '=', 'self', '.', 'request', '.', 'META', '[', "'HTTP_ACCEPT'", ']', '.', 'split', '(', "','", ')', 'if', 'FORCE_WEBP', 'and', "'image/webp'", 'in', 'accept', ':', 'image_format', '=', "'webp'", 'elif', 'image_format', '==', "'png8'", ':', 'alpha', '=', 'image', '.', 'split', '(', ')', '[', '-', '1', ']', 'image', '=', 'image', '.', 'convert', '(', "'RGB'", ')', 'image', '=', 'image', '.', 'convert', '(', "'P'", ',', 'palette', '=', 'Image', '.', 'ADAPTIVE', ',', 'colors', '=', '255', ')', 'image', '.', 'paste', '(', '255', ',', 'Image', '.', 'eval', '(', 'alpha', ',', 'lambda', 'x', ':', '255', 'if', 'x', '<=', '128', 'else', '0', ')', ')', 'image_format', '=', "'png'", 'kwargs', '[', "'transparency'", ']', '=', '255', 'elif', 'image_format', 'in', '(', "'png32'", ',', "'png24'", ')', ':', 'image_format', '=', "'png'", 'return', 'super', '(', 'GetImageView', ',', 'self', ')', '.', 'format_image', '(', 'image', ',', 'image_format', ',', '*', '*', 'kwargs', ')'] | Returns an image in the request format | ['Returns', 'an', 'image', 'in', 'the', 'request', 'format'] | train | https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L264-L282 |
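A hedged sketch of the png8 branch of format_image above; the GetImageView instance is assumed to already exist with a request attached, and the RGBA tile is synthetic.
from PIL import Image

def render_png8(view):
    # half-transparent red tile; alpha is thresholded at 128 and mapped to palette index 255
    img = Image.new('RGBA', (64, 64), (255, 0, 0, 128))
    return view.format_image(img, 'PNG8')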
2,947 | kmike/port-for | port_for/_download_ranges.py | _unassigned_ports | def _unassigned_ports():
""" Returns a set of all unassigned ports (according to IANA and Wikipedia) """
free_ports = ranges_to_set(_parse_ranges(_iana_unassigned_port_ranges()))
known_ports = ranges_to_set(_wikipedia_known_port_ranges())
return free_ports.difference(known_ports) | python | def _unassigned_ports():
""" Returns a set of all unassigned ports (according to IANA and Wikipedia) """
free_ports = ranges_to_set(_parse_ranges(_iana_unassigned_port_ranges()))
known_ports = ranges_to_set(_wikipedia_known_port_ranges())
return free_ports.difference(known_ports) | ['def', '_unassigned_ports', '(', ')', ':', 'free_ports', '=', 'ranges_to_set', '(', '_parse_ranges', '(', '_iana_unassigned_port_ranges', '(', ')', ')', ')', 'known_ports', '=', 'ranges_to_set', '(', '_wikipedia_known_port_ranges', '(', ')', ')', 'return', 'free_ports', '.', 'difference', '(', 'known_ports', ')'] | Returns a set of all unassigned ports (according to IANA and Wikipedia) | ['Returns', 'a', 'set', 'of', 'all', 'unassigned', 'ports', '(', 'according', 'to', 'IANA', 'and', 'Wikipedia', ')'] | train | https://github.com/kmike/port-for/blob/f61ebf3c2caf54eabe8233b40ef67b973176a6f5/port_for/_download_ranges.py#L37-L41 |
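A short sketch of the private helper above; it downloads the IANA and Wikipedia tables, so it needs network access.
from port_for._download_ranges import _unassigned_ports

free = _unassigned_ports()               # set of ports neither IANA nor Wikipedia claims
print(len(free), min(free), max(free))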
2,948 | saltstack/salt | salt/utils/azurearm.py | get_client | def get_client(client_type, **kwargs):
'''
Dynamically load the selected client and return a management client object
'''
client_map = {'compute': 'ComputeManagement',
'authorization': 'AuthorizationManagement',
'dns': 'DnsManagement',
'storage': 'StorageManagement',
'managementlock': 'ManagementLock',
'monitor': 'MonitorManagement',
'network': 'NetworkManagement',
'policy': 'Policy',
'resource': 'ResourceManagement',
'subscription': 'Subscription',
'web': 'WebSiteManagement'}
if client_type not in client_map:
raise SaltSystemExit(
msg='The Azure ARM client_type {0} specified can not be found.'.format(
client_type)
)
map_value = client_map[client_type]
if client_type in ['policy', 'subscription']:
module_name = 'resource'
elif client_type in ['managementlock']:
module_name = 'resource.locks'
else:
module_name = client_type
try:
client_module = importlib.import_module('azure.mgmt.'+module_name)
# pylint: disable=invalid-name
Client = getattr(client_module,
'{0}Client'.format(map_value))
except ImportError:
raise sys.exit(
'The azure {0} client is not available.'.format(client_type)
)
credentials, subscription_id, cloud_env = _determine_auth(**kwargs)
if client_type == 'subscription':
client = Client(
credentials=credentials,
base_url=cloud_env.endpoints.resource_manager,
)
else:
client = Client(
credentials=credentials,
subscription_id=subscription_id,
base_url=cloud_env.endpoints.resource_manager,
)
client.config.add_user_agent('Salt/{0}'.format(salt.version.__version__))
return client | python | def get_client(client_type, **kwargs):
'''
Dynamically load the selected client and return a management client object
'''
client_map = {'compute': 'ComputeManagement',
'authorization': 'AuthorizationManagement',
'dns': 'DnsManagement',
'storage': 'StorageManagement',
'managementlock': 'ManagementLock',
'monitor': 'MonitorManagement',
'network': 'NetworkManagement',
'policy': 'Policy',
'resource': 'ResourceManagement',
'subscription': 'Subscription',
'web': 'WebSiteManagement'}
if client_type not in client_map:
raise SaltSystemExit(
msg='The Azure ARM client_type {0} specified can not be found.'.format(
client_type)
)
map_value = client_map[client_type]
if client_type in ['policy', 'subscription']:
module_name = 'resource'
elif client_type in ['managementlock']:
module_name = 'resource.locks'
else:
module_name = client_type
try:
client_module = importlib.import_module('azure.mgmt.'+module_name)
# pylint: disable=invalid-name
Client = getattr(client_module,
'{0}Client'.format(map_value))
except ImportError:
raise sys.exit(
'The azure {0} client is not available.'.format(client_type)
)
credentials, subscription_id, cloud_env = _determine_auth(**kwargs)
if client_type == 'subscription':
client = Client(
credentials=credentials,
base_url=cloud_env.endpoints.resource_manager,
)
else:
client = Client(
credentials=credentials,
subscription_id=subscription_id,
base_url=cloud_env.endpoints.resource_manager,
)
client.config.add_user_agent('Salt/{0}'.format(salt.version.__version__))
return client | ['def', 'get_client', '(', 'client_type', ',', '*', '*', 'kwargs', ')', ':', 'client_map', '=', '{', "'compute'", ':', "'ComputeManagement'", ',', "'authorization'", ':', "'AuthorizationManagement'", ',', "'dns'", ':', "'DnsManagement'", ',', "'storage'", ':', "'StorageManagement'", ',', "'managementlock'", ':', "'ManagementLock'", ',', "'monitor'", ':', "'MonitorManagement'", ',', "'network'", ':', "'NetworkManagement'", ',', "'policy'", ':', "'Policy'", ',', "'resource'", ':', "'ResourceManagement'", ',', "'subscription'", ':', "'Subscription'", ',', "'web'", ':', "'WebSiteManagement'", '}', 'if', 'client_type', 'not', 'in', 'client_map', ':', 'raise', 'SaltSystemExit', '(', 'msg', '=', "'The Azure ARM client_type {0} specified can not be found.'", '.', 'format', '(', 'client_type', ')', ')', 'map_value', '=', 'client_map', '[', 'client_type', ']', 'if', 'client_type', 'in', '[', "'policy'", ',', "'subscription'", ']', ':', 'module_name', '=', "'resource'", 'elif', 'client_type', 'in', '[', "'managementlock'", ']', ':', 'module_name', '=', "'resource.locks'", 'else', ':', 'module_name', '=', 'client_type', 'try', ':', 'client_module', '=', 'importlib', '.', 'import_module', '(', "'azure.mgmt.'", '+', 'module_name', ')', '# pylint: disable=invalid-name', 'Client', '=', 'getattr', '(', 'client_module', ',', "'{0}Client'", '.', 'format', '(', 'map_value', ')', ')', 'except', 'ImportError', ':', 'raise', 'sys', '.', 'exit', '(', "'The azure {0} client is not available.'", '.', 'format', '(', 'client_type', ')', ')', 'credentials', ',', 'subscription_id', ',', 'cloud_env', '=', '_determine_auth', '(', '*', '*', 'kwargs', ')', 'if', 'client_type', '==', "'subscription'", ':', 'client', '=', 'Client', '(', 'credentials', '=', 'credentials', ',', 'base_url', '=', 'cloud_env', '.', 'endpoints', '.', 'resource_manager', ',', ')', 'else', ':', 'client', '=', 'Client', '(', 'credentials', '=', 'credentials', ',', 'subscription_id', '=', 'subscription_id', ',', 'base_url', '=', 'cloud_env', '.', 'endpoints', '.', 'resource_manager', ',', ')', 'client', '.', 'config', '.', 'add_user_agent', '(', "'Salt/{0}'", '.', 'format', '(', 'salt', '.', 'version', '.', '__version__', ')', ')', 'return', 'client'] | Dynamically load the selected client and return a management client object | ['Dynamically', 'load', 'the', 'selected', 'client', 'and', 'return', 'a', 'management', 'client', 'object'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/azurearm.py#L140-L197 |
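A hedged sketch of get_client above; the credential keyword arguments are placeholders and are consumed by _determine_auth, which is not shown in this row.
compute = get_client(
    'compute',
    client_id='00000000-0000-0000-0000-000000000000',      # placeholder service principal
    secret='***',
    tenant='00000000-0000-0000-0000-000000000000',
    subscription_id='00000000-0000-0000-0000-000000000000',
)
# an unsupported client_type such as 'keyvault' raises SaltSystemExit before any import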
2,949 | glomex/gcdt | gcdt/s3.py | remove_file_from_s3 | def remove_file_from_s3(awsclient, bucket, key):
"""Remove a file from an AWS S3 bucket.
:param awsclient:
:param bucket:
:param key:
:return:
"""
client_s3 = awsclient.get_client('s3')
response = client_s3.delete_object(Bucket=bucket, Key=key) | python | def remove_file_from_s3(awsclient, bucket, key):
"""Remove a file from an AWS S3 bucket.
:param awsclient:
:param bucket:
:param key:
:return:
"""
client_s3 = awsclient.get_client('s3')
response = client_s3.delete_object(Bucket=bucket, Key=key) | ['def', 'remove_file_from_s3', '(', 'awsclient', ',', 'bucket', ',', 'key', ')', ':', 'client_s3', '=', 'awsclient', '.', 'get_client', '(', "'s3'", ')', 'response', '=', 'client_s3', '.', 'delete_object', '(', 'Bucket', '=', 'bucket', ',', 'Key', '=', 'key', ')'] | Remove a file from an AWS S3 bucket.
:param awsclient:
:param bucket:
:param key:
:return: | ['Remove', 'a', 'file', 'from', 'an', 'AWS', 'S3', 'bucket', '.'] | train | https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/s3.py#L92-L101 |
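Usage sketch for remove_file_from_s3 above; the bucket, key, and the awsclient wrapper (gcdt's object exposing get_client('s3')) are placeholders.
def clean_artifact(awsclient):
    # delete a previously uploaded bundle; names are hypothetical
    remove_file_from_s3(awsclient, 'my-deploy-bucket', 'bundles/artifact.zip')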
2,950 | watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | CollectionDiskUsage._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'used_bytes') and self.used_bytes is not None:
_dict['used_bytes'] = self.used_bytes
return _dict | python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'used_bytes') and self.used_bytes is not None:
_dict['used_bytes'] = self.used_bytes
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'used_bytes'", ')', 'and', 'self', '.', 'used_bytes', 'is', 'not', 'None', ':', '_dict', '[', "'used_bytes'", ']', '=', 'self', '.', 'used_bytes', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L3836-L3841 |
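A small sketch of _to_dict above; constructing CollectionDiskUsage with a used_bytes keyword mirrors the other SDK models but is an assumption here.
usage = CollectionDiskUsage(used_bytes=2048)   # assumed keyword constructor
print(usage._to_dict())                        # {'used_bytes': 2048}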
2,951 | bitprophet/releases | releases/__init__.py | append_unreleased_entries | def append_unreleased_entries(app, manager, releases):
"""
Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored.
"""
for family, lines in six.iteritems(manager):
for type_ in ('bugfix', 'feature'):
bucket = 'unreleased_{}'.format(type_)
if bucket not in lines: # Implies unstable prehistory + 0.x fam
continue
issues = lines[bucket]
fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
releases.append(
generate_unreleased_entry(header, line, issues, manager, app)
) | python | def append_unreleased_entries(app, manager, releases):
"""
Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored.
"""
for family, lines in six.iteritems(manager):
for type_ in ('bugfix', 'feature'):
bucket = 'unreleased_{}'.format(type_)
if bucket not in lines: # Implies unstable prehistory + 0.x fam
continue
issues = lines[bucket]
fam_prefix = "{}.x ".format(family) if len(manager) > 1 else ""
header = "Next {}{} release".format(fam_prefix, type_)
line = "unreleased_{}.x_{}".format(family, type_)
releases.append(
generate_unreleased_entry(header, line, issues, manager, app)
) | ['def', 'append_unreleased_entries', '(', 'app', ',', 'manager', ',', 'releases', ')', ':', 'for', 'family', ',', 'lines', 'in', 'six', '.', 'iteritems', '(', 'manager', ')', ':', 'for', 'type_', 'in', '(', "'bugfix'", ',', "'feature'", ')', ':', 'bucket', '=', "'unreleased_{}'", '.', 'format', '(', 'type_', ')', 'if', 'bucket', 'not', 'in', 'lines', ':', '# Implies unstable prehistory + 0.x fam', 'continue', 'issues', '=', 'lines', '[', 'bucket', ']', 'fam_prefix', '=', '"{}.x "', '.', 'format', '(', 'family', ')', 'if', 'len', '(', 'manager', ')', '>', '1', 'else', '""', 'header', '=', '"Next {}{} release"', '.', 'format', '(', 'fam_prefix', ',', 'type_', ')', 'line', '=', '"unreleased_{}.x_{}"', '.', 'format', '(', 'family', ',', 'type_', ')', 'releases', '.', 'append', '(', 'generate_unreleased_entry', '(', 'header', ',', 'line', ',', 'issues', ',', 'manager', ',', 'app', ')', ')'] | Generate new abstract 'releases' for unreleased issues.
There's one for each combination of bug-vs-feature & major release line.
When only one major release line exists, that dimension is ignored. | ['Generate', 'new', 'abstract', 'releases', 'for', 'unreleased', 'issues', '.'] | train | https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/__init__.py#L202-L221 |
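A hedged wrapper sketch for append_unreleased_entries above; in real use Sphinx supplies app and the changelog parser supplies manager (a mapping of release family to buckets such as 'unreleased_bugfix').
def collect_unreleased(app, manager):
    releases = []
    append_unreleased_entries(app, manager, releases)
    return releases   # gains "Next 1.x bugfix release"-style pseudo-releases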
2,952 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAIndicator/indicators.py | QA_indicator_MA | def QA_indicator_MA(DataFrame,*args,**kwargs):
"""MA
Arguments:
DataFrame {[type]} -- [description]
Returns:
[type] -- [description]
"""
CLOSE = DataFrame['close']
return pd.DataFrame({'MA{}'.format(N): MA(CLOSE, N) for N in list(args)}) | python | def QA_indicator_MA(DataFrame,*args,**kwargs):
"""MA
Arguments:
DataFrame {[type]} -- [description]
Returns:
[type] -- [description]
"""
CLOSE = DataFrame['close']
return pd.DataFrame({'MA{}'.format(N): MA(CLOSE, N) for N in list(args)}) | ['def', 'QA_indicator_MA', '(', 'DataFrame', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'CLOSE', '=', 'DataFrame', '[', "'close'", ']', 'return', 'pd', '.', 'DataFrame', '(', '{', "'MA{}'", '.', 'format', '(', 'N', ')', ':', 'MA', '(', 'CLOSE', ',', 'N', ')', 'for', 'N', 'in', 'list', '(', 'args', ')', '}', ')'] | MA
Arguments:
DataFrame {[type]} -- [description]
Returns:
[type] -- [description] | ['MA', 'Arguments', ':', 'DataFrame', '{', '[', 'type', ']', '}', '--', '[', 'description', ']', 'Returns', ':', '[', 'type', ']', '--', '[', 'description', ']'] | train | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAIndicator/indicators.py#L58-L69 |
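A quick sketch of QA_indicator_MA above with synthetic prices; the function only needs a DataFrame carrying a 'close' column, and each positional argument becomes one moving-average window.
import pandas as pd
from QUANTAXIS.QAIndicator.indicators import QA_indicator_MA   # path taken from the row above

df = pd.DataFrame({'close': [10, 11, 12, 13, 14, 15]})
print(QA_indicator_MA(df, 2, 3))   # columns MA2 and MA3, NaN until each window fills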
2,953 | msu-coinlab/pymop | pymop/problem.py | Problem.pareto_front | def pareto_front(self, *args, **kwargs):
"""
Returns
-------
P : np.array
The Pareto front of a given problem. It is only loaded or calculated the first time and then cached.
For a single-objective problem only one point is returned but still in a two dimensional array.
"""
if self._pareto_front is None:
self._pareto_front = self._calc_pareto_front(*args, **kwargs)
return self._pareto_front | python | def pareto_front(self, *args, **kwargs):
"""
Returns
-------
P : np.array
The Pareto front of a given problem. It is only loaded or calculated the first time and then cached.
For a single-objective problem only one point is returned but still in a two dimensional array.
"""
if self._pareto_front is None:
self._pareto_front = self._calc_pareto_front(*args, **kwargs)
return self._pareto_front | ['def', 'pareto_front', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', '_pareto_front', 'is', 'None', ':', 'self', '.', '_pareto_front', '=', 'self', '.', '_calc_pareto_front', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'self', '.', '_pareto_front'] | Returns
-------
P : np.array
The Pareto front of a given problem. It is only loaded or calculated the first time and then cached.
For a single-objective problem only one point is returned but still in a two dimensional array. | ['Returns', '-------', 'P', ':', 'np', '.', 'array', 'The', 'Pareto', 'front', 'of', 'a', 'given', 'problem', '.', 'It', 'is', 'only', 'loaded', 'or', 'calculate', 'the', 'first', 'time', 'and', 'then', 'cached', '.', 'For', 'a', 'single', '-', 'objective', 'problem', 'only', 'one', 'point', 'is', 'returned', 'but', 'still', 'in', 'a', 'two', 'dimensional', 'array', '.'] | train | https://github.com/msu-coinlab/pymop/blob/7b7e789e640126c6d254e86ede5d7f4baad7eaa5/pymop/problem.py#L96-L107 |
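A hedged sketch of pareto_front above; ZDT1 is one of pymop's bundled test problems, though the exact import path is an assumption.
from pymop.problems.zdt import ZDT1   # assumed import path

problem = ZDT1(n_var=30)
pf = problem.pareto_front()           # computed by _calc_pareto_front once, cached afterwards
print(pf.shape)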
2,954 | mitsei/dlkit | dlkit/json_/learning/sessions.py | ObjectiveBankHierarchyDesignSession.remove_child_objective_banks | def remove_child_objective_banks(self, objective_bank_id):
"""Removes all children from an objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
raise: NotFound - ``objective_bank_id`` not in hierarchy
raise: NullArgument - ``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=objective_bank_id)
return self._hierarchy_session.remove_children(id_=objective_bank_id) | python | def remove_child_objective_banks(self, objective_bank_id):
"""Removes all children from an objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
raise: NotFound - ``objective_bank_id`` not in hierarchy
raise: NullArgument - ``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=objective_bank_id)
return self._hierarchy_session.remove_children(id_=objective_bank_id) | ['def', 'remove_child_objective_banks', '(', 'self', ',', 'objective_bank_id', ')', ':', '# Implemented from template for', '# osid.resource.BinHierarchyDesignSession.remove_child_bin_template', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'remove_child_catalogs', '(', 'catalog_id', '=', 'objective_bank_id', ')', 'return', 'self', '.', '_hierarchy_session', '.', 'remove_children', '(', 'id_', '=', 'objective_bank_id', ')'] | Removes all children from an objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
raise: NotFound - ``objective_bank_id`` not in hierarchy
raise: NullArgument - ``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Removes', 'all', 'children', 'from', 'an', 'objective', 'bank', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L6566-L6582 |
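A minimal hedged sketch for remove_child_objective_banks above; the hierarchy-design session and the bank Id come from the dlkit learning manager and are placeholders here.
def prune_objective_bank(hierarchy_session, objective_bank_id):
    # detaches every child bank in one call; NotFound is raised if the Id is not in the hierarchy
    hierarchy_session.remove_child_objective_banks(objective_bank_id)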
2,955 | maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | __convert_to_df | def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col | python | def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col | ['def', '__convert_to_df', '(', 'a', ',', 'val_col', '=', 'None', ',', 'group_col', '=', 'None', ',', 'val_id', '=', 'None', ',', 'group_id', '=', 'None', ')', ':', 'if', 'not', 'group_col', ':', 'group_col', '=', "'groups'", 'if', 'not', 'val_col', ':', 'val_col', '=', "'vals'", 'if', 'isinstance', '(', 'a', ',', 'DataFrame', ')', ':', 'x', '=', 'a', '.', 'copy', '(', ')', 'if', 'not', '{', 'group_col', ',', 'val_col', '}', '.', 'issubset', '(', 'a', '.', 'columns', ')', ':', 'raise', 'ValueError', '(', "'Specify correct column names using `group_col` and `val_col` args'", ')', 'return', 'x', ',', 'val_col', ',', 'group_col', 'elif', 'isinstance', '(', 'a', ',', 'list', ')', 'or', '(', 'isinstance', '(', 'a', ',', 'np', '.', 'ndarray', ')', 'and', 'not', 'a', '.', 'shape', '.', 'count', '(', '2', ')', ')', ':', 'grps_len', '=', 'map', '(', 'len', ',', 'a', ')', 'grps', '=', 'list', '(', 'it', '.', 'chain', '(', '*', '[', '[', 'i', '+', '1', ']', '*', 'l', 'for', 'i', ',', 'l', 'in', 'enumerate', '(', 'grps_len', ')', ']', ')', ')', 'vals', '=', 'list', '(', 'it', '.', 'chain', '(', '*', 'a', ')', ')', 'return', 'DataFrame', '(', '{', 'val_col', ':', 'vals', ',', 'group_col', ':', 'grps', '}', ')', ',', 'val_col', ',', 'group_col', 'elif', 'isinstance', '(', 'a', ',', 'np', '.', 'ndarray', ')', ':', '# cols ids not defined', '# trying to infer', 'if', 'not', '(', 'all', '(', '[', 'val_id', ',', 'group_id', ']', ')', ')', ':', 'if', 'np', '.', 'argmax', '(', 'a', '.', 'shape', ')', ':', 'a', '=', 'a', '.', 'T', 'ax', '=', '[', 'np', '.', 'unique', '(', 'a', '[', ':', ',', '0', ']', ')', '.', 'size', ',', 'np', '.', 'unique', '(', 'a', '[', ':', ',', '1', ']', ')', '.', 'size', ']', 'if', 'np', '.', 'asscalar', '(', 'np', '.', 'diff', '(', 'ax', ')', ')', ':', '__val_col', '=', 'np', '.', 'argmax', '(', 'ax', ')', '__group_col', '=', 'np', '.', 'argmin', '(', 'ax', ')', 'else', ':', 'raise', 'ValueError', '(', "'Cannot infer input format.\\nPlease specify `val_id` and `group_id` args'", ')', 'cols', '=', '{', '__val_col', ':', 'val_col', ',', '__group_col', ':', 'group_col', '}', 'else', ':', 'cols', '=', '{', 'val_id', ':', 'val_col', ',', 'group_id', ':', 'group_col', '}', 'cols_vals', '=', 'dict', '(', 'sorted', '(', 'cols', '.', 'items', '(', ')', ')', ')', '.', 'values', '(', ')', 'return', 'DataFrame', '(', 'a', ',', 'columns', '=', 'cols_vals', ')', ',', 'val_col', ',', 'group_col'] | Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors. | ['Hidden', 'helper', 'method', 'to', 'create', 'a', 'DataFrame', 'with', 'input', 'data', 'for', 'further', 'processing', '.'] | train | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L11-L106 |
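A sketch of the list-of-lists path through __convert_to_df above; importing a module-level double-underscore name is unusual but allowed, and the sample groups are synthetic.
from scikit_posthocs._posthocs import __convert_to_df

groups = [[2.9, 3.0, 2.5], [3.8, 2.7], [2.8, 3.4, 3.7, 2.2]]
df, val_col, group_col = __convert_to_df(groups)
print(df.head())   # long format: a 'vals' column plus a 1-based 'groups' column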
2,956 | ghukill/pyfc4 | pyfc4/plugins/pcdm/models.py | PCDMFile._post_create | def _post_create(self, auto_refresh=False):
'''
resource.create() hook
For PCDM File
'''
# set PCDM triple as Collection
self.add_triple(self.rdf.prefixes.rdf.type, self.rdf.prefixes.pcdm.File)
self.update(auto_refresh=auto_refresh) | python | def _post_create(self, auto_refresh=False):
'''
resource.create() hook
For PCDM File
'''
# set PCDM triple as Collection
self.add_triple(self.rdf.prefixes.rdf.type, self.rdf.prefixes.pcdm.File)
self.update(auto_refresh=auto_refresh) | ['def', '_post_create', '(', 'self', ',', 'auto_refresh', '=', 'False', ')', ':', '# set PCDM triple as Collection', 'self', '.', 'add_triple', '(', 'self', '.', 'rdf', '.', 'prefixes', '.', 'rdf', '.', 'type', ',', 'self', '.', 'rdf', '.', 'prefixes', '.', 'pcdm', '.', 'File', ')', 'self', '.', 'update', '(', 'auto_refresh', '=', 'auto_refresh', ')'] | resource.create() hook
For PCDM File | ['resource', '.', 'create', '()', 'hook'] | train | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/plugins/pcdm/models.py#L435-L445 |
2,957 | jtwhite79/pyemu | pyemu/en.py | Ensemble.as_pyemu_matrix | def as_pyemu_matrix(self,typ=Matrix):
"""
Create a pyemu.Matrix from the Ensemble.
Parameters
----------
typ : pyemu.Matrix or derived type
the type of matrix to return
Returns
-------
pyemu.Matrix : pyemu.Matrix
"""
x = self.values.copy().astype(np.float)
return typ(x=x,row_names=list(self.index),
col_names=list(self.columns)) | python | def as_pyemu_matrix(self,typ=Matrix):
"""
Create a pyemu.Matrix from the Ensemble.
Parameters
----------
typ : pyemu.Matrix or derived type
the type of matrix to return
Returns
-------
pyemu.Matrix : pyemu.Matrix
"""
x = self.values.copy().astype(np.float)
return typ(x=x,row_names=list(self.index),
col_names=list(self.columns)) | ['def', 'as_pyemu_matrix', '(', 'self', ',', 'typ', '=', 'Matrix', ')', ':', 'x', '=', 'self', '.', 'values', '.', 'copy', '(', ')', '.', 'astype', '(', 'np', '.', 'float', ')', 'return', 'typ', '(', 'x', '=', 'x', ',', 'row_names', '=', 'list', '(', 'self', '.', 'index', ')', ',', 'col_names', '=', 'list', '(', 'self', '.', 'columns', ')', ')'] | Create a pyemu.Matrix from the Ensemble.
Parameters
----------
typ : pyemu.Matrix or derived type
the type of matrix to return
Returns
-------
pyemu.Matrix : pyemu.Matrix | ['Create', 'a', 'pyemu', '.', 'Matrix', 'from', 'the', 'Ensemble', '.'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/en.py#L58-L74 |
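A hedged sketch of as_pyemu_matrix above; pe stands for any existing pyemu Ensemble, for example a ParameterEnsemble loaded from a CSV of realizations.
import pyemu

def ensemble_to_cov(pe):
    # realization names stay as rows, parameter names as columns;
    # any Matrix-derived type can be requested via typ
    return pe.as_pyemu_matrix(typ=pyemu.Cov)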
2,958 | mikedh/trimesh | trimesh/visual/color.py | ColorVisuals.main_color | def main_color(self):
"""
What is the most commonly occurring color.
Returns
------------
color: (4,) uint8, most common color
"""
if self.kind is None:
return DEFAULT_COLOR
elif self.kind == 'face':
colors = self.face_colors
elif self.kind == 'vertex':
colors = self.vertex_colors
else:
raise ValueError('color kind incorrect!')
# find the unique colors
unique, inverse = grouping.unique_rows(colors)
# the most commonly occurring color, or mode
# this will be an index of inverse, not colors
mode_index = np.bincount(inverse).argmax()
color = colors[unique[mode_index]]
return color | python | def main_color(self):
"""
What is the most commonly occurring color.
Returns
------------
color: (4,) uint8, most common color
"""
if self.kind is None:
return DEFAULT_COLOR
elif self.kind == 'face':
colors = self.face_colors
elif self.kind == 'vertex':
colors = self.vertex_colors
else:
raise ValueError('color kind incorrect!')
# find the unique colors
unique, inverse = grouping.unique_rows(colors)
# the most commonly occurring color, or mode
# this will be an index of inverse, not colors
mode_index = np.bincount(inverse).argmax()
color = colors[unique[mode_index]]
return color | ['def', 'main_color', '(', 'self', ')', ':', 'if', 'self', '.', 'kind', 'is', 'None', ':', 'return', 'DEFAULT_COLOR', 'elif', 'self', '.', 'kind', '==', "'face'", ':', 'colors', '=', 'self', '.', 'face_colors', 'elif', 'self', '.', 'kind', '==', "'vertex'", ':', 'colors', '=', 'self', '.', 'vertex_colors', 'else', ':', 'raise', 'ValueError', '(', "'color kind incorrect!'", ')', '# find the unique colors', 'unique', ',', 'inverse', '=', 'grouping', '.', 'unique_rows', '(', 'colors', ')', '# the most commonly occurring color, or mode', '# this will be an index of inverse, not colors', 'mode_index', '=', 'np', '.', 'bincount', '(', 'inverse', ')', '.', 'argmax', '(', ')', 'color', '=', 'colors', '[', 'unique', '[', 'mode_index', ']', ']', 'return', 'color'] | What is the most commonly occurring color.
Returns
------------
color: (4,) uint8, most common color | ['What', 'is', 'the', 'most', 'commonly', 'occurring', 'color', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/visual/color.py#L410-L434 |
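A quick check of main_color above on a toy mesh; painting every face the same color makes that color the mode.
import trimesh

mesh = trimesh.creation.box()
mesh.visual.face_colors = [255, 0, 0, 255]   # broadcast red to every face
print(mesh.visual.main_color)                # -> [255   0   0 255]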
2,959 | farshidce/touchworks-python | touchworks/api/http.py | TouchWorks.set_patient_medhx_flag | def set_patient_medhx_flag(self, patient_id,
medhx_status):
"""
invokes TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG action
:param patient_id
:param medhx_status - Field in EEHR expects U, G, or D. SP defaults to Null and
errors out if included.
U=Unknown
G=Granted
D=Declined
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG,
patient_id=patient_id,
parameter1=medhx_status
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SET_PATIENT_MEDHX_FLAG)
return result | python | def set_patient_medhx_flag(self, patient_id,
medhx_status):
"""
invokes TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG action
:param patient_id
:param medhx_status - Field in EEHR expects U, G, or D. SP defaults to Null and
errors out if included.
U=Unknown
G=Granted
D=Declined
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG,
patient_id=patient_id,
parameter1=medhx_status
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SET_PATIENT_MEDHX_FLAG)
return result | ['def', 'set_patient_medhx_flag', '(', 'self', ',', 'patient_id', ',', 'medhx_status', ')', ':', 'magic', '=', 'self', '.', '_magic_json', '(', 'action', '=', 'TouchWorksMagicConstants', '.', 'ACTION_SET_PATIENT_MEDHX_FLAG', ',', 'patient_id', '=', 'patient_id', ',', 'parameter1', '=', 'medhx_status', ')', 'response', '=', 'self', '.', '_http_request', '(', 'TouchWorksEndPoints', '.', 'MAGIC_JSON', ',', 'data', '=', 'magic', ')', 'result', '=', 'self', '.', '_get_results_or_raise_if_magic_invalid', '(', 'magic', ',', 'response', ',', 'TouchWorksMagicConstants', '.', 'RESULT_SET_PATIENT_MEDHX_FLAG', ')', 'return', 'result'] | invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:param patient_id
:param medhx_status - Field in EEHR expects U, G, or D. SP defaults to Null and
errors out if included.
U=Unknown
G=Granted
D=Declined
:return: JSON response | ['invokes', 'TouchWorksMagicConstants', '.', 'ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT', 'action', ':', 'param', 'patient_id', ':', 'param', 'medhx_status', '-', 'Field', 'in', 'EEHR', 'expects', 'U', 'G', 'or', 'D', '.', 'SP', 'defaults', 'to', 'Null', 'and', 'errors', 'out', 'if', 'included', '.', 'U', '=', 'Unknown', 'G', '=', 'Granted', 'D', '=', 'Declined', ':', 'return', ':', 'JSON', 'response'] | train | https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L458-L480 |
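A hedged sketch of set_patient_medhx_flag above; the TouchWorks constructor arguments and the patient id are placeholders, not values documented in this row.
tw = TouchWorks('https://unity.example.com', 'svc_user', 'svc_password', 'my_app')   # assumed signature
result = tw.set_patient_medhx_flag(patient_id='12345', medhx_status='G')             # mark MedHx as Granted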
2,960 | asascience-open/paegan-transport | paegan/transport/particles/particle.py | Particle.age | def age(self, **kwargs):
"""
Age this particle.
parameters (optional, only one allowed):
days (default)
hours
minutes
seconds
"""
if kwargs.get('days', None) is not None:
self._age += kwargs.get('days')
return
if kwargs.get('hours', None) is not None:
self._age += kwargs.get('hours') / 24.
return
if kwargs.get('minutes', None) is not None:
self._age += kwargs.get('minutes') / 24. / 60.
return
if kwargs.get('seconds', None) is not None:
self._age += kwargs.get('seconds') / 24. / 60. / 60.
return
raise KeyError("Could not age particle, please specify 'days', 'hours', 'minutes', or 'seconds' parameter") | python | def age(self, **kwargs):
"""
Age this particle.
parameters (optional, only one allowed):
days (default)
hours
minutes
seconds
"""
if kwargs.get('days', None) is not None:
self._age += kwargs.get('days')
return
if kwargs.get('hours', None) is not None:
self._age += kwargs.get('hours') / 24.
return
if kwargs.get('minutes', None) is not None:
self._age += kwargs.get('minutes') / 24. / 60.
return
if kwargs.get('seconds', None) is not None:
self._age += kwargs.get('seconds') / 24. / 60. / 60.
return
raise KeyError("Could not age particle, please specify 'days', 'hours', 'minutes', or 'seconds' parameter") | ['def', 'age', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'kwargs', '.', 'get', '(', "'days'", ',', 'None', ')', 'is', 'not', 'None', ':', 'self', '.', '_age', '+=', 'kwargs', '.', 'get', '(', "'days'", ')', 'return', 'if', 'kwargs', '.', 'get', '(', "'hours'", ',', 'None', ')', 'is', 'not', 'None', ':', 'self', '.', '_age', '+=', 'kwargs', '.', 'get', '(', "'hours'", ')', '/', '24.', 'return', 'if', 'kwargs', '.', 'get', '(', "'minutes'", ',', 'None', ')', 'is', 'not', 'None', ':', 'self', '.', '_age', '+=', 'kwargs', '.', 'get', '(', "'minutes'", ')', '/', '24.', '/', '60.', 'return', 'if', 'kwargs', '.', 'get', '(', "'seconds'", ',', 'None', ')', 'is', 'not', 'None', ':', 'self', '.', '_age', '+=', 'kwargs', '.', 'get', '(', "'seconds'", ')', '/', '24.', '/', '60.', '/', '60.', 'return', 'raise', 'KeyError', '(', '"Could not age particle, please specify \'days\', \'hours\', \'minutes\', or \'seconds\' parameter"', ')'] | Age this particle.
parameters (optional, only one allowed):
days (default)
hours
minutes
seconds | ['Age', 'this', 'particle', '.'] | train | https://github.com/asascience-open/paegan-transport/blob/99a7f4ea24f0f42d9b34d1fb0e87ab2c49315bd3/paegan/transport/particles/particle.py#L203-L226 |
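A short sketch of the age API above; it assumes Particle can be constructed without arguments.
from paegan.transport.particles.particle import Particle

p = Particle()
p.age(hours=6)        # stored internally in days
p.age(minutes=30)
# p.age()             # no unit given -> raises KeyError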
2,961 | lago-project/lago | lago/virt.py | VirtEnv.prefixed_name | def prefixed_name(self, unprefixed_name, max_length=0):
"""
Returns a uuid prefixed identifier
Args:
unprefixed_name(str): Name to add a prefix to
max_length(int): maximum length of the resultant prefixed name,
will adapt the given name and the length of the uuid ot fit it
Returns:
str: prefixed identifier for the given unprefixed name
"""
if max_length == 0:
prefixed_name = '%s-%s' % (self.uuid[:8], unprefixed_name)
else:
if max_length < 6:
raise RuntimeError(
"Can't prefix with less than 6 chars (%s)" %
unprefixed_name
)
if max_length < 16:
_uuid = self.uuid[:4]
else:
_uuid = self.uuid[:8]
name_max_length = max_length - len(_uuid) - 1
if name_max_length < len(unprefixed_name):
hashed_name = hashlib.sha1(unprefixed_name).hexdigest()
unprefixed_name = hashed_name[:name_max_length]
prefixed_name = '%s-%s' % (_uuid, unprefixed_name)
return prefixed_name | python | def prefixed_name(self, unprefixed_name, max_length=0):
"""
Returns a uuid prefixed identifier
Args:
unprefixed_name(str): Name to add a prefix to
max_length(int): maximum length of the resultant prefixed name,
will adapt the given name and the length of the uuid ot fit it
Returns:
str: prefixed identifier for the given unprefixed name
"""
if max_length == 0:
prefixed_name = '%s-%s' % (self.uuid[:8], unprefixed_name)
else:
if max_length < 6:
raise RuntimeError(
"Can't prefix with less than 6 chars (%s)" %
unprefixed_name
)
if max_length < 16:
_uuid = self.uuid[:4]
else:
_uuid = self.uuid[:8]
name_max_length = max_length - len(_uuid) - 1
if name_max_length < len(unprefixed_name):
hashed_name = hashlib.sha1(unprefixed_name).hexdigest()
unprefixed_name = hashed_name[:name_max_length]
prefixed_name = '%s-%s' % (_uuid, unprefixed_name)
return prefixed_name | ['def', 'prefixed_name', '(', 'self', ',', 'unprefixed_name', ',', 'max_length', '=', '0', ')', ':', 'if', 'max_length', '==', '0', ':', 'prefixed_name', '=', "'%s-%s'", '%', '(', 'self', '.', 'uuid', '[', ':', '8', ']', ',', 'unprefixed_name', ')', 'else', ':', 'if', 'max_length', '<', '6', ':', 'raise', 'RuntimeError', '(', '"Can\'t prefix with less than 6 chars (%s)"', '%', 'unprefixed_name', ')', 'if', 'max_length', '<', '16', ':', '_uuid', '=', 'self', '.', 'uuid', '[', ':', '4', ']', 'else', ':', '_uuid', '=', 'self', '.', 'uuid', '[', ':', '8', ']', 'name_max_length', '=', 'max_length', '-', 'len', '(', '_uuid', ')', '-', '1', 'if', 'name_max_length', '<', 'len', '(', 'unprefixed_name', ')', ':', 'hashed_name', '=', 'hashlib', '.', 'sha1', '(', 'unprefixed_name', ')', '.', 'hexdigest', '(', ')', 'unprefixed_name', '=', 'hashed_name', '[', ':', 'name_max_length', ']', 'prefixed_name', '=', "'%s-%s'", '%', '(', '_uuid', ',', 'unprefixed_name', ')', 'return', 'prefixed_name'] | Returns a uuid pefixed identifier
Args:
unprefixed_name(str): Name to add a prefix to
max_length(int): maximum length of the resultant prefixed name,
will adapt the given name and the length of the uuid ot fit it
Returns:
str: prefixed identifier for the given unprefixed name | ['Returns', 'a', 'uuid', 'pefixed', 'identifier', 'Args', ':', 'unprefixed_name', '(', 'str', ')', ':', 'Name', 'to', 'add', 'a', 'prefix', 'to', 'max_length', '(', 'int', ')', ':', 'maximum', 'length', 'of', 'the', 'resultant', 'prefixed', 'name', 'will', 'adapt', 'the', 'given', 'name', 'and', 'the', 'length', 'of', 'the', 'uuid', 'ot', 'fit', 'it', 'Returns', ':', 'str', ':', 'prefixed', 'identifier', 'for', 'the', 'given', 'unprefixed', 'name'] | train | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/virt.py#L122-L153 |
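A hedged wrapper sketch for prefixed_name above; env is a lago VirtEnv, and the 64-character cap is an illustrative limit rather than anything stated in the row.
def domain_name(env, vm_name):
    # short names keep the uuid prefix verbatim; long ones get sha1-hashed to fit
    return env.prefixed_name(vm_name, max_length=64)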
2,962 | acutesoftware/AIKIF | aikif/dataTools/cls_datatable.py | DataTable.calc_percentiles | def calc_percentiles(self, col_name, where_col_list, where_value_list):
"""
calculates the percentiles of col_name
WHERE [where_col_list] = [where_value_list]
"""
#col_data = self.get_col_data_by_name(col_name)
col_data = self.select_where(where_col_list, where_value_list, col_name)
#print('calc_percentiles: col_data = ', col_data, ' where_col_list = ', where_col_list, ', where_value_list = ', where_value_list)
if len(col_data) == 0:
#print("Nothing to calculate")
return 0,0,0
else:
first = self.percentile(col_data, .25)
third = self.percentile(col_data, .75)
median = self.percentile(col_data, .50)
#print('CALC_PERCENTILES = first, third, median ', first, third, median )
return first, third, median | python | def calc_percentiles(self, col_name, where_col_list, where_value_list):
"""
calculates the percentiles of col_name
WHERE [where_col_list] = [where_value_list]
"""
#col_data = self.get_col_data_by_name(col_name)
col_data = self.select_where(where_col_list, where_value_list, col_name)
#print('calc_percentiles: col_data = ', col_data, ' where_col_list = ', where_col_list, ', where_value_list = ', where_value_list)
if len(col_data) == 0:
#print("Nothing to calculate")
return 0,0,0
else:
first = self.percentile(col_data, .25)
third = self.percentile(col_data, .75)
median = self.percentile(col_data, .50)
#print('CALC_PERCENTILES = first, third, median ', first, third, median )
return first, third, median | ['def', 'calc_percentiles', '(', 'self', ',', 'col_name', ',', 'where_col_list', ',', 'where_value_list', ')', ':', '#col_data = self.get_col_data_by_name(col_name)', 'col_data', '=', 'self', '.', 'select_where', '(', 'where_col_list', ',', 'where_value_list', ',', 'col_name', ')', "#print('calc_percentiles: col_data = ', col_data, ' where_col_list = ', where_col_list, ', where_value_list = ', where_value_list)", 'if', 'len', '(', 'col_data', ')', '==', '0', ':', '#print("Nothing to calculate")', 'return', '0', ',', '0', ',', '0', 'else', ':', 'first', '=', 'self', '.', 'percentile', '(', 'col_data', ',', '.25', ')', 'third', '=', 'self', '.', 'percentile', '(', 'col_data', ',', '.75', ')', 'median', '=', 'self', '.', 'percentile', '(', 'col_data', ',', '.50', ')', "#print('CALC_PERCENTILES = first, third, median ', first, third, median )", 'return', 'first', ',', 'third', ',', 'median'] | calculates the percentiles of col_name
WHERE [where_col_list] = [where_value_list] | ['calculates', 'the', 'percentiles', 'of', 'col_name', 'WHERE', '[', 'where_col_list', ']', '=', '[', 'where_value_list', ']'] | train | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L188-L204 |
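A hedged sketch of calc_percentiles above; the DataTable constructor arguments and the CSV layout ('region' and 'sales' columns) are assumptions.
from aikif.dataTools.cls_datatable import DataTable

tbl = DataTable('sales.csv', ',')    # assumed (filename, delimiter) signature
q1, q3, median = tbl.calc_percentiles('sales', ['region'], ['North'])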
2,963 | DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/mongo_client.py | MongoClient._reset_on_error | def _reset_on_error(self, server, func, *args, **kwargs):
"""Execute an operation. Reset the server on network error.
Returns fn()'s return value on success. On error, clears the server's
pool and marks the server Unknown.
Re-raises any exception thrown by fn().
"""
try:
return func(*args, **kwargs)
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
raise
except ConnectionFailure:
self.__reset_server(server.description.address)
raise | python | def _reset_on_error(self, server, func, *args, **kwargs):
"""Execute an operation. Reset the server on network error.
Returns fn()'s return value on success. On error, clears the server's
pool and marks the server Unknown.
Re-raises any exception thrown by fn().
"""
try:
return func(*args, **kwargs)
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
raise
except ConnectionFailure:
self.__reset_server(server.description.address)
raise | ['def', '_reset_on_error', '(', 'self', ',', 'server', ',', 'func', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'NetworkTimeout', ':', "# The socket has been closed. Don't reset the server.", 'raise', 'except', 'ConnectionFailure', ':', 'self', '.', '__reset_server', '(', 'server', '.', 'description', '.', 'address', ')', 'raise'] | Execute an operation. Reset the server on network error.
Returns fn()'s return value on success. On error, clears the server's
pool and marks the server Unknown.
Re-raises any exception thrown by fn(). | ['Execute', 'an', 'operation', '.', 'Reset', 'the', 'server', 'on', 'network', 'error', '.'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/mongo_client.py#L952-L967 |
2,964 | offu/WeRoBot | werobot/client.py | Client.delete_custom_service_account | def delete_custom_service_account(self, account, nickname, password):
"""
Delete a customer service account.
:param account: username of the customer service account
:param nickname: nickname of the customer service account
:param password: password of the customer service account
:return: the returned JSON data packet
"""
return self.post(
url="https://api.weixin.qq.com/customservice/kfaccount/del",
data={
"kf_account": account,
"nickname": nickname,
"password": password
}
) | python | def delete_custom_service_account(self, account, nickname, password):
"""
Delete a customer service account.
:param account: username of the customer service account
:param nickname: nickname of the customer service account
:param password: password of the customer service account
:return: the returned JSON data packet
"""
return self.post(
url="https://api.weixin.qq.com/customservice/kfaccount/del",
data={
"kf_account": account,
"nickname": nickname,
"password": password
}
) | ['def', 'delete_custom_service_account', '(', 'self', ',', 'account', ',', 'nickname', ',', 'password', ')', ':', 'return', 'self', '.', 'post', '(', 'url', '=', '"https://api.weixin.qq.com/customservice/kfaccount/del"', ',', 'data', '=', '{', '"kf_account"', ':', 'account', ',', '"nickname"', ':', 'nickname', ',', '"password"', ':', 'password', '}', ')'] | 删除客服帐号。
:param account: 客服账号的用户名
:param nickname: 客服账号的昵称
:param password: 客服账号的密码
:return: 返回的 JSON 数据包 | ['删除客服帐号。'] | train | https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/client.py#L310-L326 |
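A usage sketch for delete_custom_service_account above; the app credentials and the customer service account details are placeholders.
from werobot import WeRoBot

robot = WeRoBot()
robot.config['APP_ID'] = 'wx0123456789abcdef'   # placeholder
robot.config['APP_SECRET'] = 'app-secret'       # placeholder
robot.client.delete_custom_service_account(
    account='kf2001@myaccount', nickname='helpdesk', password='kf-password')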
2,965 | datasift/datasift-python | datasift/push.py | Push.create_from_historics | def create_from_historics(self, historics_id, name, output_type, output_params, initial_status=None, start=None,
end=None):
""" Create a new push subscription using the given Historic ID.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushcreate
:param historics_id: The ID of a Historics query
:type historics_id: str
:param name: The name to give the newly created subscription
:type name: str
:param output_type: One of the supported output types e.g. s3
:type output_type: str
:param output_params: set of parameters required for the given output type, see dev.datasift.com
:type output_params: dict
:param initial_status: The initial status of the subscription, active, paused or waiting_for_start
:type initial_status: str
:param start: Optionally specifies when the subscription should start
:type start: int
:param end: Optionally specifies when the subscription should end
:type end: int
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
return self._create(False, historics_id, name, output_type, output_params, initial_status, start, end) | python | def create_from_historics(self, historics_id, name, output_type, output_params, initial_status=None, start=None,
end=None):
""" Create a new push subscription using the given Historic ID.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushcreate
:param historics_id: The ID of a Historics query
:type historics_id: str
:param name: The name to give the newly created subscription
:type name: str
:param output_type: One of the supported output types e.g. s3
:type output_type: str
:param output_params: set of parameters required for the given output type, see dev.datasift.com
:type output_params: dict
:param initial_status: The initial status of the subscription, active, paused or waiting_for_start
:type initial_status: str
:param start: Optionally specifies when the subscription should start
:type start: int
:param end: Optionally specifies when the subscription should end
:type end: int
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
return self._create(False, historics_id, name, output_type, output_params, initial_status, start, end) | ['def', 'create_from_historics', '(', 'self', ',', 'historics_id', ',', 'name', ',', 'output_type', ',', 'output_params', ',', 'initial_status', '=', 'None', ',', 'start', '=', 'None', ',', 'end', '=', 'None', ')', ':', 'return', 'self', '.', '_create', '(', 'False', ',', 'historics_id', ',', 'name', ',', 'output_type', ',', 'output_params', ',', 'initial_status', ',', 'start', ',', 'end', ')'] | Create a new push subscription using the given Historic ID.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushcreate
:param historics_id: The ID of a Historics query
:type historics_id: str
:param name: The name to give the newly created subscription
:type name: str
:param output_type: One of the supported output types e.g. s3
:type output_type: str
:param output_params: set of parameters required for the given output type, see dev.datasift.com
:type output_params: dict
:param initial_status: The initial status of the subscription, active, paused or waiting_for_start
:type initial_status: str
:param start: Optionally specifies when the subscription should start
:type start: int
:param end: Optionally specifies when the subscription should end
:type end: int
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` | ['Create', 'a', 'new', 'push', 'subscription', 'using', 'the', 'given', 'Historic', 'ID', '.'] | train | https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/push.py#L69-L93 |
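A hedged sketch of create_from_historics above; the credentials, the Historics id, and the S3 output_params keys are placeholders modeled on DataSift's connector docs rather than taken from this row.
from datasift import Client

client = Client('your_username', 'your_api_key')
subscription = client.push.create_from_historics(
    historics_id='4ff0c9b2a2a1...',          # hypothetical Historics query id
    name='archive-to-s3',
    output_type='s3',
    output_params={'bucket': 'my-bucket', 'directory': 'datasift',
                   'acl': 'private', 'delivery_frequency': 0,
                   'auth.access_key': '...', 'auth.secret_key': '...'})
print(subscription['id'], subscription['status'])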
2,966 | BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.interpolate | def interpolate(T0, T1, t):
"""Return an interpolation of two RigidTransforms.
Parameters
----------
T0 : :obj:`RigidTransform`
The first RigidTransform to interpolate.
T1 : :obj:`RigidTransform`
The second RigidTransform to interpolate.
t : float
The interpolation step in [0,1]. 0 favors T0, 1 favors T1.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If the to_frame of the two RigidTransforms are not identical.
"""
if T0.to_frame != T1.to_frame:
raise ValueError('Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'.format(T0.to_frame, T1.to_frame))
dq0 = T0.dual_quaternion
dq1 = T1.dual_quaternion
dqt = DualQuaternion.interpolate(dq0, dq1, t)
from_frame = "{0}_{1}_{2}".format(T0.from_frame, T1.from_frame, t)
return RigidTransform.transform_from_dual_quaternion(dqt, from_frame, T0.to_frame) | python | def interpolate(T0, T1, t):
"""Return an interpolation of two RigidTransforms.
Parameters
----------
T0 : :obj:`RigidTransform`
The first RigidTransform to interpolate.
T1 : :obj:`RigidTransform`
The second RigidTransform to interpolate.
t : float
The interpolation step in [0,1]. 0 favors T0, 1 favors T1.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If the to_frame of the two RigidTransforms are not identical.
"""
if T0.to_frame != T1.to_frame:
raise ValueError('Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'.format(T0.to_frame, T1.to_frame))
dq0 = T0.dual_quaternion
dq1 = T1.dual_quaternion
dqt = DualQuaternion.interpolate(dq0, dq1, t)
from_frame = "{0}_{1}_{2}".format(T0.from_frame, T1.from_frame, t)
return RigidTransform.transform_from_dual_quaternion(dqt, from_frame, T0.to_frame) | ['def', 'interpolate', '(', 'T0', ',', 'T1', ',', 't', ')', ':', 'if', 'T0', '.', 'to_frame', '!=', 'T1', '.', 'to_frame', ':', 'raise', 'ValueError', '(', "'Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'", '.', 'format', '(', 'T0', '.', 'to_frame', ',', 'T1', '.', 'to_frame', ')', ')', 'dq0', '=', 'T0', '.', 'dual_quaternion', 'dq1', '=', 'T1', '.', 'dual_quaternion', 'dqt', '=', 'DualQuaternion', '.', 'interpolate', '(', 'dq0', ',', 'dq1', ',', 't', ')', 'from_frame', '=', '"{0}_{1}_{2}"', '.', 'format', '(', 'T0', '.', 'from_frame', ',', 'T1', '.', 'from_frame', ',', 't', ')', 'return', 'RigidTransform', '.', 'transform_from_dual_quaternion', '(', 'dqt', ',', 'from_frame', ',', 'T0', '.', 'to_frame', ')'] | Return an interpolation of two RigidTransforms.
Parameters
----------
T0 : :obj:`RigidTransform`
The first RigidTransform to interpolate.
T1 : :obj:`RigidTransform`
The second RigidTransform to interpolate.
t : float
The interpolation step in [0,1]. 0 favors T0, 1 favors T1.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If the to_frame of the two RigidTransforms are not identical. | ['Return', 'an', 'interpolation', 'of', 'two', 'RigidTransforms', '.'] | train | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L973-L1004 |
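A small sketch of the static interpolate call above; the rotation and translation values are arbitrary, and only the shared to_frame matters for the check in the body.
# Blend two poses that share the same to_frame; values are illustrative
import numpy as np
from autolab_core import RigidTransform

T0 = RigidTransform(rotation=np.eye(3), translation=np.array([0., 0., 0.]),
                    from_frame='obj', to_frame='world')
T1 = RigidTransform(rotation=np.eye(3), translation=np.array([1., 0., 0.]),
                    from_frame='obj', to_frame='world')
T_mid = RigidTransform.interpolate(T0, T1, 0.5)   # halfway between the two poses
print(T_mid.translation, T_mid.from_frame)        # from_frame becomes "obj_obj_0.5"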
2,967 | googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/batch.py | WriteBatch.set | def set(self, reference, document_data, merge=False):
"""Add a "change" to replace a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.set` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
"""
if merge is not False:
write_pbs = _helpers.pbs_for_set_with_merge(
reference._document_path, document_data, merge
)
else:
write_pbs = _helpers.pbs_for_set_no_merge(
reference._document_path, document_data
)
self._add_write_pbs(write_pbs) | python | def set(self, reference, document_data, merge=False):
"""Add a "change" to replace a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.set` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
"""
if merge is not False:
write_pbs = _helpers.pbs_for_set_with_merge(
reference._document_path, document_data, merge
)
else:
write_pbs = _helpers.pbs_for_set_no_merge(
reference._document_path, document_data
)
self._add_write_pbs(write_pbs) | ['def', 'set', '(', 'self', ',', 'reference', ',', 'document_data', ',', 'merge', '=', 'False', ')', ':', 'if', 'merge', 'is', 'not', 'False', ':', 'write_pbs', '=', '_helpers', '.', 'pbs_for_set_with_merge', '(', 'reference', '.', '_document_path', ',', 'document_data', ',', 'merge', ')', 'else', ':', 'write_pbs', '=', '_helpers', '.', 'pbs_for_set_no_merge', '(', 'reference', '.', '_document_path', ',', 'document_data', ')', 'self', '.', '_add_write_pbs', '(', 'write_pbs', ')'] | Add a "change" to replace a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.set` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document. | ['Add', 'a', 'change', 'to', 'replace', 'a', 'document', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/batch.py#L65-L91 |
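A sketch of exercising the batch set() above through the public Firestore client; it assumes default credentials and that client.batch() returns this WriteBatch class, and the collection/document names are placeholders.
# Queue two set() writes -- the second uses merge=True so it patches instead of replacing
from google.cloud import firestore

client = firestore.Client()                    # assumes default credentials
batch = client.batch()
doc = client.collection("cities").document("SF")
batch.set(doc, {"name": "San Francisco", "population": 860000})
batch.set(doc, {"country": "USA"}, merge=True)
batch.commit()                                 # both writes are applied together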
2,968 | woolfson-group/isambard | isambard/ampal/specifications/polymer_specs/helix.py | HelicalHelix.curve | def curve(self):
"""Curve of the super helix."""
return HelicalCurve.pitch_and_radius(
self.major_pitch, self.major_radius,
handedness=self.major_handedness) | python | def curve(self):
"""Curve of the super helix."""
return HelicalCurve.pitch_and_radius(
self.major_pitch, self.major_radius,
handedness=self.major_handedness) | ['def', 'curve', '(', 'self', ')', ':', 'return', 'HelicalCurve', '.', 'pitch_and_radius', '(', 'self', '.', 'major_pitch', ',', 'self', '.', 'major_radius', ',', 'handedness', '=', 'self', '.', 'major_handedness', ')'] | Curve of the super helix. | ['Curve', 'of', 'the', 'super', 'helix', '.'] | train | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/specifications/polymer_specs/helix.py#L370-L374 |
2,969 | ajenti/jadi | jadi/jadi.py | interface | def interface(cls):
'''
Marks the decorated class as an abstract interface.
Injects following classmethods:
.. py:method:: .all(context)
Returns a list of instances of each component in the ``context`` implementing this ``@interface``
:param context: context to look in
:type context: :class:`Context`
:returns: list(``cls``)
.. py:method:: .any(context)
Returns the first suitable instance implementing this ``@interface`` or raises :exc:`NoImplementationError` if none is available.
:param context: context to look in
:type context: :class:`Context`
:returns: ``cls``
.. py:method:: .classes()
Returns a list of classes implementing this ``@interface``
:returns: list(class)
'''
if not cls:
return None
cls.implementations = []
# Inject methods
def _all(cls, context, ignore_exceptions=False):
return list(context.get_components(cls, ignore_exceptions=ignore_exceptions))
cls.all = _all.__get__(cls)
def _any(cls, context):
instances = cls.all(context)
if instances:
return instances[0]
raise NoImplementationError(cls)
cls.any = _any.__get__(cls)
def _classes(cls):
return list(cls.implementations)
cls.classes = _classes.__get__(cls)
log.debug('Registering [%s] (interface)', get_fqdn(cls))
return cls | python | def interface(cls):
'''
Marks the decorated class as an abstract interface.
Injects following classmethods:
.. py:method:: .all(context)
Returns a list of instances of each component in the ``context`` implementing this ``@interface``
:param context: context to look in
:type context: :class:`Context`
:returns: list(``cls``)
.. py:method:: .any(context)
Returns the first suitable instance implementing this ``@interface`` or raises :exc:`NoImplementationError` if none is available.
:param context: context to look in
:type context: :class:`Context`
:returns: ``cls``
.. py:method:: .classes()
Returns a list of classes implementing this ``@interface``
:returns: list(class)
'''
if not cls:
return None
cls.implementations = []
# Inject methods
def _all(cls, context, ignore_exceptions=False):
return list(context.get_components(cls, ignore_exceptions=ignore_exceptions))
cls.all = _all.__get__(cls)
def _any(cls, context):
instances = cls.all(context)
if instances:
return instances[0]
raise NoImplementationError(cls)
cls.any = _any.__get__(cls)
def _classes(cls):
return list(cls.implementations)
cls.classes = _classes.__get__(cls)
log.debug('Registering [%s] (interface)', get_fqdn(cls))
return cls | ['def', 'interface', '(', 'cls', ')', ':', 'if', 'not', 'cls', ':', 'return', 'None', 'cls', '.', 'implementations', '=', '[', ']', '# Inject methods', 'def', '_all', '(', 'cls', ',', 'context', ',', 'ignore_exceptions', '=', 'False', ')', ':', 'return', 'list', '(', 'context', '.', 'get_components', '(', 'cls', ',', 'ignore_exceptions', '=', 'ignore_exceptions', ')', ')', 'cls', '.', 'all', '=', '_all', '.', '__get__', '(', 'cls', ')', 'def', '_any', '(', 'cls', ',', 'context', ')', ':', 'instances', '=', 'cls', '.', 'all', '(', 'context', ')', 'if', 'instances', ':', 'return', 'instances', '[', '0', ']', 'raise', 'NoImplementationError', '(', 'cls', ')', 'cls', '.', 'any', '=', '_any', '.', '__get__', '(', 'cls', ')', 'def', '_classes', '(', 'cls', ')', ':', 'return', 'list', '(', 'cls', '.', 'implementations', ')', 'cls', '.', 'classes', '=', '_classes', '.', '__get__', '(', 'cls', ')', 'log', '.', 'debug', '(', "'Registering [%s] (interface)'", ',', 'get_fqdn', '(', 'cls', ')', ')', 'return', 'cls'] | Marks the decorated class as an abstract interface.
Injects following classmethods:
.. py:method:: .all(context)
Returns a list of instances of each component in the ``context`` implementing this ``@interface``
:param context: context to look in
:type context: :class:`Context`
:returns: list(``cls``)
.. py:method:: .any(context)
Returns the first suitable instance implementing this ``@interface`` or raises :exc:`NoImplementationError` if none is available.
:param context: context to look in
:type context: :class:`Context`
:returns: ``cls``
.. py:method:: .classes()
Returns a list of classes implementing this ``@interface``
:returns: list(class) | ['Marks', 'the', 'decorated', 'class', 'as', 'an', 'abstract', 'interface', '.'] | train | https://github.com/ajenti/jadi/blob/db76e1c5330672d282f03787fedcd702c04b007f/jadi/jadi.py#L86-L138 |
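A sketch of the decorator in use; it assumes jadi also exports component and Context, which is how the injected all/any/classes helpers would normally be exercised.
# Declare an interface, register one implementation, then resolve it from a Context
from jadi import interface, component, Context   # component/Context assumed exported

@interface
class Greeter(object):
    def greet(self):
        raise NotImplementedError

@component(Greeter)
class HelloGreeter(Greeter):
    def greet(self):
        return 'hello'

ctx = Context()
print(Greeter.any(ctx).greet())   # first implementation found in the context
print(Greeter.classes())          # [HelloGreeter]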
2,970 | thespacedoctor/fundamentals | fundamentals/renderer/list_of_dictionaries.py | list_of_dictionaries._list_of_dictionaries_to_csv | def _list_of_dictionaries_to_csv(
self,
csvType="human"):
"""Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file
"""
self.log.debug(
'starting the ``_list_of_dictionaries_to_csv`` function')
if not len(self.listOfDictionaries):
return "NO MATCH"
dataCopy = copy.deepcopy(self.listOfDictionaries)
tableColumnNames = dataCopy[0].keys()
columnWidths = []
columnWidths[:] = [len(tableColumnNames[i])
for i in range(len(tableColumnNames))]
output = io.BytesIO()
# setup csv styles
if csvType == "machine":
delimiter = ","
elif csvType in ["human", "markdown"]:
delimiter = "|"
elif csvType in ["reST"]:
delimiter = "|"
if csvType in ["markdown"]:
writer = csv.writer(output, delimiter=delimiter,
quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
writer = csv.writer(output, dialect='excel', delimiter=delimiter,
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
if csvType in ["markdown"]:
dividerWriter = csv.writer(
output, delimiter="|", quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
dividerWriter = csv.writer(output, dialect='excel', delimiter="+",
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
# add column names to csv
header = []
divider = []
rstDivider = []
allRows = []
# clean up data
for row in dataCopy:
for c in tableColumnNames:
if isinstance(row[c], float) or isinstance(row[c], Decimal):
row[c] = "%0.9g" % row[c]
elif isinstance(row[c], datetime):
thisDate = str(row[c])[:10]
row[c] = "%(thisDate)s" % locals()
# set the column widths
for row in dataCopy:
for i, c in enumerate(tableColumnNames):
if len(unicode(row[c])) > columnWidths[i]:
columnWidths[i] = len(unicode(row[c]))
# table borders for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
for i, c in enumerate(tableColumnNames):
if csvType == "machine":
header.append(c)
elif csvType in ["human", "markdown", "reST"]:
header.append(
c.ljust(columnWidths[i] + 2).rjust(columnWidths[i] + 3))
divider.append('-' * (columnWidths[i] + 3))
rstDivider.append('=' * (columnWidths[i] + 3))
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
# fill in the data
for row in dataCopy:
thisRow = []
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
for i, c in enumerate(tableColumnNames):
if csvType in ["human", "markdown", "reST"]:
if row[c] == None:
row[c] = ""
row[c] = unicode(unicode(row[c]).ljust(columnWidths[i] + 2)
.rjust(columnWidths[i] + 3))
thisRow.append(row[c])
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
allRows.append(thisRow)
if csvType in ["reST"]:
allRows.append(divider)
if csvType == "machine":
writer.writerow(header)
if csvType in ["reST"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(rstDivider)
if csvType in ["human"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(divider)
elif csvType in ["markdown"]:
writer.writerow(header)
dividerWriter.writerow(divider)
# write out the data
writer.writerows(allRows)
# table border for human readable
if csvType in ["human"]:
dividerWriter.writerow(divider)
output = output.getvalue()
output = output.strip()
if csvType in ["markdown"]:
output = output.replace("|--", "|:-")
if csvType in ["reST"]:
output = output.replace("|--", "+--").replace("--|", "--+")
self.log.debug(
'completed the ``_list_of_dictionaries_to_csv`` function')
return output | python | def _list_of_dictionaries_to_csv(
self,
csvType="human"):
"""Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file
"""
self.log.debug(
'starting the ``_list_of_dictionaries_to_csv`` function')
if not len(self.listOfDictionaries):
return "NO MATCH"
dataCopy = copy.deepcopy(self.listOfDictionaries)
tableColumnNames = dataCopy[0].keys()
columnWidths = []
columnWidths[:] = [len(tableColumnNames[i])
for i in range(len(tableColumnNames))]
output = io.BytesIO()
# setup csv styles
if csvType == "machine":
delimiter = ","
elif csvType in ["human", "markdown"]:
delimiter = "|"
elif csvType in ["reST"]:
delimiter = "|"
if csvType in ["markdown"]:
writer = csv.writer(output, delimiter=delimiter,
quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
writer = csv.writer(output, dialect='excel', delimiter=delimiter,
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
if csvType in ["markdown"]:
dividerWriter = csv.writer(
output, delimiter="|", quoting=csv.QUOTE_NONE, doublequote=False, quotechar='"', escapechar="\\", lineterminator="\n")
else:
dividerWriter = csv.writer(output, dialect='excel', delimiter="+",
quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n")
# add column names to csv
header = []
divider = []
rstDivider = []
allRows = []
# clean up data
for row in dataCopy:
for c in tableColumnNames:
if isinstance(row[c], float) or isinstance(row[c], Decimal):
row[c] = "%0.9g" % row[c]
elif isinstance(row[c], datetime):
thisDate = str(row[c])[:10]
row[c] = "%(thisDate)s" % locals()
# set the column widths
for row in dataCopy:
for i, c in enumerate(tableColumnNames):
if len(unicode(row[c])) > columnWidths[i]:
columnWidths[i] = len(unicode(row[c]))
# table borders for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
for i, c in enumerate(tableColumnNames):
if csvType == "machine":
header.append(c)
elif csvType in ["human", "markdown", "reST"]:
header.append(
c.ljust(columnWidths[i] + 2).rjust(columnWidths[i] + 3))
divider.append('-' * (columnWidths[i] + 3))
rstDivider.append('=' * (columnWidths[i] + 3))
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
header.append("")
divider.append("")
rstDivider.append("")
# fill in the data
for row in dataCopy:
thisRow = []
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
for i, c in enumerate(tableColumnNames):
if csvType in ["human", "markdown", "reST"]:
if row[c] == None:
row[c] = ""
row[c] = unicode(unicode(row[c]).ljust(columnWidths[i] + 2)
.rjust(columnWidths[i] + 3))
thisRow.append(row[c])
# table border for human readable
if csvType in ["human", "markdown", "reST"]:
thisRow.append("")
allRows.append(thisRow)
if csvType in ["reST"]:
allRows.append(divider)
if csvType == "machine":
writer.writerow(header)
if csvType in ["reST"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(rstDivider)
if csvType in ["human"]:
dividerWriter.writerow(divider)
writer.writerow(header)
dividerWriter.writerow(divider)
elif csvType in ["markdown"]:
writer.writerow(header)
dividerWriter.writerow(divider)
# write out the data
writer.writerows(allRows)
# table border for human readable
if csvType in ["human"]:
dividerWriter.writerow(divider)
output = output.getvalue()
output = output.strip()
if csvType in ["markdown"]:
output = output.replace("|--", "|:-")
if csvType in ["reST"]:
output = output.replace("|--", "+--").replace("--|", "--+")
self.log.debug(
'completed the ``_list_of_dictionaries_to_csv`` function')
return output | ['def', '_list_of_dictionaries_to_csv', '(', 'self', ',', 'csvType', '=', '"human"', ')', ':', 'self', '.', 'log', '.', 'debug', '(', "'starting the ``_list_of_dictionaries_to_csv`` function'", ')', 'if', 'not', 'len', '(', 'self', '.', 'listOfDictionaries', ')', ':', 'return', '"NO MATCH"', 'dataCopy', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'listOfDictionaries', ')', 'tableColumnNames', '=', 'dataCopy', '[', '0', ']', '.', 'keys', '(', ')', 'columnWidths', '=', '[', ']', 'columnWidths', '[', ':', ']', '=', '[', 'len', '(', 'tableColumnNames', '[', 'i', ']', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'tableColumnNames', ')', ')', ']', 'output', '=', 'io', '.', 'BytesIO', '(', ')', '# setup csv styles', 'if', 'csvType', '==', '"machine"', ':', 'delimiter', '=', '","', 'elif', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ']', ':', 'delimiter', '=', '"|"', 'elif', 'csvType', 'in', '[', '"reST"', ']', ':', 'delimiter', '=', '"|"', 'if', 'csvType', 'in', '[', '"markdown"', ']', ':', 'writer', '=', 'csv', '.', 'writer', '(', 'output', ',', 'delimiter', '=', 'delimiter', ',', 'quoting', '=', 'csv', '.', 'QUOTE_NONE', ',', 'doublequote', '=', 'False', ',', 'quotechar', '=', '\'"\'', ',', 'escapechar', '=', '"\\\\"', ',', 'lineterminator', '=', '"\\n"', ')', 'else', ':', 'writer', '=', 'csv', '.', 'writer', '(', 'output', ',', 'dialect', '=', "'excel'", ',', 'delimiter', '=', 'delimiter', ',', 'quotechar', '=', '\'"\'', ',', 'quoting', '=', 'csv', '.', 'QUOTE_MINIMAL', ',', 'lineterminator', '=', '"\\n"', ')', 'if', 'csvType', 'in', '[', '"markdown"', ']', ':', 'dividerWriter', '=', 'csv', '.', 'writer', '(', 'output', ',', 'delimiter', '=', '"|"', ',', 'quoting', '=', 'csv', '.', 'QUOTE_NONE', ',', 'doublequote', '=', 'False', ',', 'quotechar', '=', '\'"\'', ',', 'escapechar', '=', '"\\\\"', ',', 'lineterminator', '=', '"\\n"', ')', 'else', ':', 'dividerWriter', '=', 'csv', '.', 'writer', '(', 'output', ',', 'dialect', '=', "'excel'", ',', 'delimiter', '=', '"+"', ',', 'quotechar', '=', '\'"\'', ',', 'quoting', '=', 'csv', '.', 'QUOTE_MINIMAL', ',', 'lineterminator', '=', '"\\n"', ')', '# add column names to csv', 'header', '=', '[', ']', 'divider', '=', '[', ']', 'rstDivider', '=', '[', ']', 'allRows', '=', '[', ']', '# clean up data', 'for', 'row', 'in', 'dataCopy', ':', 'for', 'c', 'in', 'tableColumnNames', ':', 'if', 'isinstance', '(', 'row', '[', 'c', ']', ',', 'float', ')', 'or', 'isinstance', '(', 'row', '[', 'c', ']', ',', 'Decimal', ')', ':', 'row', '[', 'c', ']', '=', '"%0.9g"', '%', 'row', '[', 'c', ']', 'elif', 'isinstance', '(', 'row', '[', 'c', ']', ',', 'datetime', ')', ':', 'thisDate', '=', 'str', '(', 'row', '[', 'c', ']', ')', '[', ':', '10', ']', 'row', '[', 'c', ']', '=', '"%(thisDate)s"', '%', 'locals', '(', ')', '# set the column widths', 'for', 'row', 'in', 'dataCopy', ':', 'for', 'i', ',', 'c', 'in', 'enumerate', '(', 'tableColumnNames', ')', ':', 'if', 'len', '(', 'unicode', '(', 'row', '[', 'c', ']', ')', ')', '>', 'columnWidths', '[', 'i', ']', ':', 'columnWidths', '[', 'i', ']', '=', 'len', '(', 'unicode', '(', 'row', '[', 'c', ']', ')', ')', '# table borders for human readable', 'if', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ',', '"reST"', ']', ':', 'header', '.', 'append', '(', '""', ')', 'divider', '.', 'append', '(', '""', ')', 'rstDivider', '.', 'append', '(', '""', ')', 'for', 'i', ',', 'c', 'in', 'enumerate', '(', 'tableColumnNames', ')', ':', 'if', 'csvType', '==', '"machine"', ':', 'header', '.', 'append', '(', 
'c', ')', 'elif', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ',', '"reST"', ']', ':', 'header', '.', 'append', '(', 'c', '.', 'ljust', '(', 'columnWidths', '[', 'i', ']', '+', '2', ')', '.', 'rjust', '(', 'columnWidths', '[', 'i', ']', '+', '3', ')', ')', 'divider', '.', 'append', '(', "'-'", '*', '(', 'columnWidths', '[', 'i', ']', '+', '3', ')', ')', 'rstDivider', '.', 'append', '(', "'='", '*', '(', 'columnWidths', '[', 'i', ']', '+', '3', ')', ')', '# table border for human readable', 'if', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ',', '"reST"', ']', ':', 'header', '.', 'append', '(', '""', ')', 'divider', '.', 'append', '(', '""', ')', 'rstDivider', '.', 'append', '(', '""', ')', '# fill in the data', 'for', 'row', 'in', 'dataCopy', ':', 'thisRow', '=', '[', ']', '# table border for human readable', 'if', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ',', '"reST"', ']', ':', 'thisRow', '.', 'append', '(', '""', ')', 'for', 'i', ',', 'c', 'in', 'enumerate', '(', 'tableColumnNames', ')', ':', 'if', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ',', '"reST"', ']', ':', 'if', 'row', '[', 'c', ']', '==', 'None', ':', 'row', '[', 'c', ']', '=', '""', 'row', '[', 'c', ']', '=', 'unicode', '(', 'unicode', '(', 'row', '[', 'c', ']', ')', '.', 'ljust', '(', 'columnWidths', '[', 'i', ']', '+', '2', ')', '.', 'rjust', '(', 'columnWidths', '[', 'i', ']', '+', '3', ')', ')', 'thisRow', '.', 'append', '(', 'row', '[', 'c', ']', ')', '# table border for human readable', 'if', 'csvType', 'in', '[', '"human"', ',', '"markdown"', ',', '"reST"', ']', ':', 'thisRow', '.', 'append', '(', '""', ')', 'allRows', '.', 'append', '(', 'thisRow', ')', 'if', 'csvType', 'in', '[', '"reST"', ']', ':', 'allRows', '.', 'append', '(', 'divider', ')', 'if', 'csvType', '==', '"machine"', ':', 'writer', '.', 'writerow', '(', 'header', ')', 'if', 'csvType', 'in', '[', '"reST"', ']', ':', 'dividerWriter', '.', 'writerow', '(', 'divider', ')', 'writer', '.', 'writerow', '(', 'header', ')', 'dividerWriter', '.', 'writerow', '(', 'rstDivider', ')', 'if', 'csvType', 'in', '[', '"human"', ']', ':', 'dividerWriter', '.', 'writerow', '(', 'divider', ')', 'writer', '.', 'writerow', '(', 'header', ')', 'dividerWriter', '.', 'writerow', '(', 'divider', ')', 'elif', 'csvType', 'in', '[', '"markdown"', ']', ':', 'writer', '.', 'writerow', '(', 'header', ')', 'dividerWriter', '.', 'writerow', '(', 'divider', ')', '# write out the data', 'writer', '.', 'writerows', '(', 'allRows', ')', '# table border for human readable', 'if', 'csvType', 'in', '[', '"human"', ']', ':', 'dividerWriter', '.', 'writerow', '(', 'divider', ')', 'output', '=', 'output', '.', 'getvalue', '(', ')', 'output', '=', 'output', '.', 'strip', '(', ')', 'if', 'csvType', 'in', '[', '"markdown"', ']', ':', 'output', '=', 'output', '.', 'replace', '(', '"|--"', ',', '"|:-"', ')', 'if', 'csvType', 'in', '[', '"reST"', ']', ':', 'output', '=', 'output', '.', 'replace', '(', '"|--"', ',', '"+--"', ')', '.', 'replace', '(', '"--|"', ',', '"--+"', ')', 'self', '.', 'log', '.', 'debug', '(', "'completed the ``_list_of_dictionaries_to_csv`` function'", ')', 'return', 'output'] | Convert a python list of dictionaries to pretty csv output
**Key Arguments:**
- ``csvType`` -- human, machine or reST
**Return:**
- ``output`` -- the contents of a CSV file | ['Convert', 'a', 'python', 'list', 'of', 'dictionaries', 'to', 'pretty', 'csv', 'output'] | train | https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/renderer/list_of_dictionaries.py#L500-L639 |
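A sketch of driving the private _list_of_dictionaries_to_csv method above through the renderer class it belongs to; the csv()/table()/markdown() wrapper names are assumptions about the public API.
# Render the same list of dicts in machine, human and markdown flavours
import logging
from fundamentals.renderer import list_of_dictionaries

log = logging.getLogger(__name__)
data = [{"owner": "daisy", "pet": "dog", "address": "belfast"},
        {"owner": "john", "pet": "snake", "address": "the moon"}]
dataSet = list_of_dictionaries(log=log, listOfDictionaries=data)
print(dataSet.csv())       # csvType="machine" under the hood (assumed wrapper)
print(dataSet.table())     # csvType="human", pipe-delimited with borders
print(dataSet.markdown())  # csvType="markdown", "|:-" aligned divider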
2,971 | edeposit/edeposit.amqp.ftp | src/edeposit/amqp/ftp/passwd_reader.py | set_permissions | def set_permissions(filename, uid=None, gid=None, mode=0775):
"""
    Set permissions for given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode
"""
if uid is None:
uid = get_ftp_uid()
if gid is None:
gid = -1
os.chown(filename, uid, gid)
os.chmod(filename, mode) | python | def set_permissions(filename, uid=None, gid=None, mode=0775):
"""
    Set permissions for given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode
"""
if uid is None:
uid = get_ftp_uid()
if gid is None:
gid = -1
os.chown(filename, uid, gid)
    os.chmod(filename, mode) | ['def', 'set_permissions', '(', 'filename', ',', 'uid', '=', 'None', ',', 'gid', '=', 'None', ',', 'mode', '=', '0775', ')', ':', 'if', 'uid', 'is', 'None', ':', 'uid', '=', 'get_ftp_uid', '(', ')', 'if', 'gid', 'is', 'None', ':', 'gid', '=', '-', '1', 'os', '.', 'chown', '(', 'filename', ',', 'uid', ',', 'gid', ')', 'os', '.', 'chmod', '(', 'filename', ',', 'mode', ')'] | Set permissions for given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
        mode (int, default 0775): unix access mode | ['Set', 'permissions', 'for', 'given', 'filename', '.'] | train | https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L115-L133 |
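A short sketch of the helper above; the paths are placeholders and the octal literals keep the Python 2 syntax the module itself uses.
# Default: hand ownership to the proftpd user with mode 0775
from edeposit.amqp.ftp import passwd_reader

passwd_reader.set_permissions("/home/ftp/new_user")
# Explicit owner, group and a tighter mode for a single file
passwd_reader.set_permissions("/home/ftp/new_user/import.log",
                              uid=1000, gid=1000, mode=0664)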
2,972 | madprime/cgivar2gvcf | cgivar2gvcf/__init__.py | process_full_position | def process_full_position(data, header, var_only=False):
"""
Return genetic data when all alleles called on same line.
Returns an array containing one item, a tuple of five items:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences
"""
feature_type = data[header['varType']]
# Skip unmatchable, uncovered, or pseudoautosomal-in-X
if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')):
return None
if var_only and feature_type in ['no-call', 'ref']:
return None
filters = []
if feature_type == 'no-call':
filters.append('NOCALL')
if 'varQuality' in header:
if 'VQLOW' in data[header['varQuality']]:
filters.append('VQLOW')
else:
var_filter = data[header['varFilter']]
if var_filter and not var_filter == "PASS":
filters = filters + var_filter.split(';')
chrom = data[header['chromosome']]
start = data[header['begin']]
ref_allele = data[header['reference']]
alleles = [data[header['alleleSeq']]]
dbsnp_data = []
dbsnp_data = data[header['xRef']].split(';')
assert data[header['ploidy']] in ['1', '2']
if feature_type == 'ref' or feature_type == 'no-call':
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters,
'end': data[header['end']]}]
else:
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters}] | python | def process_full_position(data, header, var_only=False):
"""
Return genetic data when all alleles called on same line.
Returns an array containing one item, a tuple of five items:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences
"""
feature_type = data[header['varType']]
# Skip unmatchable, uncovered, or pseudoautosomal-in-X
if (feature_type == 'no-ref' or feature_type.startswith('PAR-called-in-X')):
return None
if var_only and feature_type in ['no-call', 'ref']:
return None
filters = []
if feature_type == 'no-call':
filters.append('NOCALL')
if 'varQuality' in header:
if 'VQLOW' in data[header['varQuality']]:
filters.append('VQLOW')
else:
var_filter = data[header['varFilter']]
if var_filter and not var_filter == "PASS":
filters = filters + var_filter.split(';')
chrom = data[header['chromosome']]
start = data[header['begin']]
ref_allele = data[header['reference']]
alleles = [data[header['alleleSeq']]]
dbsnp_data = []
dbsnp_data = data[header['xRef']].split(';')
assert data[header['ploidy']] in ['1', '2']
if feature_type == 'ref' or feature_type == 'no-call':
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters,
'end': data[header['end']]}]
else:
return [{'chrom': chrom,
'start': start,
'dbsnp_data': dbsnp_data,
'ref_seq': ref_allele,
'alleles': alleles,
'allele_count': data[header['ploidy']],
'filters': filters}] | ['def', 'process_full_position', '(', 'data', ',', 'header', ',', 'var_only', '=', 'False', ')', ':', 'feature_type', '=', 'data', '[', 'header', '[', "'varType'", ']', ']', '# Skip unmatchable, uncovered, or pseudoautosomal-in-X', 'if', '(', 'feature_type', '==', "'no-ref'", 'or', 'feature_type', '.', 'startswith', '(', "'PAR-called-in-X'", ')', ')', ':', 'return', 'None', 'if', 'var_only', 'and', 'feature_type', 'in', '[', "'no-call'", ',', "'ref'", ']', ':', 'return', 'None', 'filters', '=', '[', ']', 'if', 'feature_type', '==', "'no-call'", ':', 'filters', '.', 'append', '(', "'NOCALL'", ')', 'if', "'varQuality'", 'in', 'header', ':', 'if', "'VQLOW'", 'in', 'data', '[', 'header', '[', "'varQuality'", ']', ']', ':', 'filters', '.', 'append', '(', "'VQLOW'", ')', 'else', ':', 'var_filter', '=', 'data', '[', 'header', '[', "'varFilter'", ']', ']', 'if', 'var_filter', 'and', 'not', 'var_filter', '==', '"PASS"', ':', 'filters', '=', 'filters', '+', 'var_filter', '.', 'split', '(', "';'", ')', 'chrom', '=', 'data', '[', 'header', '[', "'chromosome'", ']', ']', 'start', '=', 'data', '[', 'header', '[', "'begin'", ']', ']', 'ref_allele', '=', 'data', '[', 'header', '[', "'reference'", ']', ']', 'alleles', '=', '[', 'data', '[', 'header', '[', "'alleleSeq'", ']', ']', ']', 'dbsnp_data', '=', '[', ']', 'dbsnp_data', '=', 'data', '[', 'header', '[', "'xRef'", ']', ']', '.', 'split', '(', "';'", ')', 'assert', 'data', '[', 'header', '[', "'ploidy'", ']', ']', 'in', '[', "'1'", ',', "'2'", ']', 'if', 'feature_type', '==', "'ref'", 'or', 'feature_type', '==', "'no-call'", ':', 'return', '[', '{', "'chrom'", ':', 'chrom', ',', "'start'", ':', 'start', ',', "'dbsnp_data'", ':', 'dbsnp_data', ',', "'ref_seq'", ':', 'ref_allele', ',', "'alleles'", ':', 'alleles', ',', "'allele_count'", ':', 'data', '[', 'header', '[', "'ploidy'", ']', ']', ',', "'filters'", ':', 'filters', ',', "'end'", ':', 'data', '[', 'header', '[', "'end'", ']', ']', '}', ']', 'else', ':', 'return', '[', '{', "'chrom'", ':', 'chrom', ',', "'start'", ':', 'start', ',', "'dbsnp_data'", ':', 'dbsnp_data', ',', "'ref_seq'", ':', 'ref_allele', ',', "'alleles'", ':', 'alleles', ',', "'allele_count'", ':', 'data', '[', 'header', '[', "'ploidy'", ']', ']', ',', "'filters'", ':', 'filters', '}', ']'] | Return genetic data when all alleles called on same line.
Returns an array containing one item, a tuple of five items:
(string) chromosome
(string) start position (1-based)
(array of strings) matching dbSNP entries
(string) reference allele sequence
(array of strings) the genome's allele sequences | ['Return', 'genetic', 'data', 'when', 'all', 'alleles', 'called', 'on', 'same', 'line', '.'] | train | https://github.com/madprime/cgivar2gvcf/blob/13b4cd8da08669f7e4b0ceed77a7a17082f91037/cgivar2gvcf/__init__.py#L65-L117 |
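A sketch of feeding one parsed masterVar row to process_full_position; the header/index mapping and the data values are illustrative, not a real Complete Genomics record.
# Illustrative column mapping and record -- only the keys the function reads are included
header = {'chromosome': 0, 'begin': 1, 'end': 2, 'varType': 3, 'reference': 4,
          'alleleSeq': 5, 'xRef': 6, 'varFilter': 7, 'ploidy': 8}
data = ['chr1', '10000', '10001', 'snp', 'A', 'G', 'dbsnp.131:rs12345', 'PASS', '2']
out = process_full_position(data, header, var_only=True)
# -> [{'chrom': 'chr1', 'start': '10000', 'ref_seq': 'A', 'alleles': ['G'], ...}]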
2,973 | opencobra/memote | memote/support/validation.py | format_failure | def format_failure(failure):
"""Format how an error or warning should be displayed."""
return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format(
failure.getLine(),
failure.getColumn(),
failure.getErrorId(),
failure.getMessage(),
failure.getCategoryAsString(),
failure.getSeverity()
) | python | def format_failure(failure):
"""Format how an error or warning should be displayed."""
return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format(
failure.getLine(),
failure.getColumn(),
failure.getErrorId(),
failure.getMessage(),
failure.getCategoryAsString(),
failure.getSeverity()
) | ['def', 'format_failure', '(', 'failure', ')', ':', 'return', '"Line {}, Column {} - #{}: {} - Category: {}, Severity: {}"', '.', 'format', '(', 'failure', '.', 'getLine', '(', ')', ',', 'failure', '.', 'getColumn', '(', ')', ',', 'failure', '.', 'getErrorId', '(', ')', ',', 'failure', '.', 'getMessage', '(', ')', ',', 'failure', '.', 'getCategoryAsString', '(', ')', ',', 'failure', '.', 'getSeverity', '(', ')', ')'] | Format how an error or warning should be displayed. | ['Format', 'how', 'an', 'error', 'or', 'warning', 'should', 'be', 'displayed', '.'] | train | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/validation.py#L52-L61 |
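A sketch showing the kind of libsbml failure objects format_failure expects; the model path is a placeholder.
# Validate an SBML file and print each error/warning with format_failure
import libsbml

doc = libsbml.readSBMLFromFile("model.xml")   # placeholder path
doc.checkConsistency()
for i in range(doc.getNumErrors()):
    print(format_failure(doc.getError(i)))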
2,974 | PyHDI/Pyverilog | pyverilog/vparser/parser.py | VerilogParser.p_param | def p_param(self, p):
'param : PARAMETER param_substitution_list COMMA'
paramlist = [Parameter(rname, rvalue, lineno=p.lineno(2))
for rname, rvalue in p[2]]
p[0] = Decl(tuple(paramlist), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | python | def p_param(self, p):
'param : PARAMETER param_substitution_list COMMA'
paramlist = [Parameter(rname, rvalue, lineno=p.lineno(2))
for rname, rvalue in p[2]]
p[0] = Decl(tuple(paramlist), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | ['def', 'p_param', '(', 'self', ',', 'p', ')', ':', 'paramlist', '=', '[', 'Parameter', '(', 'rname', ',', 'rvalue', ',', 'lineno', '=', 'p', '.', 'lineno', '(', '2', ')', ')', 'for', 'rname', ',', 'rvalue', 'in', 'p', '[', '2', ']', ']', 'p', '[', '0', ']', '=', 'Decl', '(', 'tuple', '(', 'paramlist', ')', ',', 'lineno', '=', 'p', '.', 'lineno', '(', '1', ')', ')', 'p', '.', 'set_lineno', '(', '0', ',', 'p', '.', 'lineno', '(', '1', ')', ')'] | param : PARAMETER param_substitution_list COMMA | ['param', ':', 'PARAMETER', 'param_substitution_list', 'COMMA'] | train | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L166-L171 |
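A sketch of the context in which the grammar rule above fires: parsing a Verilog source that declares module parameters; the file name and its contents are placeholders.
# top.v would declare parameters, e.g. "module top #(parameter WIDTH = 8, parameter DEPTH = 4) (); endmodule"
from pyverilog.vparser.parser import parse

ast, directives = parse(['top.v'])
ast.show()    # parameter declarations appear as Decl(Parameter(...)) nodes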
2,975 | bslatkin/dpxdt | dpxdt/server/emails.py | send_ready_for_review | def send_ready_for_review(build_id, release_name, release_number):
"""Sends an email indicating that the release is ready for review."""
build = models.Build.query.get(build_id)
if not build.send_email:
logging.debug(
'Not sending ready for review email because build does not have '
'email enabled. build_id=%r', build.id)
return
ops = operations.BuildOps(build_id)
release, run_list, stats_dict, _ = ops.get_release(
release_name, release_number)
if not run_list:
logging.debug(
'Not sending ready for review email because there are '
' no runs. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
title = '%s: %s - Ready for review' % (build.name, release.name)
email_body = render_template(
'email_ready_for_review.html',
build=build,
release=release,
run_list=run_list,
stats_dict=stats_dict)
recipients = []
if build.email_alias:
recipients.append(build.email_alias)
else:
for user in build.owners:
recipients.append(user.email_address)
if not recipients:
logging.debug(
'Not sending ready for review email because there are no '
'recipients. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
message = Message(title, recipients=recipients)
message.html = email_body
logging.info('Sending ready for review email for build_id=%r, '
'release_name=%r, release_number=%d to %r',
build.id, release.name, release.number, recipients)
return render_or_send(send_ready_for_review, message) | python | def send_ready_for_review(build_id, release_name, release_number):
"""Sends an email indicating that the release is ready for review."""
build = models.Build.query.get(build_id)
if not build.send_email:
logging.debug(
'Not sending ready for review email because build does not have '
'email enabled. build_id=%r', build.id)
return
ops = operations.BuildOps(build_id)
release, run_list, stats_dict, _ = ops.get_release(
release_name, release_number)
if not run_list:
logging.debug(
'Not sending ready for review email because there are '
' no runs. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
title = '%s: %s - Ready for review' % (build.name, release.name)
email_body = render_template(
'email_ready_for_review.html',
build=build,
release=release,
run_list=run_list,
stats_dict=stats_dict)
recipients = []
if build.email_alias:
recipients.append(build.email_alias)
else:
for user in build.owners:
recipients.append(user.email_address)
if not recipients:
logging.debug(
'Not sending ready for review email because there are no '
'recipients. build_id=%r, release_name=%r, release_number=%d',
build.id, release.name, release.number)
return
message = Message(title, recipients=recipients)
message.html = email_body
logging.info('Sending ready for review email for build_id=%r, '
'release_name=%r, release_number=%d to %r',
build.id, release.name, release.number, recipients)
return render_or_send(send_ready_for_review, message) | ['def', 'send_ready_for_review', '(', 'build_id', ',', 'release_name', ',', 'release_number', ')', ':', 'build', '=', 'models', '.', 'Build', '.', 'query', '.', 'get', '(', 'build_id', ')', 'if', 'not', 'build', '.', 'send_email', ':', 'logging', '.', 'debug', '(', "'Not sending ready for review email because build does not have '", "'email enabled. build_id=%r'", ',', 'build', '.', 'id', ')', 'return', 'ops', '=', 'operations', '.', 'BuildOps', '(', 'build_id', ')', 'release', ',', 'run_list', ',', 'stats_dict', ',', '_', '=', 'ops', '.', 'get_release', '(', 'release_name', ',', 'release_number', ')', 'if', 'not', 'run_list', ':', 'logging', '.', 'debug', '(', "'Not sending ready for review email because there are '", "' no runs. build_id=%r, release_name=%r, release_number=%d'", ',', 'build', '.', 'id', ',', 'release', '.', 'name', ',', 'release', '.', 'number', ')', 'return', 'title', '=', "'%s: %s - Ready for review'", '%', '(', 'build', '.', 'name', ',', 'release', '.', 'name', ')', 'email_body', '=', 'render_template', '(', "'email_ready_for_review.html'", ',', 'build', '=', 'build', ',', 'release', '=', 'release', ',', 'run_list', '=', 'run_list', ',', 'stats_dict', '=', 'stats_dict', ')', 'recipients', '=', '[', ']', 'if', 'build', '.', 'email_alias', ':', 'recipients', '.', 'append', '(', 'build', '.', 'email_alias', ')', 'else', ':', 'for', 'user', 'in', 'build', '.', 'owners', ':', 'recipients', '.', 'append', '(', 'user', '.', 'email_address', ')', 'if', 'not', 'recipients', ':', 'logging', '.', 'debug', '(', "'Not sending ready for review email because there are no '", "'recipients. build_id=%r, release_name=%r, release_number=%d'", ',', 'build', '.', 'id', ',', 'release', '.', 'name', ',', 'release', '.', 'number', ')', 'return', 'message', '=', 'Message', '(', 'title', ',', 'recipients', '=', 'recipients', ')', 'message', '.', 'html', '=', 'email_body', 'logging', '.', 'info', '(', "'Sending ready for review email for build_id=%r, '", "'release_name=%r, release_number=%d to %r'", ',', 'build', '.', 'id', ',', 'release', '.', 'name', ',', 'release', '.', 'number', ',', 'recipients', ')', 'return', 'render_or_send', '(', 'send_ready_for_review', ',', 'message', ')'] | Sends an email indicating that the release is ready for review. | ['Sends', 'an', 'email', 'indicating', 'that', 'the', 'release', 'is', 'ready', 'for', 'review', '.'] | train | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/emails.py#L45-L96 |
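A sketch of the call itself; in the application this runs inside the Flask app context after a release's runs finish, and the identifiers here are placeholders.
# Placeholder identifiers -- normally queued as a deferred task
from dpxdt.server import emails

emails.send_ready_for_review(build_id=42,
                             release_name='deploy-2015-06-01',
                             release_number=3)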
2,976 | Holzhaus/python-cmuclmtk | cmuclmtk/__init__.py | wngram2idngram | def wngram2idngram(input_file, vocab_file, output_file, buffersize=100, hashtablesize=2000000, files=20, compress=False, verbosity=2, n=3, write_ascii=False, fof_size=10):
"""
Takes a word N-gram file and a vocabulary file and lists every id n-gram which occurred in the text, along with its number of occurrences, in either ASCII or binary format.
Note : It is important that the vocabulary file is in alphabetical order. If you are using vocabularies generated by wfreq2vocab then this should not be an issue, as they will already be alphabetically sorted.
"""
cmd = ['wngram2idngram', '-vocab', os.path.abspath(vocab_file),
'-idngram', os.path.abspath(output_file)]
if buffersize:
cmd.extend(['-buffer', buffersize])
if hashtablesize:
cmd.extend(['-hash', hashtablesize])
if files:
cmd.extend(['-files', files])
if verbosity:
cmd.extend(['-verbosity', verbosity])
if n:
cmd.extend(['-n', n])
if fof_size:
cmd.extend(['-fof_size', fof_size])
if compress:
cmd.append('-compress')
if write_ascii:
cmd.append('-write_ascii')
# Ensure that every parameter is of type 'str'
cmd = [str(x) for x in cmd]
with tempfile.SpooledTemporaryFile() as output_f:
with tempfile.SpooledTemporaryFile() as input_f:
input_f.write(text.encode('utf-8') if sys.version_info >= (3,) and type(text) is str else text)
input_f.seek(0)
with output_to_debuglogger() as err_f:
with do_in_tempdir():
exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
output = output_f.read()
logger = logging.getLogger(__name__)
logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
if exitcode != 0:
raise ConversionError("'%r' returned with non-zero exit status '%s'" % (cmd, exitcode))
if sys.version_info >= (3,) and type(output) is bytes:
output = output.decode('utf-8')
return output.strip() | python | def wngram2idngram(input_file, vocab_file, output_file, buffersize=100, hashtablesize=2000000, files=20, compress=False, verbosity=2, n=3, write_ascii=False, fof_size=10):
"""
Takes a word N-gram file and a vocabulary file and lists every id n-gram which occurred in the text, along with its number of occurrences, in either ASCII or binary format.
Note : It is important that the vocabulary file is in alphabetical order. If you are using vocabularies generated by wfreq2vocab then this should not be an issue, as they will already be alphabetically sorted.
"""
cmd = ['wngram2idngram', '-vocab', os.path.abspath(vocab_file),
'-idngram', os.path.abspath(output_file)]
if buffersize:
cmd.extend(['-buffer', buffersize])
if hashtablesize:
cmd.extend(['-hash', hashtablesize])
if files:
cmd.extend(['-files', files])
if verbosity:
cmd.extend(['-verbosity', verbosity])
if n:
cmd.extend(['-n', n])
if fof_size:
cmd.extend(['-fof_size', fof_size])
if compress:
cmd.append('-compress')
if write_ascii:
cmd.append('-write_ascii')
# Ensure that every parameter is of type 'str'
cmd = [str(x) for x in cmd]
with tempfile.SpooledTemporaryFile() as output_f:
with tempfile.SpooledTemporaryFile() as input_f:
input_f.write(text.encode('utf-8') if sys.version_info >= (3,) and type(text) is str else text)
input_f.seek(0)
with output_to_debuglogger() as err_f:
with do_in_tempdir():
exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
output = output_f.read()
logger = logging.getLogger(__name__)
logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
if exitcode != 0:
raise ConversionError("'%r' returned with non-zero exit status '%s'" % (cmd, exitcode))
if sys.version_info >= (3,) and type(output) is bytes:
output = output.decode('utf-8')
return output.strip() | ['def', 'wngram2idngram', '(', 'input_file', ',', 'vocab_file', ',', 'output_file', ',', 'buffersize', '=', '100', ',', 'hashtablesize', '=', '2000000', ',', 'files', '=', '20', ',', 'compress', '=', 'False', ',', 'verbosity', '=', '2', ',', 'n', '=', '3', ',', 'write_ascii', '=', 'False', ',', 'fof_size', '=', '10', ')', ':', 'cmd', '=', '[', "'wngram2idngram'", ',', "'-vocab'", ',', 'os', '.', 'path', '.', 'abspath', '(', 'vocab_file', ')', ',', "'-idngram'", ',', 'os', '.', 'path', '.', 'abspath', '(', 'output_file', ')', ']', 'if', 'buffersize', ':', 'cmd', '.', 'extend', '(', '[', "'-buffer'", ',', 'buffersize', ']', ')', 'if', 'hashtablesize', ':', 'cmd', '.', 'extend', '(', '[', "'-hash'", ',', 'hashtablesize', ']', ')', 'if', 'files', ':', 'cmd', '.', 'extend', '(', '[', "'-files'", ',', 'files', ']', ')', 'if', 'verbosity', ':', 'cmd', '.', 'extend', '(', '[', "'-verbosity'", ',', 'verbosity', ']', ')', 'if', 'n', ':', 'cmd', '.', 'extend', '(', '[', "'-n'", ',', 'n', ']', ')', 'if', 'fof_size', ':', 'cmd', '.', 'extend', '(', '[', "'-fof_size'", ',', 'fof_size', ']', ')', 'if', 'compress', ':', 'cmd', '.', 'append', '(', "'-compress'", ')', 'if', 'write_ascii', ':', 'cmd', '.', 'append', '(', "'-write_ascii'", ')', "# Ensure that every parameter is of type 'str'", 'cmd', '=', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'cmd', ']', 'with', 'tempfile', '.', 'SpooledTemporaryFile', '(', ')', 'as', 'output_f', ':', 'with', 'tempfile', '.', 'SpooledTemporaryFile', '(', ')', 'as', 'input_f', ':', 'input_f', '.', 'write', '(', 'text', '.', 'encode', '(', "'utf-8'", ')', 'if', 'sys', '.', 'version_info', '>=', '(', '3', ',', ')', 'and', 'type', '(', 'text', ')', 'is', 'str', 'else', 'text', ')', 'input_f', '.', 'seek', '(', '0', ')', 'with', 'output_to_debuglogger', '(', ')', 'as', 'err_f', ':', 'with', 'do_in_tempdir', '(', ')', ':', 'exitcode', '=', 'subprocess', '.', 'call', '(', 'cmd', ',', 'stdin', '=', 'input_f', ',', 'stdout', '=', 'output_f', ',', 'stderr', '=', 'err_f', ')', 'output', '=', 'output_f', '.', 'read', '(', ')', 'logger', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', 'logger', '.', 'debug', '(', '"Command \'%s\' returned with exit code \'%d\'."', '%', '(', "' '", '.', 'join', '(', 'cmd', ')', ',', 'exitcode', ')', ')', 'if', 'exitcode', '!=', '0', ':', 'raise', 'ConversionError', '(', '"\'%r\' returned with non-zero exit status \'%s\'"', '%', '(', 'cmd', ',', 'exitcode', ')', ')', 'if', 'sys', '.', 'version_info', '>=', '(', '3', ',', ')', 'and', 'type', '(', 'output', ')', 'is', 'bytes', ':', 'output', '=', 'output', '.', 'decode', '(', "'utf-8'", ')', 'return', 'output', '.', 'strip', '(', ')'] | Takes a word N-gram file and a vocabulary file and lists every id n-gram which occurred in the text, along with its number of occurrences, in either ASCII or binary format.
Note : It is important that the vocabulary file is in alphabetical order. If you are using vocabularies generated by wfreq2vocab then this should not be an issue, as they will already be alphabetically sorted. | ['Takes', 'a', 'word', 'N', '-', 'gram', 'file', 'and', 'a', 'vocabulary', 'file', 'and', 'lists', 'every', 'id', 'n', '-', 'gram', 'which', 'occurred', 'in', 'the', 'text', 'along', 'with', 'its', 'number', 'of', 'occurrences', 'in', 'either', 'ASCII', 'or', 'binary', 'format', '.'] | train | https://github.com/Holzhaus/python-cmuclmtk/blob/67a5c6713c497ca644ea1c697a70e8d930c9d4b4/cmuclmtk/__init__.py#L275-L327 |
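A sketch of the intended call; note that the wrapper's body as captured references a `text` variable that is not among its parameters, so only the signature and file-name placeholders are shown here.
# Paths are placeholders; the vocabulary must be alphabetically sorted (e.g. from wfreq2vocab)
from cmuclmtk import wngram2idngram

wngram2idngram("corpus.wngram", "corpus.vocab", "corpus.idngram",
               n=3, verbosity=2, write_ascii=False)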
2,977 | coleifer/walrus | walrus/containers.py | ConsumerGroup.consumer | def consumer(self, name):
"""
Create a new consumer for the :py:class:`ConsumerGroup`.
:param name: name of consumer
:returns: a :py:class:`ConsumerGroup` using the given consumer name.
"""
return type(self)(self.database, self.name, self.keys, name) | python | def consumer(self, name):
"""
Create a new consumer for the :py:class:`ConsumerGroup`.
:param name: name of consumer
:returns: a :py:class:`ConsumerGroup` using the given consumer name.
"""
return type(self)(self.database, self.name, self.keys, name) | ['def', 'consumer', '(', 'self', ',', 'name', ')', ':', 'return', 'type', '(', 'self', ')', '(', 'self', '.', 'database', ',', 'self', '.', 'name', ',', 'self', '.', 'keys', ',', 'name', ')'] | Create a new consumer for the :py:class:`ConsumerGroup`.
:param name: name of consumer
:returns: a :py:class:`ConsumerGroup` using the given consumer name. | ['Create', 'a', 'new', 'consumer', 'for', 'the', ':', 'py', ':', 'class', ':', 'ConsumerGroup', '.'] | train | https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L1359-L1366 |
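A sketch of obtaining a named consumer from a group; the stream and group names are placeholders.
# Create (or attach to) a consumer group, then read as a named consumer
from walrus import Database

db = Database()
cg = db.consumer_group('cg-events', ['events'])
cg.create()                          # ensure the group exists on the stream
worker = cg.consumer('worker-1')     # same group/keys, reads attributed to "worker-1"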
2,978 | bcbio/bcbio-nextgen | bcbio/pipeline/alignment.py | _get_aligner_index | def _get_aligner_index(aligner, data):
"""Handle multiple specifications of aligner indexes, returning value to pass to aligner.
Original bcbio case -- a list of indices.
CWL case: a single file with secondaryFiles staged in the same directory.
"""
aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data)
# standard bcbio case
if aligner_indexes and isinstance(aligner_indexes, (list, tuple)):
aligner_index = os.path.commonprefix(aligner_indexes)
if aligner_index.endswith("."):
aligner_index = aligner_index[:-1]
return aligner_index
# single file -- check for standard naming or directory
elif aligner_indexes and os.path.exists(aligner_indexes):
aligner_dir = os.path.dirname(aligner_indexes)
aligner_prefix = os.path.splitext(aligner_indexes)[0]
if len(glob.glob("%s.*" % aligner_prefix)) > 0:
return aligner_prefix
else:
return aligner_dir
if aligner not in allow_noindices():
raise ValueError("Did not find reference indices for aligner %s in genome: %s" %
(aligner, data["reference"])) | python | def _get_aligner_index(aligner, data):
"""Handle multiple specifications of aligner indexes, returning value to pass to aligner.
Original bcbio case -- a list of indices.
CWL case: a single file with secondaryFiles staged in the same directory.
"""
aligner_indexes = tz.get_in(("reference", get_aligner_with_aliases(aligner, data), "indexes"), data)
# standard bcbio case
if aligner_indexes and isinstance(aligner_indexes, (list, tuple)):
aligner_index = os.path.commonprefix(aligner_indexes)
if aligner_index.endswith("."):
aligner_index = aligner_index[:-1]
return aligner_index
# single file -- check for standard naming or directory
elif aligner_indexes and os.path.exists(aligner_indexes):
aligner_dir = os.path.dirname(aligner_indexes)
aligner_prefix = os.path.splitext(aligner_indexes)[0]
if len(glob.glob("%s.*" % aligner_prefix)) > 0:
return aligner_prefix
else:
return aligner_dir
if aligner not in allow_noindices():
raise ValueError("Did not find reference indices for aligner %s in genome: %s" %
(aligner, data["reference"])) | ['def', '_get_aligner_index', '(', 'aligner', ',', 'data', ')', ':', 'aligner_indexes', '=', 'tz', '.', 'get_in', '(', '(', '"reference"', ',', 'get_aligner_with_aliases', '(', 'aligner', ',', 'data', ')', ',', '"indexes"', ')', ',', 'data', ')', '# standard bcbio case', 'if', 'aligner_indexes', 'and', 'isinstance', '(', 'aligner_indexes', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'aligner_index', '=', 'os', '.', 'path', '.', 'commonprefix', '(', 'aligner_indexes', ')', 'if', 'aligner_index', '.', 'endswith', '(', '"."', ')', ':', 'aligner_index', '=', 'aligner_index', '[', ':', '-', '1', ']', 'return', 'aligner_index', '# single file -- check for standard naming or directory', 'elif', 'aligner_indexes', 'and', 'os', '.', 'path', '.', 'exists', '(', 'aligner_indexes', ')', ':', 'aligner_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'aligner_indexes', ')', 'aligner_prefix', '=', 'os', '.', 'path', '.', 'splitext', '(', 'aligner_indexes', ')', '[', '0', ']', 'if', 'len', '(', 'glob', '.', 'glob', '(', '"%s.*"', '%', 'aligner_prefix', ')', ')', '>', '0', ':', 'return', 'aligner_prefix', 'else', ':', 'return', 'aligner_dir', 'if', 'aligner', 'not', 'in', 'allow_noindices', '(', ')', ':', 'raise', 'ValueError', '(', '"Did not find reference indices for aligner %s in genome: %s"', '%', '(', 'aligner', ',', 'data', '[', '"reference"', ']', ')', ')'] | Handle multiple specifications of aligner indexes, returning value to pass to aligner.
Original bcbio case -- a list of indices.
CWL case: a single file with secondaryFiles staged in the same directory. | ['Handle', 'multiple', 'specifications', 'of', 'aligner', 'indexes', 'returning', 'value', 'to', 'pass', 'to', 'aligner', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/alignment.py#L115-L139 |
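A sketch of the standard (list-of-indices) branch above; the paths are placeholders and it assumes get_aligner_with_aliases maps "bwa" to itself.
# The common prefix of the BWA index files, minus the trailing dot, is returned
data = {"reference": {"bwa": {"indexes": [
    "/ref/GRCh37.fa.amb", "/ref/GRCh37.fa.ann", "/ref/GRCh37.fa.bwt",
    "/ref/GRCh37.fa.pac", "/ref/GRCh37.fa.sa"]}}}
print(_get_aligner_index("bwa", data))   # -> "/ref/GRCh37.fa"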
2,979 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/minimal.py | MAVLink.heartbeat_send | def heartbeat_send(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=2, force_mavlink1=False):
'''
The heartbeat message shows that a system is present and responding.
The type of the MAV and Autopilot hardware allow the
receiving system to treat further messages from this
system appropriate (e.g. by laying out the user
interface based on the autopilot).
type : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
autopilot : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
base_mode : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
custom_mode : A bitfield for use for autopilot-specific flags. (uint32_t)
system_status : System status flag, see MAV_STATE ENUM (uint8_t)
mavlink_version : MAVLink version (uint8_t)
'''
return self.send(self.heartbeat_encode(type, autopilot, base_mode, custom_mode, system_status, mavlink_version), force_mavlink1=force_mavlink1) | python | def heartbeat_send(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=2, force_mavlink1=False):
'''
The heartbeat message shows that a system is present and responding.
The type of the MAV and Autopilot hardware allow the
receiving system to treat further messages from this
system appropriate (e.g. by laying out the user
interface based on the autopilot).
type : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
autopilot : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
base_mode : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
custom_mode : A bitfield for use for autopilot-specific flags. (uint32_t)
system_status : System status flag, see MAV_STATE ENUM (uint8_t)
mavlink_version : MAVLink version (uint8_t)
'''
return self.send(self.heartbeat_encode(type, autopilot, base_mode, custom_mode, system_status, mavlink_version), force_mavlink1=force_mavlink1) | ['def', 'heartbeat_send', '(', 'self', ',', 'type', ',', 'autopilot', ',', 'base_mode', ',', 'custom_mode', ',', 'system_status', ',', 'mavlink_version', '=', '2', ',', 'force_mavlink1', '=', 'False', ')', ':', 'return', 'self', '.', 'send', '(', 'self', '.', 'heartbeat_encode', '(', 'type', ',', 'autopilot', ',', 'base_mode', ',', 'custom_mode', ',', 'system_status', ',', 'mavlink_version', ')', ',', 'force_mavlink1', '=', 'force_mavlink1', ')'] | The heartbeat message shows that a system is present and responding.
The type of the MAV and Autopilot hardware allow the
receiving system to treat further messages from this
system appropriate (e.g. by laying out the user
interface based on the autopilot).
type : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
autopilot : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
base_mode : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
custom_mode : A bitfield for use for autopilot-specific flags. (uint32_t)
system_status : System status flag, see MAV_STATE ENUM (uint8_t)
mavlink_version : MAVLink version (uint8_t) | ['The', 'heartbeat', 'message', 'shows', 'that', 'a', 'system', 'is', 'present', 'and', 'responding', '.', 'The', 'type', 'of', 'the', 'MAV', 'and', 'Autopilot', 'hardware', 'allow', 'the', 'receiving', 'system', 'to', 'treat', 'further', 'messages', 'from', 'this', 'system', 'appropriate', '(', 'e', '.', 'g', '.', 'by', 'laying', 'out', 'the', 'user', 'interface', 'based', 'on', 'the', 'autopilot', ')', '.'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/minimal.py#L805-L821 |
2,980 | campaignmonitor/createsend-python | lib/createsend/transactional.py | Transactional.smart_email_list | def smart_email_list(self, status="all", client_id=None):
"""Gets the smart email list."""
if client_id is None:
response = self._get(
"/transactional/smartEmail?status=%s" % status)
else:
response = self._get(
"/transactional/smartEmail?status=%s&clientID=%s" % (status, client_id))
return json_to_py(response) | python | def smart_email_list(self, status="all", client_id=None):
"""Gets the smart email list."""
if client_id is None:
response = self._get(
"/transactional/smartEmail?status=%s" % status)
else:
response = self._get(
"/transactional/smartEmail?status=%s&clientID=%s" % (status, client_id))
return json_to_py(response) | ['def', 'smart_email_list', '(', 'self', ',', 'status', '=', '"all"', ',', 'client_id', '=', 'None', ')', ':', 'if', 'client_id', 'is', 'None', ':', 'response', '=', 'self', '.', '_get', '(', '"/transactional/smartEmail?status=%s"', '%', 'status', ')', 'else', ':', 'response', '=', 'self', '.', '_get', '(', '"/transactional/smartEmail?status=%s&clientID=%s"', '%', '(', 'status', ',', 'client_id', ')', ')', 'return', 'json_to_py', '(', 'response', ')'] | Gets the smart email list. | ['Gets', 'the', 'smart', 'email', 'list', '.'] | train | https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/transactional.py#L16-L24 |
2,981 | MonashBI/arcana | arcana/data/collection.py | BaseCollection.bind | def bind(self, study, **kwargs): # @UnusedVariable
"""
Used for duck typing Collection objects with Spec and Match
in source and sink initiation. Checks IDs match sessions in study.
"""
if self.frequency == 'per_subject':
tree_subject_ids = list(study.tree.subject_ids)
subject_ids = list(self._collection.keys())
if tree_subject_ids != subject_ids:
raise ArcanaUsageError(
"Subject IDs in collection provided to '{}' ('{}') "
"do not match Study tree ('{}')".format(
self.name, "', '".join(subject_ids),
"', '".join(tree_subject_ids)))
elif self.frequency == 'per_visit':
tree_visit_ids = list(study.tree.visit_ids)
visit_ids = list(self._collection.keys())
if tree_visit_ids != visit_ids:
raise ArcanaUsageError(
"Subject IDs in collection provided to '{}' ('{}') "
"do not match Study tree ('{}')".format(
self.name, "', '".join(visit_ids),
"', '".join(tree_visit_ids)))
elif self.frequency == 'per_session':
for subject in study.tree.subjects:
if subject.id not in self._collection:
raise ArcanaUsageError(
"Study subject ID '{}' was not found in colleciton "
"provided to '{}' (found '{}')".format(
subject.id, self.name,
"', '".join(self._collection.keys())))
for session in subject.sessions:
if session.visit_id not in self._collection[subject.id]:
raise ArcanaUsageError(
"Study visit ID '{}' for subject '{}' was not "
"found in colleciton provided to '{}' (found '{}')"
.format(subject.id, self.name,
"', '".join(
self._collection[subject.id].keys()))) | python | def bind(self, study, **kwargs): # @UnusedVariable
"""
Used for duck typing Collection objects with Spec and Match
in source and sink initiation. Checks IDs match sessions in study.
"""
if self.frequency == 'per_subject':
tree_subject_ids = list(study.tree.subject_ids)
subject_ids = list(self._collection.keys())
if tree_subject_ids != subject_ids:
raise ArcanaUsageError(
"Subject IDs in collection provided to '{}' ('{}') "
"do not match Study tree ('{}')".format(
self.name, "', '".join(subject_ids),
"', '".join(tree_subject_ids)))
elif self.frequency == 'per_visit':
tree_visit_ids = list(study.tree.visit_ids)
visit_ids = list(self._collection.keys())
if tree_visit_ids != visit_ids:
raise ArcanaUsageError(
"Subject IDs in collection provided to '{}' ('{}') "
"do not match Study tree ('{}')".format(
self.name, "', '".join(visit_ids),
"', '".join(tree_visit_ids)))
elif self.frequency == 'per_session':
for subject in study.tree.subjects:
if subject.id not in self._collection:
raise ArcanaUsageError(
"Study subject ID '{}' was not found in colleciton "
"provided to '{}' (found '{}')".format(
subject.id, self.name,
"', '".join(self._collection.keys())))
for session in subject.sessions:
if session.visit_id not in self._collection[subject.id]:
raise ArcanaUsageError(
"Study visit ID '{}' for subject '{}' was not "
"found in colleciton provided to '{}' (found '{}')"
.format(subject.id, self.name,
"', '".join(
self._collection[subject.id].keys()))) | ['def', 'bind', '(', 'self', ',', 'study', ',', '*', '*', 'kwargs', ')', ':', '# @UnusedVariable', 'if', 'self', '.', 'frequency', '==', "'per_subject'", ':', 'tree_subject_ids', '=', 'list', '(', 'study', '.', 'tree', '.', 'subject_ids', ')', 'subject_ids', '=', 'list', '(', 'self', '.', '_collection', '.', 'keys', '(', ')', ')', 'if', 'tree_subject_ids', '!=', 'subject_ids', ':', 'raise', 'ArcanaUsageError', '(', '"Subject IDs in collection provided to \'{}\' (\'{}\') "', '"do not match Study tree (\'{}\')"', '.', 'format', '(', 'self', '.', 'name', ',', '"\', \'"', '.', 'join', '(', 'subject_ids', ')', ',', '"\', \'"', '.', 'join', '(', 'tree_subject_ids', ')', ')', ')', 'elif', 'self', '.', 'frequency', '==', "'per_visit'", ':', 'tree_visit_ids', '=', 'list', '(', 'study', '.', 'tree', '.', 'visit_ids', ')', 'visit_ids', '=', 'list', '(', 'self', '.', '_collection', '.', 'keys', '(', ')', ')', 'if', 'tree_visit_ids', '!=', 'visit_ids', ':', 'raise', 'ArcanaUsageError', '(', '"Subject IDs in collection provided to \'{}\' (\'{}\') "', '"do not match Study tree (\'{}\')"', '.', 'format', '(', 'self', '.', 'name', ',', '"\', \'"', '.', 'join', '(', 'visit_ids', ')', ',', '"\', \'"', '.', 'join', '(', 'tree_visit_ids', ')', ')', ')', 'elif', 'self', '.', 'frequency', '==', "'per_session'", ':', 'for', 'subject', 'in', 'study', '.', 'tree', '.', 'subjects', ':', 'if', 'subject', '.', 'id', 'not', 'in', 'self', '.', '_collection', ':', 'raise', 'ArcanaUsageError', '(', '"Study subject ID \'{}\' was not found in colleciton "', '"provided to \'{}\' (found \'{}\')"', '.', 'format', '(', 'subject', '.', 'id', ',', 'self', '.', 'name', ',', '"\', \'"', '.', 'join', '(', 'self', '.', '_collection', '.', 'keys', '(', ')', ')', ')', ')', 'for', 'session', 'in', 'subject', '.', 'sessions', ':', 'if', 'session', '.', 'visit_id', 'not', 'in', 'self', '.', '_collection', '[', 'subject', '.', 'id', ']', ':', 'raise', 'ArcanaUsageError', '(', '"Study visit ID \'{}\' for subject \'{}\' was not "', '"found in colleciton provided to \'{}\' (found \'{}\')"', '.', 'format', '(', 'subject', '.', 'id', ',', 'self', '.', 'name', ',', '"\', \'"', '.', 'join', '(', 'self', '.', '_collection', '[', 'subject', '.', 'id', ']', '.', 'keys', '(', ')', ')', ')', ')'] | Used for duck typing Collection objects with Spec and Match
in source and sink initiation. Checks IDs match sessions in study. | ['Used', 'for', 'duck', 'typing', 'Collection', 'objects', 'with', 'Spec', 'and', 'Match', 'in', 'source', 'and', 'sink', 'initiation', '.', 'Checks', 'IDs', 'match', 'sessions', 'in', 'study', '.'] | train | https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/collection.py#L167-L205 |
2,982 | titusjan/argos | argos/config/choicecti.py | ChoiceCtiEditor.comboBoxRowsInserted | def comboBoxRowsInserted(self, _parent, start, end):
""" Called when the user has entered a new value in the combobox.
Puts the combobox values back into the cti.
"""
assert start == end, "Bug, please report: more than one row inserted"
configValue = self.comboBox.itemText(start)
logger.debug("Inserting {!r} at position {} in {}"
.format(configValue, start, self.cti.nodePath))
self.cti.insertValue(start, configValue) | python | def comboBoxRowsInserted(self, _parent, start, end):
""" Called when the user has entered a new value in the combobox.
Puts the combobox values back into the cti.
"""
assert start == end, "Bug, please report: more than one row inserted"
configValue = self.comboBox.itemText(start)
logger.debug("Inserting {!r} at position {} in {}"
.format(configValue, start, self.cti.nodePath))
self.cti.insertValue(start, configValue) | ['def', 'comboBoxRowsInserted', '(', 'self', ',', '_parent', ',', 'start', ',', 'end', ')', ':', 'assert', 'start', '==', 'end', ',', '"Bug, please report: more than one row inserted"', 'configValue', '=', 'self', '.', 'comboBox', '.', 'itemText', '(', 'start', ')', 'logger', '.', 'debug', '(', '"Inserting {!r} at position {} in {}"', '.', 'format', '(', 'configValue', ',', 'start', ',', 'self', '.', 'cti', '.', 'nodePath', ')', ')', 'self', '.', 'cti', '.', 'insertValue', '(', 'start', ',', 'configValue', ')'] | Called when the user has entered a new value in the combobox.
Puts the combobox values back into the cti. | ['Called', 'when', 'the', 'user', 'has', 'entered', 'a', 'new', 'value', 'in', 'the', 'combobox', '.', 'Puts', 'the', 'combobox', 'values', 'back', 'into', 'the', 'cti', '.'] | train | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/choicecti.py#L250-L258 |
2,983 | django-fluent/django-fluent-contents | fluent_contents/rendering/main.py | get_cached_placeholder_output | def get_cached_placeholder_output(parent_object, placeholder_name):
"""
Return cached output for a placeholder, if available.
This avoids fetching the Placeholder object.
"""
if not PlaceholderRenderingPipe.may_cache_placeholders():
return None
language_code = get_parent_language_code(parent_object)
cache_key = get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code)
return cache.get(cache_key) | python | def get_cached_placeholder_output(parent_object, placeholder_name):
"""
Return cached output for a placeholder, if available.
This avoids fetching the Placeholder object.
"""
if not PlaceholderRenderingPipe.may_cache_placeholders():
return None
language_code = get_parent_language_code(parent_object)
cache_key = get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code)
return cache.get(cache_key) | ['def', 'get_cached_placeholder_output', '(', 'parent_object', ',', 'placeholder_name', ')', ':', 'if', 'not', 'PlaceholderRenderingPipe', '.', 'may_cache_placeholders', '(', ')', ':', 'return', 'None', 'language_code', '=', 'get_parent_language_code', '(', 'parent_object', ')', 'cache_key', '=', 'get_placeholder_cache_key_for_parent', '(', 'parent_object', ',', 'placeholder_name', ',', 'language_code', ')', 'return', 'cache', '.', 'get', '(', 'cache_key', ')'] | Return cached output for a placeholder, if available.
This avoids fetching the Placeholder object. | ['Return', 'cached', 'output', 'for', 'a', 'placeholder', 'if', 'available', '.', 'This', 'avoids', 'fetching', 'the', 'Placeholder', 'object', '.'] | train | https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/rendering/main.py#L14-L24 |
2,984 | llllllllll/codetransformer | codetransformer/decompiler/_343.py | normalize_tuple_slice | def normalize_tuple_slice(node):
"""
Normalize an ast.Tuple node representing the internals of a slice.
Returns the node wrapped in an ast.Index.
Returns an ExtSlice node built from the tuple elements if there are any
slices.
"""
if not any(isinstance(elt, ast.Slice) for elt in node.elts):
return ast.Index(value=node)
return ast.ExtSlice(
[
# Wrap non-Slice nodes in Index nodes.
elt if isinstance(elt, ast.Slice) else ast.Index(value=elt)
for elt in node.elts
]
) | python | def normalize_tuple_slice(node):
"""
Normalize an ast.Tuple node representing the internals of a slice.
Returns the node wrapped in an ast.Index.
Returns an ExtSlice node built from the tuple elements if there are any
slices.
"""
if not any(isinstance(elt, ast.Slice) for elt in node.elts):
return ast.Index(value=node)
return ast.ExtSlice(
[
# Wrap non-Slice nodes in Index nodes.
elt if isinstance(elt, ast.Slice) else ast.Index(value=elt)
for elt in node.elts
]
) | ['def', 'normalize_tuple_slice', '(', 'node', ')', ':', 'if', 'not', 'any', '(', 'isinstance', '(', 'elt', ',', 'ast', '.', 'Slice', ')', 'for', 'elt', 'in', 'node', '.', 'elts', ')', ':', 'return', 'ast', '.', 'Index', '(', 'value', '=', 'node', ')', 'return', 'ast', '.', 'ExtSlice', '(', '[', '# Wrap non-Slice nodes in Index nodes.', 'elt', 'if', 'isinstance', '(', 'elt', ',', 'ast', '.', 'Slice', ')', 'else', 'ast', '.', 'Index', '(', 'value', '=', 'elt', ')', 'for', 'elt', 'in', 'node', '.', 'elts', ']', ')'] | Normalize an ast.Tuple node representing the internals of a slice.
Returns the node wrapped in an ast.Index.
Returns an ExtSlice node built from the tuple elements if there are any
slices. | ['Normalize', 'an', 'ast', '.', 'Tuple', 'node', 'representing', 'the', 'internals', 'of', 'a', 'slice', '.'] | train | https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/decompiler/_343.py#L1244-L1261 |
2,985 | saltstack/salt | salt/cloud/clouds/azurearm.py | list_blobs | def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
'''
List blobs.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
storageservice = _get_block_blob_service(kwargs)
ret = {}
try:
for blob in storageservice.list_blobs(kwargs['container']).items:
ret[blob.name] = {
'blob_type': blob.properties.blob_type,
'last_modified': blob.properties.last_modified.isoformat(),
'server_encrypted': blob.properties.server_encrypted,
}
except Exception as exc:
log.warning(six.text_type(exc))
return ret | python | def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
'''
List blobs.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
storageservice = _get_block_blob_service(kwargs)
ret = {}
try:
for blob in storageservice.list_blobs(kwargs['container']).items:
ret[blob.name] = {
'blob_type': blob.properties.blob_type,
'last_modified': blob.properties.last_modified.isoformat(),
'server_encrypted': blob.properties.server_encrypted,
}
except Exception as exc:
log.warning(six.text_type(exc))
return ret | ['def', 'list_blobs', '(', 'call', '=', 'None', ',', 'kwargs', '=', 'None', ')', ':', '# pylint: disable=unused-argument', 'if', 'kwargs', 'is', 'None', ':', 'kwargs', '=', '{', '}', 'if', "'container'", 'not', 'in', 'kwargs', ':', 'raise', 'SaltCloudSystemExit', '(', "'A container must be specified'", ')', 'storageservice', '=', '_get_block_blob_service', '(', 'kwargs', ')', 'ret', '=', '{', '}', 'try', ':', 'for', 'blob', 'in', 'storageservice', '.', 'list_blobs', '(', 'kwargs', '[', "'container'", ']', ')', '.', 'items', ':', 'ret', '[', 'blob', '.', 'name', ']', '=', '{', "'blob_type'", ':', 'blob', '.', 'properties', '.', 'blob_type', ',', "'last_modified'", ':', 'blob', '.', 'properties', '.', 'last_modified', '.', 'isoformat', '(', ')', ',', "'server_encrypted'", ':', 'blob', '.', 'properties', '.', 'server_encrypted', ',', '}', 'except', 'Exception', 'as', 'exc', ':', 'log', '.', 'warning', '(', 'six', '.', 'text_type', '(', 'exc', ')', ')', 'return', 'ret'] | List blobs. | ['List', 'blobs', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1744-L1769 |
2,986 | andreikop/qutepart | qutepart/syntax/parser.py | ContextStack.pop | def pop(self, count):
"""Returns new context stack, which doesn't contain few levels
"""
if len(self._contexts) - 1 < count:
_logger.error("#pop value is too big %d", len(self._contexts))
if len(self._contexts) > 1:
return ContextStack(self._contexts[:1], self._data[:1])
else:
return self
return ContextStack(self._contexts[:-count], self._data[:-count]) | python | def pop(self, count):
"""Returns new context stack, which doesn't contain few levels
"""
if len(self._contexts) - 1 < count:
_logger.error("#pop value is too big %d", len(self._contexts))
if len(self._contexts) > 1:
return ContextStack(self._contexts[:1], self._data[:1])
else:
return self
return ContextStack(self._contexts[:-count], self._data[:-count]) | ['def', 'pop', '(', 'self', ',', 'count', ')', ':', 'if', 'len', '(', 'self', '.', '_contexts', ')', '-', '1', '<', 'count', ':', '_logger', '.', 'error', '(', '"#pop value is too big %d"', ',', 'len', '(', 'self', '.', '_contexts', ')', ')', 'if', 'len', '(', 'self', '.', '_contexts', ')', '>', '1', ':', 'return', 'ContextStack', '(', 'self', '.', '_contexts', '[', ':', '1', ']', ',', 'self', '.', '_data', '[', ':', '1', ']', ')', 'else', ':', 'return', 'self', 'return', 'ContextStack', '(', 'self', '.', '_contexts', '[', ':', '-', 'count', ']', ',', 'self', '.', '_data', '[', ':', '-', 'count', ']', ')'] | Returns new context stack, which doesn't contain few levels | ['Returns', 'new', 'context', 'stack', 'which', 'doesn', 't', 'contain', 'few', 'levels'] | train | https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/syntax/parser.py#L32-L42 |
2,987 | tjcsl/ion | intranet/apps/eighth/views/activities.py | generate_statistics_pdf | def generate_statistics_pdf(activities=None, start_date=None, all_years=False, year=None):
''' Accepts EighthActivity objects and outputs a PDF file. '''
if activities is None:
activities = EighthActivity.objects.all().order_by("name")
if year is None:
year = current_school_year()
if not isinstance(activities, list):
activities = activities.prefetch_related("rooms").prefetch_related("sponsors")
pdf_buffer = BytesIO()
h_margin = 1 * inch
v_margin = 0.5 * inch
doc = SimpleDocTemplate(pdf_buffer, pagesize=letter, rightMargin=h_margin, leftMargin=h_margin, topMargin=v_margin, bottomMargin=v_margin)
elements = []
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name="Indent", leftIndent=15))
empty_activities = []
for act in activities:
lelements = []
relements = []
act_stats = calculate_statistics(act, start_date=start_date, all_years=all_years, year=year)
if act_stats["total_blocks"] == 0:
empty_activities.append(act.name)
continue
elements.append(Paragraph(act.name, styles["Title"]))
sponsor_str = (", ".join([x.name for x in act.sponsors.all()])) if act.sponsors.count() > 0 else "None"
lelements.append(Paragraph("<b>Default Sponsors:</b> " + sponsor_str, styles["Normal"]))
lelements.append(Spacer(0, 0.025 * inch))
room_str = (", ".join([str(x) for x in act.rooms.all()])) if act.rooms.count() > 0 else "None"
relements.append(Paragraph("<b>Default Rooms:</b> " + room_str, styles["Normal"]))
relements.append(Spacer(0, 0.025 * inch))
relements.append(Paragraph("<b>Total blocks:</b> {}".format(act_stats["total_blocks"]), styles["Normal"]))
relements.append(Paragraph("<b>Scheduled blocks:</b> {}".format(act_stats["scheduled_blocks"]), styles["Indent"]))
relements.append(Paragraph("<b>Empty blocks:</b> {}".format(act_stats["empty_blocks"]), styles["Indent"]))
relements.append(Paragraph("<b>Cancelled blocks:</b> {}".format(act_stats["cancelled_blocks"]), styles["Indent"]))
lelements.append(Paragraph("<b>Total signups:</b> {}".format(act_stats["total_signups"]), styles["Normal"]))
lelements.append(Paragraph("<b>Average signups per block:</b> {}".format(act_stats["average_signups"]), styles["Indent"]))
lelements.append(Paragraph("<b>Average signups per student:</b> {}".format(act_stats["average_user_signups"]), styles["Indent"]))
lelements.append(
Paragraph("<b>Unique students:</b> {}, <b>Capacity:</b> {}".format(act_stats["students"], act_stats["capacity"]), styles["Normal"]))
elements.append(
Table([[lelements, relements]], style=[('LEFTPADDING', (0, 0), (-1, -1), 0), ('RIGHTPADDING', (0, 0), (-1, -1), 0), ('VALIGN', (0, 0),
(-1, -1), 'TOP')]))
parsed_members = [[x.username, y] for x, y in act_stats["members"]]
parsed_members = list(chunks(parsed_members, 30))[:3]
if parsed_members:
parsed_members = [[["Username", "Signups"]] + x for x in parsed_members]
parsed_members = [
Table(x, style=[('FONT', (0, 0), (1, 0), 'Helvetica-Bold'), ('ALIGN', (1, 0), (1, -1), 'RIGHT')]) for x in parsed_members
]
elements.append(Table([parsed_members], style=[('VALIGN', (-1, -1), (-1, -1), 'TOP')]))
if act_stats["students"] - 90 > 0:
elements.append(Paragraph("<b>{}</b> students were not shown on this page. ".format(act_stats["students"] - 90), styles["Normal"]))
else:
elements.append(Spacer(0, 0.20 * inch))
if start_date is not None:
elements.append(
Paragraph("<b>{}</b> block(s) are past the start date and are not included on this page.".format(act_stats["past_start_date"]),
styles["Normal"]))
elements.append(
Paragraph("<b>{}</b> block(s) not in the {}-{} school year are not included on this page.".format(
act_stats["old_blocks"], year - 1, year), styles["Normal"]))
elements.append(PageBreak())
if empty_activities:
empty_activities = [x[:37] + "..." if len(x) > 40 else x for x in empty_activities]
empty_activities = [[x] for x in empty_activities]
empty_activities = list(chunks(empty_activities, 35))
empty_activities = [[["Activity"]] + x for x in empty_activities]
empty_activities = [
Table(x, style=[('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'), ('LEFTPADDING', (0, 0), (-1, -1), 0)]) for x in empty_activities
]
for i in range(0, len(empty_activities), 2):
elements.append(Paragraph("Empty Activities (Page {})".format(i // 2 + 1), styles["Title"]))
if all_years:
elements.append(Paragraph("The following activities have no 8th period blocks assigned to them.", styles["Normal"]))
else:
elements.append(
Paragraph("The following activities have no 8th period blocks assigned to them for the {}-{} school year.".format(year - 1, year),
styles["Normal"]))
elements.append(Spacer(0, 0.10 * inch))
ea = [empty_activities[i]]
if i + 1 < len(empty_activities):
ea.append(empty_activities[i + 1])
elements.append(
Table([ea], style=[
('LEFTPADDING', (0, 0), (-1, -1), 0),
('RIGHTPADDING', (0, 0), (-1, -1), 0),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
], hAlign='LEFT'))
elements.append(PageBreak())
def first_page(canvas, _):
if len(activities) == 1:
canvas.setTitle("{} Statistics".format(activities[0].name))
else:
canvas.setTitle("8th Period Activity Statistics")
canvas.setAuthor("Generated by Ion")
doc.build(elements, onFirstPage=first_page)
pdf_buffer.seek(0)
return pdf_buffer | python | def generate_statistics_pdf(activities=None, start_date=None, all_years=False, year=None):
''' Accepts EighthActivity objects and outputs a PDF file. '''
if activities is None:
activities = EighthActivity.objects.all().order_by("name")
if year is None:
year = current_school_year()
if not isinstance(activities, list):
activities = activities.prefetch_related("rooms").prefetch_related("sponsors")
pdf_buffer = BytesIO()
h_margin = 1 * inch
v_margin = 0.5 * inch
doc = SimpleDocTemplate(pdf_buffer, pagesize=letter, rightMargin=h_margin, leftMargin=h_margin, topMargin=v_margin, bottomMargin=v_margin)
elements = []
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name="Indent", leftIndent=15))
empty_activities = []
for act in activities:
lelements = []
relements = []
act_stats = calculate_statistics(act, start_date=start_date, all_years=all_years, year=year)
if act_stats["total_blocks"] == 0:
empty_activities.append(act.name)
continue
elements.append(Paragraph(act.name, styles["Title"]))
sponsor_str = (", ".join([x.name for x in act.sponsors.all()])) if act.sponsors.count() > 0 else "None"
lelements.append(Paragraph("<b>Default Sponsors:</b> " + sponsor_str, styles["Normal"]))
lelements.append(Spacer(0, 0.025 * inch))
room_str = (", ".join([str(x) for x in act.rooms.all()])) if act.rooms.count() > 0 else "None"
relements.append(Paragraph("<b>Default Rooms:</b> " + room_str, styles["Normal"]))
relements.append(Spacer(0, 0.025 * inch))
relements.append(Paragraph("<b>Total blocks:</b> {}".format(act_stats["total_blocks"]), styles["Normal"]))
relements.append(Paragraph("<b>Scheduled blocks:</b> {}".format(act_stats["scheduled_blocks"]), styles["Indent"]))
relements.append(Paragraph("<b>Empty blocks:</b> {}".format(act_stats["empty_blocks"]), styles["Indent"]))
relements.append(Paragraph("<b>Cancelled blocks:</b> {}".format(act_stats["cancelled_blocks"]), styles["Indent"]))
lelements.append(Paragraph("<b>Total signups:</b> {}".format(act_stats["total_signups"]), styles["Normal"]))
lelements.append(Paragraph("<b>Average signups per block:</b> {}".format(act_stats["average_signups"]), styles["Indent"]))
lelements.append(Paragraph("<b>Average signups per student:</b> {}".format(act_stats["average_user_signups"]), styles["Indent"]))
lelements.append(
Paragraph("<b>Unique students:</b> {}, <b>Capacity:</b> {}".format(act_stats["students"], act_stats["capacity"]), styles["Normal"]))
elements.append(
Table([[lelements, relements]], style=[('LEFTPADDING', (0, 0), (-1, -1), 0), ('RIGHTPADDING', (0, 0), (-1, -1), 0), ('VALIGN', (0, 0),
(-1, -1), 'TOP')]))
parsed_members = [[x.username, y] for x, y in act_stats["members"]]
parsed_members = list(chunks(parsed_members, 30))[:3]
if parsed_members:
parsed_members = [[["Username", "Signups"]] + x for x in parsed_members]
parsed_members = [
Table(x, style=[('FONT', (0, 0), (1, 0), 'Helvetica-Bold'), ('ALIGN', (1, 0), (1, -1), 'RIGHT')]) for x in parsed_members
]
elements.append(Table([parsed_members], style=[('VALIGN', (-1, -1), (-1, -1), 'TOP')]))
if act_stats["students"] - 90 > 0:
elements.append(Paragraph("<b>{}</b> students were not shown on this page. ".format(act_stats["students"] - 90), styles["Normal"]))
else:
elements.append(Spacer(0, 0.20 * inch))
if start_date is not None:
elements.append(
Paragraph("<b>{}</b> block(s) are past the start date and are not included on this page.".format(act_stats["past_start_date"]),
styles["Normal"]))
elements.append(
Paragraph("<b>{}</b> block(s) not in the {}-{} school year are not included on this page.".format(
act_stats["old_blocks"], year - 1, year), styles["Normal"]))
elements.append(PageBreak())
if empty_activities:
empty_activities = [x[:37] + "..." if len(x) > 40 else x for x in empty_activities]
empty_activities = [[x] for x in empty_activities]
empty_activities = list(chunks(empty_activities, 35))
empty_activities = [[["Activity"]] + x for x in empty_activities]
empty_activities = [
Table(x, style=[('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'), ('LEFTPADDING', (0, 0), (-1, -1), 0)]) for x in empty_activities
]
for i in range(0, len(empty_activities), 2):
elements.append(Paragraph("Empty Activities (Page {})".format(i // 2 + 1), styles["Title"]))
if all_years:
elements.append(Paragraph("The following activities have no 8th period blocks assigned to them.", styles["Normal"]))
else:
elements.append(
Paragraph("The following activities have no 8th period blocks assigned to them for the {}-{} school year.".format(year - 1, year),
styles["Normal"]))
elements.append(Spacer(0, 0.10 * inch))
ea = [empty_activities[i]]
if i + 1 < len(empty_activities):
ea.append(empty_activities[i + 1])
elements.append(
Table([ea], style=[
('LEFTPADDING', (0, 0), (-1, -1), 0),
('RIGHTPADDING', (0, 0), (-1, -1), 0),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
], hAlign='LEFT'))
elements.append(PageBreak())
def first_page(canvas, _):
if len(activities) == 1:
canvas.setTitle("{} Statistics".format(activities[0].name))
else:
canvas.setTitle("8th Period Activity Statistics")
canvas.setAuthor("Generated by Ion")
doc.build(elements, onFirstPage=first_page)
pdf_buffer.seek(0)
return pdf_buffer | ['def', 'generate_statistics_pdf', '(', 'activities', '=', 'None', ',', 'start_date', '=', 'None', ',', 'all_years', '=', 'False', ',', 'year', '=', 'None', ')', ':', 'if', 'activities', 'is', 'None', ':', 'activities', '=', 'EighthActivity', '.', 'objects', '.', 'all', '(', ')', '.', 'order_by', '(', '"name"', ')', 'if', 'year', 'is', 'None', ':', 'year', '=', 'current_school_year', '(', ')', 'if', 'not', 'isinstance', '(', 'activities', ',', 'list', ')', ':', 'activities', '=', 'activities', '.', 'prefetch_related', '(', '"rooms"', ')', '.', 'prefetch_related', '(', '"sponsors"', ')', 'pdf_buffer', '=', 'BytesIO', '(', ')', 'h_margin', '=', '1', '*', 'inch', 'v_margin', '=', '0.5', '*', 'inch', 'doc', '=', 'SimpleDocTemplate', '(', 'pdf_buffer', ',', 'pagesize', '=', 'letter', ',', 'rightMargin', '=', 'h_margin', ',', 'leftMargin', '=', 'h_margin', ',', 'topMargin', '=', 'v_margin', ',', 'bottomMargin', '=', 'v_margin', ')', 'elements', '=', '[', ']', 'styles', '=', 'getSampleStyleSheet', '(', ')', 'styles', '.', 'add', '(', 'ParagraphStyle', '(', 'name', '=', '"Indent"', ',', 'leftIndent', '=', '15', ')', ')', 'empty_activities', '=', '[', ']', 'for', 'act', 'in', 'activities', ':', 'lelements', '=', '[', ']', 'relements', '=', '[', ']', 'act_stats', '=', 'calculate_statistics', '(', 'act', ',', 'start_date', '=', 'start_date', ',', 'all_years', '=', 'all_years', ',', 'year', '=', 'year', ')', 'if', 'act_stats', '[', '"total_blocks"', ']', '==', '0', ':', 'empty_activities', '.', 'append', '(', 'act', '.', 'name', ')', 'continue', 'elements', '.', 'append', '(', 'Paragraph', '(', 'act', '.', 'name', ',', 'styles', '[', '"Title"', ']', ')', ')', 'sponsor_str', '=', '(', '", "', '.', 'join', '(', '[', 'x', '.', 'name', 'for', 'x', 'in', 'act', '.', 'sponsors', '.', 'all', '(', ')', ']', ')', ')', 'if', 'act', '.', 'sponsors', '.', 'count', '(', ')', '>', '0', 'else', '"None"', 'lelements', '.', 'append', '(', 'Paragraph', '(', '"<b>Default Sponsors:</b> "', '+', 'sponsor_str', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'lelements', '.', 'append', '(', 'Spacer', '(', '0', ',', '0.025', '*', 'inch', ')', ')', 'room_str', '=', '(', '", "', '.', 'join', '(', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'act', '.', 'rooms', '.', 'all', '(', ')', ']', ')', ')', 'if', 'act', '.', 'rooms', '.', 'count', '(', ')', '>', '0', 'else', '"None"', 'relements', '.', 'append', '(', 'Paragraph', '(', '"<b>Default Rooms:</b> "', '+', 'room_str', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'relements', '.', 'append', '(', 'Spacer', '(', '0', ',', '0.025', '*', 'inch', ')', ')', 'relements', '.', 'append', '(', 'Paragraph', '(', '"<b>Total blocks:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"total_blocks"', ']', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'relements', '.', 'append', '(', 'Paragraph', '(', '"<b>Scheduled blocks:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"scheduled_blocks"', ']', ')', ',', 'styles', '[', '"Indent"', ']', ')', ')', 'relements', '.', 'append', '(', 'Paragraph', '(', '"<b>Empty blocks:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"empty_blocks"', ']', ')', ',', 'styles', '[', '"Indent"', ']', ')', ')', 'relements', '.', 'append', '(', 'Paragraph', '(', '"<b>Cancelled blocks:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"cancelled_blocks"', ']', ')', ',', 'styles', '[', '"Indent"', ']', ')', ')', 'lelements', '.', 'append', '(', 'Paragraph', '(', '"<b>Total signups:</b> {}"', '.', 'format', '(', 'act_stats', '[', 
'"total_signups"', ']', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'lelements', '.', 'append', '(', 'Paragraph', '(', '"<b>Average signups per block:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"average_signups"', ']', ')', ',', 'styles', '[', '"Indent"', ']', ')', ')', 'lelements', '.', 'append', '(', 'Paragraph', '(', '"<b>Average signups per student:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"average_user_signups"', ']', ')', ',', 'styles', '[', '"Indent"', ']', ')', ')', 'lelements', '.', 'append', '(', 'Paragraph', '(', '"<b>Unique students:</b> {}, <b>Capacity:</b> {}"', '.', 'format', '(', 'act_stats', '[', '"students"', ']', ',', 'act_stats', '[', '"capacity"', ']', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'elements', '.', 'append', '(', 'Table', '(', '[', '[', 'lelements', ',', 'relements', ']', ']', ',', 'style', '=', '[', '(', "'LEFTPADDING'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', '0', ')', ',', '(', "'RIGHTPADDING'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', '0', ')', ',', '(', "'VALIGN'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', "'TOP'", ')', ']', ')', ')', 'parsed_members', '=', '[', '[', 'x', '.', 'username', ',', 'y', ']', 'for', 'x', ',', 'y', 'in', 'act_stats', '[', '"members"', ']', ']', 'parsed_members', '=', 'list', '(', 'chunks', '(', 'parsed_members', ',', '30', ')', ')', '[', ':', '3', ']', 'if', 'parsed_members', ':', 'parsed_members', '=', '[', '[', '[', '"Username"', ',', '"Signups"', ']', ']', '+', 'x', 'for', 'x', 'in', 'parsed_members', ']', 'parsed_members', '=', '[', 'Table', '(', 'x', ',', 'style', '=', '[', '(', "'FONT'", ',', '(', '0', ',', '0', ')', ',', '(', '1', ',', '0', ')', ',', "'Helvetica-Bold'", ')', ',', '(', "'ALIGN'", ',', '(', '1', ',', '0', ')', ',', '(', '1', ',', '-', '1', ')', ',', "'RIGHT'", ')', ']', ')', 'for', 'x', 'in', 'parsed_members', ']', 'elements', '.', 'append', '(', 'Table', '(', '[', 'parsed_members', ']', ',', 'style', '=', '[', '(', "'VALIGN'", ',', '(', '-', '1', ',', '-', '1', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', "'TOP'", ')', ']', ')', ')', 'if', 'act_stats', '[', '"students"', ']', '-', '90', '>', '0', ':', 'elements', '.', 'append', '(', 'Paragraph', '(', '"<b>{}</b> students were not shown on this page. 
"', '.', 'format', '(', 'act_stats', '[', '"students"', ']', '-', '90', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'else', ':', 'elements', '.', 'append', '(', 'Spacer', '(', '0', ',', '0.20', '*', 'inch', ')', ')', 'if', 'start_date', 'is', 'not', 'None', ':', 'elements', '.', 'append', '(', 'Paragraph', '(', '"<b>{}</b> block(s) are past the start date and are not included on this page."', '.', 'format', '(', 'act_stats', '[', '"past_start_date"', ']', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'elements', '.', 'append', '(', 'Paragraph', '(', '"<b>{}</b> block(s) not in the {}-{} school year are not included on this page."', '.', 'format', '(', 'act_stats', '[', '"old_blocks"', ']', ',', 'year', '-', '1', ',', 'year', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'elements', '.', 'append', '(', 'PageBreak', '(', ')', ')', 'if', 'empty_activities', ':', 'empty_activities', '=', '[', 'x', '[', ':', '37', ']', '+', '"..."', 'if', 'len', '(', 'x', ')', '>', '40', 'else', 'x', 'for', 'x', 'in', 'empty_activities', ']', 'empty_activities', '=', '[', '[', 'x', ']', 'for', 'x', 'in', 'empty_activities', ']', 'empty_activities', '=', 'list', '(', 'chunks', '(', 'empty_activities', ',', '35', ')', ')', 'empty_activities', '=', '[', '[', '[', '"Activity"', ']', ']', '+', 'x', 'for', 'x', 'in', 'empty_activities', ']', 'empty_activities', '=', '[', 'Table', '(', 'x', ',', 'style', '=', '[', '(', "'FONT'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '0', ')', ',', "'Helvetica-Bold'", ')', ',', '(', "'LEFTPADDING'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', '0', ')', ']', ')', 'for', 'x', 'in', 'empty_activities', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'empty_activities', ')', ',', '2', ')', ':', 'elements', '.', 'append', '(', 'Paragraph', '(', '"Empty Activities (Page {})"', '.', 'format', '(', 'i', '//', '2', '+', '1', ')', ',', 'styles', '[', '"Title"', ']', ')', ')', 'if', 'all_years', ':', 'elements', '.', 'append', '(', 'Paragraph', '(', '"The following activities have no 8th period blocks assigned to them."', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'else', ':', 'elements', '.', 'append', '(', 'Paragraph', '(', '"The following activities have no 8th period blocks assigned to them for the {}-{} school year."', '.', 'format', '(', 'year', '-', '1', ',', 'year', ')', ',', 'styles', '[', '"Normal"', ']', ')', ')', 'elements', '.', 'append', '(', 'Spacer', '(', '0', ',', '0.10', '*', 'inch', ')', ')', 'ea', '=', '[', 'empty_activities', '[', 'i', ']', ']', 'if', 'i', '+', '1', '<', 'len', '(', 'empty_activities', ')', ':', 'ea', '.', 'append', '(', 'empty_activities', '[', 'i', '+', '1', ']', ')', 'elements', '.', 'append', '(', 'Table', '(', '[', 'ea', ']', ',', 'style', '=', '[', '(', "'LEFTPADDING'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', '0', ')', ',', '(', "'RIGHTPADDING'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', '0', ')', ',', '(', "'VALIGN'", ',', '(', '0', ',', '0', ')', ',', '(', '-', '1', ',', '-', '1', ')', ',', "'TOP'", ')', ',', ']', ',', 'hAlign', '=', "'LEFT'", ')', ')', 'elements', '.', 'append', '(', 'PageBreak', '(', ')', ')', 'def', 'first_page', '(', 'canvas', ',', '_', ')', ':', 'if', 'len', '(', 'activities', ')', '==', '1', ':', 'canvas', '.', 'setTitle', '(', '"{} Statistics"', '.', 'format', '(', 'activities', '[', '0', ']', '.', 'name', ')', ')', 'else', ':', 'canvas', '.', 'setTitle', '(', '"8th Period 
Activity Statistics"', ')', 'canvas', '.', 'setAuthor', '(', '"Generated by Ion"', ')', 'doc', '.', 'build', '(', 'elements', ',', 'onFirstPage', '=', 'first_page', ')', 'pdf_buffer', '.', 'seek', '(', '0', ')', 'return', 'pdf_buffer'] | Accepts EighthActivity objects and outputs a PDF file. | ['Accepts', 'EighthActivity', 'objects', 'and', 'outputs', 'a', 'PDF', 'file', '.'] | train | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/eighth/views/activities.py#L58-L173 |
2,988 | arista-eosplus/pyeapi | pyeapi/api/routemaps.py | Routemaps.get | def get(self, name):
"""Provides a method to retrieve all routemap configuration
related to the name attribute.
Args:
name (string): The name of the routemap.
Returns:
None if the specified routemap does not exists. If the routermap
exists a dictionary will be provided as follows::
{
'deny': {
30: {
'continue': 200,
'description': None,
'match': ['as 2000',
'source-protocol ospf',
'interface Ethernet2'],
'set': []
}
},
'permit': {
10: {
'continue': 100,
'description': None,
'match': ['interface Ethernet1'],
'set': ['tag 50']},
20: {
'continue': 200,
'description': None,
'match': ['as 2000',
'source-protocol ospf',
'interface Ethernet2'],
'set': []
}
}
}
"""
if not self.get_block(r'route-map\s%s\s\w+\s\d+' % name):
return None
return self._parse_entries(name) | python | def get(self, name):
"""Provides a method to retrieve all routemap configuration
related to the name attribute.
Args:
name (string): The name of the routemap.
Returns:
None if the specified routemap does not exists. If the routermap
exists a dictionary will be provided as follows::
{
'deny': {
30: {
'continue': 200,
'description': None,
'match': ['as 2000',
'source-protocol ospf',
'interface Ethernet2'],
'set': []
}
},
'permit': {
10: {
'continue': 100,
'description': None,
'match': ['interface Ethernet1'],
'set': ['tag 50']},
20: {
'continue': 200,
'description': None,
'match': ['as 2000',
'source-protocol ospf',
'interface Ethernet2'],
'set': []
}
}
}
"""
if not self.get_block(r'route-map\s%s\s\w+\s\d+' % name):
return None
return self._parse_entries(name) | ['def', 'get', '(', 'self', ',', 'name', ')', ':', 'if', 'not', 'self', '.', 'get_block', '(', "r'route-map\\s%s\\s\\w+\\s\\d+'", '%', 'name', ')', ':', 'return', 'None', 'return', 'self', '.', '_parse_entries', '(', 'name', ')'] | Provides a method to retrieve all routemap configuration
related to the name attribute.
Args:
name (string): The name of the routemap.
Returns:
None if the specified routemap does not exists. If the routermap
exists a dictionary will be provided as follows::
{
'deny': {
30: {
'continue': 200,
'description': None,
'match': ['as 2000',
'source-protocol ospf',
'interface Ethernet2'],
'set': []
}
},
'permit': {
10: {
'continue': 100,
'description': None,
'match': ['interface Ethernet1'],
'set': ['tag 50']},
20: {
'continue': 200,
'description': None,
'match': ['as 2000',
'source-protocol ospf',
'interface Ethernet2'],
'set': []
}
}
} | ['Provides', 'a', 'method', 'to', 'retrieve', 'all', 'routemap', 'configuration', 'related', 'to', 'the', 'name', 'attribute', '.'] | train | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/routemaps.py#L57-L99 |
2,989 | apache/incubator-mxnet | example/cnn_text_classification/text_cnn.py | train | def train(symbol_data, train_iterator, valid_iterator, data_column_names, target_names):
"""Train cnn model
Parameters
----------
symbol_data: symbol
train_iterator: DataIter
Train DataIter
valid_iterator: DataIter
Valid DataIter
data_column_names: list of str
Defaults to ('data') for a typical model used in image classification
target_names: list of str
Defaults to ('softmax_label') for a typical model used in image classification
"""
devs = mx.cpu() # default setting
if args.gpus is not None:
for i in args.gpus.split(','):
mx.gpu(int(i))
devs = mx.gpu()
module = mx.mod.Module(symbol_data, data_names=data_column_names, label_names=target_names, context=devs)
module.fit(train_data=train_iterator,
eval_data=valid_iterator,
eval_metric='acc',
kvstore=args.kv_store,
optimizer=args.optimizer,
optimizer_params={'learning_rate': args.lr},
initializer=mx.initializer.Uniform(0.1),
num_epoch=args.num_epochs,
batch_end_callback=mx.callback.Speedometer(args.batch_size, args.disp_batches),
epoch_end_callback=save_model()) | python | def train(symbol_data, train_iterator, valid_iterator, data_column_names, target_names):
"""Train cnn model
Parameters
----------
symbol_data: symbol
train_iterator: DataIter
Train DataIter
valid_iterator: DataIter
Valid DataIter
data_column_names: list of str
Defaults to ('data') for a typical model used in image classification
target_names: list of str
Defaults to ('softmax_label') for a typical model used in image classification
"""
devs = mx.cpu() # default setting
if args.gpus is not None:
for i in args.gpus.split(','):
mx.gpu(int(i))
devs = mx.gpu()
module = mx.mod.Module(symbol_data, data_names=data_column_names, label_names=target_names, context=devs)
module.fit(train_data=train_iterator,
eval_data=valid_iterator,
eval_metric='acc',
kvstore=args.kv_store,
optimizer=args.optimizer,
optimizer_params={'learning_rate': args.lr},
initializer=mx.initializer.Uniform(0.1),
num_epoch=args.num_epochs,
batch_end_callback=mx.callback.Speedometer(args.batch_size, args.disp_batches),
epoch_end_callback=save_model()) | ['def', 'train', '(', 'symbol_data', ',', 'train_iterator', ',', 'valid_iterator', ',', 'data_column_names', ',', 'target_names', ')', ':', 'devs', '=', 'mx', '.', 'cpu', '(', ')', '# default setting', 'if', 'args', '.', 'gpus', 'is', 'not', 'None', ':', 'for', 'i', 'in', 'args', '.', 'gpus', '.', 'split', '(', "','", ')', ':', 'mx', '.', 'gpu', '(', 'int', '(', 'i', ')', ')', 'devs', '=', 'mx', '.', 'gpu', '(', ')', 'module', '=', 'mx', '.', 'mod', '.', 'Module', '(', 'symbol_data', ',', 'data_names', '=', 'data_column_names', ',', 'label_names', '=', 'target_names', ',', 'context', '=', 'devs', ')', 'module', '.', 'fit', '(', 'train_data', '=', 'train_iterator', ',', 'eval_data', '=', 'valid_iterator', ',', 'eval_metric', '=', "'acc'", ',', 'kvstore', '=', 'args', '.', 'kv_store', ',', 'optimizer', '=', 'args', '.', 'optimizer', ',', 'optimizer_params', '=', '{', "'learning_rate'", ':', 'args', '.', 'lr', '}', ',', 'initializer', '=', 'mx', '.', 'initializer', '.', 'Uniform', '(', '0.1', ')', ',', 'num_epoch', '=', 'args', '.', 'num_epochs', ',', 'batch_end_callback', '=', 'mx', '.', 'callback', '.', 'Speedometer', '(', 'args', '.', 'batch_size', ',', 'args', '.', 'disp_batches', ')', ',', 'epoch_end_callback', '=', 'save_model', '(', ')', ')'] | Train cnn model
Parameters
----------
symbol_data: symbol
train_iterator: DataIter
Train DataIter
valid_iterator: DataIter
Valid DataIter
data_column_names: list of str
Defaults to ('data') for a typical model used in image classification
target_names: list of str
Defaults to ('softmax_label') for a typical model used in image classification | ['Train', 'cnn', 'model'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/text_cnn.py#L201-L231 |
2,990 | dead-beef/markovchain | markovchain/text/util.py | re_sub | def re_sub(pattern, repl, string, count=0, flags=0, custom_flags=0):
"""Replace regular expression.
Parameters
----------
pattern : `str` or `_sre.SRE_Pattern`
Compiled regular expression.
repl : `str` or `function`
Replacement.
string : `str`
Input string.
count: `int`
Maximum number of pattern occurrences.
flags : `int`
Flags.
custom_flags : `int`
Custom flags.
"""
if custom_flags & ReFlags.OVERLAP:
prev_string = None
while string != prev_string:
prev_string = string
string = re.sub(pattern, repl, string, count, flags)
return string
return re.sub(pattern, repl, string, count, flags) | python | def re_sub(pattern, repl, string, count=0, flags=0, custom_flags=0):
"""Replace regular expression.
Parameters
----------
pattern : `str` or `_sre.SRE_Pattern`
Compiled regular expression.
repl : `str` or `function`
Replacement.
string : `str`
Input string.
count: `int`
Maximum number of pattern occurrences.
flags : `int`
Flags.
custom_flags : `int`
Custom flags.
"""
if custom_flags & ReFlags.OVERLAP:
prev_string = None
while string != prev_string:
prev_string = string
string = re.sub(pattern, repl, string, count, flags)
return string
return re.sub(pattern, repl, string, count, flags) | ['def', 're_sub', '(', 'pattern', ',', 'repl', ',', 'string', ',', 'count', '=', '0', ',', 'flags', '=', '0', ',', 'custom_flags', '=', '0', ')', ':', 'if', 'custom_flags', '&', 'ReFlags', '.', 'OVERLAP', ':', 'prev_string', '=', 'None', 'while', 'string', '!=', 'prev_string', ':', 'prev_string', '=', 'string', 'string', '=', 're', '.', 'sub', '(', 'pattern', ',', 'repl', ',', 'string', ',', 'count', ',', 'flags', ')', 'return', 'string', 'return', 're', '.', 'sub', '(', 'pattern', ',', 'repl', ',', 'string', ',', 'count', ',', 'flags', ')'] | Replace regular expression.
Parameters
----------
pattern : `str` or `_sre.SRE_Pattern`
Compiled regular expression.
repl : `str` or `function`
Replacement.
string : `str`
Input string.
count: `int`
Maximum number of pattern occurrences.
flags : `int`
Flags.
custom_flags : `int`
Custom flags. | ['Replace', 'regular', 'expression', '.'] | train | https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/text/util.py#L222-L246 |
2,991 | pyviz/holoviews | holoviews/core/data/__init__.py | Dataset.dframe | def dframe(self, dimensions=None, multi_index=False):
"""Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
"""
if dimensions is None:
dimensions = [d.name for d in self.dimensions()]
else:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
df = self.interface.dframe(self, dimensions)
if multi_index:
df = df.set_index([d for d in dimensions if d in self.kdims])
return df | python | def dframe(self, dimensions=None, multi_index=False):
"""Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
"""
if dimensions is None:
dimensions = [d.name for d in self.dimensions()]
else:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
df = self.interface.dframe(self, dimensions)
if multi_index:
df = df.set_index([d for d in dimensions if d in self.kdims])
return df | ['def', 'dframe', '(', 'self', ',', 'dimensions', '=', 'None', ',', 'multi_index', '=', 'False', ')', ':', 'if', 'dimensions', 'is', 'None', ':', 'dimensions', '=', '[', 'd', '.', 'name', 'for', 'd', 'in', 'self', '.', 'dimensions', '(', ')', ']', 'else', ':', 'dimensions', '=', '[', 'self', '.', 'get_dimension', '(', 'd', ',', 'strict', '=', 'True', ')', '.', 'name', 'for', 'd', 'in', 'dimensions', ']', 'df', '=', 'self', '.', 'interface', '.', 'dframe', '(', 'self', ',', 'dimensions', ')', 'if', 'multi_index', ':', 'df', '=', 'df', '.', 'set_index', '(', '[', 'd', 'for', 'd', 'in', 'dimensions', 'if', 'd', 'in', 'self', '.', 'kdims', ']', ')', 'return', 'df'] | Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension | ['Convert', 'dimension', 'values', 'to', 'DataFrame', '.'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/data/__init__.py#L791-L811 |
2,992 | secdev/scapy | scapy/layers/tls/record_tls13.py | TLS13.pre_dissect | def pre_dissect(self, s):
"""
Decrypt, verify and decompress the message.
"""
if len(s) < 5:
raise Exception("Invalid record: header is too short.")
if isinstance(self.tls_session.rcs.cipher, Cipher_NULL):
self.deciphered_len = None
return s
else:
msglen = struct.unpack('!H', s[3:5])[0]
hdr, efrag, r = s[:5], s[5:5 + msglen], s[msglen + 5:]
frag, auth_tag = self._tls_auth_decrypt(efrag)
self.deciphered_len = len(frag)
return hdr + frag + auth_tag + r | python | def pre_dissect(self, s):
"""
Decrypt, verify and decompress the message.
"""
if len(s) < 5:
raise Exception("Invalid record: header is too short.")
if isinstance(self.tls_session.rcs.cipher, Cipher_NULL):
self.deciphered_len = None
return s
else:
msglen = struct.unpack('!H', s[3:5])[0]
hdr, efrag, r = s[:5], s[5:5 + msglen], s[msglen + 5:]
frag, auth_tag = self._tls_auth_decrypt(efrag)
self.deciphered_len = len(frag)
return hdr + frag + auth_tag + r | ['def', 'pre_dissect', '(', 'self', ',', 's', ')', ':', 'if', 'len', '(', 's', ')', '<', '5', ':', 'raise', 'Exception', '(', '"Invalid record: header is too short."', ')', 'if', 'isinstance', '(', 'self', '.', 'tls_session', '.', 'rcs', '.', 'cipher', ',', 'Cipher_NULL', ')', ':', 'self', '.', 'deciphered_len', '=', 'None', 'return', 's', 'else', ':', 'msglen', '=', 'struct', '.', 'unpack', '(', "'!H'", ',', 's', '[', '3', ':', '5', ']', ')', '[', '0', ']', 'hdr', ',', 'efrag', ',', 'r', '=', 's', '[', ':', '5', ']', ',', 's', '[', '5', ':', '5', '+', 'msglen', ']', ',', 's', '[', 'msglen', '+', '5', ':', ']', 'frag', ',', 'auth_tag', '=', 'self', '.', '_tls_auth_decrypt', '(', 'efrag', ')', 'self', '.', 'deciphered_len', '=', 'len', '(', 'frag', ')', 'return', 'hdr', '+', 'frag', '+', 'auth_tag', '+', 'r'] | Decrypt, verify and decompress the message. | ['Decrypt', 'verify', 'and', 'decompress', 'the', 'message', '.'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/record_tls13.py#L124-L139 |
2,993 | huge-success/sanic | sanic/config.py | Config.load_environment_vars | def load_environment_vars(self, prefix=SANIC_PREFIX):
"""
Looks for prefixed environment variables and applies
them to the configuration if present.
"""
for k, v in os.environ.items():
if k.startswith(prefix):
_, config_key = k.split(prefix, 1)
try:
self[config_key] = int(v)
except ValueError:
try:
self[config_key] = float(v)
except ValueError:
try:
self[config_key] = strtobool(v)
except ValueError:
self[config_key] = v | python | def load_environment_vars(self, prefix=SANIC_PREFIX):
"""
Looks for prefixed environment variables and applies
them to the configuration if present.
"""
for k, v in os.environ.items():
if k.startswith(prefix):
_, config_key = k.split(prefix, 1)
try:
self[config_key] = int(v)
except ValueError:
try:
self[config_key] = float(v)
except ValueError:
try:
self[config_key] = strtobool(v)
except ValueError:
self[config_key] = v | ['def', 'load_environment_vars', '(', 'self', ',', 'prefix', '=', 'SANIC_PREFIX', ')', ':', 'for', 'k', ',', 'v', 'in', 'os', '.', 'environ', '.', 'items', '(', ')', ':', 'if', 'k', '.', 'startswith', '(', 'prefix', ')', ':', '_', ',', 'config_key', '=', 'k', '.', 'split', '(', 'prefix', ',', '1', ')', 'try', ':', 'self', '[', 'config_key', ']', '=', 'int', '(', 'v', ')', 'except', 'ValueError', ':', 'try', ':', 'self', '[', 'config_key', ']', '=', 'float', '(', 'v', ')', 'except', 'ValueError', ':', 'try', ':', 'self', '[', 'config_key', ']', '=', 'strtobool', '(', 'v', ')', 'except', 'ValueError', ':', 'self', '[', 'config_key', ']', '=', 'v'] | Looks for prefixed environment variables and applies
them to the configuration if present. | ['Looks', 'for', 'prefixed', 'environment', 'variables', 'and', 'applies', 'them', 'to', 'the', 'configuration', 'if', 'present', '.'] | train | https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/config.py#L116-L133 |
2,994 | albahnsen/CostSensitiveClassification | costcla/models/cost_tree.py | CostSensitiveDecisionTreeClassifier._classify | def _classify(self, X, tree, proba=False):
""" Private function that classify a dataset using tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
tree : object
proba : bool, optional (default=False)
If True then return probabilities else return class
Returns
-------
prediction : array of shape = [n_samples]
If proba then return the predicted positive probabilities, else return
the predicted class for each example in X
"""
n_samples, n_features = X.shape
predicted = np.ones(n_samples)
# Check if final node
if tree['split'] == -1:
if not proba:
predicted = predicted * tree['y_pred']
else:
predicted = predicted * tree['y_prob']
else:
j, l = tree['split']
filter_Xl = (X[:, j] <= l)
filter_Xr = ~filter_Xl
n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0]
if n_samples_Xl == 0: # If left node is empty only continue with right
predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba)
elif n_samples_Xr == 0: # If right node is empty only continue with left
predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba)
else:
predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba)
predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba)
return predicted | python | def _classify(self, X, tree, proba=False):
""" Private function that classify a dataset using tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
tree : object
proba : bool, optional (default=False)
If True then return probabilities else return class
Returns
-------
prediction : array of shape = [n_samples]
If proba then return the predicted positive probabilities, else return
the predicted class for each example in X
"""
n_samples, n_features = X.shape
predicted = np.ones(n_samples)
# Check if final node
if tree['split'] == -1:
if not proba:
predicted = predicted * tree['y_pred']
else:
predicted = predicted * tree['y_prob']
else:
j, l = tree['split']
filter_Xl = (X[:, j] <= l)
filter_Xr = ~filter_Xl
n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
n_samples_Xr = np.nonzero(filter_Xr)[0].shape[0]
if n_samples_Xl == 0: # If left node is empty only continue with right
predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba)
elif n_samples_Xr == 0: # If right node is empty only continue with left
predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba)
else:
predicted[filter_Xl] = self._classify(X[filter_Xl, :], tree['sl'], proba)
predicted[filter_Xr] = self._classify(X[filter_Xr, :], tree['sr'], proba)
return predicted | ['def', '_classify', '(', 'self', ',', 'X', ',', 'tree', ',', 'proba', '=', 'False', ')', ':', 'n_samples', ',', 'n_features', '=', 'X', '.', 'shape', 'predicted', '=', 'np', '.', 'ones', '(', 'n_samples', ')', '# Check if final node', 'if', 'tree', '[', "'split'", ']', '==', '-', '1', ':', 'if', 'not', 'proba', ':', 'predicted', '=', 'predicted', '*', 'tree', '[', "'y_pred'", ']', 'else', ':', 'predicted', '=', 'predicted', '*', 'tree', '[', "'y_prob'", ']', 'else', ':', 'j', ',', 'l', '=', 'tree', '[', "'split'", ']', 'filter_Xl', '=', '(', 'X', '[', ':', ',', 'j', ']', '<=', 'l', ')', 'filter_Xr', '=', '~', 'filter_Xl', 'n_samples_Xl', '=', 'np', '.', 'nonzero', '(', 'filter_Xl', ')', '[', '0', ']', '.', 'shape', '[', '0', ']', 'n_samples_Xr', '=', 'np', '.', 'nonzero', '(', 'filter_Xr', ')', '[', '0', ']', '.', 'shape', '[', '0', ']', 'if', 'n_samples_Xl', '==', '0', ':', '# If left node is empty only continue with right', 'predicted', '[', 'filter_Xr', ']', '=', 'self', '.', '_classify', '(', 'X', '[', 'filter_Xr', ',', ':', ']', ',', 'tree', '[', "'sr'", ']', ',', 'proba', ')', 'elif', 'n_samples_Xr', '==', '0', ':', '# If right node is empty only continue with left', 'predicted', '[', 'filter_Xl', ']', '=', 'self', '.', '_classify', '(', 'X', '[', 'filter_Xl', ',', ':', ']', ',', 'tree', '[', "'sl'", ']', ',', 'proba', ')', 'else', ':', 'predicted', '[', 'filter_Xl', ']', '=', 'self', '.', '_classify', '(', 'X', '[', 'filter_Xl', ',', ':', ']', ',', 'tree', '[', "'sl'", ']', ',', 'proba', ')', 'predicted', '[', 'filter_Xr', ']', '=', 'self', '.', '_classify', '(', 'X', '[', 'filter_Xr', ',', ':', ']', ',', 'tree', '[', "'sr'", ']', ',', 'proba', ')', 'return', 'predicted'] | Private function that classify a dataset using tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
tree : object
proba : bool, optional (default=False)
If True then return probabilities else return class
Returns
-------
prediction : array of shape = [n_samples]
If proba then return the predicted positive probabilities, else return
the predicted class for each example in X | ['Private', 'function', 'that', 'classify', 'a', 'dataset', 'using', 'tree', '.'] | train | https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L468-L513 |
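A minimal stand-alone sketch of the recursive split-and-descend logic shown in the costcla row above; classify_with_tree, toy_tree, and the feature/threshold values are names and data invented here for illustration, not part of the costcla API:

import numpy as np

# Stand-alone mirror of the traversal: a node is either a leaf ('split' == -1)
# or carries a (feature_index, threshold) pair plus left/right subtrees 'sl'/'sr'.
def classify_with_tree(X, tree, proba=False):
    predicted = np.ones(X.shape[0])
    if tree['split'] == -1:                       # leaf: broadcast the stored prediction
        return predicted * (tree['y_prob'] if proba else tree['y_pred'])
    j, threshold = tree['split']
    left = X[:, j] <= threshold
    right = ~left
    if left.any():
        predicted[left] = classify_with_tree(X[left, :], tree['sl'], proba)
    if right.any():
        predicted[right] = classify_with_tree(X[right, :], tree['sr'], proba)
    return predicted

# Toy tree: split on feature 0 at 0.5, predict class 0 on the left branch and 1 on the right.
toy_tree = {'split': (0, 0.5),
            'sl': {'split': -1, 'y_pred': 0, 'y_prob': 0.1},
            'sr': {'split': -1, 'y_pred': 1, 'y_prob': 0.9}}
X = np.array([[0.2], [0.8]])
print(classify_with_tree(X, toy_tree))         # [0. 1.]
print(classify_with_tree(X, toy_tree, True))   # [0.1 0.9]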
2,995 | mushkevych/scheduler | synergy/scheduler/garbage_collector.py | GarbageCollector.scan_uow_candidates | def scan_uow_candidates(self):
""" method performs two actions:
- enlist stale or invalid units of work into reprocessing queue
- cancel UOWs that are older than 2 days and have been submitted more than 1 hour ago """
try:
since = settings.settings['synergy_start_timeperiod']
uow_list = self.uow_dao.get_reprocessing_candidates(since)
except LookupError as e:
self.logger.info('flow: no UOW candidates found for reprocessing: {0}'.format(e))
return
for uow in uow_list:
try:
if uow.process_name not in self.managed_handlers:
self.logger.debug('process {0} is not known to the Synergy Scheduler. Skipping its UOW.'
.format(uow.process_name))
continue
thread_handler = self.managed_handlers[uow.process_name]
assert isinstance(thread_handler, ManagedThreadHandler)
if not thread_handler.process_entry.is_on:
self.logger.debug('process {0} is inactive. Skipping its UOW.'.format(uow.process_name))
continue
entry = PriorityEntry(uow)
if entry in self.reprocess_uows[uow.process_name]:
# given UOW is already registered in the reprocessing queue
continue
# ASSUMPTION: UOW is re-created by a state machine during reprocessing
# thus - any UOW older 2 days could be marked as STATE_CANCELED
if datetime.utcnow() - uow.created_at > timedelta(hours=settings.settings['gc_life_support_hours']):
self._cancel_uow(uow)
continue
# if the UOW has been idle for more than 1 hour - resubmit it
if datetime.utcnow() - uow.submitted_at > timedelta(hours=settings.settings['gc_resubmit_after_hours'])\
or uow.is_invalid:
# enlist the UOW into the reprocessing queue
self.reprocess_uows[uow.process_name].put(entry)
except Exception as e:
self.logger.error('flow exception: {0}'.format(e), exc_info=True) | python | def scan_uow_candidates(self):
""" method performs two actions:
- enlist stale or invalid units of work into reprocessing queue
- cancel UOWs that are older than 2 days and have been submitted more than 1 hour ago """
try:
since = settings.settings['synergy_start_timeperiod']
uow_list = self.uow_dao.get_reprocessing_candidates(since)
except LookupError as e:
self.logger.info('flow: no UOW candidates found for reprocessing: {0}'.format(e))
return
for uow in uow_list:
try:
if uow.process_name not in self.managed_handlers:
self.logger.debug('process {0} is not known to the Synergy Scheduler. Skipping its UOW.'
.format(uow.process_name))
continue
thread_handler = self.managed_handlers[uow.process_name]
assert isinstance(thread_handler, ManagedThreadHandler)
if not thread_handler.process_entry.is_on:
self.logger.debug('process {0} is inactive. Skipping its UOW.'.format(uow.process_name))
continue
entry = PriorityEntry(uow)
if entry in self.reprocess_uows[uow.process_name]:
# given UOW is already registered in the reprocessing queue
continue
# ASSUMPTION: UOW is re-created by a state machine during reprocessing
# thus - any UOW older 2 days could be marked as STATE_CANCELED
if datetime.utcnow() - uow.created_at > timedelta(hours=settings.settings['gc_life_support_hours']):
self._cancel_uow(uow)
continue
# if the UOW has been idle for more than 1 hour - resubmit it
if datetime.utcnow() - uow.submitted_at > timedelta(hours=settings.settings['gc_resubmit_after_hours'])\
or uow.is_invalid:
# enlist the UOW into the reprocessing queue
self.reprocess_uows[uow.process_name].put(entry)
except Exception as e:
self.logger.error('flow exception: {0}'.format(e), exc_info=True) | ['def', 'scan_uow_candidates', '(', 'self', ')', ':', 'try', ':', 'since', '=', 'settings', '.', 'settings', '[', "'synergy_start_timeperiod'", ']', 'uow_list', '=', 'self', '.', 'uow_dao', '.', 'get_reprocessing_candidates', '(', 'since', ')', 'except', 'LookupError', 'as', 'e', ':', 'self', '.', 'logger', '.', 'info', '(', "'flow: no UOW candidates found for reprocessing: {0}'", '.', 'format', '(', 'e', ')', ')', 'return', 'for', 'uow', 'in', 'uow_list', ':', 'try', ':', 'if', 'uow', '.', 'process_name', 'not', 'in', 'self', '.', 'managed_handlers', ':', 'self', '.', 'logger', '.', 'debug', '(', "'process {0} is not known to the Synergy Scheduler. Skipping its UOW.'", '.', 'format', '(', 'uow', '.', 'process_name', ')', ')', 'continue', 'thread_handler', '=', 'self', '.', 'managed_handlers', '[', 'uow', '.', 'process_name', ']', 'assert', 'isinstance', '(', 'thread_handler', ',', 'ManagedThreadHandler', ')', 'if', 'not', 'thread_handler', '.', 'process_entry', '.', 'is_on', ':', 'self', '.', 'logger', '.', 'debug', '(', "'process {0} is inactive. Skipping its UOW.'", '.', 'format', '(', 'uow', '.', 'process_name', ')', ')', 'continue', 'entry', '=', 'PriorityEntry', '(', 'uow', ')', 'if', 'entry', 'in', 'self', '.', 'reprocess_uows', '[', 'uow', '.', 'process_name', ']', ':', '# given UOW is already registered in the reprocessing queue', 'continue', '# ASSUMPTION: UOW is re-created by a state machine during reprocessing', '# thus - any UOW older 2 days could be marked as STATE_CANCELED', 'if', 'datetime', '.', 'utcnow', '(', ')', '-', 'uow', '.', 'created_at', '>', 'timedelta', '(', 'hours', '=', 'settings', '.', 'settings', '[', "'gc_life_support_hours'", ']', ')', ':', 'self', '.', '_cancel_uow', '(', 'uow', ')', 'continue', '# if the UOW has been idle for more than 1 hour - resubmit it', 'if', 'datetime', '.', 'utcnow', '(', ')', '-', 'uow', '.', 'submitted_at', '>', 'timedelta', '(', 'hours', '=', 'settings', '.', 'settings', '[', "'gc_resubmit_after_hours'", ']', ')', 'or', 'uow', '.', 'is_invalid', ':', '# enlist the UOW into the reprocessing queue', 'self', '.', 'reprocess_uows', '[', 'uow', '.', 'process_name', ']', '.', 'put', '(', 'entry', ')', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'logger', '.', 'error', '(', "'flow exception: {0}'", '.', 'format', '(', 'e', ')', ',', 'exc_info', '=', 'True', ')'] | method performs two actions:
- enlist stale or invalid units of work into reprocessing queue
- cancel UOWs that are older than 2 days and have been submitted more than 1 hour ago | ['method', 'performs', 'two', 'actions', ':', '-', 'enlist', 'stale', 'or', 'invalid', 'units', 'of', 'work', 'into', 'reprocessing', 'queue', '-', 'cancel', 'UOWs', 'that', 'are', 'older', 'than', '2', 'days', 'and', 'have', 'been', 'submitted', 'more', 'than', '1', 'hour', 'ago'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/garbage_collector.py#L37-L79 |
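The garbage collector above mixes DAO lookups and handler checks with a simple timing rule; the sketch below isolates just that rule. triage_uow and the two hour constants are placeholders standing in for the settings keys gc_life_support_hours and gc_resubmit_after_hours referenced in the code, with values assumed here:

from datetime import datetime, timedelta

GC_LIFE_SUPPORT_HOURS = 48     # assumed value for settings['gc_life_support_hours']
GC_RESUBMIT_AFTER_HOURS = 1    # assumed value for settings['gc_resubmit_after_hours']

def triage_uow(created_at, submitted_at, is_invalid, now=None):
    # Mirrors the ordering above: cancel very old units first, then resubmit stale or invalid ones.
    now = now or datetime.utcnow()
    if now - created_at > timedelta(hours=GC_LIFE_SUPPORT_HOURS):
        return 'cancel'
    if is_invalid or now - submitted_at > timedelta(hours=GC_RESUBMIT_AFTER_HOURS):
        return 'reprocess'
    return 'skip'

now = datetime.utcnow()
print(triage_uow(now - timedelta(days=3), now - timedelta(minutes=5), False))   # cancel
print(triage_uow(now - timedelta(hours=6), now - timedelta(hours=2), False))    # reprocess
print(triage_uow(now - timedelta(hours=6), now - timedelta(minutes=5), False))  # skip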
2,996 | thombashi/SimpleSQLite | simplesqlite/core.py | SimpleSQLite.fetch_table_names | def fetch_table_names(self, include_system_table=False):
"""
:return: List of table names in the database.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_table_names())
:Output:
.. code-block:: python
['hoge']
"""
self.check_connection()
return self.schema_extractor.fetch_table_names(include_system_table) | python | def fetch_table_names(self, include_system_table=False):
"""
:return: List of table names in the database.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_table_names())
:Output:
.. code-block:: python
['hoge']
"""
self.check_connection()
return self.schema_extractor.fetch_table_names(include_system_table) | ['def', 'fetch_table_names', '(', 'self', ',', 'include_system_table', '=', 'False', ')', ':', 'self', '.', 'check_connection', '(', ')', 'return', 'self', '.', 'schema_extractor', '.', 'fetch_table_names', '(', 'include_system_table', ')'] | :return: List of table names in the database.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Sample Code:
.. code:: python
from simplesqlite import SimpleSQLite
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
"hoge",
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_table_names())
:Output:
.. code-block:: python
['hoge'] | [':', 'return', ':', 'List', 'of', 'table', 'names', 'in', 'the', 'database', '.', ':', 'rtype', ':', 'list', ':', 'raises', 'simplesqlite', '.', 'NullDatabaseConnectionError', ':', '|raises_check_connection|', ':', 'raises', 'simplesqlite', '.', 'OperationalError', ':', '|raises_operational_error|'] | train | https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L683-L710 |
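Building on the docstring sample above, a small variation showing the include_system_table flag from the signature; whether any sqlite internal tables show up depends on the state of the database, so the second result is not guaranteed to differ:

from simplesqlite import SimpleSQLite

con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix("hoge", ["attr_a", "attr_b"], [[1, "a"], [2, "b"]])
print(con.fetch_table_names())                            # ['hoge']
# With include_system_table=True the result may also list sqlite_* bookkeeping tables, if any exist.
print(con.fetch_table_names(include_system_table=True))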
2,997 | pyros-dev/pyros-common | pyros_interfaces_mock/pyros_mock.py | PyrosMock.setup | def setup(self, publishers=None, subscribers=None, services=None, topics=None, params=None):
"""
:param publishers:
:param subscribers:
:param services:
:param topics: ONLY HERE for BW compat
:param params:
:return:
"""
super(PyrosMock, self).setup(publishers=publishers, subscribers=subscribers, services=services, topics=topics, params=params) | python | def setup(self, publishers=None, subscribers=None, services=None, topics=None, params=None):
"""
:param publishers:
:param subscribers:
:param services:
:param topics: ONLY HERE for BW compat
:param params:
:return:
"""
super(PyrosMock, self).setup(publishers=publishers, subscribers=subscribers, services=services, topics=topics, params=params) | ['def', 'setup', '(', 'self', ',', 'publishers', '=', 'None', ',', 'subscribers', '=', 'None', ',', 'services', '=', 'None', ',', 'topics', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'super', '(', 'PyrosMock', ',', 'self', ')', '.', 'setup', '(', 'publishers', '=', 'publishers', ',', 'subscribers', '=', 'subscribers', ',', 'services', '=', 'services', ',', 'topics', '=', 'topics', ',', 'params', '=', 'params', ')'] | :param publishers:
:param subscribers:
:param services:
:param topics: ONLY HERE for BW compat
:param params:
:return: | [':', 'param', 'publishers', ':', ':', 'param', 'subscribers', ':', ':', 'param', 'services', ':', ':', 'param', 'topics', ':', 'ONLY', 'HERE', 'for', 'BW', 'compat', ':', 'param', 'params', ':', ':', 'return', ':'] | train | https://github.com/pyros-dev/pyros-common/blob/0709538b8777ec055ea31f59cdca5bebaaabb04e/pyros_interfaces_mock/pyros_mock.py#L130-L139 |
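A rough call sketch for the pass-through setup above; the import path, the bare constructor call, and the string form of each interface description are assumptions made here, not documented pyros usage (topics= is kept only for backward compatibility, so this sketch passes publishers/subscribers instead):

# Import path assumed from the repository layout (pyros_interfaces_mock/pyros_mock.py).
from pyros_interfaces_mock.pyros_mock import PyrosMock

mock = PyrosMock()
mock.setup(publishers=['/chatter'],
           subscribers=['/cmd_vel'],
           services=['/add_two_ints'],
           params=['/use_sim_time'])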
2,998 | poppy-project/pypot | pypot/dynamixel/controller.py | DxlController.set_register | def set_register(self, motors):
""" Gets the value from :class:`~pypot.dynamixel.motor.DxlMotor` and sets it to the specified register. """
if not motors:
return
ids = [m.id for m in motors]
values = (m.__dict__[self.varname] for m in motors)
getattr(self.io, 'set_{}'.format(self.regname))(dict(zip(ids, values)))
for m in motors:
m._write_synced[self.varname].done() | python | def set_register(self, motors):
""" Gets the value from :class:`~pypot.dynamixel.motor.DxlMotor` and sets it to the specified register. """
if not motors:
return
ids = [m.id for m in motors]
values = (m.__dict__[self.varname] for m in motors)
getattr(self.io, 'set_{}'.format(self.regname))(dict(zip(ids, values)))
for m in motors:
m._write_synced[self.varname].done() | ['def', 'set_register', '(', 'self', ',', 'motors', ')', ':', 'if', 'not', 'motors', ':', 'return', 'ids', '=', '[', 'm', '.', 'id', 'for', 'm', 'in', 'motors', ']', 'values', '=', '(', 'm', '.', '__dict__', '[', 'self', '.', 'varname', ']', 'for', 'm', 'in', 'motors', ')', 'getattr', '(', 'self', '.', 'io', ',', "'set_{}'", '.', 'format', '(', 'self', '.', 'regname', ')', ')', '(', 'dict', '(', 'zip', '(', 'ids', ',', 'values', ')', ')', ')', 'for', 'm', 'in', 'motors', ':', 'm', '.', '_write_synced', '[', 'self', '.', 'varname', ']', '.', 'done', '(', ')'] | Gets the value from :class:`~pypot.dynamixel.motor.DxlMotor` and sets it to the specified register. | ['Gets', 'the', 'value', 'from', ':', 'class', ':', '~pypot', '.', 'dynamixel', '.', 'motor', '.', 'DxlMotor', 'and', 'sets', 'it', 'to', 'the', 'specified', 'register', '.'] | train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/controller.py#L85-L95 |
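To make the synced-write dispatch above concrete, here is a toy version with fake IO and motor classes; FakeIO, FakeMotor, and the 'goal_position' register name are inventions for illustration, not the real pypot classes, and the _write_synced bookkeeping is omitted:

class FakeIO:
    def set_goal_position(self, id_to_value):
        print('bus write:', id_to_value)

class FakeMotor:
    def __init__(self, mid, goal_position):
        self.id = mid
        self.goal_position = goal_position

# Same pattern as set_register: collect ids and cached values, then dispatch one batched call.
io, regname, varname = FakeIO(), 'goal_position', 'goal_position'
motors = [FakeMotor(1, 90.0), FakeMotor(2, -45.0)]
ids = [m.id for m in motors]
values = (m.__dict__[varname] for m in motors)
getattr(io, 'set_{}'.format(regname))(dict(zip(ids, values)))   # bus write: {1: 90.0, 2: -45.0}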
2,999 | DataBiosphere/toil | src/toil/provisioners/node.py | Node.sshAppliance | def sshAppliance(self, *args, **kwargs):
"""
:param args: arguments to execute in the appliance
:param kwargs: tty=bool tells docker whether or not to create a TTY shell for
interactive SSHing. The default value is False. Input=string is passed as
input to the Popen call.
"""
kwargs['appliance'] = True
return self.coreSSH(*args, **kwargs) | python | def sshAppliance(self, *args, **kwargs):
"""
:param args: arguments to execute in the appliance
:param kwargs: tty=bool tells docker whether or not to create a TTY shell for
interactive SSHing. The default value is False. Input=string is passed as
input to the Popen call.
"""
kwargs['appliance'] = True
return self.coreSSH(*args, **kwargs) | ['def', 'sshAppliance', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'appliance'", ']', '=', 'True', 'return', 'self', '.', 'coreSSH', '(', '*', 'args', ',', '*', '*', 'kwargs', ')'] | :param args: arguments to execute in the appliance
:param kwargs: tty=bool tells docker whether or not to create a TTY shell for
interactive SSHing. The default value is False. Input=string is passed as
input to the Popen call. | [':', 'param', 'args', ':', 'arguments', 'to', 'execute', 'in', 'the', 'appliance', ':', 'param', 'kwargs', ':', 'tty', '=', 'bool', 'tells', 'docker', 'whether', 'or', 'not', 'to', 'create', 'a', 'TTY', 'shell', 'for', 'interactive', 'SSHing', '.', 'The', 'default', 'value', 'is', 'False', '.', 'Input', '=', 'string', 'is', 'passed', 'as', 'input', 'to', 'the', 'Popen', 'call', '.'] | train | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/node.py#L194-L202 |
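The appliance helper above only injects appliance=True before delegating; a toy stand-in (not the real Toil Node class) makes the keyword forwarding visible:

class ToyNode:
    # Stand-in for Node.coreSSH: just echoes what would be executed and how.
    def coreSSH(self, *args, **kwargs):
        print('ssh ->', args, kwargs)

    def sshAppliance(self, *args, **kwargs):
        kwargs['appliance'] = True
        return self.coreSSH(*args, **kwargs)

ToyNode().sshAppliance('ls', '-la', tty=True)
# ssh -> ('ls', '-la') {'tty': True, 'appliance': True}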