Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54) | func_path_in_repository (string, 5-223) | func_name (string, 1-134) | whole_func_string (string, 100-30.3k) | language (1 class: python) | func_code_string (string, 100-30.3k) | func_code_tokens (string, 138-33.2k) | func_documentation_string (string, 1-15k) | func_documentation_tokens (string, 5-5.14k) | split_name (1 class: train) | func_code_url (string, 91-315) |
---|---|---|---|---|---|---|---|---|---|---|---|
3,100 | olsoneric/pedemath | pedemath/vec3.py | rotate_around_vector_v3 | def rotate_around_vector_v3(v, angle_rad, norm_vec):
""" rotate v around norm_vec by angle_rad."""
cos_val = math.cos(angle_rad)
sin_val = math.sin(angle_rad)
## (v * cosVal) +
## ((normVec * v) * (1.0 - cosVal)) * normVec +
## (v ^ normVec) * sinVal)
#line1: scaleV3(v,cosVal)
#line2: dotV3( scaleV3( dotV3(normVec,v), 1.0-cosVal), normVec)
#line3: scaleV3( crossV3( v,normVec), sinVal)
#a = scaleV3(v,cosVal)
#b = scaleV3( normVec, dotV3(normVec,v) * (1.0-cosVal))
#c = scaleV3( crossV3( v,normVec), sinVal)
return add_v3(
add_v3(scale_v3(v, cos_val),
scale_v3(norm_vec, dot_v3(norm_vec, v) * (1.0 - cos_val))),
scale_v3(cross_v3(v, norm_vec), sin_val)
) | python | rotate v around norm_vec by angle_rad. | train | https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/vec3.py#L136-L153 |
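Row 3,100 rotates a vector about a normalized axis using the formula v*cos(a) + n*(n.v)*(1 - cos(a)) + (v x n)*sin(a). The sketch below re-implements that same formula with NumPy as an independent cross-check; it is not part of pedemath, and the function name here is made up.

```python
import math
import numpy as np

def rotate_around_vector(v, angle_rad, norm_vec):
    """Rotate v about the unit axis norm_vec, mirroring the pedemath expression:
    v*cos + n*(n.v)*(1 - cos) + (v x n)*sin."""
    v = np.asarray(v, dtype=float)
    n = np.asarray(norm_vec, dtype=float)
    cos_val = math.cos(angle_rad)
    sin_val = math.sin(angle_rad)
    return v * cos_val + n * np.dot(n, v) * (1.0 - cos_val) + np.cross(v, n) * sin_val

# With this sign convention (v x n in the sine term), rotating the x axis a
# quarter turn about z gives approximately (0, -1, 0).
print(rotate_around_vector([1.0, 0.0, 0.0], math.pi / 2, [0.0, 0.0, 1.0]))
```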
3,101 | geophysics-ubonn/crtomo_tools | lib/crtomo/cfg.py | crtomo_config.set_defaults | def set_defaults(self):
"""Fill the dictionary with all defaults
"""
self['mswitch'] = 1
self['elem'] = '../grid/elem.dat'
self['elec'] = '../grid/elec.dat'
self['volt'] = '../mod/volt.dat'
self['inv_dir'] = '../inv'
self['diff_inv'] = 'F ! difference inversion?'
self['iseed_var'] = 'iseed variance'
self['cells_x'] = '0 ! # cells in x-direction'
self['cells_z'] = '-1 ! # cells in z-direction'
self['ani_x'] = '1.000 ! smoothing parameter in x-direction'
self['ani_z'] = '1.000 ! smoothing parameter in z-direction'
self['max_it'] = '20 ! max. nr of iterations'
self['dc_inv'] = 'F ! DC inversion?'
self['robust_inv'] = 'T ! robust inversion?'
self['fpi_inv'] = 'F ! final phase improvement?'
self['mag_rel'] = '5'
self['mag_abs'] = '1e-3'
self['pha_a1'] = 0
self['pha_b'] = 0
self['pha_rel'] = 0
self['pha_abs'] = 0
self['hom_bg'] = 'F'
self['hom_mag'] = '10.00'
self['hom_pha'] = '0.00'
self['another_ds'] = 'F'
self['d2_5'] = '1'
self['fic_sink'] = 'F'
self['fic_sink_node'] = '10000'
self['boundaries'] = 'F'
self['boundaries_file'] = 'boundary.dat'
self['mswitch2'] = '1'
self['lambda'] = 'lambda' | python | Fill the dictionary with all defaults | train | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/cfg.py#L180-L214 |
3,102 | sdispater/orator | orator/orm/factory.py | Factory.create_as | def create_as(self, klass, name, **attributes):
"""
Create an instance of the given model and type and persist it to the database.
:param klass: The class
:type klass: class
:param name: The type
:type name: str
:param attributes: The instance attributes
:type attributes: dict
:return: mixed
"""
return self.of(klass, name).create(**attributes) | python | Create an instance of the given model and type and persist it to the database. | train | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/factory.py#L127-L142 |
3,103 | briwilcox/Concurrent-Pandas | concurrentpandas.py | ConcurrentPandas.unpack | def unpack(self, to_unpack):
"""
Unpack is a recursive function that will unpack anything that inherits
from abstract base class Container provided it is not also inheriting from Python basestring.
Raise Exception if resulting object is neither a container or a string
Code working in both Python 2 and Python 3
"""
# Python 3 lacks basestring type, work around below
try:
isinstance(to_unpack, basestring)
except NameError:
basestring = str
# Base Case
if isinstance(to_unpack, basestring):
self.input_queue.put(to_unpack)
return
for possible_key in to_unpack:
if isinstance(possible_key, basestring):
self.input_queue.put(possible_key)
elif sys.version_info >= (3, 0):
if isinstance(possible_key, collections.abc.Container) and not isinstance(possible_key, basestring):
self.unpack(possible_key)
else:
raise Exception("A type that is neither a string or a container was passed to unpack. "
"Aborting!")
else:
if isinstance(possible_key, collections.Container) and not isinstance(possible_key, basestring):
self.unpack(possible_key)
else:
raise Exception("A type that is neither a string or a container was passed to unpack. "
"Aborting!") | python | def unpack(self, to_unpack):
"""
Unpack is a recursive function that will unpack anything that inherits
from abstract base class Container provided it is not also inheriting from Python basestring.
Raise Exception if resulting object is neither a container or a string
Code working in both Python 2 and Python 3
"""
# Python 3 lacks basestring type, work around below
try:
isinstance(to_unpack, basestring)
except NameError:
basestring = str
# Base Case
if isinstance(to_unpack, basestring):
self.input_queue.put(to_unpack)
return
for possible_key in to_unpack:
if isinstance(possible_key, basestring):
self.input_queue.put(possible_key)
elif sys.version_info >= (3, 0):
if isinstance(possible_key, collections.abc.Container) and not isinstance(possible_key, basestring):
self.unpack(possible_key)
else:
raise Exception("A type that is neither a string or a container was passed to unpack. "
"Aborting!")
else:
if isinstance(possible_key, collections.Container) and not isinstance(possible_key, basestring):
self.unpack(possible_key)
else:
raise Exception("A type that is neither a string or a container was passed to unpack. "
"Aborting!") | ['def', 'unpack', '(', 'self', ',', 'to_unpack', ')', ':', '# Python 3 lacks basestring type, work around below', 'try', ':', 'isinstance', '(', 'to_unpack', ',', 'basestring', ')', 'except', 'NameError', ':', 'basestring', '=', 'str', '# Base Case', 'if', 'isinstance', '(', 'to_unpack', ',', 'basestring', ')', ':', 'self', '.', 'input_queue', '.', 'put', '(', 'to_unpack', ')', 'return', 'for', 'possible_key', 'in', 'to_unpack', ':', 'if', 'isinstance', '(', 'possible_key', ',', 'basestring', ')', ':', 'self', '.', 'input_queue', '.', 'put', '(', 'possible_key', ')', 'elif', 'sys', '.', 'version_info', '>=', '(', '3', ',', '0', ')', ':', 'if', 'isinstance', '(', 'possible_key', ',', 'collections', '.', 'abc', '.', 'Container', ')', 'and', 'not', 'isinstance', '(', 'possible_key', ',', 'basestring', ')', ':', 'self', '.', 'unpack', '(', 'possible_key', ')', 'else', ':', 'raise', 'Exception', '(', '"A type that is neither a string or a container was passed to unpack. "', '"Aborting!"', ')', 'else', ':', 'if', 'isinstance', '(', 'possible_key', ',', 'collections', '.', 'Container', ')', 'and', 'not', 'isinstance', '(', 'possible_key', ',', 'basestring', ')', ':', 'self', '.', 'unpack', '(', 'possible_key', ')', 'else', ':', 'raise', 'Exception', '(', '"A type that is neither a string or a container was passed to unpack. "', '"Aborting!"', ')'] | Unpack is a recursive function that will unpack anything that inherits
from abstract base class Container provided it is not also inheriting from Python basestring.
Raise Exception if resulting object is neither a container or a string
Code working in both Python 2 and Python 3 | ['Unpack', 'is', 'a', 'recursive', 'function', 'that', 'will', 'unpack', 'anything', 'that', 'inherits', 'from', 'abstract', 'base', 'class', 'Container', 'provided', 'it', 'is', 'not', 'also', 'inheriting', 'from', 'Python', 'basestring', '.'] | train | https://github.com/briwilcox/Concurrent-Pandas/blob/22cb392dacb712e1bdb5b60c6ba7015c38445c99/concurrentpandas.py#L202-L239 |
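Row 3,103's unpack walks arbitrarily nested containers, pushes every string onto the instance's input_queue, and raises for anything that is neither a string nor a container. The standalone sketch below reproduces that flattening behavior with a plain queue.Queue (Python 3 only); it is an illustration, not the ConcurrentPandas code.

```python
import collections.abc
import queue

def unpack_symbols(to_unpack, out_queue):
    """Recursively push every string found in nested containers onto out_queue."""
    if isinstance(to_unpack, str):
        out_queue.put(to_unpack)
        return
    if not isinstance(to_unpack, collections.abc.Container):
        raise Exception("A type that is neither a string or a container was passed. Aborting!")
    for item in to_unpack:
        unpack_symbols(item, out_queue)

q = queue.Queue()
unpack_symbols(["AAPL", ("GOOG", {"MSFT"}), [["TSLA"]]], q)
print([q.get() for _ in range(q.qsize())])  # ['AAPL', 'GOOG', 'MSFT', 'TSLA']
```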
3,104 | bpannier/simpletr64 | simpletr64/actions/wifi.py | Wifi.getSpecificAssociatedDeviceInfo | def getSpecificAssociatedDeviceInfo(self, macAddress, wifiInterfaceId=1, timeout=1):
"""Execute GetSpecificAssociatedDeviceInfo action to get detailed information about a Wifi client.
:param str macAddress: MAC address in the form ``38:C9:86:26:7E:38``; be aware that the MAC address might
be case sensitive, depending on the router
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's about a Wifi client
:rtype: WifiDeviceInfo
.. seealso:: :meth:`~simpletr64.actions.Wifi.getGenericAssociatedDeviceInfo`
"""
namespace = Wifi.getServiceType("getSpecificAssociatedDeviceInfo") + str(wifiInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetSpecificAssociatedDeviceInfo", timeout=timeout,
NewAssociatedDeviceMACAddress=macAddress)
return WifiDeviceInfo(results, macAddress=macAddress) | python | Execute GetSpecificAssociatedDeviceInfo action to get detailed information about a Wifi client. | train | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/wifi.py#L174-L192 |
3,105 | inveniosoftware/invenio-files-rest | invenio_files_rest/serializer.py | ObjectVersionSchema.dump_links | def dump_links(self, o):
"""Dump links."""
params = {'versionId': o.version_id}
data = {
'self': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True,
**(params if not o.is_head or o.deleted else {})
),
'version': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True,
**params
)
}
if o.is_head and not o.deleted:
data.update({'uploads': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True
) + '?uploads', })
return data | python | Dump links. | train | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/serializer.py#L69-L97 |
3,106 | GNS3/gns3-server | gns3server/compute/virtualbox/virtualbox_vm.py | VirtualBoxVM.close | def close(self):
"""
Closes this VirtualBox VM.
"""
if self._closed:
# VM is already closed
return
if not (yield from super().close()):
return False
log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
for adapter in self._ethernet_adapters.values():
if adapter is not None:
for nio in adapter.ports.values():
if nio and isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
for udp_tunnel in self._local_udp_tunnels.values():
self.manager.port_manager.release_udp_port(udp_tunnel[0].lport, self._project)
self.manager.port_manager.release_udp_port(udp_tunnel[1].lport, self._project)
self._local_udp_tunnels = {}
self.acpi_shutdown = False
yield from self.stop()
if self.linked_clone:
hdd_table = yield from self.save_linked_hdds_info()
for hdd in hdd_table.copy():
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
id=self.id,
controller=hdd["controller"],
port=hdd["port"],
device=hdd["device"]))
try:
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd["controller"],
hdd["port"],
hdd["device"]))
except VirtualBoxError as e:
log.warn("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name,
id=self.id,
controller=hdd["controller"],
port=hdd["port"],
device=hdd["device"],
error=e))
continue
log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
yield from self.manager.execute("unregistervm", [self._name])
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
self._closed = True | python | Closes this VirtualBox VM. | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/virtualbox/virtualbox_vm.py#L484-L540 |
3,107 | ThreatConnect-Inc/tcex | tcex/tcex_resources.py | Resource.copy | def copy(self):
"""Return a "clean" copy of this instance.
Return:
(instance): A clean copy of this instance.
"""
resource = copy.copy(self)
# workaround for bytes/str issue in Py3 with copy of instance
# TypeError: a bytes-like object is required, not 'str' (ssl.py)
resource._request = self.tcex.request(self.tcex.session)
# reset properties of resource
resource.copy_reset()
# Preserve settings
resource.http_method = self.http_method
if self._request.payload.get('owner') is not None:
resource.owner = self._request.payload.get('owner')
# future bcs - these should not need to be reset. correct?
# resource._request_entity = self._api_entity
# resource._request_uri = self._api_uri
return resource | python | Return a "clean" copy of this instance. | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L524-L547 |
3,108 | juju-solutions/charms.reactive | charms/reactive/bus.py | Handler._get_args | def _get_args(self):
"""
Lazily evaluate the args.
"""
if not hasattr(self, '_args_evaled'):
# cache the args in case handler is re-invoked due to flags change
self._args_evaled = list(chain.from_iterable(self._args))
return self._args_evaled | python | Lazily evaluate the args. | train | https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/bus.py#L167-L174 |
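Row 3,108 is a small lazy-evaluation idiom: the handler's argument groups are flattened with itertools.chain.from_iterable only on first access, and the result is cached on the instance so one-shot generators are not consumed twice when the handler is re-invoked. A self-contained sketch of the same pattern (not the charms.reactive API):

```python
from itertools import chain

class LazyArgs:
    """Flatten a list of argument groups once, on first access, and cache the result."""

    def __init__(self, arg_groups):
        self._args = arg_groups  # may contain one-shot generators

    def get_args(self):
        if not hasattr(self, '_args_evaled'):
            # cache so a later call sees the same flattened list
            self._args_evaled = list(chain.from_iterable(self._args))
        return self._args_evaled

args = LazyArgs([[1, 2], (x * 10 for x in range(3))])
print(args.get_args())  # [1, 2, 0, 10, 20]
print(args.get_args())  # same cached list; the generator is not re-consumed
```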
3,109 | materialsproject/pymatgen | pymatgen/io/abinit/works.py | GKKPWork.on_ok | def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It removes the WFKQ file if all its children have reached `S_OK`.
"""
if self.remove_wfkq:
for task in self.wfkq_tasks:
if task.status != task.S_OK: continue
children = self.wfkq_task_children[task]
if all(child.status == child.S_OK for child in children):
path = task.outdir.has_abiext("WFQ")
if path:
self.history.info("Removing WFQ: %s" % path)
os.remove(path)
# If wfk task we create a link to a wfq file so abinit is happy
if sender == self.wfk_task:
wfk_path = self.wfk_task.outdir.has_abiext("WFK")
# Check if netcdf
filename, extension = os.path.splitext(wfk_path)
infile = 'out_WFQ' + extension
infile = os.path.join(os.path.dirname(wfk_path), infile)
os.symlink(wfk_path, infile)
return super().on_ok(sender) | python | This callback is called when one task reaches status `S_OK`. It removes the WFKQ file if all its children have reached `S_OK`. | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L1780-L1804 |
3,110 | pip-services3-python/pip-services3-commons-python | pip_services3_commons/config/ConfigParams.py | ConfigParams.add_section | def add_section(self, section, section_params):
"""
Adds parameters into this ConfigParams under specified section.
Keys for the new parameters are appended with section dot prefix.
:param section: name of the section where add new parameters
:param section_params: new parameters to be added.
"""
if section == None:
raise Exception("Section name cannot be null")
section = "" if self._is_shadow_name(section) else section
if section_params == None or len(section_params) == 0:
return
for (key, value) in section_params.items():
key = "" if self._is_shadow_name(key) else key
if len(key) > 0 and len(section) > 0:
key = section + "." + key
elif len(key) == 0:
key = section
self[key] = value | python | Adds parameters into this ConfigParams under specified section. Keys for the new parameters are appended with section dot prefix. | train | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/config/ConfigParams.py#L111-L136 |
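Row 3,110 namespaces incoming keys by joining the section name and each key with a dot, with empty ("shadow") names collapsing to the other part. The snippet below reproduces just that key-prefixing rule on a plain dict so the behavior is visible without the pip_services3_commons classes; it is an illustration, not the library itself.

```python
def add_section(target, section, section_params):
    """Copy section_params into target, prefixing each key with 'section.'."""
    if section is None:
        raise Exception("Section name cannot be null")
    for key, value in (section_params or {}).items():
        if key and section:
            full_key = section + "." + key
        else:
            full_key = key or section  # an empty key stores under the section name itself
        target[full_key] = value
    return target

config = {}
add_section(config, "connection", {"host": "localhost", "port": 8080})
print(config)  # {'connection.host': 'localhost', 'connection.port': 8080}
```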
3,111 | hover2pi/svo_filters | svo_filters/svo.py | Filter.info | def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<']) | python | Print a table of info about the current filter | train | https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L417-L436 |
3,112 | cariad/py-wpconfigr | wpconfigr/wp_config_string.py | WpConfigString.set | def set(self, key, value):
"""
Updates the value of the given key in the loaded content.
Args:
key (str): Key of the property to update.
value (str): New value of the property.
Return:
bool: Indicates whether or not a change was made.
"""
match = self._get_match(key=key)
if not match:
self._log.info('"%s" does not exist, so it will be added.', key)
if isinstance(value, str):
self._log.info('"%s" will be added as a PHP string value.',
key)
value_str = '\'{}\''.format(value)
else:
self._log.info('"%s" will be added as a PHP object value.',
key)
value_str = str(value).lower()
new = 'define(\'{key}\', {value});'.format(
key=key,
value=value_str)
self._log.info('"%s" will be added as: %s', key, new)
replace_this = '<?php\n'
replace_with = '<?php\n' + new + '\n'
self._content = self._content.replace(replace_this, replace_with)
self._log.info('Content string has been updated.')
return True
if self._get_value_from_match(key=key, match=match) == value:
self._log.info('"%s" is already up-to-date.', key)
return False
self._log.info('"%s" exists and will be updated.', key)
start_index = match.start(1)
end_index = match.end(1)
if isinstance(value, bool):
value = str(value).lower()
self._log.info('"%s" will be updated with boolean value: %s',
key,
value)
else:
self._log.info('"%s" will be updated with string value: %s',
key,
value)
start = self._content[:start_index]
end = self._content[end_index:]
self._content = start + value + end
return True | python | Updates the value of the given key in the loaded content. | train | https://github.com/cariad/py-wpconfigr/blob/8f25bb849b72ce95957566544a2be8445316c818/wpconfigr/wp_config_string.py#L143-L204 |
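Row 3,112 updates or inserts a define('KEY', value); line in a wp-config.php string, quoting Python strings and lower-casing booleans so they become valid PHP. The standalone sketch below shows only the insert path (prepending a new define right after the opening <?php tag); it is a simplified illustration, not wpconfigr itself, which also rewrites existing defines in place via regex matches.

```python
def add_define(php_content, key, value):
    """Insert define('key', value); right after the opening <?php tag."""
    if isinstance(value, str):
        value_str = "'{}'".format(value)   # PHP string literal
    else:
        value_str = str(value).lower()     # True -> true, False -> false
    new_line = "define('{0}', {1});".format(key, value_str)
    return php_content.replace("<?php\n", "<?php\n" + new_line + "\n")

config = "<?php\ndefine('DB_NAME', 'wordpress');\n"
config = add_define(config, "WP_DEBUG", True)
print(config)
# <?php
# define('WP_DEBUG', true);
# define('DB_NAME', 'wordpress');
```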
3,113 | census-instrumentation/opencensus-python | opencensus/metrics/export/gauge.py | DerivedGauge.create_time_series | def create_time_series(self, label_values, func):
"""Create a derived measurement to trac `func`.
:type label_values: list(:class:`LabelValue`)
:param label_values: The measurement's label values.
:type func: function
:param func: The function to track.
:rtype: :class:`DerivedGaugePoint`
:return: A read-only measurement that tracks `func`.
"""
if label_values is None:
raise ValueError
if any(lv is None for lv in label_values):
raise ValueError
if len(label_values) != self._len_label_keys:
raise ValueError
if func is None:
raise ValueError
return self._create_time_series(label_values, func) | python | Create a derived measurement to trac `func`. | train | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/gauge.py#L412-L432 |
3,114 | pantsbuild/pants | src/python/pants/backend/jvm/subsystems/jar_dependency_management.py | JarDependencyManagement.targets_by_artifact_set | def targets_by_artifact_set(self, targets):
"""Partitions the input targets by the sets of pinned artifacts they are managed by.
:param collections.Iterable targets: the input targets (typically just JarLibrary targets).
:return: a mapping of PinnedJarArtifactSet -> list of targets.
:rtype: dict
"""
sets_to_targets = defaultdict(list)
for target in targets:
sets_to_targets[self.for_target(target)].append(target)
return dict(sets_to_targets) | python | Partitions the input targets by the sets of pinned artifacts they are managed by. | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/subsystems/jar_dependency_management.py#L164-L174 |
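Row 3,114 is the classic partitioning idiom: group items into a dict of lists keyed by a derived value, here the pinned-artifact set returned by for_target. The same pattern standalone, with a plain key function in place of Pants targets:

```python
from collections import defaultdict

def partition_by(items, key_fn):
    """Group items into {key_fn(item): [items...]}, as targets_by_artifact_set does."""
    groups = defaultdict(list)
    for item in items:
        groups[key_fn(item)].append(item)
    return dict(groups)

jars = ["guava-18.0", "guava-20.0", "junit-4.12"]
print(partition_by(jars, key_fn=lambda name: name.split("-")[0]))
# {'guava': ['guava-18.0', 'guava-20.0'], 'junit': ['junit-4.12']}
```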
3,115 | appointlet/span | span/__init__.py | Span.encompassed_by | def encompassed_by(self, span):
"""
Returns true if the given span encompasses this span.
"""
if isinstance(span, list):
return [sp for sp in span if sp.encompasses(self)]
return span.encompasses(self) | python | Returns true if the given span encompasses this span. | train | https://github.com/appointlet/span/blob/6d4f2920e45df827890ebe55b1c41b1f3414c0c9/span/__init__.py#L77-L84 |
3,116 | iskandr/fancyimpute | fancyimpute/iterative_imputer.py | is_scalar_nan | def is_scalar_nan(x):
"""Tests if x is NaN
This function is meant to overcome the issue that np.isnan does not allow
non-numerical types as input, and that np.nan is not np.float('nan').
Parameters
----------
x : any type
Returns
-------
boolean
Examples
--------
>>> is_scalar_nan(np.nan)
True
>>> is_scalar_nan(float("nan"))
True
>>> is_scalar_nan(None)
False
>>> is_scalar_nan("")
False
>>> is_scalar_nan([np.nan])
False
"""
# convert from numpy.bool_ to python bool to ensure that testing
# is_scalar_nan(x) is True does not fail.
# Redondant np.floating is needed because numbers can't match np.float32
# in python 2.
return bool(isinstance(x, (numbers.Real, np.floating)) and np.isnan(x)) | python | Tests if x is NaN | train | https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/iterative_imputer.py#L59-L87 |
3,117 | noahbenson/neuropythy | neuropythy/util/core.py | dataframe_select | def dataframe_select(df, *cols, **filters):
'''
dataframe_select(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the
given keys (k1, k2, etc.) have been selected such that the associated columns in the dataframe
contain only the rows whose cells match the given values.
dataframe_select(df, col1, col2...) selects the given columns.
dataframe_select(df, col1, col2..., k1=v1, k2=v2...) selects both.
If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
between the values. If value is a tuple/list of more than 2 elements or is a set of any length
then it is a list of values, any one of which can match the cell.
'''
ii = np.ones(len(df), dtype='bool')
for (k,v) in six.iteritems(filters):
vals = df[k].values
if pimms.is_set(v): jj = np.isin(vals, list(v))
elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1])
elif pimms.is_vector(v): jj = np.isin(vals, list(v))
else: jj = (vals == v)
ii = np.logical_and(ii, jj)
if len(ii) != np.sum(ii): df = df.loc[ii]
if len(cols) > 0: df = df[list(cols)]
return df | python | def dataframe_select(df, *cols, **filters):
'''
dataframe_select(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the
given keys (k1, k2, etc.) have been selected such that the associated columns in the dataframe
contain only the rows whose cells match the given values.
dataframe_select(df, col1, col2...) selects the given columns.
dataframe_select(df, col1, col2..., k1=v1, k2=v2...) selects both.
If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
between the values. If value is a tuple/list of more than 2 elements or is a set of any length
then it is a list of values, any one of which can match the cell.
'''
ii = np.ones(len(df), dtype='bool')
for (k,v) in six.iteritems(filters):
vals = df[k].values
if pimms.is_set(v): jj = np.isin(vals, list(v))
elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1])
elif pimms.is_vector(v): jj = np.isin(vals, list(v))
else: jj = (vals == v)
ii = np.logical_and(ii, jj)
if len(ii) != np.sum(ii): df = df.loc[ii]
if len(cols) > 0: df = df[list(cols)]
return df | ['def', 'dataframe_select', '(', 'df', ',', '*', 'cols', ',', '*', '*', 'filters', ')', ':', 'ii', '=', 'np', '.', 'ones', '(', 'len', '(', 'df', ')', ',', 'dtype', '=', "'bool'", ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'six', '.', 'iteritems', '(', 'filters', ')', ':', 'vals', '=', 'df', '[', 'k', ']', '.', 'values', 'if', 'pimms', '.', 'is_set', '(', 'v', ')', ':', 'jj', '=', 'np', '.', 'isin', '(', 'vals', ',', 'list', '(', 'v', ')', ')', 'elif', 'pimms', '.', 'is_vector', '(', 'v', ')', 'and', 'len', '(', 'v', ')', '==', '2', ':', 'jj', '=', '(', 'v', '[', '0', ']', '<=', 'vals', ')', '&', '(', 'vals', '<', 'v', '[', '1', ']', ')', 'elif', 'pimms', '.', 'is_vector', '(', 'v', ')', ':', 'jj', '=', 'np', '.', 'isin', '(', 'vals', ',', 'list', '(', 'v', ')', ')', 'else', ':', 'jj', '=', '(', 'vals', '==', 'v', ')', 'ii', '=', 'np', '.', 'logical_and', '(', 'ii', ',', 'jj', ')', 'if', 'len', '(', 'ii', ')', '!=', 'np', '.', 'sum', '(', 'ii', ')', ':', 'df', '=', 'df', '.', 'loc', '[', 'ii', ']', 'if', 'len', '(', 'cols', ')', '>', '0', ':', 'df', '=', 'df', '[', 'list', '(', 'cols', ')', ']', 'return', 'df'] | dataframe_select(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the
given keys (k1, k2, etc.) have been selected such that the associated columns in the dataframe
contain only the rows whose cells match the given values.
dataframe_select(df, col1, col2...) selects the given columns.
dataframe_select(df, col1, col2..., k1=v1, k2=v2...) selects both.
If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
between the values. If value is a tuple/list of more than 2 elements or is a set of any length
then it is a list of values, any one of which can match the cell. | ['dataframe_select', '(', 'df', 'k1', '=', 'v1', 'k2', '=', 'v2', '...', ')', 'yields', 'df', 'after', 'selecting', 'all', 'the', 'columns', 'in', 'which', 'the', 'given', 'keys', '(', 'k1', 'k2', 'etc', '.', ')', 'have', 'been', 'selected', 'such', 'that', 'the', 'associated', 'columns', 'in', 'the', 'dataframe', 'contain', 'only', 'the', 'rows', 'whose', 'cells', 'match', 'the', 'given', 'values', '.', 'dataframe_select', '(', 'df', 'col1', 'col2', '...', ')', 'selects', 'the', 'given', 'columns', '.', 'dataframe_select', '(', 'df', 'col1', 'col2', '...', 'k1', '=', 'v1', 'k2', '=', 'v2', '...', ')', 'selects', 'both', '.', 'If', 'a', 'value', 'is', 'a', 'tuple', '/', 'list', 'of', '2', 'elements', 'then', 'it', 'is', 'considered', 'a', 'range', 'where', 'cells', 'must', 'fall', 'between', 'the', 'values', '.', 'If', 'value', 'is', 'a', 'tuple', '/', 'list', 'of', 'more', 'than', '2', 'elements', 'or', 'is', 'a', 'set', 'of', 'any', 'length', 'then', 'it', 'is', 'a', 'list', 'of', 'values', 'any', 'one', 'of', 'which', 'can', 'match', 'the', 'cell', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L296-L318 |
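A minimal usage sketch for dataframe_select above (a sketch only: it assumes neuropythy and pandas are installed, that the function is importable from the neuropythy.util.core module shown in the path, and that pimms treats a Python set and a 2-tuple the way the docstring describes; the DataFrame contents are made up):
    import pandas as pd
    from neuropythy.util.core import dataframe_select

    # hypothetical data
    df = pd.DataFrame({'roi':   ['V1', 'V1', 'V2', 'V3'],
                       'eccen': [0.5, 2.0, 4.0, 9.0],
                       'angle': [10, 90, 180, 270]})

    # keep the 'roi' and 'eccen' columns, restricted to rows whose roi is in the
    # set {'V1', 'V2'} and whose eccen falls in the half-open range [0, 3)
    sub = dataframe_select(df, 'roi', 'eccen', roi={'V1', 'V2'}, eccen=(0, 3))
    print(sub)   # two rows survive: (V1, 0.5) and (V1, 2.0)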
3,118 | nickjj/flask-webpack | flask_webpack/__init__.py | Webpack.javascript_tag | def javascript_tag(self, *args):
"""
Convenience tag to output 1 or more javascript tags.
:param args: 1 or more javascript file names
:return: Script tag(s) containing the asset
"""
tags = []
for arg in args:
asset_path = self.asset_url_for('{0}.js'.format(arg))
if asset_path:
tags.append('<script src="{0}"></script>'.format(asset_path))
return '\n'.join(tags) | python | def javascript_tag(self, *args):
"""
Convenience tag to output 1 or more javascript tags.
:param args: 1 or more javascript file names
:return: Script tag(s) containing the asset
"""
tags = []
for arg in args:
asset_path = self.asset_url_for('{0}.js'.format(arg))
if asset_path:
tags.append('<script src="{0}"></script>'.format(asset_path))
return '\n'.join(tags) | ['def', 'javascript_tag', '(', 'self', ',', '*', 'args', ')', ':', 'tags', '=', '[', ']', 'for', 'arg', 'in', 'args', ':', 'asset_path', '=', 'self', '.', 'asset_url_for', '(', "'{0}.js'", '.', 'format', '(', 'arg', ')', ')', 'if', 'asset_path', ':', 'tags', '.', 'append', '(', '\'<script src="{0}"></script>\'', '.', 'format', '(', 'asset_path', ')', ')', 'return', "'\\n'", '.', 'join', '(', 'tags', ')'] | Convenience tag to output 1 or more javascript tags.
:param args: 1 or more javascript file names
:return: Script tag(s) containing the asset | ['Convenience', 'tag', 'to', 'output', '1', 'or', 'more', 'javascript', 'tags', '.'] | train | https://github.com/nickjj/flask-webpack/blob/241617c6ce0fd9ec11f507204958ddd0ec467634/flask_webpack/__init__.py#L81-L95 |
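A hedged sketch of wiring the extension and calling Webpack.javascript_tag directly; the WEBPACK_MANIFEST_PATH key and the 'app_js' asset name follow the project's README conventions but are illustrative here, and the manifest file must actually exist (it is produced by the webpack build):
    from flask import Flask
    from flask_webpack import Webpack

    app = Flask(__name__)
    # assumed config key; path to the manifest generated by webpack
    app.config['WEBPACK_MANIFEST_PATH'] = './build/manifest.json'

    webpack = Webpack()
    webpack.init_app(app)

    # prints e.g. '<script src="/static/app_js.abc123.js"></script>';
    # an asset missing from the manifest simply contributes no tag
    print(webpack.javascript_tag('app_js'))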
3,119 | pjamesjoyce/lcopt | lcopt/settings.py | LcoptSettings.write | def write(self):
"""write the current settings to the config file"""
with open(storage.config_file, 'w') as cfg:
yaml.dump(self.as_dict(), cfg, default_flow_style=False)
storage.refresh() | python | def write(self):
"""write the current settings to the config file"""
with open(storage.config_file, 'w') as cfg:
yaml.dump(self.as_dict(), cfg, default_flow_style=False)
storage.refresh() | ['def', 'write', '(', 'self', ')', ':', 'with', 'open', '(', 'storage', '.', 'config_file', ',', "'w'", ')', 'as', 'cfg', ':', 'yaml', '.', 'dump', '(', 'self', '.', 'as_dict', '(', ')', ',', 'cfg', ',', 'default_flow_style', '=', 'False', ')', 'storage', '.', 'refresh', '(', ')'] | write the current settings to the config file | ['write', 'the', 'current', 'settings', 'to', 'the', 'config', 'file'] | train | https://github.com/pjamesjoyce/lcopt/blob/3f1caca31fece4a3068a384900707e6d21d04597/lcopt/settings.py#L98-L103 |
3,120 | Kozea/cairocffi | cairocffi/context.py | Context.get_source | def get_source(self):
"""Return this context’s source.
:returns:
An instance of :class:`Pattern` or one of its sub-classes,
a new Python object referencing the existing cairo pattern.
"""
return Pattern._from_pointer(
cairo.cairo_get_source(self._pointer), incref=True) | python | def get_source(self):
"""Return this context’s source.
:returns:
An instance of :class:`Pattern` or one of its sub-classes,
a new Python object referencing the existing cairo pattern.
"""
return Pattern._from_pointer(
cairo.cairo_get_source(self._pointer), incref=True) | ['def', 'get_source', '(', 'self', ')', ':', 'return', 'Pattern', '.', '_from_pointer', '(', 'cairo', '.', 'cairo_get_source', '(', 'self', '.', '_pointer', ')', ',', 'incref', '=', 'True', ')'] | Return this context’s source.
:returns:
An instance of :class:`Pattern` or one of its sub-classes,
a new Python object referencing the existing cairo pattern. | ['Return', 'this', 'context’s', 'source', '.'] | train | https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L395-L404 |
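A minimal sketch of get_source in use, assuming cairocffi is installed; after set_source_rgb the returned object should be a SolidPattern (one of the Pattern sub-classes mentioned above):
    import cairocffi as cairo

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10)
    ctx = cairo.Context(surface)
    ctx.set_source_rgb(1, 0, 0)      # opaque red
    pattern = ctx.get_source()       # new Python wrapper around the existing cairo pattern
    print(type(pattern).__name__)    # expected: 'SolidPattern'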
3,121 | apple/turicreate | src/unity/python/turicreate/data_structures/sframe.py | SFrame.select_columns | def select_columns(self, column_names):
"""
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not (all([isinstance(x, six.string_types) or isinstance(x, type) or isinstance(x, bytes)
for x in column_names])):
raise TypeError("Invalid key type: must be str, unicode, bytes or type")
requested_str_columns = [s for s in column_names if isinstance(s, six.string_types)]
# Make sure there are no duplicates keys
from collections import Counter
column_names_counter = Counter(column_names)
if (len(column_names)) != len(column_names_counter):
for key in column_names_counter:
if column_names_counter[key] > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# Ok. we want the string columns to be in the ordering defined by the
# argument. And then all the type selection columns.
selected_columns = requested_str_columns
typelist = [s for s in column_names if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
selected_columns = selected_columns
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns)) | python | def select_columns(self, column_names):
"""
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not (all([isinstance(x, six.string_types) or isinstance(x, type) or isinstance(x, bytes)
for x in column_names])):
raise TypeError("Invalid key type: must be str, unicode, bytes or type")
requested_str_columns = [s for s in column_names if isinstance(s, six.string_types)]
# Make sure there are no duplicates keys
from collections import Counter
column_names_counter = Counter(column_names)
if (len(column_names)) != len(column_names_counter):
for key in column_names_counter:
if column_names_counter[key] > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# Ok. we want the string columns to be in the ordering defined by the
# argument. And then all the type selection columns.
selected_columns = requested_str_columns
typelist = [s for s in column_names if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
selected_columns = selected_columns
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns)) | ['def', 'select_columns', '(', 'self', ',', 'column_names', ')', ':', 'if', 'not', '_is_non_string_iterable', '(', 'column_names', ')', ':', 'raise', 'TypeError', '(', '"column_names must be an iterable"', ')', 'if', 'not', '(', 'all', '(', '[', 'isinstance', '(', 'x', ',', 'six', '.', 'string_types', ')', 'or', 'isinstance', '(', 'x', ',', 'type', ')', 'or', 'isinstance', '(', 'x', ',', 'bytes', ')', 'for', 'x', 'in', 'column_names', ']', ')', ')', ':', 'raise', 'TypeError', '(', '"Invalid key type: must be str, unicode, bytes or type"', ')', 'requested_str_columns', '=', '[', 's', 'for', 's', 'in', 'column_names', 'if', 'isinstance', '(', 's', ',', 'six', '.', 'string_types', ')', ']', '# Make sure there are no duplicates keys', 'from', 'collections', 'import', 'Counter', 'column_names_counter', '=', 'Counter', '(', 'column_names', ')', 'if', '(', 'len', '(', 'column_names', ')', ')', '!=', 'len', '(', 'column_names_counter', ')', ':', 'for', 'key', 'in', 'column_names_counter', ':', 'if', 'column_names_counter', '[', 'key', ']', '>', '1', ':', 'raise', 'ValueError', '(', '"There are duplicate keys in key list: \'"', '+', 'key', '+', '"\'"', ')', 'colnames_and_types', '=', 'list', '(', 'zip', '(', 'self', '.', 'column_names', '(', ')', ',', 'self', '.', 'column_types', '(', ')', ')', ')', '# Ok. we want the string columns to be in the ordering defined by the', '# argument. And then all the type selection columns.', 'selected_columns', '=', 'requested_str_columns', 'typelist', '=', '[', 's', 'for', 's', 'in', 'column_names', 'if', 'isinstance', '(', 's', ',', 'type', ')', ']', '# next the type selection columns', '# loop through all the columns, adding all columns with types in', "# typelist. But don't add a column if it has already been added.", 'for', 'i', 'in', 'colnames_and_types', ':', 'if', 'i', '[', '1', ']', 'in', 'typelist', 'and', 'i', '[', '0', ']', 'not', 'in', 'selected_columns', ':', 'selected_columns', '+=', '[', 'i', '[', '0', ']', ']', 'selected_columns', '=', 'selected_columns', 'with', 'cython_context', '(', ')', ':', 'return', 'SFrame', '(', 'data', '=', '[', ']', ',', '_proxy', '=', 'self', '.', '__proxy__', '.', 'select_columns', '(', 'selected_columns', ')', ')'] | Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns] | ['Selects', 'all', 'columns', 'where', 'the', 'name', 'of', 'the', 'column', 'or', 'the', 'type', 'of', 'column', 'is', 'included', 'in', 'the', 'column_names', '.', 'An', 'exception', 'is', 'raised', 'if', 'duplicate', 'columns', 'are', 'selected', 'i', '.', 'e', '.', 'sf', '.', 'select_columns', '(', '[', 'a', 'a', ']', ')', 'or', 'non', '-', 'existent', 'columns', 'are', 'selected', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe.py#L3062-L3137 |
3,122 | Spinmob/spinmob | _data.py | fitter.set_guess_to_fit_result | def set_guess_to_fit_result(self):
"""
If you have a fit result, set the guess parameters to the
fit parameters.
"""
if self.results is None:
print("No fit results to use! Run fit() first.")
return
# loop over the results and set the guess values
for n in range(len(self._pguess)): self._pguess[n] = self.results[0][n]
if self['autoplot']: self.plot()
return self | python | def set_guess_to_fit_result(self):
"""
If you have a fit result, set the guess parameters to the
fit parameters.
"""
if self.results is None:
print("No fit results to use! Run fit() first.")
return
# loop over the results and set the guess values
for n in range(len(self._pguess)): self._pguess[n] = self.results[0][n]
if self['autoplot']: self.plot()
return self | ['def', 'set_guess_to_fit_result', '(', 'self', ')', ':', 'if', 'self', '.', 'results', 'is', 'None', ':', 'print', '(', '"No fit results to use! Run fit() first."', ')', 'return', '# loop over the results and set the guess values', 'for', 'n', 'in', 'range', '(', 'len', '(', 'self', '.', '_pguess', ')', ')', ':', 'self', '.', '_pguess', '[', 'n', ']', '=', 'self', '.', 'results', '[', '0', ']', '[', 'n', ']', 'if', 'self', '[', "'autoplot'", ']', ':', 'self', '.', 'plot', '(', ')', 'return', 'self'] | If you have a fit result, set the guess parameters to the
fit parameters. | ['If', 'you', 'have', 'a', 'fit', 'result', 'set', 'the', 'guess', 'parameters', 'to', 'the', 'fit', 'parameters', '.'] | train | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L2080-L2094 |
3,123 | sassoo/goldman | goldman/middleware/bearer_token/__init__.py | Middleware._get_token | def _get_token(self, req):
""" Get the token from the Authorization header
If the header is actually malformed where Bearer Auth was
indicated by the request then an InvalidAuthSyntax exception
is raised. Otherwise an AuthRequired exception since it's
unclear in this scenario if the requestor was even aware
Authentication was required & if so which "scheme".
Calls _validate_auth_scheme first & bubbles up its
exceptions.
:return:
string token
:raise:
AuthRequired, InvalidAuthSyntax
"""
self._validate_auth_scheme(req)
try:
return naked(req.auth.split(' ')[1])
except IndexError:
desc = 'You are using the Bearer Authentication scheme as ' \
'required to login but your Authorization header is ' \
'completely missing the access_token.'
raise InvalidAuthSyntax(**{
'detail': desc,
'headers': self._get_invalid_token_headers(desc),
'links': 'tools.ietf.org/html/rfc6750#section-2.1',
}) | python | def _get_token(self, req):
""" Get the token from the Authorization header
If the header is actually malformed where Bearer Auth was
indicated by the request then an InvalidAuthSyntax exception
is raised. Otherwise an AuthRequired exception since it's
unclear in this scenario if the requestor was even aware
Authentication was required & if so which "scheme".
Calls _validate_auth_scheme first & bubbles up its
exceptions.
:return:
string token
:raise:
AuthRequired, InvalidAuthSyntax
"""
self._validate_auth_scheme(req)
try:
return naked(req.auth.split(' ')[1])
except IndexError:
desc = 'You are using the Bearer Authentication scheme as ' \
'required to login but your Authorization header is ' \
'completely missing the access_token.'
raise InvalidAuthSyntax(**{
'detail': desc,
'headers': self._get_invalid_token_headers(desc),
'links': 'tools.ietf.org/html/rfc6750#section-2.1',
}) | ['def', '_get_token', '(', 'self', ',', 'req', ')', ':', 'self', '.', '_validate_auth_scheme', '(', 'req', ')', 'try', ':', 'return', 'naked', '(', 'req', '.', 'auth', '.', 'split', '(', "' '", ')', '[', '1', ']', ')', 'except', 'IndexError', ':', 'desc', '=', "'You are using the Bearer Authentication scheme as '", "'required to login but your Authorization header is '", "'completely missing the access_token.'", 'raise', 'InvalidAuthSyntax', '(', '*', '*', '{', "'detail'", ':', 'desc', ',', "'headers'", ':', 'self', '.', '_get_invalid_token_headers', '(', 'desc', ')', ',', "'links'", ':', "'tools.ietf.org/html/rfc6750#section-2.1'", ',', '}', ')'] | Get the token from the Authorization header
If the header is actually malformed where Bearer Auth was
indicated by the request then an InvalidAuthSyntax exception
is raised. Otherwise an AuthRequired exception since it's
unclear in this scenario if the requestor was even aware
Authentication was required & if so which "scheme".
Calls _validate_auth_scheme first & bubbles up its
exceptions.
:return:
string token
:raise:
AuthRequired, InvalidAuthSyntax | ['Get', 'the', 'token', 'from', 'the', 'Authorization', 'header'] | train | https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/middleware/bearer_token/__init__.py#L123-L154 |
3,124 | neuropsychology/NeuroKit.py | neurokit/statistics/statistics.py | find_following_duplicates | def find_following_duplicates(array):
"""
Find the duplicates that are following themselves.
Parameters
----------
array : list or ndarray
A list containing duplicates.
Returns
----------
uniques : list
A list containing True for each unique and False for following duplicates.
Example
----------
>>> import neurokit as nk
>>> mylist = ["a","a","b","a","a","a","c","c","b","b"]
>>> uniques = nk.find_following_duplicates(mylist)
>>> indices = np.where(uniques) # Find indices of uniques
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
"""
array = array[:]
uniques = []
for i in range(len(array)):
if i == 0:
uniques.append(True)
else:
if array[i] == array[i-1]:
uniques.append(False)
else:
uniques.append(True)
return(uniques) | python | def find_following_duplicates(array):
"""
Find the duplicates that are following themselves.
Parameters
----------
array : list or ndarray
A list containing duplicates.
Returns
----------
uniques : list
A list containing True for each unique and False for following duplicates.
Example
----------
>>> import neurokit as nk
>>> mylist = ["a","a","b","a","a","a","c","c","b","b"]
>>> uniques = nk.find_following_duplicates(mylist)
>>> indices = np.where(uniques) # Find indices of uniques
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
"""
array = array[:]
uniques = []
for i in range(len(array)):
if i == 0:
uniques.append(True)
else:
if array[i] == array[i-1]:
uniques.append(False)
else:
uniques.append(True)
return(uniques) | ['def', 'find_following_duplicates', '(', 'array', ')', ':', 'array', '=', 'array', '[', ':', ']', 'uniques', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'array', ')', ')', ':', 'if', 'i', '==', '0', ':', 'uniques', '.', 'append', '(', 'True', ')', 'else', ':', 'if', 'array', '[', 'i', ']', '==', 'array', '[', 'i', '-', '1', ']', ':', 'uniques', '.', 'append', '(', 'False', ')', 'else', ':', 'uniques', '.', 'append', '(', 'True', ')', 'return', '(', 'uniques', ')'] | Find the duplicates that are following themselves.
Parameters
----------
array : list or ndarray
A list containing duplicates.
Returns
----------
uniques : list
A list containing True for each unique and False for following duplicates.
Example
----------
>>> import neurokit as nk
>>> mylist = ["a","a","b","a","a","a","c","c","b","b"]
>>> uniques = nk.find_following_duplicates(mylist)
>>> indices = np.where(uniques) # Find indices of uniques
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy | ['Find', 'the', 'duplicates', 'that', 'are', 'following', 'themselves', '.'] | train | https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/statistics/statistics.py#L219-L263 |
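Working the docstring example above through by hand gives the result below (the nk alias and top-level import follow the docstring itself):
    import numpy as np
    import neurokit as nk

    mylist = ["a", "a", "b", "a", "a", "a", "c", "c", "b", "b"]
    uniques = nk.find_following_duplicates(mylist)
    # [True, False, True, True, False, False, True, False, True, False]
    indices = np.where(uniques)[0]
    # array([0, 2, 3, 6, 8]) -> position of the first occurrence in each run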
3,125 | ocaballeror/LyricFetch | lyricfetch/scraping.py | musixmatch | def musixmatch(song):
"""
Returns the lyrics found in musixmatch for the specified mp3 file or an
empty string if not found.
"""
escape = re.sub("'-¡¿", '', URLESCAPE)
translate = {
escape: '',
' ': '-'
}
artist = song.artist.title()
artist = re.sub(r"( '|' )", '', artist)
artist = re.sub(r"'", '-', artist)
title = song.title
title = re.sub(r"( '|' )", '', title)
title = re.sub(r"'", '-', title)
artist = normalize(artist, translate)
artist = re.sub(r'\-{2,}', '-', artist)
title = normalize(title, translate)
title = re.sub(r'\-{2,}', '-', title)
url = 'https://www.musixmatch.com/lyrics/{}/{}'.format(artist, title)
soup = get_url(url)
text = ''
contents = soup.find_all('p', class_='mxm-lyrics__content')
for p in contents:
text += p.get_text().strip()
if p != contents[-1]:
text += '\n\n'
return text.strip() | python | def musixmatch(song):
"""
Returns the lyrics found in musixmatch for the specified mp3 file or an
empty string if not found.
"""
escape = re.sub("'-¡¿", '', URLESCAPE)
translate = {
escape: '',
' ': '-'
}
artist = song.artist.title()
artist = re.sub(r"( '|' )", '', artist)
artist = re.sub(r"'", '-', artist)
title = song.title
title = re.sub(r"( '|' )", '', title)
title = re.sub(r"'", '-', title)
artist = normalize(artist, translate)
artist = re.sub(r'\-{2,}', '-', artist)
title = normalize(title, translate)
title = re.sub(r'\-{2,}', '-', title)
url = 'https://www.musixmatch.com/lyrics/{}/{}'.format(artist, title)
soup = get_url(url)
text = ''
contents = soup.find_all('p', class_='mxm-lyrics__content')
for p in contents:
text += p.get_text().strip()
if p != contents[-1]:
text += '\n\n'
return text.strip() | ['def', 'musixmatch', '(', 'song', ')', ':', 'escape', '=', 're', '.', 'sub', '(', '"\'-¡¿", ', "'", ', ', 'U', 'LESCAPE)', '', 'translate', '=', '{', 'escape', ':', "''", ',', "' '", ':', "'-'", '}', 'artist', '=', 'song', '.', 'artist', '.', 'title', '(', ')', 'artist', '=', 're', '.', 'sub', '(', 'r"( \'|\' )"', ',', "''", ',', 'artist', ')', 'artist', '=', 're', '.', 'sub', '(', 'r"\'"', ',', "'-'", ',', 'artist', ')', 'title', '=', 'song', '.', 'title', 'title', '=', 're', '.', 'sub', '(', 'r"( \'|\' )"', ',', "''", ',', 'title', ')', 'title', '=', 're', '.', 'sub', '(', 'r"\'"', ',', "'-'", ',', 'title', ')', 'artist', '=', 'normalize', '(', 'artist', ',', 'translate', ')', 'artist', '=', 're', '.', 'sub', '(', "r'\\-{2,}'", ',', "'-'", ',', 'artist', ')', 'title', '=', 'normalize', '(', 'title', ',', 'translate', ')', 'title', '=', 're', '.', 'sub', '(', "r'\\-{2,}'", ',', "'-'", ',', 'title', ')', 'url', '=', "'https://www.musixmatch.com/lyrics/{}/{}'", '.', 'format', '(', 'artist', ',', 'title', ')', 'soup', '=', 'get_url', '(', 'url', ')', 'text', '=', "''", 'contents', '=', 'soup', '.', 'find_all', '(', "'p'", ',', 'class_', '=', "'mxm-lyrics__content'", ')', 'for', 'p', 'in', 'contents', ':', 'text', '+=', 'p', '.', 'get_text', '(', ')', '.', 'strip', '(', ')', 'if', 'p', '!=', 'contents', '[', '-', '1', ']', ':', 'text', '+=', "'\\n\\n'", 'return', 'text', '.', 'strip', '(', ')'] | Returns the lyrics found in musixmatch for the specified mp3 file or an
empty string if not found. | ['Returns', 'the', 'lyrics', 'found', 'in', 'musixmatch', 'for', 'the', 'specified', 'mp3', 'file', 'or', 'an', 'empty', 'string', 'if', 'not', 'found', '.'] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L283-L314 |
3,126 | Auzzy/1846-routes | routes1846/find_best_routes.py | _filter_invalid_routes | def _filter_invalid_routes(routes, board, railroad):
"""
Given a collection of routes, returns a new set containing only valid routes. Invalid routes removed:
- contain less than 2 cities, or
- go through Chicago using an impassable exit
- only contain Chicago as a station, but don't use the correct exit path
This filtering after the fact keeps the path finding algorithm simpler. It allows groups of 3 cells to be considered
(important for the Chicago checks), which would be tricky, since the algorithm operates on pairs of cells (at the
time of writing).
"""
chicago_space = board.get_space(CHICAGO_CELL)
chicago_neighbor_cells = [cell for cell in CHICAGO_CELL.neighbors.values() if cell != CHICAGO_CONNECTIONS_CELL]
stations = board.stations(railroad.name)
# A sieve style filter. If a condition isn't met, iteration continues to the next item. Items meeting all conditions
# are added to valid_routes at the end of the loop iteration.
valid_routes = set()
for route in routes:
# A route must connect at least 2 cities.
if len(route.cities) < 2:
continue
# A route cannot run from east to east
if isinstance(route.cities[0], EastTerminalCity) and isinstance(route.cities[-1], EastTerminalCity):
continue
# If the route goes through Chicago and isn't [C5, D6], ensure the path it took either contains its station or is unblocked
if route.contains_cell(CHICAGO_CONNECTIONS_CELL) and len(route.cities) != 2:
# Finds the subroute which starts at Chicago and is 3 tiles long. That is, it will go [C5, D6, chicago exit]
all_chicago_subroutes = [subroute for subroute in route.subroutes(CHICAGO_CONNECTIONS_CELL) if len(subroute) == 3]
chicago_subroute = all_chicago_subroutes[0] if all_chicago_subroutes else None
for cell in chicago_neighbor_cells:
chicago_exit = chicago_subroute and chicago_subroute.contains_cell(cell)
if chicago_exit and chicago_space.passable(cell, railroad):
break
else:
continue
# Each route must contain at least 1 station
stations_on_route = [station for station in stations if route.contains_cell(station.cell)]
if not stations_on_route:
continue
# If the only station is Chicago, the path must be [D6, C5], or exit through the appropriate side.
elif [CHICAGO_CELL] == [station.cell for station in stations_on_route]:
exit_cell = board.get_space(CHICAGO_CELL).get_station_exit_cell(stations_on_route[0])
chicago_exit_route = Route.create([chicago_space, board.get_space(exit_cell)])
if not (len(route) == 2 and route.contains_cell(CHICAGO_CONNECTIONS_CELL)) and not route.overlap(chicago_exit_route):
continue
valid_routes.add(route)
return valid_routes | python | def _filter_invalid_routes(routes, board, railroad):
"""
Given a collection of routes, returns a new set containing only valid routes. Invalid routes removed:
- contain less than 2 cities, or
- go through Chicago using an impassable exit
- only contain Chicago as a station, but don't use the correct exit path
This filtering after the fact keeps the path finding algorithm simpler. It allows groups of 3 cells to be considered
(important for the Chicago checks), which would be tricky, since the algorithm operates on pairs of cells (at the
time of writing).
"""
chicago_space = board.get_space(CHICAGO_CELL)
chicago_neighbor_cells = [cell for cell in CHICAGO_CELL.neighbors.values() if cell != CHICAGO_CONNECTIONS_CELL]
stations = board.stations(railroad.name)
# A sieve style filter. If a condition isn't met, iteration continues to the next item. Items meeting all conditions
# are added to valid_routes at the end of the loop iteration.
valid_routes = set()
for route in routes:
# A route must connect at least 2 cities.
if len(route.cities) < 2:
continue
# A route cannot run from east to east
if isinstance(route.cities[0], EastTerminalCity) and isinstance(route.cities[-1], EastTerminalCity):
continue
# If the route goes through Chicago and isn't [C5, D6], ensure the path it took either contains its station or is unblocked
if route.contains_cell(CHICAGO_CONNECTIONS_CELL) and len(route.cities) != 2:
# Finds the subroute which starts at Chicago and is 3 tiles long. That is, it will go [C5, D6, chicago exit]
all_chicago_subroutes = [subroute for subroute in route.subroutes(CHICAGO_CONNECTIONS_CELL) if len(subroute) == 3]
chicago_subroute = all_chicago_subroutes[0] if all_chicago_subroutes else None
for cell in chicago_neighbor_cells:
chicago_exit = chicago_subroute and chicago_subroute.contains_cell(cell)
if chicago_exit and chicago_space.passable(cell, railroad):
break
else:
continue
# Each route must contain at least 1 station
stations_on_route = [station for station in stations if route.contains_cell(station.cell)]
if not stations_on_route:
continue
# If the only station is Chicago, the path must be [D6, C5], or exit through the appropriate side.
elif [CHICAGO_CELL] == [station.cell for station in stations_on_route]:
exit_cell = board.get_space(CHICAGO_CELL).get_station_exit_cell(stations_on_route[0])
chicago_exit_route = Route.create([chicago_space, board.get_space(exit_cell)])
if not (len(route) == 2 and route.contains_cell(CHICAGO_CONNECTIONS_CELL)) and not route.overlap(chicago_exit_route):
continue
valid_routes.add(route)
return valid_routes | ['def', '_filter_invalid_routes', '(', 'routes', ',', 'board', ',', 'railroad', ')', ':', 'chicago_space', '=', 'board', '.', 'get_space', '(', 'CHICAGO_CELL', ')', 'chicago_neighbor_cells', '=', '[', 'cell', 'for', 'cell', 'in', 'CHICAGO_CELL', '.', 'neighbors', '.', 'values', '(', ')', 'if', 'cell', '!=', 'CHICAGO_CONNECTIONS_CELL', ']', 'stations', '=', 'board', '.', 'stations', '(', 'railroad', '.', 'name', ')', "# A sieve style filter. If a condition isn't met, iteration continues to the next item. Items meeting all conditions", '# are added to valid_routes at the end of the loop iteration.', 'valid_routes', '=', 'set', '(', ')', 'for', 'route', 'in', 'routes', ':', '# A route must connect at least 2 cities.', 'if', 'len', '(', 'route', '.', 'cities', ')', '<', '2', ':', 'continue', '# A route cannot run from east to east', 'if', 'isinstance', '(', 'route', '.', 'cities', '[', '0', ']', ',', 'EastTerminalCity', ')', 'and', 'isinstance', '(', 'route', '.', 'cities', '[', '-', '1', ']', ',', 'EastTerminalCity', ')', ':', 'continue', "# If the route goes through Chicago and isn't [C5, D6], ensure the path it took either contains its station or is unblocked", 'if', 'route', '.', 'contains_cell', '(', 'CHICAGO_CONNECTIONS_CELL', ')', 'and', 'len', '(', 'route', '.', 'cities', ')', '!=', '2', ':', '# Finds the subroute which starts at Chicago and is 3 tiles long. That is, it will go [C5, D6, chicago exit]', 'all_chicago_subroutes', '=', '[', 'subroute', 'for', 'subroute', 'in', 'route', '.', 'subroutes', '(', 'CHICAGO_CONNECTIONS_CELL', ')', 'if', 'len', '(', 'subroute', ')', '==', '3', ']', 'chicago_subroute', '=', 'all_chicago_subroutes', '[', '0', ']', 'if', 'all_chicago_subroutes', 'else', 'None', 'for', 'cell', 'in', 'chicago_neighbor_cells', ':', 'chicago_exit', '=', 'chicago_subroute', 'and', 'chicago_subroute', '.', 'contains_cell', '(', 'cell', ')', 'if', 'chicago_exit', 'and', 'chicago_space', '.', 'passable', '(', 'cell', ',', 'railroad', ')', ':', 'break', 'else', ':', 'continue', '# Each route must contain at least 1 station', 'stations_on_route', '=', '[', 'station', 'for', 'station', 'in', 'stations', 'if', 'route', '.', 'contains_cell', '(', 'station', '.', 'cell', ')', ']', 'if', 'not', 'stations_on_route', ':', 'continue', '# If the only station is Chicago, the path must be [D6, C5], or exit through the appropriate side.', 'elif', '[', 'CHICAGO_CELL', ']', '==', '[', 'station', '.', 'cell', 'for', 'station', 'in', 'stations_on_route', ']', ':', 'exit_cell', '=', 'board', '.', 'get_space', '(', 'CHICAGO_CELL', ')', '.', 'get_station_exit_cell', '(', 'stations_on_route', '[', '0', ']', ')', 'chicago_exit_route', '=', 'Route', '.', 'create', '(', '[', 'chicago_space', ',', 'board', '.', 'get_space', '(', 'exit_cell', ')', ']', ')', 'if', 'not', '(', 'len', '(', 'route', ')', '==', '2', 'and', 'route', '.', 'contains_cell', '(', 'CHICAGO_CONNECTIONS_CELL', ')', ')', 'and', 'not', 'route', '.', 'overlap', '(', 'chicago_exit_route', ')', ':', 'continue', 'valid_routes', '.', 'add', '(', 'route', ')', 'return', 'valid_routes'] | Given a collection of routes, returns a new set containing only valid routes. Invalid routes removed:
- contain less than 2 cities, or
- go through Chicago using an impassable exit
- only contain Chicago as a station, but don't use the correct exit path
This filtering after the fact keeps the path finding algorithm simpler. It allows groups of 3 cells to be considered
(important for the Chicago checks), which would be tricky, since the algorithm operates on pairs of cells (at the
time of writing). | ['Given', 'a', 'collection', 'of', 'routes', 'returns', 'a', 'new', 'set', 'containing', 'only', 'valid', 'routes', '.', 'Invalid', 'routes', 'removed', ':', '-', 'contain', 'less', 'than', '2', 'cities', 'or', '-', 'go', 'through', 'Chicago', 'using', 'an', 'impassable', 'exit', '-', 'only', 'contain', 'Chicago', 'as', 'a', 'station', 'but', 'don', 't', 'use', 'the', 'correct', 'exit', 'path'] | train | https://github.com/Auzzy/1846-routes/blob/60c90928e184cbcc09c9fef46c2df07f5f14c2c2/routes1846/find_best_routes.py#L171-L224 |
3,127 | apple/turicreate | src/unity/python/turicreate/toolkits/_mxnet/_mxnet_to_coreml/_layers.py | convert_elementwise_mul_scalar | def convert_elementwise_mul_scalar(net, node, module, builder):
"""Convert a scalar multiplication from mxnet to coreml.
Parameters
----------
net: network
A mxnet network object.
node: layer
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
import numpy
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attr(node)
mult = literal_eval(param['scalar'])
builder.add_scale(name=name,
W=numpy.array([mult]),
b=0,
has_bias=False,
input_name=input_name,
output_name=output_name) | python | def convert_elementwise_mul_scalar(net, node, module, builder):
"""Convert a scalar multiplication from mxnet to coreml.
Parameters
----------
net: network
A mxnet network object.
node: layer
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
import numpy
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attr(node)
mult = literal_eval(param['scalar'])
builder.add_scale(name=name,
W=numpy.array([mult]),
b=0,
has_bias=False,
input_name=input_name,
output_name=output_name) | ['def', 'convert_elementwise_mul_scalar', '(', 'net', ',', 'node', ',', 'module', ',', 'builder', ')', ':', 'import', 'numpy', 'input_name', ',', 'output_name', '=', '_get_input_output_name', '(', 'net', ',', 'node', ')', 'name', '=', 'node', '[', "'name'", ']', 'param', '=', '_get_attr', '(', 'node', ')', 'mult', '=', 'literal_eval', '(', 'param', '[', "'scalar'", ']', ')', 'builder', '.', 'add_scale', '(', 'name', '=', 'name', ',', 'W', '=', 'numpy', '.', 'array', '(', '[', 'mult', ']', ')', ',', 'b', '=', '0', ',', 'has_bias', '=', 'False', ',', 'input_name', '=', 'input_name', ',', 'output_name', '=', 'output_name', ')'] | Convert a scalar multiplication from mxnet to coreml.
Parameters
----------
net: network
A mxnet network object.
node: layer
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object. | ['Convert', 'a', 'scalar', 'multiplication', 'from', 'mxnet', 'to', 'coreml', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mxnet/_mxnet_to_coreml/_layers.py#L224-L252 |
3,128 | pydata/xarray | xarray/coding/cftimeindex.py | _parsed_string_to_bounds | def _parsed_string_to_bounds(date_type, resolution, parsed):
"""Generalization of
pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds
for use with non-standard calendars and cftime.datetime
objects.
"""
if resolution == 'year':
return (date_type(parsed.year, 1, 1),
date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1))
elif resolution == 'month':
if parsed.month == 12:
end = date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1)
else:
end = (date_type(parsed.year, parsed.month + 1, 1) -
timedelta(microseconds=1))
return date_type(parsed.year, parsed.month, 1), end
elif resolution == 'day':
start = date_type(parsed.year, parsed.month, parsed.day)
return start, start + timedelta(days=1, microseconds=-1)
elif resolution == 'hour':
start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour)
return start, start + timedelta(hours=1, microseconds=-1)
elif resolution == 'minute':
start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour,
parsed.minute)
return start, start + timedelta(minutes=1, microseconds=-1)
elif resolution == 'second':
start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour,
parsed.minute, parsed.second)
return start, start + timedelta(seconds=1, microseconds=-1)
else:
raise KeyError | python | def _parsed_string_to_bounds(date_type, resolution, parsed):
"""Generalization of
pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds
for use with non-standard calendars and cftime.datetime
objects.
"""
if resolution == 'year':
return (date_type(parsed.year, 1, 1),
date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1))
elif resolution == 'month':
if parsed.month == 12:
end = date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1)
else:
end = (date_type(parsed.year, parsed.month + 1, 1) -
timedelta(microseconds=1))
return date_type(parsed.year, parsed.month, 1), end
elif resolution == 'day':
start = date_type(parsed.year, parsed.month, parsed.day)
return start, start + timedelta(days=1, microseconds=-1)
elif resolution == 'hour':
start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour)
return start, start + timedelta(hours=1, microseconds=-1)
elif resolution == 'minute':
start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour,
parsed.minute)
return start, start + timedelta(minutes=1, microseconds=-1)
elif resolution == 'second':
start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour,
parsed.minute, parsed.second)
return start, start + timedelta(seconds=1, microseconds=-1)
else:
raise KeyError | ['def', '_parsed_string_to_bounds', '(', 'date_type', ',', 'resolution', ',', 'parsed', ')', ':', 'if', 'resolution', '==', "'year'", ':', 'return', '(', 'date_type', '(', 'parsed', '.', 'year', ',', '1', ',', '1', ')', ',', 'date_type', '(', 'parsed', '.', 'year', '+', '1', ',', '1', ',', '1', ')', '-', 'timedelta', '(', 'microseconds', '=', '1', ')', ')', 'elif', 'resolution', '==', "'month'", ':', 'if', 'parsed', '.', 'month', '==', '12', ':', 'end', '=', 'date_type', '(', 'parsed', '.', 'year', '+', '1', ',', '1', ',', '1', ')', '-', 'timedelta', '(', 'microseconds', '=', '1', ')', 'else', ':', 'end', '=', '(', 'date_type', '(', 'parsed', '.', 'year', ',', 'parsed', '.', 'month', '+', '1', ',', '1', ')', '-', 'timedelta', '(', 'microseconds', '=', '1', ')', ')', 'return', 'date_type', '(', 'parsed', '.', 'year', ',', 'parsed', '.', 'month', ',', '1', ')', ',', 'end', 'elif', 'resolution', '==', "'day'", ':', 'start', '=', 'date_type', '(', 'parsed', '.', 'year', ',', 'parsed', '.', 'month', ',', 'parsed', '.', 'day', ')', 'return', 'start', ',', 'start', '+', 'timedelta', '(', 'days', '=', '1', ',', 'microseconds', '=', '-', '1', ')', 'elif', 'resolution', '==', "'hour'", ':', 'start', '=', 'date_type', '(', 'parsed', '.', 'year', ',', 'parsed', '.', 'month', ',', 'parsed', '.', 'day', ',', 'parsed', '.', 'hour', ')', 'return', 'start', ',', 'start', '+', 'timedelta', '(', 'hours', '=', '1', ',', 'microseconds', '=', '-', '1', ')', 'elif', 'resolution', '==', "'minute'", ':', 'start', '=', 'date_type', '(', 'parsed', '.', 'year', ',', 'parsed', '.', 'month', ',', 'parsed', '.', 'day', ',', 'parsed', '.', 'hour', ',', 'parsed', '.', 'minute', ')', 'return', 'start', ',', 'start', '+', 'timedelta', '(', 'minutes', '=', '1', ',', 'microseconds', '=', '-', '1', ')', 'elif', 'resolution', '==', "'second'", ':', 'start', '=', 'date_type', '(', 'parsed', '.', 'year', ',', 'parsed', '.', 'month', ',', 'parsed', '.', 'day', ',', 'parsed', '.', 'hour', ',', 'parsed', '.', 'minute', ',', 'parsed', '.', 'second', ')', 'return', 'start', ',', 'start', '+', 'timedelta', '(', 'seconds', '=', '1', ',', 'microseconds', '=', '-', '1', ')', 'else', ':', 'raise', 'KeyError'] | Generalization of
pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds
for use with non-standard calendars and cftime.datetime
objects. | ['Generalization', 'of', 'pandas', '.', 'tseries', '.', 'index', '.', 'DatetimeIndex', '.', '_parsed_string_to_bounds', 'for', 'use', 'with', 'non', '-', 'standard', 'calendars', 'and', 'cftime', '.', 'datetime', 'objects', '.'] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/coding/cftimeindex.py#L117-L148 |
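An illustrative sketch of the 'month' branch above, assuming cftime is installed; _parsed_string_to_bounds is private to the xarray.coding.cftimeindex module named in the path, so the import is for demonstration only, and the exact repr of the printed dates may vary with the cftime version:
    import cftime
    from datetime import datetime
    from xarray.coding.cftimeindex import _parsed_string_to_bounds

    # 'parsed' mimics what pandas' partial-date parser would produce for '2000-02'
    parsed = datetime(2000, 2, 1)
    start, end = _parsed_string_to_bounds(cftime.DatetimeNoLeap, 'month', parsed)
    print(start)  # 2000-02-01 00:00:00
    print(end)    # 2000-02-28 23:59:59.999999 (no Feb 29 in the noleap calendar)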
3,129 | jtpaasch/simplygithub | simplygithub/branches.py | update_branch | def update_branch(profile, name, sha):
"""Move a branch's HEAD to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the branch to update.
sha
The commit SHA to point the branch's HEAD to.
Returns:
A dict with data about the branch.
"""
ref = "heads/" + name
data = refs.update_ref(profile, ref, sha)
return data | python | def update_branch(profile, name, sha):
"""Move a branch's HEAD to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the branch to update.
sha
The commit SHA to point the branch's HEAD to.
Returns:
A dict with data about the branch.
"""
ref = "heads/" + name
data = refs.update_ref(profile, ref, sha)
return data | ['def', 'update_branch', '(', 'profile', ',', 'name', ',', 'sha', ')', ':', 'ref', '=', '"heads/"', '+', 'name', 'data', '=', 'refs', '.', 'update_ref', '(', 'profile', ',', 'ref', ',', 'sha', ')', 'return', 'data'] | Move a branch's HEAD to a new SHA.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
name
The name of the branch to update.
sha
The commit SHA to point the branch's HEAD to.
Returns:
A dict with data about the branch. | ['Move', 'a', 'branch', 's', 'HEAD', 'to', 'a', 'new', 'SHA', '.'] | train | https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/branches.py#L98-L120 |
3,130 | google/grr | grr/core/grr_response_core/lib/utils.py | Xor | def Xor(bytestr, key):
"""Returns a `bytes` object where each byte has been xored with key."""
# TODO(hanuszczak): Remove this import when string migration is done.
# pytype: disable=import-error
from builtins import bytes # pylint: disable=redefined-builtin, g-import-not-at-top
# pytype: enable=import-error
precondition.AssertType(bytestr, bytes)
# TODO: This seemingly no-op operation actually changes things.
# In Python 2 this function receives a `str` object which has different
# iterator semantics. So we use a `bytes` wrapper from the `future` package to
# get the Python 3 behaviour. In Python 3 this should be indeed a no-op. Once
# the migration is completed and support for Python 2 is dropped, this line
# can be removed.
bytestr = bytes(bytestr)
return bytes([byte ^ key for byte in bytestr]) | python | def Xor(bytestr, key):
"""Returns a `bytes` object where each byte has been xored with key."""
# TODO(hanuszczak): Remove this import when string migration is done.
# pytype: disable=import-error
from builtins import bytes # pylint: disable=redefined-builtin, g-import-not-at-top
# pytype: enable=import-error
precondition.AssertType(bytestr, bytes)
# TODO: This seemingly no-op operation actually changes things.
# In Python 2 this function receives a `str` object which has different
# iterator semantics. So we use a `bytes` wrapper from the `future` package to
# get the Python 3 behaviour. In Python 3 this should be indeed a no-op. Once
# the migration is completed and support for Python 2 is dropped, this line
# can be removed.
bytestr = bytes(bytestr)
return bytes([byte ^ key for byte in bytestr]) | ['def', 'Xor', '(', 'bytestr', ',', 'key', ')', ':', '# TODO(hanuszczak): Remove this import when string migration is done.', '# pytype: disable=import-error', 'from', 'builtins', 'import', 'bytes', '# pylint: disable=redefined-builtin, g-import-not-at-top', '# pytype: enable=import-error', 'precondition', '.', 'AssertType', '(', 'bytestr', ',', 'bytes', ')', '# TODO: This seemingly no-op operation actually changes things.', '# In Python 2 this function receives a `str` object which has different', '# iterator semantics. So we use a `bytes` wrapper from the `future` package to', '# get the Python 3 behaviour. In Python 3 this should be indeed a no-op. Once', '# the migration is completed and support for Python 2 is dropped, this line', '# can be removed.', 'bytestr', '=', 'bytes', '(', 'bytestr', ')', 'return', 'bytes', '(', '[', 'byte', '^', 'key', 'for', 'byte', 'in', 'bytestr', ']', ')'] | Returns a `bytes` object where each byte has been xored with key. | ['Returns', 'a', 'bytes', 'object', 'where', 'each', 'byte', 'has', 'been', 'xored', 'with', 'key', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/utils.py#L535-L551 |
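A dependency-free sketch of the same byte-wise XOR under Python 3 semantics (without the future/builtins shim used above); applying the single-byte key twice recovers the original bytes:
    def xor_bytes(data: bytes, key: int) -> bytes:
        # XOR every byte with the one-byte key (0-255)
        return bytes(b ^ key for b in data)

    obfuscated = xor_bytes(b'hello', 0x2A)
    assert xor_bytes(obfuscated, 0x2A) == b'hello'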
3,131 | commonsense/metanl | metanl/mecab.py | MeCabWrapper.is_stopword_record | def is_stopword_record(self, record):
"""
Determine whether a single MeCab record represents a stopword.
This mostly determines words to strip based on their parts of speech.
If common_words is set to True (default), it will also strip common
verbs and nouns such as くる and よう. If more_stopwords is True, it
will look at the sub-part of speech to remove more categories.
"""
# preserve negations
if record.root == 'ない':
return False
return (
record.pos in STOPWORD_CATEGORIES or
record.subclass1 in STOPWORD_CATEGORIES or
record.root in STOPWORD_ROOTS
) | python | def is_stopword_record(self, record):
"""
Determine whether a single MeCab record represents a stopword.
This mostly determines words to strip based on their parts of speech.
If common_words is set to True (default), it will also strip common
verbs and nouns such as くる and よう. If more_stopwords is True, it
will look at the sub-part of speech to remove more categories.
"""
# preserve negations
if record.root == 'ない':
return False
return (
record.pos in STOPWORD_CATEGORIES or
record.subclass1 in STOPWORD_CATEGORIES or
record.root in STOPWORD_ROOTS
) | ['def', 'is_stopword_record', '(', 'self', ',', 'record', ')', ':', '# preserve negations', 'if', 'record', '.', 'root', '==', "'ない':", '', 'return', 'False', 'return', '(', 'record', '.', 'pos', 'in', 'STOPWORD_CATEGORIES', 'or', 'record', '.', 'subclass1', 'in', 'STOPWORD_CATEGORIES', 'or', 'record', '.', 'root', 'in', 'STOPWORD_ROOTS', ')'] | Determine whether a single MeCab record represents a stopword.
This mostly determines words to strip based on their parts of speech.
If common_words is set to True (default), it will also strip common
verbs and nouns such as くる and よう. If more_stopwords is True, it
will look at the sub-part of speech to remove more categories. | ['Determine', 'whether', 'a', 'single', 'MeCab', 'record', 'represents', 'a', 'stopword', '.'] | train | https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/mecab.py#L162-L178 |
3,132 | PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/issuer.py | Issuer.get_box_ids_issued | async def get_box_ids_issued(self) -> str:
"""
Return json object on lists of all unique box identifiers (schema identifiers,
credential definition identifiers, and revocation registry identifiers) for
all credential definitions and credentials issued; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:tag",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
...
]
}
An issuer must issue a credential definition to include its schema identifier
in the returned values; the schema identifier in isolation belongs properly
to an Origin, not necessarily to an Issuer.
The operation may be useful for a Verifier anchor going off-line to seed its
cache before doing so.
:return: tuple of sets for schema ids, cred def ids, rev reg ids
"""
LOGGER.debug('Issuer.get_box_ids_issued >>>')
cd_ids = [
d for d in listdir(self.dir_tails) if isdir(join(self.dir_tails, d)) and ok_cred_def_id(d, self.did)]
s_ids = []
for cd_id in cd_ids:
try:
s_ids.append(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])
except AbsentSchema:
LOGGER.error(
'Issuer %s has issued cred def %s but no corresponding schema on ledger',
self.name,
cd_id)
rr_ids = [basename(link) for link in Tails.links(self.dir_tails, self.did)]
rv = json.dumps({
'schema_id': s_ids,
'cred_def_id': cd_ids,
'rev_reg_id': rr_ids
})
LOGGER.debug('Issuer.get_box_ids_issued <<< %s', rv)
return rv | python | async def get_box_ids_issued(self) -> str:
"""
Return json object on lists of all unique box identifiers (schema identifiers,
credential definition identifiers, and revocation registry identifiers) for
all credential definitions and credentials issued; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:tag",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
...
]
}
An issuer must issue a credential definition to include its schema identifier
in the returned values; the schema identifier in isolation belongs properly
to an Origin, not necessarily to an Issuer.
The operation may be useful for a Verifier anchor going off-line to seed its
cache before doing so.
:return: tuple of sets for schema ids, cred def ids, rev reg ids
"""
LOGGER.debug('Issuer.get_box_ids_issued >>>')
cd_ids = [
d for d in listdir(self.dir_tails) if isdir(join(self.dir_tails, d)) and ok_cred_def_id(d, self.did)]
s_ids = []
for cd_id in cd_ids:
try:
s_ids.append(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])
except AbsentSchema:
LOGGER.error(
'Issuer %s has issued cred def %s but no corresponding schema on ledger',
self.name,
cd_id)
rr_ids = [basename(link) for link in Tails.links(self.dir_tails, self.did)]
rv = json.dumps({
'schema_id': s_ids,
'cred_def_id': cd_ids,
'rev_reg_id': rr_ids
})
LOGGER.debug('Issuer.get_box_ids_issued <<< %s', rv)
return rv | ['async', 'def', 'get_box_ids_issued', '(', 'self', ')', '->', 'str', ':', 'LOGGER', '.', 'debug', '(', "'Issuer.get_box_ids_issued >>>'", ')', 'cd_ids', '=', '[', 'd', 'for', 'd', 'in', 'listdir', '(', 'self', '.', 'dir_tails', ')', 'if', 'isdir', '(', 'join', '(', 'self', '.', 'dir_tails', ',', 'd', ')', ')', 'and', 'ok_cred_def_id', '(', 'd', ',', 'self', '.', 'did', ')', ']', 's_ids', '=', '[', ']', 'for', 'cd_id', 'in', 'cd_ids', ':', 'try', ':', 's_ids', '.', 'append', '(', 'json', '.', 'loads', '(', 'await', 'self', '.', 'get_schema', '(', 'cred_def_id2seq_no', '(', 'cd_id', ')', ')', ')', '[', "'id'", ']', ')', 'except', 'AbsentSchema', ':', 'LOGGER', '.', 'error', '(', "'Issuer %s has issued cred def %s but no corresponding schema on ledger'", ',', 'self', '.', 'name', ',', 'cd_id', ')', 'rr_ids', '=', '[', 'basename', '(', 'link', ')', 'for', 'link', 'in', 'Tails', '.', 'links', '(', 'self', '.', 'dir_tails', ',', 'self', '.', 'did', ')', ']', 'rv', '=', 'json', '.', 'dumps', '(', '{', "'schema_id'", ':', 's_ids', ',', "'cred_def_id'", ':', 'cd_ids', ',', "'rev_reg_id'", ':', 'rr_ids', '}', ')', 'LOGGER', '.', 'debug', '(', "'Issuer.get_box_ids_issued <<< %s'", ',', 'rv', ')', 'return', 'rv'] | Return json object on lists of all unique box identifiers (schema identifiers,
credential definition identifiers, and revocation registry identifiers) for
all credential definitions and credentials issued; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:tag",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:tag:CL_ACCUM:1",
...
]
}
An issuer must issue a credential definition to include its schema identifier
in the returned values; the schema identifier in isolation belongs properly
to an Origin, not necessarily to an Issuer.
The operation may be useful for a Verifier anchor going off-line to seed its
cache before doing so.
:return: tuple of sets for schema ids, cred def ids, rev reg ids | ['Return', 'json', 'object', 'on', 'lists', 'of', 'all', 'unique', 'box', 'identifiers', '(', 'schema', 'identifiers', 'credential', 'definition', 'identifiers', 'and', 'revocation', 'registry', 'identifiers', ')', 'for', 'all', 'credential', 'definitions', 'and', 'credentials', 'issued', ';', 'e', '.', 'g', '.'] | train | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/issuer.py#L624-L679 |
3,133 | kensho-technologies/graphql-compiler | graphql_compiler/schema_generation/schema_graph.py | _validate_non_abstract_edge_has_defined_endpoint_types | def _validate_non_abstract_edge_has_defined_endpoint_types(class_name, properties):
"""Validate that the non-abstract edge properties dict has defined in/out link properties."""
edge_source = properties.get(EDGE_SOURCE_PROPERTY_NAME, None)
edge_destination = properties.get(EDGE_DESTINATION_PROPERTY_NAME, None)
has_defined_endpoint_types = all((
edge_source is not None and edge_source.type_id == PROPERTY_TYPE_LINK_ID,
edge_destination is not None and edge_destination.type_id == PROPERTY_TYPE_LINK_ID,
))
if not has_defined_endpoint_types:
raise IllegalSchemaStateError(u'Found a non-abstract edge class with undefined or illegal '
u'in/out properties: {} {}'.format(class_name, properties)) | python | def _validate_non_abstract_edge_has_defined_endpoint_types(class_name, properties):
"""Validate that the non-abstract edge properties dict has defined in/out link properties."""
edge_source = properties.get(EDGE_SOURCE_PROPERTY_NAME, None)
edge_destination = properties.get(EDGE_DESTINATION_PROPERTY_NAME, None)
has_defined_endpoint_types = all((
edge_source is not None and edge_source.type_id == PROPERTY_TYPE_LINK_ID,
edge_destination is not None and edge_destination.type_id == PROPERTY_TYPE_LINK_ID,
))
if not has_defined_endpoint_types:
raise IllegalSchemaStateError(u'Found a non-abstract edge class with undefined or illegal '
u'in/out properties: {} {}'.format(class_name, properties)) | ['def', '_validate_non_abstract_edge_has_defined_endpoint_types', '(', 'class_name', ',', 'properties', ')', ':', 'edge_source', '=', 'properties', '.', 'get', '(', 'EDGE_SOURCE_PROPERTY_NAME', ',', 'None', ')', 'edge_destination', '=', 'properties', '.', 'get', '(', 'EDGE_DESTINATION_PROPERTY_NAME', ',', 'None', ')', 'has_defined_endpoint_types', '=', 'all', '(', '(', 'edge_source', 'is', 'not', 'None', 'and', 'edge_source', '.', 'type_id', '==', 'PROPERTY_TYPE_LINK_ID', ',', 'edge_destination', 'is', 'not', 'None', 'and', 'edge_destination', '.', 'type_id', '==', 'PROPERTY_TYPE_LINK_ID', ',', ')', ')', 'if', 'not', 'has_defined_endpoint_types', ':', 'raise', 'IllegalSchemaStateError', '(', "u'Found a non-abstract edge class with undefined or illegal '", "u'in/out properties: {} {}'", '.', 'format', '(', 'class_name', ',', 'properties', ')', ')'] | Validate that the non-abstract edge properties dict has defined in/out link properties. | ['Validate', 'that', 'the', 'non', '-', 'abstract', 'edge', 'properties', 'dict', 'has', 'defined', 'in', '/', 'out', 'link', 'properties', '.'] | train | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L16-L26 |
3,134 | quantopian/zipline | zipline/pipeline/factors/factor.py | Factor.demean | def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
        each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will
        be written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
        the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
        a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
) | python | def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
        each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will
        be written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
        the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
        a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
) | ['def', 'demean', '(', 'self', ',', 'mask', '=', 'NotSpecified', ',', 'groupby', '=', 'NotSpecified', ')', ':', 'return', 'GroupedRowTransform', '(', 'transform', '=', 'demean', ',', 'transform_args', '=', '(', ')', ',', 'factor', '=', 'self', ',', 'groupby', '=', 'groupby', ',', 'dtype', '=', 'self', '.', 'dtype', ',', 'missing_value', '=', 'self', '.', 'missing_value', ',', 'window_safe', '=', 'self', '.', 'window_safe', ',', 'mask', '=', 'mask', ',', ')'] | Construct a Factor that computes ``self`` and subtracts the mean from
        each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will
        be written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
        the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
        a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby` | ['Construct', 'a', 'Factor', 'that', 'computes', 'self', 'and', 'subtracts', 'the', 'mean', 'from', 'row', 'of', 'the', 'result', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L402-L524 |
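A hedged sketch of wiring ``demean`` into a pipeline; constructing the Pipeline needs no data, and the mask mirrors the pattern suggested in the Notes above:
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import Returns

daily_returns = Returns(window_length=2)
pipe = Pipeline(columns={
    # Ignore the most extreme 1% on each side when computing the per-row means.
    'demeaned_returns': daily_returns.demean(mask=daily_returns.percentile_between(1, 99)),
})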
3,135 | cmorisse/ikp3db | ikp3db.py | IKBreakpoint.restore_breakpoints_state | def restore_breakpoints_state(cls, breakpoints_state_list):
"""Restore the state of breakpoints given a list provided by
        backup_breakpoints_state(). If the list of breakpoints has changed
        since backup, missing or added breakpoints are ignored.
        breakpoints_state_list is a list of tuples. Each tuple is of the form:
(breakpoint_number, enabled, condition)
"""
for breakpoint_state in breakpoints_state_list:
bp = cls.breakpoints_by_number[breakpoint_state[0]]
if bp:
bp.enabled = breakpoint_state[1]
bp.condition = breakpoint_state[2]
cls.update_active_breakpoint_flag()
return | python | def restore_breakpoints_state(cls, breakpoints_state_list):
"""Restore the state of breakpoints given a list provided by
        backup_breakpoints_state(). If the list of breakpoints has changed
        since backup, missing or added breakpoints are ignored.
        breakpoints_state_list is a list of tuples. Each tuple is of the form:
(breakpoint_number, enabled, condition)
"""
for breakpoint_state in breakpoints_state_list:
bp = cls.breakpoints_by_number[breakpoint_state[0]]
if bp:
bp.enabled = breakpoint_state[1]
bp.condition = breakpoint_state[2]
cls.update_active_breakpoint_flag()
return | ['def', 'restore_breakpoints_state', '(', 'cls', ',', 'breakpoints_state_list', ')', ':', 'for', 'breakpoint_state', 'in', 'breakpoints_state_list', ':', 'bp', '=', 'cls', '.', 'breakpoints_by_number', '[', 'breakpoint_state', '[', '0', ']', ']', 'if', 'bp', ':', 'bp', '.', 'enabled', '=', 'breakpoint_state', '[', '1', ']', 'bp', '.', 'condition', '=', 'breakpoint_state', '[', '2', ']', 'cls', '.', 'update_active_breakpoint_flag', '(', ')', 'return'] | Restore the state of breakpoints given a list provided by
        backup_breakpoints_state(). If the list of breakpoints has changed
        since backup, missing or added breakpoints are ignored.
        breakpoints_state_list is a list of tuples. Each tuple is of the form:
(breakpoint_number, enabled, condition) | ['Restore', 'the', 'state', 'of', 'breakpoints', 'given', 'a', 'list', 'provided', 'by', 'backup_breakpoints_state', '()', '.', 'If', 'list', 'of', 'breakpoint', 'has', 'changed', 'since', 'backup', 'missing', 'or', 'added', 'breakpoints', 'are', 'ignored', '.', 'breakpoints_state_list', 'is', 'a', 'list', 'of', 'tuple', '.', 'Each', 'tuple', 'is', 'of', 'form', ':', '(', 'breakpoint_number', 'enabled', 'condition', ')'] | train | https://github.com/cmorisse/ikp3db/blob/a0f318d4e8494b2e6f2f07ec0f1202ca023c920f/ikp3db.py#L559-L573 |
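An illustrative round trip, assuming the ``backup_breakpoints_state`` counterpart named in the docstring returns the list consumed here:
# Sketch only: snapshot the current breakpoint state, mutate breakpoints freely
# (for instance, disable them while stepping through library code), then put it back.
saved_state = IKBreakpoint.backup_breakpoints_state()
# ... temporarily enable/disable or re-condition breakpoints here ...
IKBreakpoint.restore_breakpoints_state(saved_state)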
3,136 | inspirehep/inspire-dojson | inspire_dojson/hepnames/rules.py | name | def name(self, key, value):
"""Populate the ``name`` key.
Also populates the ``status``, ``birth_date`` and ``death_date`` keys through side effects.
"""
def _get_title(value):
c_value = force_single_element(value.get('c', ''))
if c_value != 'title (e.g. Sir)':
return c_value
def _get_value(value):
a_value = force_single_element(value.get('a', ''))
q_value = force_single_element(value.get('q', ''))
return a_value or normalize_name(q_value)
if value.get('d'):
dates = value['d']
try:
self['death_date'] = normalize_date(dates)
except ValueError:
dates = dates.split(' - ')
if len(dates) == 1:
dates = dates[0].split('-')
self['birth_date'] = normalize_date(dates[0])
self['death_date'] = normalize_date(dates[1])
self['status'] = force_single_element(value.get('g', '')).lower()
return {
'numeration': force_single_element(value.get('b', '')),
'preferred_name': force_single_element(value.get('q', '')),
'title': _get_title(value),
'value': _get_value(value),
} | python | def name(self, key, value):
"""Populate the ``name`` key.
Also populates the ``status``, ``birth_date`` and ``death_date`` keys through side effects.
"""
def _get_title(value):
c_value = force_single_element(value.get('c', ''))
if c_value != 'title (e.g. Sir)':
return c_value
def _get_value(value):
a_value = force_single_element(value.get('a', ''))
q_value = force_single_element(value.get('q', ''))
return a_value or normalize_name(q_value)
if value.get('d'):
dates = value['d']
try:
self['death_date'] = normalize_date(dates)
except ValueError:
dates = dates.split(' - ')
if len(dates) == 1:
dates = dates[0].split('-')
self['birth_date'] = normalize_date(dates[0])
self['death_date'] = normalize_date(dates[1])
self['status'] = force_single_element(value.get('g', '')).lower()
return {
'numeration': force_single_element(value.get('b', '')),
'preferred_name': force_single_element(value.get('q', '')),
'title': _get_title(value),
'value': _get_value(value),
} | ['def', 'name', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'def', '_get_title', '(', 'value', ')', ':', 'c_value', '=', 'force_single_element', '(', 'value', '.', 'get', '(', "'c'", ',', "''", ')', ')', 'if', 'c_value', '!=', "'title (e.g. Sir)'", ':', 'return', 'c_value', 'def', '_get_value', '(', 'value', ')', ':', 'a_value', '=', 'force_single_element', '(', 'value', '.', 'get', '(', "'a'", ',', "''", ')', ')', 'q_value', '=', 'force_single_element', '(', 'value', '.', 'get', '(', "'q'", ',', "''", ')', ')', 'return', 'a_value', 'or', 'normalize_name', '(', 'q_value', ')', 'if', 'value', '.', 'get', '(', "'d'", ')', ':', 'dates', '=', 'value', '[', "'d'", ']', 'try', ':', 'self', '[', "'death_date'", ']', '=', 'normalize_date', '(', 'dates', ')', 'except', 'ValueError', ':', 'dates', '=', 'dates', '.', 'split', '(', "' - '", ')', 'if', 'len', '(', 'dates', ')', '==', '1', ':', 'dates', '=', 'dates', '[', '0', ']', '.', 'split', '(', "'-'", ')', 'self', '[', "'birth_date'", ']', '=', 'normalize_date', '(', 'dates', '[', '0', ']', ')', 'self', '[', "'death_date'", ']', '=', 'normalize_date', '(', 'dates', '[', '1', ']', ')', 'self', '[', "'status'", ']', '=', 'force_single_element', '(', 'value', '.', 'get', '(', "'g'", ',', "''", ')', ')', '.', 'lower', '(', ')', 'return', '{', "'numeration'", ':', 'force_single_element', '(', 'value', '.', 'get', '(', "'b'", ',', "''", ')', ')', ',', "'preferred_name'", ':', 'force_single_element', '(', 'value', '.', 'get', '(', "'q'", ',', "''", ')', ')', ',', "'title'", ':', '_get_title', '(', 'value', ')', ',', "'value'", ':', '_get_value', '(', 'value', ')', ',', '}'] | Populate the ``name`` key.
Also populates the ``status``, ``birth_date`` and ``death_date`` keys through side effects. | ['Populate', 'the', 'name', 'key', '.'] | train | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hepnames/rules.py#L176-L209 |
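A rough illustration of the field shape this rule consumes; the subfield values are invented and the exact normalization is delegated to the helpers shown above:
# Hypothetical MARC-style field: name ('a'), numeration ('b'), status ('g') and a date range ('d').
value = {'a': 'Smith, John', 'b': 'Jr.', 'g': 'ACTIVE', 'd': '1900 - 1970'}
# The rule would return the name dict (value/numeration/preferred_name/title) and, as
# side effects, lowercase 'g' into the record's 'status' and split the 'd' range into
# 'birth_date' and 'death_date' via normalize_date().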
3,137 | aodag/WebDispatch | webdispatch/uritemplate.py | URITemplate.convert_values | def convert_values(self, matchdict: Dict[str, str]) -> Dict[str, Any]:
""" convert values of ``matchdict``
        with the converters this object has."""
converted = {}
for varname, value in matchdict.items():
converter = self.converters[varname]
converted[varname] = converter(value)
return converted | python | def convert_values(self, matchdict: Dict[str, str]) -> Dict[str, Any]:
""" convert values of ``matchdict``
        with the converters this object has."""
converted = {}
for varname, value in matchdict.items():
converter = self.converters[varname]
converted[varname] = converter(value)
return converted | ['def', 'convert_values', '(', 'self', ',', 'matchdict', ':', 'Dict', '[', 'str', ',', 'str', ']', ')', '->', 'Dict', '[', 'str', ',', 'Any', ']', ':', 'converted', '=', '{', '}', 'for', 'varname', ',', 'value', 'in', 'matchdict', '.', 'items', '(', ')', ':', 'converter', '=', 'self', '.', 'converters', '[', 'varname', ']', 'converted', '[', 'varname', ']', '=', 'converter', '(', 'value', ')', 'return', 'converted'] | convert values of ``matchdict``
with converter this object has. | ['convert', 'values', 'of', 'matchdict', 'with', 'converter', 'this', 'object', 'has', '.'] | train | https://github.com/aodag/WebDispatch/blob/55f8658a2b4100498e098a80303a346c3940f1bc/webdispatch/uritemplate.py#L136-L144 |
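A minimal sketch of the conversion step in isolation; the converters mapping is normally built from the parsed template and is filled in by hand here purely for illustration:
from webdispatch.uritemplate import URITemplate

tmpl = URITemplate.__new__(URITemplate)   # bypass __init__ only for this sketch
tmpl.converters = {'user_id': int, 'slug': str}
print(tmpl.convert_values({'user_id': '42', 'slug': 'hello'}))  # {'user_id': 42, 'slug': 'hello'}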
3,138 | Cadair/jupyter_environment_kernels | environment_kernels/activate_helper.py | source_bash | def source_bash(args, stdin=None):
"""Simply bash-specific wrapper around source-foreign
Returns a dict to be used as a new environment"""
args = list(args)
new_args = ['bash', '--sourcer=source']
new_args.extend(args)
return source_foreign(new_args, stdin=stdin) | python | def source_bash(args, stdin=None):
"""Simply bash-specific wrapper around source-foreign
Returns a dict to be used as a new environment"""
args = list(args)
new_args = ['bash', '--sourcer=source']
new_args.extend(args)
return source_foreign(new_args, stdin=stdin) | ['def', 'source_bash', '(', 'args', ',', 'stdin', '=', 'None', ')', ':', 'args', '=', 'list', '(', 'args', ')', 'new_args', '=', '[', "'bash'", ',', "'--sourcer=source'", ']', 'new_args', '.', 'extend', '(', 'args', ')', 'return', 'source_foreign', '(', 'new_args', ',', 'stdin', '=', 'stdin', ')'] | Simply bash-specific wrapper around source-foreign
Returns a dict to be used as a new environment | ['Simply', 'bash', '-', 'specific', 'wrapper', 'around', 'source', '-', 'foreign'] | train | https://github.com/Cadair/jupyter_environment_kernels/blob/3da304550b511bda7d5d39280379b5ca39bb31bc/environment_kernels/activate_helper.py#L66-L73 |
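An illustrative call; the script path is a placeholder, and per the docstring the result is a plain dict of environment variables:
from environment_kernels.activate_helper import source_bash

new_env = source_bash(['~/envs/py3/bin/activate'])   # hypothetical activation script
if new_env:
    print(new_env.get('PATH', '').split(':')[0])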
3,139 | bcbio/bcbio-nextgen | bcbio/cwl/hpc.py | _get_filesystem_types | def _get_filesystem_types(args, sample_file):
"""Retrieve the types of inputs and staging based on sample JSON and arguments.
"""
out = set([])
ext = "" if args.no_container else "_container"
with open(sample_file) as in_handle:
for f in _get_file_paths(json.load(in_handle)):
if f.startswith("gs:"):
out.add("gcp%s" % ext)
elif f.startswith("s3:"):
out.add("s3%s" % ext)
elif f.startswith(("https:", "http:")):
out.add("http%s" % ext)
else:
out.add("local%s" % ext)
return out | python | def _get_filesystem_types(args, sample_file):
"""Retrieve the types of inputs and staging based on sample JSON and arguments.
"""
out = set([])
ext = "" if args.no_container else "_container"
with open(sample_file) as in_handle:
for f in _get_file_paths(json.load(in_handle)):
if f.startswith("gs:"):
out.add("gcp%s" % ext)
elif f.startswith("s3:"):
out.add("s3%s" % ext)
elif f.startswith(("https:", "http:")):
out.add("http%s" % ext)
else:
out.add("local%s" % ext)
return out | ['def', '_get_filesystem_types', '(', 'args', ',', 'sample_file', ')', ':', 'out', '=', 'set', '(', '[', ']', ')', 'ext', '=', '""', 'if', 'args', '.', 'no_container', 'else', '"_container"', 'with', 'open', '(', 'sample_file', ')', 'as', 'in_handle', ':', 'for', 'f', 'in', '_get_file_paths', '(', 'json', '.', 'load', '(', 'in_handle', ')', ')', ':', 'if', 'f', '.', 'startswith', '(', '"gs:"', ')', ':', 'out', '.', 'add', '(', '"gcp%s"', '%', 'ext', ')', 'elif', 'f', '.', 'startswith', '(', '"s3:"', ')', ':', 'out', '.', 'add', '(', '"s3%s"', '%', 'ext', ')', 'elif', 'f', '.', 'startswith', '(', '(', '"https:"', ',', '"http:"', ')', ')', ':', 'out', '.', 'add', '(', '"http%s"', '%', 'ext', ')', 'else', ':', 'out', '.', 'add', '(', '"local%s"', '%', 'ext', ')', 'return', 'out'] | Retrieve the types of inputs and staging based on sample JSON and arguments. | ['Retrieve', 'the', 'types', 'of', 'inputs', 'and', 'staging', 'based', 'on', 'sample', 'JSON', 'and', 'arguments', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L130-L145 |
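A hedged sketch of how the scheme detection above might classify a hand-written sample JSON; the argument bag and file layout are invented, and real bcbio sample files are richer:
import argparse, json, tempfile

args = argparse.Namespace(no_container=False)
sample = {"files": ["gs://bucket/reads.bam", "s3://bucket/ref.fa", "/data/regions.bed"]}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fh:
    json.dump(sample, fh)
# _get_filesystem_types(args, fh.name) would then contain entries like
# {"gcp_container", "s3_container", "local_container"}.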
3,140 | foremast/foremast | src/foremast/autoscaling_policy/create_policy.py | AutoScalingPolicy.prepare_policy_template | def prepare_policy_template(self, scaling_type, period_sec, server_group):
"""Renders scaling policy templates based on configs and variables.
After rendering, POSTs the json to Spinnaker for creation.
Args:
            scaling_type (str): ``scale_up`` or ``scale_down``. Type of policy
period_sec (int): Period of time to look at metrics for determining scale
server_group (str): The name of the server group to render template for
"""
template_kwargs = {
'app': self.app,
'env': self.env,
'region': self.region,
'server_group': server_group,
'period_sec': period_sec,
'scaling_policy': self.settings['asg']['scaling_policy'],
}
if scaling_type == 'scale_up':
template_kwargs['operation'] = 'increase'
template_kwargs['comparisonOperator'] = 'GreaterThanThreshold'
template_kwargs['scalingAdjustment'] = 1
elif scaling_type == 'scale_down':
cur_threshold = int(self.settings['asg']['scaling_policy']['threshold'])
self.settings['asg']['scaling_policy']['threshold'] = floor(cur_threshold * 0.5)
template_kwargs['operation'] = 'decrease'
template_kwargs['comparisonOperator'] = 'LessThanThreshold'
template_kwargs['scalingAdjustment'] = -1
rendered_template = get_template(template_file='infrastructure/autoscaling_policy.json.j2', **template_kwargs)
self.log.info('Creating a %s policy in %s for %s', scaling_type, self.env, self.app)
wait_for_task(rendered_template)
self.log.info('Successfully created a %s policy in %s for %s', scaling_type, self.env, self.app) | python | def prepare_policy_template(self, scaling_type, period_sec, server_group):
"""Renders scaling policy templates based on configs and variables.
After rendering, POSTs the json to Spinnaker for creation.
Args:
            scaling_type (str): ``scale_up`` or ``scale_down``. Type of policy
period_sec (int): Period of time to look at metrics for determining scale
server_group (str): The name of the server group to render template for
"""
template_kwargs = {
'app': self.app,
'env': self.env,
'region': self.region,
'server_group': server_group,
'period_sec': period_sec,
'scaling_policy': self.settings['asg']['scaling_policy'],
}
if scaling_type == 'scale_up':
template_kwargs['operation'] = 'increase'
template_kwargs['comparisonOperator'] = 'GreaterThanThreshold'
template_kwargs['scalingAdjustment'] = 1
elif scaling_type == 'scale_down':
cur_threshold = int(self.settings['asg']['scaling_policy']['threshold'])
self.settings['asg']['scaling_policy']['threshold'] = floor(cur_threshold * 0.5)
template_kwargs['operation'] = 'decrease'
template_kwargs['comparisonOperator'] = 'LessThanThreshold'
template_kwargs['scalingAdjustment'] = -1
rendered_template = get_template(template_file='infrastructure/autoscaling_policy.json.j2', **template_kwargs)
self.log.info('Creating a %s policy in %s for %s', scaling_type, self.env, self.app)
wait_for_task(rendered_template)
self.log.info('Successfully created a %s policy in %s for %s', scaling_type, self.env, self.app) | ['def', 'prepare_policy_template', '(', 'self', ',', 'scaling_type', ',', 'period_sec', ',', 'server_group', ')', ':', 'template_kwargs', '=', '{', "'app'", ':', 'self', '.', 'app', ',', "'env'", ':', 'self', '.', 'env', ',', "'region'", ':', 'self', '.', 'region', ',', "'server_group'", ':', 'server_group', ',', "'period_sec'", ':', 'period_sec', ',', "'scaling_policy'", ':', 'self', '.', 'settings', '[', "'asg'", ']', '[', "'scaling_policy'", ']', ',', '}', 'if', 'scaling_type', '==', "'scale_up'", ':', 'template_kwargs', '[', "'operation'", ']', '=', "'increase'", 'template_kwargs', '[', "'comparisonOperator'", ']', '=', "'GreaterThanThreshold'", 'template_kwargs', '[', "'scalingAdjustment'", ']', '=', '1', 'elif', 'scaling_type', '==', "'scale_down'", ':', 'cur_threshold', '=', 'int', '(', 'self', '.', 'settings', '[', "'asg'", ']', '[', "'scaling_policy'", ']', '[', "'threshold'", ']', ')', 'self', '.', 'settings', '[', "'asg'", ']', '[', "'scaling_policy'", ']', '[', "'threshold'", ']', '=', 'floor', '(', 'cur_threshold', '*', '0.5', ')', 'template_kwargs', '[', "'operation'", ']', '=', "'decrease'", 'template_kwargs', '[', "'comparisonOperator'", ']', '=', "'LessThanThreshold'", 'template_kwargs', '[', "'scalingAdjustment'", ']', '=', '-', '1', 'rendered_template', '=', 'get_template', '(', 'template_file', '=', "'infrastructure/autoscaling_policy.json.j2'", ',', '*', '*', 'template_kwargs', ')', 'self', '.', 'log', '.', 'info', '(', "'Creating a %s policy in %s for %s'", ',', 'scaling_type', ',', 'self', '.', 'env', ',', 'self', '.', 'app', ')', 'wait_for_task', '(', 'rendered_template', ')', 'self', '.', 'log', '.', 'info', '(', "'Successfully created a %s policy in %s for %s'", ',', 'scaling_type', ',', 'self', '.', 'env', ',', 'self', '.', 'app', ')'] | Renders scaling policy templates based on configs and variables.
After rendering, POSTs the json to Spinnaker for creation.
Args:
            scaling_type (str): ``scale_up`` or ``scale_down``. Type of policy
period_sec (int): Period of time to look at metrics for determining scale
server_group (str): The name of the server group to render template for | ['Renders', 'scaling', 'policy', 'templates', 'based', 'on', 'configs', 'and', 'variables', '.', 'After', 'rendering', 'POSTs', 'the', 'json', 'to', 'Spinnaker', 'for', 'creation', '.'] | train | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/autoscaling_policy/create_policy.py#L56-L88 |
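A small worked example of the ``scale_down`` threshold adjustment applied in the elif branch above:
from math import floor

configured_threshold = 75                                   # hypothetical scaling_policy threshold
scale_down_threshold = floor(configured_threshold * 0.5)    # -> 37, the value used for the scale_down alarm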
3,141 | googleapis/google-auth-library-python | google/oauth2/_client.py | refresh_grant | def refresh_grant(request, token_uri, refresh_token, client_id, client_secret):
"""Implements the OAuth 2.0 refresh token grant.
    For more details, see `rfc6749 section 6`_.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
        token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
refresh_token (str): The refresh token to use to get a new access
token.
client_id (str): The OAuth 2.0 application's client ID.
        client_secret (str): The OAuth 2.0 application's client secret.
Returns:
Tuple[str, Optional[str], Optional[datetime], Mapping[str, str]]: The
access token, new refresh token, expiration, and additional data
returned by the token endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
    .. _rfc6749 section 6: https://tools.ietf.org/html/rfc6749#section-6
"""
body = {
'grant_type': _REFRESH_GRANT_TYPE,
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
}
response_data = _token_endpoint_request(request, token_uri, body)
try:
access_token = response_data['access_token']
except KeyError as caught_exc:
new_exc = exceptions.RefreshError(
'No access token in response.', response_data)
six.raise_from(new_exc, caught_exc)
refresh_token = response_data.get('refresh_token', refresh_token)
expiry = _parse_expiry(response_data)
return access_token, refresh_token, expiry, response_data | python | def refresh_grant(request, token_uri, refresh_token, client_id, client_secret):
"""Implements the OAuth 2.0 refresh token grant.
    For more details, see `rfc6749 section 6`_.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
        token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
refresh_token (str): The refresh token to use to get a new access
token.
client_id (str): The OAuth 2.0 application's client ID.
        client_secret (str): The OAuth 2.0 application's client secret.
Returns:
Tuple[str, Optional[str], Optional[datetime], Mapping[str, str]]: The
access token, new refresh token, expiration, and additional data
returned by the token endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
    .. _rfc6749 section 6: https://tools.ietf.org/html/rfc6749#section-6
"""
body = {
'grant_type': _REFRESH_GRANT_TYPE,
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
}
response_data = _token_endpoint_request(request, token_uri, body)
try:
access_token = response_data['access_token']
except KeyError as caught_exc:
new_exc = exceptions.RefreshError(
'No access token in response.', response_data)
six.raise_from(new_exc, caught_exc)
refresh_token = response_data.get('refresh_token', refresh_token)
expiry = _parse_expiry(response_data)
return access_token, refresh_token, expiry, response_data | ['def', 'refresh_grant', '(', 'request', ',', 'token_uri', ',', 'refresh_token', ',', 'client_id', ',', 'client_secret', ')', ':', 'body', '=', '{', "'grant_type'", ':', '_REFRESH_GRANT_TYPE', ',', "'client_id'", ':', 'client_id', ',', "'client_secret'", ':', 'client_secret', ',', "'refresh_token'", ':', 'refresh_token', ',', '}', 'response_data', '=', '_token_endpoint_request', '(', 'request', ',', 'token_uri', ',', 'body', ')', 'try', ':', 'access_token', '=', 'response_data', '[', "'access_token'", ']', 'except', 'KeyError', 'as', 'caught_exc', ':', 'new_exc', '=', 'exceptions', '.', 'RefreshError', '(', "'No access token in response.'", ',', 'response_data', ')', 'six', '.', 'raise_from', '(', 'new_exc', ',', 'caught_exc', ')', 'refresh_token', '=', 'response_data', '.', 'get', '(', "'refresh_token'", ',', 'refresh_token', ')', 'expiry', '=', '_parse_expiry', '(', 'response_data', ')', 'return', 'access_token', ',', 'refresh_token', ',', 'expiry', ',', 'response_data'] | Implements the OAuth 2.0 refresh token grant.
For more details, see `rfc678 section 6`_.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
        token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
refresh_token (str): The refresh token to use to get a new access
token.
client_id (str): The OAuth 2.0 application's client ID.
        client_secret (str): The OAuth 2.0 application's client secret.
Returns:
Tuple[str, Optional[str], Optional[datetime], Mapping[str, str]]: The
access token, new refresh token, expiration, and additional data
returned by the token endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
.. _rfc6748 section 6: https://tools.ietf.org/html/rfc6749#section-6 | ['Implements', 'the', 'OAuth', '2', '.', '0', 'refresh', 'token', 'grant', '.'] | train | https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/oauth2/_client.py#L204-L249 |
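A hedged usage sketch; the endpoint and credential strings are placeholders, and since this helper lives in a private module most callers go through google.oauth2.credentials.Credentials.refresh instead:
import google.auth.transport.requests
from google.oauth2 import _client

request = google.auth.transport.requests.Request()
access_token, new_refresh_token, expiry, extra = _client.refresh_grant(
    request,
    'https://oauth2.googleapis.com/token',                   # token endpoint (placeholder)
    'stored-refresh-token',                                   # placeholder
    'example-client-id.apps.googleusercontent.com',           # placeholder
    'example-client-secret',                                  # placeholder
)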
3,142 | bcbio/bcbio-nextgen | bcbio/variation/mutect.py | _config_params | def _config_params(base_config, assoc_files, region, out_file, items):
"""Add parameters based on configuration variables, associated files and genomic regions.
"""
params = []
dbsnp = assoc_files.get("dbsnp")
if dbsnp:
params += ["--dbsnp", dbsnp]
cosmic = assoc_files.get("cosmic")
if cosmic:
params += ["--cosmic", cosmic]
variant_regions = bedutils.population_variant_regions(items)
region = subset_variant_regions(variant_regions, region, out_file, items)
if region:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
"INTERSECTION"]
# set low frequency calling parameter if adjusted
# to set other MuTect parameters on contamination, pass options to resources for mutect
# --fraction_contamination --minimum_normal_allele_fraction
min_af = tz.get_in(["algorithm", "min_allele_fraction"], base_config)
if min_af:
params += ["--minimum_mutation_cell_fraction", "%.2f" % (min_af / 100.0)]
resources = config_utils.get_resources("mutect", base_config)
if resources.get("options") is not None:
params += [str(x) for x in resources.get("options", [])]
# Output quality scores
if "--enable_qscore_output" not in params:
params.append("--enable_qscore_output")
# drf not currently supported in MuTect to turn off duplicateread filter
# params += gatk.standard_cl_params(items)
return params | python | def _config_params(base_config, assoc_files, region, out_file, items):
"""Add parameters based on configuration variables, associated files and genomic regions.
"""
params = []
dbsnp = assoc_files.get("dbsnp")
if dbsnp:
params += ["--dbsnp", dbsnp]
cosmic = assoc_files.get("cosmic")
if cosmic:
params += ["--cosmic", cosmic]
variant_regions = bedutils.population_variant_regions(items)
region = subset_variant_regions(variant_regions, region, out_file, items)
if region:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
"INTERSECTION"]
# set low frequency calling parameter if adjusted
# to set other MuTect parameters on contamination, pass options to resources for mutect
# --fraction_contamination --minimum_normal_allele_fraction
min_af = tz.get_in(["algorithm", "min_allele_fraction"], base_config)
if min_af:
params += ["--minimum_mutation_cell_fraction", "%.2f" % (min_af / 100.0)]
resources = config_utils.get_resources("mutect", base_config)
if resources.get("options") is not None:
params += [str(x) for x in resources.get("options", [])]
# Output quality scores
if "--enable_qscore_output" not in params:
params.append("--enable_qscore_output")
# drf not currently supported in MuTect to turn off duplicateread filter
# params += gatk.standard_cl_params(items)
return params | ['def', '_config_params', '(', 'base_config', ',', 'assoc_files', ',', 'region', ',', 'out_file', ',', 'items', ')', ':', 'params', '=', '[', ']', 'dbsnp', '=', 'assoc_files', '.', 'get', '(', '"dbsnp"', ')', 'if', 'dbsnp', ':', 'params', '+=', '[', '"--dbsnp"', ',', 'dbsnp', ']', 'cosmic', '=', 'assoc_files', '.', 'get', '(', '"cosmic"', ')', 'if', 'cosmic', ':', 'params', '+=', '[', '"--cosmic"', ',', 'cosmic', ']', 'variant_regions', '=', 'bedutils', '.', 'population_variant_regions', '(', 'items', ')', 'region', '=', 'subset_variant_regions', '(', 'variant_regions', ',', 'region', ',', 'out_file', ',', 'items', ')', 'if', 'region', ':', 'params', '+=', '[', '"-L"', ',', 'bamprep', '.', 'region_to_gatk', '(', 'region', ')', ',', '"--interval_set_rule"', ',', '"INTERSECTION"', ']', '# set low frequency calling parameter if adjusted', '# to set other MuTect parameters on contamination, pass options to resources for mutect', '# --fraction_contamination --minimum_normal_allele_fraction', 'min_af', '=', 'tz', '.', 'get_in', '(', '[', '"algorithm"', ',', '"min_allele_fraction"', ']', ',', 'base_config', ')', 'if', 'min_af', ':', 'params', '+=', '[', '"--minimum_mutation_cell_fraction"', ',', '"%.2f"', '%', '(', 'min_af', '/', '100.0', ')', ']', 'resources', '=', 'config_utils', '.', 'get_resources', '(', '"mutect"', ',', 'base_config', ')', 'if', 'resources', '.', 'get', '(', '"options"', ')', 'is', 'not', 'None', ':', 'params', '+=', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'resources', '.', 'get', '(', '"options"', ',', '[', ']', ')', ']', '# Output quality scores', 'if', '"--enable_qscore_output"', 'not', 'in', 'params', ':', 'params', '.', 'append', '(', '"--enable_qscore_output"', ')', '# drf not currently supported in MuTect to turn off duplicateread filter', '# params += gatk.standard_cl_params(items)', 'return', 'params'] | Add parameters based on configuration variables, associated files and genomic regions. | ['Add', 'parameters', 'based', 'on', 'configuration', 'variables', 'associated', 'files', 'and', 'genomic', 'regions', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L46-L75 |
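A worked example of the allele-fraction conversion performed above:
min_af = 5                                    # hypothetical min_allele_fraction, in percent
assert "%.2f" % (min_af / 100.0) == "0.05"    # value passed to --minimum_mutation_cell_fraction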
3,143 | Azure/azure-storage-python | azure-storage-file/azure/storage/file/fileservice.py | FileService.exists | def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None):
'''
Returns a boolean indicating whether the share exists if only share name is
        given. If directory_name is specified, a boolean will be returned indicating
if the directory exists. If file_name is specified as well, a boolean will be
returned indicating if the file exists.
:param str share_name:
Name of a share.
:param str directory_name:
The path to a directory.
:param str file_name:
Name of a file.
:param int timeout:
The timeout parameter is expressed in seconds.
:param str snapshot:
A string that represents the snapshot version, if applicable.
:return: A boolean indicating whether the resource exists.
:rtype: bool
'''
_validate_not_none('share_name', share_name)
try:
request = HTTPRequest()
request.method = 'HEAD' if file_name is not None else 'GET'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
if file_name is not None:
restype = None
expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _PARENT_NOT_FOUND_ERROR_CODE]
elif directory_name is not None:
restype = 'directory'
expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _SHARE_NOT_FOUND_ERROR_CODE,
_PARENT_NOT_FOUND_ERROR_CODE]
else:
restype = 'share'
expected_errors = [_SHARE_NOT_FOUND_ERROR_CODE]
request.query = {
'restype': restype,
'timeout': _int_to_str(timeout),
'sharesnapshot': _to_str(snapshot)
}
self._perform_request(request, expected_errors=expected_errors)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False | python | def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None):
'''
Returns a boolean indicating whether the share exists if only share name is
        given. If directory_name is specified, a boolean will be returned indicating
if the directory exists. If file_name is specified as well, a boolean will be
returned indicating if the file exists.
:param str share_name:
Name of a share.
:param str directory_name:
The path to a directory.
:param str file_name:
Name of a file.
:param int timeout:
The timeout parameter is expressed in seconds.
:param str snapshot:
A string that represents the snapshot version, if applicable.
:return: A boolean indicating whether the resource exists.
:rtype: bool
'''
_validate_not_none('share_name', share_name)
try:
request = HTTPRequest()
request.method = 'HEAD' if file_name is not None else 'GET'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
if file_name is not None:
restype = None
expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _PARENT_NOT_FOUND_ERROR_CODE]
elif directory_name is not None:
restype = 'directory'
expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _SHARE_NOT_FOUND_ERROR_CODE,
_PARENT_NOT_FOUND_ERROR_CODE]
else:
restype = 'share'
expected_errors = [_SHARE_NOT_FOUND_ERROR_CODE]
request.query = {
'restype': restype,
'timeout': _int_to_str(timeout),
'sharesnapshot': _to_str(snapshot)
}
self._perform_request(request, expected_errors=expected_errors)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False | ['def', 'exists', '(', 'self', ',', 'share_name', ',', 'directory_name', '=', 'None', ',', 'file_name', '=', 'None', ',', 'timeout', '=', 'None', ',', 'snapshot', '=', 'None', ')', ':', '_validate_not_none', '(', "'share_name'", ',', 'share_name', ')', 'try', ':', 'request', '=', 'HTTPRequest', '(', ')', 'request', '.', 'method', '=', "'HEAD'", 'if', 'file_name', 'is', 'not', 'None', 'else', "'GET'", 'request', '.', 'host_locations', '=', 'self', '.', '_get_host_locations', '(', ')', 'request', '.', 'path', '=', '_get_path', '(', 'share_name', ',', 'directory_name', ',', 'file_name', ')', 'if', 'file_name', 'is', 'not', 'None', ':', 'restype', '=', 'None', 'expected_errors', '=', '[', '_RESOURCE_NOT_FOUND_ERROR_CODE', ',', '_PARENT_NOT_FOUND_ERROR_CODE', ']', 'elif', 'directory_name', 'is', 'not', 'None', ':', 'restype', '=', "'directory'", 'expected_errors', '=', '[', '_RESOURCE_NOT_FOUND_ERROR_CODE', ',', '_SHARE_NOT_FOUND_ERROR_CODE', ',', '_PARENT_NOT_FOUND_ERROR_CODE', ']', 'else', ':', 'restype', '=', "'share'", 'expected_errors', '=', '[', '_SHARE_NOT_FOUND_ERROR_CODE', ']', 'request', '.', 'query', '=', '{', "'restype'", ':', 'restype', ',', "'timeout'", ':', '_int_to_str', '(', 'timeout', ')', ',', "'sharesnapshot'", ':', '_to_str', '(', 'snapshot', ')', '}', 'self', '.', '_perform_request', '(', 'request', ',', 'expected_errors', '=', 'expected_errors', ')', 'return', 'True', 'except', 'AzureHttpError', 'as', 'ex', ':', '_dont_fail_not_exist', '(', 'ex', ')', 'return', 'False'] | Returns a boolean indicating whether the share exists if only share name is
        given. If directory_name is specified, a boolean will be returned indicating
if the directory exists. If file_name is specified as well, a boolean will be
returned indicating if the file exists.
:param str share_name:
Name of a share.
:param str directory_name:
The path to a directory.
:param str file_name:
Name of a file.
:param int timeout:
The timeout parameter is expressed in seconds.
:param str snapshot:
A string that represents the snapshot version, if applicable.
:return: A boolean indicating whether the resource exists.
:rtype: bool | ['Returns', 'a', 'boolean', 'indicating', 'whether', 'the', 'share', 'exists', 'if', 'only', 'share', 'name', 'is', 'given', '.', 'If', 'directory_name', 'is', 'specificed', 'a', 'boolean', 'will', 'be', 'returned', 'indicating', 'if', 'the', 'directory', 'exists', '.', 'If', 'file_name', 'is', 'specified', 'as', 'well', 'a', 'boolean', 'will', 'be', 'returned', 'indicating', 'if', 'the', 'file', 'exists', '.'] | train | https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-file/azure/storage/file/fileservice.py#L1260-L1307 |
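An illustrative call; account credentials and share/file names are placeholders:
from azure.storage.file import FileService

service = FileService(account_name='example', account_key='...')
if service.exists('myshare', directory_name='logs', file_name='app.log'):
    print('file is present')
elif service.exists('myshare'):
    print('share exists but the file does not')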
3,144 | wesm/feather | cpp/build-support/cpplint.py | _DropCommonSuffixes | def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0] | python | def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0] | ['def', '_DropCommonSuffixes', '(', 'filename', ')', ':', 'for', 'suffix', 'in', '(', "'test.cc'", ',', "'regtest.cc'", ',', "'unittest.cc'", ',', "'inl.h'", ',', "'impl.h'", ',', "'internal.h'", ')', ':', 'if', '(', 'filename', '.', 'endswith', '(', 'suffix', ')', 'and', 'len', '(', 'filename', ')', '>', 'len', '(', 'suffix', ')', 'and', 'filename', '[', '-', 'len', '(', 'suffix', ')', '-', '1', ']', 'in', '(', "'-'", ',', "'_'", ')', ')', ':', 'return', 'filename', '[', ':', '-', 'len', '(', 'suffix', ')', '-', '1', ']', 'return', 'os', '.', 'path', '.', 'splitext', '(', 'filename', ')', '[', '0', ']'] | Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed. | ['Drops', 'common', 'suffixes', 'like', '_test', '.', 'cc', 'or', '-', 'inl', '.', 'h', 'from', 'filename', '.'] | train | https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L4501-L4525 |
3,145 | aio-libs/aiohttp-devtools | aiohttp_devtools/start/template/app/settings.py | Settings.substitute_environ | def substitute_environ(self):
"""
Substitute environment variables into settings.
"""
for attr_name in dir(self):
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
orig_value = getattr(self, attr_name)
is_required = isinstance(orig_value, Required)
orig_type = orig_value.v_type if is_required else type(orig_value)
env_var_name = self._ENV_PREFIX + attr_name
env_var = os.getenv(env_var_name, None)
if env_var is not None:
if issubclass(orig_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(orig_type, int):
env_var = int(env_var)
elif issubclass(orig_type, Path):
env_var = Path(env_var)
elif issubclass(orig_type, bytes):
env_var = env_var.encode()
# could do floats here and lists etc via json
setattr(self, attr_name, env_var)
elif is_required and attr_name not in self._custom_settings:
raise RuntimeError('The required environment variable "{0}" is currently not set, '
'you\'ll need to run `source activate.settings.sh` '
'or you can set that single environment variable with '
'`export {0}="<value>"`'.format(env_var_name)) | python | def substitute_environ(self):
"""
Substitute environment variables into settings.
"""
for attr_name in dir(self):
if attr_name.startswith('_') or attr_name.upper() != attr_name:
continue
orig_value = getattr(self, attr_name)
is_required = isinstance(orig_value, Required)
orig_type = orig_value.v_type if is_required else type(orig_value)
env_var_name = self._ENV_PREFIX + attr_name
env_var = os.getenv(env_var_name, None)
if env_var is not None:
if issubclass(orig_type, bool):
env_var = env_var.upper() in ('1', 'TRUE')
elif issubclass(orig_type, int):
env_var = int(env_var)
elif issubclass(orig_type, Path):
env_var = Path(env_var)
elif issubclass(orig_type, bytes):
env_var = env_var.encode()
# could do floats here and lists etc via json
setattr(self, attr_name, env_var)
elif is_required and attr_name not in self._custom_settings:
raise RuntimeError('The required environment variable "{0}" is currently not set, '
'you\'ll need to run `source activate.settings.sh` '
'or you can set that single environment variable with '
'`export {0}="<value>"`'.format(env_var_name)) | ['def', 'substitute_environ', '(', 'self', ')', ':', 'for', 'attr_name', 'in', 'dir', '(', 'self', ')', ':', 'if', 'attr_name', '.', 'startswith', '(', "'_'", ')', 'or', 'attr_name', '.', 'upper', '(', ')', '!=', 'attr_name', ':', 'continue', 'orig_value', '=', 'getattr', '(', 'self', ',', 'attr_name', ')', 'is_required', '=', 'isinstance', '(', 'orig_value', ',', 'Required', ')', 'orig_type', '=', 'orig_value', '.', 'v_type', 'if', 'is_required', 'else', 'type', '(', 'orig_value', ')', 'env_var_name', '=', 'self', '.', '_ENV_PREFIX', '+', 'attr_name', 'env_var', '=', 'os', '.', 'getenv', '(', 'env_var_name', ',', 'None', ')', 'if', 'env_var', 'is', 'not', 'None', ':', 'if', 'issubclass', '(', 'orig_type', ',', 'bool', ')', ':', 'env_var', '=', 'env_var', '.', 'upper', '(', ')', 'in', '(', "'1'", ',', "'TRUE'", ')', 'elif', 'issubclass', '(', 'orig_type', ',', 'int', ')', ':', 'env_var', '=', 'int', '(', 'env_var', ')', 'elif', 'issubclass', '(', 'orig_type', ',', 'Path', ')', ':', 'env_var', '=', 'Path', '(', 'env_var', ')', 'elif', 'issubclass', '(', 'orig_type', ',', 'bytes', ')', ':', 'env_var', '=', 'env_var', '.', 'encode', '(', ')', '# could do floats here and lists etc via json', 'setattr', '(', 'self', ',', 'attr_name', ',', 'env_var', ')', 'elif', 'is_required', 'and', 'attr_name', 'not', 'in', 'self', '.', '_custom_settings', ':', 'raise', 'RuntimeError', '(', '\'The required environment variable "{0}" is currently not set, \'', "'you\\'ll need to run `source activate.settings.sh` '", "'or you can set that single environment variable with '", '\'`export {0}="<value>"`\'', '.', 'format', '(', 'env_var_name', ')', ')'] | Substitute environment variables into settings. | ['Substitute', 'environment', 'variables', 'into', 'settings', '.'] | train | https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/settings.py#L48-L76 |
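A hedged sketch of the pattern this enables; the ``APP_`` prefix, the Required constructor signature, and the construction call are assumptions based on the attributes referenced above:
import os

class MySettings(Settings):
    DB_DSN = Required(str)    # must come from the environment
    PORT = 8000               # plain default, can be overridden

os.environ['APP_DB_DSN'] = 'postgres://localhost/app'   # 'APP_' prefix is assumed
os.environ['APP_PORT'] = '9000'

settings = MySettings()          # construction details may differ
settings.substitute_environ()    # PORT -> 9000 (int), DB_DSN filled from the env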
3,146 | sanger-pathogens/circlator | circlator/merge.py | Merger._get_possible_circular_ref_contigs | def _get_possible_circular_ref_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None):
'''Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits)'''
writing_log_file = None not in [log_fh, log_outprefix]
maybe_circular = {}
all_nucmer_hits = []
for l in nucmer_hits.values():
all_nucmer_hits.extend(l)
nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits)
for ref_name, list_of_hits in nucmer_hits.items():
if writing_log_file:
print(log_outprefix, ref_name, 'Checking ' + str(len(list_of_hits)) + ' nucmer hits', sep='\t', file=log_fh)
longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits)
longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits)
if longest_start_hit == longest_end_hit:
second_longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits, hits_to_exclude={longest_start_hit})
second_longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits, hits_to_exclude={longest_end_hit})
if second_longest_start_hit is not None:
longest_start_hit = self._get_hit_nearest_ref_start([longest_start_hit, second_longest_start_hit])
if second_longest_end_hit is not None:
longest_end_hit = self._get_hit_nearest_ref_end([longest_end_hit, second_longest_end_hit])
if (
longest_start_hit is not None
and longest_end_hit is not None
and longest_start_hit != longest_end_hit
and self._hits_have_same_query(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, ref_name, 'potential pair of nucmer hits for circularization:', sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_start_hit, sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_end_hit, sep='\t', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
nucmer_hits_by_qry[longest_start_hit.qry_name],
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if writing_log_file and has_longer_hit:
print(log_outprefix, ref_name, 'cannot use this pair because longer match was found', sep='\t', file=log_fh)
can_circularise = self._can_circularise(longest_start_hit, longest_end_hit)
if writing_log_file and not can_circularise:
print(log_outprefix, ref_name, 'cannot use this pair because positions/orientations of matches no good', sep='\t', file=log_fh)
if (not has_longer_hit) and can_circularise:
print(log_outprefix, ref_name, 'can use this pair of hits', sep='\t', file=log_fh)
maybe_circular[ref_name] = (longest_start_hit, longest_end_hit)
return maybe_circular | python | def _get_possible_circular_ref_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None):
'''Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits)'''
writing_log_file = None not in [log_fh, log_outprefix]
maybe_circular = {}
all_nucmer_hits = []
for l in nucmer_hits.values():
all_nucmer_hits.extend(l)
nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits)
for ref_name, list_of_hits in nucmer_hits.items():
if writing_log_file:
print(log_outprefix, ref_name, 'Checking ' + str(len(list_of_hits)) + ' nucmer hits', sep='\t', file=log_fh)
longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits)
longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits)
if longest_start_hit == longest_end_hit:
second_longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits, hits_to_exclude={longest_start_hit})
second_longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits, hits_to_exclude={longest_end_hit})
if second_longest_start_hit is not None:
longest_start_hit = self._get_hit_nearest_ref_start([longest_start_hit, second_longest_start_hit])
if second_longest_end_hit is not None:
longest_end_hit = self._get_hit_nearest_ref_end([longest_end_hit, second_longest_end_hit])
if (
longest_start_hit is not None
and longest_end_hit is not None
and longest_start_hit != longest_end_hit
and self._hits_have_same_query(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, ref_name, 'potential pair of nucmer hits for circularization:', sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_start_hit, sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_end_hit, sep='\t', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
nucmer_hits_by_qry[longest_start_hit.qry_name],
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if writing_log_file and has_longer_hit:
print(log_outprefix, ref_name, 'cannot use this pair because longer match was found', sep='\t', file=log_fh)
can_circularise = self._can_circularise(longest_start_hit, longest_end_hit)
if writing_log_file and not can_circularise:
print(log_outprefix, ref_name, 'cannot use this pair because positions/orientations of matches no good', sep='\t', file=log_fh)
if (not has_longer_hit) and can_circularise:
print(log_outprefix, ref_name, 'can use this pair of hits', sep='\t', file=log_fh)
maybe_circular[ref_name] = (longest_start_hit, longest_end_hit)
return maybe_circular | ['def', '_get_possible_circular_ref_contigs', '(', 'self', ',', 'nucmer_hits', ',', 'log_fh', '=', 'None', ',', 'log_outprefix', '=', 'None', ')', ':', 'writing_log_file', '=', 'None', 'not', 'in', '[', 'log_fh', ',', 'log_outprefix', ']', 'maybe_circular', '=', '{', '}', 'all_nucmer_hits', '=', '[', ']', 'for', 'l', 'in', 'nucmer_hits', '.', 'values', '(', ')', ':', 'all_nucmer_hits', '.', 'extend', '(', 'l', ')', 'nucmer_hits_by_qry', '=', 'self', '.', '_hits_hashed_by_query', '(', 'all_nucmer_hits', ')', 'for', 'ref_name', ',', 'list_of_hits', 'in', 'nucmer_hits', '.', 'items', '(', ')', ':', 'if', 'writing_log_file', ':', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "'Checking '", '+', 'str', '(', 'len', '(', 'list_of_hits', ')', ')', '+', "' nucmer hits'", ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 'longest_start_hit', '=', 'self', '.', '_get_longest_hit_at_ref_start', '(', 'list_of_hits', ')', 'longest_end_hit', '=', 'self', '.', '_get_longest_hit_at_ref_end', '(', 'list_of_hits', ')', 'if', 'longest_start_hit', '==', 'longest_end_hit', ':', 'second_longest_start_hit', '=', 'self', '.', '_get_longest_hit_at_ref_start', '(', 'list_of_hits', ',', 'hits_to_exclude', '=', '{', 'longest_start_hit', '}', ')', 'second_longest_end_hit', '=', 'self', '.', '_get_longest_hit_at_ref_end', '(', 'list_of_hits', ',', 'hits_to_exclude', '=', '{', 'longest_end_hit', '}', ')', 'if', 'second_longest_start_hit', 'is', 'not', 'None', ':', 'longest_start_hit', '=', 'self', '.', '_get_hit_nearest_ref_start', '(', '[', 'longest_start_hit', ',', 'second_longest_start_hit', ']', ')', 'if', 'second_longest_end_hit', 'is', 'not', 'None', ':', 'longest_end_hit', '=', 'self', '.', '_get_hit_nearest_ref_end', '(', '[', 'longest_end_hit', ',', 'second_longest_end_hit', ']', ')', 'if', '(', 'longest_start_hit', 'is', 'not', 'None', 'and', 'longest_end_hit', 'is', 'not', 'None', 'and', 'longest_start_hit', '!=', 'longest_end_hit', 'and', 'self', '.', '_hits_have_same_query', '(', 'longest_start_hit', ',', 'longest_end_hit', ')', ')', ':', 'if', 'writing_log_file', ':', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "'potential pair of nucmer hits for circularization:'", ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "''", ',', 'longest_start_hit', ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "''", ',', 'longest_end_hit', ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 'shortest_hit_length', '=', 'self', '.', '_min_qry_hit_length', '(', '[', 'longest_start_hit', ',', 'longest_end_hit', ']', ')', 'has_longer_hit', '=', 'self', '.', '_has_qry_hit_longer_than', '(', 'nucmer_hits_by_qry', '[', 'longest_start_hit', '.', 'qry_name', ']', ',', 'shortest_hit_length', ',', 'hits_to_exclude', '=', '{', 'longest_start_hit', ',', 'longest_end_hit', '}', ')', 'if', 'writing_log_file', 'and', 'has_longer_hit', ':', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "'cannot use this pair because longer match was found'", ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 'can_circularise', '=', 'self', '.', '_can_circularise', '(', 'longest_start_hit', ',', 'longest_end_hit', ')', 'if', 'writing_log_file', 'and', 'not', 'can_circularise', ':', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "'cannot use this pair because positions/orientations of matches no good'", ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 
'if', '(', 'not', 'has_longer_hit', ')', 'and', 'can_circularise', ':', 'print', '(', 'log_outprefix', ',', 'ref_name', ',', "'can use this pair of hits'", ',', 'sep', '=', "'\\t'", ',', 'file', '=', 'log_fh', ')', 'maybe_circular', '[', 'ref_name', ']', '=', '(', 'longest_start_hit', ',', 'longest_end_hit', ')', 'return', 'maybe_circular'] | Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits) | ['Returns', 'a', 'dict', 'ref', 'name', '=', '>', 'tuple', '(', 'hit', 'at', 'start', 'hit', 'at', 'end', ')', 'for', 'each', 'ref', 'sequence', 'in', 'the', 'hash', 'nucmer_hits', '(', 'each', 'value', 'is', 'a', 'list', 'of', 'nucmer', 'hits', ')'] | train | https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/merge.py#L240-L293 |
3,147 | mbedmicro/pyOCD | pyocd/core/session.py | Session.find_user_file | def find_user_file(self, option_name, filename_list):
"""! @brief Search the project directory for a file."""
if option_name is not None:
filePath = self._options.get(option_name, None)
else:
filePath = None
# Look for default filenames if a path wasn't provided.
if filePath is None:
for filename in filename_list:
thisPath = os.path.join(self.project_dir, filename)
if os.path.isfile(thisPath):
filePath = thisPath
break
# Use the path passed in options, which may be absolute, relative to the
# home directory, or relative to the project directory.
else:
filePath = os.path.expanduser(filePath)
if not os.path.isabs(filePath):
filePath = os.path.join(self.project_dir, filePath)
return filePath | python | def find_user_file(self, option_name, filename_list):
"""! @brief Search the project directory for a file."""
if option_name is not None:
filePath = self._options.get(option_name, None)
else:
filePath = None
# Look for default filenames if a path wasn't provided.
if filePath is None:
for filename in filename_list:
thisPath = os.path.join(self.project_dir, filename)
if os.path.isfile(thisPath):
filePath = thisPath
break
# Use the path passed in options, which may be absolute, relative to the
# home directory, or relative to the project directory.
else:
filePath = os.path.expanduser(filePath)
if not os.path.isabs(filePath):
filePath = os.path.join(self.project_dir, filePath)
return filePath | ['def', 'find_user_file', '(', 'self', ',', 'option_name', ',', 'filename_list', ')', ':', 'if', 'option_name', 'is', 'not', 'None', ':', 'filePath', '=', 'self', '.', '_options', '.', 'get', '(', 'option_name', ',', 'None', ')', 'else', ':', 'filePath', '=', 'None', "# Look for default filenames if a path wasn't provided.", 'if', 'filePath', 'is', 'None', ':', 'for', 'filename', 'in', 'filename_list', ':', 'thisPath', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'project_dir', ',', 'filename', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'thisPath', ')', ':', 'filePath', '=', 'thisPath', 'break', '# Use the path passed in options, which may be absolute, relative to the', '# home directory, or relative to the project directory.', 'else', ':', 'filePath', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'filePath', ')', 'if', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'filePath', ')', ':', 'filePath', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'project_dir', ',', 'filePath', ')', 'return', 'filePath'] | ! @brief Search the project directory for a file. | ['!'] | train | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/core/session.py#L167-L188 |
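
A minimal standalone sketch of the lookup order implemented above (try the default file names inside the project directory, otherwise expand a user-supplied path and resolve it against the project directory). It does not use pyOCD itself, and the file names passed at the bottom are only illustrative:

import os

def find_user_file(project_dir, option_path, default_names):
    # No explicit path given: return the first default file that exists
    # in the project directory, or None if none is found.
    if option_path is None:
        for name in default_names:
            candidate = os.path.join(project_dir, name)
            if os.path.isfile(candidate):
                return candidate
        return None
    # Explicit path given: expand ~ and resolve relative paths against
    # the project directory.
    path = os.path.expanduser(option_path)
    if not os.path.isabs(path):
        path = os.path.join(project_dir, path)
    return path

print(find_user_file(".", None, ["pyocd_user.py", ".pyocd_user.py"]))
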
3,148 | tensorflow/tensor2tensor | tensor2tensor/layers/common_attention.py | attention_bias_local | def attention_bias_local(length, max_backward, max_forward):
"""Create an bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimited.
max_forward: int, maximum distance forward to attend. Negative values
indicate unlimited.
Returns:
a `Tensor` with shape [1, 1, length, length].
"""
band = common_layers.ones_matrix_band_part(
length,
length,
max_backward,
max_forward,
out_shape=[1, 1, length, length])
return -1e9 * (1.0 - band) | python | def attention_bias_local(length, max_backward, max_forward):
"""Create an bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimited.
max_forward: int, maximum distance forward to attend. Negative values
indicate unlimited.
Returns:
a `Tensor` with shape [1, 1, length, length].
"""
band = common_layers.ones_matrix_band_part(
length,
length,
max_backward,
max_forward,
out_shape=[1, 1, length, length])
return -1e9 * (1.0 - band) | ['def', 'attention_bias_local', '(', 'length', ',', 'max_backward', ',', 'max_forward', ')', ':', 'band', '=', 'common_layers', '.', 'ones_matrix_band_part', '(', 'length', ',', 'length', ',', 'max_backward', ',', 'max_forward', ',', 'out_shape', '=', '[', '1', ',', '1', ',', 'length', ',', 'length', ']', ')', 'return', '-', '1e9', '*', '(', '1.0', '-', 'band', ')'] | Create an bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimited.
max_forward: int, maximum distance forward to attend. Negative values
indicate unlimited.
Returns:
a `Tensor` with shape [1, 1, length, length]. | ['Create', 'an', 'bias', 'tensor', 'to', 'be', 'added', 'to', 'attention', 'logits', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L873-L897 |
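
The same band-and-bias construction can be checked without TensorFlow; below is a NumPy re-implementation (an approximation of common_layers.ones_matrix_band_part, not the original code) in which a negative limit means "unlimited", matching the docstring:

import numpy as np

def attention_bias_local_np(length, max_backward, max_forward):
    i = np.arange(length)[:, None]  # query positions (rows)
    j = np.arange(length)[None, :]  # key positions (columns)
    # In-band when j is at most max_backward steps behind i and at most
    # max_forward steps ahead of i; negative limits disable that bound.
    back_ok = (i - j <= max_backward) | (max_backward < 0)
    fwd_ok = (j - i <= max_forward) | (max_forward < 0)
    band = (back_ok & fwd_ok).astype(np.float32)
    return (-1e9 * (1.0 - band)).reshape(1, 1, length, length)

# Causal-style bias: each position may attend to itself and one step back.
print(attention_bias_local_np(4, 1, 0)[0, 0])
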
3,149 | mozilla/moz-sql-parser | moz_sql_parser/formatting.py | escape | def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
ANSI uses single quotes, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2*quote)
return '{0}{1}{2}'.format(quote, identifier, quote) | python | def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
ANSI uses single quotes, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2*quote)
return '{0}{1}{2}'.format(quote, identifier, quote) | ['def', 'escape', '(', 'identifier', ',', 'ansi_quotes', ',', 'should_quote', ')', ':', 'if', 'not', 'should_quote', '(', 'identifier', ')', ':', 'return', 'identifier', 'quote', '=', '\'"\'', 'if', 'ansi_quotes', 'else', "'`'", 'identifier', '=', 'identifier', '.', 'replace', '(', 'quote', ',', '2', '*', 'quote', ')', 'return', "'{0}{1}{2}'", '.', 'format', '(', 'quote', ',', 'identifier', ',', 'quote', ')'] | Escape identifiers.
ANSI uses single quotes, but many databases use back quotes. | ['Escape', 'identifiers', '.'] | train | https://github.com/mozilla/moz-sql-parser/blob/35fcc69b8f73b48e1fd48025cae1e174d57c3921/moz_sql_parser/formatting.py#L39-L51 |
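
A self-contained illustration of the quoting rule above: when the caller-supplied predicate says the identifier needs quoting, embedded quote characters are doubled and the result is wrapped in the chosen quote character (double quotes for ANSI, backquotes otherwise):

def escape(identifier, ansi_quotes, should_quote):
    if not should_quote(identifier):
        return identifier
    quote = '"' if ansi_quotes else '`'
    return quote + identifier.replace(quote, 2 * quote) + quote

quote_everything = lambda name: True  # stand-in predicate
print(escape('weird "name"', True, quote_everything))   # "weird ""name"""
print(escape('weird name', False, quote_everything))    # `weird name`
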
3,150 | angr/angr | angr/analyses/cfg/cfg_fast_soot.py | CFGFastSoot._create_jobs | def _create_jobs(self, target, jumpkind, current_function_addr, soot_block, addr, cfg_node, stmt_addr, stmt_idx): # pylint:disable=arguments-differ
"""
Given a node and details of a successor, makes a list of CFGJobs
and if it is a call or exit marks it appropriately so in the CFG
:param int target: Destination of the resultant job
:param str jumpkind: The jumpkind of the edge going to this node
:param int current_function_addr: Address of the current function
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param int addr: The predecessor address
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int ins_addr: Address of the source instruction.
:param int stmt_addr: ID of the source statement.
:return: a list of CFGJobs
:rtype: list
"""
target_addr = target
jobs = [ ]
if target_addr is None:
# The target address is not a concrete value
if jumpkind == "Ijk_Ret":
# This block ends with a return instruction.
if current_function_addr != -1:
self._function_exits[current_function_addr].add(addr)
self._function_add_return_site(addr, current_function_addr)
self.functions[current_function_addr].returning = True
self._pending_jobs.add_returning_function(current_function_addr)
cfg_node.has_return = True
elif target_addr is not None:
# This is a direct jump with a concrete target.
# pylint: disable=too-many-nested-blocks
if jumpkind in ('Ijk_Boring', 'Ijk_InvalICache'):
# it might be a jumpout
target_func_addr = None
if target_addr in self._traced_addresses:
node = self.get_any_node(target_addr)
if node is not None:
target_func_addr = node.function_address
if target_func_addr is None:
target_func_addr = current_function_addr
to_outside = not target_func_addr == current_function_addr
edge = FunctionTransitionEdge(cfg_node, target_addr, current_function_addr,
to_outside=to_outside,
dst_func_addr=target_func_addr,
ins_addr=stmt_addr,
stmt_idx=stmt_idx,
)
ce = CFGJob(target_addr, target_func_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_ins_addr=stmt_addr, src_stmt_idx=stmt_idx, func_edges=[ edge ])
jobs.append(ce)
elif jumpkind == 'Ijk_Call' or jumpkind.startswith("Ijk_Sys"):
jobs += self._create_job_call(addr, soot_block, cfg_node, stmt_idx, stmt_addr, current_function_addr,
target_addr, jumpkind, is_syscall=False
)
self._pending_jobs.add_returning_function(target.method)
else:
# TODO: Support more jumpkinds
l.debug("Unsupported jumpkind %s", jumpkind)
return jobs | python | def _create_jobs(self, target, jumpkind, current_function_addr, soot_block, addr, cfg_node, stmt_addr, stmt_idx): # pylint:disable=arguments-differ
"""
Given a node and details of a successor, makes a list of CFGJobs
and if it is a call or exit marks it appropriately so in the CFG
:param int target: Destination of the resultant job
:param str jumpkind: The jumpkind of the edge going to this node
:param int current_function_addr: Address of the current function
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param int addr: The predecessor address
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int ins_addr: Address of the source instruction.
:param int stmt_addr: ID of the source statement.
:return: a list of CFGJobs
:rtype: list
"""
target_addr = target
jobs = [ ]
if target_addr is None:
# The target address is not a concrete value
if jumpkind == "Ijk_Ret":
# This block ends with a return instruction.
if current_function_addr != -1:
self._function_exits[current_function_addr].add(addr)
self._function_add_return_site(addr, current_function_addr)
self.functions[current_function_addr].returning = True
self._pending_jobs.add_returning_function(current_function_addr)
cfg_node.has_return = True
elif target_addr is not None:
# This is a direct jump with a concrete target.
# pylint: disable=too-many-nested-blocks
if jumpkind in ('Ijk_Boring', 'Ijk_InvalICache'):
# it might be a jumpout
target_func_addr = None
if target_addr in self._traced_addresses:
node = self.get_any_node(target_addr)
if node is not None:
target_func_addr = node.function_address
if target_func_addr is None:
target_func_addr = current_function_addr
to_outside = not target_func_addr == current_function_addr
edge = FunctionTransitionEdge(cfg_node, target_addr, current_function_addr,
to_outside=to_outside,
dst_func_addr=target_func_addr,
ins_addr=stmt_addr,
stmt_idx=stmt_idx,
)
ce = CFGJob(target_addr, target_func_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_ins_addr=stmt_addr, src_stmt_idx=stmt_idx, func_edges=[ edge ])
jobs.append(ce)
elif jumpkind == 'Ijk_Call' or jumpkind.startswith("Ijk_Sys"):
jobs += self._create_job_call(addr, soot_block, cfg_node, stmt_idx, stmt_addr, current_function_addr,
target_addr, jumpkind, is_syscall=False
)
self._pending_jobs.add_returning_function(target.method)
else:
# TODO: Support more jumpkinds
l.debug("Unsupported jumpkind %s", jumpkind)
return jobs | ['def', '_create_jobs', '(', 'self', ',', 'target', ',', 'jumpkind', ',', 'current_function_addr', ',', 'soot_block', ',', 'addr', ',', 'cfg_node', ',', 'stmt_addr', ',', 'stmt_idx', ')', ':', '# pylint:disable=arguments-differ', 'target_addr', '=', 'target', 'jobs', '=', '[', ']', 'if', 'target_addr', 'is', 'None', ':', '# The target address is not a concrete value', 'if', 'jumpkind', '==', '"Ijk_Ret"', ':', '# This block ends with a return instruction.', 'if', 'current_function_addr', '!=', '-', '1', ':', 'self', '.', '_function_exits', '[', 'current_function_addr', ']', '.', 'add', '(', 'addr', ')', 'self', '.', '_function_add_return_site', '(', 'addr', ',', 'current_function_addr', ')', 'self', '.', 'functions', '[', 'current_function_addr', ']', '.', 'returning', '=', 'True', 'self', '.', '_pending_jobs', '.', 'add_returning_function', '(', 'current_function_addr', ')', 'cfg_node', '.', 'has_return', '=', 'True', 'elif', 'target_addr', 'is', 'not', 'None', ':', '# This is a direct jump with a concrete target.', '# pylint: disable=too-many-nested-blocks', 'if', 'jumpkind', 'in', '(', "'Ijk_Boring'", ',', "'Ijk_InvalICache'", ')', ':', '# it might be a jumpout', 'target_func_addr', '=', 'None', 'if', 'target_addr', 'in', 'self', '.', '_traced_addresses', ':', 'node', '=', 'self', '.', 'get_any_node', '(', 'target_addr', ')', 'if', 'node', 'is', 'not', 'None', ':', 'target_func_addr', '=', 'node', '.', 'function_address', 'if', 'target_func_addr', 'is', 'None', ':', 'target_func_addr', '=', 'current_function_addr', 'to_outside', '=', 'not', 'target_func_addr', '==', 'current_function_addr', 'edge', '=', 'FunctionTransitionEdge', '(', 'cfg_node', ',', 'target_addr', ',', 'current_function_addr', ',', 'to_outside', '=', 'to_outside', ',', 'dst_func_addr', '=', 'target_func_addr', ',', 'ins_addr', '=', 'stmt_addr', ',', 'stmt_idx', '=', 'stmt_idx', ',', ')', 'ce', '=', 'CFGJob', '(', 'target_addr', ',', 'target_func_addr', ',', 'jumpkind', ',', 'last_addr', '=', 'addr', ',', 'src_node', '=', 'cfg_node', ',', 'src_ins_addr', '=', 'stmt_addr', ',', 'src_stmt_idx', '=', 'stmt_idx', ',', 'func_edges', '=', '[', 'edge', ']', ')', 'jobs', '.', 'append', '(', 'ce', ')', 'elif', 'jumpkind', '==', "'Ijk_Call'", 'or', 'jumpkind', '.', 'startswith', '(', '"Ijk_Sys"', ')', ':', 'jobs', '+=', 'self', '.', '_create_job_call', '(', 'addr', ',', 'soot_block', ',', 'cfg_node', ',', 'stmt_idx', ',', 'stmt_addr', ',', 'current_function_addr', ',', 'target_addr', ',', 'jumpkind', ',', 'is_syscall', '=', 'False', ')', 'self', '.', '_pending_jobs', '.', 'add_returning_function', '(', 'target', '.', 'method', ')', 'else', ':', '# TODO: Support more jumpkinds', 'l', '.', 'debug', '(', '"Unsupported jumpkind %s"', ',', 'jumpkind', ')', 'return', 'jobs'] | Given a node and details of a successor, makes a list of CFGJobs
and if it is a call or exit marks it appropriately so in the CFG
:param int target: Destination of the resultant job
:param str jumpkind: The jumpkind of the edge going to this node
:param int current_function_addr: Address of the current function
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param int addr: The predecessor address
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int ins_addr: Address of the source instruction.
:param int stmt_addr: ID of the source statement.
:return: a list of CFGJobs
:rtype: list | ['Given', 'a', 'node', 'and', 'details', 'of', 'a', 'successor', 'makes', 'a', 'list', 'of', 'CFGJobs', 'and', 'if', 'it', 'is', 'a', 'call', 'or', 'exit', 'marks', 'it', 'appropriately', 'so', 'in', 'the', 'CFG'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_fast_soot.py#L371-L443 |
3,151 | radujica/baloo | baloo/weld/weld_str.py | weld_str_get | def weld_str_get(array, i):
"""Retrieve character at index i.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
i : int
Index of character to retrieve. If greater than length of string, returns None.
Returns
-------
WeldObject
Representation of this computation.
"""
obj_id, weld_obj = create_weld_object(array)
index_literal = to_weld_literal(i, WeldLong())
missing_literal = default_missing_data_literal(WeldVec(WeldChar()))
missing_literal_id = get_weld_obj_id(weld_obj, missing_literal)
weld_template = """map(
{array},
|e: vec[i8]|
let lenString = len(e);
if({i} >= lenString,
{missing},
if({i} > 0L,
result(merge(appender[i8], lookup(slice(e, 0L, lenString), {i}))),
result(merge(appender[i8], lookup(slice(e, lenString, {i}), {i})))
)
)
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
i=index_literal,
missing=missing_literal_id)
return weld_obj | python | def weld_str_get(array, i):
"""Retrieve character at index i.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
i : int
Index of character to retrieve. If greater than length of string, returns None.
Returns
-------
WeldObject
Representation of this computation.
"""
obj_id, weld_obj = create_weld_object(array)
index_literal = to_weld_literal(i, WeldLong())
missing_literal = default_missing_data_literal(WeldVec(WeldChar()))
missing_literal_id = get_weld_obj_id(weld_obj, missing_literal)
weld_template = """map(
{array},
|e: vec[i8]|
let lenString = len(e);
if({i} >= lenString,
{missing},
if({i} > 0L,
result(merge(appender[i8], lookup(slice(e, 0L, lenString), {i}))),
result(merge(appender[i8], lookup(slice(e, lenString, {i}), {i})))
)
)
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
i=index_literal,
missing=missing_literal_id)
return weld_obj | ['def', 'weld_str_get', '(', 'array', ',', 'i', ')', ':', 'obj_id', ',', 'weld_obj', '=', 'create_weld_object', '(', 'array', ')', 'index_literal', '=', 'to_weld_literal', '(', 'i', ',', 'WeldLong', '(', ')', ')', 'missing_literal', '=', 'default_missing_data_literal', '(', 'WeldVec', '(', 'WeldChar', '(', ')', ')', ')', 'missing_literal_id', '=', 'get_weld_obj_id', '(', 'weld_obj', ',', 'missing_literal', ')', 'weld_template', '=', '"""map(\n {array},\n |e: vec[i8]|\n let lenString = len(e);\n if({i} >= lenString,\n {missing},\n if({i} > 0L,\n result(merge(appender[i8], lookup(slice(e, 0L, lenString), {i}))),\n result(merge(appender[i8], lookup(slice(e, lenString, {i}), {i})))\n )\n )\n)"""', 'weld_obj', '.', 'weld_code', '=', 'weld_template', '.', 'format', '(', 'array', '=', 'obj_id', ',', 'i', '=', 'index_literal', ',', 'missing', '=', 'missing_literal_id', ')', 'return', 'weld_obj'] | Retrieve character at index i.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
i : int
Index of character to retrieve. If greater than length of string, returns None.
Returns
-------
WeldObject
Representation of this computation. | ['Retrieve', 'character', 'at', 'index', 'i', '.'] | train | https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/weld/weld_str.py#L118-L155 |
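
Running the Weld template above requires a Weld installation, but the element-wise semantics it documents can be sketched in plain Python: the character at position i, or a missing value when the index is past the end of the string:

def str_get(strings, i):
    # Pure-Python reference of the documented behaviour for non-negative i.
    return [s[i] if i < len(s) else None for s in strings]

print(str_get(["abc", "a", ""], 1))  # ['b', None, None]
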
3,152 | DataDog/integrations-core | mongo/datadog_checks/mongo/mongo.py | MongoDb._build_metric_list_to_collect | def _build_metric_list_to_collect(self, additional_metrics):
"""
Build the metric list to collect based on the instance preferences.
"""
metrics_to_collect = {}
# Default metrics
for default_metrics in itervalues(self.DEFAULT_METRICS):
metrics_to_collect.update(default_metrics)
# Additional metrics
for option in additional_metrics:
additional_metrics = self.AVAILABLE_METRICS.get(option)
if not additional_metrics:
if option in self.DEFAULT_METRICS:
self.log.warning(
u"`%s` option is deprecated. The corresponding metrics are collected by default.", option
)
else:
self.log.warning(
u"Failed to extend the list of metrics to collect: unrecognized `%s` option", option
)
continue
self.log.debug(u"Adding `%s` corresponding metrics to the list of metrics to collect.", option)
metrics_to_collect.update(additional_metrics)
return metrics_to_collect | python | def _build_metric_list_to_collect(self, additional_metrics):
"""
Build the metric list to collect based on the instance preferences.
"""
metrics_to_collect = {}
# Default metrics
for default_metrics in itervalues(self.DEFAULT_METRICS):
metrics_to_collect.update(default_metrics)
# Additional metrics
for option in additional_metrics:
additional_metrics = self.AVAILABLE_METRICS.get(option)
if not additional_metrics:
if option in self.DEFAULT_METRICS:
self.log.warning(
u"`%s` option is deprecated. The corresponding metrics are collected by default.", option
)
else:
self.log.warning(
u"Failed to extend the list of metrics to collect: unrecognized `%s` option", option
)
continue
self.log.debug(u"Adding `%s` corresponding metrics to the list of metrics to collect.", option)
metrics_to_collect.update(additional_metrics)
return metrics_to_collect | ['def', '_build_metric_list_to_collect', '(', 'self', ',', 'additional_metrics', ')', ':', 'metrics_to_collect', '=', '{', '}', '# Defaut metrics', 'for', 'default_metrics', 'in', 'itervalues', '(', 'self', '.', 'DEFAULT_METRICS', ')', ':', 'metrics_to_collect', '.', 'update', '(', 'default_metrics', ')', '# Additional metrics metrics', 'for', 'option', 'in', 'additional_metrics', ':', 'additional_metrics', '=', 'self', '.', 'AVAILABLE_METRICS', '.', 'get', '(', 'option', ')', 'if', 'not', 'additional_metrics', ':', 'if', 'option', 'in', 'self', '.', 'DEFAULT_METRICS', ':', 'self', '.', 'log', '.', 'warning', '(', 'u"`%s` option is deprecated. The corresponding metrics are collected by default."', ',', 'option', ')', 'else', ':', 'self', '.', 'log', '.', 'warning', '(', 'u"Failed to extend the list of metrics to collect: unrecognized `%s` option"', ',', 'option', ')', 'continue', 'self', '.', 'log', '.', 'debug', '(', 'u"Adding `%s` corresponding metrics to the list of metrics to collect."', ',', 'option', ')', 'metrics_to_collect', '.', 'update', '(', 'additional_metrics', ')', 'return', 'metrics_to_collect'] | Build the metric list to collect based on the instance preferences. | ['Build', 'the', 'metric', 'list', 'to', 'collect', 'based', 'on', 'the', 'instance', 'preferences', '.'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mongo/datadog_checks/mongo/mongo.py#L500-L527 |
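
A dependency-free sketch of the same merge logic: start from the default metric groups, then fold in each requested optional group, warning when an option is deprecated (already covered by the defaults) or unknown. The group and metric names below are illustrative, not the check's real tables:

import logging

log = logging.getLogger("mongo-example")
DEFAULT_METRICS = {"base": {"mem.resident": "gauge"}, "locks": {"locks.Global": "rate"}}
AVAILABLE_METRICS = {"tcmalloc": {"tcmalloc.generic.heap_size": "gauge"}}

def build_metric_list(additional_metrics):
    metrics = {}
    for group in DEFAULT_METRICS.values():
        metrics.update(group)
    for option in additional_metrics:
        extra = AVAILABLE_METRICS.get(option)
        if not extra:
            if option in DEFAULT_METRICS:
                log.warning("`%s` is deprecated; its metrics are collected by default.", option)
            else:
                log.warning("unrecognized `%s` option", option)
            continue
        metrics.update(extra)
    return metrics

print(build_metric_list(["tcmalloc", "locks", "bogus"]))
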
3,153 | spencerahill/aospy | aospy/calc.py | Calc._file_name | def _file_name(self, dtype_out_time, extension='nc'):
"""Create the name of the aospy file."""
if dtype_out_time is None:
dtype_out_time = ''
out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,
dtype_vert=self.dtype_out_vert)
in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,
self.dtype_in_vert)
start_year = utils.times.infer_year(self.start_date)
end_year = utils.times.infer_year(self.end_date)
yr_lbl = utils.io.yr_label((start_year, end_year))
return '.'.join(
[self.name, out_lbl, in_lbl, self.model.name,
self.run.name, yr_lbl, extension]
).replace('..', '.') | python | def _file_name(self, dtype_out_time, extension='nc'):
"""Create the name of the aospy file."""
if dtype_out_time is None:
dtype_out_time = ''
out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,
dtype_vert=self.dtype_out_vert)
in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,
self.dtype_in_vert)
start_year = utils.times.infer_year(self.start_date)
end_year = utils.times.infer_year(self.end_date)
yr_lbl = utils.io.yr_label((start_year, end_year))
return '.'.join(
[self.name, out_lbl, in_lbl, self.model.name,
self.run.name, yr_lbl, extension]
).replace('..', '.') | ['def', '_file_name', '(', 'self', ',', 'dtype_out_time', ',', 'extension', '=', "'nc'", ')', ':', 'if', 'dtype_out_time', 'is', 'None', ':', 'dtype_out_time', '=', "''", 'out_lbl', '=', 'utils', '.', 'io', '.', 'data_out_label', '(', 'self', '.', 'intvl_out', ',', 'dtype_out_time', ',', 'dtype_vert', '=', 'self', '.', 'dtype_out_vert', ')', 'in_lbl', '=', 'utils', '.', 'io', '.', 'data_in_label', '(', 'self', '.', 'intvl_in', ',', 'self', '.', 'dtype_in_time', ',', 'self', '.', 'dtype_in_vert', ')', 'start_year', '=', 'utils', '.', 'times', '.', 'infer_year', '(', 'self', '.', 'start_date', ')', 'end_year', '=', 'utils', '.', 'times', '.', 'infer_year', '(', 'self', '.', 'end_date', ')', 'yr_lbl', '=', 'utils', '.', 'io', '.', 'yr_label', '(', '(', 'start_year', ',', 'end_year', ')', ')', 'return', "'.'", '.', 'join', '(', '[', 'self', '.', 'name', ',', 'out_lbl', ',', 'in_lbl', ',', 'self', '.', 'model', '.', 'name', ',', 'self', '.', 'run', '.', 'name', ',', 'yr_lbl', ',', 'extension', ']', ')', '.', 'replace', '(', "'..'", ',', "'.'", ')'] | Create the name of the aospy file. | ['Create', 'the', 'name', 'of', 'the', 'aospy', 'file', '.'] | train | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L77-L91 |
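
The file name is a dot-joined list of labels with a final '..' -> '.' cleanup that absorbs any empty label; a stand-alone sketch with made-up labels:

def file_name(name, out_lbl, in_lbl, model, run, yr_lbl, extension="nc"):
    return ".".join([name, out_lbl, in_lbl, model, run, yr_lbl, extension]).replace("..", ".")

print(file_name("precip", "", "monthly_from_ts", "am2", "control", "1983-1998"))
# -> precip.monthly_from_ts.am2.control.1983-1998.nc
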
3,154 | inspirehep/inspire-dojson | inspire_dojson/hep/rules/bd0xx.py | languages | def languages(self, key, value):
"""Populate the ``languages`` key."""
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages | python | def languages(self, key, value):
"""Populate the ``languages`` key."""
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages | ['def', 'languages', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'languages', '=', 'self', '.', 'get', '(', "'languages'", ',', '[', ']', ')', 'values', '=', 'force_list', '(', 'value', '.', 'get', '(', "'a'", ')', ')', 'for', 'value', 'in', 'values', ':', 'for', 'language', 'in', 'RE_LANGUAGE', '.', 'split', '(', 'value', ')', ':', 'try', ':', 'name', '=', 'language', '.', 'strip', '(', ')', '.', 'capitalize', '(', ')', 'languages', '.', 'append', '(', 'pycountry', '.', 'languages', '.', 'get', '(', 'name', '=', 'name', ')', '.', 'alpha_2', ')', 'except', 'KeyError', ':', 'pass', 'return', 'languages'] | Populate the ``languages`` key. | ['Populate', 'the', 'languages', 'key', '.'] | train | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd0xx.py#L411-L424 |
3,155 | chop-dbhi/varify | fabfile.py | merge_commit | def merge_commit(commit):
"Fetches the latest code and merges up the specified commit."
with cd(env.path):
run('git fetch')
if '@' in commit:
branch, commit = commit.split('@')
run('git checkout {0}'.format(branch))
run('git merge {0}'.format(commit)) | python | def merge_commit(commit):
"Fetches the latest code and merges up the specified commit."
with cd(env.path):
run('git fetch')
if '@' in commit:
branch, commit = commit.split('@')
run('git checkout {0}'.format(branch))
run('git merge {0}'.format(commit)) | ['def', 'merge_commit', '(', 'commit', ')', ':', 'with', 'cd', '(', 'env', '.', 'path', ')', ':', 'run', '(', "'git fetch'", ')', 'if', "'@'", 'in', 'commit', ':', 'branch', ',', 'commit', '=', 'commit', '.', 'split', '(', "'@'", ')', 'run', '(', "'git checkout {0}'", '.', 'format', '(', 'branch', ')', ')', 'run', '(', "'git merge {0}'", '.', 'format', '(', 'commit', ')', ')'] | Fetches the latest code and merges up the specified commit. | ['Fetches', 'the', 'latest', 'code', 'and', 'merges', 'up', 'the', 'specified', 'commit', '.'] | train | https://github.com/chop-dbhi/varify/blob/5dc721e49ed9bd3582f4b117785fdd1a8b6ba777/fabfile.py#L105-L112 |
3,156 | Skyscanner/skyscanner-python-sdk | skyscanner/skyscanner.py | Flights.create_session | def create_session(self, **params):
"""
Create the session
date format: YYYY-mm-dd
location: ISO code
"""
return self.make_request(self.PRICING_SESSION_URL,
method='post',
headers=self._session_headers(),
callback=lambda resp: resp.headers[
'location'],
data=params) | python | def create_session(self, **params):
"""
Create the session
date format: YYYY-mm-dd
location: ISO code
"""
return self.make_request(self.PRICING_SESSION_URL,
method='post',
headers=self._session_headers(),
callback=lambda resp: resp.headers[
'location'],
data=params) | ['def', 'create_session', '(', 'self', ',', '*', '*', 'params', ')', ':', 'return', 'self', '.', 'make_request', '(', 'self', '.', 'PRICING_SESSION_URL', ',', 'method', '=', "'post'", ',', 'headers', '=', 'self', '.', '_session_headers', '(', ')', ',', 'callback', '=', 'lambda', 'resp', ':', 'resp', '.', 'headers', '[', "'location'", ']', ',', 'data', '=', 'params', ')'] | Create the session
date format: YYYY-mm-dd
location: ISO code | ['Create', 'the', 'session', 'date', 'format', ':', 'YYYY', '-', 'mm', '-', 'dd', 'location', ':', 'ISO', 'code'] | train | https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L434-L445 |
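
A hedged usage sketch: create_session posts the search parameters and hands back the polling URL taken from the 'location' response header. The keyword names and values below follow the SDK's Live Pricing examples and are illustrative, not verified against the current service:

from skyscanner.skyscanner import Flights

flights = Flights("<your API key>")
poll_url = flights.create_session(
    country="UK", currency="GBP", locale="en-GB",
    originplace="SFO-sky", destinationplace="LHR-sky",
    outbounddate="2024-05-28", adults=1)
print(poll_url)
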
3,157 | manns/pyspread | pyspread/src/gui/_toolbars.py | AttributesToolbar._create_color_buttons | def _create_color_buttons(self):
"""Create color choice buttons"""
button_size = (30, 30)
button_style = wx.NO_BORDER
try:
self.linecolor_choice = \
csel.ColourSelect(self, -1, unichr(0x2500), (0, 0, 0),
size=button_size, style=button_style)
except UnicodeEncodeError:
# ANSI wxPython installed
self.linecolor_choice = \
csel.ColourSelect(self, -1, "-", (0, 0, 0),
size=button_size, style=button_style)
self.bgcolor_choice = \
csel.ColourSelect(self, -1, "", (255, 255, 255),
size=button_size, style=button_style)
self.textcolor_choice = \
csel.ColourSelect(self, -1, "A", (0, 0, 0),
size=button_size, style=button_style)
self.linecolor_choice.SetToolTipString(_(u"Border line color"))
self.bgcolor_choice.SetToolTipString(_(u"Cell background"))
self.textcolor_choice.SetToolTipString(_(u"Text color"))
self.AddControl(self.linecolor_choice)
self.AddControl(self.bgcolor_choice)
self.AddControl(self.textcolor_choice)
self.linecolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnLineColor)
self.bgcolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnBGColor)
self.textcolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnTextColor) | python | def _create_color_buttons(self):
"""Create color choice buttons"""
button_size = (30, 30)
button_style = wx.NO_BORDER
try:
self.linecolor_choice = \
csel.ColourSelect(self, -1, unichr(0x2500), (0, 0, 0),
size=button_size, style=button_style)
except UnicodeEncodeError:
# ANSI wxPython installed
self.linecolor_choice = \
csel.ColourSelect(self, -1, "-", (0, 0, 0),
size=button_size, style=button_style)
self.bgcolor_choice = \
csel.ColourSelect(self, -1, "", (255, 255, 255),
size=button_size, style=button_style)
self.textcolor_choice = \
csel.ColourSelect(self, -1, "A", (0, 0, 0),
size=button_size, style=button_style)
self.linecolor_choice.SetToolTipString(_(u"Border line color"))
self.bgcolor_choice.SetToolTipString(_(u"Cell background"))
self.textcolor_choice.SetToolTipString(_(u"Text color"))
self.AddControl(self.linecolor_choice)
self.AddControl(self.bgcolor_choice)
self.AddControl(self.textcolor_choice)
self.linecolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnLineColor)
self.bgcolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnBGColor)
self.textcolor_choice.Bind(csel.EVT_COLOURSELECT, self.OnTextColor) | ['def', '_create_color_buttons', '(', 'self', ')', ':', 'button_size', '=', '(', '30', ',', '30', ')', 'button_style', '=', 'wx', '.', 'NO_BORDER', 'try', ':', 'self', '.', 'linecolor_choice', '=', 'csel', '.', 'ColourSelect', '(', 'self', ',', '-', '1', ',', 'unichr', '(', '0x2500', ')', ',', '(', '0', ',', '0', ',', '0', ')', ',', 'size', '=', 'button_size', ',', 'style', '=', 'button_style', ')', 'except', 'UnicodeEncodeError', ':', '# ANSI wxPython installed', 'self', '.', 'linecolor_choice', '=', 'csel', '.', 'ColourSelect', '(', 'self', ',', '-', '1', ',', '"-"', ',', '(', '0', ',', '0', ',', '0', ')', ',', 'size', '=', 'button_size', ',', 'style', '=', 'button_style', ')', 'self', '.', 'bgcolor_choice', '=', 'csel', '.', 'ColourSelect', '(', 'self', ',', '-', '1', ',', '""', ',', '(', '255', ',', '255', ',', '255', ')', ',', 'size', '=', 'button_size', ',', 'style', '=', 'button_style', ')', 'self', '.', 'textcolor_choice', '=', 'csel', '.', 'ColourSelect', '(', 'self', ',', '-', '1', ',', '"A"', ',', '(', '0', ',', '0', ',', '0', ')', ',', 'size', '=', 'button_size', ',', 'style', '=', 'button_style', ')', 'self', '.', 'linecolor_choice', '.', 'SetToolTipString', '(', '_', '(', 'u"Border line color"', ')', ')', 'self', '.', 'bgcolor_choice', '.', 'SetToolTipString', '(', '_', '(', 'u"Cell background"', ')', ')', 'self', '.', 'textcolor_choice', '.', 'SetToolTipString', '(', '_', '(', 'u"Text color"', ')', ')', 'self', '.', 'AddControl', '(', 'self', '.', 'linecolor_choice', ')', 'self', '.', 'AddControl', '(', 'self', '.', 'bgcolor_choice', ')', 'self', '.', 'AddControl', '(', 'self', '.', 'textcolor_choice', ')', 'self', '.', 'linecolor_choice', '.', 'Bind', '(', 'csel', '.', 'EVT_COLOURSELECT', ',', 'self', '.', 'OnLineColor', ')', 'self', '.', 'bgcolor_choice', '.', 'Bind', '(', 'csel', '.', 'EVT_COLOURSELECT', ',', 'self', '.', 'OnBGColor', ')', 'self', '.', 'textcolor_choice', '.', 'Bind', '(', 'csel', '.', 'EVT_COLOURSELECT', ',', 'self', '.', 'OnTextColor', ')'] | Create color choice buttons | ['Create', 'color', 'choice', 'buttons'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_toolbars.py#L679-L712 |
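
The widget used above is wx.lib.colourselect.ColourSelect; a minimal stand-alone sketch (outside pyspread's toolbar) of creating one and reacting to EVT_COLOURSELECT, assuming a working wxPython installation and a display:

import wx
import wx.lib.colourselect as csel

class Frame(wx.Frame):
    def __init__(self):
        super(Frame, self).__init__(None, title="colour picker")
        picker = csel.ColourSelect(self, -1, "A", (0, 0, 0), size=(30, 30))
        picker.Bind(csel.EVT_COLOURSELECT, self.on_colour)

    def on_colour(self, event):
        print(event.GetValue())  # the colour chosen in the dialog

app = wx.App(False)
Frame().Show()
app.MainLoop()
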
3,158 | eqcorrscan/EQcorrscan | eqcorrscan/utils/clustering.py | empirical_SVD | def empirical_SVD(stream_list, linear=True):
"""
Depreciated. Use empirical_svd.
"""
warnings.warn('Depreciated, use empirical_svd instead.')
return empirical_svd(stream_list=stream_list, linear=linear) | python | def empirical_SVD(stream_list, linear=True):
"""
Depreciated. Use empirical_svd.
"""
warnings.warn('Depreciated, use empirical_svd instead.')
return empirical_svd(stream_list=stream_list, linear=linear) | ['def', 'empirical_SVD', '(', 'stream_list', ',', 'linear', '=', 'True', ')', ':', 'warnings', '.', 'warn', '(', "'Depreciated, use empirical_svd instead.'", ')', 'return', 'empirical_svd', '(', 'stream_list', '=', 'stream_list', ',', 'linear', '=', 'linear', ')'] | Depreciated. Use empirical_svd. | ['Depreciated', '.', 'Use', 'empirical_svd', '.'] | train | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L416-L421 |
3,159 | shaunduncan/nosqlite | nosqlite.py | _all | def _all(field, value, document):
"""
Returns True if the value of document field contains all the values
specified by ``value``. If supplied value is not an iterable, a
MalformedQueryException is raised. If the value of the document field
is not an iterable, False is returned
"""
try:
a = set(value)
except TypeError:
raise MalformedQueryException("'$all' must accept an iterable")
try:
b = set(document.get(field, []))
except TypeError:
return False
else:
return a.intersection(b) == a | python | def _all(field, value, document):
"""
Returns True if the value of document field contains all the values
specified by ``value``. If supplied value is not an iterable, a
MalformedQueryException is raised. If the value of the document field
is not an iterable, False is returned
"""
try:
a = set(value)
except TypeError:
raise MalformedQueryException("'$all' must accept an iterable")
try:
b = set(document.get(field, []))
except TypeError:
return False
else:
return a.intersection(b) == a | ['def', '_all', '(', 'field', ',', 'value', ',', 'document', ')', ':', 'try', ':', 'a', '=', 'set', '(', 'value', ')', 'except', 'TypeError', ':', 'raise', 'MalformedQueryException', '(', '"\'$all\' must accept an iterable"', ')', 'try', ':', 'b', '=', 'set', '(', 'document', '.', 'get', '(', 'field', ',', '[', ']', ')', ')', 'except', 'TypeError', ':', 'return', 'False', 'else', ':', 'return', 'a', '.', 'intersection', '(', 'b', ')', '==', 'a'] | Returns True if the value of document field contains all the values
specified by ``value``. If supplied value is not an iterable, a
MalformedQueryException is raised. If the value of the document field
is not an iterable, False is returned | ['Returns', 'True', 'if', 'the', 'value', 'of', 'document', 'field', 'contains', 'all', 'the', 'values', 'specified', 'by', 'value', '.', 'If', 'supplied', 'value', 'is', 'not', 'an', 'iterable', 'a', 'MalformedQueryException', 'is', 'raised', '.', 'If', 'the', 'value', 'of', 'the', 'document', 'field', 'is', 'not', 'an', 'iterable', 'False', 'is', 'returned'] | train | https://github.com/shaunduncan/nosqlite/blob/3033c029b7c8290c66a8b36dc512e560505d4c85/nosqlite.py#L482-L499 |
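
The `$all` operator reduces to a set-containment test; a self-contained sketch of the same logic with example documents:

def matches_all(field, value, document):
    required = set(value)   # a non-iterable value raises TypeError here; the original wraps it in MalformedQueryException
    try:
        present = set(document.get(field, []))
    except TypeError:
        return False        # the document field is not iterable
    return required.issubset(present)

print(matches_all("tags", ["a", "b"], {"tags": ["a", "b", "c"]}))  # True
print(matches_all("tags", ["a", "z"], {"tags": ["a", "b", "c"]}))  # False
print(matches_all("tags", ["a"], {"tags": 5}))                     # False
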
3,160 | openstack/pyghmi | pyghmi/ipmi/command.py | Command.get_channel_access | def get_channel_access(self, channel=None, read_mode='volatile'):
"""Get channel access
:param channel: number [1:7]
:param read_mode:
non_volatile = get non-volatile Channel Access
volatile = get present volatile (active) setting of Channel Access
:return: A Python dict with the following keys/values:
{
- alerting:
- per_msg_auth:
- user_level_auth:
- access_mode:{
0: 'disabled',
1: 'pre_boot',
2: 'always',
3: 'shared'
}
- privilege_level: {
1: 'callback',
2: 'user',
3: 'operator',
4: 'administrator',
5: 'proprietary',
}
}
"""
if channel is None:
channel = self.get_network_channel()
data = []
data.append(channel & 0b00001111)
b = 0
read_modes = {
'non_volatile': 1,
'volatile': 2,
}
b |= (read_modes[read_mode] << 6) & 0b11000000
data.append(b)
response = self.raw_command(netfn=0x06, command=0x41, data=data)
if 'error' in response:
raise Exception(response['error'])
data = response['data']
if len(data) != 2:
raise Exception('expecting 2 data bytes')
r = {}
r['alerting'] = data[0] & 0b10000000 > 0
r['per_msg_auth'] = data[0] & 0b01000000 > 0
r['user_level_auth'] = data[0] & 0b00100000 > 0
access_modes = {
0: 'disabled',
1: 'pre_boot',
2: 'always',
3: 'shared'
}
r['access_mode'] = access_modes[data[0] & 0b00000011]
privilege_levels = {
0: 'reserved',
1: 'callback',
2: 'user',
3: 'operator',
4: 'administrator',
5: 'proprietary',
# 0x0F: 'no_access'
}
r['privilege_level'] = privilege_levels[data[1] & 0b00001111]
return r | python | def get_channel_access(self, channel=None, read_mode='volatile'):
"""Get channel access
:param channel: number [1:7]
:param read_mode:
non_volatile = get non-volatile Channel Access
volatile = get present volatile (active) setting of Channel Access
:return: A Python dict with the following keys/values:
{
- alerting:
- per_msg_auth:
- user_level_auth:
- access_mode:{
0: 'disabled',
1: 'pre_boot',
2: 'always',
3: 'shared'
}
- privilege_level: {
1: 'callback',
2: 'user',
3: 'operator',
4: 'administrator',
5: 'proprietary',
}
}
"""
if channel is None:
channel = self.get_network_channel()
data = []
data.append(channel & 0b00001111)
b = 0
read_modes = {
'non_volatile': 1,
'volatile': 2,
}
b |= (read_modes[read_mode] << 6) & 0b11000000
data.append(b)
response = self.raw_command(netfn=0x06, command=0x41, data=data)
if 'error' in response:
raise Exception(response['error'])
data = response['data']
if len(data) != 2:
raise Exception('expecting 2 data bytes')
r = {}
r['alerting'] = data[0] & 0b10000000 > 0
r['per_msg_auth'] = data[0] & 0b01000000 > 0
r['user_level_auth'] = data[0] & 0b00100000 > 0
access_modes = {
0: 'disabled',
1: 'pre_boot',
2: 'always',
3: 'shared'
}
r['access_mode'] = access_modes[data[0] & 0b00000011]
privilege_levels = {
0: 'reserved',
1: 'callback',
2: 'user',
3: 'operator',
4: 'administrator',
5: 'proprietary',
# 0x0F: 'no_access'
}
r['privilege_level'] = privilege_levels[data[1] & 0b00001111]
return r | ['def', 'get_channel_access', '(', 'self', ',', 'channel', '=', 'None', ',', 'read_mode', '=', "'volatile'", ')', ':', 'if', 'channel', 'is', 'None', ':', 'channel', '=', 'self', '.', 'get_network_channel', '(', ')', 'data', '=', '[', ']', 'data', '.', 'append', '(', 'channel', '&', '0b00001111', ')', 'b', '=', '0', 'read_modes', '=', '{', "'non_volatile'", ':', '1', ',', "'volatile'", ':', '2', ',', '}', 'b', '|=', '(', 'read_modes', '[', 'read_mode', ']', '<<', '6', ')', '&', '0b11000000', 'data', '.', 'append', '(', 'b', ')', 'response', '=', 'self', '.', 'raw_command', '(', 'netfn', '=', '0x06', ',', 'command', '=', '0x41', ',', 'data', '=', 'data', ')', 'if', "'error'", 'in', 'response', ':', 'raise', 'Exception', '(', 'response', '[', "'error'", ']', ')', 'data', '=', 'response', '[', "'data'", ']', 'if', 'len', '(', 'data', ')', '!=', '2', ':', 'raise', 'Exception', '(', "'expecting 2 data bytes'", ')', 'r', '=', '{', '}', 'r', '[', "'alerting'", ']', '=', 'data', '[', '0', ']', '&', '0b10000000', '>', '0', 'r', '[', "'per_msg_auth'", ']', '=', 'data', '[', '0', ']', '&', '0b01000000', '>', '0', 'r', '[', "'user_level_auth'", ']', '=', 'data', '[', '0', ']', '&', '0b00100000', '>', '0', 'access_modes', '=', '{', '0', ':', "'disabled'", ',', '1', ':', "'pre_boot'", ',', '2', ':', "'always'", ',', '3', ':', "'shared'", '}', 'r', '[', "'access_mode'", ']', '=', 'access_modes', '[', 'data', '[', '0', ']', '&', '0b00000011', ']', 'privilege_levels', '=', '{', '0', ':', "'reserved'", ',', '1', ':', "'callback'", ',', '2', ':', "'user'", ',', '3', ':', "'operator'", ',', '4', ':', "'administrator'", ',', '5', ':', "'proprietary'", ',', "# 0x0F: 'no_access'", '}', 'r', '[', "'privilege_level'", ']', '=', 'privilege_levels', '[', 'data', '[', '1', ']', '&', '0b00001111', ']', 'return', 'r'] | Get channel access
:param channel: number [1:7]
:param read_mode:
non_volatile = get non-volatile Channel Access
volatile = get present volatile (active) setting of Channel Access
:return: A Python dict with the following keys/values:
{
- alerting:
- per_msg_auth:
- user_level_auth:
- access_mode:{
0: 'disabled',
1: 'pre_boot',
2: 'always',
3: 'shared'
}
- privilege_level: {
1: 'callback',
2: 'user',
3: 'operator',
4: 'administrator',
5: 'proprietary',
}
} | ['Get', 'channel', 'access'] | train | https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1394-L1463 |
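
A hedged usage sketch against a live BMC: the Command constructor below takes the usual bmc/userid/password arguments, the address and credentials are placeholders, and the printed keys follow the return structure documented above:

from pyghmi.ipmi import command

ipmi = command.Command(bmc="10.0.0.10", userid="admin", password="secret")
access = ipmi.get_channel_access(read_mode="volatile")
print(access["access_mode"], access["privilege_level"], access["per_msg_auth"])
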
3,161 | log2timeline/dfvfs | dfvfs/helpers/file_system_searcher.py | FindSpec.AtMaximumDepth | def AtMaximumDepth(self, search_depth):
"""Determines if the find specification is at maximum depth.
Args:
search_depth (int): number of location path segments to compare.
Returns:
bool: True if at maximum depth, False if not.
"""
if self._location_segments is not None:
if search_depth >= self._number_of_location_segments:
return True
return False | python | def AtMaximumDepth(self, search_depth):
"""Determines if the find specification is at maximum depth.
Args:
search_depth (int): number of location path segments to compare.
Returns:
bool: True if at maximum depth, False if not.
"""
if self._location_segments is not None:
if search_depth >= self._number_of_location_segments:
return True
return False | ['def', 'AtMaximumDepth', '(', 'self', ',', 'search_depth', ')', ':', 'if', 'self', '.', '_location_segments', 'is', 'not', 'None', ':', 'if', 'search_depth', '>=', 'self', '.', '_number_of_location_segments', ':', 'return', 'True', 'return', 'False'] | Determines if the find specification is at maximum depth.
Args:
search_depth (int): number of location path segments to compare.
Returns:
bool: True if at maximum depth, False if not. | ['Determines', 'if', 'the', 'find', 'specification', 'is', 'at', 'maximum', 'depth', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/file_system_searcher.py#L314-L327 |
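
The depth test is just a comparison between the search depth and the number of location path segments; a simplified stand-alone sketch (not using dfvfs itself):

def at_maximum_depth(location, search_depth, separator="/"):
    segments = [s for s in location.split(separator) if s]
    return search_depth >= len(segments)

print(at_maximum_depth("/usr/bin/python", 2))  # False: three segments
print(at_maximum_depth("/usr/bin/python", 3))  # True
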
3,162 | KelSolaar/Foundations | foundations/environment.py | Environment.get_values | def get_values(self, *args):
"""
Gets environment variables values.
Usage::
>>> environment = Environment("HOME")
>>> environment.get_values()
{'HOME': u'/Users/JohnDoe'}
>>> environment.get_values("USER")
{'HOME': u'/Users/JohnDoe', 'USER': u'JohnDoe'}
:param \*args: Additional variables names to retrieve values from.
:type \*args: \*
:return: Variables : Values.
:rtype: dict
"""
args and self.__add_variables(*args)
LOGGER.debug("> Object environment variables: '{0}'.".format(
",".join((key for key in self.__variables if key))))
LOGGER.debug("> Available system environment variables: '{0}'".format(os.environ.keys()))
for variable in self.__variables:
value = os.environ.get(variable, None)
self.__variables[variable] = foundations.strings.to_string(value) if value else None
return self.__variables | python | def get_values(self, *args):
"""
Gets environment variables values.
Usage::
>>> environment = Environment("HOME")
>>> environment.get_values()
{'HOME': u'/Users/JohnDoe'}
>>> environment.get_values("USER")
{'HOME': u'/Users/JohnDoe', 'USER': u'JohnDoe'}
:param \*args: Additional variables names to retrieve values from.
:type \*args: \*
:return: Variables : Values.
:rtype: dict
"""
args and self.__add_variables(*args)
LOGGER.debug("> Object environment variables: '{0}'.".format(
",".join((key for key in self.__variables if key))))
LOGGER.debug("> Available system environment variables: '{0}'".format(os.environ.keys()))
for variable in self.__variables:
value = os.environ.get(variable, None)
self.__variables[variable] = foundations.strings.to_string(value) if value else None
return self.__variables | ['def', 'get_values', '(', 'self', ',', '*', 'args', ')', ':', 'args', 'and', 'self', '.', '__add_variables', '(', '*', 'args', ')', 'LOGGER', '.', 'debug', '(', '"> Object environment variables: \'{0}\'."', '.', 'format', '(', '","', '.', 'join', '(', '(', 'key', 'for', 'key', 'in', 'self', '.', '__variables', 'if', 'key', ')', ')', ')', ')', 'LOGGER', '.', 'debug', '(', '"> Available system environment variables: \'{0}\'"', '.', 'format', '(', 'os', '.', 'environ', '.', 'keys', '(', ')', ')', ')', 'for', 'variable', 'in', 'self', '.', '__variables', ':', 'value', '=', 'os', '.', 'environ', '.', 'get', '(', 'variable', ',', 'None', ')', 'self', '.', '__variables', '[', 'variable', ']', '=', 'foundations', '.', 'strings', '.', 'to_string', '(', 'value', ')', 'if', 'value', 'else', 'None', 'return', 'self', '.', '__variables'] | Gets environment variables values.
Usage::
>>> environment = Environment("HOME")
>>> environment.get_values()
{'HOME': u'/Users/JohnDoe'}
>>> environment.get_values("USER")
{'HOME': u'/Users/JohnDoe', 'USER': u'JohnDoe'}
:param \*args: Additional variables names to retrieve values from.
:type \*args: \*
:return: Variables : Values.
:rtype: dict | ['Gets', 'environment', 'variables', 'values', '.'] | train | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/environment.py#L131-L158 |
3,163 | fracpete/python-weka-wrapper3 | python/weka/core/classes.py | Configurable.to_dict | def to_dict(self):
"""
Returns a dictionary that represents this object, to be used for JSONification.
:return: the object dictionary
:rtype: dict
"""
result = {}
result["type"] = "Configurable"
result["class"] = get_classname(self)
result["config"] = {}
for k in self._config:
v = self._config[k]
if isinstance(v, JSONObject):
result["config"][k] = v.to_dict()
else:
result["config"][k] = v
return result | python | def to_dict(self):
"""
Returns a dictionary that represents this object, to be used for JSONification.
:return: the object dictionary
:rtype: dict
"""
result = {}
result["type"] = "Configurable"
result["class"] = get_classname(self)
result["config"] = {}
for k in self._config:
v = self._config[k]
if isinstance(v, JSONObject):
result["config"][k] = v.to_dict()
else:
result["config"][k] = v
return result | ['def', 'to_dict', '(', 'self', ')', ':', 'result', '=', '{', '}', 'result', '[', '"type"', ']', '=', '"Configurable"', 'result', '[', '"class"', ']', '=', 'get_classname', '(', 'self', ')', 'result', '[', '"config"', ']', '=', '{', '}', 'for', 'k', 'in', 'self', '.', '_config', ':', 'v', '=', 'self', '.', '_config', '[', 'k', ']', 'if', 'isinstance', '(', 'v', ',', 'JSONObject', ')', ':', 'result', '[', '"config"', ']', '[', 'k', ']', '=', 'v', '.', 'to_dict', '(', ')', 'else', ':', 'result', '[', '"config"', ']', '[', 'k', ']', '=', 'v', 'return', 'result'] | Returns a dictionary that represents this object, to be used for JSONification.
:return: the object dictionary
:rtype: dict | ['Returns', 'a', 'dictionary', 'that', 'represents', 'this', 'object', 'to', 'be', 'used', 'for', 'JSONification', '.'] | train | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L352-L369 |
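
The serialization is a plain recursive walk: values that can serialize themselves are asked to, everything else is copied verbatim. A minimal stand-alone sketch of the same pattern, with get_classname and JSONObject replaced by local stand-ins:

class Configurable(object):
    def __init__(self, **config):
        self._config = config

    def to_dict(self):
        result = {"type": "Configurable", "class": self.__class__.__name__, "config": {}}
        for k, v in self._config.items():
            result["config"][k] = v.to_dict() if isinstance(v, Configurable) else v
        return result

inner = Configurable(threshold=0.5)
outer = Configurable(name="demo", child=inner)
print(outer.to_dict())
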
3,164 | wheeler-microfluidics/nested-structures | nested_structures/__init__.py | apply_dict_depth_first | def apply_dict_depth_first(nodes, func, depth=0, as_dict=True, parents=None, pre=None, post=None):
'''
This function is similar to the `apply_depth_first` except that it operates
on the `OrderedDict`-based structure returned from `apply_depth_first` when
`as_dict=True`.
Note that if `as_dict` is `False`, the result of this function is given in
the entry/tuple form.
'''
if as_dict:
items = OrderedDict()
else:
items = []
if parents is None:
parents = []
node_count = len(nodes)
for i, (k, node) in enumerate(nodes.iteritems()):
first = (i == 0)
last = (i == (node_count - 1))
if pre is not None:
pre(k, node, parents, first, last, depth)
item = func(k, node, parents, first, last, depth)
item_parents = parents + [(k, node)]
if node.children is not None:
children = apply_dict_depth_first(node.children, func,
depth=depth + 1,
as_dict=as_dict,
parents=item_parents,
pre=pre, post=post)
else:
children = None
if post is not None:
post(k, node, parents, first, last, depth)
if as_dict:
items[k] = Node(item, children)
elif children:
items.append((item, children))
else:
items.append(item)
return items | python | def apply_dict_depth_first(nodes, func, depth=0, as_dict=True, parents=None, pre=None, post=None):
'''
This function is similar to the `apply_depth_first` except that it operates
on the `OrderedDict`-based structure returned from `apply_depth_first` when
`as_dict=True`.
Note that if `as_dict` is `False`, the result of this function is given in
the entry/tuple form.
'''
if as_dict:
items = OrderedDict()
else:
items = []
if parents is None:
parents = []
node_count = len(nodes)
for i, (k, node) in enumerate(nodes.iteritems()):
first = (i == 0)
last = (i == (node_count - 1))
if pre is not None:
pre(k, node, parents, first, last, depth)
item = func(k, node, parents, first, last, depth)
item_parents = parents + [(k, node)]
if node.children is not None:
children = apply_dict_depth_first(node.children, func,
depth=depth + 1,
as_dict=as_dict,
parents=item_parents,
pre=pre, post=post)
else:
children = None
if post is not None:
post(k, node, parents, first, last, depth)
if as_dict:
items[k] = Node(item, children)
elif children:
items.append((item, children))
else:
items.append(item)
return items | ['def', 'apply_dict_depth_first', '(', 'nodes', ',', 'func', ',', 'depth', '=', '0', ',', 'as_dict', '=', 'True', ',', 'parents', '=', 'None', ',', 'pre', '=', 'None', ',', 'post', '=', 'None', ')', ':', 'if', 'as_dict', ':', 'items', '=', 'OrderedDict', '(', ')', 'else', ':', 'items', '=', '[', ']', 'if', 'parents', 'is', 'None', ':', 'parents', '=', '[', ']', 'node_count', '=', 'len', '(', 'nodes', ')', 'for', 'i', ',', '(', 'k', ',', 'node', ')', 'in', 'enumerate', '(', 'nodes', '.', 'iteritems', '(', ')', ')', ':', 'first', '=', '(', 'i', '==', '0', ')', 'last', '=', '(', 'i', '==', '(', 'node_count', '-', '1', ')', ')', 'if', 'pre', 'is', 'not', 'None', ':', 'pre', '(', 'k', ',', 'node', ',', 'parents', ',', 'first', ',', 'last', ',', 'depth', ')', 'item', '=', 'func', '(', 'k', ',', 'node', ',', 'parents', ',', 'first', ',', 'last', ',', 'depth', ')', 'item_parents', '=', 'parents', '+', '[', '(', 'k', ',', 'node', ')', ']', 'if', 'node', '.', 'children', 'is', 'not', 'None', ':', 'children', '=', 'apply_dict_depth_first', '(', 'node', '.', 'children', ',', 'func', ',', 'depth', '=', 'depth', '+', '1', ',', 'as_dict', '=', 'as_dict', ',', 'parents', '=', 'item_parents', ',', 'pre', '=', 'pre', ',', 'post', '=', 'post', ')', 'else', ':', 'children', '=', 'None', 'if', 'post', 'is', 'not', 'None', ':', 'post', '(', 'k', ',', 'node', ',', 'parents', ',', 'first', ',', 'last', ',', 'depth', ')', 'if', 'as_dict', ':', 'items', '[', 'k', ']', '=', 'Node', '(', 'item', ',', 'children', ')', 'elif', 'children', ':', 'items', '.', 'append', '(', '(', 'item', ',', 'children', ')', ')', 'else', ':', 'items', '.', 'append', '(', 'item', ')', 'return', 'items'] | This function is similar to the `apply_depth_first` except that it operates
on the `OrderedDict`-based structure returned from `apply_depth_first` when
`as_dict=True`.
Note that if `as_dict` is `False`, the result of this function is given in
the entry/tuple form. | ['This', 'function', 'is', 'similar', 'to', 'the', 'apply_depth_first', 'except', 'that', 'it', 'operates', 'on', 'the', 'OrderedDict', '-', 'based', 'structure', 'returned', 'from', 'apply_depth_first', 'when', 'as_dict', '=', 'True', '.'] | train | https://github.com/wheeler-microfluidics/nested-structures/blob/e3586bcca01c59f18ae16b8240e6e49197b63ecb/nested_structures/__init__.py#L149-L190 |
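
A compact stand-alone sketch of the same depth-first walk over an OrderedDict of Node(item, children) entries. The callback signature (key, node, parents, first, last, depth) matches the one used above; Node here is a local namedtuple stand-in and the pre/post hooks are omitted:

from collections import OrderedDict, namedtuple

Node = namedtuple("Node", ["item", "children"])

def walk(nodes, func, depth=0, parents=None):
    parents = parents or []
    out = OrderedDict()
    count = len(nodes)
    for i, (k, node) in enumerate(nodes.items()):
        item = func(k, node, parents, i == 0, i == count - 1, depth)
        children = None
        if node.children is not None:
            children = walk(node.children, func, depth + 1, parents + [(k, node)])
        out[k] = Node(item, children)
    return out

tree = OrderedDict([("a", Node("A", OrderedDict([("b", Node("B", None))]))),
                    ("c", Node("C", None))])
walk(tree, lambda k, n, p, first, last, d: print("  " * d + k))
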
3,165 | dslackw/slpkg | slpkg/binary/install.py | BinaryInstall.not_downgrade | def not_downgrade(self, package):
"""Don't downgrade packages if repository version is lower than
installed"""
name = split_package(package)[0]
rep_ver = split_package(package)[1]
ins_ver = GetFromInstalled(name).version()[1:]
if not ins_ver:
ins_ver = "0"
if LooseVersion(rep_ver) < LooseVersion(ins_ver):
self.msg.template(78)
print("| Package {0} don't downgrade, "
"setting by user".format(name))
self.msg.template(78)
return True | python | def not_downgrade(self, package):
"""Don't downgrade packages if repository version is lower than
installed"""
name = split_package(package)[0]
rep_ver = split_package(package)[1]
ins_ver = GetFromInstalled(name).version()[1:]
if not ins_ver:
ins_ver = "0"
if LooseVersion(rep_ver) < LooseVersion(ins_ver):
self.msg.template(78)
print("| Package {0} don't downgrade, "
"setting by user".format(name))
self.msg.template(78)
return True | ['def', 'not_downgrade', '(', 'self', ',', 'package', ')', ':', 'name', '=', 'split_package', '(', 'package', ')', '[', '0', ']', 'rep_ver', '=', 'split_package', '(', 'package', ')', '[', '1', ']', 'ins_ver', '=', 'GetFromInstalled', '(', 'name', ')', '.', 'version', '(', ')', '[', '1', ':', ']', 'if', 'not', 'ins_ver', ':', 'ins_ver', '=', '"0"', 'if', 'LooseVersion', '(', 'rep_ver', ')', '<', 'LooseVersion', '(', 'ins_ver', ')', ':', 'self', '.', 'msg', '.', 'template', '(', '78', ')', 'print', '(', '"| Package {0} don\'t downgrade, "', '"setting by user"', '.', 'format', '(', 'name', ')', ')', 'self', '.', 'msg', '.', 'template', '(', '78', ')', 'return', 'True'] | Don't downgrade packages if repository version is lower than
installed | ['Don', 't', 'downgrade', 'packages', 'if', 'repository', 'version', 'is', 'lower', 'than', 'installed'] | train | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/binary/install.py#L242-L255 |
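
The downgrade guard is a LooseVersion comparison between the repository version and the installed version; a small illustration of why LooseVersion is used rather than plain string comparison (distutils is deprecated on recent Pythons, but it is what the code above imports):

from distutils.version import LooseVersion

repo, installed = "1.9.2", "1.10.0"
print(repo < installed)                              # False: lexicographic comparison misorders versions
print(LooseVersion(repo) < LooseVersion(installed))  # True: 1.9.2 really is older than 1.10.0
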
3,166 | wdm0006/git-pandas | gitpandas/repository.py | Repository.blame | def blame(self, rev='HEAD', committer=True, by='repository', ignore_globs=None, include_globs=None):
"""
Returns the blame from the current HEAD of the repository as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to the repository by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param rev: (optional, default=HEAD) the specific revision to blame
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
blames = []
file_names = [x for x in self.repo.git.log(pretty='format:', name_only=True, diff_filter='A').split('\n') if
x.strip() != '']
for file in self.__check_extension({x: x for x in file_names}, ignore_globs=ignore_globs,
include_globs=include_globs).keys():
try:
blames.append(
[x + [str(file).replace(self.git_dir + '/', '')] for x in
self.repo.blame(rev, str(file).replace(self.git_dir + '/', ''))]
)
except GitCommandError:
pass
blames = [item for sublist in blames for item in sublist]
if committer:
if by == 'repository':
blames = DataFrame(
[[x[0].committer.name, len(x[1])] for x in blames],
columns=['committer', 'loc']
).groupby('committer').agg({'loc': np.sum})
elif by == 'file':
blames = DataFrame(
[[x[0].committer.name, len(x[1]), x[2]] for x in blames],
columns=['committer', 'loc', 'file']
).groupby(['committer', 'file']).agg({'loc': np.sum})
else:
if by == 'repository':
blames = DataFrame(
[[x[0].author.name, len(x[1])] for x in blames],
columns=['author', 'loc']
).groupby('author').agg({'loc': np.sum})
elif by == 'file':
blames = DataFrame(
[[x[0].author.name, len(x[1]), x[2]] for x in blames],
columns=['author', 'loc', 'file']
).groupby(['author', 'file']).agg({'loc': np.sum})
return blames | python | def blame(self, rev='HEAD', committer=True, by='repository', ignore_globs=None, include_globs=None):
"""
Returns the blame from the current HEAD of the repository as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to the repository by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param rev: (optional, default=HEAD) the specific revision to blame
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
blames = []
file_names = [x for x in self.repo.git.log(pretty='format:', name_only=True, diff_filter='A').split('\n') if
x.strip() != '']
for file in self.__check_extension({x: x for x in file_names}, ignore_globs=ignore_globs,
include_globs=include_globs).keys():
try:
blames.append(
[x + [str(file).replace(self.git_dir + '/', '')] for x in
self.repo.blame(rev, str(file).replace(self.git_dir + '/', ''))]
)
except GitCommandError:
pass
blames = [item for sublist in blames for item in sublist]
if committer:
if by == 'repository':
blames = DataFrame(
[[x[0].committer.name, len(x[1])] for x in blames],
columns=['committer', 'loc']
).groupby('committer').agg({'loc': np.sum})
elif by == 'file':
blames = DataFrame(
[[x[0].committer.name, len(x[1]), x[2]] for x in blames],
columns=['committer', 'loc', 'file']
).groupby(['committer', 'file']).agg({'loc': np.sum})
else:
if by == 'repository':
blames = DataFrame(
[[x[0].author.name, len(x[1])] for x in blames],
columns=['author', 'loc']
).groupby('author').agg({'loc': np.sum})
elif by == 'file':
blames = DataFrame(
[[x[0].author.name, len(x[1]), x[2]] for x in blames],
columns=['author', 'loc', 'file']
).groupby(['author', 'file']).agg({'loc': np.sum})
return blames | ['def', 'blame', '(', 'self', ',', 'rev', '=', "'HEAD'", ',', 'committer', '=', 'True', ',', 'by', '=', "'repository'", ',', 'ignore_globs', '=', 'None', ',', 'include_globs', '=', 'None', ')', ':', 'blames', '=', '[', ']', 'file_names', '=', '[', 'x', 'for', 'x', 'in', 'self', '.', 'repo', '.', 'git', '.', 'log', '(', 'pretty', '=', "'format:'", ',', 'name_only', '=', 'True', ',', 'diff_filter', '=', "'A'", ')', '.', 'split', '(', "'\\n'", ')', 'if', 'x', '.', 'strip', '(', ')', '!=', "''", ']', 'for', 'file', 'in', 'self', '.', '__check_extension', '(', '{', 'x', ':', 'x', 'for', 'x', 'in', 'file_names', '}', ',', 'ignore_globs', '=', 'ignore_globs', ',', 'include_globs', '=', 'include_globs', ')', '.', 'keys', '(', ')', ':', 'try', ':', 'blames', '.', 'append', '(', '[', 'x', '+', '[', 'str', '(', 'file', ')', '.', 'replace', '(', 'self', '.', 'git_dir', '+', "'/'", ',', "''", ')', ']', 'for', 'x', 'in', 'self', '.', 'repo', '.', 'blame', '(', 'rev', ',', 'str', '(', 'file', ')', '.', 'replace', '(', 'self', '.', 'git_dir', '+', "'/'", ',', "''", ')', ')', ']', ')', 'except', 'GitCommandError', ':', 'pass', 'blames', '=', '[', 'item', 'for', 'sublist', 'in', 'blames', 'for', 'item', 'in', 'sublist', ']', 'if', 'committer', ':', 'if', 'by', '==', "'repository'", ':', 'blames', '=', 'DataFrame', '(', '[', '[', 'x', '[', '0', ']', '.', 'committer', '.', 'name', ',', 'len', '(', 'x', '[', '1', ']', ')', ']', 'for', 'x', 'in', 'blames', ']', ',', 'columns', '=', '[', "'committer'", ',', "'loc'", ']', ')', '.', 'groupby', '(', "'committer'", ')', '.', 'agg', '(', '{', "'loc'", ':', 'np', '.', 'sum', '}', ')', 'elif', 'by', '==', "'file'", ':', 'blames', '=', 'DataFrame', '(', '[', '[', 'x', '[', '0', ']', '.', 'committer', '.', 'name', ',', 'len', '(', 'x', '[', '1', ']', ')', ',', 'x', '[', '2', ']', ']', 'for', 'x', 'in', 'blames', ']', ',', 'columns', '=', '[', "'committer'", ',', "'loc'", ',', "'file'", ']', ')', '.', 'groupby', '(', '[', "'committer'", ',', "'file'", ']', ')', '.', 'agg', '(', '{', "'loc'", ':', 'np', '.', 'sum', '}', ')', 'else', ':', 'if', 'by', '==', "'repository'", ':', 'blames', '=', 'DataFrame', '(', '[', '[', 'x', '[', '0', ']', '.', 'author', '.', 'name', ',', 'len', '(', 'x', '[', '1', ']', ')', ']', 'for', 'x', 'in', 'blames', ']', ',', 'columns', '=', '[', "'author'", ',', "'loc'", ']', ')', '.', 'groupby', '(', "'author'", ')', '.', 'agg', '(', '{', "'loc'", ':', 'np', '.', 'sum', '}', ')', 'elif', 'by', '==', "'file'", ':', 'blames', '=', 'DataFrame', '(', '[', '[', 'x', '[', '0', ']', '.', 'author', '.', 'name', ',', 'len', '(', 'x', '[', '1', ']', ')', ',', 'x', '[', '2', ']', ']', 'for', 'x', 'in', 'blames', ']', ',', 'columns', '=', '[', "'author'", ',', "'loc'", ',', "'file'", ']', ')', '.', 'groupby', '(', '[', "'author'", ',', "'file'", ']', ')', '.', 'agg', '(', '{', "'loc'", ':', 'np', '.', 'sum', '}', ')', 'return', 'blames'] | Returns the blame from the current HEAD of the repository as a DataFrame. The DataFrame is grouped by committer
name, so it will be the sum of all contributions to the repository by each committer. As with the commit history
method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain
file extensions. The DataFrame will have the columns:
* committer
* loc
:param rev: (optional, default=HEAD) the specific revision to blame
:param committer: (optional, default=True) true if committer should be reported, false if author
:param by: (optional, default=repository) whether to group by repository or by file
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame | ['Returns', 'the', 'blame', 'from', 'the', 'current', 'HEAD', 'of', 'the', 'repository', 'as', 'a', 'DataFrame', '.', 'The', 'DataFrame', 'is', 'grouped', 'by', 'committer', 'name', 'so', 'it', 'will', 'be', 'the', 'sum', 'of', 'all', 'contributions', 'to', 'the', 'repository', 'by', 'each', 'committer', '.', 'As', 'with', 'the', 'commit', 'history', 'method', 'extensions', 'and', 'ignore_dirs', 'parameters', 'can', 'be', 'passed', 'to', 'exclude', 'certain', 'directories', 'or', 'focus', 'on', 'certain', 'file', 'extensions', '.', 'The', 'DataFrame', 'will', 'have', 'the', 'columns', ':'] | train | https://github.com/wdm0006/git-pandas/blob/e56b817b1d66b8296d1d5e703d5db0e181d25899/gitpandas/repository.py#L524-L579 |
3,167 | ebu/PlugIt | plugit_proxy/views.py | generic_send_mail | def generic_send_mail(sender, dests, subject, message, key, origin='', html_message=False):
"""Generic mail sending function"""
# If no EBUIO Mail settings have been set, then no e-mail shall be sent
if settings.EBUIO_MAIL_SECRET_KEY and settings.EBUIO_MAIL_SECRET_HASH:
headers = {}
if key:
from Crypto.Cipher import AES
hash_key = hashlib.sha512(key + settings.EBUIO_MAIL_SECRET_HASH).hexdigest()[30:42]
encrypter = AES.new(((settings.EBUIO_MAIL_SECRET_KEY) * 32)[:32], AES.MODE_CFB, '87447JEUPEBU4hR!')
encrypted_key = encrypter.encrypt(hash_key + ':' + key)
base64_key = base64.urlsafe_b64encode(encrypted_key)
headers = {'Reply-To': settings.MAIL_SENDER.replace('@', '+' + base64_key + '@')}
msg = EmailMessage(subject, message, sender, dests, headers=headers)
if html_message:
msg.content_subtype = "html" # Main content is now text/html
msg.send(fail_silently=False)
try:
from main.models import MailSend
MailSend(dest=','.join(dests), subject=subject, sender=sender, message=message, origin=origin).save()
except ImportError:
pass
else:
logger.debug(
"E-Mail notification not sent, since no EBUIO_MAIL_SECRET_KEY and EBUIO_MAIL_SECRET_HASH set in settingsLocal.py.") | python | def generic_send_mail(sender, dests, subject, message, key, origin='', html_message=False):
"""Generic mail sending function"""
# If no EBUIO Mail settings have been set, then no e-mail shall be sent
if settings.EBUIO_MAIL_SECRET_KEY and settings.EBUIO_MAIL_SECRET_HASH:
headers = {}
if key:
from Crypto.Cipher import AES
hash_key = hashlib.sha512(key + settings.EBUIO_MAIL_SECRET_HASH).hexdigest()[30:42]
encrypter = AES.new(((settings.EBUIO_MAIL_SECRET_KEY) * 32)[:32], AES.MODE_CFB, '87447JEUPEBU4hR!')
encrypted_key = encrypter.encrypt(hash_key + ':' + key)
base64_key = base64.urlsafe_b64encode(encrypted_key)
headers = {'Reply-To': settings.MAIL_SENDER.replace('@', '+' + base64_key + '@')}
msg = EmailMessage(subject, message, sender, dests, headers=headers)
if html_message:
msg.content_subtype = "html" # Main content is now text/html
msg.send(fail_silently=False)
try:
from main.models import MailSend
MailSend(dest=','.join(dests), subject=subject, sender=sender, message=message, origin=origin).save()
except ImportError:
pass
else:
logger.debug(
"E-Mail notification not sent, since no EBUIO_MAIL_SECRET_KEY and EBUIO_MAIL_SECRET_HASH set in settingsLocal.py.") | ['def', 'generic_send_mail', '(', 'sender', ',', 'dests', ',', 'subject', ',', 'message', ',', 'key', ',', 'origin', '=', "''", ',', 'html_message', '=', 'False', ')', ':', '# If no EBUIO Mail settings have been set, then no e-mail shall be sent', 'if', 'settings', '.', 'EBUIO_MAIL_SECRET_KEY', 'and', 'settings', '.', 'EBUIO_MAIL_SECRET_HASH', ':', 'headers', '=', '{', '}', 'if', 'key', ':', 'from', 'Crypto', '.', 'Cipher', 'import', 'AES', 'hash_key', '=', 'hashlib', '.', 'sha512', '(', 'key', '+', 'settings', '.', 'EBUIO_MAIL_SECRET_HASH', ')', '.', 'hexdigest', '(', ')', '[', '30', ':', '42', ']', 'encrypter', '=', 'AES', '.', 'new', '(', '(', '(', 'settings', '.', 'EBUIO_MAIL_SECRET_KEY', ')', '*', '32', ')', '[', ':', '32', ']', ',', 'AES', '.', 'MODE_CFB', ',', "'87447JEUPEBU4hR!'", ')', 'encrypted_key', '=', 'encrypter', '.', 'encrypt', '(', 'hash_key', '+', "':'", '+', 'key', ')', 'base64_key', '=', 'base64', '.', 'urlsafe_b64encode', '(', 'encrypted_key', ')', 'headers', '=', '{', "'Reply-To'", ':', 'settings', '.', 'MAIL_SENDER', '.', 'replace', '(', "'@'", ',', "'+'", '+', 'base64_key', '+', "'@'", ')', '}', 'msg', '=', 'EmailMessage', '(', 'subject', ',', 'message', ',', 'sender', ',', 'dests', ',', 'headers', '=', 'headers', ')', 'if', 'html_message', ':', 'msg', '.', 'content_subtype', '=', '"html"', '# Main content is now text/html', 'msg', '.', 'send', '(', 'fail_silently', '=', 'False', ')', 'try', ':', 'from', 'main', '.', 'models', 'import', 'MailSend', 'MailSend', '(', 'dest', '=', "','", '.', 'join', '(', 'dests', ')', ',', 'subject', '=', 'subject', ',', 'sender', '=', 'sender', ',', 'message', '=', 'message', ',', 'origin', '=', 'origin', ')', '.', 'save', '(', ')', 'except', 'ImportError', ':', 'pass', 'else', ':', 'logger', '.', 'debug', '(', '"E-Mail notification not sent, since no EBUIO_MAIL_SECRET_KEY and EBUIO_MAIL_SECRET_HASH set in settingsLocal.py."', ')'] | Generic mail sending function | ['Generic', 'mail', 'sending', 'function'] | train | https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L1213-L1246 |
3,168 | arviz-devs/arviz | arviz/data/datasets.py | list_datasets | def list_datasets():
"""Get a string representation of all available datasets with descriptions."""
lines = []
for name, resource in itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()):
if isinstance(resource, LocalFileMetadata):
location = "local: {}".format(resource.filename)
elif isinstance(resource, RemoteFileMetadata):
location = "remote: {}".format(resource.url)
else:
location = "unknown"
lines.append("{}\n{}\n{}\n{}".format(name, "=" * len(name), resource.description, location))
return "\n\n{}\n\n".format(10 * "-").join(lines) | python | def list_datasets():
"""Get a string representation of all available datasets with descriptions."""
lines = []
for name, resource in itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()):
if isinstance(resource, LocalFileMetadata):
location = "local: {}".format(resource.filename)
elif isinstance(resource, RemoteFileMetadata):
location = "remote: {}".format(resource.url)
else:
location = "unknown"
lines.append("{}\n{}\n{}\n{}".format(name, "=" * len(name), resource.description, location))
return "\n\n{}\n\n".format(10 * "-").join(lines) | ['def', 'list_datasets', '(', ')', ':', 'lines', '=', '[', ']', 'for', 'name', ',', 'resource', 'in', 'itertools', '.', 'chain', '(', 'LOCAL_DATASETS', '.', 'items', '(', ')', ',', 'REMOTE_DATASETS', '.', 'items', '(', ')', ')', ':', 'if', 'isinstance', '(', 'resource', ',', 'LocalFileMetadata', ')', ':', 'location', '=', '"local: {}"', '.', 'format', '(', 'resource', '.', 'filename', ')', 'elif', 'isinstance', '(', 'resource', ',', 'RemoteFileMetadata', ')', ':', 'location', '=', '"remote: {}"', '.', 'format', '(', 'resource', '.', 'url', ')', 'else', ':', 'location', '=', '"unknown"', 'lines', '.', 'append', '(', '"{}\\n{}\\n{}\\n{}"', '.', 'format', '(', 'name', ',', '"="', '*', 'len', '(', 'name', ')', ',', 'resource', '.', 'description', ',', 'location', ')', ')', 'return', '"\\n\\n{}\\n\\n"', '.', 'format', '(', '10', '*', '"-"', ')', '.', 'join', '(', 'lines', ')'] | Get a string representation of all available datasets with descriptions. | ['Get', 'a', 'string', 'representation', 'of', 'all', 'available', 'datasets', 'with', 'descriptions', '.'] | train | https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/datasets.py#L170-L183 |
3,169 | axialmarket/fsq | fsq/path.py | down | def down(p_queue, host=None):
'''Construct a path to the down file for a queue'''
if host is not None:
return _path(_c.FSQ_DOWN, root=_path(host, root=hosts(p_queue)))
return _path(p_queue, _c.FSQ_DOWN) | python | def down(p_queue, host=None):
'''Construct a path to the down file for a queue'''
if host is not None:
return _path(_c.FSQ_DOWN, root=_path(host, root=hosts(p_queue)))
return _path(p_queue, _c.FSQ_DOWN) | ['def', 'down', '(', 'p_queue', ',', 'host', '=', 'None', ')', ':', 'if', 'host', 'is', 'not', 'None', ':', 'return', '_path', '(', '_c', '.', 'FSQ_DOWN', ',', 'root', '=', '_path', '(', 'host', ',', 'root', '=', 'hosts', '(', 'p_queue', ')', ')', ')', 'return', '_path', '(', 'p_queue', ',', '_c', '.', 'FSQ_DOWN', ')'] | Construct a path to the down file for a queue | ['Construct', 'a', 'path', 'to', 'the', 'down', 'file', 'for', 'a', 'queue'] | train | https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/path.py#L64-L68 |
3,170 | Alignak-monitoring/alignak | alignak/external_command.py | ExternalCommandManager.delay_svc_notification | def delay_svc_notification(self, service, notification_time):
"""Modify service first notification delay
Format of the line that triggers function call::
DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time>
:param service: service to edit
:type service: alignak.objects.service.Service
:param notification_time: new value to set
:type notification_time:
:return: None
"""
service.first_notification_delay = notification_time
self.send_an_element(service.get_update_status_brok()) | python | def delay_svc_notification(self, service, notification_time):
"""Modify service first notification delay
Format of the line that triggers function call::
DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time>
:param service: service to edit
:type service: alignak.objects.service.Service
:param notification_time: new value to set
:type notification_time:
:return: None
"""
service.first_notification_delay = notification_time
self.send_an_element(service.get_update_status_brok()) | ['def', 'delay_svc_notification', '(', 'self', ',', 'service', ',', 'notification_time', ')', ':', 'service', '.', 'first_notification_delay', '=', 'notification_time', 'self', '.', 'send_an_element', '(', 'service', '.', 'get_update_status_brok', '(', ')', ')'] | Modify service first notification delay
Format of the line that triggers function call::
DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time>
:param service: service to edit
:type service: alignak.objects.service.Service
:param notification_time: new value to set
:type notification_time:
:return: None | ['Modify', 'service', 'first', 'notification', 'delay', 'Format', 'of', 'the', 'line', 'that', 'triggers', 'function', 'call', '::'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1701-L1714 |
3,171 | realitix/vulkan | generator/generate.py | format_vk | def format_vk(vk):
"""Format vk before using it"""
# Force extension require to be a list
for ext in get_extensions_filtered(vk):
req = ext['require']
if not isinstance(req, list):
ext['require'] = [req] | python | def format_vk(vk):
"""Format vk before using it"""
# Force extension require to be a list
for ext in get_extensions_filtered(vk):
req = ext['require']
if not isinstance(req, list):
ext['require'] = [req] | ['def', 'format_vk', '(', 'vk', ')', ':', '# Force extension require to be a list', 'for', 'ext', 'in', 'get_extensions_filtered', '(', 'vk', ')', ':', 'req', '=', 'ext', '[', "'require'", ']', 'if', 'not', 'isinstance', '(', 'req', ',', 'list', ')', ':', 'ext', '[', "'require'", ']', '=', '[', 'req', ']'] | Format vk before using it | ['Format', 'vk', 'before', 'using', 'it'] | train | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L496-L503 |
3,172 | mila/pyoo | pyoo.py | VerticalCellRange.__set_values | def __set_values(self, values):
"""
Sets values in this cell range from an iterable.
This is much more effective than writing cell values one by one.
"""
array = tuple((self._clean_value(v),) for v in values)
self._get_target().setDataArray(array) | python | def __set_values(self, values):
"""
Sets values in this cell range from an iterable.
This is much more effective than writing cell values one by one.
"""
array = tuple((self._clean_value(v),) for v in values)
self._get_target().setDataArray(array) | ['def', '__set_values', '(', 'self', ',', 'values', ')', ':', 'array', '=', 'tuple', '(', '(', 'self', '.', '_clean_value', '(', 'v', ')', ',', ')', 'for', 'v', 'in', 'values', ')', 'self', '.', '_get_target', '(', ')', '.', 'setDataArray', '(', 'array', ')'] | Sets values in this cell range from an iterable.
This is much more effective than writing cell values one by one. | ['Sets', 'values', 'in', 'this', 'cell', 'range', 'from', 'an', 'iterable', '.'] | train | https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1519-L1526 |
3,173 | DecBayComp/RWA-python | rwa/generic.py | namedtuple_storable | def namedtuple_storable(namedtuple, *args, **kwargs):
"""
Storable factory for named tuples.
"""
return default_storable(namedtuple, namedtuple._fields, *args, **kwargs) | python | def namedtuple_storable(namedtuple, *args, **kwargs):
"""
Storable factory for named tuples.
"""
return default_storable(namedtuple, namedtuple._fields, *args, **kwargs) | ['def', 'namedtuple_storable', '(', 'namedtuple', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'default_storable', '(', 'namedtuple', ',', 'namedtuple', '.', '_fields', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Storable factory for named tuples. | ['Storable', 'factory', 'for', 'named', 'tuples', '.'] | train | https://github.com/DecBayComp/RWA-python/blob/734a52e15a0e8c244d84d74acf3fd64721074732/rwa/generic.py#L1000-L1004 |
3,174 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_link.py | LinkModule.report_altitude | def report_altitude(self, altitude):
'''possibly report a new altitude'''
master = self.master
if getattr(self.console, 'ElevationMap', None) is not None and self.mpstate.settings.basealt != 0:
lat = master.field('GLOBAL_POSITION_INT', 'lat', 0)*1.0e-7
lon = master.field('GLOBAL_POSITION_INT', 'lon', 0)*1.0e-7
alt1 = self.console.ElevationMap.GetElevation(lat, lon)
if alt1 is not None:
alt2 = self.mpstate.settings.basealt
altitude += alt2 - alt1
self.status.altitude = altitude
altitude_converted = self.height_convert_units(altitude)
if (int(self.mpstate.settings.altreadout) > 0 and
math.fabs(altitude_converted - self.last_altitude_announce) >=
int(self.settings.altreadout)):
self.last_altitude_announce = altitude_converted
rounded_alt = int(self.settings.altreadout) * ((self.settings.altreadout/2 + int(altitude_converted)) / int(self.settings.altreadout))
self.say("height %u" % rounded_alt, priority='notification') | python | def report_altitude(self, altitude):
'''possibly report a new altitude'''
master = self.master
if getattr(self.console, 'ElevationMap', None) is not None and self.mpstate.settings.basealt != 0:
lat = master.field('GLOBAL_POSITION_INT', 'lat', 0)*1.0e-7
lon = master.field('GLOBAL_POSITION_INT', 'lon', 0)*1.0e-7
alt1 = self.console.ElevationMap.GetElevation(lat, lon)
if alt1 is not None:
alt2 = self.mpstate.settings.basealt
altitude += alt2 - alt1
self.status.altitude = altitude
altitude_converted = self.height_convert_units(altitude)
if (int(self.mpstate.settings.altreadout) > 0 and
math.fabs(altitude_converted - self.last_altitude_announce) >=
int(self.settings.altreadout)):
self.last_altitude_announce = altitude_converted
rounded_alt = int(self.settings.altreadout) * ((self.settings.altreadout/2 + int(altitude_converted)) / int(self.settings.altreadout))
self.say("height %u" % rounded_alt, priority='notification') | ['def', 'report_altitude', '(', 'self', ',', 'altitude', ')', ':', 'master', '=', 'self', '.', 'master', 'if', 'getattr', '(', 'self', '.', 'console', ',', "'ElevationMap'", ',', 'None', ')', 'is', 'not', 'None', 'and', 'self', '.', 'mpstate', '.', 'settings', '.', 'basealt', '!=', '0', ':', 'lat', '=', 'master', '.', 'field', '(', "'GLOBAL_POSITION_INT'", ',', "'lat'", ',', '0', ')', '*', '1.0e-7', 'lon', '=', 'master', '.', 'field', '(', "'GLOBAL_POSITION_INT'", ',', "'lon'", ',', '0', ')', '*', '1.0e-7', 'alt1', '=', 'self', '.', 'console', '.', 'ElevationMap', '.', 'GetElevation', '(', 'lat', ',', 'lon', ')', 'if', 'alt1', 'is', 'not', 'None', ':', 'alt2', '=', 'self', '.', 'mpstate', '.', 'settings', '.', 'basealt', 'altitude', '+=', 'alt2', '-', 'alt1', 'self', '.', 'status', '.', 'altitude', '=', 'altitude', 'altitude_converted', '=', 'self', '.', 'height_convert_units', '(', 'altitude', ')', 'if', '(', 'int', '(', 'self', '.', 'mpstate', '.', 'settings', '.', 'altreadout', ')', '>', '0', 'and', 'math', '.', 'fabs', '(', 'altitude_converted', '-', 'self', '.', 'last_altitude_announce', ')', '>=', 'int', '(', 'self', '.', 'settings', '.', 'altreadout', ')', ')', ':', 'self', '.', 'last_altitude_announce', '=', 'altitude_converted', 'rounded_alt', '=', 'int', '(', 'self', '.', 'settings', '.', 'altreadout', ')', '*', '(', '(', 'self', '.', 'settings', '.', 'altreadout', '/', '2', '+', 'int', '(', 'altitude_converted', ')', ')', '/', 'int', '(', 'self', '.', 'settings', '.', 'altreadout', ')', ')', 'self', '.', 'say', '(', '"height %u"', '%', 'rounded_alt', ',', 'priority', '=', "'notification'", ')'] | possibly report a new altitude | ['possibly', 'report', 'a', 'new', 'altitude'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_link.py#L354-L371 |
3,175 | pyviz/param | param/__init__.py | ObjectSelector._validate | def _validate(self, val):
"""
val must be None or one of the objects in self.objects.
"""
if not self.check_on_set:
self._ensure_value_is_in_objects(val)
return
if not (val in self.objects or (self.allow_None and val is None)):
# CEBALERT: can be called before __init__ has called
# super's __init__, i.e. before attrib_name has been set.
try:
attrib_name = self.name
except AttributeError:
attrib_name = ""
items = []
limiter = ']'
length = 0
for item in self.objects:
string = str(item)
length += len(string)
if length < 200:
items.append(string)
else:
limiter = ', ...]'
break
items = '[' + ', '.join(items) + limiter
raise ValueError("%s not in Parameter %s's list of possible objects, "
"valid options include %s"%(val,attrib_name, items)) | python | def _validate(self, val):
"""
val must be None or one of the objects in self.objects.
"""
if not self.check_on_set:
self._ensure_value_is_in_objects(val)
return
if not (val in self.objects or (self.allow_None and val is None)):
# CEBALERT: can be called before __init__ has called
# super's __init__, i.e. before attrib_name has been set.
try:
attrib_name = self.name
except AttributeError:
attrib_name = ""
items = []
limiter = ']'
length = 0
for item in self.objects:
string = str(item)
length += len(string)
if length < 200:
items.append(string)
else:
limiter = ', ...]'
break
items = '[' + ', '.join(items) + limiter
raise ValueError("%s not in Parameter %s's list of possible objects, "
"valid options include %s"%(val,attrib_name, items)) | ['def', '_validate', '(', 'self', ',', 'val', ')', ':', 'if', 'not', 'self', '.', 'check_on_set', ':', 'self', '.', '_ensure_value_is_in_objects', '(', 'val', ')', 'return', 'if', 'not', '(', 'val', 'in', 'self', '.', 'objects', 'or', '(', 'self', '.', 'allow_None', 'and', 'val', 'is', 'None', ')', ')', ':', '# CEBALERT: can be called before __init__ has called', "# super's __init__, i.e. before attrib_name has been set.", 'try', ':', 'attrib_name', '=', 'self', '.', 'name', 'except', 'AttributeError', ':', 'attrib_name', '=', '""', 'items', '=', '[', ']', 'limiter', '=', "']'", 'length', '=', '0', 'for', 'item', 'in', 'self', '.', 'objects', ':', 'string', '=', 'str', '(', 'item', ')', 'length', '+=', 'len', '(', 'string', ')', 'if', 'length', '<', '200', ':', 'items', '.', 'append', '(', 'string', ')', 'else', ':', 'limiter', '=', "', ...]'", 'break', 'items', '=', "'['", '+', "', '", '.', 'join', '(', 'items', ')', '+', 'limiter', 'raise', 'ValueError', '(', '"%s not in Parameter %s\'s list of possible objects, "', '"valid options include %s"', '%', '(', 'val', ',', 'attrib_name', ',', 'items', ')', ')'] | val must be None or one of the objects in self.objects. | ['val', 'must', 'be', 'None', 'or', 'one', 'of', 'the', 'objects', 'in', 'self', '.', 'objects', '.'] | train | https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/param/__init__.py#L1206-L1235 |
3,176 | incf-nidash/nidmresults | nidmresults/objects/contrast.py | ContrastMap.export | def export(self, nidm_version, export_dir):
"""
Create prov graph.
"""
# Contrast Map entity
atts = (
(PROV['type'], NIDM_CONTRAST_MAP),
(NIDM_CONTRAST_NAME, self.name))
if not self.isderfrommap:
atts = atts + (
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),)
if self.label is not None:
atts = atts + (
(PROV['label'], self.label),)
if self.name is not None:
atts = atts + (
(NIDM_CONTRAST_NAME, self.name),)
# Parameter estimate entity
self.add_attributes(atts) | python | def export(self, nidm_version, export_dir):
"""
Create prov graph.
"""
# Contrast Map entity
atts = (
(PROV['type'], NIDM_CONTRAST_MAP),
(NIDM_CONTRAST_NAME, self.name))
if not self.isderfrommap:
atts = atts + (
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),)
if self.label is not None:
atts = atts + (
(PROV['label'], self.label),)
if self.name is not None:
atts = atts + (
(NIDM_CONTRAST_NAME, self.name),)
# Parameter estimate entity
self.add_attributes(atts) | ['def', 'export', '(', 'self', ',', 'nidm_version', ',', 'export_dir', ')', ':', '# Contrast Map entity', 'atts', '=', '(', '(', 'PROV', '[', "'type'", ']', ',', 'NIDM_CONTRAST_MAP', ')', ',', '(', 'NIDM_CONTRAST_NAME', ',', 'self', '.', 'name', ')', ')', 'if', 'not', 'self', '.', 'isderfrommap', ':', 'atts', '=', 'atts', '+', '(', '(', 'NIDM_IN_COORDINATE_SPACE', ',', 'self', '.', 'coord_space', '.', 'id', ')', ',', ')', 'if', 'self', '.', 'label', 'is', 'not', 'None', ':', 'atts', '=', 'atts', '+', '(', '(', 'PROV', '[', "'label'", ']', ',', 'self', '.', 'label', ')', ',', ')', 'if', 'self', '.', 'name', 'is', 'not', 'None', ':', 'atts', '=', 'atts', '+', '(', '(', 'NIDM_CONTRAST_NAME', ',', 'self', '.', 'name', ')', ',', ')', '# Parameter estimate entity', 'self', '.', 'add_attributes', '(', 'atts', ')'] | Create prov graph. | ['Create', 'prov', 'graph', '.'] | train | https://github.com/incf-nidash/nidmresults/blob/438f7cce6abc4a4379b629bd76f4d427891e033f/nidmresults/objects/contrast.py#L181-L203 |
3,177 | SteveMcGrath/pySecurityCenter | examples/sc4/csv_gen/sccsv/generator.py | gen_csv | def gen_csv(sc, filename, field_list, source, filters):
'''csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress
'''
# First thing we need to do is initialize the csvfile and build the header
# for the file.
datafile = open(filename, 'wb')
csvfile = csv.writer(datafile)
header = []
for field in field_list:
header.append(fields.fields[field]['name'])
csvfile.writerow(header)
debug.write('Generating %s: ' % filename)
# Next we will run the Security Center query. because this could be a
# potentially very large dataset that is returned, we don't want to run out
# of memory. To get around this, we will pass the query function the writer
# function with the appropriate fields so that it is parsed inline.
fparams = {'fobj': csvfile, 'flist': field_list}
sc.query('vulndetails', source=source,
func=writer, func_params=fparams, **filters)
debug.write('\n')
# Lastly we need to close the datafile.
datafile.close() | python | def gen_csv(sc, filename, field_list, source, filters):
'''csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress
'''
# First thing we need to do is initialize the csvfile and build the header
# for the file.
datafile = open(filename, 'wb')
csvfile = csv.writer(datafile)
header = []
for field in field_list:
header.append(fields.fields[field]['name'])
csvfile.writerow(header)
debug.write('Generating %s: ' % filename)
# Next we will run the Security Center query. because this could be a
# potentially very large dataset that is returned, we don't want to run out
# of memory. To get around this, we will pass the query function the writer
# function with the appropriate fields so that it is parsed inline.
fparams = {'fobj': csvfile, 'flist': field_list}
sc.query('vulndetails', source=source,
func=writer, func_params=fparams, **filters)
debug.write('\n')
# Lastly we need to close the datafile.
datafile.close() | ['def', 'gen_csv', '(', 'sc', ',', 'filename', ',', 'field_list', ',', 'source', ',', 'filters', ')', ':', '# First thing we need to do is initialize the csvfile and build the header', '# for the file.', 'datafile', '=', 'open', '(', 'filename', ',', "'wb'", ')', 'csvfile', '=', 'csv', '.', 'writer', '(', 'datafile', ')', 'header', '=', '[', ']', 'for', 'field', 'in', 'field_list', ':', 'header', '.', 'append', '(', 'fields', '.', 'fields', '[', 'field', ']', '[', "'name'", ']', ')', 'csvfile', '.', 'writerow', '(', 'header', ')', 'debug', '.', 'write', '(', "'Generating %s: '", '%', 'filename', ')', '# Next we will run the Security Center query. because this could be a', "# potentially very large dataset that is returned, we don't want to run out", '# of memory. To get around this, we will pass the query function the writer', '# function with the appropriate fields so that it is parsed inline.', 'fparams', '=', '{', "'fobj'", ':', 'csvfile', ',', "'flist'", ':', 'field_list', '}', 'sc', '.', 'query', '(', "'vulndetails'", ',', 'source', '=', 'source', ',', 'func', '=', 'writer', ',', 'func_params', '=', 'fparams', ',', '*', '*', 'filters', ')', 'debug', '.', 'write', '(', "'\\n'", ')', '# Lastly we need to close the datafile.', 'datafile', '.', 'close', '(', ')'] | csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress | ['csv', 'SecurityCenterObj', 'AssetListName', 'CSVFields', 'EmailAddress'] | train | https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/examples/sc4/csv_gen/sccsv/generator.py#L46-L70 |
3,178 | i3visio/osrframework | osrframework/utils/platforms.py | Platform.do_searchfy | def do_searchfy(self, query, **kwargs):
"""
Verifying a searchfy query in this platform.
This might be redefined in any class inheriting from Platform.
Performing additional processing may be possible by iterating the requested profiles
to extract more entities from the URI would be slow. Sample code may be:
if kwargs["process"]:
r["attributes"] += json.loads(self.getInfo(process=True, mode="usufy", qURI=uri, query=i))
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
"""
results = []
print("[*] Launching search using the {} module...".format(self.__class__.__name__))
test = self.check_searchfy(query, kwargs)
if test:
try:
# Recovering all the found aliases in the traditional way
ids = re.findall(self.searchfyAliasRegexp, test, re.DOTALL)
except:
# Version 2 of the wrappers
verifier = self.modes.get(mode)
if verifier and verifier.get("alias_extractor"):
ids = re.findall(verifier.get("alias_extractor"), test, re.DOTALL)
else:
return []
for j, alias in enumerate(ids):
r = {
"type": "i3visio.profile",
"value": self.platformName + " - " + alias,
"attributes": []
}
# Appending platform name
aux = {}
aux["type"] = "i3visio.platform"
aux["value"] = self.platformName
aux["attributes"] = []
r["attributes"].append(aux)
# Appending the alias
aux = {}
aux["type"] = "i3visio.alias"
aux["value"] = alias
aux["attributes"] = []
r["attributes"].append(aux)
# Appending the query performed to grab this items
aux = {}
aux["type"] = "i3visio.search"
aux["value"] = query
aux["attributes"] = []
r["attributes"].append(aux)
# Appending platform URI
try:
aux = {}
aux["type"] = "i3visio.uri"
uri = self.createURL(word=alias, mode="usufy")
aux["value"] = uri
aux["attributes"] = []
r["attributes"].append(aux)
except NameError:
pass
# Appending the result to results: in this case only one profile will be grabbed"""
results.append(r)
return results | python | def do_searchfy(self, query, **kwargs):
"""
Verifying a searchfy query in this platform.
This might be redefined in any class inheriting from Platform.
Performing additional processing may be possible by iterating the requested profiles
to extract more entities from the URI would be slow. Sample code may be:
if kwargs["process"]:
r["attributes"] += json.loads(self.getInfo(process=True, mode="usufy", qURI=uri, query=i))
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
"""
results = []
print("[*] Launching search using the {} module...".format(self.__class__.__name__))
test = self.check_searchfy(query, kwargs)
if test:
try:
# Recovering all the found aliases in the traditional way
ids = re.findall(self.searchfyAliasRegexp, test, re.DOTALL)
except:
# Version 2 of the wrappers
verifier = self.modes.get(mode)
if verifier and verifier.get("alias_extractor"):
ids = re.findall(verifier.get("alias_extractor"), test, re.DOTALL)
else:
return []
for j, alias in enumerate(ids):
r = {
"type": "i3visio.profile",
"value": self.platformName + " - " + alias,
"attributes": []
}
# Appending platform name
aux = {}
aux["type"] = "i3visio.platform"
aux["value"] = self.platformName
aux["attributes"] = []
r["attributes"].append(aux)
# Appending the alias
aux = {}
aux["type"] = "i3visio.alias"
aux["value"] = alias
aux["attributes"] = []
r["attributes"].append(aux)
# Appending the query performed to grab this items
aux = {}
aux["type"] = "i3visio.search"
aux["value"] = query
aux["attributes"] = []
r["attributes"].append(aux)
# Appending platform URI
try:
aux = {}
aux["type"] = "i3visio.uri"
uri = self.createURL(word=alias, mode="usufy")
aux["value"] = uri
aux["attributes"] = []
r["attributes"].append(aux)
except NameError:
pass
# Appending the result to results: in this case only one profile will be grabbed"""
results.append(r)
return results | ['def', 'do_searchfy', '(', 'self', ',', 'query', ',', '*', '*', 'kwargs', ')', ':', 'results', '=', '[', ']', 'print', '(', '"[*] Launching search using the {} module..."', '.', 'format', '(', 'self', '.', '__class__', '.', '__name__', ')', ')', 'test', '=', 'self', '.', 'check_searchfy', '(', 'query', ',', 'kwargs', ')', 'if', 'test', ':', 'try', ':', '# Recovering all the found aliases in the traditional way', 'ids', '=', 're', '.', 'findall', '(', 'self', '.', 'searchfyAliasRegexp', ',', 'test', ',', 're', '.', 'DOTALL', ')', 'except', ':', '# Version 2 of the wrappers', 'verifier', '=', 'self', '.', 'modes', '.', 'get', '(', 'mode', ')', 'if', 'verifier', 'and', 'verifier', '.', 'get', '(', '"alias_extractor"', ')', ':', 'ids', '=', 're', '.', 'findall', '(', 'verifier', '.', 'get', '(', '"alias_extractor"', ')', ',', 'test', ',', 're', '.', 'DOTALL', ')', 'else', ':', 'return', '[', ']', 'for', 'j', ',', 'alias', 'in', 'enumerate', '(', 'ids', ')', ':', 'r', '=', '{', '"type"', ':', '"i3visio.profile"', ',', '"value"', ':', 'self', '.', 'platformName', '+', '" - "', '+', 'alias', ',', '"attributes"', ':', '[', ']', '}', '# Appending platform name', 'aux', '=', '{', '}', 'aux', '[', '"type"', ']', '=', '"i3visio.platform"', 'aux', '[', '"value"', ']', '=', 'self', '.', 'platformName', 'aux', '[', '"attributes"', ']', '=', '[', ']', 'r', '[', '"attributes"', ']', '.', 'append', '(', 'aux', ')', '# Appending the alias', 'aux', '=', '{', '}', 'aux', '[', '"type"', ']', '=', '"i3visio.alias"', 'aux', '[', '"value"', ']', '=', 'alias', 'aux', '[', '"attributes"', ']', '=', '[', ']', 'r', '[', '"attributes"', ']', '.', 'append', '(', 'aux', ')', '# Appending the query performed to grab this items', 'aux', '=', '{', '}', 'aux', '[', '"type"', ']', '=', '"i3visio.search"', 'aux', '[', '"value"', ']', '=', 'query', 'aux', '[', '"attributes"', ']', '=', '[', ']', 'r', '[', '"attributes"', ']', '.', 'append', '(', 'aux', ')', '# Appending platform URI', 'try', ':', 'aux', '=', '{', '}', 'aux', '[', '"type"', ']', '=', '"i3visio.uri"', 'uri', '=', 'self', '.', 'createURL', '(', 'word', '=', 'alias', ',', 'mode', '=', '"usufy"', ')', 'aux', '[', '"value"', ']', '=', 'uri', 'aux', '[', '"attributes"', ']', '=', '[', ']', 'r', '[', '"attributes"', ']', '.', 'append', '(', 'aux', ')', 'except', 'NameError', ':', 'pass', '# Appending the result to results: in this case only one profile will be grabbed"""', 'results', '.', 'append', '(', 'r', ')', 'return', 'results'] | Verifying a searchfy query in this platform.
This might be redefined in any class inheriting from Platform.
Performing additional processing may be possible by iterating the requested profiles
to extract more entities from the URI would be slow. Sample code may be:
if kwargs["process"]:
r["attributes"] += json.loads(self.getInfo(process=True, mode="usufy", qURI=uri, query=i))
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended. | ['Verifying', 'a', 'searchfy', 'query', 'in', 'this', 'platform', '.'] | train | https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platforms.py#L439-L517 |
3,179 | HewlettPackard/python-hpOneView | hpOneView/oneview_client.py | OneViewClient.appliance_node_information | def appliance_node_information(self):
"""
Gets the ApplianceNodeInformation API client.
Returns:
ApplianceNodeInformation:
"""
if not self.__appliance_node_information:
self.__appliance_node_information = ApplianceNodeInformation(self.__connection)
return self.__appliance_node_information | python | def appliance_node_information(self):
"""
Gets the ApplianceNodeInformation API client.
Returns:
ApplianceNodeInformation:
"""
if not self.__appliance_node_information:
self.__appliance_node_information = ApplianceNodeInformation(self.__connection)
return self.__appliance_node_information | ['def', 'appliance_node_information', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '__appliance_node_information', ':', 'self', '.', '__appliance_node_information', '=', 'ApplianceNodeInformation', '(', 'self', '.', '__connection', ')', 'return', 'self', '.', '__appliance_node_information'] | Gets the ApplianceNodeInformation API client.
Returns:
ApplianceNodeInformation: | ['Gets', 'the', 'ApplianceNodeInformation', 'API', 'client', '.'] | train | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L1153-L1162 |
3,180 | CalebBell/fluids | fluids/compressible.py | is_critical_flow | def is_critical_flow(P1, P2, k):
r'''Determines if a flow of a fluid driven by pressure gradient
P1 - P2 is critical, for a fluid with the given isentropic coefficient.
This function calculates critical flow pressure, and checks if this is
larger than P2. If so, the flow is critical and choked.
Parameters
----------
P1 : float
Higher, source pressure [Pa]
P2 : float
Lower, downstream pressure [Pa]
k : float
Isentropic coefficient []
Returns
-------
flowtype : bool
True if the flow is choked; otherwise False
Notes
-----
Assumes isentropic flow. Uses P_critical_flow function.
Examples
--------
Examples 1-2 from API 520.
>>> is_critical_flow(670E3, 532E3, 1.11)
False
>>> is_critical_flow(670E3, 101E3, 1.11)
True
References
----------
.. [1] API. 2014. API 520 - Part 1 Sizing, Selection, and Installation of
Pressure-relieving Devices, Part I - Sizing and Selection, 9E.
'''
Pcf = P_critical_flow(P1, k)
return Pcf > P2 | python | def is_critical_flow(P1, P2, k):
r'''Determines if a flow of a fluid driven by pressure gradient
P1 - P2 is critical, for a fluid with the given isentropic coefficient.
This function calculates critical flow pressure, and checks if this is
larger than P2. If so, the flow is critical and choked.
Parameters
----------
P1 : float
Higher, source pressure [Pa]
P2 : float
Lower, downstream pressure [Pa]
k : float
Isentropic coefficient []
Returns
-------
flowtype : bool
True if the flow is choked; otherwise False
Notes
-----
Assumes isentropic flow. Uses P_critical_flow function.
Examples
--------
Examples 1-2 from API 520.
>>> is_critical_flow(670E3, 532E3, 1.11)
False
>>> is_critical_flow(670E3, 101E3, 1.11)
True
References
----------
.. [1] API. 2014. API 520 - Part 1 Sizing, Selection, and Installation of
Pressure-relieving Devices, Part I - Sizing and Selection, 9E.
'''
Pcf = P_critical_flow(P1, k)
return Pcf > P2 | ['def', 'is_critical_flow', '(', 'P1', ',', 'P2', ',', 'k', ')', ':', 'Pcf', '=', 'P_critical_flow', '(', 'P1', ',', 'k', ')', 'return', 'Pcf', '>', 'P2'] | r'''Determines if a flow of a fluid driven by pressure gradient
P1 - P2 is critical, for a fluid with the given isentropic coefficient.
This function calculates critical flow pressure, and checks if this is
larger than P2. If so, the flow is critical and choked.
Parameters
----------
P1 : float
Higher, source pressure [Pa]
P2 : float
Lower, downstream pressure [Pa]
k : float
Isentropic coefficient []
Returns
-------
flowtype : bool
True if the flow is choked; otherwise False
Notes
-----
Assumes isentropic flow. Uses P_critical_flow function.
Examples
--------
Examples 1-2 from API 520.
>>> is_critical_flow(670E3, 532E3, 1.11)
False
>>> is_critical_flow(670E3, 101E3, 1.11)
True
References
----------
.. [1] API. 2014. API 520 - Part 1 Sizing, Selection, and Installation of
Pressure-relieving Devices, Part I - Sizing and Selection, 9E. | ['r', 'Determines', 'if', 'a', 'flow', 'of', 'a', 'fluid', 'driven', 'by', 'pressure', 'gradient', 'P1', '-', 'P2', 'is', 'critical', 'for', 'a', 'fluid', 'with', 'the', 'given', 'isentropic', 'coefficient', '.', 'This', 'function', 'calculates', 'critical', 'flow', 'pressure', 'and', 'checks', 'if', 'this', 'is', 'larger', 'than', 'P2', '.', 'If', 'so', 'the', 'flow', 'is', 'critical', 'and', 'choked', '.'] | train | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/compressible.py#L515-L554 |
3,181 | googleapis/google-cloud-python | bigquery/google/cloud/bigquery/dbapi/cursor.py | _format_operation_list | def _format_operation_list(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %s`` and the output
will be a query like ``SELECT ?``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Sequence[Any]
:param parameters: Sequence of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = ["?" for _ in parameters]
try:
return operation % tuple(formatted_params)
except TypeError as exc:
raise exceptions.ProgrammingError(exc) | python | def _format_operation_list(operation, parameters):
"""Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %s`` and the output
will be a query like ``SELECT ?``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Sequence[Any]
:param parameters: Sequence of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
formatted_params = ["?" for _ in parameters]
try:
return operation % tuple(formatted_params)
except TypeError as exc:
raise exceptions.ProgrammingError(exc) | ['def', '_format_operation_list', '(', 'operation', ',', 'parameters', ')', ':', 'formatted_params', '=', '[', '"?"', 'for', '_', 'in', 'parameters', ']', 'try', ':', 'return', 'operation', '%', 'tuple', '(', 'formatted_params', ')', 'except', 'TypeError', 'as', 'exc', ':', 'raise', 'exceptions', '.', 'ProgrammingError', '(', 'exc', ')'] | Formats parameters in operation in the way BigQuery expects.
The input operation will be a query like ``SELECT %s`` and the output
will be a query like ``SELECT ?``.
:type operation: str
:param operation: A Google BigQuery query string.
:type parameters: Sequence[Any]
:param parameters: Sequence of parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument. | ['Formats', 'parameters', 'in', 'operation', 'in', 'the', 'way', 'BigQuery', 'expects', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/dbapi/cursor.py#L282-L305 |
3,182 | theno/fabsetup | fabsetup/addons.py | load_repo_addons | def load_repo_addons(_globals):
'''Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None
'''
repos_dir = os.path.expanduser('~/.fabsetup-addon-repos')
if os.path.isdir(repos_dir):
basedir, repos, _ = next(os.walk(repos_dir))
for repo_dir in [os.path.join(basedir, repo)
for repo in repos
# omit dot dirs like '.rope'
# or 'fabsetup-theno-termdown.disabled'
if '.' not in repo]:
sys.path.append(repo_dir)
package_name, username = package_username(repo_dir.split('/')[-1])
load_addon(username, package_name, _globals) | python | def load_repo_addons(_globals):
'''Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None
'''
repos_dir = os.path.expanduser('~/.fabsetup-addon-repos')
if os.path.isdir(repos_dir):
basedir, repos, _ = next(os.walk(repos_dir))
for repo_dir in [os.path.join(basedir, repo)
for repo in repos
# omit dot dirs like '.rope'
# or 'fabsetup-theno-termdown.disabled'
if '.' not in repo]:
sys.path.append(repo_dir)
package_name, username = package_username(repo_dir.split('/')[-1])
load_addon(username, package_name, _globals) | ['def', 'load_repo_addons', '(', '_globals', ')', ':', 'repos_dir', '=', 'os', '.', 'path', '.', 'expanduser', '(', "'~/.fabsetup-addon-repos'", ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'repos_dir', ')', ':', 'basedir', ',', 'repos', ',', '_', '=', 'next', '(', 'os', '.', 'walk', '(', 'repos_dir', ')', ')', 'for', 'repo_dir', 'in', '[', 'os', '.', 'path', '.', 'join', '(', 'basedir', ',', 'repo', ')', 'for', 'repo', 'in', 'repos', "# omit dot dirs like '.rope'", "# or 'fabsetup-theno-termdown.disabled'", 'if', "'.'", 'not', 'in', 'repo', ']', ':', 'sys', '.', 'path', '.', 'append', '(', 'repo_dir', ')', 'package_name', ',', 'username', '=', 'package_username', '(', 'repo_dir', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', ')', 'load_addon', '(', 'username', ',', 'package_name', ',', '_globals', ')'] | Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None | ['Load', 'all', 'fabsetup', 'addons', 'which', 'are', 'stored', 'under', '~', '/', '.', 'fabsetup', '-', 'addon', '-', 'repos', 'as', 'git', 'repositories', '.'] | train | https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/addons.py#L126-L145 |
3,183 | OnroerendErfgoed/crabpy_pyramid | crabpy_pyramid/renderers/capakey.py | item_afdeling_adapter | def item_afdeling_adapter(obj, request):
"""
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
"""
return {
'id': obj.id,
'naam': obj.naam,
'gemeente': {
'id': obj.gemeente.id,
'naam': obj.gemeente.naam
},
'centroid': obj.centroid,
'bounding_box': obj.bounding_box
} | python | def item_afdeling_adapter(obj, request):
"""
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
"""
return {
'id': obj.id,
'naam': obj.naam,
'gemeente': {
'id': obj.gemeente.id,
'naam': obj.gemeente.naam
},
'centroid': obj.centroid,
'bounding_box': obj.bounding_box
} | ['def', 'item_afdeling_adapter', '(', 'obj', ',', 'request', ')', ':', 'return', '{', "'id'", ':', 'obj', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'naam', ',', "'gemeente'", ':', '{', "'id'", ':', 'obj', '.', 'gemeente', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'gemeente', '.', 'naam', '}', ',', "'centroid'", ':', 'obj', '.', 'centroid', ',', "'bounding_box'", ':', 'obj', '.', 'bounding_box', '}'] | Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json. | ['Adapter', 'for', 'rendering', 'an', 'object', 'of', ':', 'class', ':', 'crabpy', '.', 'gateway', '.', 'capakey', '.', 'Afdeling', 'to', 'json', '.'] | train | https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/capakey.py#L80-L94 |
3,184 | nutechsoftware/alarmdecoder | examples/usb_device.py | main | def main():
"""
Example application that prints messages from the panel to the terminal.
"""
try:
# Retrieve the first USB device
device = AlarmDecoder(USBDevice.find())
# Set up an event handler and open the device
device.on_message += handle_message
with device.open():
while True:
time.sleep(1)
except Exception as ex:
print('Exception:', ex) | python | def main():
"""
Example application that prints messages from the panel to the terminal.
"""
try:
# Retrieve the first USB device
device = AlarmDecoder(USBDevice.find())
# Set up an event handler and open the device
device.on_message += handle_message
with device.open():
while True:
time.sleep(1)
except Exception as ex:
print('Exception:', ex) | ['def', 'main', '(', ')', ':', 'try', ':', '# Retrieve the first USB device', 'device', '=', 'AlarmDecoder', '(', 'USBDevice', '.', 'find', '(', ')', ')', '# Set up an event handler and open the device', 'device', '.', 'on_message', '+=', 'handle_message', 'with', 'device', '.', 'open', '(', ')', ':', 'while', 'True', ':', 'time', '.', 'sleep', '(', '1', ')', 'except', 'Exception', 'as', 'ex', ':', 'print', '(', "'Exception:'", ',', 'ex', ')'] | Example application that prints messages from the panel to the terminal. | ['Example', 'application', 'that', 'prints', 'messages', 'from', 'the', 'panel', 'to', 'the', 'terminal', '.'] | train | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/examples/usb_device.py#L5-L20 |
3,185 | horejsek/python-webdriverwrapper | webdriverwrapper/info.py | WebdriverWrapperInfoMixin.check_expected_infos | def check_expected_infos(self, test_method):
"""
This method is called after each test. It will read decorated
informations and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`.
"""
f = lambda key, default=[]: getattr(test_method, key, default)
expected_info_messages = f(EXPECTED_INFO_MESSAGES)
allowed_info_messages = f(ALLOWED_INFO_MESSAGES)
self.check_infos(expected_info_messages, allowed_info_messages) | python | def check_expected_infos(self, test_method):
"""
This method is called after each test. It will read decorated
informations and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`.
"""
f = lambda key, default=[]: getattr(test_method, key, default)
expected_info_messages = f(EXPECTED_INFO_MESSAGES)
allowed_info_messages = f(ALLOWED_INFO_MESSAGES)
self.check_infos(expected_info_messages, allowed_info_messages) | ['def', 'check_expected_infos', '(', 'self', ',', 'test_method', ')', ':', 'f', '=', 'lambda', 'key', ',', 'default', '=', '[', ']', ':', 'getattr', '(', 'test_method', ',', 'key', ',', 'default', ')', 'expected_info_messages', '=', 'f', '(', 'EXPECTED_INFO_MESSAGES', ')', 'allowed_info_messages', '=', 'f', '(', 'ALLOWED_INFO_MESSAGES', ')', 'self', '.', 'check_infos', '(', 'expected_info_messages', ',', 'allowed_info_messages', ')'] | This method is called after each test. It will read decorated
informations and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`. | ['This', 'method', 'is', 'called', 'after', 'each', 'test', '.', 'It', 'will', 'read', 'decorated', 'informations', 'and', 'check', 'if', 'there', 'are', 'expected', 'infos', '.'] | train | https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/info.py#L52-L63 |
3,186 | xolox/python-qpass | qpass/__init__.py | PasswordStore.context | def context(self):
"""
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
"""
# Make sure the directory exists.
self.ensure_directory_exists()
# Prepare the environment variables.
environment = {DIRECTORY_VARIABLE: self.directory}
try:
# Try to enable the GPG agent in headless sessions.
environment.update(get_gpg_variables())
except Exception:
# If we failed then let's at least make sure that the
# $GPG_TTY environment variable is set correctly.
environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True))
return LocalContext(directory=self.directory, environment=environment) | python | def context(self):
"""
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
"""
# Make sure the directory exists.
self.ensure_directory_exists()
# Prepare the environment variables.
environment = {DIRECTORY_VARIABLE: self.directory}
try:
# Try to enable the GPG agent in headless sessions.
environment.update(get_gpg_variables())
except Exception:
# If we failed then let's at least make sure that the
# $GPG_TTY environment variable is set correctly.
environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True))
return LocalContext(directory=self.directory, environment=environment) | ['def', 'context', '(', 'self', ')', ':', '# Make sure the directory exists.', 'self', '.', 'ensure_directory_exists', '(', ')', '# Prepare the environment variables.', 'environment', '=', '{', 'DIRECTORY_VARIABLE', ':', 'self', '.', 'directory', '}', 'try', ':', '# Try to enable the GPG agent in headless sessions.', 'environment', '.', 'update', '(', 'get_gpg_variables', '(', ')', ')', 'except', 'Exception', ':', "# If we failed then let's at least make sure that the", '# $GPG_TTY environment variable is set correctly.', 'environment', '.', 'update', '(', 'GPG_TTY', '=', 'execute', '(', '"tty"', ',', 'capture', '=', 'True', ',', 'check', '=', 'False', ',', 'tty', '=', 'True', ',', 'silent', '=', 'True', ')', ')', 'return', 'LocalContext', '(', 'directory', '=', 'self', '.', 'directory', ',', 'environment', '=', 'environment', ')'] | An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist. | ['An', 'execution', 'context', 'created', 'using', ':', 'mod', ':', 'executor', '.', 'contexts', '.'] | train | https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L244-L272 |
3,187 | matthiask/django-cte-forest | cte_forest/models.py | CTENodeManager.as_tree | def as_tree(self, visitor=None, children=None):
""" Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
Optionally, a `visitor` callback can be used, which is responsible
for generating a dictionary representation of a given
:class:`CTENode`. By default, the :meth:`_default_node_visitor` is
used which generates a dictionary with the current node as well as
structural properties. See :meth:`_default_node_visitor` for the
appropriate signature of this callback.
Optionally, a `children` callback can be used, which is responsible
for determining which :class:`CTENode`s are children of each visited
:class:`CTENode`, resulting in a key (by default ``children``) and a
list of children :class:`CTENode` objects, which are then included
in the dictionary representation of the currently-visited node. See
:meth:`_default_node_children` for the appropriate signature of this
callback.
For each node visited, the :meth:`CTENode.as_tree` method is invoked
along with the optional `visitor` and `children` arguments. This
method, if not overridden, will delegate to :meth:`node_as_tree`,
which is responsible for invoking the :meth:`visitor` and
:meth:`children` methods, as well as updating the dictionary
representation of the node with the representation of the children
nodes.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
"""
return [
root.as_tree(visitor=visitor, children=children) for root in self.roots()
] | python | def as_tree(self, visitor=None, children=None):
""" Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
Optionally, a `visitor` callback can be used, which is responsible
for generating a dictionary representation of a given
:class:`CTENode`. By default, the :meth:`_default_node_visitor` is
used which generates a dictionary with the current node as well as
structural properties. See :meth:`_default_node_visitor` for the
appropriate signature of this callback.
Optionally, a `children` callback can be used, which is responsible
for determining which :class:`CTENode`s are children of each visited
:class:`CTENode`, resulting in a key (by default ``children``) and a
list of children :class:`CTENode` objects, which are then included
in the dictionary representation of the currently-visited node. See
:meth:`_default_node_children` for the appropriate signature of this
callback.
For each node visited, the :meth:`CTENode.as_tree` method is invoked
along with the optional `visitor` and `children` arguments. This
method, if not overridden, will delegate to :meth:`node_as_tree`,
which is responsible for invoking the :meth:`visitor` and
:meth:`children` methods, as well as updating the dictionary
representation of the node with the representation of the children
nodes.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
"""
return [
root.as_tree(visitor=visitor, children=children) for root in self.roots()
] | ['def', 'as_tree', '(', 'self', ',', 'visitor', '=', 'None', ',', 'children', '=', 'None', ')', ':', 'return', '[', 'root', '.', 'as_tree', '(', 'visitor', '=', 'visitor', ',', 'children', '=', 'children', ')', 'for', 'root', 'in', 'self', '.', 'roots', '(', ')', ']'] | Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
Optionally, a `visitor` callback can be used, which is responsible
for generating a dictionary representation of a given
:class:`CTENode`. By default, the :meth:`_default_node_visitor` is
used which generates a dictionary with the current node as well as
structural properties. See :meth:`_default_node_visitor` for the
appropriate signature of this callback.
Optionally, a `children` callback can be used, which is responsible
for determining which :class:`CTENode`s are children of each visited
:class:`CTENode`, resulting in a key (by default ``children``) and a
list of children :class:`CTENode` objects, which are then included
in the dictionary representation of the currently-visited node. See
:meth:`_default_node_children` for the appropriate signature of this
callback.
For each node visited, the :meth:`CTENode.as_tree` method is invoked
along with the optional `visitor` and `children` arguments. This
method, if not overridden, will delegate to :meth:`node_as_tree`,
which is responsible for invoking the :meth:`visitor` and
:meth:`children` methods, as well as updating the dictionary
representation of the node with the representation of the children
nodes.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest. | ['Recursively', 'traverses', 'each', 'tree', '(', 'starting', 'from', 'each', 'root', ')', 'in', 'order', 'to', 'generate', 'a', 'dictionary', '-', 'based', 'tree', 'structure', 'of', 'the', 'entire', 'forest', '.', 'Each', 'level', 'of', 'the', 'forest', '/', 'tree', 'is', 'a', 'list', 'of', 'nodes', 'and', 'each', 'node', 'consists', 'of', 'a', 'dictionary', 'representation', 'where', 'the', 'entry', 'children', '(', 'by', 'default', ')', 'consists', 'of', 'a', 'list', 'of', 'dictionary', 'representations', 'of', 'its', 'children', '.'] | train | https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L604-L645 |
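Editor's note: the row above documents CTENodeManager.as_tree(visitor=None, children=None) from django-cte-forest. Below is a minimal usage sketch, not taken from the record: the Category model, its field, and the exact import of CTENode are assumptions for illustration; only the manager method and its signature come from the row itself.

from django.db import models
from cte_forest.models import CTENode  # assumed import path (cte_forest/models.py)

class Category(CTENode):
    # hypothetical model; CTENode supplies the parent link and the CTE machinery
    name = models.CharField(max_length=128)

# Build the whole forest as nested dictionaries; each node dict carries a
# 'children' list produced by the default visitor/children callbacks.
forest = Category.objects.as_tree()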
3,188 | bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.list | def list(self):
'''
View the list of the pages.
'''
kwd = {
'pager': '',
'title': '单页列表',
}
self.render('wiki_page/page_list.html',
kwd=kwd,
view=MWiki.query_recent(),
view_all=MWiki.query_all(),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | python | def list(self):
'''
View the list of the pages.
'''
kwd = {
'pager': '',
'title': '单页列表',
}
self.render('wiki_page/page_list.html',
kwd=kwd,
view=MWiki.query_recent(),
view_all=MWiki.query_all(),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | ['def', 'list', '(', 'self', ')', ':', 'kwd', '=', '{', "'pager'", ':', "''", ',', "'title'", ':', "'单页列表',", '', '}', 'self', '.', 'render', '(', "'wiki_page/page_list.html'", ',', 'kwd', '=', 'kwd', ',', 'view', '=', 'MWiki', '.', 'query_recent', '(', ')', ',', 'view_all', '=', 'MWiki', '.', 'query_all', '(', ')', ',', 'format_date', '=', 'tools', '.', 'format_date', ',', 'userinfo', '=', 'self', '.', 'userinfo', ',', 'cfg', '=', 'CMS_CFG', ')'] | View the list of the pages. | ['View', 'the', 'list', 'of', 'the', 'pages', '.'] | train | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L179-L193 |
3,189 | EventTeam/beliefs | src/beliefs/cells/posets.py | PartialOrderedCell.is_equal | def is_equal(self, other):
"""
Computes whether two Partial Orderings contain the same information
"""
if not (hasattr(other, 'get_domain') or hasattr(other, 'upper') or hasattr(other, 'lower')):
other = self.coerce(other)
if self.is_domain_equal(other) \
and len(self.upper.symmetric_difference(other.upper)) == 0 \
and len(self.lower.symmetric_difference(other.lower)) == 0:
return True
return False | python | def is_equal(self, other):
"""
Computes whether two Partial Orderings contain the same information
"""
if not (hasattr(other, 'get_domain') or hasattr(other, 'upper') or hasattr(other, 'lower')):
other = self.coerce(other)
if self.is_domain_equal(other) \
and len(self.upper.symmetric_difference(other.upper)) == 0 \
and len(self.lower.symmetric_difference(other.lower)) == 0:
return True
return False | ['def', 'is_equal', '(', 'self', ',', 'other', ')', ':', 'if', 'not', '(', 'hasattr', '(', 'other', ',', "'get_domain'", ')', 'or', 'hasattr', '(', 'other', ',', "'upper'", ')', 'or', 'hasattr', '(', 'other', ',', "'lower'", ')', ')', ':', 'other', '=', 'self', '.', 'coerce', '(', 'other', ')', 'if', 'self', '.', 'is_domain_equal', '(', 'other', ')', 'and', 'len', '(', 'self', '.', 'upper', '.', 'symmetric_difference', '(', 'other', '.', 'upper', ')', ')', '==', '0', 'and', 'len', '(', 'self', '.', 'lower', '.', 'symmetric_difference', '(', 'other', '.', 'lower', ')', ')', '==', '0', ':', 'return', 'True', 'return', 'False'] | Computes whether two Partial Orderings contain the same information | ['Computes', 'whether', 'two', 'Partial', 'Orderings', 'contain', 'the', 'same', 'information'] | train | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/posets.py#L161-L171 |
3,190 | mozilla-releng/scriptworker | scriptworker/utils.py | get_parts_of_url_path | def get_parts_of_url_path(url):
"""Given a url, take out the path part and split it by '/'.
Args:
url (str): the url slice
returns
list: parts after the domain name of the URL
"""
parsed = urlparse(url)
path = unquote(parsed.path).lstrip('/')
parts = path.split('/')
return parts | python | def get_parts_of_url_path(url):
"""Given a url, take out the path part and split it by '/'.
Args:
url (str): the url slice
returns
list: parts after the domain name of the URL
"""
parsed = urlparse(url)
path = unquote(parsed.path).lstrip('/')
parts = path.split('/')
return parts | ['def', 'get_parts_of_url_path', '(', 'url', ')', ':', 'parsed', '=', 'urlparse', '(', 'url', ')', 'path', '=', 'unquote', '(', 'parsed', '.', 'path', ')', '.', 'lstrip', '(', "'/'", ')', 'parts', '=', 'path', '.', 'split', '(', "'/'", ')', 'return', 'parts'] | Given a url, take out the path part and split it by '/'.
Args:
url (str): the url slice
returns
list: parts after the domain name of the URL | ['Given', 'a', 'url', 'take', 'out', 'the', 'path', 'part', 'and', 'split', 'it', 'by', '/', '.'] | train | https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/utils.py#L592-L605 |
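Editor's note: a self-contained restatement of the logic shown in the row above, useful as a quick check of the described behaviour; the example URL is invented.

from urllib.parse import urlparse, unquote

def get_parts_of_url_path(url):
    # split the path component of a URL into its '/'-separated parts
    parsed = urlparse(url)
    path = unquote(parsed.path).lstrip('/')
    return path.split('/')

# prints ['repo', 'raw-file', 'tip', 'config.json'] for this made-up URL
print(get_parts_of_url_path('https://hg.example.org/repo/raw-file/tip/config.json'))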
3,191 | PmagPy/PmagPy | pmagpy/ipmag.py | download_magic | def download_magic(infile, dir_path='.', input_dir_path='',
overwrite=False, print_progress=True,
data_model=3., separate_locs=False):
"""
takes the name of a text file downloaded from the MagIC database and
unpacks it into magic-formatted files. by default, download_magic assumes
that you are doing everything in your current directory. if not, you may
provide optional arguments dir_path (where you want the results to go) and
input_dir_path (where the downloaded file is IF that location is different from
dir_path).
Parameters
----------
infile : str
MagIC-format file to unpack
dir_path : str
output directory (default ".")
input_dir_path : str, default ""
path for intput file if different from output_dir_path (default is same)
overwrite: bool
overwrite current directory (default False)
print_progress: bool
verbose output (default True)
data_model : float
MagIC data model 2.5 or 3 (default 3)
separate_locs : bool
create a separate directory for each location (Location_*)
(default False)
"""
if data_model == 2.5:
method_col = "magic_method_codes"
else:
method_col = "method_codes"
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
infile = pmag.resolve_file_name(infile, input_dir_path)
# try to deal reasonably with unicode errors
try:
f = codecs.open(infile, 'r', "utf-8")
infile = f.readlines()
except UnicodeDecodeError:
f = codecs.open(infile, 'r', "Latin-1")
infile = f.readlines()
f.close()
File = [] # will contain all non-blank lines from downloaded file
for line in infile:
line = line.replace('\n', '')
if line[0:4] == '>>>>' or len(line.strip()) > 0: # skip blank lines
File.append(line)
LN = 0 # tracks our progress iterating through File
type_list = []
filenum = 0
while LN < len(File) - 1:
line = File[LN]
if ">>>>" in line:
LN += 1
continue
file_type = line.split('\t')[1]
file_type = file_type.lower()
if file_type[-1] == "\n":
file_type = file_type[:-1]
if print_progress == True:
print('working on: ', repr(file_type))
if file_type not in type_list:
type_list.append(file_type)
else:
filenum += 1
LN += 1
line = File[LN]
# skip empty tables
if line == ">>>>>>>>>>":
LN += 1
continue
keys = line.replace('\n', '').split('\t')
if keys[0][0] == '.':
keys = line.replace('\n', '').replace('.', '').split('\t')
keys.append('RecNo') # cludge for new MagIC download format
LN += 1
Recs = []
while LN < len(File):
line = File[LN]
# finish up one file type and then break
if ">>>>" in line and len(Recs) > 0:
if filenum == 0:
outfile = os.path.join(dir_path, file_type.strip() + '.txt')
else:
outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
NewRecs = []
for rec in Recs:
if method_col in list(rec.keys()):
meths = rec[method_col].split(":")
if len(meths) > 0:
methods = ""
for meth in meths:
methods = methods + meth.strip() + ":" # get rid of nasty spaces!!!!!!
rec[method_col] = methods[:-1]
NewRecs.append(rec)
pmag.magic_write(outfile, Recs, file_type)
if print_progress == True:
print(file_type, " data put in ", outfile)
Recs = []
LN += 1
break
# keep adding records of the same file type
else:
rec = line.split('\t')
Rec = {}
if len(rec) == len(keys):
for k in range(len(rec)):
Rec[keys[k]] = rec[k]
Recs.append(Rec)
# in case of magic_search_results.txt, which has an extra
# column:
elif len(rec) - len(keys) == 1:
for k in range(len(rec))[:-1]:
Rec[keys[k]] = rec[k]
Recs.append(Rec)
elif len(rec) < len(keys):
for k in range(len(rec)):
Rec[keys[k]] = rec[k]
for k in range(len(rec), len(keys)):
Rec[keys[k]] = ""
Recs.append(Rec)
else:
print('WARNING: problem in file with line: ')
print(line)
print('skipping....')
LN += 1
if len(Recs) > 0:
if filenum == 0:
outfile = os.path.join(dir_path, file_type.strip() + '.txt')
else:
outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
NewRecs = []
for rec in Recs:
if method_col in list(rec.keys()):
meths = rec[method_col].split(":")
if len(meths) > 0:
methods = ""
for meth in meths:
methods = methods + meth.strip() + ":" # get rid of nasty spaces!!!!!!
rec[method_col] = methods[:-1]
NewRecs.append(rec)
pmag.magic_write(outfile, Recs, file_type)
if print_progress == True:
print(file_type, " data put in ", outfile)
# look through locations table and create separate directories for each
# location
if separate_locs:
con = cb.Contribution(dir_path)
con.propagate_location_to_measurements()
con.propagate_name_down('location', 'samples')
for dtype in con.tables:
con.write_table_to_file(dtype)
locs, locnum = [], 1
if 'locations' in type_list:
locs, file_type = pmag.magic_read(
os.path.join(dir_path, 'locations.txt'))
if len(locs) > 0: # at least one location
# go through unique location names
for loc_name in set([loc.get('location') for loc in locs]):
if print_progress == True:
print('location_' + str(locnum) + ": ", loc_name)
lpath = os.path.join(dir_path, 'Location_' + str(locnum))
locnum += 1
try:
os.mkdir(lpath)
except:
print('directory ', lpath,
' already exists - overwriting everything: {}'.format(overwrite))
if not overwrite:
print("-W- download_magic encountered a duplicate subdirectory ({}) and could not finish.\nRerun with overwrite=True, or unpack this file in a different directory.".format(lpath))
return False
for f in type_list:
fname = os.path.join(dir_path, f + '.txt')
if print_progress == True:
print('unpacking: ', fname)
recs, file_type = pmag.magic_read(fname)
if print_progress == True:
print(len(recs), ' read in')
lrecs = pmag.get_dictitem(recs, 'location', loc_name, 'T')
if len(lrecs) > 0:
outfile_name = os.path.join(lpath, f + ".txt")
pmag.magic_write(outfile_name, lrecs, file_type)
if print_progress == True:
print(len(lrecs), ' stored in ', outfile_name)
return True | python | def download_magic(infile, dir_path='.', input_dir_path='',
overwrite=False, print_progress=True,
data_model=3., separate_locs=False):
"""
takes the name of a text file downloaded from the MagIC database and
unpacks it into magic-formatted files. by default, download_magic assumes
that you are doing everything in your current directory. if not, you may
provide optional arguments dir_path (where you want the results to go) and
input_dir_path (where the downloaded file is IF that location is different from
dir_path).
Parameters
----------
infile : str
MagIC-format file to unpack
dir_path : str
output directory (default ".")
input_dir_path : str, default ""
path for intput file if different from output_dir_path (default is same)
overwrite: bool
overwrite current directory (default False)
print_progress: bool
verbose output (default True)
data_model : float
MagIC data model 2.5 or 3 (default 3)
separate_locs : bool
create a separate directory for each location (Location_*)
(default False)
"""
if data_model == 2.5:
method_col = "magic_method_codes"
else:
method_col = "method_codes"
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
infile = pmag.resolve_file_name(infile, input_dir_path)
# try to deal reasonably with unicode errors
try:
f = codecs.open(infile, 'r', "utf-8")
infile = f.readlines()
except UnicodeDecodeError:
f = codecs.open(infile, 'r', "Latin-1")
infile = f.readlines()
f.close()
File = [] # will contain all non-blank lines from downloaded file
for line in infile:
line = line.replace('\n', '')
if line[0:4] == '>>>>' or len(line.strip()) > 0: # skip blank lines
File.append(line)
LN = 0 # tracks our progress iterating through File
type_list = []
filenum = 0
while LN < len(File) - 1:
line = File[LN]
if ">>>>" in line:
LN += 1
continue
file_type = line.split('\t')[1]
file_type = file_type.lower()
if file_type[-1] == "\n":
file_type = file_type[:-1]
if print_progress == True:
print('working on: ', repr(file_type))
if file_type not in type_list:
type_list.append(file_type)
else:
filenum += 1
LN += 1
line = File[LN]
# skip empty tables
if line == ">>>>>>>>>>":
LN += 1
continue
keys = line.replace('\n', '').split('\t')
if keys[0][0] == '.':
keys = line.replace('\n', '').replace('.', '').split('\t')
keys.append('RecNo') # cludge for new MagIC download format
LN += 1
Recs = []
while LN < len(File):
line = File[LN]
# finish up one file type and then break
if ">>>>" in line and len(Recs) > 0:
if filenum == 0:
outfile = os.path.join(dir_path, file_type.strip() + '.txt')
else:
outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
NewRecs = []
for rec in Recs:
if method_col in list(rec.keys()):
meths = rec[method_col].split(":")
if len(meths) > 0:
methods = ""
for meth in meths:
methods = methods + meth.strip() + ":" # get rid of nasty spaces!!!!!!
rec[method_col] = methods[:-1]
NewRecs.append(rec)
pmag.magic_write(outfile, Recs, file_type)
if print_progress == True:
print(file_type, " data put in ", outfile)
Recs = []
LN += 1
break
# keep adding records of the same file type
else:
rec = line.split('\t')
Rec = {}
if len(rec) == len(keys):
for k in range(len(rec)):
Rec[keys[k]] = rec[k]
Recs.append(Rec)
# in case of magic_search_results.txt, which has an extra
# column:
elif len(rec) - len(keys) == 1:
for k in range(len(rec))[:-1]:
Rec[keys[k]] = rec[k]
Recs.append(Rec)
elif len(rec) < len(keys):
for k in range(len(rec)):
Rec[keys[k]] = rec[k]
for k in range(len(rec), len(keys)):
Rec[keys[k]] = ""
Recs.append(Rec)
else:
print('WARNING: problem in file with line: ')
print(line)
print('skipping....')
LN += 1
if len(Recs) > 0:
if filenum == 0:
outfile = os.path.join(dir_path, file_type.strip() + '.txt')
else:
outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
NewRecs = []
for rec in Recs:
if method_col in list(rec.keys()):
meths = rec[method_col].split(":")
if len(meths) > 0:
methods = ""
for meth in meths:
methods = methods + meth.strip() + ":" # get rid of nasty spaces!!!!!!
rec[method_col] = methods[:-1]
NewRecs.append(rec)
pmag.magic_write(outfile, Recs, file_type)
if print_progress == True:
print(file_type, " data put in ", outfile)
# look through locations table and create separate directories for each
# location
if separate_locs:
con = cb.Contribution(dir_path)
con.propagate_location_to_measurements()
con.propagate_name_down('location', 'samples')
for dtype in con.tables:
con.write_table_to_file(dtype)
locs, locnum = [], 1
if 'locations' in type_list:
locs, file_type = pmag.magic_read(
os.path.join(dir_path, 'locations.txt'))
if len(locs) > 0: # at least one location
# go through unique location names
for loc_name in set([loc.get('location') for loc in locs]):
if print_progress == True:
print('location_' + str(locnum) + ": ", loc_name)
lpath = os.path.join(dir_path, 'Location_' + str(locnum))
locnum += 1
try:
os.mkdir(lpath)
except:
print('directory ', lpath,
' already exists - overwriting everything: {}'.format(overwrite))
if not overwrite:
print("-W- download_magic encountered a duplicate subdirectory ({}) and could not finish.\nRerun with overwrite=True, or unpack this file in a different directory.".format(lpath))
return False
for f in type_list:
fname = os.path.join(dir_path, f + '.txt')
if print_progress == True:
print('unpacking: ', fname)
recs, file_type = pmag.magic_read(fname)
if print_progress == True:
print(len(recs), ' read in')
lrecs = pmag.get_dictitem(recs, 'location', loc_name, 'T')
if len(lrecs) > 0:
outfile_name = os.path.join(lpath, f + ".txt")
pmag.magic_write(outfile_name, lrecs, file_type)
if print_progress == True:
print(len(lrecs), ' stored in ', outfile_name)
return True | ['def', 'download_magic', '(', 'infile', ',', 'dir_path', '=', "'.'", ',', 'input_dir_path', '=', "''", ',', 'overwrite', '=', 'False', ',', 'print_progress', '=', 'True', ',', 'data_model', '=', '3.', ',', 'separate_locs', '=', 'False', ')', ':', 'if', 'data_model', '==', '2.5', ':', 'method_col', '=', '"magic_method_codes"', 'else', ':', 'method_col', '=', '"method_codes"', 'input_dir_path', ',', 'dir_path', '=', 'pmag', '.', 'fix_directories', '(', 'input_dir_path', ',', 'dir_path', ')', 'infile', '=', 'pmag', '.', 'resolve_file_name', '(', 'infile', ',', 'input_dir_path', ')', '# try to deal reasonably with unicode errors', 'try', ':', 'f', '=', 'codecs', '.', 'open', '(', 'infile', ',', "'r'", ',', '"utf-8"', ')', 'infile', '=', 'f', '.', 'readlines', '(', ')', 'except', 'UnicodeDecodeError', ':', 'f', '=', 'codecs', '.', 'open', '(', 'infile', ',', "'r'", ',', '"Latin-1"', ')', 'infile', '=', 'f', '.', 'readlines', '(', ')', 'f', '.', 'close', '(', ')', 'File', '=', '[', ']', '# will contain all non-blank lines from downloaded file', 'for', 'line', 'in', 'infile', ':', 'line', '=', 'line', '.', 'replace', '(', "'\\n'", ',', "''", ')', 'if', 'line', '[', '0', ':', '4', ']', '==', "'>>>>'", 'or', 'len', '(', 'line', '.', 'strip', '(', ')', ')', '>', '0', ':', '# skip blank lines', 'File', '.', 'append', '(', 'line', ')', 'LN', '=', '0', '# tracks our progress iterating through File', 'type_list', '=', '[', ']', 'filenum', '=', '0', 'while', 'LN', '<', 'len', '(', 'File', ')', '-', '1', ':', 'line', '=', 'File', '[', 'LN', ']', 'if', '">>>>"', 'in', 'line', ':', 'LN', '+=', '1', 'continue', 'file_type', '=', 'line', '.', 'split', '(', "'\\t'", ')', '[', '1', ']', 'file_type', '=', 'file_type', '.', 'lower', '(', ')', 'if', 'file_type', '[', '-', '1', ']', '==', '"\\n"', ':', 'file_type', '=', 'file_type', '[', ':', '-', '1', ']', 'if', 'print_progress', '==', 'True', ':', 'print', '(', "'working on: '", ',', 'repr', '(', 'file_type', ')', ')', 'if', 'file_type', 'not', 'in', 'type_list', ':', 'type_list', '.', 'append', '(', 'file_type', ')', 'else', ':', 'filenum', '+=', '1', 'LN', '+=', '1', 'line', '=', 'File', '[', 'LN', ']', '# skip empty tables', 'if', 'line', '==', '">>>>>>>>>>"', ':', 'LN', '+=', '1', 'continue', 'keys', '=', 'line', '.', 'replace', '(', "'\\n'", ',', "''", ')', '.', 'split', '(', "'\\t'", ')', 'if', 'keys', '[', '0', ']', '[', '0', ']', '==', "'.'", ':', 'keys', '=', 'line', '.', 'replace', '(', "'\\n'", ',', "''", ')', '.', 'replace', '(', "'.'", ',', "''", ')', '.', 'split', '(', "'\\t'", ')', 'keys', '.', 'append', '(', "'RecNo'", ')', '# cludge for new MagIC download format', 'LN', '+=', '1', 'Recs', '=', '[', ']', 'while', 'LN', '<', 'len', '(', 'File', ')', ':', 'line', '=', 'File', '[', 'LN', ']', '# finish up one file type and then break', 'if', '">>>>"', 'in', 'line', 'and', 'len', '(', 'Recs', ')', '>', '0', ':', 'if', 'filenum', '==', '0', ':', 'outfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'file_type', '.', 'strip', '(', ')', '+', "'.txt'", ')', 'else', ':', 'outfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'file_type', '.', 'strip', '(', ')', '+', "'_'", '+', 'str', '(', 'filenum', ')', '+', "'.txt'", ')', 'NewRecs', '=', '[', ']', 'for', 'rec', 'in', 'Recs', ':', 'if', 'method_col', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', ':', 'meths', '=', 'rec', '[', 'method_col', ']', '.', 'split', '(', '":"', ')', 'if', 'len', '(', 'meths', ')', '>', '0', ':', 'methods', '=', '""', 'for', 
'meth', 'in', 'meths', ':', 'methods', '=', 'methods', '+', 'meth', '.', 'strip', '(', ')', '+', '":"', '# get rid of nasty spaces!!!!!!', 'rec', '[', 'method_col', ']', '=', 'methods', '[', ':', '-', '1', ']', 'NewRecs', '.', 'append', '(', 'rec', ')', 'pmag', '.', 'magic_write', '(', 'outfile', ',', 'Recs', ',', 'file_type', ')', 'if', 'print_progress', '==', 'True', ':', 'print', '(', 'file_type', ',', '" data put in "', ',', 'outfile', ')', 'Recs', '=', '[', ']', 'LN', '+=', '1', 'break', '# keep adding records of the same file type', 'else', ':', 'rec', '=', 'line', '.', 'split', '(', "'\\t'", ')', 'Rec', '=', '{', '}', 'if', 'len', '(', 'rec', ')', '==', 'len', '(', 'keys', ')', ':', 'for', 'k', 'in', 'range', '(', 'len', '(', 'rec', ')', ')', ':', 'Rec', '[', 'keys', '[', 'k', ']', ']', '=', 'rec', '[', 'k', ']', 'Recs', '.', 'append', '(', 'Rec', ')', '# in case of magic_search_results.txt, which has an extra', '# column:', 'elif', 'len', '(', 'rec', ')', '-', 'len', '(', 'keys', ')', '==', '1', ':', 'for', 'k', 'in', 'range', '(', 'len', '(', 'rec', ')', ')', '[', ':', '-', '1', ']', ':', 'Rec', '[', 'keys', '[', 'k', ']', ']', '=', 'rec', '[', 'k', ']', 'Recs', '.', 'append', '(', 'Rec', ')', 'elif', 'len', '(', 'rec', ')', '<', 'len', '(', 'keys', ')', ':', 'for', 'k', 'in', 'range', '(', 'len', '(', 'rec', ')', ')', ':', 'Rec', '[', 'keys', '[', 'k', ']', ']', '=', 'rec', '[', 'k', ']', 'for', 'k', 'in', 'range', '(', 'len', '(', 'rec', ')', ',', 'len', '(', 'keys', ')', ')', ':', 'Rec', '[', 'keys', '[', 'k', ']', ']', '=', '""', 'Recs', '.', 'append', '(', 'Rec', ')', 'else', ':', 'print', '(', "'WARNING: problem in file with line: '", ')', 'print', '(', 'line', ')', 'print', '(', "'skipping....'", ')', 'LN', '+=', '1', 'if', 'len', '(', 'Recs', ')', '>', '0', ':', 'if', 'filenum', '==', '0', ':', 'outfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'file_type', '.', 'strip', '(', ')', '+', "'.txt'", ')', 'else', ':', 'outfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'file_type', '.', 'strip', '(', ')', '+', "'_'", '+', 'str', '(', 'filenum', ')', '+', "'.txt'", ')', 'NewRecs', '=', '[', ']', 'for', 'rec', 'in', 'Recs', ':', 'if', 'method_col', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', ':', 'meths', '=', 'rec', '[', 'method_col', ']', '.', 'split', '(', '":"', ')', 'if', 'len', '(', 'meths', ')', '>', '0', ':', 'methods', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'methods', '=', 'methods', '+', 'meth', '.', 'strip', '(', ')', '+', '":"', '# get rid of nasty spaces!!!!!!', 'rec', '[', 'method_col', ']', '=', 'methods', '[', ':', '-', '1', ']', 'NewRecs', '.', 'append', '(', 'rec', ')', 'pmag', '.', 'magic_write', '(', 'outfile', ',', 'Recs', ',', 'file_type', ')', 'if', 'print_progress', '==', 'True', ':', 'print', '(', 'file_type', ',', '" data put in "', ',', 'outfile', ')', '# look through locations table and create separate directories for each', '# location', 'if', 'separate_locs', ':', 'con', '=', 'cb', '.', 'Contribution', '(', 'dir_path', ')', 'con', '.', 'propagate_location_to_measurements', '(', ')', 'con', '.', 'propagate_name_down', '(', "'location'", ',', "'samples'", ')', 'for', 'dtype', 'in', 'con', '.', 'tables', ':', 'con', '.', 'write_table_to_file', '(', 'dtype', ')', 'locs', ',', 'locnum', '=', '[', ']', ',', '1', 'if', "'locations'", 'in', 'type_list', ':', 'locs', ',', 'file_type', '=', 'pmag', '.', 'magic_read', '(', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'locations.txt'", ')', ')', 
'if', 'len', '(', 'locs', ')', '>', '0', ':', '# at least one location', '# go through unique location names', 'for', 'loc_name', 'in', 'set', '(', '[', 'loc', '.', 'get', '(', "'location'", ')', 'for', 'loc', 'in', 'locs', ']', ')', ':', 'if', 'print_progress', '==', 'True', ':', 'print', '(', "'location_'", '+', 'str', '(', 'locnum', ')', '+', '": "', ',', 'loc_name', ')', 'lpath', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'Location_'", '+', 'str', '(', 'locnum', ')', ')', 'locnum', '+=', '1', 'try', ':', 'os', '.', 'mkdir', '(', 'lpath', ')', 'except', ':', 'print', '(', "'directory '", ',', 'lpath', ',', "' already exists - overwriting everything: {}'", '.', 'format', '(', 'overwrite', ')', ')', 'if', 'not', 'overwrite', ':', 'print', '(', '"-W- download_magic encountered a duplicate subdirectory ({}) and could not finish.\\nRerun with overwrite=True, or unpack this file in a different directory."', '.', 'format', '(', 'lpath', ')', ')', 'return', 'False', 'for', 'f', 'in', 'type_list', ':', 'fname', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'f', '+', "'.txt'", ')', 'if', 'print_progress', '==', 'True', ':', 'print', '(', "'unpacking: '", ',', 'fname', ')', 'recs', ',', 'file_type', '=', 'pmag', '.', 'magic_read', '(', 'fname', ')', 'if', 'print_progress', '==', 'True', ':', 'print', '(', 'len', '(', 'recs', ')', ',', "' read in'", ')', 'lrecs', '=', 'pmag', '.', 'get_dictitem', '(', 'recs', ',', "'location'", ',', 'loc_name', ',', "'T'", ')', 'if', 'len', '(', 'lrecs', ')', '>', '0', ':', 'outfile_name', '=', 'os', '.', 'path', '.', 'join', '(', 'lpath', ',', 'f', '+', '".txt"', ')', 'pmag', '.', 'magic_write', '(', 'outfile_name', ',', 'lrecs', ',', 'file_type', ')', 'if', 'print_progress', '==', 'True', ':', 'print', '(', 'len', '(', 'lrecs', ')', ',', "' stored in '", ',', 'outfile_name', ')', 'return', 'True'] | takes the name of a text file downloaded from the MagIC database and
unpacks it into magic-formatted files. by default, download_magic assumes
that you are doing everything in your current directory. if not, you may
provide optional arguments dir_path (where you want the results to go) and
input_dir_path (where the downloaded file is IF that location is different from
dir_path).
Parameters
----------
infile : str
MagIC-format file to unpack
dir_path : str
output directory (default ".")
input_dir_path : str, default ""
path for intput file if different from output_dir_path (default is same)
overwrite: bool
overwrite current directory (default False)
print_progress: bool
verbose output (default True)
data_model : float
MagIC data model 2.5 or 3 (default 3)
separate_locs : bool
create a separate directory for each location (Location_*)
(default False) | ['takes', 'the', 'name', 'of', 'a', 'text', 'file', 'downloaded', 'from', 'the', 'MagIC', 'database', 'and', 'unpacks', 'it', 'into', 'magic', '-', 'formatted', 'files', '.', 'by', 'default', 'download_magic', 'assumes', 'that', 'you', 'are', 'doing', 'everything', 'in', 'your', 'current', 'directory', '.', 'if', 'not', 'you', 'may', 'provide', 'optional', 'arguments', 'dir_path', '(', 'where', 'you', 'want', 'the', 'results', 'to', 'go', ')', 'and', 'input_dir_path', '(', 'where', 'the', 'downloaded', 'file', 'is', 'IF', 'that', 'location', 'is', 'different', 'from', 'dir_path', ')', '.'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L4059-L4245 |
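Editor's note: a hedged call sketch for download_magic based only on the signature and docstring in the row above; the filename and output directory are placeholders, and PmagPy must be installed for the import to resolve.

from pmagpy import ipmag

# Unpack a MagIC text file downloaded from the database into MagIC-format
# tables, creating one Location_* subdirectory per location.
ok = ipmag.download_magic('magic_contribution.txt',  # placeholder filename
                          dir_path='unpacked',
                          separate_locs=True,
                          overwrite=True)
print('unpacked' if ok else 'unpacking failed')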
3,192 | pmorissette/bt | bt/core.py | StrategyBase.adjust | def adjust(self, amount, update=True, flow=True, fee=0.0):
"""
Adjust capital - used to inject capital to a Strategy. This injection
of capital will have no effect on the children.
Args:
* amount (float): Amount to adjust by.
* update (bool): Force update?
* flow (bool): Is this adjustment a flow? A flow will not have an
impact on the performance (price index). Example of flows are
simply capital injections (say a monthly contribution to a
portfolio). This should not be reflected in the returns. A
non-flow (flow=False) does impact performance. A good example
of this is a commission, or a dividend.
"""
# adjust capital
self._capital += amount
self._last_fee += fee
# if flow - increment net_flows - this will not affect
# performance. Commissions and other fees are not flows since
# they have a performance impact
if flow:
self._net_flows += amount
if update:
# indicates that data is now stale and must
# be updated before access
self.root.stale = True | python | def adjust(self, amount, update=True, flow=True, fee=0.0):
"""
Adjust capital - used to inject capital to a Strategy. This injection
of capital will have no effect on the children.
Args:
* amount (float): Amount to adjust by.
* update (bool): Force update?
* flow (bool): Is this adjustment a flow? A flow will not have an
impact on the performance (price index). Example of flows are
simply capital injections (say a monthly contribution to a
portfolio). This should not be reflected in the returns. A
non-flow (flow=False) does impact performance. A good example
of this is a commission, or a dividend.
"""
# adjust capital
self._capital += amount
self._last_fee += fee
# if flow - increment net_flows - this will not affect
# performance. Commissions and other fees are not flows since
# they have a performance impact
if flow:
self._net_flows += amount
if update:
# indicates that data is now stale and must
# be updated before access
self.root.stale = True | ['def', 'adjust', '(', 'self', ',', 'amount', ',', 'update', '=', 'True', ',', 'flow', '=', 'True', ',', 'fee', '=', '0.0', ')', ':', '# adjust capital', 'self', '.', '_capital', '+=', 'amount', 'self', '.', '_last_fee', '+=', 'fee', '# if flow - increment net_flows - this will not affect', '# performance. Commissions and other fees are not flows since', '# they have a performance impact', 'if', 'flow', ':', 'self', '.', '_net_flows', '+=', 'amount', 'if', 'update', ':', '# indicates that data is now stale and must', '# be updated before access', 'self', '.', 'root', '.', 'stale', '=', 'True'] | Adjust capital - used to inject capital to a Strategy. This injection
of capital will have no effect on the children.
Args:
* amount (float): Amount to adjust by.
* update (bool): Force update?
* flow (bool): Is this adjustment a flow? A flow will not have an
impact on the performance (price index). Example of flows are
simply capital injections (say a monthly contribution to a
portfolio). This should not be reflected in the returns. A
non-flow (flow=False) does impact performance. A good example
of this is a commission, or a dividend. | ['Adjust', 'capital', '-', 'used', 'to', 'inject', 'capital', 'to', 'a', 'Strategy', '.', 'This', 'injection', 'of', 'capital', 'will', 'have', 'no', 'effect', 'on', 'the', 'children', '.'] | train | https://github.com/pmorissette/bt/blob/0363e6fa100d9392dd18e32e3d8379d5e83c28fa/bt/core.py#L589-L618 |
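Editor's note: a fragment illustrating the flow vs. non-flow distinction drawn in the docstring above; it is not self-contained — strategy is assumed to be an already-constructed StrategyBase (or subclass) instance from bt, and the amounts are arbitrary.

# A monthly contribution is a flow: capital moves, performance is unaffected.
strategy.adjust(1000.0, flow=True)

# A commission is not a flow: it registers as a performance hit, and the
# fee amount is tracked separately via the fee argument.
strategy.adjust(-9.95, flow=False, fee=9.95)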
3,193 | saltstack/salt | salt/modules/useradd.py | _quote_username | def _quote_username(name):
'''
Usernames can only contain ascii chars, so make sure we return a str type
'''
if not isinstance(name, six.string_types):
return str(name) # future lint: disable=blacklisted-function
else:
return salt.utils.stringutils.to_str(name) | python | def _quote_username(name):
'''
Usernames can only contain ascii chars, so make sure we return a str type
'''
if not isinstance(name, six.string_types):
return str(name) # future lint: disable=blacklisted-function
else:
return salt.utils.stringutils.to_str(name) | ['def', '_quote_username', '(', 'name', ')', ':', 'if', 'not', 'isinstance', '(', 'name', ',', 'six', '.', 'string_types', ')', ':', 'return', 'str', '(', 'name', ')', '# future lint: disable=blacklisted-function', 'else', ':', 'return', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_str', '(', 'name', ')'] | Usernames can only contain ascii chars, so make sure we return a str type | ['Usernames', 'can', 'only', 'contain', 'ascii', 'chars', 'so', 'make', 'sure', 'we', 'return', 'a', 'str', 'type'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/useradd.py#L50-L57 |
3,194 | saltstack/salt | salt/modules/localemod.py | _localectl_set | def _localectl_set(locale=''):
'''
Use systemd's localectl command to set the LANG locale parameter, making
sure not to trample on other params that have been set.
'''
locale_params = _parse_dbus_locale() if dbus is not None else _localectl_status().get('system_locale', {})
locale_params['LANG'] = six.text_type(locale)
args = ' '.join(['{0}="{1}"'.format(k, v) for k, v in six.iteritems(locale_params) if v is not None])
return not __salt__['cmd.retcode']('localectl set-locale {0}'.format(args), python_shell=False) | python | def _localectl_set(locale=''):
'''
Use systemd's localectl command to set the LANG locale parameter, making
sure not to trample on other params that have been set.
'''
locale_params = _parse_dbus_locale() if dbus is not None else _localectl_status().get('system_locale', {})
locale_params['LANG'] = six.text_type(locale)
args = ' '.join(['{0}="{1}"'.format(k, v) for k, v in six.iteritems(locale_params) if v is not None])
return not __salt__['cmd.retcode']('localectl set-locale {0}'.format(args), python_shell=False) | ['def', '_localectl_set', '(', 'locale', '=', "''", ')', ':', 'locale_params', '=', '_parse_dbus_locale', '(', ')', 'if', 'dbus', 'is', 'not', 'None', 'else', '_localectl_status', '(', ')', '.', 'get', '(', "'system_locale'", ',', '{', '}', ')', 'locale_params', '[', "'LANG'", ']', '=', 'six', '.', 'text_type', '(', 'locale', ')', 'args', '=', "' '", '.', 'join', '(', '[', '\'{0}="{1}"\'', '.', 'format', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'six', '.', 'iteritems', '(', 'locale_params', ')', 'if', 'v', 'is', 'not', 'None', ']', ')', 'return', 'not', '__salt__', '[', "'cmd.retcode'", ']', '(', "'localectl set-locale {0}'", '.', 'format', '(', 'args', ')', ',', 'python_shell', '=', 'False', ')'] | Use systemd's localectl command to set the LANG locale parameter, making
sure not to trample on other params that have been set. | ['Use', 'systemd', 's', 'localectl', 'command', 'to', 'set', 'the', 'LANG', 'locale', 'parameter', 'making', 'sure', 'not', 'to', 'trample', 'on', 'other', 'params', 'that', 'have', 'been', 'set', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/localemod.py#L100-L108 |
3,195 | RedHatInsights/insights-core | insights/collect.py | get_pool | def get_pool(parallel, kwargs):
"""
Yields:
a ThreadPoolExecutor if parallel is True and `concurrent.futures` exists.
`None` otherwise.
"""
if parallel:
try:
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(thread_name_prefix="insights-collector-pool", **kwargs) as pool:
yield pool
except ImportError:
yield None
else:
yield None | python | def get_pool(parallel, kwargs):
"""
Yields:
a ThreadPoolExecutor if parallel is True and `concurrent.futures` exists.
`None` otherwise.
"""
if parallel:
try:
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(thread_name_prefix="insights-collector-pool", **kwargs) as pool:
yield pool
except ImportError:
yield None
else:
yield None | ['def', 'get_pool', '(', 'parallel', ',', 'kwargs', ')', ':', 'if', 'parallel', ':', 'try', ':', 'from', 'concurrent', '.', 'futures', 'import', 'ThreadPoolExecutor', 'with', 'ThreadPoolExecutor', '(', 'thread_name_prefix', '=', '"insights-collector-pool"', ',', '*', '*', 'kwargs', ')', 'as', 'pool', ':', 'yield', 'pool', 'except', 'ImportError', ':', 'yield', 'None', 'else', ':', 'yield', 'None'] | Yields:
a ThreadPoolExecutor if parallel is True and `concurrent.futures` exists.
`None` otherwise. | ['Yields', ':', 'a', 'ThreadPoolExecutor', 'if', 'parallel', 'is', 'True', 'and', 'concurrent', '.', 'futures', 'exists', '.', 'None', 'otherwise', '.'] | train | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/collect.py#L199-L214 |
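Editor's note: a hedged sketch of how get_pool appears intended to be used. The row does not show its decorator, so treating it as a context manager (e.g. wrapped with contextlib.contextmanager inside insights.collect) is an assumption, as are the worker count and the toy workload.

from insights.collect import get_pool  # assumed public import

def work(n):
    return n * n

with get_pool(parallel=True, kwargs={"max_workers": 4}) as pool:
    if pool is None:
        # concurrent.futures unavailable or parallel disabled: run serially
        results = [work(n) for n in range(10)]
    else:
        results = list(pool.map(work, range(10)))
print(results)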
3,196 | clalancette/pycdlib | pycdlib/rockridge.py | RRERRecord.record | def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Extensions Reference
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('ER record not yet initialized!')
return b'ER' + struct.pack('=BBBBBB', RRERRecord.length(self.ext_id, self.ext_des, self.ext_src), SU_ENTRY_VERSION, len(self.ext_id), len(self.ext_des), len(self.ext_src), self.ext_ver) + self.ext_id + self.ext_des + self.ext_src | python | def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Extensions Reference
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('ER record not yet initialized!')
return b'ER' + struct.pack('=BBBBBB', RRERRecord.length(self.ext_id, self.ext_des, self.ext_src), SU_ENTRY_VERSION, len(self.ext_id), len(self.ext_des), len(self.ext_src), self.ext_ver) + self.ext_id + self.ext_des + self.ext_src | ['def', 'record', '(', 'self', ')', ':', '# type: () -> bytes', 'if', 'not', 'self', '.', '_initialized', ':', 'raise', 'pycdlibexception', '.', 'PyCdlibInternalError', '(', "'ER record not yet initialized!'", ')', 'return', "b'ER'", '+', 'struct', '.', 'pack', '(', "'=BBBBBB'", ',', 'RRERRecord', '.', 'length', '(', 'self', '.', 'ext_id', ',', 'self', '.', 'ext_des', ',', 'self', '.', 'ext_src', ')', ',', 'SU_ENTRY_VERSION', ',', 'len', '(', 'self', '.', 'ext_id', ')', ',', 'len', '(', 'self', '.', 'ext_des', ')', ',', 'len', '(', 'self', '.', 'ext_src', ')', ',', 'self', '.', 'ext_ver', ')', '+', 'self', '.', 'ext_id', '+', 'self', '.', 'ext_des', '+', 'self', '.', 'ext_src'] | Generate a string representing the Rock Ridge Extensions Reference
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record. | ['Generate', 'a', 'string', 'representing', 'the', 'Rock', 'Ridge', 'Extensions', 'Reference', 'record', '.'] | train | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L623-L637 |
3,197 | mitsei/dlkit | dlkit/json_/assessment/objects.py | Question.get_learning_objectives | def get_learning_objectives(self):
""" This method also mirrors that in the Item."""
# This is pretty much identicial to the method in assessment.Item!
mgr = self._get_provider_manager('LEARNING')
lookup_session = mgr.get_objective_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_objective_bank_view()
return lookup_session.get_objectives_by_ids(self.get_learning_objective_ids()) | python | def get_learning_objectives(self):
""" This method also mirrors that in the Item."""
# This is pretty much identicial to the method in assessment.Item!
mgr = self._get_provider_manager('LEARNING')
lookup_session = mgr.get_objective_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_objective_bank_view()
return lookup_session.get_objectives_by_ids(self.get_learning_objective_ids()) | ['def', 'get_learning_objectives', '(', 'self', ')', ':', '# This is pretty much identicial to the method in assessment.Item!', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'LEARNING'", ')', 'lookup_session', '=', 'mgr', '.', 'get_objective_lookup_session', '(', 'proxy', '=', 'getattr', '(', 'self', ',', '"_proxy"', ',', 'None', ')', ')', 'lookup_session', '.', 'use_federated_objective_bank_view', '(', ')', 'return', 'lookup_session', '.', 'get_objectives_by_ids', '(', 'self', '.', 'get_learning_objective_ids', '(', ')', ')'] | This method also mirrors that in the Item. | ['This', 'method', 'also', 'mirrors', 'that', 'in', 'the', 'Item', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L128-L134 |
3,198 | ejeschke/ginga | ginga/examples/gw/clocks.py | Clock.clock_resized_cb | def clock_resized_cb(self, viewer, width, height):
"""This method is called when an individual clock is resized.
It deletes and reconstructs the placement of the text objects
in the canvas.
"""
self.logger.info("resized canvas to %dx%d" % (width, height))
# add text objects to canvas
self.canvas.delete_all_objects()
Text = self.canvas.get_draw_class('text')
x, y = 20, int(height * 0.55)
# text object for the time
self.time_txt = Text(x, y, text='', color=self.color,
font=self.font, fontsize=self.largesize,
coord='window')
self.canvas.add(self.time_txt, tag='_time', redraw=False)
# for supplementary info (date, timezone, etc)
self.suppl_txt = Text(x, height - 10, text='', color=self.color,
font=self.font, fontsize=self.smallsize,
coord='window')
self.canvas.add(self.suppl_txt, tag='_suppl', redraw=False)
self.canvas.update_canvas(whence=3) | python | def clock_resized_cb(self, viewer, width, height):
"""This method is called when an individual clock is resized.
It deletes and reconstructs the placement of the text objects
in the canvas.
"""
self.logger.info("resized canvas to %dx%d" % (width, height))
# add text objects to canvas
self.canvas.delete_all_objects()
Text = self.canvas.get_draw_class('text')
x, y = 20, int(height * 0.55)
# text object for the time
self.time_txt = Text(x, y, text='', color=self.color,
font=self.font, fontsize=self.largesize,
coord='window')
self.canvas.add(self.time_txt, tag='_time', redraw=False)
# for supplementary info (date, timezone, etc)
self.suppl_txt = Text(x, height - 10, text='', color=self.color,
font=self.font, fontsize=self.smallsize,
coord='window')
self.canvas.add(self.suppl_txt, tag='_suppl', redraw=False)
self.canvas.update_canvas(whence=3) | ['def', 'clock_resized_cb', '(', 'self', ',', 'viewer', ',', 'width', ',', 'height', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"resized canvas to %dx%d"', '%', '(', 'width', ',', 'height', ')', ')', '# add text objects to canvas', 'self', '.', 'canvas', '.', 'delete_all_objects', '(', ')', 'Text', '=', 'self', '.', 'canvas', '.', 'get_draw_class', '(', "'text'", ')', 'x', ',', 'y', '=', '20', ',', 'int', '(', 'height', '*', '0.55', ')', '# text object for the time', 'self', '.', 'time_txt', '=', 'Text', '(', 'x', ',', 'y', ',', 'text', '=', "''", ',', 'color', '=', 'self', '.', 'color', ',', 'font', '=', 'self', '.', 'font', ',', 'fontsize', '=', 'self', '.', 'largesize', ',', 'coord', '=', "'window'", ')', 'self', '.', 'canvas', '.', 'add', '(', 'self', '.', 'time_txt', ',', 'tag', '=', "'_time'", ',', 'redraw', '=', 'False', ')', '# for supplementary info (date, timezone, etc)', 'self', '.', 'suppl_txt', '=', 'Text', '(', 'x', ',', 'height', '-', '10', ',', 'text', '=', "''", ',', 'color', '=', 'self', '.', 'color', ',', 'font', '=', 'self', '.', 'font', ',', 'fontsize', '=', 'self', '.', 'smallsize', ',', 'coord', '=', "'window'", ')', 'self', '.', 'canvas', '.', 'add', '(', 'self', '.', 'suppl_txt', ',', 'tag', '=', "'_suppl'", ',', 'redraw', '=', 'False', ')', 'self', '.', 'canvas', '.', 'update_canvas', '(', 'whence', '=', '3', ')'] | This method is called when an individual clock is resized.
It deletes and reconstructs the placement of the text objects
in the canvas. | ['This', 'method', 'is', 'called', 'when', 'an', 'individual', 'clock', 'is', 'resized', '.', 'It', 'deletes', 'and', 'reconstructs', 'the', 'placement', 'of', 'the', 'text', 'objects', 'in', 'the', 'canvas', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/examples/gw/clocks.py#L82-L106 |
3,199 | getsentry/sentry-plugins | src/sentry_plugins/github/plugin.py | GitHubRepositoryProvider.validate_config | def validate_config(self, organization, config, actor=None):
"""
```
if config['foo'] and not config['bar']:
raise PluginError('You cannot configure foo with bar')
return config
```
"""
if config.get('name'):
client = self.get_client(actor)
try:
repo = client.get_repo(config['name'])
except Exception as e:
self.raise_error(e)
else:
config['external_id'] = six.text_type(repo['id'])
return config | python | def validate_config(self, organization, config, actor=None):
"""
```
if config['foo'] and not config['bar']:
raise PluginError('You cannot configure foo with bar')
return config
```
"""
if config.get('name'):
client = self.get_client(actor)
try:
repo = client.get_repo(config['name'])
except Exception as e:
self.raise_error(e)
else:
config['external_id'] = six.text_type(repo['id'])
return config | ['def', 'validate_config', '(', 'self', ',', 'organization', ',', 'config', ',', 'actor', '=', 'None', ')', ':', 'if', 'config', '.', 'get', '(', "'name'", ')', ':', 'client', '=', 'self', '.', 'get_client', '(', 'actor', ')', 'try', ':', 'repo', '=', 'client', '.', 'get_repo', '(', 'config', '[', "'name'", ']', ')', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'raise_error', '(', 'e', ')', 'else', ':', 'config', '[', "'external_id'", ']', '=', 'six', '.', 'text_type', '(', 'repo', '[', "'id'", ']', ')', 'return', 'config'] | ```
if config['foo'] and not config['bar']:
raise PluginError('You cannot configure foo with bar')
return config
``` | ['if', 'config', '[', 'foo', ']', 'and', 'not', 'config', '[', 'bar', ']', ':', 'raise', 'PluginError', '(', 'You', 'cannot', 'configure', 'foo', 'with', 'bar', ')', 'return', 'config'] | train | https://github.com/getsentry/sentry-plugins/blob/2d65331bcb807e0bb16b5e7bdcae56b152bb0dda/src/sentry_plugins/github/plugin.py#L278-L294 |