Dataset columns:

| Column | Type | Range / classes |
|---|---|---|
| Unnamed: 0 | int64 | 0 - 10k |
| repository_name | string | lengths 7 - 54 |
| func_path_in_repository | string | lengths 5 - 223 |
| func_name | string | lengths 1 - 134 |
| whole_func_string | string | lengths 100 - 30.3k |
| language | string | 1 class ("python") |
| func_code_string | string | lengths 100 - 30.3k |
| func_code_tokens | string | lengths 138 - 33.2k |
| func_documentation_string | string | lengths 1 - 15k |
| func_documentation_tokens | string | lengths 5 - 5.14k |
| split_name | string | 1 class ("train") |
| func_code_url | string | lengths 91 - 315 |

Each record below gives the row index, repository, file path, function name, language, split, and source URL on one line, followed by the function source (its documentation string is embedded in the code).
| 3,200 | noahbenson/neuropythy | neuropythy/vision/retinotopy.py | occipital_flatmap | python | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L645-L658 |

```python
def occipital_flatmap(cortex, radius=None):
    '''
    occipital_flatmap(cortex) yields a flattened mesh of the occipital cortex of the given cortex
    object.

    Note that if the cortex is not registrered to fsaverage, this will fail.

    The option radius may be given to specify the fraction of the cortical sphere (in radians) to
    include in the map.
    '''
    mdl = retinotopy_model('benson17', hemi=cortex.chirality)
    mp = mdl.map_projection
    if radius is not None: mp = mp.copy(radius=radius)
    return mp(cortex)
```

| 3,201 | ramrod-project/database-brain | schema/brain/controller/plugins.py | find_plugin | python | train | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/controller/plugins.py#L57-L75 |

```python
def find_plugin(value,
                key=DEFAULT_LOOKUP_KEY,
                conn=None):
    """
    get's the plugin matching the key and value

    example: find_plugin("plugin1", "ServiceName") => list of 0 or 1 item
    example: find_plugin("plugin1", "Name") => list of 0-to-many items

    :param value:
    :param key: <str> (default "Name")
    :param conn:
    :return:
    """
    # cast to list to hide rethink internals from caller
    result = list(RPC.filter({
        key: value
    }).run(conn))
    return result
```

| 3,202 | kpdyer/regex2dfa | third_party/re2/lib/codereview/codereview.py | MySend | python | train | https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L2444-L2456 |

```python
def MySend(request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None, force_auth=True,
           **kwargs):
    """Run MySend1 maybe twice, because Rietveld is unreliable."""
    try:
        return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
    except Exception, e:
        if type(e) != urllib2.HTTPError or e.code != 500:  # only retry on HTTP 500 error
            raise
        print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
        time.sleep(2)
        return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
```

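
The function above is Python 2 and tied to Rietveld's `MySend1` helper. As a rough, generic sketch of the same retry-once-on-HTTP-500 idea in Python 3 using only the standard library (the `fetch_with_retry` name and its parameters are hypothetical, not part of the original code):

```python
import sys
import time
import urllib.error
import urllib.request


def fetch_with_retry(url, data=None, retry_delay=2):
    """Try a request twice, retrying only on a transient HTTP 500."""
    try:
        return urllib.request.urlopen(url, data=data)
    except urllib.error.HTTPError as err:
        if err.code != 500:  # only retry on HTTP 500
            raise
        print("Loading %s: %s; trying again in %d seconds."
              % (url, err, retry_delay), file=sys.stderr)
        time.sleep(retry_delay)
        return urllib.request.urlopen(url, data=data)
```
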
| 3,203 | miLibris/flask-rest-jsonapi | flask_rest_jsonapi/schema.py | get_relationships | python | train | https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/schema.py#L119-L130 |

```python
def get_relationships(schema, model_field=False):
    """Return relationship fields of a schema

    :param Schema schema: a marshmallow schema
    :param list: list of relationship fields of a schema
    """
    relationships = [key for (key, value) in schema._declared_fields.items() if isinstance(value, Relationship)]

    if model_field is True:
        relationships = [get_model_field(schema, key) for key in relationships]

    return relationships
```

| 3,204 | Azure/msrest-for-python | msrest/polling/poller.py | LROPoller._start | python | train | https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/polling/poller.py#L144-L163 |

```python
def _start(self):
    """Start the long running operation.
    On completion, runs any callbacks.

    :param callable update_cmd: The API reuqest to check the status of
     the operation.
    """
    try:
        self._polling_method.run()
    except Exception as err:
        self._exception = err
    finally:
        self._done.set()

    callbacks, self._callbacks = self._callbacks, []
    while callbacks:
        for call in callbacks:
            call(self._polling_method)
        callbacks, self._callbacks = self._callbacks, []
```

| 3,205 | jxtech/wechatpy | wechatpy/utils.py | timezone | python | train | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/utils.py#L106-L121 |

```python
def timezone(zone):
    """Try to get timezone using pytz or python-dateutil

    :param zone: timezone str
    :return: timezone tzinfo or None
    """
    try:
        import pytz
        return pytz.timezone(zone)
    except ImportError:
        pass
    try:
        from dateutil.tz import gettz
        return gettz(zone)
    except ImportError:
        return None
```

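
A minimal usage sketch for the helper above; it assumes either pytz or python-dateutil is importable, otherwise the call returns None:

```python
from datetime import datetime

tz = timezone('Asia/Shanghai')   # tzinfo from pytz or dateutil, or None
if tz is not None:
    print(datetime.now(tz).isoformat())
```
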
| 3,206 | IdentityPython/pysaml2 | src/saml2/httpbase.py | HTTPBase.send_using_soap | python | train | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/httpbase.py#L361-L385 |

```python
def send_using_soap(self, request, destination, headers=None, sign=False):
    """
    Send a message using SOAP+POST

    :param request:
    :param destination:
    :param headers:
    :param sign:
    :return:
    """
    # _response = self.server.post(soap_message, headers, path=path)
    try:
        args = self.use_soap(request, destination, headers, sign)
        args["headers"] = dict(args["headers"])
        response = self.send(**args)
    except Exception as exc:
        logger.info("HTTPClient exception: %s", exc)
        raise

    if response.status_code == 200:
        logger.info("SOAP response: %s", response.text)
        return response
    else:
        raise HTTPError("%d:%s" % (response.status_code, response.content))
```

| 3,207 | Parsl/libsubmit | libsubmit/providers/local/local.py | LocalProvider.status | python | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/local/local.py#L77-L101 |

```python
def status(self, job_ids):
    ''' Get the status of a list of jobs identified by their ids.

    Args:
        - job_ids (List of ids) : List of identifiers for the jobs

    Returns:
        - List of status codes.
    '''
    logging.debug("Checking status of : {0}".format(job_ids))
    for job_id in self.resources:
        poll_code = self.resources[job_id]['proc'].poll()
        if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
            continue

        if poll_code is None:
            self.resources[job_id]['status'] = 'RUNNING'
        elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':
            self.resources[job_id]['status'] = 'COMPLETED'
        elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':
            self.resources[job_id]['status'] = 'FAILED'

    return [self.resources[jid]['status'] for jid in job_ids]
```

| 3,208 | pywbem/pywbem | attic/cim_provider.py | CIMProvider.MI_referenceNames | python | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider.py#L892-L950 |

```python
def MI_referenceNames(self,
                      env,
                      objectName,
                      resultClassName,
                      role):
    # pylint: disable=invalid-name
    """Return instance names of an association class.

    Implements the WBEM operation ReferenceNames in terms
    of the references method. A derived class will not normally
    override this method.
    """
    logger = env.get_logger()
    logger.log_debug('CIMProvider MI_referenceNames <2> called. ' \
                     'resultClass: %s' % (resultClassName))

    ch = env.get_cimom_handle()
    if not resultClassName:
        raise pywbem.CIMError(
            pywbem.CIM_ERR_FAILED,
            "Empty resultClassName passed to ReferenceNames")

    assocClass = ch.GetClass(resultClassName, objectName.namespace,
                             LocalOnly=False,
                             IncludeQualifiers=True)
    keys = pywbem.NocaseDict()
    keyNames = [p.name for p in assocClass.properties.values()
                if 'key' in p.qualifiers]
    for keyName in keyNames:
        p = assocClass.properties[keyName]
        keys.__setitem__(p.name, p)
    _strip_quals(keys)
    model = pywbem.CIMInstance(classname=assocClass.classname,
                               properties=keys)
    model.path = pywbem.CIMInstanceName(classname=assocClass.classname,
                                        namespace=objectName.namespace)
    #if role is None:
    #    raise pywbem.CIMError(pywbem.CIM_ERR_FAILED,
    #                          "** this shouldn't happen")
    if role:
        if role not in model.properties:
            raise pywbem.CIMError(pywbem.CIM_ERR_FAILED,
                                  "** this shouldn't happen")
        model[role] = objectName
    for inst in self.references(env=env,
                                object_name=objectName,
                                model=model,
                                assoc_class=assocClass,
                                result_class_name='',
                                role=role,
                                result_role=None,
                                keys_only=True):
        for prop in inst.properties.values():
            if hasattr(prop.value, 'namespace') and \
                    prop.value.namespace is None:
                prop.value.namespace = objectName.namespace
        yield build_instance_name(inst, keyNames)
    logger.log_debug('CIMProvider MI_referenceNames returning')
```

| 3,209 | django-treebeard/django-treebeard | treebeard/mp_tree.py | MP_Node.get_siblings | python | train | https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L920-L935 |

```python
def get_siblings(self):
    """
    :returns: A queryset of all the node's siblings, including the node
        itself.
    """
    qset = get_result_class(self.__class__).objects.filter(
        depth=self.depth
    ).order_by(
        'path'
    )
    if self.depth > 1:
        # making sure the non-root nodes share a parent
        parentpath = self._get_basepath(self.path, self.depth - 1)
        qset = qset.filter(
            path__range=self._get_children_path_interval(parentpath))
    return qset
```

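
A rough usage sketch for `get_siblings`, assuming a configured Django project with django-treebeard installed; the `Category` model and its field are hypothetical:

```python
from django.db import models
from treebeard.mp_tree import MP_Node


class Category(MP_Node):
    name = models.CharField(max_length=50)


# Build a small tree, then ask a child for its siblings (itself included).
root = Category.add_root(name='root')
books = root.add_child(name='books')
music = root.add_child(name='music')

assert set(books.get_siblings()) == {books, music}
```
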
| 3,210 | LinuxChristian/pyW215 | pyW215/pyW215.py | SmartPlug.total_consumption | python | train | https://github.com/LinuxChristian/pyW215/blob/63e50b8ee11bc38ed66554f9b92429b552dda550/pyW215/pyW215.py#L229-L250 |

```python
def total_consumption(self):
    """Get the total power consumpuntion in the device lifetime."""
    if self.use_legacy_protocol:
        # TotalConsumption currently fails on the legacy protocol and
        # creates a mess in the logs. Just return 'N/A' for now.
        return 'N/A'

    res = 'N/A'
    try:
        res = self.SOAPAction("GetPMWarningThreshold", "TotalConsumption", self.moduleParameters("2"))
    except:
        return 'N/A'

    if res is None:
        return 'N/A'

    try:
        float(res)
    except ValueError:
        _LOGGER.error("Failed to retrieve total power consumption from SmartPlug")

    return res
```

| 3,211 | viralogic/py-enumerable | py_linq/py_linq3.py | Enumerable3.order_by_descending | python | train | https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq3.py#L188-L197 |

```python
def order_by_descending(self, key):
    """
    Returns new Enumerable sorted in descending order by given key

    :param key: key to sort by as lambda expression
    :return: new Enumerable object
    """
    if key is None:
        raise NullArgumentError(u"No key for sorting given")
    kf = [OrderingDirection(key, reverse=True)]
    return SortedEnumerable3(kf, self._data)
```

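
A brief, hypothetical usage sketch; it assumes `Enumerable` is the package's public entry point and that `to_list()` materialises the result, both of which are assumptions about the surrounding py_linq API rather than part of the source shown:

```python
from py_linq import Enumerable

# Sort a small collection in descending order by its own value.
Enumerable([1, 5, 3]).order_by_descending(lambda x: x).to_list()
# -> [5, 3, 1]
```
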
| 3,212 | yatiml/yatiml | yatiml/representers.py | Representer.__sweeten | python | train | https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/representers.py#L80-L98 |

```python
def __sweeten(self, dumper: 'Dumper', class_: Type, node: Node) -> None:
    """Applies the user's yatiml_sweeten() function(s), if any.

    Sweetening is done for the base classes first, then for the \
    derived classes, down the hierarchy to the class we're \
    constructing.

    Args:
        dumper: The dumper that is dumping this object.
        class_: The type of the object to be dumped.
        represented_object: The object to be dumped.
    """
    for base_class in class_.__bases__:
        if base_class in dumper.yaml_representers:
            logger.debug('Sweetening for class {}'.format(
                self.class_.__name__))
            self.__sweeten(dumper, base_class, node)
    if hasattr(class_, 'yatiml_sweeten'):
        class_.yatiml_sweeten(node)
```

| 3,213 | sirfoga/pyhal | hal/maths/primes.py | Integer.is_probably_prime | python | train | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/maths/primes.py#L44-L58 |

```python
def is_probably_prime(self):
    """Tests with miller-rabin

    :return: True iff prime
    """
    if self.is_naive_prime():
        return True

    # check if multiple pf low primes
    for prime in LOW_PRIMES:
        if self.to_int % prime == 0:
            return False

    # if all else fails, call rabin to determine if to_int is prime
    return self.test_miller_rabin(5)
```

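
The method above leans on the class's own helpers (`is_naive_prime`, `LOW_PRIMES`, `test_miller_rabin`). As a self-contained, hypothetical sketch of the same two-stage idea (trial division by small primes, then a probabilistic Miller-Rabin test):

```python
import random

LOW_PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]


def miller_rabin(n, rounds=5):
    """Return True if n is probably prime, False if it is composite."""
    if n < 2:
        return False
    for p in LOW_PRIMES:
        if n == p:
            return True
        if n % p == 0:
            return False
    # write n - 1 as d * 2^r with d odd
    d, r = n - 1, 0
    while d % 2 == 0:
        d //= 2
        r += 1
    for _ in range(rounds):
        a = random.randrange(2, n - 1)
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False
    return True
```
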
| 3,214 | google/grr | grr/core/grr_response_core/stats/default_stats_collector.py | DefaultStatsCollector.RecordEvent | python | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/stats/default_stats_collector.py#L190-L192 |

```python
def RecordEvent(self, metric_name, value, fields=None):
    """See base class."""
    self._event_metrics[metric_name].Record(value, fields)
```

| 3,215 | saltstack/salt | salt/utils/data.py | compare_lists | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/data.py#L147-L159 |

```python
def compare_lists(old=None, new=None):
    '''
    Compare before and after results from various salt functions, returning a
    dict describing the changes that were made
    '''
    ret = dict()
    for item in new:
        if item not in old:
            ret['new'] = item
    for item in old:
        if item not in new:
            ret['old'] = item
    return ret
```

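
A small worked example of the function above; because each loop keeps overwriting the same 'new'/'old' keys, only the last differing item on each side survives:

```python
old = ['apache', 'mysql', 'redis']
new = ['apache', 'redis', 'nginx']

compare_lists(old=old, new=new)
# -> {'new': 'nginx', 'old': 'mysql'}
```
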
| 3,216 | textbook/atmdb | atmdb/core.py | Service.calculate_timeout | python | train | https://github.com/textbook/atmdb/blob/cab14547d2e777a1e26c2560266365c484855789/atmdb/core.py#L65-L85 |

```python
def calculate_timeout(http_date):
    """Extract request timeout from e.g. ``Retry-After`` header.

    Notes:
      Per :rfc:`2616#section-14.37`, the ``Retry-After`` header can
      be either an integer number of seconds or an HTTP date. This
      function can handle either.

    Arguments:
      http_date (:py:class:`str`): The date to parse.

    Returns:
      :py:class:`int`: The timeout, in seconds.
    """
    try:
        return int(http_date)
    except ValueError:
        date_after = parse(http_date)
        utc_now = datetime.now(tz=timezone.utc)
        return int((date_after - utc_now).total_seconds())
```

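
A short usage sketch of the helper above (it is attached to `Service`, so it is called through the class here); it relies on `dateutil`'s `parse` and the standard-library `datetime`/`timezone` used in the body, and the result for an HTTP date depends on the current time:

```python
# An integer Retry-After value is returned unchanged.
Service.calculate_timeout('120')       # -> 120

# An HTTP date is converted into the number of seconds from now until
# that date (negative here, since the example date is in the past).
Service.calculate_timeout('Fri, 31 Dec 1999 23:59:59 GMT')
```
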
| 3,217 | GPflow/GPflow | gpflow/conditionals.py | _sample_conditional | python | train | https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/conditionals.py#L138-L170 |

```python
def _sample_conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False, num_samples=None):
    """
    `sample_conditional` will return a sample from the conditional distribution.
    In most cases this means calculating the conditional mean m and variance v and then
    returning m + sqrt(v) * eps, with eps ~ N(0, 1).
    However, for some combinations of Mok and Mof more efficient sampling routines exists.
    The dispatcher will make sure that we use the most efficient one.

    :return: samples, mean, cov
        samples has shape [num_samples, N, P] or [N, P] if num_samples is None
        mean and cov as for conditional()
    """
    if full_cov and full_output_cov:
        raise NotImplementedError("The combination of both full_cov and full_output_cov is not "
                                  "implemented for sample_conditional.")

    logger.debug("sample conditional: InducingFeature Kernel")
    mean, cov = conditional(Xnew, feat, kern, f, q_sqrt=q_sqrt, white=white,
                            full_cov=full_cov, full_output_cov=full_output_cov)
    if full_cov:
        # mean: [..., N, P]
        # cov: [..., P, N, N]
        mean_PN = tf.matrix_transpose(mean)  # [..., P, N]
        samples = _sample_mvn(mean_PN, cov, 'full', num_samples=num_samples)  # [..., (S), P, N]
        samples = tf.matrix_transpose(samples)  # [..., (S), P, N]
    else:
        # mean: [..., N, P]
        # cov: [..., N, P] or [..., N, P, P]
        cov_structure = "full" if full_output_cov else "diag"
        samples = _sample_mvn(mean, cov, cov_structure, num_samples=num_samples)  # [..., (S), P, N]

    return samples, mean, cov
```

| 3,218 | cisco-sas/kitty | kitty/model/low_level/encoder.py | StrNullTerminatedEncoder.encode | python | train | https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/encoder.py#L158-L163 |

```python
def encode(self, value):
    '''
    :param value: value to encode
    '''
    encoded = strToBytes(value) + b'\x00'
    return Bits(bytes=encoded)
```

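
A brief, hypothetical usage sketch, assuming the encoder can be constructed without arguments (construction details are not shown above):

```python
encoder = StrNullTerminatedEncoder()   # assumed default construction
bits = encoder.encode('abc')
bits.bytes  # -> b'abc\x00' (the value with a terminating null byte appended)
```
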
| 3,219 | bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/build.py | set_dependencies | python | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/build.py#L1458-L1474 |

```python
def set_dependencies(analysis, dependencies, path):
    """
    Syncronize the Analysis result with the needed dependencies.
    """
    for toc in (analysis.binaries, analysis.datas):
        for i, tpl in enumerate(toc):
            if not tpl[1] in dependencies.keys():
                logger.info("Adding dependency %s located in %s" % (tpl[1], path))
                dependencies[tpl[1]] = path
            else:
                dep_path = get_relative_path(path, dependencies[tpl[1]])
                logger.info("Referencing %s to be a dependecy for %s, located in %s" % (tpl[1], path, dep_path))
                analysis.dependencies.append((":".join((dep_path, tpl[0])), tpl[1], "DEPENDENCY"))
                toc[i] = (None, None, None)
        # Clean the list
        toc[:] = [tpl for tpl in toc if tpl != (None, None, None)]
```

| 3,220 | rmed/flask-waffleconf | flask_waffleconf/core.py | _WaffleState.update_conf | python | train | https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/core.py#L146-L157 |

```python
def update_conf(self):
    """Update configuration values from database.

    This method should be called when there is an update notification.
    """
    parsed = self.parse_conf()
    if not parsed:
        return None

    # Update app config
    self.app.config.update(parsed)
```

| 3,221 | markovmodel/msmtools | msmtools/flux/dense/tpt.py | coarsegrain | python | train | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/flux/dense/tpt.py#L186-L210 |

```python
def coarsegrain(F, sets):
    r"""Coarse-grains the flux to the given sets

    $fc_{i,j} = \sum_{i \in I,j \in J} f_{i,j}$

    Note that if you coarse-grain a net flux, it does not necessarily have a net
    flux property anymore. If want to make sure you get a netflux,
    use to_netflux(coarsegrain(F,sets)).

    Parameters
    ----------
    F : (n, n) ndarray
        Matrix of flux values between pairs of states.
    sets : list of array-like of ints
        The sets of states onto which the flux is coarse-grained.
    """
    nnew = len(sets)
    Fc = np.zeros((nnew, nnew))
    for i in range(0, nnew - 1):
        for j in range(i + 1, nnew):
            I = list(sets[i])
            J = list(sets[j])
            Fc[i, j] = np.sum(F[I, :][:, J])
            Fc[j, i] = np.sum(F[J, :][:, I])
    return Fc
```

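
A tiny worked example of the coarse-graining above, using plain NumPy (no other msmtools machinery is needed):

```python
import numpy as np

# Flux between three microstates, grouped into the sets {0} and {1, 2}.
F = np.array([[0.0, 0.3, 0.1],
              [0.0, 0.0, 0.2],
              [0.0, 0.0, 0.0]])
sets = [[0], [1, 2]]

coarsegrain(F, sets)
# -> array([[0. , 0.4],
#           [0. , 0. ]])
# 0.4 = 0.3 + 0.1 is the total flux from set {0} into set {1, 2};
# flux within a set (here the 0.2 between states 1 and 2) is dropped.
```
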
| 3,222 | lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_find_error_evaluator | python | train | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L577-L586 |

```python
def rs_find_error_evaluator(synd, err_loc, nsym):
    '''Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.'''
    # Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(n-k+1)
    _, remainder = gf_poly_div( gf_poly_mul(synd, err_loc), ([1] + [0]*(nsym+1)) ) # first multiply syndromes * errata_locator, then do a polynomial division to truncate the polynomial to the required length

    # Faster way that is equivalent
    #remainder = gf_poly_mul(synd, err_loc) # first multiply the syndromes with the errata locator polynomial
    #remainder = remainder[len(remainder)-(nsym+1):] # then divide by a polynomial of the length we want, which is equivalent to slicing the list (which represents the polynomial)
    return remainder
```

3,223 | MaxHalford/starboost | starboost/boosting.py | BoostingClassifier.iter_predict_proba | def iter_predict_proba(self, X, include_init=False):
"""Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage
"""
utils.validation.check_is_fitted(self, 'init_estimator_')
X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)
for y_pred in super().iter_predict(X, include_init=include_init):
if len(self.classes_) == 2:
probas[:, 1] = sigmoid(y_pred[:, 0])
probas[:, 0] = 1. - probas[:, 1]
else:
probas[:] = softmax(y_pred)
yield probas | python | def iter_predict_proba(self, X, include_init=False):
"""Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage
"""
utils.validation.check_is_fitted(self, 'init_estimator_')
X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)
for y_pred in super().iter_predict(X, include_init=include_init):
if len(self.classes_) == 2:
probas[:, 1] = sigmoid(y_pred[:, 0])
probas[:, 0] = 1. - probas[:, 1]
else:
probas[:] = softmax(y_pred)
yield probas | ['def', 'iter_predict_proba', '(', 'self', ',', 'X', ',', 'include_init', '=', 'False', ')', ':', 'utils', '.', 'validation', '.', 'check_is_fitted', '(', 'self', ',', "'init_estimator_'", ')', 'X', '=', 'utils', '.', 'check_array', '(', 'X', ',', 'accept_sparse', '=', '[', "'csr'", ',', "'csc'", ']', ',', 'dtype', '=', 'None', ',', 'force_all_finite', '=', 'False', ')', 'probas', '=', 'np', '.', 'empty', '(', 'shape', '=', '(', 'len', '(', 'X', ')', ',', 'len', '(', 'self', '.', 'classes_', ')', ')', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'for', 'y_pred', 'in', 'super', '(', ')', '.', 'iter_predict', '(', 'X', ',', 'include_init', '=', 'include_init', ')', ':', 'if', 'len', '(', 'self', '.', 'classes_', ')', '==', '2', ':', 'probas', '[', ':', ',', '1', ']', '=', 'sigmoid', '(', 'y_pred', '[', ':', ',', '0', ']', ')', 'probas', '[', ':', ',', '0', ']', '=', '1.', '-', 'probas', '[', ':', ',', '1', ']', 'else', ':', 'probas', '[', ':', ']', '=', 'softmax', '(', 'y_pred', ')', 'yield', 'probas'] | Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage | ['Returns', 'the', 'predicted', 'probabilities', 'for', 'X', 'at', 'every', 'stage', 'of', 'the', 'boosting', 'procedure', '.'] | train | https://github.com/MaxHalford/starboost/blob/59d96dcc983404cbc326878facd8171fd2655ce1/starboost/boosting.py#L343-L367 |
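The snippet above assumes sigmoid and softmax helpers that turn raw boosting scores into probabilities; starboost's own implementations are not shown here, so the following is an illustrative numpy sketch of what such helpers typically look like.

import numpy as np

def sigmoid(x):
    # plain logistic function; fine as a sketch, though production code often
    # guards against overflow for large negative inputs
    return 1.0 / (1.0 + np.exp(-x))

def softmax(scores):
    # row-wise softmax with the usual max-subtraction trick for stability
    z = scores - scores.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

print(softmax(np.array([[0.3, -1.2, 2.0]])).sum(axis=1))   # rows sum to 1.0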
3,224 | creare-com/pydem | pydem/processing_manager.py | TileEdgeFile.get_edge_init_data | def get_edge_init_data(self, fn, save_path=None):
"""
Creates the initialization data from the edge structure
"""
edge_init_data = {key: self.edges[fn][key].get('data') for key in
self.edges[fn].keys()}
edge_init_done = {key: self.edges[fn][key].get('done') for key in
self.edges[fn].keys()}
edge_init_todo = {key: self.edges[fn][key].get('todo') for key in
self.edges[fn].keys()}
return edge_init_data, edge_init_done, edge_init_todo | python | def get_edge_init_data(self, fn, save_path=None):
"""
Creates the initialization data from the edge structure
"""
edge_init_data = {key: self.edges[fn][key].get('data') for key in
self.edges[fn].keys()}
edge_init_done = {key: self.edges[fn][key].get('done') for key in
self.edges[fn].keys()}
edge_init_todo = {key: self.edges[fn][key].get('todo') for key in
self.edges[fn].keys()}
return edge_init_data, edge_init_done, edge_init_todo | ['def', 'get_edge_init_data', '(', 'self', ',', 'fn', ',', 'save_path', '=', 'None', ')', ':', 'edge_init_data', '=', '{', 'key', ':', 'self', '.', 'edges', '[', 'fn', ']', '[', 'key', ']', '.', 'get', '(', "'data'", ')', 'for', 'key', 'in', 'self', '.', 'edges', '[', 'fn', ']', '.', 'keys', '(', ')', '}', 'edge_init_done', '=', '{', 'key', ':', 'self', '.', 'edges', '[', 'fn', ']', '[', 'key', ']', '.', 'get', '(', "'done'", ')', 'for', 'key', 'in', 'self', '.', 'edges', '[', 'fn', ']', '.', 'keys', '(', ')', '}', 'edge_init_todo', '=', '{', 'key', ':', 'self', '.', 'edges', '[', 'fn', ']', '[', 'key', ']', '.', 'get', '(', "'todo'", ')', 'for', 'key', 'in', 'self', '.', 'edges', '[', 'fn', ']', '.', 'keys', '(', ')', '}', 'return', 'edge_init_data', ',', 'edge_init_done', ',', 'edge_init_todo'] | Creates the initialization data from the edge structure | ['Creates', 'the', 'initialization', 'data', 'from', 'the', 'edge', 'structure'] | train | https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/processing_manager.py#L503-L514 |
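The three comprehensions above split one nested mapping into parallel dicts keyed the same way; a tiny self-contained illustration (the edge names and values below are made up, not taken from pydem):

edges_for_tile = {
    'top':    {'data': [1, 2], 'done': True,  'todo': []},
    'bottom': {'data': [3, 4], 'done': False, 'todo': [0]},
}
edge_init_data = {key: val.get('data') for key, val in edges_for_tile.items()}
edge_init_done = {key: val.get('done') for key, val in edges_for_tile.items()}
edge_init_todo = {key: val.get('todo') for key, val in edges_for_tile.items()}
print(edge_init_data)   # {'top': [1, 2], 'bottom': [3, 4]}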
3,225 | erikvw/django-collect-offline-files | django_collect_offline_files/transaction/transaction_importer.py | JSONLoadFile.deserialized_objects | def deserialized_objects(self):
"""Returns a generator of deserialized objects.
"""
if not self._deserialized_objects:
json_text = self.read()
self._deserialized_objects = self.deserialize(json_text=json_text)
return self._deserialized_objects | python | def deserialized_objects(self):
"""Returns a generator of deserialized objects.
"""
if not self._deserialized_objects:
json_text = self.read()
self._deserialized_objects = self.deserialize(json_text=json_text)
return self._deserialized_objects | ['def', 'deserialized_objects', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_deserialized_objects', ':', 'json_text', '=', 'self', '.', 'read', '(', ')', 'self', '.', '_deserialized_objects', '=', 'self', '.', 'deserialize', '(', 'json_text', '=', 'json_text', ')', 'return', 'self', '.', '_deserialized_objects'] | Returns a generator of deserialized objects. | ['Returns', 'a', 'generator', 'of', 'deserialized', 'objects', '.'] | train | https://github.com/erikvw/django-collect-offline-files/blob/78f61c823ea3926eb88206b019b5dca3c36017da/django_collect_offline_files/transaction/transaction_importer.py#L78-L84 |
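The property above is a lazy-initialization cache: the expensive deserialization runs only on first access and the stored result is reused afterwards. A generic sketch of the same pattern follows (file reading stands in for deserialization; the names are illustrative). Note that when the cached value is a generator, as in the original, it can only be iterated once.

class LazyLoader:
    def __init__(self, path):
        self.path = path
        self._payload = None

    @property
    def payload(self):
        if self._payload is None:
            with open(self.path) as fh:      # expensive step runs only once
                self._payload = fh.read()
        return self._payload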
3,226 | PlaidWeb/Publ | publ/category.py | Category.description | def description(self):
""" Get the textual description of the category """
if self._meta and self._meta.get_payload():
return utils.TrueCallableProxy(self._description)
return utils.CallableProxy(None) | python | def description(self):
""" Get the textual description of the category """
if self._meta and self._meta.get_payload():
return utils.TrueCallableProxy(self._description)
return utils.CallableProxy(None) | ['def', 'description', '(', 'self', ')', ':', 'if', 'self', '.', '_meta', 'and', 'self', '.', '_meta', '.', 'get_payload', '(', ')', ':', 'return', 'utils', '.', 'TrueCallableProxy', '(', 'self', '.', '_description', ')', 'return', 'utils', '.', 'CallableProxy', '(', 'None', ')'] | Get the textual description of the category | ['Get', 'the', 'textual', 'description', 'of', 'the', 'category'] | train | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L127-L131 |
3,227 | welbornprod/colr | colr/trans.py | hex2termhex | def hex2termhex(hexval: str, allow_short: bool = False) -> str:
""" Convert a hex value into the nearest terminal color matched hex. """
return rgb2termhex(*hex2rgb(hexval, allow_short=allow_short)) | python | def hex2termhex(hexval: str, allow_short: bool = False) -> str:
""" Convert a hex value into the nearest terminal color matched hex. """
return rgb2termhex(*hex2rgb(hexval, allow_short=allow_short)) | ['def', 'hex2termhex', '(', 'hexval', ':', 'str', ',', 'allow_short', ':', 'bool', '=', 'False', ')', '->', 'str', ':', 'return', 'rgb2termhex', '(', '*', 'hex2rgb', '(', 'hexval', ',', 'allow_short', '=', 'allow_short', ')', ')'] | Convert a hex value into the nearest terminal color matched hex. | ['Convert', 'a', 'hex', 'value', 'into', 'the', 'nearest', 'terminal', 'color', 'matched', 'hex', '.'] | train | https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/trans.py#L385-L387 |
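hex2termhex above delegates to hex2rgb and rgb2termhex, neither of which is shown; the fragment below is a standalone sketch of the hex-parsing half only, including the allow_short handling for 3-digit colors.

def hex_to_rgb(hexval, allow_short=False):
    s = hexval.lstrip('#')
    if allow_short and len(s) == 3:
        s = ''.join(ch * 2 for ch in s)          # 'f80' -> 'ff8800'
    if len(s) != 6:
        raise ValueError('expected a 3- or 6-digit hex color: %r' % hexval)
    return tuple(int(s[i:i + 2], 16) for i in (0, 2, 4))

print(hex_to_rgb('#ff8800'))                 # (255, 136, 0)
print(hex_to_rgb('f80', allow_short=True))   # (255, 136, 0)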
3,228 | CybOXProject/mixbox | mixbox/fields.py | iterfields | def iterfields(klass):
"""Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
"""
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field | python | def iterfields(klass):
"""Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples.
"""
is_field = lambda x: isinstance(x, TypedField)
for name, field in inspect.getmembers(klass, predicate=is_field):
yield name, field | ['def', 'iterfields', '(', 'klass', ')', ':', 'is_field', '=', 'lambda', 'x', ':', 'isinstance', '(', 'x', ',', 'TypedField', ')', 'for', 'name', ',', 'field', 'in', 'inspect', '.', 'getmembers', '(', 'klass', ',', 'predicate', '=', 'is_field', ')', ':', 'yield', 'name', ',', 'field'] | Iterate over the input class members and yield its TypedFields.
Args:
klass: A class (usually an Entity subclass).
Yields:
(class attribute name, TypedField instance) tuples. | ['Iterate', 'over', 'the', 'input', 'class', 'members', 'and', 'yield', 'its', 'TypedFields', '.'] | train | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/fields.py#L50-L62 |
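The inspect.getmembers call with a predicate is the whole trick here: it returns (name, value) pairs for class attributes that are instances of TypedField. A self-contained sketch with a stand-in field class (not the real mixbox TypedField):

import inspect

class TypedField:                      # stand-in for mixbox.fields.TypedField
    def __init__(self, name):
        self.name = name

class Address:
    street = TypedField('street')
    city = TypedField('city')

def iterfields(klass):
    is_field = lambda x: isinstance(x, TypedField)
    for name, field in inspect.getmembers(klass, predicate=is_field):
        yield name, field

print([name for name, _ in iterfields(Address)])   # ['city', 'street'] (sorted by name)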
3,229 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/type.py | change_generated_target_suffix | def change_generated_target_suffix (type, properties, suffix):
""" Change the suffix previously registered for this type/properties
combination. If suffix is not yet specified, sets it.
"""
assert isinstance(type, basestring)
assert is_iterable_typed(properties, basestring)
assert isinstance(suffix, basestring)
change_generated_target_ps(1, type, properties, suffix) | python | def change_generated_target_suffix (type, properties, suffix):
""" Change the suffix previously registered for this type/properties
combination. If suffix is not yet specified, sets it.
"""
assert isinstance(type, basestring)
assert is_iterable_typed(properties, basestring)
assert isinstance(suffix, basestring)
change_generated_target_ps(1, type, properties, suffix) | ['def', 'change_generated_target_suffix', '(', 'type', ',', 'properties', ',', 'suffix', ')', ':', 'assert', 'isinstance', '(', 'type', ',', 'basestring', ')', 'assert', 'is_iterable_typed', '(', 'properties', ',', 'basestring', ')', 'assert', 'isinstance', '(', 'suffix', ',', 'basestring', ')', 'change_generated_target_ps', '(', '1', ',', 'type', ',', 'properties', ',', 'suffix', ')'] | Change the suffix previously registered for this type/properties
combination. If suffix is not yet specified, sets it. | ['Change', 'the', 'suffix', 'previously', 'registered', 'for', 'this', 'type', '/', 'properties', 'combination', '.', 'If', 'suffix', 'is', 'not', 'yet', 'specified', 'sets', 'it', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/type.py#L242-L249 |
3,230 | rigetti/pyquil | pyquil/quil.py | merge_programs | def merge_programs(prog_list):
"""
Merges a list of pyQuil programs into a single one by appending them in sequence.
If multiple programs in the list contain the same gate and/or noisy gate definition
with identical name, this definition will only be applied once. If different definitions
with the same name appear multiple times in the program list, each will be applied once
in the order of last occurrence.
:param list prog_list: A list of pyquil programs
:return: a single pyQuil program
:rtype: Program
"""
definitions = [gate for prog in prog_list for gate in Program(prog).defined_gates]
seen = {}
# Collect definitions in reverse order and reapply definitions in reverse
# collected order to ensure that the last occurrence of a definition is applied last.
for definition in reversed(definitions):
name = definition.name
if name in seen.keys():
# Do not add truly identical definitions with the same name
# If two different definitions share a name, we include each definition so as to provide
# a warning to the user when the contradictory defgate is called.
if definition not in seen[name]:
seen[name].append(definition)
else:
seen[name] = [definition]
new_definitions = [gate for key in seen.keys() for gate in reversed(seen[key])]
p = sum([Program(prog).instructions for prog in prog_list], Program()) # Combine programs without gate definitions
for definition in new_definitions:
p.defgate(definition.name, definition.matrix, definition.parameters)
return p | python | def merge_programs(prog_list):
"""
Merges a list of pyQuil programs into a single one by appending them in sequence.
If multiple programs in the list contain the same gate and/or noisy gate definition
with identical name, this definition will only be applied once. If different definitions
with the same name appear multiple times in the program list, each will be applied once
in the order of last occurrence.
:param list prog_list: A list of pyquil programs
:return: a single pyQuil program
:rtype: Program
"""
definitions = [gate for prog in prog_list for gate in Program(prog).defined_gates]
seen = {}
# Collect definitions in reverse order and reapply definitions in reverse
# collected order to ensure that the last occurrence of a definition is applied last.
for definition in reversed(definitions):
name = definition.name
if name in seen.keys():
# Do not add truly identical definitions with the same name
# If two different definitions share a name, we include each definition so as to provide
# a warning to the user when the contradictory defgate is called.
if definition not in seen[name]:
seen[name].append(definition)
else:
seen[name] = [definition]
new_definitions = [gate for key in seen.keys() for gate in reversed(seen[key])]
p = sum([Program(prog).instructions for prog in prog_list], Program()) # Combine programs without gate definitions
for definition in new_definitions:
p.defgate(definition.name, definition.matrix, definition.parameters)
return p | ['def', 'merge_programs', '(', 'prog_list', ')', ':', 'definitions', '=', '[', 'gate', 'for', 'prog', 'in', 'prog_list', 'for', 'gate', 'in', 'Program', '(', 'prog', ')', '.', 'defined_gates', ']', 'seen', '=', '{', '}', '# Collect definitions in reverse order and reapply definitions in reverse', '# collected order to ensure that the last occurrence of a definition is applied last.', 'for', 'definition', 'in', 'reversed', '(', 'definitions', ')', ':', 'name', '=', 'definition', '.', 'name', 'if', 'name', 'in', 'seen', '.', 'keys', '(', ')', ':', '# Do not add truly identical definitions with the same name', '# If two different definitions share a name, we include each definition so as to provide', '# a waring to the user when the contradictory defgate is called.', 'if', 'definition', 'not', 'in', 'seen', '[', 'name', ']', ':', 'seen', '[', 'name', ']', '.', 'append', '(', 'definition', ')', 'else', ':', 'seen', '[', 'name', ']', '=', '[', 'definition', ']', 'new_definitions', '=', '[', 'gate', 'for', 'key', 'in', 'seen', '.', 'keys', '(', ')', 'for', 'gate', 'in', 'reversed', '(', 'seen', '[', 'key', ']', ')', ']', 'p', '=', 'sum', '(', '[', 'Program', '(', 'prog', ')', '.', 'instructions', 'for', 'prog', 'in', 'prog_list', ']', ',', 'Program', '(', ')', ')', '# Combine programs without gate definitions', 'for', 'definition', 'in', 'new_definitions', ':', 'p', '.', 'defgate', '(', 'definition', '.', 'name', ',', 'definition', '.', 'matrix', ',', 'definition', '.', 'parameters', ')', 'return', 'p'] | Merges a list of pyQuil programs into a single one by appending them in sequence.
If multiple programs in the list contain the same gate and/or noisy gate definition
with identical name, this definition will only be applied once. If different definitions
with the same name appear multiple times in the program list, each will be applied once
in the order of last occurrence.
:param list prog_list: A list of pyquil programs
:return: a single pyQuil program
:rtype: Program | ['Merges', 'a', 'list', 'of', 'pyQuil', 'programs', 'into', 'a', 'single', 'one', 'by', 'appending', 'them', 'in', 'sequence', '.', 'If', 'multiple', 'programs', 'in', 'the', 'list', 'contain', 'the', 'same', 'gate', 'and', '/', 'or', 'noisy', 'gate', 'definition', 'with', 'identical', 'name', 'this', 'definition', 'will', 'only', 'be', 'applied', 'once', '.', 'If', 'different', 'definitions', 'with', 'the', 'same', 'name', 'appear', 'multiple', 'times', 'in', 'the', 'program', 'list', 'each', 'will', 'be', 'applied', 'once', 'in', 'the', 'order', 'of', 'last', 'occurrence', '.'] | train | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/quil.py#L941-L974 |
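The deduplication rule in merge_programs (walk definitions in reverse, keep distinct bodies per name, re-emit each name's bodies in reversed collected order) can be shown without pyquil at all. The sketch below uses plain (name, body) pairs and relies on dict insertion order (Python 3.7+); it is not the pyquil API.

def dedupe_definitions(definitions):
    # definitions: list of (name, body) pairs, earliest first
    seen = {}
    for name, body in reversed(definitions):
        bodies = seen.setdefault(name, [])
        if body not in bodies:               # drop truly identical re-definitions
            bodies.append(body)
    return [(name, body) for name in seen for body in reversed(seen[name])]

defs = [('CNOT2', 'v1'), ('H2', 'h'), ('CNOT2', 'v2'), ('CNOT2', 'v2')]
print(dedupe_definitions(defs))   # [('CNOT2', 'v1'), ('CNOT2', 'v2'), ('H2', 'h')]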
3,231 | NikolayDachev/jadm | lib/paramiko-1.14.1/paramiko/hostkeys.py | HostKeys.hash_host | def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing (must be 20 bytes long)
:return: the hashed hostname as a `str`
"""
if salt is None:
salt = os.urandom(sha1().digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = decodebytes(b(salt))
assert len(salt) == sha1().digest_size
hmac = HMAC(salt, b(hostname), sha1).digest()
hostkey = '|1|%s|%s' % (u(encodebytes(salt)), u(encodebytes(hmac)))
return hostkey.replace('\n', '') | python | def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing (must be 20 bytes long)
:return: the hashed hostname as a `str`
"""
if salt is None:
salt = os.urandom(sha1().digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = decodebytes(b(salt))
assert len(salt) == sha1().digest_size
hmac = HMAC(salt, b(hostname), sha1).digest()
hostkey = '|1|%s|%s' % (u(encodebytes(salt)), u(encodebytes(hmac)))
return hostkey.replace('\n', '') | ['def', 'hash_host', '(', 'hostname', ',', 'salt', '=', 'None', ')', ':', 'if', 'salt', 'is', 'None', ':', 'salt', '=', 'os', '.', 'urandom', '(', 'sha1', '(', ')', '.', 'digest_size', ')', 'else', ':', 'if', 'salt', '.', 'startswith', '(', "'|1|'", ')', ':', 'salt', '=', 'salt', '.', 'split', '(', "'|'", ')', '[', '2', ']', 'salt', '=', 'decodebytes', '(', 'b', '(', 'salt', ')', ')', 'assert', 'len', '(', 'salt', ')', '==', 'sha1', '(', ')', '.', 'digest_size', 'hmac', '=', 'HMAC', '(', 'salt', ',', 'b', '(', 'hostname', ')', ',', 'sha1', ')', '.', 'digest', '(', ')', 'hostkey', '=', "'|1|%s|%s'", '%', '(', 'u', '(', 'encodebytes', '(', 'salt', ')', ')', ',', 'u', '(', 'encodebytes', '(', 'hmac', ')', ')', ')', 'return', 'hostkey', '.', 'replace', '(', "'\\n'", ',', "''", ')'] | Return a "hashed" form of the hostname, as used by OpenSSH when storing
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
:param str salt: optional salt to use when hashing (must be 20 bytes long)
:return: the hashed hostname as a `str` | ['Return', 'a', 'hashed', 'form', 'of', 'the', 'hostname', 'as', 'used', 'by', 'OpenSSH', 'when', 'storing', 'hashed', 'hostnames', 'in', 'the', 'known_hosts', 'file', '.'] | train | https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/hostkeys.py#L258-L276 |
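The '|1|salt|hmac' value built above follows the OpenSSH hashed known_hosts convention: base64 of a random salt and base64 of HMAC-SHA1(salt, hostname). A standard-library-only sketch of the same construction, independent of paramiko's helpers:

import base64, hashlib, hmac, os

def hash_known_host(hostname, salt=None):
    if salt is None:
        salt = os.urandom(hashlib.sha1().digest_size)    # 20 random bytes
    digest = hmac.new(salt, hostname.encode('utf-8'), hashlib.sha1).digest()
    return '|1|{}|{}'.format(base64.b64encode(salt).decode('ascii'),
                             base64.b64encode(digest).decode('ascii'))

print(hash_known_host('example.com'))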
3,232 | BD2KOnFHIR/fhirtordf | fhirtordf/rdfsupport/rdfcompare.py | skolemize | def skolemize(gin: Graph) -> Graph:
"""
Replace all of the blank nodes in graph gin with FHIR paths
:param gin: input graph
:return: output graph
"""
gout = Graph()
# Emit any unreferenced subject BNodes (boxes)
anon_subjs = [s for s in gin.subjects() if isinstance(s, BNode) and len([gin.subject_predicates(s)]) == 0]
if anon_subjs:
idx = None if len(anon_subjs) == 1 else 0
for s in anon_subjs:
map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout)
if idx is not None:
idx += 1
# Cover all other non-bnode entries
for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)):
map_node(subj, subj, gin, gout)
return gout | python | def skolemize(gin: Graph) -> Graph:
"""
Replace all of the blank nodes in graph gin with FHIR paths
:param gin: input graph
:return: output graph
"""
gout = Graph()
# Emit any unreferenced subject BNodes (boxes)
anon_subjs = [s for s in gin.subjects() if isinstance(s, BNode) and len([gin.subject_predicates(s)]) == 0]
if anon_subjs:
idx = None if len(anon_subjs) == 1 else 0
for s in anon_subjs:
map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout)
if idx is not None:
idx += 1
# Cover all other non-bnode entries
for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)):
map_node(subj, subj, gin, gout)
return gout | ['def', 'skolemize', '(', 'gin', ':', 'Graph', ')', '->', 'Graph', ':', 'gout', '=', 'Graph', '(', ')', '# Emit any unreferenced subject BNodes (boxes)', 'anon_subjs', '=', '[', 's', 'for', 's', 'in', 'gin', '.', 'subjects', '(', ')', 'if', 'isinstance', '(', 's', ',', 'BNode', ')', 'and', 'len', '(', '[', 'gin', '.', 'subject_predicates', '(', 's', ')', ']', ')', '==', '0', ']', 'if', 'anon_subjs', ':', 'idx', '=', 'None', 'if', 'len', '(', 'anon_subjs', ')', '==', '1', 'else', '0', 'for', 's', 'in', 'anon_subjs', ':', 'map_node', '(', 's', ',', 'FHIR', '[', "'treeRoot'", '+', '(', "'_{}'", '.', 'format', '(', 'idx', ')', 'if', 'idx', 'is', 'not', 'None', 'else', "''", ')', ']', ',', 'gin', ',', 'gout', ')', 'if', 'idx', 'is', 'not', 'None', ':', 'idx', '+=', '1', '# Cover all other non-bnode entries', 'for', 'subj', 'in', 'set', '(', 's', 'for', 's', 'in', 'gin', '.', 'subjects', '(', ')', 'if', 'isinstance', '(', 's', ',', 'URIRef', ')', ')', ':', 'map_node', '(', 'subj', ',', 'subj', ',', 'gin', ',', 'gout', ')', 'return', 'gout'] | Replace all of the blank nodes in graph gin with FHIR paths
:param gin: input graph
:return: output graph | ['Replace', 'all', 'of', 'the', 'blank', 'nodes', 'in', 'graph', 'gin', 'with', 'FHIR', 'paths', ':', 'param', 'gin', ':', 'input', 'graph', ':', 'return', ':', 'output', 'graph'] | train | https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L66-L86 |
3,233 | tanghaibao/goatools | goatools/obo_parser.py | GODag.draw_lineage | def draw_lineage(self, recs, nodecolor="mediumseagreen",
edgecolor="lightslateblue", dpi=96,
lineage_img="GO_lineage.png", engine="pygraphviz",
gml=False, draw_parents=True, draw_children=True):
"""Draw GO DAG subplot."""
assert engine in GraphEngines
grph = None
if engine == "pygraphviz":
grph = self.make_graph_pygraphviz(recs, nodecolor, edgecolor, dpi,
draw_parents=draw_parents,
draw_children=draw_children)
else:
grph = self.make_graph_pydot(recs, nodecolor, edgecolor, dpi,
draw_parents=draw_parents, draw_children=draw_children)
if gml:
import networkx as nx # use networkx to do the conversion
gmlbase = lineage_img.rsplit(".", 1)[0]
obj = nx.from_agraph(grph) if engine == "pygraphviz" else nx.from_pydot(grph)
del obj.graph['node']
del obj.graph['edge']
gmlfile = gmlbase + ".gml"
nx.write_gml(self.label_wrap, gmlfile)
sys.stderr.write("GML graph written to {0}\n".format(gmlfile))
sys.stderr.write(("lineage info for terms %s written to %s\n" %
([rec.item_id for rec in recs], lineage_img)))
if engine == "pygraphviz":
grph.draw(lineage_img, prog="dot")
else:
grph.write_png(lineage_img) | python | def draw_lineage(self, recs, nodecolor="mediumseagreen",
edgecolor="lightslateblue", dpi=96,
lineage_img="GO_lineage.png", engine="pygraphviz",
gml=False, draw_parents=True, draw_children=True):
"""Draw GO DAG subplot."""
assert engine in GraphEngines
grph = None
if engine == "pygraphviz":
grph = self.make_graph_pygraphviz(recs, nodecolor, edgecolor, dpi,
draw_parents=draw_parents,
draw_children=draw_children)
else:
grph = self.make_graph_pydot(recs, nodecolor, edgecolor, dpi,
draw_parents=draw_parents, draw_children=draw_children)
if gml:
import networkx as nx # use networkx to do the conversion
gmlbase = lineage_img.rsplit(".", 1)[0]
obj = nx.from_agraph(grph) if engine == "pygraphviz" else nx.from_pydot(grph)
del obj.graph['node']
del obj.graph['edge']
gmlfile = gmlbase + ".gml"
nx.write_gml(self.label_wrap, gmlfile)
sys.stderr.write("GML graph written to {0}\n".format(gmlfile))
sys.stderr.write(("lineage info for terms %s written to %s\n" %
([rec.item_id for rec in recs], lineage_img)))
if engine == "pygraphviz":
grph.draw(lineage_img, prog="dot")
else:
grph.write_png(lineage_img) | ['def', 'draw_lineage', '(', 'self', ',', 'recs', ',', 'nodecolor', '=', '"mediumseagreen"', ',', 'edgecolor', '=', '"lightslateblue"', ',', 'dpi', '=', '96', ',', 'lineage_img', '=', '"GO_lineage.png"', ',', 'engine', '=', '"pygraphviz"', ',', 'gml', '=', 'False', ',', 'draw_parents', '=', 'True', ',', 'draw_children', '=', 'True', ')', ':', 'assert', 'engine', 'in', 'GraphEngines', 'grph', '=', 'None', 'if', 'engine', '==', '"pygraphviz"', ':', 'grph', '=', 'self', '.', 'make_graph_pygraphviz', '(', 'recs', ',', 'nodecolor', ',', 'edgecolor', ',', 'dpi', ',', 'draw_parents', '=', 'draw_parents', ',', 'draw_children', '=', 'draw_children', ')', 'else', ':', 'grph', '=', 'self', '.', 'make_graph_pydot', '(', 'recs', ',', 'nodecolor', ',', 'edgecolor', ',', 'dpi', ',', 'draw_parents', '=', 'draw_parents', ',', 'draw_children', '=', 'draw_children', ')', 'if', 'gml', ':', 'import', 'networkx', 'as', 'nx', '# use networkx to do the conversion', 'gmlbase', '=', 'lineage_img', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '0', ']', 'obj', '=', 'nx', '.', 'from_agraph', '(', 'grph', ')', 'if', 'engine', '==', '"pygraphviz"', 'else', 'nx', '.', 'from_pydot', '(', 'grph', ')', 'del', 'obj', '.', 'graph', '[', "'node'", ']', 'del', 'obj', '.', 'graph', '[', "'edge'", ']', 'gmlfile', '=', 'gmlbase', '+', '".gml"', 'nx', '.', 'write_gml', '(', 'self', '.', 'label_wrap', ',', 'gmlfile', ')', 'sys', '.', 'stderr', '.', 'write', '(', '"GML graph written to {0}\\n"', '.', 'format', '(', 'gmlfile', ')', ')', 'sys', '.', 'stderr', '.', 'write', '(', '(', '"lineage info for terms %s written to %s\\n"', '%', '(', '[', 'rec', '.', 'item_id', 'for', 'rec', 'in', 'recs', ']', ',', 'lineage_img', ')', ')', ')', 'if', 'engine', '==', '"pygraphviz"', ':', 'grph', '.', 'draw', '(', 'lineage_img', ',', 'prog', '=', '"dot"', ')', 'else', ':', 'grph', '.', 'write_png', '(', 'lineage_img', ')'] | Draw GO DAG subplot. | ['Draw', 'GO', 'DAG', 'subplot', '.'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L601-L633 |
3,234 | timothycrosley/deprecated.frosted | frosted/checker.py | Checker.add_binding | def add_binding(self, node, value, report_redef=True):
"""Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `report_redef` is True (default), rebinding while unused will be
reported.
"""
redefinedWhileUnused = False
if not isinstance(self.scope, ClassScope):
for scope in self.scope_stack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation) or
value.fullName == existing.fullName)
and report_redef
and not self.different_forks(node, existing.source)):
redefinedWhileUnused = True
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
existing = self.scope.get(value.name)
if not redefinedWhileUnused and self.has_parent(value.source, ast.ListComp):
if (existing and report_redef
and not self.has_parent(existing.source, (ast.For, ast.ListComp))
and not self.different_forks(node, existing.source)):
self.report(messages.RedefinedInListComp,
node, value.name, existing.source)
if (isinstance(existing, Definition)
and not existing.used
and not self.different_forks(node, existing.source)):
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
else:
self.scope[value.name] = value | python | def add_binding(self, node, value, report_redef=True):
"""Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `report_redef` is True (default), rebinding while unused will be
reported.
"""
redefinedWhileUnused = False
if not isinstance(self.scope, ClassScope):
for scope in self.scope_stack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation) or
value.fullName == existing.fullName)
and report_redef
and not self.different_forks(node, existing.source)):
redefinedWhileUnused = True
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
existing = self.scope.get(value.name)
if not redefinedWhileUnused and self.has_parent(value.source, ast.ListComp):
if (existing and report_redef
and not self.has_parent(existing.source, (ast.For, ast.ListComp))
and not self.different_forks(node, existing.source)):
self.report(messages.RedefinedInListComp,
node, value.name, existing.source)
if (isinstance(existing, Definition)
and not existing.used
and not self.different_forks(node, existing.source)):
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
else:
self.scope[value.name] = value | ['def', 'add_binding', '(', 'self', ',', 'node', ',', 'value', ',', 'report_redef', '=', 'True', ')', ':', 'redefinedWhileUnused', '=', 'False', 'if', 'not', 'isinstance', '(', 'self', '.', 'scope', ',', 'ClassScope', ')', ':', 'for', 'scope', 'in', 'self', '.', 'scope_stack', '[', ':', ':', '-', '1', ']', ':', 'existing', '=', 'scope', '.', 'get', '(', 'value', '.', 'name', ')', 'if', '(', 'isinstance', '(', 'existing', ',', 'Importation', ')', 'and', 'not', 'existing', '.', 'used', 'and', '(', 'not', 'isinstance', '(', 'value', ',', 'Importation', ')', 'or', 'value', '.', 'fullName', '==', 'existing', '.', 'fullName', ')', 'and', 'report_redef', 'and', 'not', 'self', '.', 'different_forks', '(', 'node', ',', 'existing', '.', 'source', ')', ')', ':', 'redefinedWhileUnused', '=', 'True', 'self', '.', 'report', '(', 'messages', '.', 'RedefinedWhileUnused', ',', 'node', ',', 'value', '.', 'name', ',', 'existing', '.', 'source', ')', 'existing', '=', 'self', '.', 'scope', '.', 'get', '(', 'value', '.', 'name', ')', 'if', 'not', 'redefinedWhileUnused', 'and', 'self', '.', 'has_parent', '(', 'value', '.', 'source', ',', 'ast', '.', 'ListComp', ')', ':', 'if', '(', 'existing', 'and', 'report_redef', 'and', 'not', 'self', '.', 'has_parent', '(', 'existing', '.', 'source', ',', '(', 'ast', '.', 'For', ',', 'ast', '.', 'ListComp', ')', ')', 'and', 'not', 'self', '.', 'different_forks', '(', 'node', ',', 'existing', '.', 'source', ')', ')', ':', 'self', '.', 'report', '(', 'messages', '.', 'RedefinedInListComp', ',', 'node', ',', 'value', '.', 'name', ',', 'existing', '.', 'source', ')', 'if', '(', 'isinstance', '(', 'existing', ',', 'Definition', ')', 'and', 'not', 'existing', '.', 'used', 'and', 'not', 'self', '.', 'different_forks', '(', 'node', ',', 'existing', '.', 'source', ')', ')', ':', 'self', '.', 'report', '(', 'messages', '.', 'RedefinedWhileUnused', ',', 'node', ',', 'value', '.', 'name', ',', 'existing', '.', 'source', ')', 'else', ':', 'self', '.', 'scope', '[', 'value', '.', 'name', ']', '=', 'value'] | Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `report_redef` is True (default), rebinding while unused will be
reported. | ['Called', 'when', 'a', 'binding', 'is', 'altered', '.'] | train | https://github.com/timothycrosley/deprecated.frosted/blob/61ba7f341fc55676c3580c8c4e52117986cd5e12/frosted/checker.py#L407-L445 |
3,235 | pudo/banal | banal/dicts.py | keys_values | def keys_values(data, *keys):
"""Get an entry as a list from a dict. Provide a fallback key."""
values = []
if is_mapping(data):
for key in keys:
if key in data:
values.extend(ensure_list(data[key]))
return values | python | def keys_values(data, *keys):
"""Get an entry as a list from a dict. Provide a fallback key."""
values = []
if is_mapping(data):
for key in keys:
if key in data:
values.extend(ensure_list(data[key]))
return values | ['def', 'keys_values', '(', 'data', ',', '*', 'keys', ')', ':', 'values', '=', '[', ']', 'if', 'is_mapping', '(', 'data', ')', ':', 'for', 'key', 'in', 'keys', ':', 'if', 'key', 'in', 'data', ':', 'values', '.', 'extend', '(', 'ensure_list', '(', 'data', '[', 'key', ']', ')', ')', 'return', 'values'] | Get an entry as a list from a dict. Provide a fallback key. | ['Get', 'an', 'entry', 'as', 'a', 'list', 'from', 'a', 'dict', '.', 'Provide', 'a', 'fallback', 'key', '.'] | train | https://github.com/pudo/banal/blob/528c339be5138458e387a058581cf7d261285447/banal/dicts.py#L32-L39 |
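keys_values depends on banal's is_mapping and ensure_list; the sketch below inlines simplified versions of both so the fallback-key behaviour can be run on its own (the real helpers handle more container types).

def ensure_list(value):
    if value is None:
        return []
    if isinstance(value, (list, tuple, set)):
        return list(value)
    return [value]

def keys_values(data, *keys):
    values = []
    if isinstance(data, dict):               # simplified is_mapping
        for key in keys:
            if key in data:
                values.extend(ensure_list(data[key]))
    return values

record = {'alias': 'ACME', 'aliases': ['ACME Corp', 'Acme Inc.']}
print(keys_values(record, 'alias', 'aliases'))   # ['ACME', 'ACME Corp', 'Acme Inc.']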
3,236 | dhermes/bezier | src/bezier/surface.py | Surface.evaluate_barycentric_multi | def evaluate_barycentric_multi(self, param_vals, _verify=True):
r"""Compute multiple points on the surface.
Assumes ``param_vals`` has three columns of barycentric coordinates.
See :meth:`evaluate_barycentric` for more details on how each row of
parameter values is evaluated.
.. image:: ../../images/surface_evaluate_barycentric_multi.png
:align: center
.. doctest:: surface-eval-multi2
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0 , 2.0, -1.5, -0.5, -3.0],
... [0.0, 0.75, 1.0, 1.0, 1.5, 2.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> surface
<Surface (degree=2, dimension=2)>
>>> param_vals = np.asfortranarray([
... [0. , 0.25, 0.75 ],
... [1. , 0. , 0. ],
... [0.25 , 0.5 , 0.25 ],
... [0.375, 0.25, 0.375],
... ])
>>> points = surface.evaluate_barycentric_multi(param_vals)
>>> points
array([[-1.75 , 0. , 0.25 , -0.625 ],
[ 1.75 , 0. , 1.0625 , 1.046875]])
.. testcleanup:: surface-eval-multi2
import make_images
make_images.surface_evaluate_barycentric_multi(surface, points)
Args:
param_vals (numpy.ndarray): Array of parameter values (as a
``N x 3`` array).
_verify (Optional[bool]): Indicates if the coordinates should be
verified. See :meth:`evaluate_barycentric`. Defaults to
:data:`True`. Will also double check that ``param_vals``
is the right shape.
Returns:
numpy.ndarray: The points on the surface.
Raises:
ValueError: If ``param_vals`` is not a 2D array and
``_verify=True``.
"""
if _verify:
if param_vals.ndim != 2:
raise ValueError("Parameter values must be 2D array")
for lambda1, lambda2, lambda3 in param_vals:
self._verify_barycentric(lambda1, lambda2, lambda3)
return _surface_helpers.evaluate_barycentric_multi(
self._nodes, self._degree, param_vals, self._dimension
) | python | def evaluate_barycentric_multi(self, param_vals, _verify=True):
r"""Compute multiple points on the surface.
Assumes ``param_vals`` has three columns of barycentric coordinates.
See :meth:`evaluate_barycentric` for more details on how each row of
parameter values is evaluated.
.. image:: ../../images/surface_evaluate_barycentric_multi.png
:align: center
.. doctest:: surface-eval-multi2
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0 , 2.0, -1.5, -0.5, -3.0],
... [0.0, 0.75, 1.0, 1.0, 1.5, 2.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> surface
<Surface (degree=2, dimension=2)>
>>> param_vals = np.asfortranarray([
... [0. , 0.25, 0.75 ],
... [1. , 0. , 0. ],
... [0.25 , 0.5 , 0.25 ],
... [0.375, 0.25, 0.375],
... ])
>>> points = surface.evaluate_barycentric_multi(param_vals)
>>> points
array([[-1.75 , 0. , 0.25 , -0.625 ],
[ 1.75 , 0. , 1.0625 , 1.046875]])
.. testcleanup:: surface-eval-multi2
import make_images
make_images.surface_evaluate_barycentric_multi(surface, points)
Args:
param_vals (numpy.ndarray): Array of parameter values (as a
``N x 3`` array).
_verify (Optional[bool]): Indicates if the coordinates should be
verified. See :meth:`evaluate_barycentric`. Defaults to
:data:`True`. Will also double check that ``param_vals``
is the right shape.
Returns:
numpy.ndarray: The points on the surface.
Raises:
ValueError: If ``param_vals`` is not a 2D array and
``_verify=True``.
"""
if _verify:
if param_vals.ndim != 2:
raise ValueError("Parameter values must be 2D array")
for lambda1, lambda2, lambda3 in param_vals:
self._verify_barycentric(lambda1, lambda2, lambda3)
return _surface_helpers.evaluate_barycentric_multi(
self._nodes, self._degree, param_vals, self._dimension
) | ['def', 'evaluate_barycentric_multi', '(', 'self', ',', 'param_vals', ',', '_verify', '=', 'True', ')', ':', 'if', '_verify', ':', 'if', 'param_vals', '.', 'ndim', '!=', '2', ':', 'raise', 'ValueError', '(', '"Parameter values must be 2D array"', ')', 'for', 'lambda1', ',', 'lambda2', ',', 'lambda3', 'in', 'param_vals', ':', 'self', '.', '_verify_barycentric', '(', 'lambda1', ',', 'lambda2', ',', 'lambda3', ')', 'return', '_surface_helpers', '.', 'evaluate_barycentric_multi', '(', 'self', '.', '_nodes', ',', 'self', '.', '_degree', ',', 'param_vals', ',', 'self', '.', '_dimension', ')'] | r"""Compute multiple points on the surface.
Assumes ``param_vals`` has three columns of barycentric coordinates.
See :meth:`evaluate_barycentric` for more details on how each row of
parameter values is evaluated.
.. image:: ../../images/surface_evaluate_barycentric_multi.png
:align: center
.. doctest:: surface-eval-multi2
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0 , 2.0, -1.5, -0.5, -3.0],
... [0.0, 0.75, 1.0, 1.0, 1.5, 2.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> surface
<Surface (degree=2, dimension=2)>
>>> param_vals = np.asfortranarray([
... [0. , 0.25, 0.75 ],
... [1. , 0. , 0. ],
... [0.25 , 0.5 , 0.25 ],
... [0.375, 0.25, 0.375],
... ])
>>> points = surface.evaluate_barycentric_multi(param_vals)
>>> points
array([[-1.75 , 0. , 0.25 , -0.625 ],
[ 1.75 , 0. , 1.0625 , 1.046875]])
.. testcleanup:: surface-eval-multi2
import make_images
make_images.surface_evaluate_barycentric_multi(surface, points)
Args:
param_vals (numpy.ndarray): Array of parameter values (as a
``N x 3`` array).
_verify (Optional[bool]): Indicates if the coordinates should be
verified. See :meth:`evaluate_barycentric`. Defaults to
:data:`True`. Will also double check that ``param_vals``
is the right shape.
Returns:
numpy.ndarray: The points on the surface.
Raises:
ValueError: If ``param_vals`` is not a 2D array and
``_verify=True``. | ['r', 'Compute', 'multiple', 'points', 'on', 'the', 'surface', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L477-L536 |
3,237 | NuGrid/NuGridPy | nugridpy/utils.py | Utils._process_abundance_vector | def _process_abundance_vector(self, a, z, isomers, yps):
'''
This private method takes as input one vector definition and
processes it, including sorting by charge number and
mass number. It returns the processed input variables
plus an element and isotope vector and a list of
isomers.
'''
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
tmp=[]
isom=[]
for i in range(len(a)):
if z[i]!=0 and isomers[i]==1: #if its not 'NEUt and not an isomer'
tmp.append([self.stable_names[int(z[i])]+'-'+str(int(a[i])),yps[i],z[i],a[i]])
elif isomers[i]!=1: #if it is an isomer
if yps[i]==0:
isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),1e-99])
else:
isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),yps[i]])
tmp.sort(key = cmp_to_key(self.compar))
tmp.sort(key = cmp_to_key(self.comparator))
abunds=[]
isotope_to_plot=[]
z_iso_to_plot=[]
a_iso_to_plot=[]
el_iso_to_plot=[]
for i in range(len(tmp)):
isotope_to_plot.append(tmp[i][0])
abunds.append(tmp[i][1])
z_iso_to_plot.append(int(tmp[i][2]))
a_iso_to_plot.append(int(tmp[i][3]))
el_iso_to_plot.append(self.stable_names[int(tmp[i][2])])
return a_iso_to_plot,z_iso_to_plot,abunds,isotope_to_plot,el_iso_to_plot,isom | python | def _process_abundance_vector(self, a, z, isomers, yps):
'''
This private method takes as input one vector definition and
processes it, including sorting by charge number and
mass number. It returns the processed input variables
plus an element and isotope vector and a list of
isomers.
'''
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
tmp=[]
isom=[]
for i in range(len(a)):
if z[i]!=0 and isomers[i]==1: #if its not 'NEUt and not an isomer'
tmp.append([self.stable_names[int(z[i])]+'-'+str(int(a[i])),yps[i],z[i],a[i]])
elif isomers[i]!=1: #if it is an isomer
if yps[i]==0:
isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),1e-99])
else:
isom.append([self.stable_names[int(z[i])]+'-'+str(int(a[i]))+'-'+str(int(isomers[i]-1)),yps[i]])
tmp.sort(key = cmp_to_key(self.compar))
tmp.sort(key = cmp_to_key(self.comparator))
abunds=[]
isotope_to_plot=[]
z_iso_to_plot=[]
a_iso_to_plot=[]
el_iso_to_plot=[]
for i in range(len(tmp)):
isotope_to_plot.append(tmp[i][0])
abunds.append(tmp[i][1])
z_iso_to_plot.append(int(tmp[i][2]))
a_iso_to_plot.append(int(tmp[i][3]))
el_iso_to_plot.append(self.stable_names[int(tmp[i][2])])
return a_iso_to_plot,z_iso_to_plot,abunds,isotope_to_plot,el_iso_to_plot,isom | ['def', '_process_abundance_vector', '(', 'self', ',', 'a', ',', 'z', ',', 'isomers', ',', 'yps', ')', ':', 'def', 'cmp_to_key', '(', 'mycmp', ')', ':', "'Convert a cmp= function into a key= function'", 'class', 'K', '(', 'object', ')', ':', 'def', '__init__', '(', 'self', ',', 'obj', ',', '*', 'args', ')', ':', 'self', '.', 'obj', '=', 'obj', 'def', '__lt__', '(', 'self', ',', 'other', ')', ':', 'return', 'mycmp', '(', 'self', '.', 'obj', ',', 'other', '.', 'obj', ')', '<', '0', 'def', '__gt__', '(', 'self', ',', 'other', ')', ':', 'return', 'mycmp', '(', 'self', '.', 'obj', ',', 'other', '.', 'obj', ')', '>', '0', 'def', '__eq__', '(', 'self', ',', 'other', ')', ':', 'return', 'mycmp', '(', 'self', '.', 'obj', ',', 'other', '.', 'obj', ')', '==', '0', 'def', '__le__', '(', 'self', ',', 'other', ')', ':', 'return', 'mycmp', '(', 'self', '.', 'obj', ',', 'other', '.', 'obj', ')', '<=', '0', 'def', '__ge__', '(', 'self', ',', 'other', ')', ':', 'return', 'mycmp', '(', 'self', '.', 'obj', ',', 'other', '.', 'obj', ')', '>=', '0', 'def', '__ne__', '(', 'self', ',', 'other', ')', ':', 'return', 'mycmp', '(', 'self', '.', 'obj', ',', 'other', '.', 'obj', ')', '!=', '0', 'return', 'K', 'tmp', '=', '[', ']', 'isom', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'a', ')', ')', ':', 'if', 'z', '[', 'i', ']', '!=', '0', 'and', 'isomers', '[', 'i', ']', '==', '1', ':', "#if its not 'NEUt and not an isomer'", 'tmp', '.', 'append', '(', '[', 'self', '.', 'stable_names', '[', 'int', '(', 'z', '[', 'i', ']', ')', ']', '+', "'-'", '+', 'str', '(', 'int', '(', 'a', '[', 'i', ']', ')', ')', ',', 'yps', '[', 'i', ']', ',', 'z', '[', 'i', ']', ',', 'a', '[', 'i', ']', ']', ')', 'elif', 'isomers', '[', 'i', ']', '!=', '1', ':', '#if it is an isomer', 'if', 'yps', '[', 'i', ']', '==', '0', ':', 'isom', '.', 'append', '(', '[', 'self', '.', 'stable_names', '[', 'int', '(', 'z', '[', 'i', ']', ')', ']', '+', "'-'", '+', 'str', '(', 'int', '(', 'a', '[', 'i', ']', ')', ')', '+', "'-'", '+', 'str', '(', 'int', '(', 'isomers', '[', 'i', ']', '-', '1', ')', ')', ',', '1e-99', ']', ')', 'else', ':', 'isom', '.', 'append', '(', '[', 'self', '.', 'stable_names', '[', 'int', '(', 'z', '[', 'i', ']', ')', ']', '+', "'-'", '+', 'str', '(', 'int', '(', 'a', '[', 'i', ']', ')', ')', '+', "'-'", '+', 'str', '(', 'int', '(', 'isomers', '[', 'i', ']', '-', '1', ')', ')', ',', 'yps', '[', 'i', ']', ']', ')', 'tmp', '.', 'sort', '(', 'key', '=', 'cmp_to_key', '(', 'self', '.', 'compar', ')', ')', 'tmp', '.', 'sort', '(', 'key', '=', 'cmp_to_key', '(', 'self', '.', 'comparator', ')', ')', 'abunds', '=', '[', ']', 'isotope_to_plot', '=', '[', ']', 'z_iso_to_plot', '=', '[', ']', 'a_iso_to_plot', '=', '[', ']', 'el_iso_to_plot', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'tmp', ')', ')', ':', 'isotope_to_plot', '.', 'append', '(', 'tmp', '[', 'i', ']', '[', '0', ']', ')', 'abunds', '.', 'append', '(', 'tmp', '[', 'i', ']', '[', '1', ']', ')', 'z_iso_to_plot', '.', 'append', '(', 'int', '(', 'tmp', '[', 'i', ']', '[', '2', ']', ')', ')', 'a_iso_to_plot', '.', 'append', '(', 'int', '(', 'tmp', '[', 'i', ']', '[', '3', ']', ')', ')', 'el_iso_to_plot', '.', 'append', '(', 'self', '.', 'stable_names', '[', 'int', '(', 'tmp', '[', 'i', ']', '[', '2', ']', ')', ']', ')', 'return', 'a_iso_to_plot', ',', 'z_iso_to_plot', ',', 'abunds', ',', 'isotope_to_plot', ',', 'el_iso_to_plot', ',', 'isom'] | This private method takes as input one 
vector definition and
processes it, including sorting by charge number and
mass number. It returns the processed input variables
plus an element and isotope vector and a list of
isomers. | ['This', 'private', 'method', 'takes', 'as', 'input', 'one', 'vector', 'definition', 'and', 'processes', 'it', 'including', 'sorting', 'by', 'charge', 'number', 'and', 'mass', 'number', '.', 'It', 'returns', 'the', 'processed', 'input', 'variables', 'plus', 'an', 'element', 'and', 'isotope', 'vector', 'and', 'a', 'list', 'of', 'isomers', '.'] | train | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/utils.py#L274-L326 |
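The hand-rolled cmp_to_key wrapper above reproduces functools.cmp_to_key from the standard library; the two-pass stable sort (by mass, then by charge) can be sketched with the stdlib helper and made-up comparators standing in for self.compar / self.comparator:

from functools import cmp_to_key

def by_mass(x, y):                     # stand-in for self.compar
    return (x[3] > y[3]) - (x[3] < y[3])

def by_charge(x, y):                   # stand-in for self.comparator
    return (x[2] > y[2]) - (x[2] < y[2])

rows = [['He-4', 1e-2, 2, 4], ['H-1', 0.7, 1, 1], ['H-2', 1e-5, 1, 2]]
rows.sort(key=cmp_to_key(by_mass))
rows.sort(key=cmp_to_key(by_charge))   # stable sort keeps mass order within each charge
print([r[0] for r in rows])            # ['H-1', 'H-2', 'He-4']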
3,238 | nickoala/telepot | telepot/delegate.py | pave_event_space | def pave_event_space(fn=pair):
"""
:return:
a pair producer that ensures the seeder and delegator share the same event space.
"""
global _event_space
event_space = next(_event_space)
@_ensure_seeders_list
def p(seeders, delegator_factory, *args, **kwargs):
return fn(seeders + [per_event_source_id(event_space)],
delegator_factory, *args, event_space=event_space, **kwargs)
return p | python | def pave_event_space(fn=pair):
"""
:return:
a pair producer that ensures the seeder and delegator share the same event space.
"""
global _event_space
event_space = next(_event_space)
@_ensure_seeders_list
def p(seeders, delegator_factory, *args, **kwargs):
return fn(seeders + [per_event_source_id(event_space)],
delegator_factory, *args, event_space=event_space, **kwargs)
return p | ['def', 'pave_event_space', '(', 'fn', '=', 'pair', ')', ':', 'global', '_event_space', 'event_space', '=', 'next', '(', '_event_space', ')', '@', '_ensure_seeders_list', 'def', 'p', '(', 'seeders', ',', 'delegator_factory', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'fn', '(', 'seeders', '+', '[', 'per_event_source_id', '(', 'event_space', ')', ']', ',', 'delegator_factory', ',', '*', 'args', ',', 'event_space', '=', 'event_space', ',', '*', '*', 'kwargs', ')', 'return', 'p'] | :return:
a pair producer that ensures the seeder and delegator share the same event space. | [':', 'return', ':', 'a', 'pair', 'producer', 'that', 'ensures', 'the', 'seeder', 'and', 'delegator', 'share', 'the', 'same', 'event', 'space', '.'] | train | https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/delegate.py#L347-L359 |
3,239 | erdc/RAPIDpy | RAPIDpy/postprocess/generate_seasonal_averages.py | generate_seasonal_averages | def generate_seasonal_averages(qout_file, seasonal_average_file,
num_cpus=multiprocessing.cpu_count()):
"""
This function loops through a CF compliant rapid streamflow
file to produce a netCDF file with a seasonal average for
365 days a year
"""
with RAPIDDataset(qout_file) as qout_nc_file:
print("Generating seasonal average file ...")
seasonal_avg_nc = Dataset(seasonal_average_file, 'w')
seasonal_avg_nc.createDimension('rivid', qout_nc_file.size_river_id)
seasonal_avg_nc.createDimension('day_of_year', 365)
time_series_var = seasonal_avg_nc.createVariable('rivid', 'i4',
('rivid',))
time_series_var.long_name = (
'unique identifier for each river reach')
average_flow_var = \
seasonal_avg_nc.createVariable('average_flow', 'f8',
('rivid', 'day_of_year'))
average_flow_var.long_name = 'seasonal average streamflow'
average_flow_var.units = 'm3/s'
std_dev_flow_var = \
seasonal_avg_nc.createVariable('std_dev_flow', 'f8',
('rivid', 'day_of_year'))
std_dev_flow_var.long_name = 'seasonal std. dev. streamflow'
std_dev_flow_var.units = 'm3/s'
std_dev_flow_var = \
seasonal_avg_nc.createVariable('max_flow', 'f8',
('rivid', 'day_of_year'))
std_dev_flow_var.long_name = 'seasonal max streamflow'
std_dev_flow_var.units = 'm3/s'
std_dev_flow_var = \
seasonal_avg_nc.createVariable('min_flow', 'f8',
('rivid', 'day_of_year'))
std_dev_flow_var.long_name = 'seasonal min streamflow'
std_dev_flow_var.units = 'm3/s'
lat_var = seasonal_avg_nc.createVariable('lat', 'f8', ('rivid',),
fill_value=-9999.0)
lon_var = seasonal_avg_nc.createVariable('lon', 'f8', ('rivid',),
fill_value=-9999.0)
add_latlon_metadata(lat_var, lon_var)
seasonal_avg_nc.variables['lat'][:] = \
qout_nc_file.qout_nc.variables['lat'][:]
seasonal_avg_nc.variables['lon'][:] = \
qout_nc_file.qout_nc.variables['lon'][:]
river_id_list = qout_nc_file.get_river_id_array()
seasonal_avg_nc.variables['rivid'][:] = river_id_list
seasonal_avg_nc.close()
# generate multiprocessing jobs
mp_lock = multiprocessing.Manager().Lock() # pylint: disable=no-member
job_combinations = []
for day_of_year in range(1, 366):
job_combinations.append((qout_file,
seasonal_average_file,
day_of_year,
mp_lock
))
pool = multiprocessing.Pool(num_cpus)
pool.map(generate_single_seasonal_average,
job_combinations)
pool.close()
pool.join() | python | def generate_seasonal_averages(qout_file, seasonal_average_file,
num_cpus=multiprocessing.cpu_count()):
"""
This function loops through a CF compliant rapid streamflow
file to produce a netCDF file with a seasonal average for
365 days a year
"""
with RAPIDDataset(qout_file) as qout_nc_file:
print("Generating seasonal average file ...")
seasonal_avg_nc = Dataset(seasonal_average_file, 'w')
seasonal_avg_nc.createDimension('rivid', qout_nc_file.size_river_id)
seasonal_avg_nc.createDimension('day_of_year', 365)
time_series_var = seasonal_avg_nc.createVariable('rivid', 'i4',
('rivid',))
time_series_var.long_name = (
'unique identifier for each river reach')
average_flow_var = \
seasonal_avg_nc.createVariable('average_flow', 'f8',
('rivid', 'day_of_year'))
average_flow_var.long_name = 'seasonal average streamflow'
average_flow_var.units = 'm3/s'
std_dev_flow_var = \
seasonal_avg_nc.createVariable('std_dev_flow', 'f8',
('rivid', 'day_of_year'))
std_dev_flow_var.long_name = 'seasonal std. dev. streamflow'
std_dev_flow_var.units = 'm3/s'
std_dev_flow_var = \
seasonal_avg_nc.createVariable('max_flow', 'f8',
('rivid', 'day_of_year'))
std_dev_flow_var.long_name = 'seasonal max streamflow'
std_dev_flow_var.units = 'm3/s'
std_dev_flow_var = \
seasonal_avg_nc.createVariable('min_flow', 'f8',
('rivid', 'day_of_year'))
std_dev_flow_var.long_name = 'seasonal min streamflow'
std_dev_flow_var.units = 'm3/s'
lat_var = seasonal_avg_nc.createVariable('lat', 'f8', ('rivid',),
fill_value=-9999.0)
lon_var = seasonal_avg_nc.createVariable('lon', 'f8', ('rivid',),
fill_value=-9999.0)
add_latlon_metadata(lat_var, lon_var)
seasonal_avg_nc.variables['lat'][:] = \
qout_nc_file.qout_nc.variables['lat'][:]
seasonal_avg_nc.variables['lon'][:] = \
qout_nc_file.qout_nc.variables['lon'][:]
river_id_list = qout_nc_file.get_river_id_array()
seasonal_avg_nc.variables['rivid'][:] = river_id_list
seasonal_avg_nc.close()
# generate multiprocessing jobs
mp_lock = multiprocessing.Manager().Lock() # pylint: disable=no-member
job_combinations = []
for day_of_year in range(1, 366):
job_combinations.append((qout_file,
seasonal_average_file,
day_of_year,
mp_lock
))
pool = multiprocessing.Pool(num_cpus)
pool.map(generate_single_seasonal_average,
job_combinations)
pool.close()
pool.join() | ['def', 'generate_seasonal_averages', '(', 'qout_file', ',', 'seasonal_average_file', ',', 'num_cpus', '=', 'multiprocessing', '.', 'cpu_count', '(', ')', ')', ':', 'with', 'RAPIDDataset', '(', 'qout_file', ')', 'as', 'qout_nc_file', ':', 'print', '(', '"Generating seasonal average file ..."', ')', 'seasonal_avg_nc', '=', 'Dataset', '(', 'seasonal_average_file', ',', "'w'", ')', 'seasonal_avg_nc', '.', 'createDimension', '(', "'rivid'", ',', 'qout_nc_file', '.', 'size_river_id', ')', 'seasonal_avg_nc', '.', 'createDimension', '(', "'day_of_year'", ',', '365', ')', 'time_series_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'rivid'", ',', "'i4'", ',', '(', "'rivid'", ',', ')', ')', 'time_series_var', '.', 'long_name', '=', '(', "'unique identifier for each river reach'", ')', 'average_flow_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'average_flow'", ',', "'f8'", ',', '(', "'rivid'", ',', "'day_of_year'", ')', ')', 'average_flow_var', '.', 'long_name', '=', "'seasonal average streamflow'", 'average_flow_var', '.', 'units', '=', "'m3/s'", 'std_dev_flow_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'std_dev_flow'", ',', "'f8'", ',', '(', "'rivid'", ',', "'day_of_year'", ')', ')', 'std_dev_flow_var', '.', 'long_name', '=', "'seasonal std. dev. streamflow'", 'std_dev_flow_var', '.', 'units', '=', "'m3/s'", 'std_dev_flow_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'max_flow'", ',', "'f8'", ',', '(', "'rivid'", ',', "'day_of_year'", ')', ')', 'std_dev_flow_var', '.', 'long_name', '=', "'seasonal max streamflow'", 'std_dev_flow_var', '.', 'units', '=', "'m3/s'", 'std_dev_flow_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'min_flow'", ',', "'f8'", ',', '(', "'rivid'", ',', "'day_of_year'", ')', ')', 'std_dev_flow_var', '.', 'long_name', '=', "'seasonal min streamflow'", 'std_dev_flow_var', '.', 'units', '=', "'m3/s'", 'lat_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'lat'", ',', "'f8'", ',', '(', "'rivid'", ',', ')', ',', 'fill_value', '=', '-', '9999.0', ')', 'lon_var', '=', 'seasonal_avg_nc', '.', 'createVariable', '(', "'lon'", ',', "'f8'", ',', '(', "'rivid'", ',', ')', ',', 'fill_value', '=', '-', '9999.0', ')', 'add_latlon_metadata', '(', 'lat_var', ',', 'lon_var', ')', 'seasonal_avg_nc', '.', 'variables', '[', "'lat'", ']', '[', ':', ']', '=', 'qout_nc_file', '.', 'qout_nc', '.', 'variables', '[', "'lat'", ']', '[', ':', ']', 'seasonal_avg_nc', '.', 'variables', '[', "'lon'", ']', '[', ':', ']', '=', 'qout_nc_file', '.', 'qout_nc', '.', 'variables', '[', "'lon'", ']', '[', ':', ']', 'river_id_list', '=', 'qout_nc_file', '.', 'get_river_id_array', '(', ')', 'seasonal_avg_nc', '.', 'variables', '[', "'rivid'", ']', '[', ':', ']', '=', 'river_id_list', 'seasonal_avg_nc', '.', 'close', '(', ')', '# generate multiprocessing jobs', 'mp_lock', '=', 'multiprocessing', '.', 'Manager', '(', ')', '.', 'Lock', '(', ')', '# pylint: disable=no-member', 'job_combinations', '=', '[', ']', 'for', 'day_of_year', 'in', 'range', '(', '1', ',', '366', ')', ':', 'job_combinations', '.', 'append', '(', '(', 'qout_file', ',', 'seasonal_average_file', ',', 'day_of_year', ',', 'mp_lock', ')', ')', 'pool', '=', 'multiprocessing', '.', 'Pool', '(', 'num_cpus', ')', 'pool', '.', 'map', '(', 'generate_single_seasonal_average', ',', 'job_combinations', ')', 'pool', '.', 'close', '(', ')', 'pool', '.', 'join', '(', ')'] | This function loops through a CF compliant rapid streamflow
file to produce a netCDF file with a seasonal average for
365 days a year | ['This', 'function', 'loops', 'through', 'a', 'CF', 'compliant', 'rapid', 'streamflow', 'file', 'to', 'produce', 'a', 'netCDF', 'file', 'with', 'a', 'seasonal', 'average', 'for', '365', 'days', 'a', 'year'] | train | https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/postprocess/generate_seasonal_averages.py#L70-L143 |
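The fan-out pattern above (build one argument tuple per day, then Pool.map a worker over them, with a Manager lock to serialize writes to the shared file) is easier to see in a stripped-down sketch. The worker below is a stub, not the real generate_single_seasonal_average.

import multiprocessing

def process_day(args):
    qout_file, avg_file, day_of_year, lock = args
    with lock:                          # serialize access to the shared output file
        pass                            # real work: read flows, average, write netCDF
    return day_of_year

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    lock = manager.Lock()               # a manager lock can be shipped to pool workers
    jobs = [('Qout.nc', 'seasonal_avg.nc', day, lock) for day in range(1, 366)]
    with multiprocessing.Pool(4) as pool:
        results = pool.map(process_day, jobs)
    print(len(results))                 # 365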
3,240 | ibis-project/ibis | ibis/config.py | _build_option_description | def _build_option_description(k):
"""Builds a formatted description of a registered option and prints it."""
o = _get_registered_option(k)
d = _get_deprecated_option(k)
buf = ['{} '.format(k)]
if o.doc:
doc = '\n'.join(o.doc.strip().splitlines())
else:
doc = 'No description available.'
buf.append(doc)
if o:
buf.append(
'\n [default: {}] [currently: {}]'.format(
o.defval, _get_option(k, True)
)
)
if d:
buf.append(
'\n (Deprecated{})'.format(
', use `{}` instead.'.format(d.rkey) if d.rkey else ''
)
)
buf.append('\n\n')
return ''.join(buf) | python | def _build_option_description(k):
"""Builds a formatted description of a registered option and prints it."""
o = _get_registered_option(k)
d = _get_deprecated_option(k)
buf = ['{} '.format(k)]
if o.doc:
doc = '\n'.join(o.doc.strip().splitlines())
else:
doc = 'No description available.'
buf.append(doc)
if o:
buf.append(
'\n [default: {}] [currently: {}]'.format(
o.defval, _get_option(k, True)
)
)
if d:
buf.append(
'\n (Deprecated{})'.format(
', use `{}` instead.'.format(d.rkey) if d.rkey else ''
)
)
buf.append('\n\n')
return ''.join(buf) | ['def', '_build_option_description', '(', 'k', ')', ':', 'o', '=', '_get_registered_option', '(', 'k', ')', 'd', '=', '_get_deprecated_option', '(', 'k', ')', 'buf', '=', '[', "'{} '", '.', 'format', '(', 'k', ')', ']', 'if', 'o', '.', 'doc', ':', 'doc', '=', "'\\n'", '.', 'join', '(', 'o', '.', 'doc', '.', 'strip', '(', ')', '.', 'splitlines', '(', ')', ')', 'else', ':', 'doc', '=', "'No description available.'", 'buf', '.', 'append', '(', 'doc', ')', 'if', 'o', ':', 'buf', '.', 'append', '(', "'\\n [default: {}] [currently: {}]'", '.', 'format', '(', 'o', '.', 'defval', ',', '_get_option', '(', 'k', ',', 'True', ')', ')', ')', 'if', 'd', ':', 'buf', '.', 'append', '(', "'\\n (Deprecated{})'", '.', 'format', '(', "', use `{}` instead.'", '.', 'format', '(', 'd', '.', 'rkey', ')', 'if', 'd', '.', 'rkey', 'else', "''", ')', ')', 'buf', '.', 'append', '(', "'\\n\\n'", ')', 'return', "''", '.', 'join', '(', 'buf', ')'] | Builds a formatted description of a registered option and prints it. | ['Builds', 'a', 'formatted', 'description', 'of', 'a', 'registered', 'option', 'and', 'prints', 'it', '.'] | train | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/config.py#L567-L596 |
3,241 | quantmind/pulsar-odm | odm/mapper.py | copy_models | def copy_models(module_from, module_to):
"""Copy models from one module to another
:param module_from:
:param module_to:
:return:
"""
module_from = get_module(module_from)
module_to = get_module(module_to)
models = get_models(module_from)
if models:
models = models.copy()
models.update(((t.key, t) for t in module_tables(module_from)))
module_to.__odm_models__ = models
return models | python | def copy_models(module_from, module_to):
"""Copy models from one module to another
:param module_from:
:param module_to:
:return:
"""
module_from = get_module(module_from)
module_to = get_module(module_to)
models = get_models(module_from)
if models:
models = models.copy()
models.update(((t.key, t) for t in module_tables(module_from)))
module_to.__odm_models__ = models
return models | ['def', 'copy_models', '(', 'module_from', ',', 'module_to', ')', ':', 'module_from', '=', 'get_module', '(', 'module_from', ')', 'module_to', '=', 'get_module', '(', 'module_to', ')', 'models', '=', 'get_models', '(', 'module_from', ')', 'if', 'models', ':', 'models', '=', 'models', '.', 'copy', '(', ')', 'models', '.', 'update', '(', '(', '(', 't', '.', 'key', ',', 't', ')', 'for', 't', 'in', 'module_tables', '(', 'module_from', ')', ')', ')', 'module_to', '.', '__odm_models__', '=', 'models', 'return', 'models'] | Copy models from one module to another
:param module_from:
:param module_to:
:return: | ['Copy', 'models', 'from', 'one', 'module', 'to', 'another', ':', 'param', 'module_from', ':', ':', 'param', 'module_to', ':', ':', 'return', ':'] | train | https://github.com/quantmind/pulsar-odm/blob/5955c20beca0a89270c2b390335838deb7d5915e/odm/mapper.py#L131-L144 |
3,242 | kennethreitz/omnijson | omnijson/packages/simplejson/__init__.py | load | def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw) | python | def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw) | ['def', 'load', '(', 'fp', ',', 'encoding', '=', 'None', ',', 'cls', '=', 'None', ',', 'object_hook', '=', 'None', ',', 'parse_float', '=', 'None', ',', 'parse_int', '=', 'None', ',', 'parse_constant', '=', 'None', ',', 'object_pairs_hook', '=', 'None', ',', 'use_decimal', '=', 'False', ',', '*', '*', 'kw', ')', ':', 'return', 'loads', '(', 'fp', '.', 'read', '(', ')', ',', 'encoding', '=', 'encoding', ',', 'cls', '=', 'cls', ',', 'object_hook', '=', 'object_hook', ',', 'parse_float', '=', 'parse_float', ',', 'parse_int', '=', 'parse_int', ',', 'parse_constant', '=', 'parse_constant', ',', 'object_pairs_hook', '=', 'object_pairs_hook', ',', 'use_decimal', '=', 'use_decimal', ',', '*', '*', 'kw', ')'] | Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. | ['Deserialize', 'fp', '(', 'a', '.', 'read', '()', '-', 'supporting', 'file', '-', 'like', 'object', 'containing', 'a', 'JSON', 'document', ')', 'to', 'a', 'Python', 'object', '.'] | train | https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/packages/simplejson/__init__.py#L276-L329 |
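An illustrative use of the documented `parse_float` hook; the sketch uses the standard-library `json` module, which accepts the same keyword, and the document contents are made up:

```python
import io
import json                      # stdlib json accepts the same parse_float keyword
from decimal import Decimal

doc = io.StringIO('{"total": 19.99, "items": 3}')
data = json.load(doc, parse_float=Decimal)
print(data["total"], type(data["total"]).__name__)   # 19.99 Decimal
```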
3,243 | jtwhite79/pyemu | pyemu/pst/pst_handler.py | Pst.enforce_bounds | def enforce_bounds(self):
""" enforce bounds violation resulting from the
parameter pertubation calculations
"""
too_big = self.parameter_data.loc[:,"parval1"] > \
self.parameter_data.loc[:,"parubnd"]
self.parameter_data.loc[too_big,"parval1"] = \
self.parameter_data.loc[too_big,"parubnd"]
too_small = self.parameter_data.loc[:,"parval1"] < \
self.parameter_data.loc[:,"parlbnd"]
self.parameter_data.loc[too_small,"parval1"] = \
self.parameter_data.loc[too_small,"parlbnd"] | python | def enforce_bounds(self):
""" enforce bounds violation resulting from the
parameter perturbation calculations
"""
too_big = self.parameter_data.loc[:,"parval1"] > \
self.parameter_data.loc[:,"parubnd"]
self.parameter_data.loc[too_big,"parval1"] = \
self.parameter_data.loc[too_big,"parubnd"]
too_small = self.parameter_data.loc[:,"parval1"] < \
self.parameter_data.loc[:,"parlbnd"]
self.parameter_data.loc[too_small,"parval1"] = \
self.parameter_data.loc[too_small,"parlbnd"] | ['def', 'enforce_bounds', '(', 'self', ')', ':', 'too_big', '=', 'self', '.', 'parameter_data', '.', 'loc', '[', ':', ',', '"parval1"', ']', '>', 'self', '.', 'parameter_data', '.', 'loc', '[', ':', ',', '"parubnd"', ']', 'self', '.', 'parameter_data', '.', 'loc', '[', 'too_big', ',', '"parval1"', ']', '=', 'self', '.', 'parameter_data', '.', 'loc', '[', 'too_big', ',', '"parubnd"', ']', 'too_small', '=', 'self', '.', 'parameter_data', '.', 'loc', '[', ':', ',', '"parval1"', ']', '<', 'self', '.', 'parameter_data', '.', 'loc', '[', ':', ',', '"parlbnd"', ']', 'self', '.', 'parameter_data', '.', 'loc', '[', 'too_small', ',', '"parval1"', ']', '=', 'self', '.', 'parameter_data', '.', 'loc', '[', 'too_small', ',', '"parlbnd"', ']'] | enforce bounds violation resulting from the
parameter perturbation calculations | ['enforce', 'bounds', 'violation', 'resulting', 'from', 'the', 'parameter', 'perturbation', 'calculations'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L2051-L2064 |
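The same bound enforcement can be reproduced with a plain pandas frame; the column names follow the PEST `parameter_data` convention used above and the values are invented:

```python
import pandas as pd

par = pd.DataFrame({"parval1": [0.5, 12.0],
                    "parlbnd": [1.0, 0.0],
                    "parubnd": [10.0, 10.0]})
# clip parameter values back into [parlbnd, parubnd] in one step
par["parval1"] = par["parval1"].clip(lower=par["parlbnd"], upper=par["parubnd"])
print(par["parval1"].tolist())   # [1.0, 10.0]
```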
3,244 | Julius2342/pyvlx | pyvlx/frames/frame_activate_scene.py | FrameActivateSceneRequest.from_payload | def from_payload(self, payload):
"""Init frame from binary data."""
self.session_id = payload[0]*256 + payload[1]
self.originator = Originator(payload[2])
self.priority = Priority(payload[3])
self.scene_id = payload[4]
self.velocity = Velocity(payload[5]) | python | def from_payload(self, payload):
"""Init frame from binary data."""
self.session_id = payload[0]*256 + payload[1]
self.originator = Originator(payload[2])
self.priority = Priority(payload[3])
self.scene_id = payload[4]
self.velocity = Velocity(payload[5]) | ['def', 'from_payload', '(', 'self', ',', 'payload', ')', ':', 'self', '.', 'session_id', '=', 'payload', '[', '0', ']', '*', '256', '+', 'payload', '[', '1', ']', 'self', '.', 'originator', '=', 'Originator', '(', 'payload', '[', '2', ']', ')', 'self', '.', 'priority', '=', 'Priority', '(', 'payload', '[', '3', ']', ')', 'self', '.', 'scene_id', '=', 'payload', '[', '4', ']', 'self', '.', 'velocity', '=', 'Velocity', '(', 'payload', '[', '5', ']', ')'] | Init frame from binary data. | ['Init', 'frame', 'from', 'binary', 'data', '.'] | train | https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_activate_scene.py#L32-L38 |
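A small stand-alone illustration of the byte layout parsed above; the six payload bytes are invented and the enum wrappers are omitted:

```python
payload = bytes([0x00, 0x2A, 1, 3, 5, 2])      # invented 6-byte payload
session_id = payload[0] * 256 + payload[1]     # 42
scene_id = payload[4]                          # 5
print(session_id, scene_id)
```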
3,245 | clement-alexandre/TotemBionet | totembionet/src/discrete_model/influence_graph.py | InfluenceGraph._transform_list_of_states_to_state | def _transform_list_of_states_to_state(self, state: List[int]) -> State:
"""
Private method which transform a list which contains the state of the gene
in the models to a State object.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_list_of_states_to_dict_of_states([0, 1])
{operon: 0, mucuB: 1}
>>> graph._transform_list_of_states_to_dict_of_states([2, 0])
{operon: 2, mucuB: 0}
"""
return State({gene: state[i] for i, gene in enumerate(self.genes)}) | python | def _transform_list_of_states_to_state(self, state: List[int]) -> State:
"""
Private method which transform a list which contains the state of the gene
in the models to a State object.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_list_of_states_to_dict_of_states([0, 1])
{operon: 0, mucuB: 1}
>>> graph._transform_list_of_states_to_dict_of_states([2, 0])
{operon: 2, mucuB: 0}
"""
return State({gene: state[i] for i, gene in enumerate(self.genes)}) | ['def', '_transform_list_of_states_to_state', '(', 'self', ',', 'state', ':', 'List', '[', 'int', ']', ')', '->', 'State', ':', 'return', 'State', '(', '{', 'gene', ':', 'state', '[', 'i', ']', 'for', 'i', ',', 'gene', 'in', 'enumerate', '(', 'self', '.', 'genes', ')', '}', ')'] | Private method which transform a list which contains the state of the gene
in the models to a State object.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_list_of_states_to_dict_of_states([0, 1])
{operon: 0, mucuB: 1}
>>> graph._transform_list_of_states_to_dict_of_states([2, 0])
{operon: 2, mucuB: 0} | ['Private', 'method', 'which', 'transform', 'a', 'list', 'which', 'contains', 'the', 'state', 'of', 'the', 'gene', 'in', 'the', 'models', 'to', 'a', 'State', 'object', '.'] | train | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/influence_graph.py#L71-L86 |
3,246 | Robpol86/libnl | libnl/linux_private/rtnetlink.py | rtgenmsg.rtgen_family | def rtgen_family(self, value):
"""Family setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_ubyte(value or 0)) | python | def rtgen_family(self, value):
"""Family setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_ubyte(value or 0)) | ['def', 'rtgen_family', '(', 'self', ',', 'value', ')', ':', 'self', '.', 'bytearray', '[', 'self', '.', '_get_slicers', '(', '0', ')', ']', '=', 'bytearray', '(', 'c_ubyte', '(', 'value', 'or', '0', ')', ')'] | Family setter. | ['Family', 'setter', '.'] | train | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/linux_private/rtnetlink.py#L150-L152 |
3,247 | spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | make_remote_view | def make_remote_view(data, settings, more_excluded_names=None):
"""
Make a remote view of dictionary *data*
-> globals explorer
"""
data = get_remote_data(data, settings, mode='editable',
more_excluded_names=more_excluded_names)
remote = {}
for key, value in list(data.items()):
view = value_to_display(value, minmax=settings['minmax'])
remote[key] = {'type': get_human_readable_type(value),
'size': get_size(value),
'color': get_color_name(value),
'view': view}
return remote | python | def make_remote_view(data, settings, more_excluded_names=None):
"""
Make a remote view of dictionary *data*
-> globals explorer
"""
data = get_remote_data(data, settings, mode='editable',
more_excluded_names=more_excluded_names)
remote = {}
for key, value in list(data.items()):
view = value_to_display(value, minmax=settings['minmax'])
remote[key] = {'type': get_human_readable_type(value),
'size': get_size(value),
'color': get_color_name(value),
'view': view}
return remote | ['def', 'make_remote_view', '(', 'data', ',', 'settings', ',', 'more_excluded_names', '=', 'None', ')', ':', 'data', '=', 'get_remote_data', '(', 'data', ',', 'settings', ',', 'mode', '=', "'editable'", ',', 'more_excluded_names', '=', 'more_excluded_names', ')', 'remote', '=', '{', '}', 'for', 'key', ',', 'value', 'in', 'list', '(', 'data', '.', 'items', '(', ')', ')', ':', 'view', '=', 'value_to_display', '(', 'value', ',', 'minmax', '=', 'settings', '[', "'minmax'", ']', ')', 'remote', '[', 'key', ']', '=', '{', "'type'", ':', 'get_human_readable_type', '(', 'value', ')', ',', "'size'", ':', 'get_size', '(', 'value', ')', ',', "'color'", ':', 'get_color_name', '(', 'value', ')', ',', "'view'", ':', 'view', '}', 'return', 'remote'] | Make a remote view of dictionary *data*
-> globals explorer | ['Make', 'a', 'remote', 'view', 'of', 'dictionary', '*', 'data', '*', '-', '>', 'globals', 'explorer'] | train | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L661-L675 |
3,248 | pysathq/pysat | pysat/pb.py | PBEnc.atmost | def atmost(cls, lits, weights=None, bound=1, top_id=None,
encoding=EncType.best):
"""
A synonym for :meth:`PBEnc.leq`.
"""
return cls.leq(lits, weights, bound, top_id, encoding) | python | def atmost(cls, lits, weights=None, bound=1, top_id=None,
encoding=EncType.best):
"""
A synonym for :meth:`PBEnc.leq`.
"""
return cls.leq(lits, weights, bound, top_id, encoding) | ['def', 'atmost', '(', 'cls', ',', 'lits', ',', 'weights', '=', 'None', ',', 'bound', '=', '1', ',', 'top_id', '=', 'None', ',', 'encoding', '=', 'EncType', '.', 'best', ')', ':', 'return', 'cls', '.', 'leq', '(', 'lits', ',', 'weights', ',', 'bound', ',', 'top_id', ',', 'encoding', ')'] | A synonym for :meth:`PBEnc.leq`. | ['A', 'synonym', 'for', ':', 'meth', ':', 'PBEnc', '.', 'leq', '.'] | train | https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/pb.py#L284-L290 |
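A hypothetical usage sketch (assumes the python-sat package and its pblib bindings are installed); the literals and bound are illustrative:

```python
from pysat.pb import PBEnc

# encode "at most 2 of the literals 1..4 may be true"
cnf = PBEnc.atmost(lits=[1, 2, 3, 4], bound=2)
print(cnf.clauses[:3])   # first few clauses of the resulting CNF object
```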
3,249 | johnbywater/eventsourcing | eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py | register_new_suffix_tree | def register_new_suffix_tree(case_insensitive=False):
"""Factory method, returns new suffix tree object.
"""
assert isinstance(case_insensitive, bool)
root_node = register_new_node()
suffix_tree_id = uuid4()
event = SuffixTree.Created(
originator_id=suffix_tree_id,
root_node_id=root_node.id,
case_insensitive=case_insensitive,
)
entity = SuffixTree.mutate(event=event)
assert isinstance(entity, SuffixTree)
entity.nodes[root_node.id] = root_node
publish(event)
return entity | python | def register_new_suffix_tree(case_insensitive=False):
"""Factory method, returns new suffix tree object.
"""
assert isinstance(case_insensitive, bool)
root_node = register_new_node()
suffix_tree_id = uuid4()
event = SuffixTree.Created(
originator_id=suffix_tree_id,
root_node_id=root_node.id,
case_insensitive=case_insensitive,
)
entity = SuffixTree.mutate(event=event)
assert isinstance(entity, SuffixTree)
entity.nodes[root_node.id] = root_node
publish(event)
return entity | ['def', 'register_new_suffix_tree', '(', 'case_insensitive', '=', 'False', ')', ':', 'assert', 'isinstance', '(', 'case_insensitive', ',', 'bool', ')', 'root_node', '=', 'register_new_node', '(', ')', 'suffix_tree_id', '=', 'uuid4', '(', ')', 'event', '=', 'SuffixTree', '.', 'Created', '(', 'originator_id', '=', 'suffix_tree_id', ',', 'root_node_id', '=', 'root_node', '.', 'id', ',', 'case_insensitive', '=', 'case_insensitive', ',', ')', 'entity', '=', 'SuffixTree', '.', 'mutate', '(', 'event', '=', 'event', ')', 'assert', 'isinstance', '(', 'entity', ',', 'SuffixTree', ')', 'entity', '.', 'nodes', '[', 'root_node', '.', 'id', ']', '=', 'root_node', 'publish', '(', 'event', ')', 'return', 'entity'] | Factory method, returns new suffix tree object. | ['Factory', 'method', 'returns', 'new', 'suffix', 'tree', 'object', '.'] | train | https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L349-L369 |
3,250 | project-ncl/pnc-cli | pnc_cli/tools/utils.py | required | def required(field):
"""Decorator that checks if return value is set, if not, raises exception.
"""
def wrap(f):
def wrappedf(*args):
result = f(*args)
if result is None or result == "":
raise Exception(
"Config option '%s' is required." % field)
else:
return result
return wrappedf
return wrap | python | def required(field):
"""Decorator that checks if return value is set, if not, raises exception.
"""
def wrap(f):
def wrappedf(*args):
result = f(*args)
if result is None or result == "":
raise Exception(
"Config option '%s' is required." % field)
else:
return result
return wrappedf
return wrap | ['def', 'required', '(', 'field', ')', ':', 'def', 'wrap', '(', 'f', ')', ':', 'def', 'wrappedf', '(', '*', 'args', ')', ':', 'result', '=', 'f', '(', '*', 'args', ')', 'if', 'result', 'is', 'None', 'or', 'result', '==', '""', ':', 'raise', 'Exception', '(', '"Config option \'%s\' is required."', '%', 'field', ')', 'else', ':', 'return', 'result', 'return', 'wrappedf', 'return', 'wrap'] | Decorator that checks if return value is set, if not, raises exception. | ['Decorator', 'that', 'checks', 'if', 'return', 'value', 'is', 'set', 'if', 'not', 'raises', 'exception', '.'] | train | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/tools/utils.py#L144-L157 |
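A sketch of the decorator in use; the option name and getter function are illustrative:

```python
# `required` from the pnc_cli utils module above is assumed to be in scope
@required("url")
def get_url(config):
    return config.get("url")

print(get_url({"url": "http://example.com"}))  # http://example.com
get_url({})  # raises: Config option 'url' is required.
```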
3,251 | Calysto/calysto | calysto/ai/conx.py | SRN.addContext | def addContext(self, layer, hiddenLayerName = 'hidden', verbosity = 0):
"""
Adds a context layer. Necessary to keep self.contextLayers dictionary up to date.
"""
# better not add context layer first if using sweep() without mapInput
SRN.add(self, layer, verbosity)
if hiddenLayerName in self.contextLayers:
raise KeyError('There is already a context layer associated with this hidden layer.', \
hiddenLayerName)
else:
self.contextLayers[hiddenLayerName] = layer
layer.kind = 'Context' | python | def addContext(self, layer, hiddenLayerName = 'hidden', verbosity = 0):
"""
Adds a context layer. Necessary to keep self.contextLayers dictionary up to date.
"""
# better not add context layer first if using sweep() without mapInput
SRN.add(self, layer, verbosity)
if hiddenLayerName in self.contextLayers:
raise KeyError('There is already a context layer associated with this hidden layer.', \
hiddenLayerName)
else:
self.contextLayers[hiddenLayerName] = layer
layer.kind = 'Context' | ['def', 'addContext', '(', 'self', ',', 'layer', ',', 'hiddenLayerName', '=', "'hidden'", ',', 'verbosity', '=', '0', ')', ':', '# better not add context layer first if using sweep() without mapInput', 'SRN', '.', 'add', '(', 'self', ',', 'layer', ',', 'verbosity', ')', 'if', 'hiddenLayerName', 'in', 'self', '.', 'contextLayers', ':', 'raise', 'KeyError', '(', "'There is already a context layer associated with this hidden layer.'", ',', 'hiddenLayerName', ')', 'else', ':', 'self', '.', 'contextLayers', '[', 'hiddenLayerName', ']', '=', 'layer', 'layer', '.', 'kind', '=', "'Context'"] | Adds a context layer. Necessary to keep self.contextLayers dictionary up to date. | ['Adds', 'a', 'context', 'layer', '.', 'Necessary', 'to', 'keep', 'self', '.', 'contextLayers', 'dictionary', 'up', 'to', 'date', '.'] | train | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L4686-L4697 |
3,252 | newville/wxmplot | examples/tifffile.py | reorient | def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] | python | def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] | ['def', 'reorient', '(', 'image', ',', 'orientation', ')', ':', 'o', '=', 'TIFF_ORIENTATIONS', '.', 'get', '(', 'orientation', ',', 'orientation', ')', 'if', 'o', '==', "'top_left'", ':', 'return', 'image', 'elif', 'o', '==', "'top_right'", ':', 'return', 'image', '[', '...', ',', ':', ':', '-', '1', ',', ':', ']', 'elif', 'o', '==', "'bottom_left'", ':', 'return', 'image', '[', '...', ',', ':', ':', '-', '1', ',', ':', ',', ':', ']', 'elif', 'o', '==', "'bottom_right'", ':', 'return', 'image', '[', '...', ',', ':', ':', '-', '1', ',', ':', ':', '-', '1', ',', ':', ']', 'elif', 'o', '==', "'left_top'", ':', 'return', 'numpy', '.', 'swapaxes', '(', 'image', ',', '-', '3', ',', '-', '2', ')', 'elif', 'o', '==', "'right_top'", ':', 'return', 'numpy', '.', 'swapaxes', '(', 'image', ',', '-', '3', ',', '-', '2', ')', '[', '...', ',', ':', ':', '-', '1', ',', ':', ']', 'elif', 'o', '==', "'left_bottom'", ':', 'return', 'numpy', '.', 'swapaxes', '(', 'image', ',', '-', '3', ',', '-', '2', ')', '[', '...', ',', ':', ':', '-', '1', ',', ':', ',', ':', ']', 'elif', 'o', '==', "'right_bottom'", ':', 'return', 'numpy', '.', 'swapaxes', '(', 'image', ',', '-', '3', ',', '-', '2', ')', '[', '...', ',', ':', ':', '-', '1', ',', ':', ':', '-', '1', ',', ':', ']'] | Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values. | ['Return', 'reoriented', 'view', 'of', 'image', 'array', '.'] | train | https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/tifffile.py#L1757-L1785 |
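An illustrative call, using a dummy non-squeezed array whose axes -3 and -2 are image length and width as the docstring requires; `reorient` is assumed to be in scope from the module above:

```python
import numpy

image = numpy.arange(24).reshape(1, 2, 3, 4)   # dummy non-squeezed array
flipped = reorient(image, 'bottom_right')      # reverse both image axes
assert flipped.shape == image.shape
assert (flipped[..., ::-1, ::-1, :] == image).all()
```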
3,253 | earwig/mwparserfromhell | mwparserfromhell/wikicode.py | Wikicode._is_child_wikicode | def _is_child_wikicode(self, obj, recursive=True):
"""Return whether the given :class:`.Wikicode` is a descendant."""
def deref(nodes):
if isinstance(nodes, _ListProxy):
return nodes._parent # pylint: disable=protected-access
return nodes
target = deref(obj.nodes)
if target is deref(self.nodes):
return True
if recursive:
todo = [self]
while todo:
code = todo.pop()
if target is deref(code.nodes):
return True
for node in code.nodes:
todo += list(node.__children__())
return False | python | def _is_child_wikicode(self, obj, recursive=True):
"""Return whether the given :class:`.Wikicode` is a descendant."""
def deref(nodes):
if isinstance(nodes, _ListProxy):
return nodes._parent # pylint: disable=protected-access
return nodes
target = deref(obj.nodes)
if target is deref(self.nodes):
return True
if recursive:
todo = [self]
while todo:
code = todo.pop()
if target is deref(code.nodes):
return True
for node in code.nodes:
todo += list(node.__children__())
return False | ['def', '_is_child_wikicode', '(', 'self', ',', 'obj', ',', 'recursive', '=', 'True', ')', ':', 'def', 'deref', '(', 'nodes', ')', ':', 'if', 'isinstance', '(', 'nodes', ',', '_ListProxy', ')', ':', 'return', 'nodes', '.', '_parent', '# pylint: disable=protected-access', 'return', 'nodes', 'target', '=', 'deref', '(', 'obj', '.', 'nodes', ')', 'if', 'target', 'is', 'deref', '(', 'self', '.', 'nodes', ')', ':', 'return', 'True', 'if', 'recursive', ':', 'todo', '=', '[', 'self', ']', 'while', 'todo', ':', 'code', '=', 'todo', '.', 'pop', '(', ')', 'if', 'target', 'is', 'deref', '(', 'code', '.', 'nodes', ')', ':', 'return', 'True', 'for', 'node', 'in', 'code', '.', 'nodes', ':', 'todo', '+=', 'list', '(', 'node', '.', '__children__', '(', ')', ')', 'return', 'False'] | Return whether the given :class:`.Wikicode` is a descendant. | ['Return', 'whether', 'the', 'given', ':', 'class', ':', '.', 'Wikicode', 'is', 'a', 'descendant', '.'] | train | https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/wikicode.py#L112-L130 |
3,254 | limix/limix-core | limix_core/linalg/linalg_matrix.py | solve_chol | def solve_chol(A,B):
"""
Solve cholesky decomposition::
return A\(A'\B)
"""
# X = linalg.solve(A,linalg.solve(A.transpose(),B))
# much faster version
X = linalg.cho_solve((A, True), B)
return X | python | def solve_chol(A,B):
"""
Solve cholesky decomposition::
return A\(A'\B)
"""
# X = linalg.solve(A,linalg.solve(A.transpose(),B))
# much faster version
X = linalg.cho_solve((A, True), B)
return X | ['def', 'solve_chol', '(', 'A', ',', 'B', ')', ':', '# X = linalg.solve(A,linalg.solve(A.transpose(),B))', '# much faster version', 'X', '=', 'linalg', '.', 'cho_solve', '(', '(', 'A', ',', 'True', ')', ',', 'B', ')', 'return', 'X'] | Solve cholesky decomposition::
return A\(A'\B) | ['Solve', 'cholesky', 'decomposition', '::'] | train | https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/linalg/linalg_matrix.py#L29-L39 |
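A stand-alone check of the same identity using SciPy directly; here `A` is the lower Cholesky factor of an invented SPD matrix `K`, so the call solves K @ X = B:

```python
import numpy as np
from scipy import linalg

K = np.array([[4.0, 1.0],
              [1.0, 3.0]])        # an SPD matrix
B = np.array([1.0, 2.0])
A = linalg.cholesky(K, lower=True)
X = linalg.cho_solve((A, True), B)
assert np.allclose(K @ X, B)      # i.e. X solves K X = B
```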
3,255 | saltstack/salt | salt/modules/mac_user.py | chhome | def chhome(name, home, **kwargs):
'''
Change the home directory of the user
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /Users/foo
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
persist = kwargs.pop('persist', False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if persist:
log.info('Ignoring unsupported \'persist\' argument to user.chhome')
pre_info = info(name)
if not pre_info:
raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
if home == pre_info['home']:
return True
_dscl(
['/Users/{0}'.format(name), 'NFSHomeDirectory',
pre_info['home'], home],
ctype='change'
)
# dscl buffers changes, sleep 1 second before checking if new value
# matches desired value
time.sleep(1)
return info(name).get('home') == home | python | def chhome(name, home, **kwargs):
'''
Change the home directory of the user
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /Users/foo
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
persist = kwargs.pop('persist', False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if persist:
log.info('Ignoring unsupported \'persist\' argument to user.chhome')
pre_info = info(name)
if not pre_info:
raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
if home == pre_info['home']:
return True
_dscl(
['/Users/{0}'.format(name), 'NFSHomeDirectory',
pre_info['home'], home],
ctype='change'
)
# dscl buffers changes, sleep 1 second before checking if new value
# matches desired value
time.sleep(1)
return info(name).get('home') == home | ['def', 'chhome', '(', 'name', ',', 'home', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '=', 'salt', '.', 'utils', '.', 'args', '.', 'clean_kwargs', '(', '*', '*', 'kwargs', ')', 'persist', '=', 'kwargs', '.', 'pop', '(', "'persist'", ',', 'False', ')', 'if', 'kwargs', ':', 'salt', '.', 'utils', '.', 'args', '.', 'invalid_kwargs', '(', 'kwargs', ')', 'if', 'persist', ':', 'log', '.', 'info', '(', "'Ignoring unsupported \\'persist\\' argument to user.chhome'", ')', 'pre_info', '=', 'info', '(', 'name', ')', 'if', 'not', 'pre_info', ':', 'raise', 'CommandExecutionError', '(', "'User \\'{0}\\' does not exist'", '.', 'format', '(', 'name', ')', ')', 'if', 'home', '==', 'pre_info', '[', "'home'", ']', ':', 'return', 'True', '_dscl', '(', '[', "'/Users/{0}'", '.', 'format', '(', 'name', ')', ',', "'NFSHomeDirectory'", ',', 'pre_info', '[', "'home'", ']', ',', 'home', ']', ',', 'ctype', '=', "'change'", ')', '# dscl buffers changes, sleep 1 second before checking if new value', '# matches desired value', 'time', '.', 'sleep', '(', '1', ')', 'return', 'info', '(', 'name', ')', '.', 'get', '(', "'home'", ')', '==', 'home'] | Change the home directory of the user
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /Users/foo | ['Change', 'the', 'home', 'directory', 'of', 'the', 'user'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_user.py#L268-L298 |
3,256 | Tenchi2xh/Almonds | almonds/plane.py | Plane.extrema | def extrema(self, x0, y0, w, h):
"""
Returns the minimum and maximum values contained in a given area.
:param x0: Starting x index.
:param y0: Starting y index.
:param w: Width of the area to scan.
:param h: Height of the area to scan.
:return: Tuple containing the minimum and maximum values of the given area.
"""
minimum = 9223372036854775807
maximum = 0
for y in range(y0, y0 + h):
for x in range(x0, x0 + w):
value = self[x, y]
if value != self.filler:
minimum = min(minimum, value)
maximum = max(maximum, value)
return minimum, maximum | python | def extrema(self, x0, y0, w, h):
"""
Returns the minimum and maximum values contained in a given area.
:param x0: Starting x index.
:param y0: Starting y index.
:param w: Width of the area to scan.
:param h: Height of the area to scan.
:return: Tuple containing the minimum and maximum values of the given area.
"""
minimum = 9223372036854775807
maximum = 0
for y in range(y0, y0 + h):
for x in range(x0, x0 + w):
value = self[x, y]
if value != self.filler:
minimum = min(minimum, value)
maximum = max(maximum, value)
return minimum, maximum | ['def', 'extrema', '(', 'self', ',', 'x0', ',', 'y0', ',', 'w', ',', 'h', ')', ':', 'minimum', '=', '9223372036854775807', 'maximum', '=', '0', 'for', 'y', 'in', 'range', '(', 'y0', ',', 'y0', '+', 'h', ')', ':', 'for', 'x', 'in', 'range', '(', 'x0', ',', 'x0', '+', 'w', ')', ':', 'value', '=', 'self', '[', 'x', ',', 'y', ']', 'if', 'value', '!=', 'self', '.', 'filler', ':', 'minimum', '=', 'min', '(', 'minimum', ',', 'value', ')', 'maximum', '=', 'max', '(', 'maximum', ',', 'value', ')', 'return', 'minimum', ',', 'maximum'] | Returns the minimum and maximum values contained in a given area.
:param x0: Starting x index.
:param y0: Starting y index.
:param w: Width of the area to scan.
:param h: Height of the area to scan.
:return: Tuple containing the minimum and maximum values of the given area. | ['Returns', 'the', 'minimum', 'and', 'maximum', 'values', 'contained', 'in', 'a', 'given', 'area', '.'] | train | https://github.com/Tenchi2xh/Almonds/blob/6b27024729f055f2cb5e14ae3ca3cb428ae054bc/almonds/plane.py#L25-L43 |
3,257 | jadolg/rocketchat_API | rocketchat_API/rocketchat.py | RocketChat.rooms_favorite | def rooms_favorite(self, room_id=None, room_name=None, favorite=True):
"""Favorite or unfavorite room."""
if room_id is not None:
return self.__call_api_post('rooms.favorite', roomId=room_id, favorite=favorite)
elif room_name is not None:
return self.__call_api_post('rooms.favorite', roomName=room_name, favorite=favorite)
else:
raise RocketMissingParamException('roomId or roomName required') | python | def rooms_favorite(self, room_id=None, room_name=None, favorite=True):
"""Favorite or unfavorite room."""
if room_id is not None:
return self.__call_api_post('rooms.favorite', roomId=room_id, favorite=favorite)
elif room_name is not None:
return self.__call_api_post('rooms.favorite', roomName=room_name, favorite=favorite)
else:
raise RocketMissingParamException('roomId or roomName required') | ['def', 'rooms_favorite', '(', 'self', ',', 'room_id', '=', 'None', ',', 'room_name', '=', 'None', ',', 'favorite', '=', 'True', ')', ':', 'if', 'room_id', 'is', 'not', 'None', ':', 'return', 'self', '.', '__call_api_post', '(', "'rooms.favorite'", ',', 'roomId', '=', 'room_id', ',', 'favorite', '=', 'favorite', ')', 'elif', 'room_name', 'is', 'not', 'None', ':', 'return', 'self', '.', '__call_api_post', '(', "'rooms.favorite'", ',', 'roomName', '=', 'room_name', ',', 'favorite', '=', 'favorite', ')', 'else', ':', 'raise', 'RocketMissingParamException', '(', "'roomId or roomName required'", ')'] | Favorite or unfavorite room. | ['Favorite', 'or', 'unfavorite', 'room', '.'] | train | https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L659-L666 |
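A hypothetical call sequence; the server URL, credentials and room names are placeholders:

```python
from rocketchat_API.rocketchat import RocketChat

rocket = RocketChat('myuser', 'mypassword', server_url='https://chat.example.com')
rocket.rooms_favorite(room_name='general', favorite=True)   # favorite by name
rocket.rooms_favorite(room_id='GENERAL', favorite=False)    # or unfavorite by id
```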
3,258 | openvax/isovar | isovar/reference_context.py | reference_contexts_for_variants | def reference_contexts_for_variants(
variants,
context_size,
transcript_id_whitelist=None):
"""
Extract a set of reference contexts for each variant in the collection.
Parameters
----------
variants : varcode.VariantCollection
context_size : int
Max of nucleotides to include to the left and right of the variant
in the context sequence.
transcript_id_whitelist : set, optional
If given, then only consider transcripts whose IDs are in this set.
Returns a dictionary from variants to lists of ReferenceContext objects,
sorted by max coding sequence length of any transcript.
"""
result = OrderedDict()
for variant in variants:
result[variant] = reference_contexts_for_variant(
variant=variant,
context_size=context_size,
transcript_id_whitelist=transcript_id_whitelist)
return result | python | def reference_contexts_for_variants(
variants,
context_size,
transcript_id_whitelist=None):
"""
Extract a set of reference contexts for each variant in the collection.
Parameters
----------
variants : varcode.VariantCollection
context_size : int
Max of nucleotides to include to the left and right of the variant
in the context sequence.
transcript_id_whitelist : set, optional
If given, then only consider transcripts whose IDs are in this set.
Returns a dictionary from variants to lists of ReferenceContext objects,
sorted by max coding sequence length of any transcript.
"""
result = OrderedDict()
for variant in variants:
result[variant] = reference_contexts_for_variant(
variant=variant,
context_size=context_size,
transcript_id_whitelist=transcript_id_whitelist)
return result | ['def', 'reference_contexts_for_variants', '(', 'variants', ',', 'context_size', ',', 'transcript_id_whitelist', '=', 'None', ')', ':', 'result', '=', 'OrderedDict', '(', ')', 'for', 'variant', 'in', 'variants', ':', 'result', '[', 'variant', ']', '=', 'reference_contexts_for_variant', '(', 'variant', '=', 'variant', ',', 'context_size', '=', 'context_size', ',', 'transcript_id_whitelist', '=', 'transcript_id_whitelist', ')', 'return', 'result'] | Extract a set of reference contexts for each variant in the collection.
Parameters
----------
variants : varcode.VariantCollection
context_size : int
Max of nucleotides to include to the left and right of the variant
in the context sequence.
transcript_id_whitelist : set, optional
If given, then only consider transcripts whose IDs are in this set.
Returns a dictionary from variants to lists of ReferenceContext objects,
sorted by max coding sequence length of any transcript. | ['Extract', 'a', 'set', 'of', 'reference', 'contexts', 'for', 'each', 'variant', 'in', 'the', 'collection', '.'] | train | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/reference_context.py#L141-L168 |
3,259 | gem/oq-engine | openquake/risklib/asset.py | Exposure.get_mesh_assets_by_site | def get_mesh_assets_by_site(self):
"""
:returns: (Mesh instance, assets_by_site list)
"""
assets_by_loc = general.groupby(self, key=lambda a: a.location)
mesh = geo.Mesh.from_coords(list(assets_by_loc))
assets_by_site = [
assets_by_loc[lonlat] for lonlat in zip(mesh.lons, mesh.lats)]
return mesh, assets_by_site | python | def get_mesh_assets_by_site(self):
"""
:returns: (Mesh instance, assets_by_site list)
"""
assets_by_loc = general.groupby(self, key=lambda a: a.location)
mesh = geo.Mesh.from_coords(list(assets_by_loc))
assets_by_site = [
assets_by_loc[lonlat] for lonlat in zip(mesh.lons, mesh.lats)]
return mesh, assets_by_site | ['def', 'get_mesh_assets_by_site', '(', 'self', ')', ':', 'assets_by_loc', '=', 'general', '.', 'groupby', '(', 'self', ',', 'key', '=', 'lambda', 'a', ':', 'a', '.', 'location', ')', 'mesh', '=', 'geo', '.', 'Mesh', '.', 'from_coords', '(', 'list', '(', 'assets_by_loc', ')', ')', 'assets_by_site', '=', '[', 'assets_by_loc', '[', 'lonlat', ']', 'for', 'lonlat', 'in', 'zip', '(', 'mesh', '.', 'lons', ',', 'mesh', '.', 'lats', ')', ']', 'return', 'mesh', ',', 'assets_by_site'] | :returns: (Mesh instance, assets_by_site list) | [':', 'returns', ':', '(', 'Mesh', 'instance', 'assets_by_site', 'list', ')'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/asset.py#L1049-L1057 |
3,260 | SUSE-Enceladus/ipa | ipa/ipa_utils.py | extract_archive | def extract_archive(client, archive_path, extract_path=None):
"""
Extract the archive in current path using the provided client.
If extract_path is provided extract the archive there.
"""
command = 'tar -xf {path}'.format(path=archive_path)
if extract_path:
command += ' -C {extract_path}'.format(extract_path=extract_path)
out = execute_ssh_command(client, command)
return out | python | def extract_archive(client, archive_path, extract_path=None):
"""
Extract the archive in current path using the provided client.
If extract_path is provided extract the archive there.
"""
command = 'tar -xf {path}'.format(path=archive_path)
if extract_path:
command += ' -C {extract_path}'.format(extract_path=extract_path)
out = execute_ssh_command(client, command)
return out | ['def', 'extract_archive', '(', 'client', ',', 'archive_path', ',', 'extract_path', '=', 'None', ')', ':', 'command', '=', "'tar -xf {path}'", '.', 'format', '(', 'path', '=', 'archive_path', ')', 'if', 'extract_path', ':', 'command', '+=', "' -C {extract_path}'", '.', 'format', '(', 'extract_path', '=', 'extract_path', ')', 'out', '=', 'execute_ssh_command', '(', 'client', ',', 'command', ')', 'return', 'out'] | Extract the archive in current path using the provided client.
If extract_path is provided extract the archive there. | ['Extract', 'the', 'archive', 'in', 'current', 'path', 'using', 'the', 'provided', 'client', '.'] | train | https://github.com/SUSE-Enceladus/ipa/blob/0845eed0ea25a27dbb059ad1016105fa60002228/ipa/ipa_utils.py#L150-L162 |
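A hypothetical end-to-end call, assuming `execute_ssh_command` accepts a paramiko `SSHClient`; the host, user and paths are placeholders:

```python
import paramiko

# `extract_archive` from the ipa_utils module above is assumed to be in scope
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('host.example.com', username='user', key_filename='/home/user/.ssh/id_rsa')

out = extract_archive(client, '/tmp/tests.tar', extract_path='/home/user/tests')
# runs remotely: tar -xf /tmp/tests.tar -C /home/user/tests
print(out)
client.close()
```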
3,261 | mitsei/dlkit | dlkit/json_/resource/objects.py | ResourceForm.get_group_metadata | def get_group_metadata(self):
"""Gets the metadata for a group.
return: (osid.Metadata) - metadata for the group
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['group'])
metadata.update({'existing_boolean_values': self._my_map['group']})
return Metadata(**metadata) | python | def get_group_metadata(self):
"""Gets the metadata for a group.
return: (osid.Metadata) - metadata for the group
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['group'])
metadata.update({'existing_boolean_values': self._my_map['group']})
return Metadata(**metadata) | ['def', 'get_group_metadata', '(', 'self', ')', ':', '# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template', 'metadata', '=', 'dict', '(', 'self', '.', '_mdata', '[', "'group'", ']', ')', 'metadata', '.', 'update', '(', '{', "'existing_boolean_values'", ':', 'self', '.', '_my_map', '[', "'group'", ']', '}', ')', 'return', 'Metadata', '(', '*', '*', 'metadata', ')'] | Gets the metadata for a group.
return: (osid.Metadata) - metadata for the group
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'metadata', 'for', 'a', 'group', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/objects.py#L192-L202 |
3,262 | saltstack/salt | salt/modules/nacl.py | dec | def dec(data, **kwargs):
'''
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
'''
kwargs['opts'] = __opts__
return salt.utils.nacl.dec(data, **kwargs) | python | def dec(data, **kwargs):
'''
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
'''
kwargs['opts'] = __opts__
return salt.utils.nacl.dec(data, **kwargs) | ['def', 'dec', '(', 'data', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'opts'", ']', '=', '__opts__', 'return', 'salt', '.', 'utils', '.', 'nacl', '.', 'dec', '(', 'data', ',', '*', '*', 'kwargs', ')'] | Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default) | ['Alias', 'to', '{', 'box_type', '}', '_decrypt'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nacl.py#L221-L228 |
3,263 | ff0000/scarlet | scarlet/cache/views.py | CacheView.dispatch | def dispatch(self, request, *args, **kwargs):
"""
Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware.
"""
self.request = request
self.args = args
self.kwargs = kwargs
self.cache_middleware = None
response = None
if self.should_cache():
prefix = "%s:%s" % (self.get_cache_version(),
self.get_cache_prefix())
# Using middleware here since that is what the decorator uses
# internally and it avoids making this code all complicated with
# all sorts of wrappers.
self.set_cache_middleware(self.cache_time, prefix)
response = self.cache_middleware.process_request(self.request)
else:
self.set_do_not_cache()
if not response:
response = super(CacheView, self).dispatch(self.request, *args,
**kwargs)
return self._finalize_cached_response(request, response) | python | def dispatch(self, request, *args, **kwargs):
"""
Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware.
"""
self.request = request
self.args = args
self.kwargs = kwargs
self.cache_middleware = None
response = None
if self.should_cache():
prefix = "%s:%s" % (self.get_cache_version(),
self.get_cache_prefix())
# Using middleware here since that is what the decorator uses
# internally and it avoids making this code all complicated with
# all sorts of wrappers.
self.set_cache_middleware(self.cache_time, prefix)
response = self.cache_middleware.process_request(self.request)
else:
self.set_do_not_cache()
if not response:
response = super(CacheView, self).dispatch(self.request, *args,
**kwargs)
return self._finalize_cached_response(request, response) | ['def', 'dispatch', '(', 'self', ',', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'request', '=', 'request', 'self', '.', 'args', '=', 'args', 'self', '.', 'kwargs', '=', 'kwargs', 'self', '.', 'cache_middleware', '=', 'None', 'response', '=', 'None', 'if', 'self', '.', 'should_cache', '(', ')', ':', 'prefix', '=', '"%s:%s"', '%', '(', 'self', '.', 'get_cache_version', '(', ')', ',', 'self', '.', 'get_cache_prefix', '(', ')', ')', '# Using middleware here since that is what the decorator uses', '# internally and it avoids making this code all complicated with', '# all sorts of wrappers.', 'self', '.', 'set_cache_middleware', '(', 'self', '.', 'cache_time', ',', 'prefix', ')', 'response', '=', 'self', '.', 'cache_middleware', '.', 'process_request', '(', 'self', '.', 'request', ')', 'else', ':', 'self', '.', 'set_do_not_cache', '(', ')', 'if', 'not', 'response', ':', 'response', '=', 'super', '(', 'CacheView', ',', 'self', ')', '.', 'dispatch', '(', 'self', '.', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'self', '.', '_finalize_cached_response', '(', 'request', ',', 'response', ')'] | Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware. | ['Overrides', 'Django', 's', 'default', 'dispatch', 'to', 'provide', 'caching', '.'] | train | https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cache/views.py#L179-L211 |
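A hypothetical subclass sketch showing the hooks `dispatch()` relies on (`cache_time` and `get_cache_prefix`); the import path follows this row's file path, and the view details are assumptions:

```python
from scarlet.cache.views import CacheView  # path taken from this row's file location

class ArticleDetailView(CacheView):
    cache_time = 60 * 15  # cache rendered responses for 15 minutes

    def get_cache_prefix(self):
        # vary the cache key by a URL kwarg; 'slug' is an invented example
        return 'article:%s' % self.kwargs.get('slug', '')
```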
3,264 | sassoftware/saspy | saspy/sasqc.py | SASqc.shewhart | def shewhart(self, data: ['SASdata', str] = None,
boxchart: str = None,
cchart: str = None,
irchart: str = None,
mchart: str = None,
mrchart: str = None,
npchart: str = None,
pchart: str = None,
rchart: str = None,
schart: str = None,
uchart: str = None,
xrchart: str = None,
xschart: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the SHEWHART procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:parm boxchart: The boxchart variable can only be a string type.
:parm cchart: The cchart variable can only be a string type.
:parm irchart: The irchart variable can only be a string type.
:parm mchart: The mchart variable can only be a string type.
:parm mrchart: The mrchart variable can only be a string type.
:parm npchart: The npchart variable can only be a string type.
:parm pchart: The pchart variable can only be a string type.
:parm rchart: The rchart variable can only be a string type.
:parm schart: The schart variable can only be a string type.
:parm uchart: The uchart variable can only be a string type.
:parm xrchart: The xrchart variable can only be a string type.
:parm xschart: The xschart variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | python | def shewhart(self, data: ['SASdata', str] = None,
boxchart: str = None,
cchart: str = None,
irchart: str = None,
mchart: str = None,
mrchart: str = None,
npchart: str = None,
pchart: str = None,
rchart: str = None,
schart: str = None,
uchart: str = None,
xrchart: str = None,
xschart: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the SHEWHART procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:parm boxchart: The boxchart variable can only be a string type.
:parm cchart: The cchart variable can only be a string type.
:parm irchart: The irchart variable can only be a string type.
:parm mchart: The mchart variable can only be a string type.
:parm mrchart: The mrchart variable can only be a string type.
:parm npchart: The npchart variable can only be a string type.
:parm pchart: The pchart variable can only be a string type.
:parm rchart: The rchart variable can only be a string type.
:parm schart: The schart variable can only be a string type.
:parm uchart: The uchart variable can only be a string type.
:parm xrchart: The xrchart variable can only be a string type.
:parm xschart: The xschart variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | ['def', 'shewhart', '(', 'self', ',', 'data', ':', '[', "'SASdata'", ',', 'str', ']', '=', 'None', ',', 'boxchart', ':', 'str', '=', 'None', ',', 'cchart', ':', 'str', '=', 'None', ',', 'irchart', ':', 'str', '=', 'None', ',', 'mchart', ':', 'str', '=', 'None', ',', 'mrchart', ':', 'str', '=', 'None', ',', 'npchart', ':', 'str', '=', 'None', ',', 'pchart', ':', 'str', '=', 'None', ',', 'rchart', ':', 'str', '=', 'None', ',', 'schart', ':', 'str', '=', 'None', ',', 'uchart', ':', 'str', '=', 'None', ',', 'xrchart', ':', 'str', '=', 'None', ',', 'xschart', ':', 'str', '=', 'None', ',', 'procopts', ':', 'str', '=', 'None', ',', 'stmtpassthrough', ':', 'str', '=', 'None', ',', '*', '*', 'kwargs', ':', 'dict', ')', '->', "'SASresults'", ':'] | Python method to call the SHEWHART procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:parm boxchart: The boxchart variable can only be a string type.
:parm cchart: The cchart variable can only be a string type.
:parm irchart: The irchart variable can only be a string type.
:parm mchart: The mchart variable can only be a string type.
:parm mrchart: The mrchart variable can only be a string type.
:parm npchart: The npchart variable can only be a string type.
:parm pchart: The pchart variable can only be a string type.
:parm rchart: The rchart variable can only be a string type.
:parm schart: The schart variable can only be a string type.
:parm uchart: The uchart variable can only be a string type.
:parm xrchart: The xrchart variable can only be a string type.
:parm xschart: The xschart variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object | ['Python', 'method', 'to', 'call', 'the', 'SHEWHART', 'procedure'] | train | https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasqc.py#L175-L213 |
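A minimal usage sketch for the shewhart record above (the session setup, table name and chart request are placeholders, not taken from the dataset):

import saspy

sas = saspy.SASsession()                            # assumes a configured SAS connection
qc = sas.sasqc()                                    # qc accessor provided by saspy
cars = sas.sasdata('cars', libref='sashelp')        # hypothetical input table
res = qc.shewhart(data=cars, xschart='msrp*type')   # chart option is illustrative only
print(type(res))                                    # SASresults object on success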
3,265 | raamana/hiwenet | hiwenet/more_metrics.py | diff_medians | def diff_medians(array_one, array_two):
"""
Computes the difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty.
"""
array_one = check_array(array_one)
array_two = check_array(array_two)
diff_medians = np.ma.median(array_one) - np.ma.median(array_two)
return diff_medians | python | def diff_medians(array_one, array_two):
"""
Computes the difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty.
"""
array_one = check_array(array_one)
array_two = check_array(array_two)
diff_medians = np.ma.median(array_one) - np.ma.median(array_two)
return diff_medians | ['def', 'diff_medians', '(', 'array_one', ',', 'array_two', ')', ':', 'array_one', '=', 'check_array', '(', 'array_one', ')', 'array_two', '=', 'check_array', '(', 'array_two', ')', 'diff_medians', '=', 'np', '.', 'ma', '.', 'median', '(', 'array_one', ')', '-', 'np', '.', 'ma', '.', 'median', '(', 'array_two', ')', 'return', 'diff_medians'] | Computes the difference in medians between two arrays of values.
Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.
Parameters
----------
array_one, array_two : iterable
Two arrays of values, possibly of different length.
Returns
-------
diff_medians : float
scalar measuring the difference in medians, ignoring NaNs/non-finite values.
Raises
------
ValueError
If one or more of the arrays are empty. | ['Computes', 'the', 'difference', 'in', 'medians', 'between', 'two', 'arrays', 'of', 'values', '.'] | train | https://github.com/raamana/hiwenet/blob/b12699b3722fd0a6a835e7d7ca4baf58fb181809/hiwenet/more_metrics.py#L21-L49 |
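A small usage sketch for diff_medians (the import path follows the file path in the record; inputs are toy data):

import numpy as np
from hiwenet.more_metrics import diff_medians

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([0.5, 1.5, 2.5])
print(diff_medians(a, b))   # median(a) - median(b) = 2.5 - 1.5 = 1.0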
3,266 | tornadoweb/tornado | tornado/gen.py | multi_future | def multi_future(
children: Union[List[_Yieldable], Dict[Any, _Yieldable]],
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> "Union[Future[List], Future[Dict]]":
"""Wait for multiple asynchronous futures in parallel.
Since Tornado 6.0, this function is exactly the same as `multi`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead.
"""
if isinstance(children, dict):
keys = list(children.keys()) # type: Optional[List]
children_seq = children.values() # type: Iterable
else:
keys = None
children_seq = children
children_futs = list(map(convert_yielded, children_seq))
assert all(is_future(i) or isinstance(i, _NullFuture) for i in children_futs)
unfinished_children = set(children_futs)
future = _create_future()
if not children_futs:
future_set_result_unless_cancelled(future, {} if keys is not None else [])
def callback(fut: Future) -> None:
unfinished_children.remove(fut)
if not unfinished_children:
result_list = []
for f in children_futs:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error(
"Multiple exceptions in yield list", exc_info=True
)
else:
future_set_exc_info(future, sys.exc_info())
if not future.done():
if keys is not None:
future_set_result_unless_cancelled(
future, dict(zip(keys, result_list))
)
else:
future_set_result_unless_cancelled(future, result_list)
listening = set() # type: Set[Future]
for f in children_futs:
if f not in listening:
listening.add(f)
future_add_done_callback(f, callback)
return future | python | train | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/gen.py#L463-L523 |
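An illustrative coroutine using the public `multi` alias that the deprecation note above points to (the URLs are arbitrary):

from tornado import gen, httpclient, ioloop

async def main():
    client = httpclient.AsyncHTTPClient()
    responses = await gen.multi({
        "python": client.fetch("https://www.python.org/"),
        "tornado": client.fetch("https://www.tornadoweb.org/"),
    })
    print({name: resp.code for name, resp in responses.items()})

ioloop.IOLoop.current().run_sync(main)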
3,267 | zetaops/zengine | zengine/lib/camunda_parser.py | CamundaProcessParser._get_description | def _get_description(self):
"""
Tries to get WF description from 'collabration' or 'process' or 'pariticipant'
Returns:
"""
ns = {'ns': '{%s}' % BPMN_MODEL_NS}
desc = (
self.doc_xpath('.//{ns}collaboration/{ns}documentation'.format(**ns)) or
self.doc_xpath('.//{ns}process/{ns}documentation'.format(**ns)) or
self.doc_xpath('.//{ns}collaboration/{ns}participant/{ns}documentation'.format(**ns))
)
if desc:
return desc[0].findtext('.') | python | def _get_description(self):
"""
Tries to get WF description from 'collabration' or 'process' or 'pariticipant'
Returns:
"""
ns = {'ns': '{%s}' % BPMN_MODEL_NS}
desc = (
self.doc_xpath('.//{ns}collaboration/{ns}documentation'.format(**ns)) or
self.doc_xpath('.//{ns}process/{ns}documentation'.format(**ns)) or
self.doc_xpath('.//{ns}collaboration/{ns}participant/{ns}documentation'.format(**ns))
)
if desc:
return desc[0].findtext('.') | ['def', '_get_description', '(', 'self', ')', ':', 'ns', '=', '{', "'ns'", ':', "'{%s}'", '%', 'BPMN_MODEL_NS', '}', 'desc', '=', '(', 'self', '.', 'doc_xpath', '(', "'.//{ns}collaboration/{ns}documentation'", '.', 'format', '(', '*', '*', 'ns', ')', ')', 'or', 'self', '.', 'doc_xpath', '(', "'.//{ns}process/{ns}documentation'", '.', 'format', '(', '*', '*', 'ns', ')', ')', 'or', 'self', '.', 'doc_xpath', '(', "'.//{ns}collaboration/{ns}participant/{ns}documentation'", '.', 'format', '(', '*', '*', 'ns', ')', ')', ')', 'if', 'desc', ':', 'return', 'desc', '[', '0', ']', '.', 'findtext', '(', "'.'", ')'] | Tries to get WF description from 'collabration' or 'process' or 'pariticipant'
Returns: | ['Tries', 'to', 'get', 'WF', 'description', 'from', 'collabration', 'or', 'process', 'or', 'pariticipant', 'Returns', ':'] | train | https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/lib/camunda_parser.py#L66-L79 |
3,268 | ubc/ubcpi | ubcpi/serialize.py | serialize_seeds | def serialize_seeds(seeds, block):
"""
Serialize the seeds in peer instruction XBlock to xml
Args:
seeds (lxml.etree.Element): The <seeds> XML element.
block (PeerInstructionXBlock): The XBlock with configuration to serialize.
Returns:
None
"""
for seed_dict in block.seeds:
seed = etree.SubElement(seeds, 'seed')
# options in xml starts with 1
seed.set('option', unicode(seed_dict.get('answer', 0) + 1))
seed.text = seed_dict.get('rationale', '') | python | def serialize_seeds(seeds, block):
"""
Serialize the seeds in peer instruction XBlock to xml
Args:
seeds (lxml.etree.Element): The <seeds> XML element.
block (PeerInstructionXBlock): The XBlock with configuration to serialize.
Returns:
None
"""
for seed_dict in block.seeds:
seed = etree.SubElement(seeds, 'seed')
# options in xml starts with 1
seed.set('option', unicode(seed_dict.get('answer', 0) + 1))
seed.text = seed_dict.get('rationale', '') | ['def', 'serialize_seeds', '(', 'seeds', ',', 'block', ')', ':', 'for', 'seed_dict', 'in', 'block', '.', 'seeds', ':', 'seed', '=', 'etree', '.', 'SubElement', '(', 'seeds', ',', "'seed'", ')', '# options in xml starts with 1', 'seed', '.', 'set', '(', "'option'", ',', 'unicode', '(', 'seed_dict', '.', 'get', '(', "'answer'", ',', '0', ')', '+', '1', ')', ')', 'seed', '.', 'text', '=', 'seed_dict', '.', 'get', '(', "'rationale'", ',', "''", ')'] | Serialize the seeds in peer instruction XBlock to xml
Args:
seeds (lxml.etree.Element): The <seeds> XML element.
block (PeerInstructionXBlock): The XBlock with configuration to serialize.
Returns:
None | ['Serialize', 'the', 'seeds', 'in', 'peer', 'instruction', 'XBlock', 'to', 'xml'] | train | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/serialize.py#L292-L307 |
3,269 | coldfix/udiskie | udiskie/udisks2.py | Daemon._interfaces_removed | def _interfaces_removed(self, object_path, interfaces):
"""Internal method."""
old_state = copy(self._objects[object_path])
for interface in interfaces:
del self._objects[object_path][interface]
new_state = self._objects[object_path]
if Interface['Drive'] in interfaces:
self._detect_toggle(
'has_media',
self.get(object_path, old_state),
self.get(object_path, new_state),
None, 'media_removed')
if Interface['Block'] in interfaces:
slave = self.get(object_path, old_state).luks_cleartext_slave
if slave:
if not self._has_job(slave.object_path, 'device_locked'):
self.trigger('device_locked', slave)
if self._objects[object_path]:
self.trigger('device_changed',
self.get(object_path, old_state),
self.get(object_path, new_state))
else:
del self._objects[object_path]
if object_kind(object_path) in ('device', 'drive'):
self.trigger(
'device_removed',
self.get(object_path, old_state)) | python | train | https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L821-L850 |
3,270 | santoshphilip/eppy | eppy/useful_scripts/loopdiagram.py | makediagram | def makediagram(edges):
"""make the diagram with the edges"""
graph = pydot.Dot(graph_type='digraph')
nodes = edges2nodes(edges)
epnodes = [(node,
makeanode(node[0])) for node in nodes if nodetype(node)=="epnode"]
endnodes = [(node,
makeendnode(node[0])) for node in nodes if nodetype(node)=="EndNode"]
epbr = [(node, makeabranch(node)) for node in nodes if not istuple(node)]
nodedict = dict(epnodes + epbr + endnodes)
for value in list(nodedict.values()):
graph.add_node(value)
for e1, e2 in edges:
graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
return graph | python | def makediagram(edges):
"""make the diagram with the edges"""
graph = pydot.Dot(graph_type='digraph')
nodes = edges2nodes(edges)
epnodes = [(node,
makeanode(node[0])) for node in nodes if nodetype(node)=="epnode"]
endnodes = [(node,
makeendnode(node[0])) for node in nodes if nodetype(node)=="EndNode"]
epbr = [(node, makeabranch(node)) for node in nodes if not istuple(node)]
nodedict = dict(epnodes + epbr + endnodes)
for value in list(nodedict.values()):
graph.add_node(value)
for e1, e2 in edges:
graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
return graph | ['def', 'makediagram', '(', 'edges', ')', ':', 'graph', '=', 'pydot', '.', 'Dot', '(', 'graph_type', '=', "'digraph'", ')', 'nodes', '=', 'edges2nodes', '(', 'edges', ')', 'epnodes', '=', '[', '(', 'node', ',', 'makeanode', '(', 'node', '[', '0', ']', ')', ')', 'for', 'node', 'in', 'nodes', 'if', 'nodetype', '(', 'node', ')', '==', '"epnode"', ']', 'endnodes', '=', '[', '(', 'node', ',', 'makeendnode', '(', 'node', '[', '0', ']', ')', ')', 'for', 'node', 'in', 'nodes', 'if', 'nodetype', '(', 'node', ')', '==', '"EndNode"', ']', 'epbr', '=', '[', '(', 'node', ',', 'makeabranch', '(', 'node', ')', ')', 'for', 'node', 'in', 'nodes', 'if', 'not', 'istuple', '(', 'node', ')', ']', 'nodedict', '=', 'dict', '(', 'epnodes', '+', 'epbr', '+', 'endnodes', ')', 'for', 'value', 'in', 'list', '(', 'nodedict', '.', 'values', '(', ')', ')', ':', 'graph', '.', 'add_node', '(', 'value', ')', 'for', 'e1', ',', 'e2', 'in', 'edges', ':', 'graph', '.', 'add_edge', '(', 'pydot', '.', 'Edge', '(', 'nodedict', '[', 'e1', ']', ',', 'nodedict', '[', 'e2', ']', ')', ')', 'return', 'graph'] | make the diagram with the edges | ['make', 'the', 'diagram', 'with', 'the', 'edges'] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/useful_scripts/loopdiagram.py#L140-L154 |
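A rough driver for makediagram (the node-tuple convention mirrors the code above; edge values and the output file name are made up):

edges = [
    (("node-demand-out", "epnode"), "demand branch"),
    ("demand branch", ("node-supply-in", "epnode")),
]
graph = makediagram(edges)
graph.write_png("loopdiagram.png")   # pydot rendering requires graphviz on the PATH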
3,271 | swimlane/swimlane-python | swimlane/core/cursor.py | PaginatedCursor._evaluate | def _evaluate(self):
"""Lazily retrieve and paginate report results and build Record instances from returned data"""
if self._elements:
for element in self._elements:
yield element
else:
for page in itertools.count():
raw_elements = self._retrieve_raw_elements(page)
for raw_element in raw_elements:
element = self._parse_raw_element(raw_element)
self._elements.append(element)
yield element
if self.__limit and len(self._elements) >= self.__limit:
break
if any([
len(raw_elements) < self.page_size,
(self.__limit and len(self._elements) >= self.__limit)
]):
break | python | train | https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/cursor.py#L44-L64 |
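The caching and pagination idea above (replay cached items, fetch pages lazily, stop on a short page or on the limit) can be shown with a standalone sketch; this is not the swimlane API, just the same pattern:

import itertools

class LazyPages:
    """Cache fetched items and pull further pages only when iterated."""
    def __init__(self, fetch_page, page_size=10, limit=None):
        self._fetch_page = fetch_page          # callable: page index -> list of items
        self._cache = []
        self.page_size = page_size
        self.limit = limit

    def __iter__(self):
        if self._cache:                        # replay items already fetched
            yield from self._cache
            return
        for page in itertools.count():
            batch = self._fetch_page(page)
            for item in batch:
                self._cache.append(item)
                yield item
                if self.limit and len(self._cache) >= self.limit:
                    return
            if len(batch) < self.page_size:    # short page means no more data
                return

fetch = lambda p: list(range(p * 10, min((p + 1) * 10, 23)))   # 23 items, 10 per page
print(len(list(LazyPages(fetch))))                             # 23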
3,272 | carpedm20/fbchat | fbchat/_client.py | Client.sendRemoteVoiceClips | def sendRemoteVoiceClips(
self, clip_urls, message=None, thread_id=None, thread_type=ThreadType.USER
):
"""
Sends voice clips from URLs to a thread
:param clip_urls: URLs of clips to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed
"""
clip_urls = require_list(clip_urls)
files = self._upload(get_files_from_urls(clip_urls), voice_clip=True)
return self._sendFiles(
files=files, message=message, thread_id=thread_id, thread_type=thread_type
) | python | train | https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L1610-L1628 |
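A hedged end-to-end sketch for the method above (credentials, thread id and clip URL are placeholders):

from fbchat import Client
from fbchat.models import ThreadType

client = Client("<email>", "<password>")
client.sendRemoteVoiceClips(
    ["https://example.com/clip.mp3"],
    thread_id="0000000000000000",
    thread_type=ThreadType.USER,
)
client.logout()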
3,273 | emc-openstack/storops | storops/vnx/block_cli.py | duel_command | def duel_command(f):
""" indicate it's a command need to be called on both SP
:param f: function that returns the command in list
:return: command execution result on both sps (tuple of 2)
"""
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute_dual(commands)
return func_wrapper | python | def duel_command(f):
""" indicate it's a command need to be called on both SP
:param f: function that returns the command in list
:return: command execution result on both sps (tuple of 2)
"""
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute_dual(commands)
return func_wrapper | ['def', 'duel_command', '(', 'f', ')', ':', '@', 'functools', '.', 'wraps', '(', 'f', ')', 'def', 'func_wrapper', '(', 'self', ',', '*', 'argv', ',', '*', '*', 'kwargs', ')', ':', 'commands', '=', '_get_commands', '(', 'f', ',', 'self', ',', '*', 'argv', ',', '*', '*', 'kwargs', ')', 'return', 'self', '.', 'execute_dual', '(', 'commands', ')', 'return', 'func_wrapper'] | indicate it's a command need to be called on both SP
:param f: function that returns the command in list
:return: command execution result on both sps (tuple of 2) | ['indicate', 'it', 's', 'a', 'command', 'need', 'to', 'be', 'called', 'on', 'both', 'SP'] | train | https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/vnx/block_cli.py#L78-L90 |
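A generic illustration of the same decorator idea (the method only builds the command list, the wrapper runs it against both service processors); the names here are invented, not storops internals:

import functools

def run_on_both(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        command = f(self, *args, **kwargs)          # the method returns the command list
        return (self.run_on('spa', command),        # the wrapper executes it on both SPs
                self.run_on('spb', command))
    return wrapper

class ToyCli:
    def run_on(self, sp, command):
        return '{}: {}'.format(sp, ' '.join(command))

    @run_on_both
    def get_agent(self):
        return ['getagent']

print(ToyCli().get_agent())   # ('spa: getagent', 'spb: getagent')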
3,274 | linnarsson-lab/loompy | loompy/loompy.py | create | def create(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Union[loompy.AttributeManager, Dict[str, np.ndarray]], col_attrs: Union[loompy.AttributeManager, Dict[str, np.ndarray]], *, file_attrs: Dict[str, str] = None) -> None:
"""
Create a new Loom file from the given data.
Args:
filename (str): The filename (typically using a ``.loom`` file extension)
layers: One of the following:
* Two-dimensional (N-by-M) numpy ndarray of float values
* Sparse matrix (e.g. :class:`scipy.sparse.csr_matrix`)
* Dictionary of named layers, each an N-by-M ndarray or sparse matrix
* A :class:`.LayerManager`, with each layer an N-by-M ndarray
row_attrs (dict): Row attributes, where keys are attribute names and values
are numpy arrays (float or string) of length N
col_attrs (dict): Column attributes, where keys are attribute names and
values are numpy arrays (float or string) of length M
file_attrs (dict): Global attributes, where keys are attribute names and
values are strings
Returns:
Nothing
Remarks:
If the file exists, it will be overwritten.
"""
if isinstance(row_attrs, loompy.AttributeManager):
row_attrs = {k: v[:] for k, v in row_attrs.items()}
if isinstance(col_attrs, loompy.AttributeManager):
col_attrs = {k: v[:] for k, v in col_attrs.items()}
if isinstance(layers, np.ndarray) or scipy.sparse.issparse(layers):
layers = {"": layers}
elif isinstance(layers, loompy.LayerManager):
layers = {k: v[:, :] for k, v in layers.items()}
if "" not in layers:
raise ValueError("Data for default layer must be provided")
# Sanity checks
shape = layers[""].shape # type: ignore
if shape[0] == 0 or shape[1] == 0:
raise ValueError("Main matrix cannot be empty")
for name, layer in layers.items():
if layer.shape != shape: # type: ignore
raise ValueError(f"Layer '{name}' is not the same shape as the main matrix")
for name, ra in row_attrs.items():
if ra.shape[0] != shape[0]:
raise ValueError(f"Row attribute '{name}' is not the same length ({ra.shape[0]}) as number of rows in main matrix ({shape[0]})")
for name, ca in col_attrs.items():
if ca.shape[0] != shape[1]:
raise ValueError(f"Column attribute '{name}' is not the same length ({ca.shape[0]}) as number of columns in main matrix ({shape[1]})")
try:
with new(filename, file_attrs=file_attrs) as ds:
for key, vals in layers.items():
ds.layer[key] = vals
for key, vals in row_attrs.items():
ds.ra[key] = vals
for key, vals in col_attrs.items():
ds.ca[key] = vals
except ValueError as ve:
#ds.close(suppress_warning=True) # ds does not exist here
if os.path.exists(filename):
os.remove(filename)
raise ve | python | train | https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L1023-L1089 |
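A short usage sketch for loompy.create as documented above (matrix size, file name and attribute names are arbitrary):

import numpy as np
import loompy

matrix = np.random.rand(100, 50)                                  # genes x cells
row_attrs = {"Gene": np.array(["g%d" % i for i in range(100)])}
col_attrs = {"CellID": np.array(["c%d" % j for j in range(50)])}
loompy.create("example.loom", matrix, row_attrs, col_attrs)

with loompy.connect("example.loom") as ds:
    print(ds.shape)                                               # (100, 50)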
3,275 | bykof/billomapy | billomapy/billomapy.py | Billomapy.confirmation_pdf | def confirmation_pdf(self, confirmation_id):
"""
Opens a pdf of a confirmation
:param confirmation_id: the confirmation id
:return: dict
"""
return self._create_get_request(resource=CONFIRMATIONS, billomat_id=confirmation_id, command=PDF) | python | def confirmation_pdf(self, confirmation_id):
"""
Opens a pdf of a confirmation
:param confirmation_id: the confirmation id
:return: dict
"""
return self._create_get_request(resource=CONFIRMATIONS, billomat_id=confirmation_id, command=PDF) | ['def', 'confirmation_pdf', '(', 'self', ',', 'confirmation_id', ')', ':', 'return', 'self', '.', '_create_get_request', '(', 'resource', '=', 'CONFIRMATIONS', ',', 'billomat_id', '=', 'confirmation_id', ',', 'command', '=', 'PDF', ')'] | Opens a pdf of a confirmation
:param confirmation_id: the confirmation id
:return: dict | ['Opens', 'a', 'pdf', 'of', 'a', 'confirmation'] | train | https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2933-L2940 |
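A hedged usage sketch (the Billomapy constructor arguments shown are assumptions; consult the project README for the exact signature):

from billomapy import Billomapy

client = Billomapy("<billomat_id>", "<api_key>", "<app_id>", "<app_secret>")
pdf_info = client.confirmation_pdf(1234)   # confirmation id is a placeholder
print(pdf_info)                            # dict describing the pdf resource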
3,276 | python-diamond/Diamond | src/collectors/icinga_stats/icinga_stats.py | IcingaStatsCollector._get_externalcmd_stats | def _get_externalcmd_stats(self, app_stats):
"""
Process:
* high_external_command_buffer_slots
* total_external_command_buffer_slots
* used_external_command_buffer_slots
* external_command_stats=
"""
khigh = "high_external_command_buffer_slots"
ktotal = "total_external_command_buffer_slots"
kused = "used_external_command_buffer_slots"
kstats = "external_command_stats"
aliases = {
khigh: "external_command.buffer_high",
ktotal: "external_command.buffer_total",
kused: "external_command.buffer_used",
"x01": "external_command.01",
"x05": "external_command.05",
"x15": "external_command.15",
}
stats = {}
if khigh in app_stats.keys() and str(app_stats[khigh]).isdigit():
key = aliases[khigh]
stats[key] = int(app_stats[khigh])
if ktotal in app_stats.keys() and str(app_stats[ktotal].isdigit()):
key = aliases[ktotal]
stats[key] = int(app_stats[ktotal])
if kused in app_stats.keys() and str(app_stats[kused].isdigit()):
key = aliases[kused]
stats[key] = int(app_stats[ktotal])
if kstats in app_stats.keys():
(x01, x05, x15) = self._convert_tripplet(app_stats[kstats])
stats[aliases["x01"]] = x01
stats[aliases["x05"]] = x05
stats[aliases["x01"]] = x15
return stats | python | train | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/icinga_stats/icinga_stats.py#L282-L321 |
3,277 | dailymuse/oz | oz/core/actions.py | server | def server():
"""Runs the server"""
tornado.log.enable_pretty_logging()
# Get and validate the server_type
server_type = oz.settings["server_type"]
if server_type not in [None, "wsgi", "asyncio", "twisted"]:
raise Exception("Unknown server type: %s" % server_type)
# Install the correct ioloop if necessary
if server_type == "asyncio":
from tornado.platform.asyncio import AsyncIOMainLoop
AsyncIOMainLoop().install()
elif server_type == "twisted":
from tornado.platform.twisted import TwistedIOLoop
TwistedIOLoop().install()
if server_type == "wsgi":
wsgi_app = tornado.wsgi.WSGIApplication(oz._routes, **oz.settings)
wsgi_srv = wsgiref.simple_server.make_server("", oz.settings["port"], wsgi_app)
wsgi_srv.serve_forever()
else:
web_app = tornado.web.Application(oz._routes, **oz.settings)
if oz.settings["ssl_cert_file"] != None and oz.settings["ssl_key_file"] != None:
ssl_options = {
"certfile": oz.settings["ssl_cert_file"],
"keyfile": oz.settings["ssl_key_file"],
"cert_reqs": oz.settings["ssl_cert_reqs"],
"ca_certs": oz.settings["ssl_ca_certs"]
}
else:
ssl_options = None
http_srv = tornado.httpserver.HTTPServer(
web_app,
ssl_options=ssl_options,
body_timeout=oz.settings["body_timeout"],
xheaders=oz.settings["xheaders"]
)
http_srv.bind(oz.settings["port"])
server_workers = oz.settings["server_workers"]
if server_workers > 1:
if oz.settings["debug"]:
print("WARNING: Debug is enabled, but multiple server workers have been configured. Only one server worker can run in debug mode.")
server_workers = 1
elif (server_type == "asyncio" or server_type == "twisted"):
print("WARNING: A non-default server type is being used, but multiple server workers have been configured. Only one server worker can run on a non-default server type.")
server_workers = 1
# Forks multiple sub-processes if server_workers > 1
http_srv.start(server_workers)
# Registers signal handles for graceful server shutdown
if oz.settings.get("use_graceful_shutdown"):
if server_type == "asyncio" or server_type == "twisted":
print("WARNING: Cannot enable graceful shutdown for asyncio or twisted server types.")
else:
# NOTE: Do not expect any logging to with certain tools (e.g., invoker),
# because they may quiet logs on SIGINT/SIGTERM
signal.signal(signal.SIGTERM, functools.partial(_shutdown_tornado_ioloop, http_srv))
signal.signal(signal.SIGINT, functools.partial(_shutdown_tornado_ioloop, http_srv))
# Starts the ioloops
if server_type == "asyncio":
import asyncio
asyncio.get_event_loop().run_forever()
elif server_type == "twisted":
from twisted.internet import reactor
reactor.run()
else:
from tornado import ioloop
ioloop.IOLoop.instance().start() | python | train | https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L65-L141 |
3,278 | SCIP-Interfaces/PySCIPOpt | examples/unfinished/portfolio_soco.py | p_portfolio | def p_portfolio(I,sigma,r,alpha,beta):
"""p_portfolio -- modified markowitz model for portfolio optimization.
Parameters:
- I: set of items
- sigma[i]: standard deviation of item i
- r[i]: revenue of item i
- alpha: acceptance threshold
- beta: desired confidence level
Returns a model, ready to be solved.
"""
model = Model("p_portfolio")
x = {}
for i in I:
x[i] = model.addVar(vtype="C", name="x(%s)"%i) # quantity of i to buy
rho = model.addVar(vtype="C", name="rho")
rhoaux = model.addVar(vtype="C", name="rhoaux")
model.addCons(rho == quicksum(r[i]*x[i] for i in I))
model.addCons(quicksum(x[i] for i in I) == 1)
model.addCons(rhoaux == (alpha - rho)*(1/phi_inv(beta))) #todo
model.addCons(quicksum(sigma[i]**2 * x[i] * x[i] for i in I) <= rhoaux * rhoaux)
model.setObjective(rho, "maximize")
model.data = x
return model | python | def p_portfolio(I,sigma,r,alpha,beta):
"""p_portfolio -- modified markowitz model for portfolio optimization.
Parameters:
- I: set of items
- sigma[i]: standard deviation of item i
- r[i]: revenue of item i
- alpha: acceptance threshold
- beta: desired confidence level
Returns a model, ready to be solved.
"""
model = Model("p_portfolio")
x = {}
for i in I:
x[i] = model.addVar(vtype="C", name="x(%s)"%i) # quantity of i to buy
rho = model.addVar(vtype="C", name="rho")
rhoaux = model.addVar(vtype="C", name="rhoaux")
model.addCons(rho == quicksum(r[i]*x[i] for i in I))
model.addCons(quicksum(x[i] for i in I) == 1)
model.addCons(rhoaux == (alpha - rho)*(1/phi_inv(beta))) #todo
model.addCons(quicksum(sigma[i]**2 * x[i] * x[i] for i in I) <= rhoaux * rhoaux)
model.setObjective(rho, "maximize")
model.data = x
return model | ['def', 'p_portfolio', '(', 'I', ',', 'sigma', ',', 'r', ',', 'alpha', ',', 'beta', ')', ':', 'model', '=', 'Model', '(', '"p_portfolio"', ')', 'x', '=', '{', '}', 'for', 'i', 'in', 'I', ':', 'x', '[', 'i', ']', '=', 'model', '.', 'addVar', '(', 'vtype', '=', '"C"', ',', 'name', '=', '"x(%s)"', '%', 'i', ')', '# quantity of i to buy', 'rho', '=', 'model', '.', 'addVar', '(', 'vtype', '=', '"C"', ',', 'name', '=', '"rho"', ')', 'rhoaux', '=', 'model', '.', 'addVar', '(', 'vtype', '=', '"C"', ',', 'name', '=', '"rhoaux"', ')', 'model', '.', 'addCons', '(', 'rho', '==', 'quicksum', '(', 'r', '[', 'i', ']', '*', 'x', '[', 'i', ']', 'for', 'i', 'in', 'I', ')', ')', 'model', '.', 'addCons', '(', 'quicksum', '(', 'x', '[', 'i', ']', 'for', 'i', 'in', 'I', ')', '==', '1', ')', 'model', '.', 'addCons', '(', 'rhoaux', '==', '(', 'alpha', '-', 'rho', ')', '*', '(', '1', '/', 'phi_inv', '(', 'beta', ')', ')', ')', '#todo', 'model', '.', 'addCons', '(', 'quicksum', '(', 'sigma', '[', 'i', ']', '**', '2', '*', 'x', '[', 'i', ']', '*', 'x', '[', 'i', ']', 'for', 'i', 'in', 'I', ')', '<=', 'rhoaux', '*', 'rhoaux', ')', 'model', '.', 'setObjective', '(', 'rho', ',', '"maximize"', ')', 'model', '.', 'data', '=', 'x', 'return', 'model'] | p_portfolio -- modified markowitz model for portfolio optimization.
Parameters:
- I: set of items
- sigma[i]: standard deviation of item i
- r[i]: revenue of item i
- alpha: acceptance threshold
- beta: desired confidence level
Returns a model, ready to be solved. | ['p_portfolio', '--', 'modified', 'markowitz', 'model', 'for', 'portfolio', 'optimization', '.', 'Parameters', ':', '-', 'I', ':', 'set', 'of', 'items', '-', 'sigma', '[', 'i', ']', ':', 'standard', 'deviation', 'of', 'item', 'i', '-', 'r', '[', 'i', ']', ':', 'revenue', 'of', 'item', 'i', '-', 'alpha', ':', 'acceptance', 'threshold', '-', 'beta', ':', 'desired', 'confidence', 'level', 'Returns', 'a', 'model', 'ready', 'to', 'be', 'solved', '.'] | train | https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/portfolio_soco.py#L27-L55 |
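A quick solve sketch for the builder above (toy data; assumes Model, quicksum and phi_inv from the same example script are in scope):

I = [1, 2, 3]
sigma = {1: 0.07, 2: 0.12, 3: 0.25}
r = {1: 1.01, 2: 1.05, 3: 1.08}

model = p_portfolio(I, sigma, r, alpha=0.95, beta=0.9)
model.optimize()

x = model.data
print("objective:", model.getObjVal())
print({i: round(model.getVal(x[i]), 3) for i in I})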
3,279 | roboogle/gtkmvc3 | gtkmvco/gtkmvc3/progen/model.py | ProgenModel.generate_project | def generate_project(self):
"""
Generate the whole project. Returns True if at least one
file has been generated, False otherwise."""
# checks needed properties
if not self.name or not self.destdir or \
not os.path.isdir(self.destdir):
raise ValueError("Empty or invalid property values: run with 'help' command")
_log("Generating project '%s'" % self.name)
_log("Destination directory is: '%s'" % self.destdir)
top = os.path.join(self.destdir, self.name)
src = os.path.join(top, self.src_name)
resources = os.path.join(top, self.res_name)
utils = os.path.join(src, "utils")
if self.complex:
models = os.path.join(src, "models")
ctrls = os.path.join(src, "ctrls")
views = os.path.join(src, "views")
else: models = ctrls = views = src
res = self.__generate_tree(top, src, resources, models, ctrls, views, utils)
res = self.__generate_classes(models, ctrls, views) or res
res = self.__mksrc(os.path.join(utils, "globals.py"), templates.glob) or res
if self.complex: self.templ.update({'model_import' : "from models.application import ApplModel",
'ctrl_import' : "from ctrls.application import ApplCtrl",
'view_import' : "from views.application import ApplView"})
else: self.templ.update({'model_import' : "from ApplModel import ApplModel",
'ctrl_import' : "from ApplCtrl import ApplCtrl",
'view_import' : "from ApplView import ApplView"})
res = self.__mksrc(os.path.join(top, "%s.py" % self.name), templates.main) or res
# builder file
if self.builder:
res = self.__generate_builder(resources) or res
if self.dist_gtkmvc3: res = self.__copy_framework(os.path.join(resources, "external")) or res
if not res: _log("No actions were taken")
else: _log("Done")
return res | python | def generate_project(self):
"""
Generate the whole project. Returns True if at least one
file has been generated, False otherwise."""
# checks needed properties
if not self.name or not self.destdir or \
not os.path.isdir(self.destdir):
raise ValueError("Empty or invalid property values: run with 'help' command")
_log("Generating project '%s'" % self.name)
_log("Destination directory is: '%s'" % self.destdir)
top = os.path.join(self.destdir, self.name)
src = os.path.join(top, self.src_name)
resources = os.path.join(top, self.res_name)
utils = os.path.join(src, "utils")
if self.complex:
models = os.path.join(src, "models")
ctrls = os.path.join(src, "ctrls")
views = os.path.join(src, "views")
else: models = ctrls = views = src
res = self.__generate_tree(top, src, resources, models, ctrls, views, utils)
res = self.__generate_classes(models, ctrls, views) or res
res = self.__mksrc(os.path.join(utils, "globals.py"), templates.glob) or res
if self.complex: self.templ.update({'model_import' : "from models.application import ApplModel",
'ctrl_import' : "from ctrls.application import ApplCtrl",
'view_import' : "from views.application import ApplView"})
else: self.templ.update({'model_import' : "from ApplModel import ApplModel",
'ctrl_import' : "from ApplCtrl import ApplCtrl",
'view_import' : "from ApplView import ApplView"})
res = self.__mksrc(os.path.join(top, "%s.py" % self.name), templates.main) or res
# builder file
if self.builder:
res = self.__generate_builder(resources) or res
if self.dist_gtkmvc3: res = self.__copy_framework(os.path.join(resources, "external")) or res
if not res: _log("No actions were taken")
else: _log("Done")
return res | ['def', 'generate_project', '(', 'self', ')', ':', '# checks needed properties', 'if', 'not', 'self', '.', 'name', 'or', 'not', 'self', '.', 'destdir', 'or', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'self', '.', 'destdir', ')', ':', 'raise', 'ValueError', '(', '"Empty or invalid property values: run with \'help\' command"', ')', '_log', '(', '"Generating project \'%s\'"', '%', 'self', '.', 'name', ')', '_log', '(', '"Destination directory is: \'%s\'"', '%', 'self', '.', 'destdir', ')', 'top', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'destdir', ',', 'self', '.', 'name', ')', 'src', '=', 'os', '.', 'path', '.', 'join', '(', 'top', ',', 'self', '.', 'src_name', ')', 'resources', '=', 'os', '.', 'path', '.', 'join', '(', 'top', ',', 'self', '.', 'res_name', ')', 'utils', '=', 'os', '.', 'path', '.', 'join', '(', 'src', ',', '"utils"', ')', 'if', 'self', '.', 'complex', ':', 'models', '=', 'os', '.', 'path', '.', 'join', '(', 'src', ',', '"models"', ')', 'ctrls', '=', 'os', '.', 'path', '.', 'join', '(', 'src', ',', '"ctrls"', ')', 'views', '=', 'os', '.', 'path', '.', 'join', '(', 'src', ',', '"views"', ')', 'else', ':', 'models', '=', 'ctrls', '=', 'views', '=', 'src', 'res', '=', 'self', '.', '__generate_tree', '(', 'top', ',', 'src', ',', 'resources', ',', 'models', ',', 'ctrls', ',', 'views', ',', 'utils', ')', 'res', '=', 'self', '.', '__generate_classes', '(', 'models', ',', 'ctrls', ',', 'views', ')', 'or', 'res', 'res', '=', 'self', '.', '__mksrc', '(', 'os', '.', 'path', '.', 'join', '(', 'utils', ',', '"globals.py"', ')', ',', 'templates', '.', 'glob', ')', 'or', 'res', 'if', 'self', '.', 'complex', ':', 'self', '.', 'templ', '.', 'update', '(', '{', "'model_import'", ':', '"from models.application import ApplModel"', ',', "'ctrl_import'", ':', '"from ctrls.application import ApplCtrl"', ',', "'view_import'", ':', '"from views.application import ApplView"', '}', ')', 'else', ':', 'self', '.', 'templ', '.', 'update', '(', '{', "'model_import'", ':', '"from ApplModel import ApplModel"', ',', "'ctrl_import'", ':', '"from ApplCtrl import ApplCtrl"', ',', "'view_import'", ':', '"from ApplView import ApplView"', '}', ')', 'res', '=', 'self', '.', '__mksrc', '(', 'os', '.', 'path', '.', 'join', '(', 'top', ',', '"%s.py"', '%', 'self', '.', 'name', ')', ',', 'templates', '.', 'main', ')', 'or', 'res', '# builder file', 'if', 'self', '.', 'builder', ':', 'res', '=', 'self', '.', '__generate_builder', '(', 'resources', ')', 'or', 'res', 'if', 'self', '.', 'dist_gtkmvc3', ':', 'res', '=', 'self', '.', '__copy_framework', '(', 'os', '.', 'path', '.', 'join', '(', 'resources', ',', '"external"', ')', ')', 'or', 'res', 'if', 'not', 'res', ':', '_log', '(', '"No actions were taken"', ')', 'else', ':', '_log', '(', '"Done"', ')', 'return', 'res'] | Generate the whole project. Returns True if at least one
file has been generated, False otherwise. | ['Generate', 'the', 'whole', 'project', '.', 'Returns', 'True', 'if', 'at', 'least', 'one', 'file', 'has', 'been', 'generated', 'False', 'otherwise', '.'] | train | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/progen/model.py#L97-L141 |
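A hedged usage sketch for the generate_project row above. The attribute names are taken from the code in the row; the ability to construct `ProgenModel` with no arguments and the chosen values are assumptions, not documented gtkmvc3 behaviour.

```python
# Hedged sketch: drive ProgenModel from a script instead of the progen tool.
# Attribute names (name, destdir, complex, builder, ...) come from the row
# above; the bare constructor call and the values are editorial assumptions.
from gtkmvc3.progen.model import ProgenModel

proj = ProgenModel()
proj.name = "myapp"       # top-level package/directory name
proj.destdir = "/tmp"     # must already exist, otherwise ValueError is raised
proj.complex = True       # split sources into models/, ctrls/ and views/
proj.builder = True       # also emit the GtkBuilder resource file

if proj.generate_project():
    print("skeleton written to /tmp/myapp")
else:
    print("no actions were taken")
```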
3,280 | dfujim/bdata | bdata/bdata.py | bdata._get_asym_comb | def _get_asym_comb(self,d):
"""
Find the combined asymmetry for slr runs. Elegant 4-counter method.
"""
# get data
d0 = d[0]; d1 = d[2]; d2 = d[1]; d3 = d[3]
# pre-calcs
r_denom = d0*d3
r_denom[r_denom==0] = np.nan
r = np.sqrt((d1*d2/r_denom))
r[r==-1] = np.nan
# combined asymmetry
asym_comb = (r-1)/(r+1)
# check for div by zero
d0[d0==0] = np.nan
d1[d1==0] = np.nan
d2[d2==0] = np.nan
d3[d3==0] = np.nan
# error in combined asymmetry
asym_comb_err = r*np.sqrt(1/d1 + 1/d0 + 1/d3 + 1/d2)/np.square(r+1)
# replace nan with zero
asym_comb[np.isnan(asym_comb)] = 0.
asym_comb_err[np.isnan(asym_comb_err)] = 0.
return [asym_comb,asym_comb_err] | python | def _get_asym_comb(self,d):
"""
Find the combined asymmetry for slr runs. Elegant 4-counter method.
"""
# get data
d0 = d[0]; d1 = d[2]; d2 = d[1]; d3 = d[3]
# pre-calcs
r_denom = d0*d3
r_denom[r_denom==0] = np.nan
r = np.sqrt((d1*d2/r_denom))
r[r==-1] = np.nan
# combined asymmetry
asym_comb = (r-1)/(r+1)
# check for div by zero
d0[d0==0] = np.nan
d1[d1==0] = np.nan
d2[d2==0] = np.nan
d3[d3==0] = np.nan
# error in combined asymmetry
asym_comb_err = r*np.sqrt(1/d1 + 1/d0 + 1/d3 + 1/d2)/np.square(r+1)
# replace nan with zero
asym_comb[np.isnan(asym_comb)] = 0.
asym_comb_err[np.isnan(asym_comb_err)] = 0.
return [asym_comb,asym_comb_err] | ['def', '_get_asym_comb', '(', 'self', ',', 'd', ')', ':', '# get data', 'd0', '=', 'd', '[', '0', ']', 'd1', '=', 'd', '[', '2', ']', 'd2', '=', 'd', '[', '1', ']', 'd3', '=', 'd', '[', '3', ']', '# pre-calcs', 'r_denom', '=', 'd0', '*', 'd3', 'r_denom', '[', 'r_denom', '==', '0', ']', '=', 'np', '.', 'nan', 'r', '=', 'np', '.', 'sqrt', '(', '(', 'd1', '*', 'd2', '/', 'r_denom', ')', ')', 'r', '[', 'r', '==', '-', '1', ']', '=', 'np', '.', 'nan', '# combined asymmetry', 'asym_comb', '=', '(', 'r', '-', '1', ')', '/', '(', 'r', '+', '1', ')', '# check for div by zero', 'd0', '[', 'd0', '==', '0', ']', '=', 'np', '.', 'nan', 'd1', '[', 'd1', '==', '0', ']', '=', 'np', '.', 'nan', 'd2', '[', 'd2', '==', '0', ']', '=', 'np', '.', 'nan', 'd3', '[', 'd3', '==', '0', ']', '=', 'np', '.', 'nan', '# error in combined asymmetry', 'asym_comb_err', '=', 'r', '*', 'np', '.', 'sqrt', '(', '1', '/', 'd1', '+', '1', '/', 'd0', '+', '1', '/', 'd3', '+', '1', '/', 'd2', ')', '/', 'np', '.', 'square', '(', 'r', '+', '1', ')', '# replace nan with zero ', 'asym_comb', '[', 'np', '.', 'isnan', '(', 'asym_comb', ')', ']', '=', '0.', 'asym_comb_err', '[', 'np', '.', 'isnan', '(', 'asym_comb_err', ')', ']', '=', '0.', 'return', '[', 'asym_comb', ',', 'asym_comb_err', ']'] | Find the combined asymmetry for slr runs. Elegant 4-counter method. | ['Find', 'the', 'combined', 'asymmetry', 'for', 'slr', 'runs', '.', 'Elegant', '4', '-', 'counter', 'method', '.'] | train | https://github.com/dfujim/bdata/blob/86af7b091e5cc167d2b9a3146953da347cc38614/bdata/bdata.py#L580-L610 |
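A minimal numeric check of the four-counter combination in the row above. The arithmetic mirrors `_get_asym_comb` verbatim, including the `d[0], d[2], d[1], d[3]` re-ordering; only the toy count values are invented.

```python
# Toy check of the combined-asymmetry arithmetic from _get_asym_comb above.
# Counter histograms are invented; the formulas follow the row exactly.
import numpy as np

d = np.array([[100., 110.],   # counter 0
              [ 95.,  98.],   # counter 1
              [105., 100.],   # counter 2
              [ 98., 102.]])  # counter 3

d0, d1, d2, d3 = d[0], d[2], d[1], d[3]      # same re-ordering as the source
r = np.sqrt((d1 * d2) / (d0 * d3))
asym = (r - 1) / (r + 1)                     # combined asymmetry per time bin
err = r * np.sqrt(1/d0 + 1/d1 + 1/d2 + 1/d3) / np.square(r + 1)
print(asym)
print(err)                                   # its statistical error
```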
3,281 | watson-developer-cloud/python-sdk | ibm_watson/speech_to_text_v1.py | SpeechToTextV1.recognize | def recognize(self,
audio,
model=None,
language_customization_id=None,
acoustic_customization_id=None,
base_model_version=None,
customization_weight=None,
inactivity_timeout=None,
keywords=None,
keywords_threshold=None,
max_alternatives=None,
word_alternatives_threshold=None,
word_confidence=None,
timestamps=None,
profanity_filter=None,
smart_formatting=None,
speaker_labels=None,
customization_id=None,
grammar_name=None,
redaction=None,
content_type=None,
**kwargs):
"""
Recognize audio.
Sends audio and returns transcription results for a recognition request. You can
pass a maximum of 100 MB and a minimum of 100 bytes of audio with a request. The
service automatically detects the endianness of the incoming audio and, for audio
that includes multiple channels, downmixes the audio to one-channel mono during
transcoding. The method returns only final results; to enable interim results, use
the WebSocket API.
**See also:** [Making a basic HTTP
request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-basic).
### Streaming mode
For requests to transcribe live audio as it becomes available, you must set the
`Transfer-Encoding` header to `chunked` to use streaming mode. In streaming mode,
the service closes the connection (status code 408) if it does not receive at
least 15 seconds of audio (including silence) in any 30-second period. The service
also closes the connection (status code 400) if it detects no speech for
`inactivity_timeout` seconds of streaming audio; use the `inactivity_timeout`
parameter to change the default of 30 seconds.
**See also:**
* [Audio
transmission](https://cloud.ibm.com/docs/services/speech-to-text/input.html#transmission)
*
[Timeouts](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts)
### Audio formats (content types)
The service accepts audio in the following formats (MIME types).
* For formats that are labeled **Required**, you must use the `Content-Type`
header with the request to specify the format of the audio.
* For all other formats, you can omit the `Content-Type` header or specify
`application/octet-stream` with the header to have the service automatically
detect the format of the audio. (With the `curl` command, you can specify either
`\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.)
Where indicated, the format that you specify must include the sampling rate and
can optionally include the number of channels and the endianness of the audio.
* `audio/alaw` (**Required.** Specify the sampling rate (`rate`) of the audio.)
* `audio/basic` (**Required.** Use only with narrowband models.)
* `audio/flac`
* `audio/g729` (Use only with narrowband models.)
* `audio/l16` (**Required.** Specify the sampling rate (`rate`) and optionally the
number of channels (`channels`) and endianness (`endianness`) of the audio.)
* `audio/mp3`
* `audio/mpeg`
* `audio/mulaw` (**Required.** Specify the sampling rate (`rate`) of the audio.)
* `audio/ogg` (The service automatically detects the codec of the input audio.)
* `audio/ogg;codecs=opus`
* `audio/ogg;codecs=vorbis`
* `audio/wav` (Provide audio with a maximum of nine channels.)
* `audio/webm` (The service automatically detects the codec of the input audio.)
* `audio/webm;codecs=opus`
* `audio/webm;codecs=vorbis`
The sampling rate of the audio must match the sampling rate of the model for the
recognition request: for broadband models, at least 16 kHz; for narrowband models,
at least 8 kHz. If the sampling rate of the audio is higher than the minimum
required rate, the service down-samples the audio to the appropriate rate. If the
sampling rate of the audio is lower than the minimum required rate, the request
fails.
**See also:** [Audio
formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html).
### Multipart speech recognition
**Note:** The Watson SDKs do not support multipart speech recognition.
The HTTP `POST` method of the service also supports multipart speech recognition.
With multipart requests, you pass all audio data as multipart form data. You
specify some parameters as request headers and query parameters, but you pass JSON
metadata as form data to control most aspects of the transcription.
The multipart approach is intended for use with browsers for which JavaScript is
disabled or when the parameters used with the request are greater than the 8 KB
limit imposed by most HTTP servers and proxies. You can encounter this limit, for
example, if you want to spot a very large number of keywords.
**See also:** [Making a multipart HTTP
request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-multi).
:param file audio: The audio to transcribe.
:param str model: The identifier of the model that is to be used for the
recognition request. See [Languages and
models](https://cloud.ibm.com/docs/services/speech-to-text/models.html).
:param str language_customization_id: The customization ID (GUID) of a custom
language model that is to be used with the recognition request. The base model of
the specified custom language model must match the model specified with the
`model` parameter. You must make the request with credentials for the instance of
the service that owns the custom model. By default, no custom language model is
used. See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
**Note:** Use this parameter instead of the deprecated `customization_id`
parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom
acoustic model that is to be used with the recognition request. The base model of
the specified custom acoustic model must match the model specified with the
`model` parameter. You must make the request with credentials for the instance of
the service that owns the custom model. By default, no custom acoustic model is
used. See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
:param str base_model_version: The version of the specified base model that is to
be used with recognition request. Multiple versions of a base model can exist when
a model is updated for internal improvements. The parameter is intended primarily
for use with custom models that have been upgraded for a new base model. The
default value depends on whether the parameter is used with or without a custom
model. See [Base model
version](https://cloud.ibm.com/docs/services/speech-to-text/input.html#version).
:param float customization_weight: If you specify the customization ID (GUID) of a
custom language model with the recognition request, the customization weight tells
the service how much weight to give to words from the custom language model
compared to those from the base model for the current request.
Specify a value between 0.0 and 1.0. Unless a different customization weight was
specified for the custom model when it was trained, the default value is 0.3. A
customization weight that you specify overrides a weight that was specified when
the custom model was trained.
The default value yields the best performance in general. Assign a higher value if
your audio makes frequent use of OOV words from the custom model. Use caution when
setting the weight: a higher value can improve the accuracy of phrases from the
custom model's domain, but it can negatively affect performance on non-domain
phrases.
See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
:param int inactivity_timeout: The time in seconds after which, if only silence
(no speech) is detected in streaming audio, the connection is closed with a 400
error. The parameter is useful for stopping audio submission from a live
microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity
timeout](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts-inactivity).
:param list[str] keywords: An array of keyword strings to spot in the audio. Each
keyword string can include one or more string tokens. Keywords are spotted only in
the final results, not in interim hypotheses. If you specify any keywords, you
must also specify a keywords threshold. You can spot a maximum of 1000 keywords.
Omit the parameter or specify an empty array if you do not need to spot keywords.
See [Keyword
spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting).
:param float keywords_threshold: A confidence value that is the lower bound for
spotting a keyword. A word is considered to match a keyword if its confidence is
greater than or equal to the threshold. Specify a probability between 0.0 and 1.0.
If you specify a threshold, you must also specify one or more keywords. The
service performs no keyword spotting if you omit either parameter. See [Keyword
spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting).
:param int max_alternatives: The maximum number of alternative transcripts that
the service is to return. By default, the service returns a single transcript. If
you specify a value of `0`, the service uses the default value, `1`. See [Maximum
alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#max_alternatives).
:param float word_alternatives_threshold: A confidence value that is the lower
bound for identifying a hypothesis as a possible word alternative (also known as
\"Confusion Networks\"). An alternative word is considered if its confidence is
greater than or equal to the threshold. Specify a probability between 0.0 and 1.0.
By default, the service computes no alternative words. See [Word
alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_alternatives).
:param bool word_confidence: If `true`, the service returns a confidence measure
in the range of 0.0 to 1.0 for each word. By default, the service returns no word
confidence scores. See [Word
confidence](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_confidence).
:param bool timestamps: If `true`, the service returns time alignment for each
word. By default, no timestamps are returned. See [Word
timestamps](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_timestamps).
:param bool profanity_filter: If `true`, the service filters profanity from all
output except for keyword results by replacing inappropriate words with a series
of asterisks. Set the parameter to `false` to return results with no censoring.
Applies to US English transcription only. See [Profanity
filtering](https://cloud.ibm.com/docs/services/speech-to-text/output.html#profanity_filter).
:param bool smart_formatting: If `true`, the service converts dates, times, series
of digits and numbers, phone numbers, currency values, and internet addresses into
more readable, conventional representations in the final transcript of a
recognition request. For US English, the service also converts certain keyword
strings to punctuation symbols. By default, the service performs no smart
formatting.
**Note:** Applies to US English, Japanese, and Spanish transcription only.
See [Smart
formatting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#smart_formatting).
:param bool speaker_labels: If `true`, the response includes labels that identify
which words were spoken by which participants in a multi-person exchange. By
default, the service returns no speaker labels. Setting `speaker_labels` to `true`
forces the `timestamps` parameter to be `true`, regardless of whether you specify
`false` for the parameter.
**Note:** Applies to US English, Japanese, and Spanish transcription only. To
determine whether a language model supports speaker labels, you can also use the
**Get a model** method and check that the attribute `speaker_labels` is set to
`true`.
See [Speaker
labels](https://cloud.ibm.com/docs/services/speech-to-text/output.html#speaker_labels).
:param str customization_id: **Deprecated.** Use the `language_customization_id`
parameter to specify the customization ID (GUID) of a custom language model that
is to be used with the recognition request. Do not specify both parameters with a
request.
:param str grammar_name: The name of a grammar that is to be used with the
recognition request. If you specify a grammar, you must also use the
`language_customization_id` parameter to specify the name of the custom language
model for which the grammar is defined. The service recognizes only strings that
are recognized by the specified grammar; it does not recognize other custom words
from the model's words resource. See
[Grammars](https://cloud.ibm.com/docs/services/speech-to-text/input.html#grammars-input).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from
final transcripts. The feature redacts any number that has three or more
consecutive digits by replacing each digit with an `X` character. It is intended
to redact sensitive numeric data, such as credit card numbers. By default, the
service performs no redaction.
When you enable redaction, the service automatically enables smart formatting,
regardless of whether you explicitly disable that feature. To ensure maximum
security, the service also disables keyword spotting (ignores the `keywords` and
`keywords_threshold` parameters) and returns only a single final transcript
(forces the `max_alternatives` parameter to be `1`).
**Note:** Applies to US English, Japanese, and Korean transcription only.
See [Numeric
redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param str content_type: The format (MIME type) of the audio. For more information
about specifying an audio format, see **Audio formats (content types)** in the
method description.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if audio is None:
raise ValueError('audio must be provided')
headers = {'Content-Type': content_type}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'recognize')
headers.update(sdk_headers)
params = {
'model': model,
'language_customization_id': language_customization_id,
'acoustic_customization_id': acoustic_customization_id,
'base_model_version': base_model_version,
'customization_weight': customization_weight,
'inactivity_timeout': inactivity_timeout,
'keywords': self._convert_list(keywords),
'keywords_threshold': keywords_threshold,
'max_alternatives': max_alternatives,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': timestamps,
'profanity_filter': profanity_filter,
'smart_formatting': smart_formatting,
'speaker_labels': speaker_labels,
'customization_id': customization_id,
'grammar_name': grammar_name,
'redaction': redaction
}
data = audio
url = '/v1/recognize'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
data=data,
accept_json=True)
return response | python | def recognize(self,
audio,
model=None,
language_customization_id=None,
acoustic_customization_id=None,
base_model_version=None,
customization_weight=None,
inactivity_timeout=None,
keywords=None,
keywords_threshold=None,
max_alternatives=None,
word_alternatives_threshold=None,
word_confidence=None,
timestamps=None,
profanity_filter=None,
smart_formatting=None,
speaker_labels=None,
customization_id=None,
grammar_name=None,
redaction=None,
content_type=None,
**kwargs):
"""
Recognize audio.
Sends audio and returns transcription results for a recognition request. You can
pass a maximum of 100 MB and a minimum of 100 bytes of audio with a request. The
service automatically detects the endianness of the incoming audio and, for audio
that includes multiple channels, downmixes the audio to one-channel mono during
transcoding. The method returns only final results; to enable interim results, use
the WebSocket API.
**See also:** [Making a basic HTTP
request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-basic).
### Streaming mode
For requests to transcribe live audio as it becomes available, you must set the
`Transfer-Encoding` header to `chunked` to use streaming mode. In streaming mode,
the service closes the connection (status code 408) if it does not receive at
least 15 seconds of audio (including silence) in any 30-second period. The service
also closes the connection (status code 400) if it detects no speech for
`inactivity_timeout` seconds of streaming audio; use the `inactivity_timeout`
parameter to change the default of 30 seconds.
**See also:**
* [Audio
transmission](https://cloud.ibm.com/docs/services/speech-to-text/input.html#transmission)
*
[Timeouts](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts)
### Audio formats (content types)
The service accepts audio in the following formats (MIME types).
* For formats that are labeled **Required**, you must use the `Content-Type`
header with the request to specify the format of the audio.
* For all other formats, you can omit the `Content-Type` header or specify
`application/octet-stream` with the header to have the service automatically
detect the format of the audio. (With the `curl` command, you can specify either
`\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.)
Where indicated, the format that you specify must include the sampling rate and
can optionally include the number of channels and the endianness of the audio.
* `audio/alaw` (**Required.** Specify the sampling rate (`rate`) of the audio.)
* `audio/basic` (**Required.** Use only with narrowband models.)
* `audio/flac`
* `audio/g729` (Use only with narrowband models.)
* `audio/l16` (**Required.** Specify the sampling rate (`rate`) and optionally the
number of channels (`channels`) and endianness (`endianness`) of the audio.)
* `audio/mp3`
* `audio/mpeg`
* `audio/mulaw` (**Required.** Specify the sampling rate (`rate`) of the audio.)
* `audio/ogg` (The service automatically detects the codec of the input audio.)
* `audio/ogg;codecs=opus`
* `audio/ogg;codecs=vorbis`
* `audio/wav` (Provide audio with a maximum of nine channels.)
* `audio/webm` (The service automatically detects the codec of the input audio.)
* `audio/webm;codecs=opus`
* `audio/webm;codecs=vorbis`
The sampling rate of the audio must match the sampling rate of the model for the
recognition request: for broadband models, at least 16 kHz; for narrowband models,
at least 8 kHz. If the sampling rate of the audio is higher than the minimum
required rate, the service down-samples the audio to the appropriate rate. If the
sampling rate of the audio is lower than the minimum required rate, the request
fails.
**See also:** [Audio
formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html).
### Multipart speech recognition
**Note:** The Watson SDKs do not support multipart speech recognition.
The HTTP `POST` method of the service also supports multipart speech recognition.
With multipart requests, you pass all audio data as multipart form data. You
specify some parameters as request headers and query parameters, but you pass JSON
metadata as form data to control most aspects of the transcription.
The multipart approach is intended for use with browsers for which JavaScript is
disabled or when the parameters used with the request are greater than the 8 KB
limit imposed by most HTTP servers and proxies. You can encounter this limit, for
example, if you want to spot a very large number of keywords.
**See also:** [Making a multipart HTTP
request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-multi).
:param file audio: The audio to transcribe.
:param str model: The identifier of the model that is to be used for the
recognition request. See [Languages and
models](https://cloud.ibm.com/docs/services/speech-to-text/models.html).
:param str language_customization_id: The customization ID (GUID) of a custom
language model that is to be used with the recognition request. The base model of
the specified custom language model must match the model specified with the
`model` parameter. You must make the request with credentials for the instance of
the service that owns the custom model. By default, no custom language model is
used. See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
**Note:** Use this parameter instead of the deprecated `customization_id`
parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom
acoustic model that is to be used with the recognition request. The base model of
the specified custom acoustic model must match the model specified with the
`model` parameter. You must make the request with credentials for the instance of
the service that owns the custom model. By default, no custom acoustic model is
used. See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
:param str base_model_version: The version of the specified base model that is to
be used with recognition request. Multiple versions of a base model can exist when
a model is updated for internal improvements. The parameter is intended primarily
for use with custom models that have been upgraded for a new base model. The
default value depends on whether the parameter is used with or without a custom
model. See [Base model
version](https://cloud.ibm.com/docs/services/speech-to-text/input.html#version).
:param float customization_weight: If you specify the customization ID (GUID) of a
custom language model with the recognition request, the customization weight tells
the service how much weight to give to words from the custom language model
compared to those from the base model for the current request.
Specify a value between 0.0 and 1.0. Unless a different customization weight was
specified for the custom model when it was trained, the default value is 0.3. A
customization weight that you specify overrides a weight that was specified when
the custom model was trained.
The default value yields the best performance in general. Assign a higher value if
your audio makes frequent use of OOV words from the custom model. Use caution when
setting the weight: a higher value can improve the accuracy of phrases from the
custom model's domain, but it can negatively affect performance on non-domain
phrases.
See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
:param int inactivity_timeout: The time in seconds after which, if only silence
(no speech) is detected in streaming audio, the connection is closed with a 400
error. The parameter is useful for stopping audio submission from a live
microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity
timeout](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts-inactivity).
:param list[str] keywords: An array of keyword strings to spot in the audio. Each
keyword string can include one or more string tokens. Keywords are spotted only in
the final results, not in interim hypotheses. If you specify any keywords, you
must also specify a keywords threshold. You can spot a maximum of 1000 keywords.
Omit the parameter or specify an empty array if you do not need to spot keywords.
See [Keyword
spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting).
:param float keywords_threshold: A confidence value that is the lower bound for
spotting a keyword. A word is considered to match a keyword if its confidence is
greater than or equal to the threshold. Specify a probability between 0.0 and 1.0.
If you specify a threshold, you must also specify one or more keywords. The
service performs no keyword spotting if you omit either parameter. See [Keyword
spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting).
:param int max_alternatives: The maximum number of alternative transcripts that
the service is to return. By default, the service returns a single transcript. If
you specify a value of `0`, the service uses the default value, `1`. See [Maximum
alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#max_alternatives).
:param float word_alternatives_threshold: A confidence value that is the lower
bound for identifying a hypothesis as a possible word alternative (also known as
\"Confusion Networks\"). An alternative word is considered if its confidence is
greater than or equal to the threshold. Specify a probability between 0.0 and 1.0.
By default, the service computes no alternative words. See [Word
alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_alternatives).
:param bool word_confidence: If `true`, the service returns a confidence measure
in the range of 0.0 to 1.0 for each word. By default, the service returns no word
confidence scores. See [Word
confidence](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_confidence).
:param bool timestamps: If `true`, the service returns time alignment for each
word. By default, no timestamps are returned. See [Word
timestamps](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_timestamps).
:param bool profanity_filter: If `true`, the service filters profanity from all
output except for keyword results by replacing inappropriate words with a series
of asterisks. Set the parameter to `false` to return results with no censoring.
Applies to US English transcription only. See [Profanity
filtering](https://cloud.ibm.com/docs/services/speech-to-text/output.html#profanity_filter).
:param bool smart_formatting: If `true`, the service converts dates, times, series
of digits and numbers, phone numbers, currency values, and internet addresses into
more readable, conventional representations in the final transcript of a
recognition request. For US English, the service also converts certain keyword
strings to punctuation symbols. By default, the service performs no smart
formatting.
**Note:** Applies to US English, Japanese, and Spanish transcription only.
See [Smart
formatting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#smart_formatting).
:param bool speaker_labels: If `true`, the response includes labels that identify
which words were spoken by which participants in a multi-person exchange. By
default, the service returns no speaker labels. Setting `speaker_labels` to `true`
forces the `timestamps` parameter to be `true`, regardless of whether you specify
`false` for the parameter.
**Note:** Applies to US English, Japanese, and Spanish transcription only. To
determine whether a language model supports speaker labels, you can also use the
**Get a model** method and check that the attribute `speaker_labels` is set to
`true`.
See [Speaker
labels](https://cloud.ibm.com/docs/services/speech-to-text/output.html#speaker_labels).
:param str customization_id: **Deprecated.** Use the `language_customization_id`
parameter to specify the customization ID (GUID) of a custom language model that
is to be used with the recognition request. Do not specify both parameters with a
request.
:param str grammar_name: The name of a grammar that is to be used with the
recognition request. If you specify a grammar, you must also use the
`language_customization_id` parameter to specify the name of the custom language
model for which the grammar is defined. The service recognizes only strings that
are recognized by the specified grammar; it does not recognize other custom words
from the model's words resource. See
[Grammars](https://cloud.ibm.com/docs/services/speech-to-text/input.html#grammars-input).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from
final transcripts. The feature redacts any number that has three or more
consecutive digits by replacing each digit with an `X` character. It is intended
to redact sensitive numeric data, such as credit card numbers. By default, the
service performs no redaction.
When you enable redaction, the service automatically enables smart formatting,
regardless of whether you explicitly disable that feature. To ensure maximum
security, the service also disables keyword spotting (ignores the `keywords` and
`keywords_threshold` parameters) and returns only a single final transcript
(forces the `max_alternatives` parameter to be `1`).
**Note:** Applies to US English, Japanese, and Korean transcription only.
See [Numeric
redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param str content_type: The format (MIME type) of the audio. For more information
about specifying an audio format, see **Audio formats (content types)** in the
method description.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if audio is None:
raise ValueError('audio must be provided')
headers = {'Content-Type': content_type}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'recognize')
headers.update(sdk_headers)
params = {
'model': model,
'language_customization_id': language_customization_id,
'acoustic_customization_id': acoustic_customization_id,
'base_model_version': base_model_version,
'customization_weight': customization_weight,
'inactivity_timeout': inactivity_timeout,
'keywords': self._convert_list(keywords),
'keywords_threshold': keywords_threshold,
'max_alternatives': max_alternatives,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': timestamps,
'profanity_filter': profanity_filter,
'smart_formatting': smart_formatting,
'speaker_labels': speaker_labels,
'customization_id': customization_id,
'grammar_name': grammar_name,
'redaction': redaction
}
data = audio
url = '/v1/recognize'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
data=data,
accept_json=True)
return response | ['def', 'recognize', '(', 'self', ',', 'audio', ',', 'model', '=', 'None', ',', 'language_customization_id', '=', 'None', ',', 'acoustic_customization_id', '=', 'None', ',', 'base_model_version', '=', 'None', ',', 'customization_weight', '=', 'None', ',', 'inactivity_timeout', '=', 'None', ',', 'keywords', '=', 'None', ',', 'keywords_threshold', '=', 'None', ',', 'max_alternatives', '=', 'None', ',', 'word_alternatives_threshold', '=', 'None', ',', 'word_confidence', '=', 'None', ',', 'timestamps', '=', 'None', ',', 'profanity_filter', '=', 'None', ',', 'smart_formatting', '=', 'None', ',', 'speaker_labels', '=', 'None', ',', 'customization_id', '=', 'None', ',', 'grammar_name', '=', 'None', ',', 'redaction', '=', 'None', ',', 'content_type', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'audio', 'is', 'None', ':', 'raise', 'ValueError', '(', "'audio must be provided'", ')', 'headers', '=', '{', "'Content-Type'", ':', 'content_type', '}', 'if', "'headers'", 'in', 'kwargs', ':', 'headers', '.', 'update', '(', 'kwargs', '.', 'get', '(', "'headers'", ')', ')', 'sdk_headers', '=', 'get_sdk_headers', '(', "'speech_to_text'", ',', "'V1'", ',', "'recognize'", ')', 'headers', '.', 'update', '(', 'sdk_headers', ')', 'params', '=', '{', "'model'", ':', 'model', ',', "'language_customization_id'", ':', 'language_customization_id', ',', "'acoustic_customization_id'", ':', 'acoustic_customization_id', ',', "'base_model_version'", ':', 'base_model_version', ',', "'customization_weight'", ':', 'customization_weight', ',', "'inactivity_timeout'", ':', 'inactivity_timeout', ',', "'keywords'", ':', 'self', '.', '_convert_list', '(', 'keywords', ')', ',', "'keywords_threshold'", ':', 'keywords_threshold', ',', "'max_alternatives'", ':', 'max_alternatives', ',', "'word_alternatives_threshold'", ':', 'word_alternatives_threshold', ',', "'word_confidence'", ':', 'word_confidence', ',', "'timestamps'", ':', 'timestamps', ',', "'profanity_filter'", ':', 'profanity_filter', ',', "'smart_formatting'", ':', 'smart_formatting', ',', "'speaker_labels'", ':', 'speaker_labels', ',', "'customization_id'", ':', 'customization_id', ',', "'grammar_name'", ':', 'grammar_name', ',', "'redaction'", ':', 'redaction', '}', 'data', '=', 'audio', 'url', '=', "'/v1/recognize'", 'response', '=', 'self', '.', 'request', '(', 'method', '=', "'POST'", ',', 'url', '=', 'url', ',', 'headers', '=', 'headers', ',', 'params', '=', 'params', ',', 'data', '=', 'data', ',', 'accept_json', '=', 'True', ')', 'return', 'response'] | Recognize audio.
Sends audio and returns transcription results for a recognition request. You can
pass a maximum of 100 MB and a minimum of 100 bytes of audio with a request. The
service automatically detects the endianness of the incoming audio and, for audio
that includes multiple channels, downmixes the audio to one-channel mono during
transcoding. The method returns only final results; to enable interim results, use
the WebSocket API.
**See also:** [Making a basic HTTP
request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-basic).
### Streaming mode
For requests to transcribe live audio as it becomes available, you must set the
`Transfer-Encoding` header to `chunked` to use streaming mode. In streaming mode,
the service closes the connection (status code 408) if it does not receive at
least 15 seconds of audio (including silence) in any 30-second period. The service
also closes the connection (status code 400) if it detects no speech for
`inactivity_timeout` seconds of streaming audio; use the `inactivity_timeout`
parameter to change the default of 30 seconds.
**See also:**
* [Audio
transmission](https://cloud.ibm.com/docs/services/speech-to-text/input.html#transmission)
*
[Timeouts](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts)
### Audio formats (content types)
The service accepts audio in the following formats (MIME types).
* For formats that are labeled **Required**, you must use the `Content-Type`
header with the request to specify the format of the audio.
* For all other formats, you can omit the `Content-Type` header or specify
`application/octet-stream` with the header to have the service automatically
detect the format of the audio. (With the `curl` command, you can specify either
`\"Content-Type:\"` or `\"Content-Type: application/octet-stream\"`.)
Where indicated, the format that you specify must include the sampling rate and
can optionally include the number of channels and the endianness of the audio.
* `audio/alaw` (**Required.** Specify the sampling rate (`rate`) of the audio.)
* `audio/basic` (**Required.** Use only with narrowband models.)
* `audio/flac`
* `audio/g729` (Use only with narrowband models.)
* `audio/l16` (**Required.** Specify the sampling rate (`rate`) and optionally the
number of channels (`channels`) and endianness (`endianness`) of the audio.)
* `audio/mp3`
* `audio/mpeg`
* `audio/mulaw` (**Required.** Specify the sampling rate (`rate`) of the audio.)
* `audio/ogg` (The service automatically detects the codec of the input audio.)
* `audio/ogg;codecs=opus`
* `audio/ogg;codecs=vorbis`
* `audio/wav` (Provide audio with a maximum of nine channels.)
* `audio/webm` (The service automatically detects the codec of the input audio.)
* `audio/webm;codecs=opus`
* `audio/webm;codecs=vorbis`
The sampling rate of the audio must match the sampling rate of the model for the
recognition request: for broadband models, at least 16 kHz; for narrowband models,
at least 8 kHz. If the sampling rate of the audio is higher than the minimum
required rate, the service down-samples the audio to the appropriate rate. If the
sampling rate of the audio is lower than the minimum required rate, the request
fails.
**See also:** [Audio
formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html).
### Multipart speech recognition
**Note:** The Watson SDKs do not support multipart speech recognition.
The HTTP `POST` method of the service also supports multipart speech recognition.
With multipart requests, you pass all audio data as multipart form data. You
specify some parameters as request headers and query parameters, but you pass JSON
metadata as form data to control most aspects of the transcription.
The multipart approach is intended for use with browsers for which JavaScript is
disabled or when the parameters used with the request are greater than the 8 KB
limit imposed by most HTTP servers and proxies. You can encounter this limit, for
example, if you want to spot a very large number of keywords.
**See also:** [Making a multipart HTTP
request](https://cloud.ibm.com/docs/services/speech-to-text/http.html#HTTP-multi).
:param file audio: The audio to transcribe.
:param str model: The identifier of the model that is to be used for the
recognition request. See [Languages and
models](https://cloud.ibm.com/docs/services/speech-to-text/models.html).
:param str language_customization_id: The customization ID (GUID) of a custom
language model that is to be used with the recognition request. The base model of
the specified custom language model must match the model specified with the
`model` parameter. You must make the request with credentials for the instance of
the service that owns the custom model. By default, no custom language model is
used. See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
**Note:** Use this parameter instead of the deprecated `customization_id`
parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom
acoustic model that is to be used with the recognition request. The base model of
the specified custom acoustic model must match the model specified with the
`model` parameter. You must make the request with credentials for the instance of
the service that owns the custom model. By default, no custom acoustic model is
used. See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
:param str base_model_version: The version of the specified base model that is to
be used with recognition request. Multiple versions of a base model can exist when
a model is updated for internal improvements. The parameter is intended primarily
for use with custom models that have been upgraded for a new base model. The
default value depends on whether the parameter is used with or without a custom
model. See [Base model
version](https://cloud.ibm.com/docs/services/speech-to-text/input.html#version).
:param float customization_weight: If you specify the customization ID (GUID) of a
custom language model with the recognition request, the customization weight tells
the service how much weight to give to words from the custom language model
compared to those from the base model for the current request.
Specify a value between 0.0 and 1.0. Unless a different customization weight was
specified for the custom model when it was trained, the default value is 0.3. A
customization weight that you specify overrides a weight that was specified when
the custom model was trained.
The default value yields the best performance in general. Assign a higher value if
your audio makes frequent use of OOV words from the custom model. Use caution when
setting the weight: a higher value can improve the accuracy of phrases from the
custom model's domain, but it can negatively affect performance on non-domain
phrases.
See [Custom
models](https://cloud.ibm.com/docs/services/speech-to-text/input.html#custom-input).
:param int inactivity_timeout: The time in seconds after which, if only silence
(no speech) is detected in streaming audio, the connection is closed with a 400
error. The parameter is useful for stopping audio submission from a live
microphone when a user simply walks away. Use `-1` for infinity. See [Inactivity
timeout](https://cloud.ibm.com/docs/services/speech-to-text/input.html#timeouts-inactivity).
:param list[str] keywords: An array of keyword strings to spot in the audio. Each
keyword string can include one or more string tokens. Keywords are spotted only in
the final results, not in interim hypotheses. If you specify any keywords, you
must also specify a keywords threshold. You can spot a maximum of 1000 keywords.
Omit the parameter or specify an empty array if you do not need to spot keywords.
See [Keyword
spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting).
:param float keywords_threshold: A confidence value that is the lower bound for
spotting a keyword. A word is considered to match a keyword if its confidence is
greater than or equal to the threshold. Specify a probability between 0.0 and 1.0.
If you specify a threshold, you must also specify one or more keywords. The
service performs no keyword spotting if you omit either parameter. See [Keyword
spotting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#keyword_spotting).
:param int max_alternatives: The maximum number of alternative transcripts that
the service is to return. By default, the service returns a single transcript. If
you specify a value of `0`, the service uses the default value, `1`. See [Maximum
alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#max_alternatives).
:param float word_alternatives_threshold: A confidence value that is the lower
bound for identifying a hypothesis as a possible word alternative (also known as
\"Confusion Networks\"). An alternative word is considered if its confidence is
greater than or equal to the threshold. Specify a probability between 0.0 and 1.0.
By default, the service computes no alternative words. See [Word
alternatives](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_alternatives).
:param bool word_confidence: If `true`, the service returns a confidence measure
in the range of 0.0 to 1.0 for each word. By default, the service returns no word
confidence scores. See [Word
confidence](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_confidence).
:param bool timestamps: If `true`, the service returns time alignment for each
word. By default, no timestamps are returned. See [Word
timestamps](https://cloud.ibm.com/docs/services/speech-to-text/output.html#word_timestamps).
:param bool profanity_filter: If `true`, the service filters profanity from all
output except for keyword results by replacing inappropriate words with a series
of asterisks. Set the parameter to `false` to return results with no censoring.
Applies to US English transcription only. See [Profanity
filtering](https://cloud.ibm.com/docs/services/speech-to-text/output.html#profanity_filter).
:param bool smart_formatting: If `true`, the service converts dates, times, series
of digits and numbers, phone numbers, currency values, and internet addresses into
more readable, conventional representations in the final transcript of a
recognition request. For US English, the service also converts certain keyword
strings to punctuation symbols. By default, the service performs no smart
formatting.
**Note:** Applies to US English, Japanese, and Spanish transcription only.
See [Smart
formatting](https://cloud.ibm.com/docs/services/speech-to-text/output.html#smart_formatting).
:param bool speaker_labels: If `true`, the response includes labels that identify
which words were spoken by which participants in a multi-person exchange. By
default, the service returns no speaker labels. Setting `speaker_labels` to `true`
forces the `timestamps` parameter to be `true`, regardless of whether you specify
`false` for the parameter.
**Note:** Applies to US English, Japanese, and Spanish transcription only. To
determine whether a language model supports speaker labels, you can also use the
**Get a model** method and check that the attribute `speaker_labels` is set to
`true`.
See [Speaker
labels](https://cloud.ibm.com/docs/services/speech-to-text/output.html#speaker_labels).
:param str customization_id: **Deprecated.** Use the `language_customization_id`
parameter to specify the customization ID (GUID) of a custom language model that
is to be used with the recognition request. Do not specify both parameters with a
request.
:param str grammar_name: The name of a grammar that is to be used with the
recognition request. If you specify a grammar, you must also use the
`language_customization_id` parameter to specify the name of the custom language
model for which the grammar is defined. The service recognizes only strings that
are recognized by the specified grammar; it does not recognize other custom words
from the model's words resource. See
[Grammars](https://cloud.ibm.com/docs/services/speech-to-text/input.html#grammars-input).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from
final transcripts. The feature redacts any number that has three or more
consecutive digits by replacing each digit with an `X` character. It is intended
to redact sensitive numeric data, such as credit card numbers. By default, the
service performs no redaction.
When you enable redaction, the service automatically enables smart formatting,
regardless of whether you explicitly disable that feature. To ensure maximum
security, the service also disables keyword spotting (ignores the `keywords` and
`keywords_threshold` parameters) and returns only a single final transcript
(forces the `max_alternatives` parameter to be `1`).
**Note:** Applies to US English, Japanese, and Korean transcription only.
See [Numeric
redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param str content_type: The format (MIME type) of the audio. For more information
about specifying an audio format, see **Audio formats (content types)** in the
method description.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | ['Recognize', 'audio', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L171-L438 |
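A hedged usage sketch for the SpeechToTextV1.recognize row above. The keyword arguments passed to `recognize()` match the signature shown in the row; the authentication keyword (`iam_apikey`) and the service URL differ between SDK releases, so treat both as assumptions and check the README of the pinned version before copying.

```python
# Hedged usage sketch for SpeechToTextV1.recognize above.
# Assumption: this SDK build accepts IAM key auth via iam_apikey and the
# watsonplatform.net endpoint; recognize() parameters follow the row's signature.
from ibm_watson import SpeechToTextV1

stt = SpeechToTextV1(
    iam_apikey='YOUR_APIKEY',
    url='https://stream.watsonplatform.net/speech-to-text/api')

with open('audio-file.flac', 'rb') as audio_file:
    response = stt.recognize(
        audio=audio_file,
        content_type='audio/flac',       # optional for FLAC, required for e.g. audio/l16
        model='en-US_BroadbandModel',
        timestamps=True,
        max_alternatives=2)

result = response.get_result()           # DetailedResponse -> parsed JSON dict
print(result['results'][0]['alternatives'][0]['transcript'])
```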
3,282 | saltstack/salt | salt/minion.py | SyndicManager._return_pub_syndic | def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False | python | def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False | ['def', '_return_pub_syndic', '(', 'self', ',', 'values', ',', 'master_id', '=', 'None', ')', ':', 'func', '=', "'_return_pub_multi'", 'for', 'master', ',', 'syndic_future', 'in', 'self', '.', 'iter_master_options', '(', 'master_id', ')', ':', 'if', 'not', 'syndic_future', '.', 'done', '(', ')', 'or', 'syndic_future', '.', 'exception', '(', ')', ':', 'log', '.', 'error', '(', "'Unable to call %s on %s, that syndic is not connected'", ',', 'func', ',', 'master', ')', 'continue', 'future', ',', 'data', '=', 'self', '.', 'pub_futures', '.', 'get', '(', 'master', ',', '(', 'None', ',', 'None', ')', ')', 'if', 'future', 'is', 'not', 'None', ':', 'if', 'not', 'future', '.', 'done', '(', ')', ':', 'if', 'master', '==', 'master_id', ':', '# Targeted master previous send not done yet, call again later', 'return', 'False', 'else', ':', '# Fallback master is busy, try the next one', 'continue', 'elif', 'future', '.', 'exception', '(', ')', ':', '# Previous execution on this master returned an error', 'log', '.', 'error', '(', "'Unable to call %s on %s, trying another...'", ',', 'func', ',', 'master', ')', 'self', '.', '_mark_master_dead', '(', 'master', ')', 'del', 'self', '.', 'pub_futures', '[', 'master', ']', '# Add not sent data to the delayed list and try the next master', 'self', '.', 'delayed', '.', 'extend', '(', 'data', ')', 'continue', 'future', '=', 'getattr', '(', 'syndic_future', '.', 'result', '(', ')', ',', 'func', ')', '(', 'values', ',', "'_syndic_return'", ',', 'timeout', '=', 'self', '.', '_return_retry_timer', '(', ')', ',', 'sync', '=', 'False', ')', 'self', '.', 'pub_futures', '[', 'master', ']', '=', '(', 'future', ',', 'values', ')', 'return', 'True', "# Loop done and didn't exit: wasn't sent, try again later", 'return', 'False'] | Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for | ['Wrapper', 'to', 'call', 'the', '_return_pub_multi', 'a', 'syndic', 'best', 'effort', 'to', 'get', 'the', 'one', 'you', 'asked', 'for'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3255-L3295 |
3,283 | tomi77/django-extra-tools | django_extra_tools/wsgi_request.py | get_client_ip | def get_client_ip(request):
"""
Get the client IP from the request
"""
# set the default value of the ip to be the REMOTE_ADDR if available
# else None
ip = request.META.get('REMOTE_ADDR')
# try to get the first non-proxy ip (not a private ip) from the
# HTTP_X_FORWARDED_FOR
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
proxies = x_forwarded_for.split(',')
# remove the private ips from the beginning
proxies = [proxy for proxy in proxies
if not proxy.startswith(settings.PRIVATE_IPS_PREFIX)]
# take the first ip which is not a private one (of a proxy)
if len(proxies) > 0:
ip = proxies[0]
return ip | python | def get_client_ip(request):
"""
Get the client IP from the request
"""
# set the default value of the ip to be the REMOTE_ADDR if available
# else None
ip = request.META.get('REMOTE_ADDR')
# try to get the first non-proxy ip (not a private ip) from the
# HTTP_X_FORWARDED_FOR
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
proxies = x_forwarded_for.split(',')
# remove the private ips from the beginning
proxies = [proxy for proxy in proxies
if not proxy.startswith(settings.PRIVATE_IPS_PREFIX)]
# take the first ip which is not a private one (of a proxy)
if len(proxies) > 0:
ip = proxies[0]
return ip | ['def', 'get_client_ip', '(', 'request', ')', ':', '# set the default value of the ip to be the REMOTE_ADDR if available', '# else None', 'ip', '=', 'request', '.', 'META', '.', 'get', '(', "'REMOTE_ADDR'", ')', '# try to get the first non-proxy ip (not a private ip) from the', '# HTTP_X_FORWARDED_FOR', 'x_forwarded_for', '=', 'request', '.', 'META', '.', 'get', '(', "'HTTP_X_FORWARDED_FOR'", ')', 'if', 'x_forwarded_for', ':', 'proxies', '=', 'x_forwarded_for', '.', 'split', '(', "','", ')', '# remove the private ips from the beginning', 'proxies', '=', '[', 'proxy', 'for', 'proxy', 'in', 'proxies', 'if', 'not', 'proxy', '.', 'startswith', '(', 'settings', '.', 'PRIVATE_IPS_PREFIX', ')', ']', '# take the first ip which is not a private one (of a proxy)', 'if', 'len', '(', 'proxies', ')', '>', '0', ':', 'ip', '=', 'proxies', '[', '0', ']', 'return', 'ip'] | Get the client IP from the request | ['Get', 'the', 'client', 'IP', 'from', 'the', 'request'] | train | https://github.com/tomi77/django-extra-tools/blob/fb6d226bc5cf3fc0eb8abe61a512c3f5c7dcc8a8/django_extra_tools/wsgi_request.py#L4-L23 |
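Illustrative usage sketch for get_client_ip above (not part of the dataset row; assumes a configured Django settings module in which PRIVATE_IPS_PREFIX is something like ('10.', '172.', '192.168.')):
from django.test import RequestFactory
from django_extra_tools.wsgi_request import get_client_ip

# X-Forwarded-For lists proxy hops; the first non-private entry is returned.
request = RequestFactory().get(
    '/',
    HTTP_X_FORWARDED_FOR='10.0.0.5,203.0.113.7',
    REMOTE_ADDR='10.0.0.1',
)
print(get_client_ip(request))  # expected: '203.0.113.7'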
3,284 | mryellow/maze_explorer | mazeexp/engine/world.py | WorldLayer.update_sensors | def update_sensors(self):
"""
Check path for each sensor and record wall proximity
"""
assert isinstance(self.player.cshape.center, eu.Vector2)
pos = self.player.cshape.center
a = math.radians(self.player.rotation)
for sensor in self.player.sensors:
sensor.sensed_type = 'wall'
rad = a + sensor.angle
dis = min(self.distance_to_tile(pos, rad), sensor.max_range)
# Keep state of sensed range, `dis` is from center
sensor.proximity = dis - self.player.radius
# Check for collisions with items
# List of items within sensor range, do for each sensor's range
if self.mode['items'] and len(self.mode['items']) > 0:
nears = self.collman.ranked_objs_near(self.player, sensor.max_range)
for near in nears:
other, other_dis = near
# Distances are from edge to edge see #2
other_dis += self.player.radius
# Skip if further
if other_dis > dis:
continue
# Determine if within `fov`
other_rad = math.atan2(other.x - self.player.x, other.y - self.player.y)
# Round to bearing within one revolution
other_rad = other_rad % (math.pi*2)
round_rad = rad % (math.pi*2)
if abs(other_rad - round_rad) < (sensor.fov/2):
sensor.proximity = other_dis - self.player.radius
sensor.sensed_type = other.btype
dis = other_dis
# Redirect sensor lines
# TODO: Decouple into view rendering
end = pos.copy()
end.x += math.sin(rad) * dis
end.y += math.cos(rad) * dis
sensor.line.start = pos
sensor.line.end = end
sensor.line.color = self.player.palette[sensor.sensed_type] + (int(255*0.5),) | python | def update_sensors(self):
"""
Check path for each sensor and record wall proximity
"""
assert isinstance(self.player.cshape.center, eu.Vector2)
pos = self.player.cshape.center
a = math.radians(self.player.rotation)
for sensor in self.player.sensors:
sensor.sensed_type = 'wall'
rad = a + sensor.angle
dis = min(self.distance_to_tile(pos, rad), sensor.max_range)
# Keep state of sensed range, `dis` is from center
sensor.proximity = dis - self.player.radius
# Check for collisions with items
# List of items within sensor range, do for each sensor's range
if self.mode['items'] and len(self.mode['items']) > 0:
nears = self.collman.ranked_objs_near(self.player, sensor.max_range)
for near in nears:
other, other_dis = near
# Distances are from edge to edge see #2
other_dis += self.player.radius
# Skip if further
if other_dis > dis:
continue
# Determine if within `fov`
other_rad = math.atan2(other.x - self.player.x, other.y - self.player.y)
# Round to bearing within one revolution
other_rad = other_rad % (math.pi*2)
round_rad = rad % (math.pi*2)
if abs(other_rad - round_rad) < (sensor.fov/2):
sensor.proximity = other_dis - self.player.radius
sensor.sensed_type = other.btype
dis = other_dis
# Redirect sensor lines
# TODO: Decouple into view rendering
end = pos.copy()
end.x += math.sin(rad) * dis
end.y += math.cos(rad) * dis
sensor.line.start = pos
sensor.line.end = end
sensor.line.color = self.player.palette[sensor.sensed_type] + (int(255*0.5),) | ['def', 'update_sensors', '(', 'self', ')', ':', 'assert', 'isinstance', '(', 'self', '.', 'player', '.', 'cshape', '.', 'center', ',', 'eu', '.', 'Vector2', ')', 'pos', '=', 'self', '.', 'player', '.', 'cshape', '.', 'center', 'a', '=', 'math', '.', 'radians', '(', 'self', '.', 'player', '.', 'rotation', ')', 'for', 'sensor', 'in', 'self', '.', 'player', '.', 'sensors', ':', 'sensor', '.', 'sensed_type', '=', "'wall'", 'rad', '=', 'a', '+', 'sensor', '.', 'angle', 'dis', '=', 'min', '(', 'self', '.', 'distance_to_tile', '(', 'pos', ',', 'rad', ')', ',', 'sensor', '.', 'max_range', ')', '# Keep state of sensed range, `dis` is from center', 'sensor', '.', 'proximity', '=', 'dis', '-', 'self', '.', 'player', '.', 'radius', '# Check for collisions with items', "# List of items within sensor range, do for each sensor's range", 'if', 'self', '.', 'mode', '[', "'items'", ']', 'and', 'len', '(', 'self', '.', 'mode', '[', "'items'", ']', ')', '>', '0', ':', 'nears', '=', 'self', '.', 'collman', '.', 'ranked_objs_near', '(', 'self', '.', 'player', ',', 'sensor', '.', 'max_range', ')', 'for', 'near', 'in', 'nears', ':', 'other', ',', 'other_dis', '=', 'near', '# Distances are from edge to edge see #2', 'other_dis', '+=', 'self', '.', 'player', '.', 'radius', '# Skip if further', 'if', 'other_dis', '>', 'dis', ':', 'continue', '# Determine if within `fov`', 'other_rad', '=', 'math', '.', 'atan2', '(', 'other', '.', 'x', '-', 'self', '.', 'player', '.', 'x', ',', 'other', '.', 'y', '-', 'self', '.', 'player', '.', 'y', ')', '# Round to bearing within one revolution', 'other_rad', '=', 'other_rad', '%', '(', 'math', '.', 'pi', '*', '2', ')', 'round_rad', '=', 'rad', '%', '(', 'math', '.', 'pi', '*', '2', ')', 'if', 'abs', '(', 'other_rad', '-', 'round_rad', ')', '<', '(', 'sensor', '.', 'fov', '/', '2', ')', ':', 'sensor', '.', 'proximity', '=', 'other_dis', '-', 'self', '.', 'player', '.', 'radius', 'sensor', '.', 'sensed_type', '=', 'other', '.', 'btype', 'dis', '=', 'other_dis', '# Redirect sensor lines', '# TODO: Decouple into view rendering', 'end', '=', 'pos', '.', 'copy', '(', ')', 'end', '.', 'x', '+=', 'math', '.', 'sin', '(', 'rad', ')', '*', 'dis', 'end', '.', 'y', '+=', 'math', '.', 'cos', '(', 'rad', ')', '*', 'dis', 'sensor', '.', 'line', '.', 'start', '=', 'pos', 'sensor', '.', 'line', '.', 'end', '=', 'end', 'sensor', '.', 'line', '.', 'color', '=', 'self', '.', 'player', '.', 'palette', '[', 'sensor', '.', 'sensed_type', ']', '+', '(', 'int', '(', '255', '*', '0.5', ')', ',', ')'] | Check path for each sensor and record wall proximity | ['Check', 'path', 'for', 'each', 'sensor', 'and', 'record', 'wall', 'proximity'] | train | https://github.com/mryellow/maze_explorer/blob/ab8a25ccd05105d2fe57e0213d690cfc07e45827/mazeexp/engine/world.py#L355-L400 |
3,285 | RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/schema.py | columns_equal | def columns_equal(a: Column, b: Column) -> bool:
"""
Are two SQLAlchemy columns equal? Checks based on:
- column ``name``
- column ``type`` (see :func:`column_types_equal`)
- ``nullable``
"""
return (
a.name == b.name and
column_types_equal(a.type, b.type) and
a.nullable == b.nullable
) | python | def columns_equal(a: Column, b: Column) -> bool:
"""
Are two SQLAlchemy columns equal? Checks based on:
- column ``name``
- column ``type`` (see :func:`column_types_equal`)
- ``nullable``
"""
return (
a.name == b.name and
column_types_equal(a.type, b.type) and
a.nullable == b.nullable
) | ['def', 'columns_equal', '(', 'a', ':', 'Column', ',', 'b', ':', 'Column', ')', '->', 'bool', ':', 'return', '(', 'a', '.', 'name', '==', 'b', '.', 'name', 'and', 'column_types_equal', '(', 'a', '.', 'type', ',', 'b', '.', 'type', ')', 'and', 'a', '.', 'nullable', '==', 'b', '.', 'nullable', ')'] | Are two SQLAlchemy columns are equal? Checks based on:
- column ``name``
- column ``type`` (see :func:`column_types_equal`)
- ``nullable`` | ['Are', 'two', 'SQLAlchemy', 'columns', 'are', 'equal?', 'Checks', 'based', 'on', ':'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/schema.py#L1101-L1113 |
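Illustrative sketch of the comparison columns_equal performs (not part of the dataset row; assumes cardinal_pythonlib and SQLAlchemy are installed):
from sqlalchemy import Column, Integer, String
from cardinal_pythonlib.sqlalchemy.schema import columns_equal

a = Column('n_visits', Integer, nullable=False)
b = Column('n_visits', Integer, nullable=False)
c = Column('n_visits', String(10), nullable=False)
print(columns_equal(a, b))  # True: same name, comparable type, same nullability
print(columns_equal(a, c))  # False: Integer vs. String(10)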
3,286 | pmacosta/pexdoc | pexdoc/pinspect.py | Callables.get_callable_from_line | def get_callable_from_line(self, module_file, lineno):
"""Get the callable that the line number belongs to."""
module_name = _get_module_name_from_fname(module_file)
if module_name not in self._modules_dict:
self.trace([module_file])
ret = None
# Sort callables by starting line number
iobj = sorted(self._modules_dict[module_name], key=lambda x: x["code_id"][1])
for value in iobj:
if value["code_id"][1] <= lineno <= value["last_lineno"]:
ret = value["name"]
elif value["code_id"][1] > lineno:
break
return ret if ret else module_name | python | def get_callable_from_line(self, module_file, lineno):
"""Get the callable that the line number belongs to."""
module_name = _get_module_name_from_fname(module_file)
if module_name not in self._modules_dict:
self.trace([module_file])
ret = None
# Sort callables by starting line number
iobj = sorted(self._modules_dict[module_name], key=lambda x: x["code_id"][1])
for value in iobj:
if value["code_id"][1] <= lineno <= value["last_lineno"]:
ret = value["name"]
elif value["code_id"][1] > lineno:
break
return ret if ret else module_name | ['def', 'get_callable_from_line', '(', 'self', ',', 'module_file', ',', 'lineno', ')', ':', 'module_name', '=', '_get_module_name_from_fname', '(', 'module_file', ')', 'if', 'module_name', 'not', 'in', 'self', '.', '_modules_dict', ':', 'self', '.', 'trace', '(', '[', 'module_file', ']', ')', 'ret', '=', 'None', '# Sort callables by starting line number', 'iobj', '=', 'sorted', '(', 'self', '.', '_modules_dict', '[', 'module_name', ']', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', '"code_id"', ']', '[', '1', ']', ')', 'for', 'value', 'in', 'iobj', ':', 'if', 'value', '[', '"code_id"', ']', '[', '1', ']', '<=', 'lineno', '<=', 'value', '[', '"last_lineno"', ']', ':', 'ret', '=', 'value', '[', '"name"', ']', 'elif', 'value', '[', '"code_id"', ']', '[', '1', ']', '>', 'lineno', ':', 'break', 'return', 'ret', 'if', 'ret', 'else', 'module_name'] | Get the callable that the line number belongs to. | ['Get', 'the', 'callable', 'that', 'the', 'line', 'number', 'belongs', 'to', '.'] | train | https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/pinspect.py#L512-L525 |
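Hypothetical usage sketch for get_callable_from_line above (the module path and line number are invented, not taken from the dataset row):
from pexdoc.pinspect import Callables

obj = Callables()
# Returns the name of the callable enclosing line 42 of the traced file,
# e.g. 'my_module.MyClass.my_method', or the module name if none matches.
print(obj.get_callable_from_line('./my_module.py', 42))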
3,287 | jf-parent/brome | brome/core/selector.py | Selector.resolve_selector | def resolve_selector(self):
"""Resolve the selector variable in place
"""
effective_selector_list = []
for current_selector in self._selector_list:
# INLINE SELECTOR
if self.get_type(current_selector) != 'selector_variable':
effective_selector_list.append(current_selector)
# SELECTOR VARIABLE
else:
# Make sure the proxy driver have a selector dictionary
if self.get_type(current_selector) == 'selector_variable':
if not BROME_CONFIG['selector_dict']:
raise Exception("""
You must provide a selector dictionary if you want
to use the selector variable type
""")
# Make sure that the selector dictionary
# contains the selector variable
if self._get_selector(current_selector) \
not in BROME_CONFIG['selector_dict']:
raise Exception("""
Cannot find the selector variable (%s)
in the selector dictionary
""" % self._get_selector(current_selector))
effective_selector = BROME_CONFIG['selector_dict'][self._get_selector(current_selector)] # noqa
if type(effective_selector) is dict:
current_browser_id = False
keys = [key for key in effective_selector.keys()
if key not in ['default', 'hr']]
for key in keys:
for target in key.split('|'):
try:
re.search(
target.lower(), self._pdriver.get_id().lower()
).group(0)
current_browser_id = key
except AttributeError:
pass
if current_browser_id:
effective_selector_list.append(
effective_selector.get(current_browser_id)
)
else:
effective_selector_list.append(
effective_selector.get('default')
)
else:
if self.get_type(effective_selector) in \
[value for key, value in SELECTOR_DICT.items()
if key != 'selector_variable']:
effective_selector_list.append(effective_selector)
else:
raise Exception("""
All selector need to start with either:
'nm:' (name), 'xp:' (xpath), 'cn:' (classname),
'id:' (id), 'cs:' (css), 'tn:' (tag name),
'lt:' (link text), 'pl:' (partial link text)
""")
return effective_selector_list | python | def resolve_selector(self):
"""Resolve the selector variable in place
"""
effective_selector_list = []
for current_selector in self._selector_list:
# INLINE SELECTOR
if self.get_type(current_selector) != 'selector_variable':
effective_selector_list.append(current_selector)
# SELECTOR VARIABLE
else:
# Make sure the proxy driver have a selector dictionary
if self.get_type(current_selector) == 'selector_variable':
if not BROME_CONFIG['selector_dict']:
raise Exception("""
You must provide a selector dictionary if you want
to use the selector variable type
""")
# Make sure that the selector dictionary
# contains the selector variable
if self._get_selector(current_selector) \
not in BROME_CONFIG['selector_dict']:
raise Exception("""
Cannot find the selector variable (%s)
in the selector dictionary
""" % self._get_selector(current_selector))
effective_selector = BROME_CONFIG['selector_dict'][self._get_selector(current_selector)] # noqa
if type(effective_selector) is dict:
current_browser_id = False
keys = [key for key in effective_selector.keys()
if key not in ['default', 'hr']]
for key in keys:
for target in key.split('|'):
try:
re.search(
target.lower(), self._pdriver.get_id().lower()
).group(0)
current_browser_id = key
except AttributeError:
pass
if current_browser_id:
effective_selector_list.append(
effective_selector.get(current_browser_id)
)
else:
effective_selector_list.append(
effective_selector.get('default')
)
else:
if self.get_type(effective_selector) in \
[value for key, value in SELECTOR_DICT.items()
if key != 'selector_variable']:
effective_selector_list.append(effective_selector)
else:
raise Exception("""
All selector need to start with either:
'nm:' (name), 'xp:' (xpath), 'cn:' (classname),
'id:' (id), 'cs:' (css), 'tn:' (tag name),
'lt:' (link text), 'pl:' (partial link text)
""")
return effective_selector_list | ['def', 'resolve_selector', '(', 'self', ')', ':', 'effective_selector_list', '=', '[', ']', 'for', 'current_selector', 'in', 'self', '.', '_selector_list', ':', '# INLINE SELECTOR', 'if', 'self', '.', 'get_type', '(', 'current_selector', ')', '!=', "'selector_variable'", ':', 'effective_selector_list', '.', 'append', '(', 'current_selector', ')', '# SELECTOR VARIABLE', 'else', ':', '# Make sure the proxy driver have a selector dictionary', 'if', 'self', '.', 'get_type', '(', 'current_selector', ')', '==', "'selector_variable'", ':', 'if', 'not', 'BROME_CONFIG', '[', "'selector_dict'", ']', ':', 'raise', 'Exception', '(', '"""\n You must provide a selector dictionary if you want\n to use the selector variable type\n """', ')', '# Make sure that the selector dictionary', '# contains the selector variable', 'if', 'self', '.', '_get_selector', '(', 'current_selector', ')', 'not', 'in', 'BROME_CONFIG', '[', "'selector_dict'", ']', ':', 'raise', 'Exception', '(', '"""\n Cannot find the selector variable (%s)\n in the selector dictionary\n """', '%', 'self', '.', '_get_selector', '(', 'current_selector', ')', ')', 'effective_selector', '=', 'BROME_CONFIG', '[', "'selector_dict'", ']', '[', 'self', '.', '_get_selector', '(', 'current_selector', ')', ']', '# noqa', 'if', 'type', '(', 'effective_selector', ')', 'is', 'dict', ':', 'current_browser_id', '=', 'False', 'keys', '=', '[', 'key', 'for', 'key', 'in', 'effective_selector', '.', 'keys', '(', ')', 'if', 'key', 'not', 'in', '[', "'default'", ',', "'hr'", ']', ']', 'for', 'key', 'in', 'keys', ':', 'for', 'target', 'in', 'key', '.', 'split', '(', "'|'", ')', ':', 'try', ':', 're', '.', 'search', '(', 'target', '.', 'lower', '(', ')', ',', 'self', '.', '_pdriver', '.', 'get_id', '(', ')', '.', 'lower', '(', ')', ')', '.', 'group', '(', '0', ')', 'current_browser_id', '=', 'key', 'except', 'AttributeError', ':', 'pass', 'if', 'current_browser_id', ':', 'effective_selector_list', '.', 'append', '(', 'effective_selector', '.', 'get', '(', 'current_browser_id', ')', ')', 'else', ':', 'effective_selector_list', '.', 'append', '(', 'effective_selector', '.', 'get', '(', "'default'", ')', ')', 'else', ':', 'if', 'self', '.', 'get_type', '(', 'effective_selector', ')', 'in', '[', 'value', 'for', 'key', ',', 'value', 'in', 'SELECTOR_DICT', '.', 'items', '(', ')', 'if', 'key', '!=', "'selector_variable'", ']', ':', 'effective_selector_list', '.', 'append', '(', 'effective_selector', ')', 'else', ':', 'raise', 'Exception', '(', '"""\n All selector need to start with either:\n \'nm:\' (name), \'xp:\' (xpath), \'cn:\' (classname),\n \'id:\' (id), \'cs:\' (css), \'tn:\' (tag name),\n \'lt:\' (link text), \'pl:\' (partial link text)\n """', ')', 'return', 'effective_selector_list'] | Resolve the selector variable in place | ['Resolve', 'the', 'selector', 'variable', 'in', 'place'] | train | https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/core/selector.py#L105-L174 |
3,288 | wright-group/WrightTools | WrightTools/data/_data.py | Data.print_tree | def print_tree(self, *, verbose=True):
"""Print a ascii-formatted tree representation of the data contents."""
print("{0} ({1})".format(self.natural_name, self.filepath))
self._print_branch("", depth=0, verbose=verbose) | python | def print_tree(self, *, verbose=True):
"""Print a ascii-formatted tree representation of the data contents."""
print("{0} ({1})".format(self.natural_name, self.filepath))
self._print_branch("", depth=0, verbose=verbose) | ['def', 'print_tree', '(', 'self', ',', '*', ',', 'verbose', '=', 'True', ')', ':', 'print', '(', '"{0} ({1})"', '.', 'format', '(', 'self', '.', 'natural_name', ',', 'self', '.', 'filepath', ')', ')', 'self', '.', '_print_branch', '(', '""', ',', 'depth', '=', '0', ',', 'verbose', '=', 'verbose', ')'] | Print a ascii-formatted tree representation of the data contents. | ['Print', 'a', 'ascii', '-', 'formatted', 'tree', 'representation', 'of', 'the', 'data', 'contents', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L1314-L1317 |
3,289 | MolSSI-BSE/basis_set_exchange | basis_set_exchange/cli/bsecurate_handlers.py | _bsecurate_cli_compare_basis_sets | def _bsecurate_cli_compare_basis_sets(args):
'''Handles compare-basis-sets subcommand'''
ret = curate.compare_basis_sets(args.basis1, args.basis2, args.version1, args.version2, args.uncontract_general,
args.data_dir, args.data_dir)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE" | python | def _bsecurate_cli_compare_basis_sets(args):
'''Handles compare-basis-sets subcommand'''
ret = curate.compare_basis_sets(args.basis1, args.basis2, args.version1, args.version2, args.uncontract_general,
args.data_dir, args.data_dir)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE" | ['def', '_bsecurate_cli_compare_basis_sets', '(', 'args', ')', ':', 'ret', '=', 'curate', '.', 'compare_basis_sets', '(', 'args', '.', 'basis1', ',', 'args', '.', 'basis2', ',', 'args', '.', 'version1', ',', 'args', '.', 'version2', ',', 'args', '.', 'uncontract_general', ',', 'args', '.', 'data_dir', ',', 'args', '.', 'data_dir', ')', 'if', 'ret', ':', 'return', '"No difference found"', 'else', ':', 'return', '"DIFFERENCES FOUND. SEE ABOVE"'] | Handles compare-basis-sets subcommand | ['Handles', 'compare', '-', 'basis', '-', 'sets', 'subcommand'] | train | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/cli/bsecurate_handlers.py#L51-L58 |
3,290 | DataBiosphere/toil | src/toil/leader.py | Leader.checkForDeadlocks | def checkForDeadlocks(self):
"""
Checks if the system is deadlocked running service jobs.
"""
totalRunningJobs = len(self.batchSystem.getRunningBatchJobIDs())
totalServicesIssued = self.serviceJobsIssued + self.preemptableServiceJobsIssued
# If there are no updated jobs and at least some jobs running
if totalServicesIssued >= totalRunningJobs and totalRunningJobs > 0:
serviceJobs = [x for x in list(self.jobBatchSystemIDToIssuedJob.keys()) if isinstance(self.jobBatchSystemIDToIssuedJob[x], ServiceJobNode)]
runningServiceJobs = set([x for x in serviceJobs if self.serviceManager.isRunning(self.jobBatchSystemIDToIssuedJob[x])])
assert len(runningServiceJobs) <= totalRunningJobs
# If all the running jobs are active services then we have a potential deadlock
if len(runningServiceJobs) == totalRunningJobs:
# We wait self.config.deadlockWait seconds before declaring the system deadlocked
if self.potentialDeadlockedJobs != runningServiceJobs:
self.potentialDeadlockedJobs = runningServiceJobs
self.potentialDeadlockTime = time.time()
elif time.time() - self.potentialDeadlockTime >= self.config.deadlockWait:
raise DeadlockException("The system is service deadlocked - all %d running jobs are active services" % totalRunningJobs)
else:
# We have observed non-service jobs running, so reset the potential deadlock
self.potentialDeadlockedJobs = set()
self.potentialDeadlockTime = 0
else:
# We have observed non-service jobs running, so reset the potential deadlock
self.potentialDeadlockedJobs = set()
self.potentialDeadlockTime = 0 | python | def checkForDeadlocks(self):
"""
Checks if the system is deadlocked running service jobs.
"""
totalRunningJobs = len(self.batchSystem.getRunningBatchJobIDs())
totalServicesIssued = self.serviceJobsIssued + self.preemptableServiceJobsIssued
# If there are no updated jobs and at least some jobs running
if totalServicesIssued >= totalRunningJobs and totalRunningJobs > 0:
serviceJobs = [x for x in list(self.jobBatchSystemIDToIssuedJob.keys()) if isinstance(self.jobBatchSystemIDToIssuedJob[x], ServiceJobNode)]
runningServiceJobs = set([x for x in serviceJobs if self.serviceManager.isRunning(self.jobBatchSystemIDToIssuedJob[x])])
assert len(runningServiceJobs) <= totalRunningJobs
# If all the running jobs are active services then we have a potential deadlock
if len(runningServiceJobs) == totalRunningJobs:
# We wait self.config.deadlockWait seconds before declaring the system deadlocked
if self.potentialDeadlockedJobs != runningServiceJobs:
self.potentialDeadlockedJobs = runningServiceJobs
self.potentialDeadlockTime = time.time()
elif time.time() - self.potentialDeadlockTime >= self.config.deadlockWait:
raise DeadlockException("The system is service deadlocked - all %d running jobs are active services" % totalRunningJobs)
else:
# We have observed non-service jobs running, so reset the potential deadlock
self.potentialDeadlockedJobs = set()
self.potentialDeadlockTime = 0
else:
# We have observed non-service jobs running, so reset the potential deadlock
self.potentialDeadlockedJobs = set()
self.potentialDeadlockTime = 0 | ['def', 'checkForDeadlocks', '(', 'self', ')', ':', 'totalRunningJobs', '=', 'len', '(', 'self', '.', 'batchSystem', '.', 'getRunningBatchJobIDs', '(', ')', ')', 'totalServicesIssued', '=', 'self', '.', 'serviceJobsIssued', '+', 'self', '.', 'preemptableServiceJobsIssued', '# If there are no updated jobs and at least some jobs running', 'if', 'totalServicesIssued', '>=', 'totalRunningJobs', 'and', 'totalRunningJobs', '>', '0', ':', 'serviceJobs', '=', '[', 'x', 'for', 'x', 'in', 'list', '(', 'self', '.', 'jobBatchSystemIDToIssuedJob', '.', 'keys', '(', ')', ')', 'if', 'isinstance', '(', 'self', '.', 'jobBatchSystemIDToIssuedJob', '[', 'x', ']', ',', 'ServiceJobNode', ')', ']', 'runningServiceJobs', '=', 'set', '(', '[', 'x', 'for', 'x', 'in', 'serviceJobs', 'if', 'self', '.', 'serviceManager', '.', 'isRunning', '(', 'self', '.', 'jobBatchSystemIDToIssuedJob', '[', 'x', ']', ')', ']', ')', 'assert', 'len', '(', 'runningServiceJobs', ')', '<=', 'totalRunningJobs', '# If all the running jobs are active services then we have a potential deadlock', 'if', 'len', '(', 'runningServiceJobs', ')', '==', 'totalRunningJobs', ':', '# We wait self.config.deadlockWait seconds before declaring the system deadlocked', 'if', 'self', '.', 'potentialDeadlockedJobs', '!=', 'runningServiceJobs', ':', 'self', '.', 'potentialDeadlockedJobs', '=', 'runningServiceJobs', 'self', '.', 'potentialDeadlockTime', '=', 'time', '.', 'time', '(', ')', 'elif', 'time', '.', 'time', '(', ')', '-', 'self', '.', 'potentialDeadlockTime', '>=', 'self', '.', 'config', '.', 'deadlockWait', ':', 'raise', 'DeadlockException', '(', '"The system is service deadlocked - all %d running jobs are active services"', '%', 'totalRunningJobs', ')', 'else', ':', '# We have observed non-service jobs running, so reset the potential deadlock', 'self', '.', 'potentialDeadlockedJobs', '=', 'set', '(', ')', 'self', '.', 'potentialDeadlockTime', '=', '0', 'else', ':', '# We have observed non-service jobs running, so reset the potential deadlock', 'self', '.', 'potentialDeadlockedJobs', '=', 'set', '(', ')', 'self', '.', 'potentialDeadlockTime', '=', '0'] | Checks if the system is deadlocked running service jobs. | ['Checks', 'if', 'the', 'system', 'is', 'deadlocked', 'running', 'service', 'jobs', '.'] | train | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L569-L596 |
3,291 | chaoss/grimoirelab-manuscripts | manuscripts/esquery.py | ElasticQuery.__get_query_filters | def __get_query_filters(cls, filters={}, inverse=False):
"""
Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}}
"""
query_filters = []
for name in filters:
if name[0] == '*' and not inverse:
# An inverse filter and not inverse mode
continue
if name[0] != '*' and inverse:
# A direct filter and inverse mode
continue
field_name = name[1:] if name[0] == '*' else name
params = {field_name: filters[name]}
# trying to use es_dsl only and not creating hard coded queries
query_filters.append(Q('match_phrase', **params))
return query_filters | python | def __get_query_filters(cls, filters={}, inverse=False):
"""
Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}}
"""
query_filters = []
for name in filters:
if name[0] == '*' and not inverse:
# An inverse filter and not inverse mode
continue
if name[0] != '*' and inverse:
# A direct filter and inverse mode
continue
field_name = name[1:] if name[0] == '*' else name
params = {field_name: filters[name]}
# trying to use es_dsl only and not creating hard coded queries
query_filters.append(Q('match_phrase', **params))
return query_filters | ['def', '__get_query_filters', '(', 'cls', ',', 'filters', '=', '{', '}', ',', 'inverse', '=', 'False', ')', ':', 'query_filters', '=', '[', ']', 'for', 'name', 'in', 'filters', ':', 'if', 'name', '[', '0', ']', '==', "'*'", 'and', 'not', 'inverse', ':', '# An inverse filter and not inverse mode', 'continue', 'if', 'name', '[', '0', ']', '!=', "'*'", 'and', 'inverse', ':', '# A direct filter and inverse mode', 'continue', 'field_name', '=', 'name', '[', '1', ':', ']', 'if', 'name', '[', '0', ']', '==', "'*'", 'else', 'name', 'params', '=', '{', 'field_name', ':', 'filters', '[', 'name', ']', '}', '# trying to use es_dsl only and not creating hard coded queries', 'query_filters', '.', 'append', '(', 'Q', '(', "'match_phrase'", ',', '*', '*', 'params', ')', ')', 'return', 'query_filters'] | Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}} | ['Convert', 'a', 'dict', 'with', 'the', 'filters', 'to', 'be', 'applied', '(', '{', 'name1', ':', 'value1', 'name2', ':', 'value2', '}', ')', 'to', 'a', 'list', 'of', 'query', 'objects', 'which', 'can', 'be', 'used', 'together', 'in', 'a', 'query', 'using', 'boolean', 'combination', 'logic', '.'] | train | https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts/esquery.py#L41-L68 |
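Sketch of the filter convention __get_query_filters implements, written directly against elasticsearch_dsl because the method is a private classmethod (the field names here are invented):
from elasticsearch_dsl import Q

filters = {'author_name': 'jane', '*origin': 'github'}  # a leading '*' marks an inverse filter
direct = [Q('match_phrase', **{k: v})
          for k, v in filters.items() if not k.startswith('*')]
inverse = [Q('match_phrase', **{k[1:]: v})
           for k, v in filters.items() if k.startswith('*')]
# direct  -> [MatchPhrase(author_name='jane')]
# inverse -> [MatchPhrase(origin='github')]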
3,292 | pyQode/pyqode.core | pyqode/core/modes/indenter.py | IndenterMode.indent_selection | def indent_selection(self, cursor):
"""
Indent selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
cursor.beginEditBlock()
nb_lines = len(cursor.selection().toPlainText().splitlines())
c = self.editor.textCursor()
if c.atBlockStart() and c.position() == c.selectionEnd():
nb_lines += 1
block = doc.findBlock(cursor.selectionStart())
i = 0
# indent every lines
while i < nb_lines:
nb_space_to_add = tab_len
cursor = QtGui.QTextCursor(block)
cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)
if self.editor.use_spaces_instead_of_tabs:
for _ in range(nb_space_to_add):
cursor.insertText(" ")
else:
cursor.insertText('\t')
block = block.next()
i += 1
cursor.endEditBlock() | python | def indent_selection(self, cursor):
"""
Indent selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
cursor.beginEditBlock()
nb_lines = len(cursor.selection().toPlainText().splitlines())
c = self.editor.textCursor()
if c.atBlockStart() and c.position() == c.selectionEnd():
nb_lines += 1
block = doc.findBlock(cursor.selectionStart())
i = 0
# indent every lines
while i < nb_lines:
nb_space_to_add = tab_len
cursor = QtGui.QTextCursor(block)
cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)
if self.editor.use_spaces_instead_of_tabs:
for _ in range(nb_space_to_add):
cursor.insertText(" ")
else:
cursor.insertText('\t')
block = block.next()
i += 1
cursor.endEditBlock() | ['def', 'indent_selection', '(', 'self', ',', 'cursor', ')', ':', 'doc', '=', 'self', '.', 'editor', '.', 'document', '(', ')', 'tab_len', '=', 'self', '.', 'editor', '.', 'tab_length', 'cursor', '.', 'beginEditBlock', '(', ')', 'nb_lines', '=', 'len', '(', 'cursor', '.', 'selection', '(', ')', '.', 'toPlainText', '(', ')', '.', 'splitlines', '(', ')', ')', 'c', '=', 'self', '.', 'editor', '.', 'textCursor', '(', ')', 'if', 'c', '.', 'atBlockStart', '(', ')', 'and', 'c', '.', 'position', '(', ')', '==', 'c', '.', 'selectionEnd', '(', ')', ':', 'nb_lines', '+=', '1', 'block', '=', 'doc', '.', 'findBlock', '(', 'cursor', '.', 'selectionStart', '(', ')', ')', 'i', '=', '0', '# indent every lines', 'while', 'i', '<', 'nb_lines', ':', 'nb_space_to_add', '=', 'tab_len', 'cursor', '=', 'QtGui', '.', 'QTextCursor', '(', 'block', ')', 'cursor', '.', 'movePosition', '(', 'cursor', '.', 'StartOfLine', ',', 'cursor', '.', 'MoveAnchor', ')', 'if', 'self', '.', 'editor', '.', 'use_spaces_instead_of_tabs', ':', 'for', '_', 'in', 'range', '(', 'nb_space_to_add', ')', ':', 'cursor', '.', 'insertText', '(', '" "', ')', 'else', ':', 'cursor', '.', 'insertText', '(', "'\\t'", ')', 'block', '=', 'block', '.', 'next', '(', ')', 'i', '+=', '1', 'cursor', '.', 'endEditBlock', '(', ')'] | Indent selected text
:param cursor: QTextCursor | ['Indent', 'selected', 'text'] | train | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/modes/indenter.py#L41-L68 |
3,293 | raiden-network/raiden | raiden/network/proxies/token_network_registry.py | TokenNetworkRegistry.get_token_network | def get_token_network(
self,
token_address: TokenAddress,
block_identifier: BlockSpecification = 'latest',
) -> Optional[Address]:
""" Return the token network address for the given token or None if
there is no corresponding address.
"""
if not isinstance(token_address, T_TargetAddress):
raise ValueError('token_address must be an address')
address = self.proxy.contract.functions.token_to_token_networks(
to_checksum_address(token_address),
).call(block_identifier=block_identifier)
address = to_canonical_address(address)
if is_same_address(address, NULL_ADDRESS):
return None
return address | python | def get_token_network(
self,
token_address: TokenAddress,
block_identifier: BlockSpecification = 'latest',
) -> Optional[Address]:
""" Return the token network address for the given token or None if
there is no corresponding address.
"""
if not isinstance(token_address, T_TargetAddress):
raise ValueError('token_address must be an address')
address = self.proxy.contract.functions.token_to_token_networks(
to_checksum_address(token_address),
).call(block_identifier=block_identifier)
address = to_canonical_address(address)
if is_same_address(address, NULL_ADDRESS):
return None
return address | ['def', 'get_token_network', '(', 'self', ',', 'token_address', ':', 'TokenAddress', ',', 'block_identifier', ':', 'BlockSpecification', '=', "'latest'", ',', ')', '->', 'Optional', '[', 'Address', ']', ':', 'if', 'not', 'isinstance', '(', 'token_address', ',', 'T_TargetAddress', ')', ':', 'raise', 'ValueError', '(', "'token_address must be an address'", ')', 'address', '=', 'self', '.', 'proxy', '.', 'contract', '.', 'functions', '.', 'token_to_token_networks', '(', 'to_checksum_address', '(', 'token_address', ')', ',', ')', '.', 'call', '(', 'block_identifier', '=', 'block_identifier', ')', 'address', '=', 'to_canonical_address', '(', 'address', ')', 'if', 'is_same_address', '(', 'address', ',', 'NULL_ADDRESS', ')', ':', 'return', 'None', 'return', 'address'] | Return the token network address for the given token or None if
there is no correspoding address. | ['Return', 'the', 'token', 'network', 'address', 'for', 'the', 'given', 'token', 'or', 'None', 'if', 'there', 'is', 'no', 'correspoding', 'address', '.'] | train | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/token_network_registry.py#L79-L98 |
3,294 | google/tangent | tangent/cfg.py | CFG.build_cfg | def build_cfg(cls, node):
"""Build a CFG for a function.
Args:
node: A function definition the body of which to analyze.
Returns:
A CFG object.
Raises:
TypeError: If the input is not a function definition.
"""
if not isinstance(node, gast.FunctionDef):
raise TypeError('input must be a function definition')
cfg = cls()
cfg.entry = Node(node.args)
cfg.head = [cfg.entry]
cfg.visit_statements(node.body)
cfg.exit = Node(None)
cfg.set_head(cfg.exit)
cfg.backlink(cfg.entry)
return cfg | python | def build_cfg(cls, node):
"""Build a CFG for a function.
Args:
node: A function definition the body of which to analyze.
Returns:
A CFG object.
Raises:
TypeError: If the input is not a function definition.
"""
if not isinstance(node, gast.FunctionDef):
raise TypeError('input must be a function definition')
cfg = cls()
cfg.entry = Node(node.args)
cfg.head = [cfg.entry]
cfg.visit_statements(node.body)
cfg.exit = Node(None)
cfg.set_head(cfg.exit)
cfg.backlink(cfg.entry)
return cfg | ['def', 'build_cfg', '(', 'cls', ',', 'node', ')', ':', 'if', 'not', 'isinstance', '(', 'node', ',', 'gast', '.', 'FunctionDef', ')', ':', 'raise', 'TypeError', '(', "'input must be a function definition'", ')', 'cfg', '=', 'cls', '(', ')', 'cfg', '.', 'entry', '=', 'Node', '(', 'node', '.', 'args', ')', 'cfg', '.', 'head', '=', '[', 'cfg', '.', 'entry', ']', 'cfg', '.', 'visit_statements', '(', 'node', '.', 'body', ')', 'cfg', '.', 'exit', '=', 'Node', '(', 'None', ')', 'cfg', '.', 'set_head', '(', 'cfg', '.', 'exit', ')', 'cfg', '.', 'backlink', '(', 'cfg', '.', 'entry', ')', 'return', 'cfg'] | Build a CFG for a function.
Args:
node: A function definition the body of which to analyze.
Returns:
A CFG object.
Raises:
TypeError: If the input is not a function definition. | ['Build', 'a', 'CFG', 'for', 'a', 'function', '.'] | train | https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/cfg.py#L87-L108 |
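Illustrative usage of CFG.build_cfg above (the parsed function body is invented); the method expects a gast.FunctionDef, so the source is parsed with gast rather than ast:
import gast
from tangent.cfg import CFG

tree = gast.parse("def f(x):\n    y = x + 1\n    return y")
cfg = CFG.build_cfg(tree.body[0])  # tree.body[0] is the gast.FunctionDef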
3,295 | apache/spark | python/pyspark/mllib/feature.py | IDF.fit | def fit(self, dataset):
"""
Computes the inverse document frequency.
:param dataset: an RDD of term frequency vectors
"""
if not isinstance(dataset, RDD):
raise TypeError("dataset should be an RDD of term frequency vectors")
jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
return IDFModel(jmodel) | python | def fit(self, dataset):
"""
Computes the inverse document frequency.
:param dataset: an RDD of term frequency vectors
"""
if not isinstance(dataset, RDD):
raise TypeError("dataset should be an RDD of term frequency vectors")
jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
return IDFModel(jmodel) | ['def', 'fit', '(', 'self', ',', 'dataset', ')', ':', 'if', 'not', 'isinstance', '(', 'dataset', ',', 'RDD', ')', ':', 'raise', 'TypeError', '(', '"dataset should be an RDD of term frequency vectors"', ')', 'jmodel', '=', 'callMLlibFunc', '(', '"fitIDF"', ',', 'self', '.', 'minDocFreq', ',', 'dataset', '.', 'map', '(', '_convert_to_vector', ')', ')', 'return', 'IDFModel', '(', 'jmodel', ')'] | Computes the inverse document frequency.
:param dataset: an RDD of term frequency vectors | ['Computes', 'the', 'inverse', 'document', 'frequency', '.'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L577-L586 |
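Minimal usage sketch for IDF.fit above (assumes an existing SparkContext named sc; the documents are invented):
from pyspark.mllib.feature import HashingTF, IDF

docs = sc.parallelize([["spark", "mllib"], ["spark", "idf", "tfidf"]])
tf = HashingTF().transform(docs)       # RDD of term-frequency vectors
tf.cache()
idf_model = IDF(minDocFreq=1).fit(tf)  # the fit() shown above
tfidf = idf_model.transform(tf)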
3,296 | glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/colorbar.py | _CoreColorBarVisual._update | def _update(self):
"""Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual
"""
x, y = self._pos
halfw, halfh = self._halfdim
# test that width and height are non-zero
if halfw <= 0:
raise ValueError("half-width must be positive and non-zero"
", not %s" % halfw)
if halfh <= 0:
raise ValueError("half-height must be positive and non-zero"
", not %s" % halfh)
# test that the given width and height is consistent
# with the orientation
if (self._orientation == "bottom" or self._orientation == "top"):
if halfw < halfh:
raise ValueError("half-width(%s) < half-height(%s) for"
"%s orientation,"
" expected half-width >= half-height" %
(halfw, halfh, self._orientation, ))
else: # orientation == left or orientation == right
if halfw > halfh:
raise ValueError("half-width(%s) > half-height(%s) for"
"%s orientation,"
" expected half-width <= half-height" %
(halfw, halfh, self._orientation, ))
# Set up the attributes that the shaders require
vertices = np.array([[x - halfw, y - halfh],
[x + halfw, y - halfh],
[x + halfw, y + halfh],
# tri 2
[x - halfw, y - halfh],
[x + halfw, y + halfh],
[x - halfw, y + halfh]],
dtype=np.float32)
self.shared_program['a_position'] = vertices | python | def _update(self):
"""Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual
"""
x, y = self._pos
halfw, halfh = self._halfdim
# test that width and height are non-zero
if halfw <= 0:
raise ValueError("half-width must be positive and non-zero"
", not %s" % halfw)
if halfh <= 0:
raise ValueError("half-height must be positive and non-zero"
", not %s" % halfh)
# test that the given width and height is consistent
# with the orientation
if (self._orientation == "bottom" or self._orientation == "top"):
if halfw < halfh:
raise ValueError("half-width(%s) < half-height(%s) for"
"%s orientation,"
" expected half-width >= half-height" %
(halfw, halfh, self._orientation, ))
else: # orientation == left or orientation == right
if halfw > halfh:
raise ValueError("half-width(%s) > half-height(%s) for"
"%s orientation,"
" expected half-width <= half-height" %
(halfw, halfh, self._orientation, ))
# Set up the attributes that the shaders require
vertices = np.array([[x - halfw, y - halfh],
[x + halfw, y - halfh],
[x + halfw, y + halfh],
# tri 2
[x - halfw, y - halfh],
[x + halfw, y + halfh],
[x - halfw, y + halfh]],
dtype=np.float32)
self.shared_program['a_position'] = vertices | ['def', '_update', '(', 'self', ')', ':', 'x', ',', 'y', '=', 'self', '.', '_pos', 'halfw', ',', 'halfh', '=', 'self', '.', '_halfdim', '# test that width and height are non-zero', 'if', 'halfw', '<=', '0', ':', 'raise', 'ValueError', '(', '"half-width must be positive and non-zero"', '", not %s"', '%', 'halfw', ')', 'if', 'halfh', '<=', '0', ':', 'raise', 'ValueError', '(', '"half-height must be positive and non-zero"', '", not %s"', '%', 'halfh', ')', '# test that the given width and height is consistent', '# with the orientation', 'if', '(', 'self', '.', '_orientation', '==', '"bottom"', 'or', 'self', '.', '_orientation', '==', '"top"', ')', ':', 'if', 'halfw', '<', 'halfh', ':', 'raise', 'ValueError', '(', '"half-width(%s) < half-height(%s) for"', '"%s orientation,"', '" expected half-width >= half-height"', '%', '(', 'halfw', ',', 'halfh', ',', 'self', '.', '_orientation', ',', ')', ')', 'else', ':', '# orientation == left or orientation == right', 'if', 'halfw', '>', 'halfh', ':', 'raise', 'ValueError', '(', '"half-width(%s) > half-height(%s) for"', '"%s orientation,"', '" expected half-width <= half-height"', '%', '(', 'halfw', ',', 'halfh', ',', 'self', '.', '_orientation', ',', ')', ')', '# Set up the attributes that the shaders require', 'vertices', '=', 'np', '.', 'array', '(', '[', '[', 'x', '-', 'halfw', ',', 'y', '-', 'halfh', ']', ',', '[', 'x', '+', 'halfw', ',', 'y', '-', 'halfh', ']', ',', '[', 'x', '+', 'halfw', ',', 'y', '+', 'halfh', ']', ',', '# tri 2', '[', 'x', '-', 'halfw', ',', 'y', '-', 'halfh', ']', ',', '[', 'x', '+', 'halfw', ',', 'y', '+', 'halfh', ']', ',', '[', 'x', '-', 'halfw', ',', 'y', '+', 'halfh', ']', ']', ',', 'dtype', '=', 'np', '.', 'float32', ')', 'self', '.', 'shared_program', '[', "'a_position'", ']', '=', 'vertices'] | Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual | ['Rebuilds', 'the', 'shaders', 'and', 'repositions', 'the', 'objects', 'that', 'are', 'used', 'internally', 'by', 'the', 'ColorBarVisual'] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/colorbar.py#L117-L158 |
3,297 | mlperf/training | reinforcement/tensorflow/minigo/bigtable_input.py | make_single_array | def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype) | python | def make_single_array(ds, batch_size=8*1024):
"""Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array.
"""
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
batches = []
with tf.Session() as sess:
ds = ds.batch(batch_size)
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
with tqdm(desc='Elements', unit_scale=1) as pbar:
try:
while True:
batches.append(sess.run(get_next))
pbar.update(len(batches[-1]))
except tf.errors.OutOfRangeError:
pass
if batches:
return np.concatenate(batches)
return np.array([], dtype=ds.output_types.as_numpy_dtype) | ['def', 'make_single_array', '(', 'ds', ',', 'batch_size', '=', '8', '*', '1024', ')', ':', 'if', 'isinstance', '(', 'ds', '.', 'output_types', ',', 'tuple', ')', 'or', 'isinstance', '(', 'ds', '.', 'output_shapes', ',', 'tuple', ')', ':', 'raise', 'ValueError', '(', "'Dataset must have a single type and shape'", ')', 'nshapes', '=', 'len', '(', 'ds', '.', 'output_shapes', ')', 'if', 'nshapes', '>', '0', ':', 'raise', 'ValueError', '(', "'Dataset must be comprised of scalars (TensorShape=[])'", ')', 'batches', '=', '[', ']', 'with', 'tf', '.', 'Session', '(', ')', 'as', 'sess', ':', 'ds', '=', 'ds', '.', 'batch', '(', 'batch_size', ')', 'iterator', '=', 'ds', '.', 'make_initializable_iterator', '(', ')', 'sess', '.', 'run', '(', 'iterator', '.', 'initializer', ')', 'get_next', '=', 'iterator', '.', 'get_next', '(', ')', 'with', 'tqdm', '(', 'desc', '=', "'Elements'", ',', 'unit_scale', '=', '1', ')', 'as', 'pbar', ':', 'try', ':', 'while', 'True', ':', 'batches', '.', 'append', '(', 'sess', '.', 'run', '(', 'get_next', ')', ')', 'pbar', '.', 'update', '(', 'len', '(', 'batches', '[', '-', '1', ']', ')', ')', 'except', 'tf', '.', 'errors', '.', 'OutOfRangeError', ':', 'pass', 'if', 'batches', ':', 'return', 'np', '.', 'concatenate', '(', 'batches', ')', 'return', 'np', '.', 'array', '(', '[', ']', ',', 'dtype', '=', 'ds', '.', 'output_types', '.', 'as_numpy_dtype', ')'] | Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
Returns:
a single numpy array. | ['Create', 'a', 'single', 'numpy', 'array', 'from', 'a', 'dataset', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/bigtable_input.py#L106-L141 |
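Hypothetical sketch for make_single_array above, assuming the TF 1.x graph-mode API implied by its sess.run / make_initializable_iterator calls and a dataset of scalar elements:
import tensorflow as tf  # TF 1.x

ds = tf.data.Dataset.range(10)             # scalar int64 elements, shape []
arr = make_single_array(ds, batch_size=4)  # -> array([0, 1, ..., 9])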
3,298 | numenta/htmresearch | projects/sequence_learning/sequence_simulations.py | printOptions | def printOptions(options, tm, outFile):
"""
Pretty print the set of options
"""
print >>outFile, "TM parameters:"
printTemporalMemory(tm, outFile)
print >>outFile, "Experiment parameters:"
for k,v in options.__dict__.iteritems():
print >>outFile, " %s : %s" % (k,str(v))
outFile.flush() | python | def printOptions(options, tm, outFile):
"""
Pretty print the set of options
"""
print >>outFile, "TM parameters:"
printTemporalMemory(tm, outFile)
print >>outFile, "Experiment parameters:"
for k,v in options.__dict__.iteritems():
print >>outFile, " %s : %s" % (k,str(v))
outFile.flush() | ['def', 'printOptions', '(', 'options', ',', 'tm', ',', 'outFile', ')', ':', 'print', '>>', 'outFile', ',', '"TM parameters:"', 'printTemporalMemory', '(', 'tm', ',', 'outFile', ')', 'print', '>>', 'outFile', ',', '"Experiment parameters:"', 'for', 'k', ',', 'v', 'in', 'options', '.', '__dict__', '.', 'iteritems', '(', ')', ':', 'print', '>>', 'outFile', ',', '" %s : %s"', '%', '(', 'k', ',', 'str', '(', 'v', ')', ')', 'outFile', '.', 'flush', '(', ')'] | Pretty print the set of options | ['Pretty', 'print', 'the', 'set', 'of', 'options'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_learning/sequence_simulations.py#L326-L335 |
3,299 | aaugustin/django-sequences | sequences/__init__.py | get_next_value | def get_next_value(
sequence_name='default', initial_value=1, reset_value=None,
*, nowait=False, using=None):
"""
Return the next value for a given sequence.
"""
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
if (getattr(connection, 'pg_version', 0) >= 90500
and reset_value is None and not nowait):
# PostgreSQL ≥ 9.5 supports "upsert".
with connection.cursor() as cursor:
cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
last, = cursor.fetchone()
return last
else:
# Other databases require making more database queries.
with transaction.atomic(using=using, savepoint=False):
sequence, created = (
Sequence.objects
.select_for_update(nowait=nowait)
.get_or_create(name=sequence_name,
defaults={'last': initial_value})
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
sequence.save()
return sequence.last | python | def get_next_value(
sequence_name='default', initial_value=1, reset_value=None,
*, nowait=False, using=None):
"""
Return the next value for a given sequence.
"""
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
if (getattr(connection, 'pg_version', 0) >= 90500
and reset_value is None and not nowait):
# PostgreSQL ≥ 9.5 supports "upsert".
with connection.cursor() as cursor:
cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
last, = cursor.fetchone()
return last
else:
# Other databases require making more database queries.
with transaction.atomic(using=using, savepoint=False):
sequence, created = (
Sequence.objects
.select_for_update(nowait=nowait)
.get_or_create(name=sequence_name,
defaults={'last': initial_value})
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
sequence.save()
return sequence.last | ['def', 'get_next_value', '(', 'sequence_name', '=', "'default'", ',', 'initial_value', '=', '1', ',', 'reset_value', '=', 'None', ',', '*', ',', 'nowait', '=', 'False', ',', 'using', '=', 'None', ')', ':', '# Inner import because models cannot be imported before their application.', 'from', '.', 'models', 'import', 'Sequence', 'if', 'reset_value', 'is', 'not', 'None', ':', 'assert', 'initial_value', '<', 'reset_value', 'if', 'using', 'is', 'None', ':', 'using', '=', 'router', '.', 'db_for_write', '(', 'Sequence', ')', 'connection', '=', 'connections', '[', 'using', ']', 'if', '(', 'getattr', '(', 'connection', ',', "'pg_version'", ',', '0', ')', '>=', '90500', 'and', 'reset_value', 'is', 'None', 'and', 'not', 'nowait', ')', ':', '# PostgreSQL ≥ 9.5 supports "upsert".', 'with', 'connection', '.', 'cursor', '(', ')', 'as', 'cursor', ':', 'cursor', '.', 'execute', '(', 'UPSERT_QUERY', ',', '[', 'sequence_name', ',', 'initial_value', ']', ')', 'last', ',', '=', 'cursor', '.', 'fetchone', '(', ')', 'return', 'last', 'else', ':', '# Other databases require making more database queries.', 'with', 'transaction', '.', 'atomic', '(', 'using', '=', 'using', ',', 'savepoint', '=', 'False', ')', ':', 'sequence', ',', 'created', '=', '(', 'Sequence', '.', 'objects', '.', 'select_for_update', '(', 'nowait', '=', 'nowait', ')', '.', 'get_or_create', '(', 'name', '=', 'sequence_name', ',', 'defaults', '=', '{', "'last'", ':', 'initial_value', '}', ')', ')', 'if', 'not', 'created', ':', 'sequence', '.', 'last', '+=', '1', 'if', 'reset_value', 'is', 'not', 'None', 'and', 'sequence', '.', 'last', '>=', 'reset_value', ':', 'sequence', '.', 'last', '=', 'initial_value', 'sequence', '.', 'save', '(', ')', 'return', 'sequence', '.', 'last'] | Return the next value for a given sequence. | ['Return', 'the', 'next', 'value', 'for', 'a', 'given', 'sequence', '.'] | train | https://github.com/aaugustin/django-sequences/blob/0228ae003540ccb63be4a456fb8f63a2f4038de6/sequences/__init__.py#L13-L59 |
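Usage sketch for get_next_value above (the sequence names are invented; assumes a migrated Django project with django-sequences installed):
from sequences import get_next_value

invoice_number = get_next_value('invoice')  # 1, 2, 3, ... per sequence name
monthly_number = get_next_value('invoice-2019-04', initial_value=1)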