Dataset schema: Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 100-30.3k chars) | language (1 class) | func_code_string (string, 100-30.3k chars) | func_code_tokens (string, 138-33.2k chars) | func_documentation_string (string, 1-15k chars) | func_documentation_tokens (string, 5-5.14k chars) | split_name (1 class) | func_code_url (string, 91-315 chars)

Each record below shows: index | repository_name | func_path_in_repository | func_name | language | split_name | func_code_url, followed by the function source.
9,100 | bitesofcode/projexui | projexui/widgets/xnodewidget/xnodescene.py | XNodeScene.setViewMode | python | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L1564-L1577

def setViewMode( self, state = True ):
    """
    Starts the view mode for moving around the scene.
    """
    if self._viewMode == state:
        return

    self._viewMode = state
    if state:
        self._mainView.setDragMode( self._mainView.ScrollHandDrag )
    else:
        self._mainView.setDragMode( self._mainView.RubberBandDrag )

    self.emitViewModeChanged()
9,101 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_firmware.py | FirmwareModule.manifest_download | python | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_firmware.py#L330-L345

def manifest_download(self):
    '''download manifest files'''
    if self.downloaders_lock.acquire(False):
        if len(self.downloaders):
            # there already exist downloader threads
            self.downloaders_lock.release()
            return

        for url in ['http://firmware.ardupilot.org/manifest.json']:
            filename = self.make_safe_filename_from_url(url)
            path = mp_util.dot_mavproxy("manifest-%s" % filename)
            self.downloaders[url] = threading.Thread(target=self.download_url, args=(url, path))
            self.downloaders[url].start()
        self.downloaders_lock.release()
    else:
        print("fw: Failed to acquire download lock")
9,102 | jorahn/icy | icy/ext/xml2json.py | json2elem | python | train | https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/ext/xml2json.py#L161-L170

def json2elem(json_data, factory=ET.Element):
    """Convert a JSON string into an Element.

    Whatever Element implementation we could import will be used by
    default; if you want to use something else, pass the Element class
    as the factory parameter.
    """
    return internal_to_elem(json.loads(json_data), factory)
9,103 | evhub/coconut | coconut/compiler/compiler.py | Compiler.passthrough_repl | python | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L999-L1031

def passthrough_repl(self, inputstring, **kwargs):
    """Add back passthroughs."""
    out = []
    index = None
    for c in append_it(inputstring, None):
        try:

            if index is not None:
                if c is not None and c in nums:
                    index += c
                elif c == unwrapper and index:
                    ref = self.get_ref("passthrough", index)
                    out.append(ref)
                    index = None
                elif c != "\\" or index:
                    out.append("\\" + index)
                    if c is not None:
                        out.append(c)
                    index = None

            elif c is not None:
                if c == "\\":
                    index = ""
                else:
                    out.append(c)

        except CoconutInternalException as err:
            complain(err)
            if index is not None:
                out.append(index)
                index = None
            out.append(c)

    return "".join(out)
9,104 | uktrade/directory-validators | directory_validators/company.py | case_study_social_link_linkedin | python | train | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L142-L156

def case_study_social_link_linkedin(value):
    """
    Confirms that the social media url is pointed at the correct domain.

    Args:
        value (string): The url to check.

    Raises:
        django.forms.ValidationError
    """
    parsed = parse.urlparse(value.lower())
    if not parsed.netloc.endswith('linkedin.com'):
        raise ValidationError(MESSAGE_NOT_LINKEDIN)
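A quick usage sketch for the validator above; the URLs are made-up examples, and the behaviour is read directly from the function body:

from django.forms import ValidationError

case_study_social_link_linkedin('https://www.linkedin.com/company/example')   # passes silently
try:
    case_study_social_link_linkedin('https://twitter.com/example')
except ValidationError:
    print('rejected: not a linkedin.com URL')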
9,105 | Workiva/furious | furious/extras/appengine/ndb_persistence.py | FuriousContext.from_id | python | train | https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/appengine/ndb_persistence.py#L55-L65

def from_id(cls, id):
    """Load a `cls` entity and instantiate the Context it stores."""
    from furious.context import Context

    # TODO: Handle exceptions and retries here.
    entity = cls.get_by_id(id)
    if not entity:
        raise FuriousContextNotFoundError(
            "Context entity not found for: {}".format(id))

    return Context.from_dict(entity.context)
9,106 | kallimachos/sphinxmark | sphinxmark/__init__.py | getimage | python | train | https://github.com/kallimachos/sphinxmark/blob/f7b17d9dabf1fff448bb38d90474498f0d203990/sphinxmark/__init__.py#L106-L149

def getimage(app):
    """Get image file."""
    # append source directory to TEMPLATE_PATH so template is found
    srcdir = os.path.abspath(os.path.dirname(__file__))
    TEMPLATE_PATH.append(srcdir)
    staticbase = '_static'
    buildpath = os.path.join(app.outdir, staticbase)
    try:
        os.makedirs(buildpath)
    except OSError:
        if not os.path.isdir(buildpath):
            raise

    if app.config.sphinxmark_image == 'default':
        imagefile = 'watermark-draft.png'
        imagepath = os.path.join(srcdir, imagefile)
        copy(imagepath, buildpath)
        LOG.debug('[sphinxmark] Using default image: ' + imagefile)
    elif app.config.sphinxmark_image == 'text':
        imagefile = createimage(app, srcdir, buildpath)
        LOG.debug('[sphinxmark] Image: ' + imagefile)
    else:
        imagefile = app.config.sphinxmark_image

        if app.config.html_static_path:
            staticpath = app.config.html_static_path[0]
        else:
            staticpath = '_static'

        LOG.debug('[sphinxmark] static path: ' + staticpath)
        imagepath = os.path.join(app.confdir, staticpath, imagefile)
        LOG.debug('[sphinxmark] Imagepath: ' + imagepath)

        try:
            copy(imagepath, buildpath)
        except Exception:
            message = ("Cannot find '%s'. Put watermark images in the "
                       "'_static' directory or specify the location using "
                       "'html_static_path'." % imagefile)
            LOG.warning(message)
            LOG.warning('Failed to add watermark.')
            return

    return(buildpath, imagefile)
9,107 | treycucco/bidon | bidon/util/transform.py | get_val | python | train | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/transform.py#L10-L20

def get_val(source, extract=None, transform=None):
    """Extract a value from a source, transform and return it."""
    if extract is None:
        raw_value = source
    else:
        raw_value = extract(source)

    if transform is None:
        return raw_value
    else:
        return transform(raw_value)
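A small usage sketch for get_val; the record and field names are invented, but the call pattern follows the signature directly:

record = {"price": "4.20"}
value = get_val(record, extract=lambda rec: rec["price"], transform=float)
# value == 4.2; with extract=None the source itself is transformed,
# and with transform=None the extracted value is returned unchanged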
9,108 | sdispater/orator | orator/query/builder.py | QueryBuilder.count | python | train | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L1244-L1260

def count(self, *columns):
    """
    Retrieve the "count" result of the query

    :param columns: The columns to get
    :type columns: tuple

    :return: The count
    :rtype: int
    """
    if not columns and self.distinct_:
        columns = self.columns

    if not columns:
        columns = ["*"]

    return int(self.aggregate("count", *columns))
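Assuming orator's usual Laravel-style fluent interface (the connection settings below are placeholders, not part of this record), the method above is typically reached at the end of a query chain:

from orator import DatabaseManager

db = DatabaseManager({'sqlite': {'driver': 'sqlite', 'database': ':memory:'}})
total = db.table('users').where('active', 1).count()   # delegates to QueryBuilder.count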
9,109 | saltstack/salt | salt/cloud/clouds/cloudstack.py | get_networkid | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/cloudstack.py#L231-L240

def get_networkid(vm_):
    '''
    Return the networkid to use, only valid for Advanced Zone
    '''
    networkid = config.get_cloud_config_value('networkid', vm_, __opts__)

    if networkid is not None:
        return networkid
    else:
        return False
9,110 | AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | pl2nvc | python | train | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9581-L9598

def pl2nvc(plane):
    """
    Return a unit normal vector and constant that define a specified plane.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pl2nvc_c.html

    :param plane: A SPICE plane.
    :type plane: supporttypes.Plane
    :return:
            A normal vector and constant defining
            the geometric plane represented by plane.
    :rtype: tuple
    """
    assert (isinstance(plane, stypes.Plane))
    normal = stypes.emptyDoubleVector(3)
    constant = ctypes.c_double()
    libspice.pl2nvc_c(ctypes.byref(plane), normal, ctypes.byref(constant))
    return stypes.cVectorToPython(normal), constant.value
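A usage sketch for pl2nvc; it assumes the companion routine nvc2pl (the usual CSPICE way to build a plane from a normal and constant), which is not part of this record:

import spiceypy

plane = spiceypy.nvc2pl([0.0, 0.0, 1.0], 2.0)    # plane z = 2 (constructor assumed)
normal, constant = spiceypy.pl2nvc(plane)
# normal -> [0.0, 0.0, 1.0], constant -> 2.0 (unit normal and plane constant)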
9,111 | PyHDI/Pyverilog | pyverilog/vparser/parser.py | VerilogParser.p_generate_if | python | train | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1943-L1946

def p_generate_if(self, p):
    'generate_if : IF LPAREN cond RPAREN gif_true_item ELSE gif_false_item'
    p[0] = IfStatement(p[3], p[5], p[7], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
9,112 | twilio/twilio-python | twilio/rest/serverless/v1/service/environment/__init__.py | EnvironmentPage.get_instance | python | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/environment/__init__.py#L194-L203

def get_instance(self, payload):
    """
    Build an instance of EnvironmentInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    """
    return EnvironmentInstance(self._version, payload, service_sid=self._solution['service_sid'], )
9,113 | materialsproject/pymatgen | pymatgen/analysis/local_env.py | site_is_of_motif_type | python | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/local_env.py#L1696-L1764

def site_is_of_motif_type(struct, n, approach="min_dist", delta=0.1, \
                          cutoff=10.0, thresh=None):
    """
    Returns the motif type of the site with index n in structure struct;
    currently featuring "tetrahedral", "octahedral", "bcc", and "cp"
    (close-packed: fcc and hcp) as well as "square pyramidal" and
    "trigonal bipyramidal".  If the site is not recognized,
    "unrecognized" is returned.  If a site should be assigned to two
    different motifs, "multiple assignments" is returned.

    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.
        thresh (dict): thresholds for motif criteria (currently, required
            keys and their default values are "qtet": 0.5,
            "qoct": 0.5, "qbcc": 0.5, "q6": 0.4).

    Returns: motif type (str).
    """
    if thresh is None:
        thresh = {
            "qtet": 0.5, "qoct": 0.5, "qbcc": 0.5, "q6": 0.4,
            "qtribipyr": 0.8, "qsqpyr": 0.8}

    ops = LocalStructOrderParams([
        "cn", "tet", "oct", "bcc", "q6", "sq_pyr", "tri_bipyr"])

    neighs_cent = get_neighbors_of_site_with_index(
        struct, n, approach=approach, delta=delta, cutoff=cutoff)
    neighs_cent.append(struct.sites[n])
    opvals = ops.get_order_parameters(
        neighs_cent, len(neighs_cent) - 1, indices_neighs=[
            i for i in range(len(neighs_cent) - 1)])
    cn = int(opvals[0] + 0.5)
    motif_type = "unrecognized"
    nmotif = 0

    if cn == 4 and opvals[1] > thresh["qtet"]:
        motif_type = "tetrahedral"
        nmotif += 1
    if cn == 5 and opvals[5] > thresh["qsqpyr"]:
        motif_type = "square pyramidal"
        nmotif += 1
    if cn == 5 and opvals[6] > thresh["qtribipyr"]:
        motif_type = "trigonal bipyramidal"
        nmotif += 1
    if cn == 6 and opvals[2] > thresh["qoct"]:
        motif_type = "octahedral"
        nmotif += 1
    if cn == 8 and (opvals[3] > thresh["qbcc"] and opvals[1] < thresh["qtet"]):
        motif_type = "bcc"
        nmotif += 1
    if cn == 12 and (opvals[4] > thresh["q6"] and opvals[1] < thresh["q6"] and
                     opvals[2] < thresh["q6"] and opvals[3] < thresh["q6"]):
        motif_type = "cp"
        nmotif += 1

    if nmotif > 1:
        motif_type = "multiple assignments"

    return motif_type
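A minimal call sketch, assuming a pymatgen Structure loaded from a local file (the file name is a placeholder):

from pymatgen.core.structure import Structure

struct = Structure.from_file("POSCAR")          # placeholder input file
print(site_is_of_motif_type(struct, 0))          # e.g. "octahedral" or "unrecognized"
print(site_is_of_motif_type(struct, 0, approach="voronoi", cutoff=6.0))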
9,114 | pymupdf/PyMuPDF | fitz/fitz.py | Matrix.invert | python | train | https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L158-L169

def invert(self, src=None):
    """Calculate the inverted matrix. Return 0 if successful and replace
    current one. Else return 1 and do nothing.
    """
    if src is None:
        dst = TOOLS._invert_matrix(self)
    else:
        dst = TOOLS._invert_matrix(src)
    if dst[0] == 1:
        return 1
    self.a, self.b, self.c, self.d, self.e, self.f = dst[1]
    return 0
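A short sketch of how the method above behaves, using PyMuPDF's public fitz.Matrix constructor (the values are arbitrary):

import fitz

m = fitz.Matrix(2, 0, 0, 2, 10, 10)      # scaling plus translation
rc = m.invert()                          # 0: m has been replaced by its inverse
singular = fitz.Matrix(0, 0, 0, 0, 0, 0)
print(singular.invert())                 # 1: not invertible, matrix left unchanged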
9,115 | materialsproject/pymatgen-db | matgendb/builders/core.py | merge_tasks | python | train | https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/builders/core.py#L153-L186

def merge_tasks(core_collections, sandbox_collections, id_prefix, new_tasks, batch_size=100, wipe=False):
    """Merge core and sandbox collections into a temporary collection in the sandbox.

    :param core_collections: Core collection info
    :type core_collections: Collections
    :param sandbox_collections: Sandbox collection info
    :type sandbox_collections: Collections
    """
    merged = copy.copy(sandbox_collections)
    # create/clear target collection
    target = merged.database[new_tasks]
    if wipe:
        _log.debug("merge_tasks.wipe.begin")
        target.remove()
        merged.database['counter'].remove()
        _log.debug("merge_tasks.wipe.end")
    # perform the merge
    batch = []
    for doc in core_collections.tasks.find():
        batch.append(doc)
        if len(batch) == batch_size:
            target.insert(batch)
            batch = []
    if batch:
        target.insert(batch)
    batch = []
    for doc in sandbox_collections.tasks.find():
        doc['task_id'] = id_prefix + '-' + str(doc['task_id'])
        batch.append(doc)
        if len(batch) == batch_size:
            target.insert(batch)
            batch = []
    if batch:
        target.insert(batch)
9,116 | nickmckay/LiPD-utilities | Python/lipd/misc.py | rm_files | python | train | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L670-L682

def rm_files(path, extension):
    """
    Remove all files in the given directory with the given extension

    :param str path: Directory
    :param str extension: File type to remove
    :return none:
    """
    files = list_files(extension, path)
    for file in files:
        if file.endswith(extension):
            os.remove(os.path.join(path, file))
    return
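Because the function deletes files, a safe way to try it is against a throwaway directory; the extension used below is arbitrary:

import os, tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, "example.lpd"), "w").close()
rm_files(tmp, ".lpd")     # removes every *.lpd file in tmp
print(os.listdir(tmp))    # -> []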
9,117 | pschmitt/shortmomi | shortmomi/views.py | get_vm_by_name | python | train | https://github.com/pschmitt/shortmomi/blob/81ad5a874e454ef0da93b7fd95474e7b9b9918d8/shortmomi/views.py#L29-L33

def get_vm_by_name(content, name, regex=False):
    '''
    Get a VM by its name
    '''
    return get_object_by_name(content, vim.VirtualMachine, name, regex)
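A call sketch assuming a live vSphere session created with pyVmomi (host name and credentials are placeholders):

from pyVim.connect import SmartConnect

si = SmartConnect(host="vcenter.example.com", user="admin", pwd="secret")
content = si.RetrieveContent()
vm = get_vm_by_name(content, "my-vm")                  # exact-name lookup
vm2 = get_vm_by_name(content, "^web-", regex=True)     # regex lookup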
9,118 | juju/charm-helpers | charmhelpers/contrib/openstack/amulet/utils.py | OpenStackAmuletUtils.connect_amqp_by_unit | python | train | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L1406-L1452

def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                         port=None, fatal=True,
                         username="testuser1", password="changeme"):
    """Establish and return a pika amqp connection to the rabbitmq service
    running on a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param ssl: boolean, default to False
    :param port: amqp port, use defaults if None
    :param fatal: boolean, default to True (raises on connect error)
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :returns: pika amqp connection pointer or None if failed and non-fatal
    """
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    # Default port logic if port is not specified
    if ssl and not port:
        port = 5671
    elif not ssl and not port:
        port = 5672

    self.log.debug('Connecting to amqp on {}:{} ({}) as '
                   '{}...'.format(host, port, unit_name, username))

    try:
        credentials = pika.PlainCredentials(username, password)
        parameters = pika.ConnectionParameters(host=host, port=port,
                                               credentials=credentials,
                                               ssl=ssl,
                                               connection_attempts=3,
                                               retry_delay=5,
                                               socket_timeout=1)
        connection = pika.BlockingConnection(parameters)
        assert connection.is_open is True
        assert connection.is_closing is False
        self.log.debug('Connect OK')
        return connection
    except Exception as e:
        msg = ('amqp connection failed to {}:{} as '
               '{} ({})'.format(host, port, username, str(e)))
        if fatal:
            amulet.raise_status(amulet.FAIL, msg)
        else:
            self.log.warn(msg)
            return None
9,119 | LudovicRousseau/pyscard | smartcard/ulist.py | ulist.__remove_duplicates | python | train | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/ulist.py#L83-L107

def __remove_duplicates(self, _other):
    """Remove from other items already in list."""
    if not isinstance(_other, type(self)) \
            and not isinstance(_other, type(list)) \
            and not isinstance(_other, type([])):
        other = [_other]
    else:
        other = list(_other)

    # remove items already in self
    newother = []
    for i in range(0, len(other)):
        item = other.pop(0)
        if not list.__contains__(self, item):
            newother.append(item)

    # remove duplicate items in other
    other = []
    if newother != []:
        other.append(newother[0])
        for i in range(1, len(newother)):
            item = newother.pop()
            if not other.__contains__(item):
                other.append(item)
    return other
9,120 | SuLab/WikidataIntegrator | wikidataintegrator/wdi_core.py | WDItemEngine.generate_item_instances | python | train | https://github.com/SuLab/WikidataIntegrator/blob/8ceb2ed1c08fec070ec9edfcf7db7b8691481b62/wikidataintegrator/wdi_core.py#L1104-L1143

def generate_item_instances(cls, items, mediawiki_api_url='https://www.wikidata.org/w/api.php', login=None,
                            user_agent=config['USER_AGENT_DEFAULT']):
    """
    A method which allows for retrieval of a list of Wikidata items or properties. The method generates a list of
    tuples where the first value in the tuple is the QID or property ID, whereas the second is the new instance of
    WDItemEngine containing all the data of the item. This is most useful for mass retrieval of WD items.

    :param items: A list of QIDs or property IDs
    :type items: list
    :param mediawiki_api_url: The MediaWiki url which should be used
    :type mediawiki_api_url: str
    :param login: An object of type WDLogin, which holds the credentials/session cookies required for >50 item bulk
        retrieval of items.
    :type login: wdi_login.WDLogin
    :return: A list of tuples, first value in the tuple is the QID or property ID string, second value is the
        instance of WDItemEngine with the corresponding item data.
    """
    assert type(items) == list

    url = mediawiki_api_url
    params = {
        'action': 'wbgetentities',
        'ids': '|'.join(items),
        'format': 'json'
    }
    headers = {
        'User-Agent': user_agent
    }

    if login:
        reply = login.get_session().get(url, params=params, headers=headers)
    else:
        reply = requests.get(url, params=params)

    item_instances = []
    for qid, v in reply.json()['entities'].items():
        ii = cls(wd_item_id=qid, item_data=v)
        ii.mediawiki_api_url = mediawiki_api_url
        item_instances.append((qid, ii))

    return item_instances
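Since this is a classmethod on WDItemEngine, a typical (hedged) invocation for a small batch of QIDs looks like:

from wikidataintegrator import wdi_core

instances = wdi_core.WDItemEngine.generate_item_instances(['Q42', 'Q5'])
for qid, item in instances:
    print(qid, type(item).__name__)    # each tuple pairs a QID with its WDItemEngine instance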
9,121 | ansible-community/ara | ara/models.py | content_sha1 | def content_sha1(context):
"""
Used by the FileContent model to automatically compute the sha1
hash of content before storing it to the database.
"""
try:
content = context.current_parameters['content']
except AttributeError:
content = context
return hashlib.sha1(encodeutils.to_utf8(content)).hexdigest() | python | def content_sha1(context):
"""
Used by the FileContent model to automatically compute the sha1
hash of content before storing it to the database.
"""
try:
content = context.current_parameters['content']
except AttributeError:
content = context
return hashlib.sha1(encodeutils.to_utf8(content)).hexdigest() | ['def', 'content_sha1', '(', 'context', ')', ':', 'try', ':', 'content', '=', 'context', '.', 'current_parameters', '[', "'content'", ']', 'except', 'AttributeError', ':', 'content', '=', 'context', 'return', 'hashlib', '.', 'sha1', '(', 'encodeutils', '.', 'to_utf8', '(', 'content', ')', ')', '.', 'hexdigest', '(', ')'] | Used by the FileContent model to automatically compute the sha1
hash of content before storing it to the database. | ['Used', 'by', 'the', 'FileContent', 'model', 'to', 'automatically', 'compute', 'the', 'sha1', 'hash', 'of', 'content', 'before', 'storing', 'it', 'to', 'the', 'database', '.'] | train | https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/models.py#L53-L62 |
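A minimal standalone sketch of the same dual dispatch, using only the standard library (the real helper also routes through oslo's encodeutils for the UTF-8 coercion); FakeContext is a stand-in for the SQLAlchemy execution context, not part of ara:

import hashlib

class FakeContext(object):
    # Mimics the object SQLAlchemy hands to column defaults.
    current_parameters = {'content': 'hosts: all'}

def sha1_of(context_or_text):
    try:
        content = context_or_text.current_parameters['content']
    except AttributeError:
        content = context_or_text
    return hashlib.sha1(content.encode('utf-8')).hexdigest()

print(sha1_of(FakeContext()))   # hashed from the insert context
print(sha1_of('hosts: all'))    # hashed from a plain string; same digest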
9,122 | Hackerfleet/hfos | hfos/tool/installer.py | install_service | def install_service(instance, dbhost, dbname, port):
"""Install systemd service configuration"""
_check_root()
log("Installing systemd service")
launcher = os.path.realpath(__file__).replace('manage', 'launcher')
executable = sys.executable + " " + launcher
executable += " --instance " + instance
executable += " --dbname " + dbname + " --dbhost " + dbhost
executable += " --port " + port
executable += " --dolog --logfile /var/log/hfos-" + instance + ".log"
executable += " --logfileverbosity 30 -q"
definitions = {
'instance': instance,
'executable': executable
}
service_name = 'hfos-' + instance + '.service'
write_template_file(os.path.join('dev/templates', service_template),
os.path.join('/etc/systemd/system/', service_name),
definitions)
Popen([
'systemctl',
'enable',
service_name
])
log('Launching service')
Popen([
'systemctl',
'start',
service_name
])
log("Done: Install Service") | python | def install_service(instance, dbhost, dbname, port):
"""Install systemd service configuration"""
_check_root()
log("Installing systemd service")
launcher = os.path.realpath(__file__).replace('manage', 'launcher')
executable = sys.executable + " " + launcher
executable += " --instance " + instance
executable += " --dbname " + dbname + " --dbhost " + dbhost
executable += " --port " + port
executable += " --dolog --logfile /var/log/hfos-" + instance + ".log"
executable += " --logfileverbosity 30 -q"
definitions = {
'instance': instance,
'executable': executable
}
service_name = 'hfos-' + instance + '.service'
write_template_file(os.path.join('dev/templates', service_template),
os.path.join('/etc/systemd/system/', service_name),
definitions)
Popen([
'systemctl',
'enable',
service_name
])
log('Launching service')
Popen([
'systemctl',
'start',
service_name
])
log("Done: Install Service") | ['def', 'install_service', '(', 'instance', ',', 'dbhost', ',', 'dbname', ',', 'port', ')', ':', '_check_root', '(', ')', 'log', '(', '"Installing systemd service"', ')', 'launcher', '=', 'os', '.', 'path', '.', 'realpath', '(', '__file__', ')', '.', 'replace', '(', "'manage'", ',', "'launcher'", ')', 'executable', '=', 'sys', '.', 'executable', '+', '" "', '+', 'launcher', 'executable', '+=', '" --instance "', '+', 'instance', 'executable', '+=', '" --dbname "', '+', 'dbname', '+', '" --dbhost "', '+', 'dbhost', 'executable', '+=', '" --port "', '+', 'port', 'executable', '+=', '" --dolog --logfile /var/log/hfos-"', '+', 'instance', '+', '".log"', 'executable', '+=', '" --logfileverbosity 30 -q"', 'definitions', '=', '{', "'instance'", ':', 'instance', ',', "'executable'", ':', 'executable', '}', 'service_name', '=', "'hfos-'", '+', 'instance', '+', "'.service'", 'write_template_file', '(', 'os', '.', 'path', '.', 'join', '(', "'dev/templates'", ',', 'service_template', ')', ',', 'os', '.', 'path', '.', 'join', '(', "'/etc/systemd/system/'", ',', 'service_name', ')', ',', 'definitions', ')', 'Popen', '(', '[', "'systemctl'", ',', "'enable'", ',', 'service_name', ']', ')', 'log', '(', "'Launching service'", ')', 'Popen', '(', '[', "'systemctl'", ',', "'start'", ',', 'service_name', ']', ')', 'log', '(', '"Done: Install Service"', ')'] | Install systemd service configuration | ['Install', 'systemd', 'service', 'configuration'] | train | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/installer.py#L383-L422 |
9,123 | soravux/scoop | scoop/broker/brokertcp.py | Broker.addBrokerList | def addBrokerList(self, aBrokerInfoList):
"""Add a broker to the broker cluster available list.
Connects to the added broker if needed."""
self.clusterAvailable.update(set(aBrokerInfoList))
# If we need another connection to a fellow broker
# TODO: only connect to a given number
for aBrokerInfo in aBrokerInfoList:
self.clusterSocket.connect(
"tcp://{hostname}:{port}".format(
hostname=aBrokerInfo.hostname,
port=aBrokerInfo.task_port,
)
)
self.cluster.append(aBrokerInfo) | python | def addBrokerList(self, aBrokerInfoList):
"""Add a broker to the broker cluster available list.
Connects to the added broker if needed."""
self.clusterAvailable.update(set(aBrokerInfoList))
# If we need another connection to a fellow broker
# TODO: only connect to a given number
for aBrokerInfo in aBrokerInfoList:
self.clusterSocket.connect(
"tcp://{hostname}:{port}".format(
hostname=aBrokerInfo.hostname,
port=aBrokerInfo.task_port,
)
)
self.cluster.append(aBrokerInfo) | ['def', 'addBrokerList', '(', 'self', ',', 'aBrokerInfoList', ')', ':', 'self', '.', 'clusterAvailable', '.', 'update', '(', 'set', '(', 'aBrokerInfoList', ')', ')', '# If we need another connection to a fellow broker', '# TODO: only connect to a given number', 'for', 'aBrokerInfo', 'in', 'aBrokerInfoList', ':', 'self', '.', 'clusterSocket', '.', 'connect', '(', '"tcp://{hostname}:{port}"', '.', 'format', '(', 'hostname', '=', 'aBrokerInfo', '.', 'hostname', ',', 'port', '=', 'aBrokerInfo', '.', 'task_port', ',', ')', ')', 'self', '.', 'cluster', '.', 'append', '(', 'aBrokerInfo', ')'] | Add a broker to the broker cluster available list.
Connects to the added broker if needed. | ['Add', 'a', 'broker', 'to', 'the', 'broker', 'cluster', 'available', 'list', '.', 'Connects', 'to', 'the', 'added', 'broker', 'if', 'needed', '.'] | train | https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/broker/brokertcp.py#L262-L276 |
9,124 | O365/python-o365 | O365/excel.py | WorkbookSession.create_session | def create_session(self):
""" Request a new session id """
url = self.build_url(self._endpoints.get('create_session'))
response = self.con.post(url, data={'persistChanges': self.persist})
if not response:
raise RuntimeError('Could not create session as requested by the user.')
data = response.json()
self.session_id = data.get('id')
return True | python | def create_session(self):
""" Request a new session id """
url = self.build_url(self._endpoints.get('create_session'))
response = self.con.post(url, data={'persistChanges': self.persist})
if not response:
raise RuntimeError('Could not create session as requested by the user.')
data = response.json()
self.session_id = data.get('id')
return True | ['def', 'create_session', '(', 'self', ')', ':', 'url', '=', 'self', '.', 'build_url', '(', 'self', '.', '_endpoints', '.', 'get', '(', "'create_session'", ')', ')', 'response', '=', 'self', '.', 'con', '.', 'post', '(', 'url', ',', 'data', '=', '{', "'persistChanges'", ':', 'self', '.', 'persist', '}', ')', 'if', 'not', 'response', ':', 'raise', 'RuntimeError', '(', "'Could not create session as requested by the user.'", ')', 'data', '=', 'response', '.', 'json', '(', ')', 'self', '.', 'session_id', '=', 'data', '.', 'get', '(', "'id'", ')', 'return', 'True'] | Request a new session id | ['Request', 'a', 'new', 'session', 'id'] | train | https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L81-L91 |
9,125 | gtaylor/python-colormath | colormath/color_conversions.py | RGB_to_XYZ | def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs):
"""
RGB to XYZ conversion. Expects 0-255 RGB values.
Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html
"""
# Will contain linearized RGB channels (removed the gamma func).
linear_channels = {}
if isinstance(cobj, sRGBColor):
for channel in ['r', 'g', 'b']:
V = getattr(cobj, 'rgb_' + channel)
if V <= 0.04045:
linear_channels[channel] = V / 12.92
else:
linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4)
elif isinstance(cobj, BT2020Color):
if kwargs.get('is_12_bits_system'):
a, b, c = 1.0993, 0.0181, 0.081697877417347
else:
a, b, c = 1.099, 0.018, 0.08124794403514049
for channel in ['r', 'g', 'b']:
V = getattr(cobj, 'rgb_' + channel)
if V <= c:
linear_channels[channel] = V / 4.5
else:
linear_channels[channel] = math.pow((V + (a - 1)) / a, 1 / 0.45)
else:
# If it's not sRGB...
gamma = cobj.rgb_gamma
for channel in ['r', 'g', 'b']:
V = getattr(cobj, 'rgb_' + channel)
linear_channels[channel] = math.pow(V, gamma)
# Apply an RGB working space matrix to the XYZ values (matrix mul).
xyz_x, xyz_y, xyz_z = apply_RGB_matrix(
linear_channels['r'], linear_channels['g'], linear_channels['b'],
rgb_type=cobj, convtype="rgb_to_xyz")
if target_illuminant is None:
target_illuminant = cobj.native_illuminant
# The illuminant of the original RGB object. This will always match
# the RGB colorspace's native illuminant.
illuminant = cobj.native_illuminant
xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=illuminant)
# This will take care of any illuminant changes for us (if source
# illuminant != target illuminant).
xyzcolor.apply_adaptation(target_illuminant)
return xyzcolor | python | def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs):
"""
RGB to XYZ conversion. Expects 0-255 RGB values.
Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html
"""
# Will contain linearized RGB channels (removed the gamma func).
linear_channels = {}
if isinstance(cobj, sRGBColor):
for channel in ['r', 'g', 'b']:
V = getattr(cobj, 'rgb_' + channel)
if V <= 0.04045:
linear_channels[channel] = V / 12.92
else:
linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4)
elif isinstance(cobj, BT2020Color):
if kwargs.get('is_12_bits_system'):
a, b, c = 1.0993, 0.0181, 0.081697877417347
else:
a, b, c = 1.099, 0.018, 0.08124794403514049
for channel in ['r', 'g', 'b']:
V = getattr(cobj, 'rgb_' + channel)
if V <= c:
linear_channels[channel] = V / 4.5
else:
linear_channels[channel] = math.pow((V + (a - 1)) / a, 1 / 0.45)
else:
# If it's not sRGB...
gamma = cobj.rgb_gamma
for channel in ['r', 'g', 'b']:
V = getattr(cobj, 'rgb_' + channel)
linear_channels[channel] = math.pow(V, gamma)
# Apply an RGB working space matrix to the XYZ values (matrix mul).
xyz_x, xyz_y, xyz_z = apply_RGB_matrix(
linear_channels['r'], linear_channels['g'], linear_channels['b'],
rgb_type=cobj, convtype="rgb_to_xyz")
if target_illuminant is None:
target_illuminant = cobj.native_illuminant
# The illuminant of the original RGB object. This will always match
# the RGB colorspace's native illuminant.
illuminant = cobj.native_illuminant
xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=illuminant)
# This will take care of any illuminant changes for us (if source
# illuminant != target illuminant).
xyzcolor.apply_adaptation(target_illuminant)
return xyzcolor | ['def', 'RGB_to_XYZ', '(', 'cobj', ',', 'target_illuminant', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Will contain linearized RGB channels (removed the gamma func).', 'linear_channels', '=', '{', '}', 'if', 'isinstance', '(', 'cobj', ',', 'sRGBColor', ')', ':', 'for', 'channel', 'in', '[', "'r'", ',', "'g'", ',', "'b'", ']', ':', 'V', '=', 'getattr', '(', 'cobj', ',', "'rgb_'", '+', 'channel', ')', 'if', 'V', '<=', '0.04045', ':', 'linear_channels', '[', 'channel', ']', '=', 'V', '/', '12.92', 'else', ':', 'linear_channels', '[', 'channel', ']', '=', 'math', '.', 'pow', '(', '(', 'V', '+', '0.055', ')', '/', '1.055', ',', '2.4', ')', 'elif', 'isinstance', '(', 'cobj', ',', 'BT2020Color', ')', ':', 'if', 'kwargs', '.', 'get', '(', "'is_12_bits_system'", ')', ':', 'a', ',', 'b', ',', 'c', '=', '1.0993', ',', '0.0181', ',', '0.081697877417347', 'else', ':', 'a', ',', 'b', ',', 'c', '=', '1.099', ',', '0.018', ',', '0.08124794403514049', 'for', 'channel', 'in', '[', "'r'", ',', "'g'", ',', "'b'", ']', ':', 'V', '=', 'getattr', '(', 'cobj', ',', "'rgb_'", '+', 'channel', ')', 'if', 'V', '<=', 'c', ':', 'linear_channels', '[', 'channel', ']', '=', 'V', '/', '4.5', 'else', ':', 'linear_channels', '[', 'channel', ']', '=', 'math', '.', 'pow', '(', '(', 'V', '+', '(', 'a', '-', '1', ')', ')', '/', 'a', ',', '1', '/', '0.45', ')', 'else', ':', "# If it's not sRGB...", 'gamma', '=', 'cobj', '.', 'rgb_gamma', 'for', 'channel', 'in', '[', "'r'", ',', "'g'", ',', "'b'", ']', ':', 'V', '=', 'getattr', '(', 'cobj', ',', "'rgb_'", '+', 'channel', ')', 'linear_channels', '[', 'channel', ']', '=', 'math', '.', 'pow', '(', 'V', ',', 'gamma', ')', '# Apply an RGB working space matrix to the XYZ values (matrix mul).', 'xyz_x', ',', 'xyz_y', ',', 'xyz_z', '=', 'apply_RGB_matrix', '(', 'linear_channels', '[', "'r'", ']', ',', 'linear_channels', '[', "'g'", ']', ',', 'linear_channels', '[', "'b'", ']', ',', 'rgb_type', '=', 'cobj', ',', 'convtype', '=', '"rgb_to_xyz"', ')', 'if', 'target_illuminant', 'is', 'None', ':', 'target_illuminant', '=', 'cobj', '.', 'native_illuminant', '# The illuminant of the original RGB object. This will always match', "# the RGB colorspace's native illuminant.", 'illuminant', '=', 'cobj', '.', 'native_illuminant', 'xyzcolor', '=', 'XYZColor', '(', 'xyz_x', ',', 'xyz_y', ',', 'xyz_z', ',', 'illuminant', '=', 'illuminant', ')', '# This will take care of any illuminant changes for us (if source', '# illuminant != target illuminant).', 'xyzcolor', '.', 'apply_adaptation', '(', 'target_illuminant', ')', 'return', 'xyzcolor'] | RGB to XYZ conversion. Expects 0-255 RGB values.
Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html | ['RGB', 'to', 'XYZ', 'conversion', '.', 'Expects', '0', '-', '255', 'RGB', 'values', '.'] | train | https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_conversions.py#L542-L593 |
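A worked sketch of just the sRGB linearization branch, assuming an upscaled 0-255 triple has already been divided down to the 0-1 range (the full conversion then applies the RGB-to-XYZ matrix and chromatic adaptation, as in the function above):

import math

def srgb_linearize(v):
    # Inverse sRGB companding, same 0.04045 / 12.92 / 2.4 constants as above.
    if v <= 0.04045:
        return v / 12.92
    return math.pow((v + 0.055) / 1.055, 2.4)

r, g, b = 200 / 255.0, 30 / 255.0, 30 / 255.0
print([round(srgb_linearize(c), 4) for c in (r, g, b)])
# the 0.7843 red channel linearizes to roughly 0.578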
9,126 | tanghaibao/jcvi | jcvi/formats/fastq.py | guessoffset | def guessoffset(args):
"""
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
"""
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
rec = next(ai)
if offset == 33:
print("Sanger encoding (offset=33)", file=sys.stderr)
elif offset == 64:
print("Illumina encoding (offset=64)", file=sys.stderr)
return offset | python | def guessoffset(args):
"""
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
"""
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
rec = next(ai)
if offset == 33:
print("Sanger encoding (offset=33)", file=sys.stderr)
elif offset == 64:
print("Illumina encoding (offset=64)", file=sys.stderr)
return offset | ['def', 'guessoffset', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'guessoffset', '.', '__doc__', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'fastqfile', ',', '=', 'args', 'ai', '=', 'iter_fastq', '(', 'fastqfile', ')', 'rec', '=', 'next', '(', 'ai', ')', 'offset', '=', '64', 'while', 'rec', ':', 'quality', '=', 'rec', '.', 'quality', 'lowcounts', '=', 'len', '(', '[', 'x', 'for', 'x', 'in', 'quality', 'if', 'x', '<', '59', ']', ')', 'highcounts', '=', 'len', '(', '[', 'x', 'for', 'x', 'in', 'quality', 'if', 'x', '>', '74', ']', ')', 'diff', '=', 'highcounts', '-', 'lowcounts', 'if', 'diff', '>', '10', ':', 'break', 'elif', 'diff', '<', '-', '10', ':', 'offset', '=', '33', 'break', 'rec', '=', 'next', '(', 'ai', ')', 'if', 'offset', '==', '33', ':', 'print', '(', '"Sanger encoding (offset=33)"', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'elif', 'offset', '==', '64', ':', 'print', '(', '"Illumina encoding (offset=64)"', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'offset'] | %prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold) | ['%prog', 'guessoffset', 'fastqfile'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L597-L647 |
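A self-contained sketch of the same decision rule, assuming the quality values arrive as a raw ASCII string (the jcvi record object already stores them as ordinals) and skipping the +/-10 margin the real loop waits for before committing; the sample strings are invented:

def guess_offset(quality_string):
    ords = [ord(ch) for ch in quality_string]
    low = sum(1 for x in ords if x < 59)    # codes only Phred+33 data uses
    high = sum(1 for x in ords if x > 74)   # codes only Phred+64 data uses
    return 33 if low > high else 64

print(guess_offset('IIIIIHGF@@<<:99'))  # more low codes than high -> 33
print(guess_offset('hhhggfeedcba^^]'))  # high codes dominate -> 64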
9,127 | pypa/pipenv | pipenv/patched/notpip/_internal/wheel.py | message_about_scripts_not_on_PATH | def message_about_scripts_not_on_PATH(scripts):
# type: (Sequence[str]) -> Optional[str]
"""Determine if any scripts are not on PATH and format a warning.
Returns a warning message if one or more scripts are not on PATH,
otherwise None.
"""
if not scripts:
return None
# Group scripts by the path they were installed in
grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set]
for destfile in scripts:
parent_dir = os.path.dirname(destfile)
script_name = os.path.basename(destfile)
grouped_by_dir[parent_dir].add(script_name)
# We don't want to warn for directories that are on PATH.
not_warn_dirs = [
os.path.normcase(i).rstrip(os.sep) for i in
os.environ.get("PATH", "").split(os.pathsep)
]
# If an executable sits with sys.executable, we don't warn for it.
# This covers the case of venv invocations without activating the venv.
executable_loc = os.environ.get("PIP_PYTHON_PATH", sys.executable)
not_warn_dirs.append(os.path.normcase(os.path.dirname(executable_loc)))
warn_for = {
parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
if os.path.normcase(parent_dir) not in not_warn_dirs
}
if not warn_for:
return None
# Format a message
msg_lines = []
for parent_dir, scripts in warn_for.items():
scripts = sorted(scripts)
if len(scripts) == 1:
start_text = "script {} is".format(scripts[0])
else:
start_text = "scripts {} are".format(
", ".join(scripts[:-1]) + " and " + scripts[-1]
)
msg_lines.append(
"The {} installed in '{}' which is not on PATH."
.format(start_text, parent_dir)
)
last_line_fmt = (
"Consider adding {} to PATH or, if you prefer "
"to suppress this warning, use --no-warn-script-location."
)
if len(msg_lines) == 1:
msg_lines.append(last_line_fmt.format("this directory"))
else:
msg_lines.append(last_line_fmt.format("these directories"))
# Returns the formatted multiline message
return "\n".join(msg_lines) | python | def message_about_scripts_not_on_PATH(scripts):
# type: (Sequence[str]) -> Optional[str]
"""Determine if any scripts are not on PATH and format a warning.
Returns a warning message if one or more scripts are not on PATH,
otherwise None.
"""
if not scripts:
return None
# Group scripts by the path they were installed in
grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set]
for destfile in scripts:
parent_dir = os.path.dirname(destfile)
script_name = os.path.basename(destfile)
grouped_by_dir[parent_dir].add(script_name)
# We don't want to warn for directories that are on PATH.
not_warn_dirs = [
os.path.normcase(i).rstrip(os.sep) for i in
os.environ.get("PATH", "").split(os.pathsep)
]
# If an executable sits with sys.executable, we don't warn for it.
# This covers the case of venv invocations without activating the venv.
executable_loc = os.environ.get("PIP_PYTHON_PATH", sys.executable)
not_warn_dirs.append(os.path.normcase(os.path.dirname(executable_loc)))
warn_for = {
parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
if os.path.normcase(parent_dir) not in not_warn_dirs
}
if not warn_for:
return None
# Format a message
msg_lines = []
for parent_dir, scripts in warn_for.items():
scripts = sorted(scripts)
if len(scripts) == 1:
start_text = "script {} is".format(scripts[0])
else:
start_text = "scripts {} are".format(
", ".join(scripts[:-1]) + " and " + scripts[-1]
)
msg_lines.append(
"The {} installed in '{}' which is not on PATH."
.format(start_text, parent_dir)
)
last_line_fmt = (
"Consider adding {} to PATH or, if you prefer "
"to suppress this warning, use --no-warn-script-location."
)
if len(msg_lines) == 1:
msg_lines.append(last_line_fmt.format("this directory"))
else:
msg_lines.append(last_line_fmt.format("these directories"))
# Returns the formatted multiline message
return "\n".join(msg_lines) | ['def', 'message_about_scripts_not_on_PATH', '(', 'scripts', ')', ':', '# type: (Sequence[str]) -> Optional[str]', 'if', 'not', 'scripts', ':', 'return', 'None', '# Group scripts by the path they were installed in', 'grouped_by_dir', '=', 'collections', '.', 'defaultdict', '(', 'set', ')', '# type: Dict[str, set]', 'for', 'destfile', 'in', 'scripts', ':', 'parent_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'destfile', ')', 'script_name', '=', 'os', '.', 'path', '.', 'basename', '(', 'destfile', ')', 'grouped_by_dir', '[', 'parent_dir', ']', '.', 'add', '(', 'script_name', ')', "# We don't want to warn for directories that are on PATH.", 'not_warn_dirs', '=', '[', 'os', '.', 'path', '.', 'normcase', '(', 'i', ')', '.', 'rstrip', '(', 'os', '.', 'sep', ')', 'for', 'i', 'in', 'os', '.', 'environ', '.', 'get', '(', '"PATH"', ',', '""', ')', '.', 'split', '(', 'os', '.', 'pathsep', ')', ']', "# If an executable sits with sys.executable, we don't warn for it.", '# This covers the case of venv invocations without activating the venv.', 'executable_loc', '=', 'os', '.', 'environ', '.', 'get', '(', '"PIP_PYTHON_PATH"', ',', 'sys', '.', 'executable', ')', 'not_warn_dirs', '.', 'append', '(', 'os', '.', 'path', '.', 'normcase', '(', 'os', '.', 'path', '.', 'dirname', '(', 'executable_loc', ')', ')', ')', 'warn_for', '=', '{', 'parent_dir', ':', 'scripts', 'for', 'parent_dir', ',', 'scripts', 'in', 'grouped_by_dir', '.', 'items', '(', ')', 'if', 'os', '.', 'path', '.', 'normcase', '(', 'parent_dir', ')', 'not', 'in', 'not_warn_dirs', '}', 'if', 'not', 'warn_for', ':', 'return', 'None', '# Format a message', 'msg_lines', '=', '[', ']', 'for', 'parent_dir', ',', 'scripts', 'in', 'warn_for', '.', 'items', '(', ')', ':', 'scripts', '=', 'sorted', '(', 'scripts', ')', 'if', 'len', '(', 'scripts', ')', '==', '1', ':', 'start_text', '=', '"script {} is"', '.', 'format', '(', 'scripts', '[', '0', ']', ')', 'else', ':', 'start_text', '=', '"scripts {} are"', '.', 'format', '(', '", "', '.', 'join', '(', 'scripts', '[', ':', '-', '1', ']', ')', '+', '" and "', '+', 'scripts', '[', '-', '1', ']', ')', 'msg_lines', '.', 'append', '(', '"The {} installed in \'{}\' which is not on PATH."', '.', 'format', '(', 'start_text', ',', 'parent_dir', ')', ')', 'last_line_fmt', '=', '(', '"Consider adding {} to PATH or, if you prefer "', '"to suppress this warning, use --no-warn-script-location."', ')', 'if', 'len', '(', 'msg_lines', ')', '==', '1', ':', 'msg_lines', '.', 'append', '(', 'last_line_fmt', '.', 'format', '(', '"this directory"', ')', ')', 'else', ':', 'msg_lines', '.', 'append', '(', 'last_line_fmt', '.', 'format', '(', '"these directories"', ')', ')', '# Returns the formatted multiline message', 'return', '"\\n"', '.', 'join', '(', 'msg_lines', ')'] | Determine if any scripts are not on PATH and format a warning.
Returns a warning message if one or more scripts are not on PATH,
otherwise None. | ['Determine', 'if', 'any', 'scripts', 'are', 'not', 'on', 'PATH', 'and', 'format', 'a', 'warning', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/wheel.py#L180-L239 |
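The heart of the warning is the per-directory grouping; a minimal sketch of that step alone with invented script paths (the real function then drops directories already on PATH or next to sys.executable before formatting the message):

import collections
import os

scripts = ['/home/user/.local/bin/black', '/home/user/.local/bin/blackd', '/opt/tools/bin/flake8']
grouped_by_dir = collections.defaultdict(set)
for destfile in scripts:
    grouped_by_dir[os.path.dirname(destfile)].add(os.path.basename(destfile))

for parent_dir, names in grouped_by_dir.items():
    print(parent_dir, sorted(names))
# /home/user/.local/bin ['black', 'blackd']
# /opt/tools/bin ['flake8']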
9,128 | Damgaard/PyImgur | pyimgur/__init__.py | Imgur.get_notification | def get_notification(self, id):
"""
Return a Notification object.
:param id: The id of the notification object to return.
"""
url = self._base_url + "/3/notification/{0}".format(id)
resp = self._send_request(url)
return Notification(resp, self) | python | def get_notification(self, id):
"""
Return a Notification object.
:param id: The id of the notification object to return.
"""
url = self._base_url + "/3/notification/{0}".format(id)
resp = self._send_request(url)
return Notification(resp, self) | ['def', 'get_notification', '(', 'self', ',', 'id', ')', ':', 'url', '=', 'self', '.', '_base_url', '+', '"/3/notification/{0}"', '.', 'format', '(', 'id', ')', 'resp', '=', 'self', '.', '_send_request', '(', 'url', ')', 'return', 'Notification', '(', 'resp', ',', 'self', ')'] | Return a Notification object.
:param id: The id of the notification object to return. | ['Return', 'a', 'Notification', 'object', '.'] | train | https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L1005-L1013 |
9,129 | bihealth/vcfpy | vcfpy/header.py | Header.has_header_line | def has_header_line(self, key, id_):
"""Return whether there is a header line with the given ID of the
type given by ``key``
:param key: The VCF header key/line type.
:param id_: The ID value to compare fore
:return: ``True`` if there is a header line starting with ``##${key}=``
in the VCF file having the mapping entry ``ID`` set to ``id_``.
"""
if key not in self._indices:
return False
else:
return id_ in self._indices[key] | python | def has_header_line(self, key, id_):
"""Return whether there is a header line with the given ID of the
type given by ``key``
:param key: The VCF header key/line type.
:param id_: The ID value to compare fore
:return: ``True`` if there is a header line starting with ``##${key}=``
in the VCF file having the mapping entry ``ID`` set to ``id_``.
"""
if key not in self._indices:
return False
else:
return id_ in self._indices[key] | ['def', 'has_header_line', '(', 'self', ',', 'key', ',', 'id_', ')', ':', 'if', 'key', 'not', 'in', 'self', '.', '_indices', ':', 'return', 'False', 'else', ':', 'return', 'id_', 'in', 'self', '.', '_indices', '[', 'key', ']'] | Return whether there is a header line with the given ID of the
type given by ``key``
:param key: The VCF header key/line type.
:param id_: The ID value to compare fore
:return: ``True`` if there is a header line starting with ``##${key}=``
in the VCF file having the mapping entry ``ID`` set to ``id_``. | ['Return', 'whether', 'there', 'is', 'a', 'header', 'line', 'with', 'the', 'given', 'ID', 'of', 'the', 'type', 'given', 'by', 'key'] | train | https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/header.py#L360-L373 |
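A hedged usage sketch, assuming a VCF file exists at the illustrative path and that vcfpy exposes Reader.from_path and a header attribute the way current releases do:

import vcfpy

reader = vcfpy.Reader.from_path('input.vcf')  # path is illustrative
# True only if the header carries a line such as ##INFO=<ID=DP,...>
print(reader.header.has_header_line('INFO', 'DP'))
print(reader.header.has_header_line('FILTER', 'PASS'))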
9,130 | welbornprod/colr | colr/controls.py | move_pos | def move_pos(line=1, column=1, file=sys.stdout):
""" Move the cursor to a new position. Values are 1-based, and default
to 1.
Esc[<line>;<column>H
or
Esc[<line>;<column>f
"""
move.pos(line=line, col=column).write(file=file) | python | def move_pos(line=1, column=1, file=sys.stdout):
""" Move the cursor to a new position. Values are 1-based, and default
to 1.
Esc[<line>;<column>H
or
Esc[<line>;<column>f
"""
move.pos(line=line, col=column).write(file=file) | ['def', 'move_pos', '(', 'line', '=', '1', ',', 'column', '=', '1', ',', 'file', '=', 'sys', '.', 'stdout', ')', ':', 'move', '.', 'pos', '(', 'line', '=', 'line', ',', 'col', '=', 'column', ')', '.', 'write', '(', 'file', '=', 'file', ')'] | Move the cursor to a new position. Values are 1-based, and default
to 1.
Esc[<line>;<column>H
or
Esc[<line>;<column>f | ['Move', 'the', 'cursor', 'to', 'a', 'new', 'position', '.', 'Values', 'are', '1', '-', 'based', 'and', 'default', 'to', '1', '.'] | train | https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/controls.py#L178-L186 |
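The helper ultimately writes the plain ANSI sequence named in its docstring; a standard-library-only sketch that emits the same Esc[<line>;<column>H bytes, assuming an ANSI-capable terminal:

import sys

def move_cursor(line=1, column=1, file=sys.stdout):
    # CSI <line> ; <column> H -- both values 1-based, matching the docstring above.
    file.write('\x1b[{};{}H'.format(line, column))
    file.flush()

move_cursor(5, 20)
print('cursor parked at row 5, column 20')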
9,131 | orb-framework/orb | orb/core/column_types/reference.py | ReferenceColumn.dbRestore | def dbRestore(self, db_value, context=None):
"""
Extracts the db_value provided back from the database.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant>
"""
if isinstance(db_value, (str, unicode)) and db_value.startswith('{'):
try:
db_value = projex.text.safe_eval(db_value)
except StandardError:
log.exception('Invalid reference found')
raise orb.errors.OrbError('Invalid reference found.')
if isinstance(db_value, dict):
cls = self.referenceModel()
if not cls:
raise orb.errors.ModelNotFound(schema=self.reference())
else:
load_event = orb.events.LoadEvent(data=db_value)
# update the expansion information to not propagate to references
if context:
context = context.copy()
expand = context.expandtree(cls)
sub_expand = expand.pop(self.name(), {})
context.expand = context.raw_values['expand'] = sub_expand
db_value = cls(loadEvent=load_event, context=context)
return super(ReferenceColumn, self).dbRestore(db_value, context=context) | python | def dbRestore(self, db_value, context=None):
"""
Extracts the db_value provided back from the database.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant>
"""
if isinstance(db_value, (str, unicode)) and db_value.startswith('{'):
try:
db_value = projex.text.safe_eval(db_value)
except StandardError:
log.exception('Invalid reference found')
raise orb.errors.OrbError('Invalid reference found.')
if isinstance(db_value, dict):
cls = self.referenceModel()
if not cls:
raise orb.errors.ModelNotFound(schema=self.reference())
else:
load_event = orb.events.LoadEvent(data=db_value)
# update the expansion information to not propagate to references
if context:
context = context.copy()
expand = context.expandtree(cls)
sub_expand = expand.pop(self.name(), {})
context.expand = context.raw_values['expand'] = sub_expand
db_value = cls(loadEvent=load_event, context=context)
return super(ReferenceColumn, self).dbRestore(db_value, context=context) | ['def', 'dbRestore', '(', 'self', ',', 'db_value', ',', 'context', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'db_value', ',', '(', 'str', ',', 'unicode', ')', ')', 'and', 'db_value', '.', 'startswith', '(', "'{'", ')', ':', 'try', ':', 'db_value', '=', 'projex', '.', 'text', '.', 'safe_eval', '(', 'db_value', ')', 'except', 'StandardError', ':', 'log', '.', 'exception', '(', "'Invalid reference found'", ')', 'raise', 'orb', '.', 'errors', '.', 'OrbError', '(', "'Invalid reference found.'", ')', 'if', 'isinstance', '(', 'db_value', ',', 'dict', ')', ':', 'cls', '=', 'self', '.', 'referenceModel', '(', ')', 'if', 'not', 'cls', ':', 'raise', 'orb', '.', 'errors', '.', 'ModelNotFound', '(', 'schema', '=', 'self', '.', 'reference', '(', ')', ')', 'else', ':', 'load_event', '=', 'orb', '.', 'events', '.', 'LoadEvent', '(', 'data', '=', 'db_value', ')', '# update the expansion information to not propagate to references', 'if', 'context', ':', 'context', '=', 'context', '.', 'copy', '(', ')', 'expand', '=', 'context', '.', 'expandtree', '(', 'cls', ')', 'sub_expand', '=', 'expand', '.', 'pop', '(', 'self', '.', 'name', '(', ')', ',', '{', '}', ')', 'context', '.', 'expand', '=', 'context', '.', 'raw_values', '[', "'expand'", ']', '=', 'sub_expand', 'db_value', '=', 'cls', '(', 'loadEvent', '=', 'load_event', ',', 'context', '=', 'context', ')', 'return', 'super', '(', 'ReferenceColumn', ',', 'self', ')', '.', 'dbRestore', '(', 'db_value', ',', 'context', '=', 'context', ')'] | Extracts the db_value provided back from the database.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant> | ['Extracts', 'the', 'db_value', 'provided', 'back', 'from', 'the', 'database', '.'] | train | https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/reference.py#L89-L121 |
9,132 | dhermes/bezier | src/bezier/_surface_intersection.py | verify_edge_segments | def verify_edge_segments(edge_infos):
"""Verify that the edge segments in an intersection are valid.
.. note::
This is a helper used only by :func:`generic_intersect`.
Args:
edge_infos (Optional[list]): List of "edge info" lists. Each list
represents a curved polygon and contains 3-tuples of edge index,
start and end (see the output of :func:`ends_to_curve`).
Raises:
ValueError: If two consecutive edge segments lie on the same edge
index.
ValueError: If the start and end parameter are "invalid" (they should
be between 0 and 1 and start should be strictly less than end).
"""
if edge_infos is None:
return
for edge_info in edge_infos:
num_segments = len(edge_info)
for index in six.moves.xrange(-1, num_segments - 1):
index1, start1, end1 = edge_info[index]
# First, verify the start and end parameters for the current
# segment.
if not 0.0 <= start1 < end1 <= 1.0:
raise ValueError(BAD_SEGMENT_PARAMS, edge_info[index])
# Then, verify that the indices are not the same.
index2, _, _ = edge_info[index + 1]
if index1 == index2:
raise ValueError(
SEGMENTS_SAME_EDGE, edge_info[index], edge_info[index + 1]
) | python | def verify_edge_segments(edge_infos):
"""Verify that the edge segments in an intersection are valid.
.. note::
This is a helper used only by :func:`generic_intersect`.
Args:
edge_infos (Optional[list]): List of "edge info" lists. Each list
represents a curved polygon and contains 3-tuples of edge index,
start and end (see the output of :func:`ends_to_curve`).
Raises:
ValueError: If two consecutive edge segments lie on the same edge
index.
ValueError: If the start and end parameter are "invalid" (they should
be between 0 and 1 and start should be strictly less than end).
"""
if edge_infos is None:
return
for edge_info in edge_infos:
num_segments = len(edge_info)
for index in six.moves.xrange(-1, num_segments - 1):
index1, start1, end1 = edge_info[index]
# First, verify the start and end parameters for the current
# segment.
if not 0.0 <= start1 < end1 <= 1.0:
raise ValueError(BAD_SEGMENT_PARAMS, edge_info[index])
# Then, verify that the indices are not the same.
index2, _, _ = edge_info[index + 1]
if index1 == index2:
raise ValueError(
SEGMENTS_SAME_EDGE, edge_info[index], edge_info[index + 1]
) | ['def', 'verify_edge_segments', '(', 'edge_infos', ')', ':', 'if', 'edge_infos', 'is', 'None', ':', 'return', 'for', 'edge_info', 'in', 'edge_infos', ':', 'num_segments', '=', 'len', '(', 'edge_info', ')', 'for', 'index', 'in', 'six', '.', 'moves', '.', 'xrange', '(', '-', '1', ',', 'num_segments', '-', '1', ')', ':', 'index1', ',', 'start1', ',', 'end1', '=', 'edge_info', '[', 'index', ']', '# First, verify the start and end parameters for the current', '# segment.', 'if', 'not', '0.0', '<=', 'start1', '<', 'end1', '<=', '1.0', ':', 'raise', 'ValueError', '(', 'BAD_SEGMENT_PARAMS', ',', 'edge_info', '[', 'index', ']', ')', '# Then, verify that the indices are not the same.', 'index2', ',', '_', ',', '_', '=', 'edge_info', '[', 'index', '+', '1', ']', 'if', 'index1', '==', 'index2', ':', 'raise', 'ValueError', '(', 'SEGMENTS_SAME_EDGE', ',', 'edge_info', '[', 'index', ']', ',', 'edge_info', '[', 'index', '+', '1', ']', ')'] | Verify that the edge segments in an intersection are valid.
.. note::
This is a helper used only by :func:`generic_intersect`.
Args:
edge_infos (Optional[list]): List of "edge info" lists. Each list
represents a curved polygon and contains 3-tuples of edge index,
start and end (see the output of :func:`ends_to_curve`).
Raises:
ValueError: If two consecutive edge segments lie on the same edge
index.
ValueError: If the start and end parameter are "invalid" (they should
be between 0 and 1 and start should be strictly less than end). | ['Verify', 'that', 'the', 'edge', 'segments', 'in', 'an', 'intersection', 'are', 'valid', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_intersection.py#L456-L491 |
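An illustrative shape for the edge_infos argument, with one list that satisfies both checks and two that would make the helper raise ValueError (the numbers are invented; in the library they come from ends_to_curve):

# One curved polygon, three segments of (edge index, start, end).
valid = [[(0, 0.0, 0.5), (1, 0.25, 1.0), (2, 0.0, 1.0)]]

# Consecutive segments share edge index 1 -> SEGMENTS_SAME_EDGE.
same_edge = [[(1, 0.0, 0.5), (1, 0.5, 1.0), (2, 0.0, 1.0)]]

# Start is not strictly less than end -> BAD_SEGMENT_PARAMS.
bad_params = [[(0, 0.75, 0.25), (1, 0.0, 1.0), (2, 0.0, 1.0)]]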
9,133 | google/grr | grr/server/grr_response_server/gui/api_plugins/report_plugins/server_report_plugins.py | _EntryToEvent | def _EntryToEvent(entry, handlers, transformers):
"""Converts an APIAuditEntry to a legacy AuditEvent."""
event = rdf_events.AuditEvent(
timestamp=entry.timestamp,
user=entry.username,
action=handlers[entry.router_method_name])
for fn in transformers:
fn(entry, event)
return event | python | def _EntryToEvent(entry, handlers, transformers):
"""Converts an APIAuditEntry to a legacy AuditEvent."""
event = rdf_events.AuditEvent(
timestamp=entry.timestamp,
user=entry.username,
action=handlers[entry.router_method_name])
for fn in transformers:
fn(entry, event)
return event | ['def', '_EntryToEvent', '(', 'entry', ',', 'handlers', ',', 'transformers', ')', ':', 'event', '=', 'rdf_events', '.', 'AuditEvent', '(', 'timestamp', '=', 'entry', '.', 'timestamp', ',', 'user', '=', 'entry', '.', 'username', ',', 'action', '=', 'handlers', '[', 'entry', '.', 'router_method_name', ']', ')', 'for', 'fn', 'in', 'transformers', ':', 'fn', '(', 'entry', ',', 'event', ')', 'return', 'event'] | Converts an APIAuditEntry to a legacy AuditEvent. | ['Converts', 'an', 'APIAuditEntry', 'to', 'a', 'legacy', 'AuditEvent', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/report_plugins/server_report_plugins.py#L54-L64 |
9,134 | SoCo/SoCo | soco/core.py | SoCo.remove_from_sonos_playlist | def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
"""Remove a track from a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.remove_from_sonos_playlist(sonos_playlist, track=0)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
track (int): **0**-based position of the track to move. The first
track is track 0, just like indexing into a Python list.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
return self.reorder_sonos_playlist(sonos_playlist, int(track), None,
update_id) | python | def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
"""Remove a track from a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.remove_from_sonos_playlist(sonos_playlist, track=0)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
track (int): **0**-based position of the track to move. The first
track is track 0, just like indexing into a Python list.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
return self.reorder_sonos_playlist(sonos_playlist, int(track), None,
update_id) | ['def', 'remove_from_sonos_playlist', '(', 'self', ',', 'sonos_playlist', ',', 'track', ',', 'update_id', '=', '0', ')', ':', 'return', 'self', '.', 'reorder_sonos_playlist', '(', 'sonos_playlist', ',', 'int', '(', 'track', ')', ',', 'None', ',', 'update_id', ')'] | Remove a track from a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.remove_from_sonos_playlist(sonos_playlist, track=0)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
track (int): **0**-based position of the track to move. The first
track is track 0, just like indexing into a Python list.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist` | ['Remove', 'a', 'track', 'from', 'a', 'Sonos', 'Playlist', '.', 'This', 'is', 'a', 'convenience', 'method', 'for', ':', 'py', ':', 'meth', ':', 'reorder_sonos_playlist', '.'] | train | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L1922-L1947 |
9,135 | wandb/client | wandb/vendor/prompt_toolkit/terminal/win32_output.py | ColorLookupTable.lookup_fg_color | def lookup_fg_color(self, fg_color):
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
"""
# Foreground.
if fg_color in FG_ANSI_COLORS:
return FG_ANSI_COLORS[fg_color]
else:
return self._color_indexes(fg_color)[0] | python | def lookup_fg_color(self, fg_color):
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
"""
# Foreground.
if fg_color in FG_ANSI_COLORS:
return FG_ANSI_COLORS[fg_color]
else:
return self._color_indexes(fg_color)[0] | ['def', 'lookup_fg_color', '(', 'self', ',', 'fg_color', ')', ':', '# Foreground.', 'if', 'fg_color', 'in', 'FG_ANSI_COLORS', ':', 'return', 'FG_ANSI_COLORS', '[', 'fg_color', ']', 'else', ':', 'return', 'self', '.', '_color_indexes', '(', 'fg_color', ')', '[', '0', ']'] | Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param fg_color: Foreground as text. E.g. 'ffffff' or 'red' | ['Return', 'the', 'color', 'for', 'use', 'in', 'the', 'windll', '.', 'kernel32', '.', 'SetConsoleTextAttribute', 'API', 'call', '.'] | train | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/terminal/win32_output.py#L532-L543 |
9,136 | sentinel-hub/sentinelhub-py | sentinelhub/fis.py | FisService.get_filename | def get_filename(request, geometry):
""" Returns filename
Returns the filename's location on disk where data is or is going to be stored.
The files are stored in the folder specified by the user when initialising OGC-type
of request. The name of the file has the following structure:
{service_type}_{layer}_{geometry}_{crs}_{start_time}_{end_time}_{resolution}_{bins}_{histogram_type}_
*{custom_url_params}.json
:param request: FIS request
:type request: FisRequest
:param geometry: geometry object
:type: BBox or Geometry
:return: filename for this request
:rtype: str
"""
date_interval = parse_time_interval(request.time)
geometry_string = geometry.wkt if isinstance(geometry, Geometry) else str(geometry)
filename = '_'.join([
str(request.service_type.value),
request.layer,
geometry_string,
CRS.ogc_string(geometry.crs),
'{}_{}'.format(date_interval[0], date_interval[1]),
request.resolution,
str(request.bins) if request.bins else '',
request.histogram_type.value if request.histogram_type else ''
])
filename = OgcImageService.filename_add_custom_url_params(filename, request)
return OgcImageService.finalize_filename(filename, MimeType.JSON) | python | def get_filename(request, geometry):
""" Returns filename
Returns the filename's location on disk where data is or is going to be stored.
The files are stored in the folder specified by the user when initialising OGC-type
of request. The name of the file has the following structure:
{service_type}_{layer}_{geometry}_{crs}_{start_time}_{end_time}_{resolution}_{bins}_{histogram_type}_
*{custom_url_params}.json
:param request: FIS request
:type request: FisRequest
:param geometry: geometry object
:type: BBox or Geometry
:return: filename for this request
:rtype: str
"""
date_interval = parse_time_interval(request.time)
geometry_string = geometry.wkt if isinstance(geometry, Geometry) else str(geometry)
filename = '_'.join([
str(request.service_type.value),
request.layer,
geometry_string,
CRS.ogc_string(geometry.crs),
'{}_{}'.format(date_interval[0], date_interval[1]),
request.resolution,
str(request.bins) if request.bins else '',
request.histogram_type.value if request.histogram_type else ''
])
filename = OgcImageService.filename_add_custom_url_params(filename, request)
return OgcImageService.finalize_filename(filename, MimeType.JSON) | ['def', 'get_filename', '(', 'request', ',', 'geometry', ')', ':', 'date_interval', '=', 'parse_time_interval', '(', 'request', '.', 'time', ')', 'geometry_string', '=', 'geometry', '.', 'wkt', 'if', 'isinstance', '(', 'geometry', ',', 'Geometry', ')', 'else', 'str', '(', 'geometry', ')', 'filename', '=', "'_'", '.', 'join', '(', '[', 'str', '(', 'request', '.', 'service_type', '.', 'value', ')', ',', 'request', '.', 'layer', ',', 'geometry_string', ',', 'CRS', '.', 'ogc_string', '(', 'geometry', '.', 'crs', ')', ',', "'{}_{}'", '.', 'format', '(', 'date_interval', '[', '0', ']', ',', 'date_interval', '[', '1', ']', ')', ',', 'request', '.', 'resolution', ',', 'str', '(', 'request', '.', 'bins', ')', 'if', 'request', '.', 'bins', 'else', "''", ',', 'request', '.', 'histogram_type', '.', 'value', 'if', 'request', '.', 'histogram_type', 'else', "''", ']', ')', 'filename', '=', 'OgcImageService', '.', 'filename_add_custom_url_params', '(', 'filename', ',', 'request', ')', 'return', 'OgcImageService', '.', 'finalize_filename', '(', 'filename', ',', 'MimeType', '.', 'JSON', ')'] | Returns filename
Returns the filename's location on disk where data is or is going to be stored.
The files are stored in the folder specified by the user when initialising OGC-type
of request. The name of the file has the following structure:
{service_type}_{layer}_{geometry}_{crs}_{start_time}_{end_time}_{resolution}_{bins}_{histogram_type}_
*{custom_url_params}.json
:param request: FIS request
:type request: FisRequest
:param geometry: geometry object
:type: BBox or Geometry
:return: filename for this request
:rtype: str | ['Returns', 'filename'] | train | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/fis.py#L41-L74 |
9,137 | learningequality/ricecooker | ricecooker/managers/progress.py | RestoreManager.load_progress | def load_progress(self, resume_step):
""" load_progress: loads progress from restoration file
Args: resume_step (str): step at which to resume session
Returns: manager with progress from step
"""
resume_step = Status[resume_step]
progress_path = self.get_restore_path(resume_step)
# If progress is corrupted, revert to step before
while not self.check_for_session(resume_step):
config.LOGGER.error("Ricecooker has not reached {0} status. Reverting to earlier step...".format(resume_step.name))
# All files are corrupted or absent, restart process
if resume_step.value - 1 < 0:
self.init_session()
return self
resume_step = Status(resume_step.value - 1)
progress_path = self.get_restore_path(resume_step)
config.LOGGER.error("Starting from status {0}".format(resume_step.name))
# Load manager
with open(progress_path, 'rb') as handle:
manager = pickle.load(handle)
if isinstance(manager, RestoreManager):
return manager
else:
return self | python | def load_progress(self, resume_step):
""" load_progress: loads progress from restoration file
Args: resume_step (str): step at which to resume session
Returns: manager with progress from step
"""
resume_step = Status[resume_step]
progress_path = self.get_restore_path(resume_step)
# If progress is corrupted, revert to step before
while not self.check_for_session(resume_step):
config.LOGGER.error("Ricecooker has not reached {0} status. Reverting to earlier step...".format(resume_step.name))
# All files are corrupted or absent, restart process
if resume_step.value - 1 < 0:
self.init_session()
return self
resume_step = Status(resume_step.value - 1)
progress_path = self.get_restore_path(resume_step)
config.LOGGER.error("Starting from status {0}".format(resume_step.name))
# Load manager
with open(progress_path, 'rb') as handle:
manager = pickle.load(handle)
if isinstance(manager, RestoreManager):
return manager
else:
return self | ['def', 'load_progress', '(', 'self', ',', 'resume_step', ')', ':', 'resume_step', '=', 'Status', '[', 'resume_step', ']', 'progress_path', '=', 'self', '.', 'get_restore_path', '(', 'resume_step', ')', '# If progress is corrupted, revert to step before', 'while', 'not', 'self', '.', 'check_for_session', '(', 'resume_step', ')', ':', 'config', '.', 'LOGGER', '.', 'error', '(', '"Ricecooker has not reached {0} status. Reverting to earlier step..."', '.', 'format', '(', 'resume_step', '.', 'name', ')', ')', '# All files are corrupted or absent, restart process', 'if', 'resume_step', '.', 'value', '-', '1', '<', '0', ':', 'self', '.', 'init_session', '(', ')', 'return', 'self', 'resume_step', '=', 'Status', '(', 'resume_step', '.', 'value', '-', '1', ')', 'progress_path', '=', 'self', '.', 'get_restore_path', '(', 'resume_step', ')', 'config', '.', 'LOGGER', '.', 'error', '(', '"Starting from status {0}"', '.', 'format', '(', 'resume_step', '.', 'name', ')', ')', '# Load manager', 'with', 'open', '(', 'progress_path', ',', "'rb'", ')', 'as', 'handle', ':', 'manager', '=', 'pickle', '.', 'load', '(', 'handle', ')', 'if', 'isinstance', '(', 'manager', ',', 'RestoreManager', ')', ':', 'return', 'manager', 'else', ':', 'return', 'self'] | load_progress: loads progress from restoration file
Args: resume_step (str): step at which to resume session
Returns: manager with progress from step | ['load_progress', ':', 'loads', 'progress', 'from', 'restoration', 'file', 'Args', ':', 'resume_step', '(', 'str', ')', ':', 'step', 'at', 'which', 'to', 'resume', 'session', 'Returns', ':', 'manager', 'with', 'progress', 'from', 'step'] | train | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/managers/progress.py#L100-L125 |
9,138 | DataONEorg/d1_python | lib_client/src/d1_client/solr_client.py | SolrClient._get_solr_type | def _get_solr_type(self, field):
"""Returns the Solr type of the specified field name.
Assumes the convention of dynamic fields using an underscore + type character
code for the field name.
"""
field_type = 'string'
try:
field_type = FIELD_TYPE_CONVERSION_MAP[field]
return field_type
except:
pass
fta = field.split('_')
if len(fta) > 1:
ft = fta[len(fta) - 1]
try:
field_type = FIELD_TYPE_CONVERSION_MAP[ft]
# cache the type so it's used next time
FIELD_TYPE_CONVERSION_MAP[field] = field_type
except:
pass
return field_type | python | def _get_solr_type(self, field):
"""Returns the Solr type of the specified field name.
Assumes the convention of dynamic fields using an underscore + type character
code for the field name.
"""
field_type = 'string'
try:
field_type = FIELD_TYPE_CONVERSION_MAP[field]
return field_type
except:
pass
fta = field.split('_')
if len(fta) > 1:
ft = fta[len(fta) - 1]
try:
field_type = FIELD_TYPE_CONVERSION_MAP[ft]
# cache the type so it's used next time
FIELD_TYPE_CONVERSION_MAP[field] = field_type
except:
pass
return field_type | ['def', '_get_solr_type', '(', 'self', ',', 'field', ')', ':', 'field_type', '=', "'string'", 'try', ':', 'field_type', '=', 'FIELD_TYPE_CONVERSION_MAP', '[', 'field', ']', 'return', 'field_type', 'except', ':', 'pass', 'fta', '=', 'field', '.', 'split', '(', "'_'", ')', 'if', 'len', '(', 'fta', ')', '>', '1', ':', 'ft', '=', 'fta', '[', 'len', '(', 'fta', ')', '-', '1', ']', 'try', ':', 'field_type', '=', 'FIELD_TYPE_CONVERSION_MAP', '[', 'ft', ']', "# cache the type so it's used next time", 'FIELD_TYPE_CONVERSION_MAP', '[', 'field', ']', '=', 'field_type', 'except', ':', 'pass', 'return', 'field_type'] | Returns the Solr type of the specified field name.
Assumes the convention of dynamic fields using an underscore + type character
code for the field name. | ['Returns', 'the', 'Solr', 'type', 'of', 'the', 'specified', 'field', 'name', '.'] | train | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/solr_client.py#L453-L475 |
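A standalone sketch of the suffix convention with an illustrative subset of type codes (the real FIELD_TYPE_CONVERSION_MAP in d1_client carries the project's full table):

# Hypothetical type-code map; the code is whatever follows the last underscore.
TYPE_CODES = {'dt': 'date', 'l': 'long', 'f': 'float', 'b': 'boolean'}

def solr_type(field, cache=TYPE_CODES):
    if field in cache:
        return cache[field]
    field_type = cache.get(field.rsplit('_', 1)[-1], 'string')
    cache[field] = field_type  # remember the full name for next time
    return field_type

print(solr_type('dateUploaded_dt'))  # -> 'date'
print(solr_type('title'))            # no recognised suffix -> 'string'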
9,139 | taizilongxu/douban.fm | doubanfm/API/netease_api.py | Netease.search | def search(self, song_title, limit=1):
"""
Search for a song by its title
: params : song_title: the song title to search for
limit: number of results to return
"""
url = "http://music.163.com/api/search/pc"
headers = {'Cookie': 'appver=1.5.2',
'Referer': 'http://music.163.com'}
payload = {'s': song_title,
'limit': limit,
'type': 1}
r = requests.post(url, params=payload, headers=headers)
data = json.loads(r.text)
if data['code'] == 200:
return data['result']['songs'][0]
else:
return None | python | def search(self, song_title, limit=1):
"""
Search for a song by its title
: params : song_title: the song title to search for
limit: number of results to return
"""
url = "http://music.163.com/api/search/pc"
headers = {'Cookie': 'appver=1.5.2',
'Referer': 'http://music.163.com'}
payload = {'s': song_title,
'limit': limit,
'type': 1}
r = requests.post(url, params=payload, headers=headers)
data = json.loads(r.text)
if data['code'] == 200:
return data['result']['songs'][0]
else:
return None | ['def', 'search', '(', 'self', ',', 'song_title', ',', 'limit', '=', '1', ')', ':', 'url', '=', '"http://music.163.com/api/search/pc"', 'headers', '=', '{', "'Cookie'", ':', "'appver=1.5.2'", ',', "'Referer'", ':', "'http://music.163.com'", '}', 'payload', '=', '{', "'s'", ':', 'song_title', ',', "'limit'", ':', 'limit', ',', "'type'", ':', '1', '}', 'r', '=', 'requests', '.', 'post', '(', 'url', ',', 'params', '=', 'payload', ',', 'headers', '=', 'headers', ')', 'data', '=', 'json', '.', 'loads', '(', 'r', '.', 'text', ')', 'if', 'data', '[', "'code'", ']', '==', '200', ':', 'return', 'data', '[', "'result'", ']', '[', "'songs'", ']', '[', '0', ']', 'else', ':', 'return', 'None'] | Search for a song by its title
: params : song_title: the song title to search for
limit: number of results to return | ['Search', 'for', 'a', 'song', 'by', 'its', 'title'] | train | https://github.com/taizilongxu/douban.fm/blob/d65126d3bd3e12d8a7109137caff8da0efc22b2f/doubanfm/API/netease_api.py#L24-L44
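The call the method wraps can be reproduced with requests alone; a sketch using the same endpoint, headers and payload shown above (whether the unofficial API still answers is outside this record's control):

import json
import requests

url = 'http://music.163.com/api/search/pc'
headers = {'Cookie': 'appver=1.5.2', 'Referer': 'http://music.163.com'}
payload = {'s': 'Hotel California', 'limit': 1, 'type': 1}

r = requests.post(url, params=payload, headers=headers)
data = json.loads(r.text)
if data.get('code') == 200:
    print(data['result']['songs'][0])  # first matching song record
else:
    print('search failed, code:', data.get('code'))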
9,140 | qacafe/cdrouter.py | cdrouter/filters.py | Field.eq | def eq(self, value):
"""Construct an equal to (``=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field
"""
self.op = '='
self.negate_op = '!='
self.value = self._value(value)
return self | python | def eq(self, value):
"""Construct an equal to (``=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field
"""
self.op = '='
self.negate_op = '!='
self.value = self._value(value)
return self | ['def', 'eq', '(', 'self', ',', 'value', ')', ':', 'self', '.', 'op', '=', "'='", 'self', '.', 'negate_op', '=', "'!='", 'self', '.', 'value', '=', 'self', '.', '_value', '(', 'value', ')', 'return', 'self'] | Construct an equal to (``=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field | ['Construct', 'an', 'equal', 'to', '(', '=', ')', 'filter', '.'] | train | https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/filters.py#L101-L111 |
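
A hedged usage sketch; the assumption that Field takes the field name in its constructor, and the field/value names themselves, are illustrative:

    from cdrouter.filters import Field

    # build a `status = 'completed'` filter; eq() stores op '=', negate_op '!=' and the value
    filt = Field('status').eq('completed')
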
9,141 | openstack/proliantutils | proliantutils/redfish/redfish.py | RedfishOperations.set_vm_status | def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status
It sets the boot option for virtual media device.
Note: boot option can be set only for CD device.
:param device: virual media device
:param boot_option: boot option to set on the virtual media device
:param write_protect: set the write protect flag on the vmedia device
Note: It's ignored. In Redfish it is read-only.
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the device is not valid.
"""
# CONNECT is a RIBCL call. There is no such property to set in Redfish.
if boot_option == 'CONNECT':
return
self._validate_virtual_media(device)
if boot_option not in BOOT_OPTION_MAP:
msg = (self._("Virtual media boot option '%s' is invalid.")
% boot_option)
LOG.debug(msg)
raise exception.IloInvalidInputError(msg)
manager = self._get_sushy_manager(PROLIANT_MANAGER_ID)
try:
vmedia_device = (
manager.virtual_media.get_member_device(
VIRTUAL_MEDIA_MAP[device]))
vmedia_device.set_vm_status(BOOT_OPTION_MAP[boot_option])
except sushy.exceptions.SushyError as e:
msg = (self._("The Redfish controller failed to set the virtual "
"media status for '%(device)s'. Error %(error)s") %
{'device': device, 'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg) | python | def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status
It sets the boot option for virtual media device.
Note: boot option can be set only for CD device.
:param device: virual media device
:param boot_option: boot option to set on the virtual media device
:param write_protect: set the write protect flag on the vmedia device
Note: It's ignored. In Redfish it is read-only.
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the device is not valid.
"""
# CONNECT is a RIBCL call. There is no such property to set in Redfish.
if boot_option == 'CONNECT':
return
self._validate_virtual_media(device)
if boot_option not in BOOT_OPTION_MAP:
msg = (self._("Virtual media boot option '%s' is invalid.")
% boot_option)
LOG.debug(msg)
raise exception.IloInvalidInputError(msg)
manager = self._get_sushy_manager(PROLIANT_MANAGER_ID)
try:
vmedia_device = (
manager.virtual_media.get_member_device(
VIRTUAL_MEDIA_MAP[device]))
vmedia_device.set_vm_status(BOOT_OPTION_MAP[boot_option])
except sushy.exceptions.SushyError as e:
msg = (self._("The Redfish controller failed to set the virtual "
"media status for '%(device)s'. Error %(error)s") %
{'device': device, 'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg) | ['def', 'set_vm_status', '(', 'self', ',', 'device', '=', "'FLOPPY'", ',', 'boot_option', '=', "'BOOT_ONCE'", ',', 'write_protect', '=', "'YES'", ')', ':', '# CONNECT is a RIBCL call. There is no such property to set in Redfish.', 'if', 'boot_option', '==', "'CONNECT'", ':', 'return', 'self', '.', '_validate_virtual_media', '(', 'device', ')', 'if', 'boot_option', 'not', 'in', 'BOOT_OPTION_MAP', ':', 'msg', '=', '(', 'self', '.', '_', '(', '"Virtual media boot option \'%s\' is invalid."', ')', '%', 'boot_option', ')', 'LOG', '.', 'debug', '(', 'msg', ')', 'raise', 'exception', '.', 'IloInvalidInputError', '(', 'msg', ')', 'manager', '=', 'self', '.', '_get_sushy_manager', '(', 'PROLIANT_MANAGER_ID', ')', 'try', ':', 'vmedia_device', '=', '(', 'manager', '.', 'virtual_media', '.', 'get_member_device', '(', 'VIRTUAL_MEDIA_MAP', '[', 'device', ']', ')', ')', 'vmedia_device', '.', 'set_vm_status', '(', 'BOOT_OPTION_MAP', '[', 'boot_option', ']', ')', 'except', 'sushy', '.', 'exceptions', '.', 'SushyError', 'as', 'e', ':', 'msg', '=', '(', 'self', '.', '_', '(', '"The Redfish controller failed to set the virtual "', '"media status for \'%(device)s\'. Error %(error)s"', ')', '%', '{', "'device'", ':', 'device', ',', "'error'", ':', 'str', '(', 'e', ')', '}', ')', 'LOG', '.', 'debug', '(', 'msg', ')', 'raise', 'exception', '.', 'IloError', '(', 'msg', ')'] | Sets the Virtual Media drive status
It sets the boot option for virtual media device.
Note: boot option can be set only for CD device.
:param device: virual media device
:param boot_option: boot option to set on the virtual media device
:param write_protect: set the write protect flag on the vmedia device
Note: It's ignored. In Redfish it is read-only.
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the device is not valid. | ['Sets', 'the', 'Virtual', 'Media', 'drive', 'status'] | train | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L434-L471 |
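
A hedged usage sketch (the RedfishOperations constructor arguments are assumptions; only the set_vm_status call mirrors this row):

    from proliantutils.redfish import redfish

    ilo = redfish.RedfishOperations('10.0.0.5', username='admin', password='secret')  # hypothetical credentials
    ilo.set_vm_status(device='CDROM', boot_option='BOOT_ONCE')  # boot options only apply to the CD device
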
9,142 | spyder-ide/spyder | spyder/config/gui.py | get_color_scheme | def get_color_scheme(name):
"""Get syntax color scheme"""
color_scheme = {}
for key in sh.COLOR_SCHEME_KEYS:
color_scheme[key] = CONF.get("appearance", "%s/%s" % (name, key))
return color_scheme | python | def get_color_scheme(name):
"""Get syntax color scheme"""
color_scheme = {}
for key in sh.COLOR_SCHEME_KEYS:
color_scheme[key] = CONF.get("appearance", "%s/%s" % (name, key))
return color_scheme | ['def', 'get_color_scheme', '(', 'name', ')', ':', 'color_scheme', '=', '{', '}', 'for', 'key', 'in', 'sh', '.', 'COLOR_SCHEME_KEYS', ':', 'color_scheme', '[', 'key', ']', '=', 'CONF', '.', 'get', '(', '"appearance"', ',', '"%s/%s"', '%', '(', 'name', ',', 'key', ')', ')', 'return', 'color_scheme'] | Get syntax color scheme | ['Get', 'syntax', 'color', 'scheme'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/gui.py#L142-L147 |
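
A hedged usage sketch; the scheme name is a guess and the call only works inside a configured Spyder environment:

    from spyder.config.gui import get_color_scheme

    scheme = get_color_scheme('monokai')      # hypothetical scheme name
    print(scheme.get('background'))           # one entry per key in sh.COLOR_SCHEME_KEYS
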
9,143 | inveniosoftware-contrib/invenio-classifier | invenio_classifier/keyworder.py | get_composite_keywords | def get_composite_keywords(ckw_db, fulltext, skw_spans):
"""Return a list of composite keywords bound with number of occurrences.
:param ckw_db: list of KewordToken objects
(they are supposed to be composite ones)
:param fulltext: string to search in
:param skw_spans: dictionary of already identified single keywords
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], [info_about_matches] ],
..
}
"""
timer_start = time.clock()
# Build the list of composite candidates
ckw_out = {}
skw_as_components = []
for composite_keyword in ckw_db.values():
# Counters for the composite keyword. First count is for the
# number of occurrences in the whole document and second count
# is for the human defined keywords.
ckw_count = 0
matched_spans = []
# First search in the fulltext using the regex pattern of the whole
# composite keyword (including the alternative labels)
for regex in composite_keyword.regex:
for match in regex.finditer(fulltext):
span = list(match.span())
span[1] -= 1
span = tuple(span)
if span not in matched_spans:
ckw_count += 1
matched_spans.append(span)
# Get the single keywords locations.
try:
components = composite_keyword.compositeof
except AttributeError:
current_app.logger.error(
"Cached ontology is corrupted. Please "
"remove the cached ontology in your temporary file."
)
raise OntologyError('Cached ontology is corrupted.')
spans = []
try:
spans = [skw_spans[component][0] for component in components]
except KeyError:
# Some of the keyword components are not to be found in the text.
# Therefore we cannot continue because the match is incomplete.
pass
ckw_spans = []
for index in range(len(spans) - 1):
len_ckw = len(ckw_spans)
if ckw_spans: # cause ckw_spans include the previous
previous_spans = ckw_spans
else:
previous_spans = spans[index]
for new_span in [(span0, colmd1) for span0 in previous_spans
for colmd1 in spans[index + 1]]:
span = _get_ckw_span(fulltext, new_span)
if span is not None:
ckw_spans.append(span)
# the spans must be overlapping to be included
if index > 0 and ckw_spans:
_ckw_spans = []
for _span in ckw_spans[len_ckw:]: # new spans
for _colmd2 in ckw_spans[:len_ckw]:
s = _span_overlapping(_span, _colmd2)
if s:
_ckw_spans.append(s)
ckw_spans = _ckw_spans
for matched_span in [mspan for mspan in ckw_spans
if mspan not in matched_spans]:
ckw_count += 1
matched_spans.append(matched_span)
if ckw_count:
# Gather the component counts.
component_counts = []
for component in components:
skw_as_components.append(component)
# Get the single keyword count.
try:
component_counts.append(len(skw_spans[component][0]))
except KeyError:
component_counts.append(0)
# Store the composite keyword
ckw_out[composite_keyword] = [matched_spans, component_counts]
# Remove the single keywords that appear as components from the list
# of single keywords.
for skw in skw_as_components:
try:
del skw_spans[skw]
except KeyError:
pass
# Remove the composite keywords that are fully present in
# longer composite keywords
_ckw_base = filter(lambda x: len(x.compositeof) == 2, ckw_out.keys())
_ckw_extended = sorted(
filter(lambda x: len(x.compositeof) > 2, ckw_out.keys()),
key=lambda x: len(x.compositeof))
if _ckw_extended:
candidates = []
for kw1 in _ckw_base:
s1 = set(kw1.compositeof)
for kw2 in _ckw_extended:
s2 = set(kw2.compositeof)
if s1.issubset(s2):
candidates.append((kw1, kw2))
# break # don't stop because this keyword may be
# partly contained by kw_x and kw_y
for i in range(len(_ckw_extended)):
kw1 = _ckw_extended[i]
s1 = set(kw1.compositeof)
for ii in range(i + 1, len(_ckw_extended)):
kw2 = _ckw_extended[ii]
s2 = set(kw2.compositeof)
if s1.issubset(s2):
candidates.append((kw1, kw2))
break
if candidates:
for kw1, kw2 in candidates:
try:
match1 = ckw_out[kw1] # subset of the kw2
match2 = ckw_out[kw2]
except KeyError:
continue
positions1 = match1[0]
for pos1 in positions1:
for pos2 in match2[0]:
if _span_overlapping(pos1, pos2):
del positions1[positions1.index(pos1)]
# if we removed all the matches also
# delete the keyword
if len(positions1) == 0:
del ckw_out[kw1]
break
current_app.logger.info(
"Matching composite keywords... %d keywords found "
"in %.1f sec." % (len(ckw_out), time.clock() - timer_start),
)
return ckw_out | python | def get_composite_keywords(ckw_db, fulltext, skw_spans):
"""Return a list of composite keywords bound with number of occurrences.
:param ckw_db: list of KewordToken objects
(they are supposed to be composite ones)
:param fulltext: string to search in
:param skw_spans: dictionary of already identified single keywords
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], [info_about_matches] ],
..
}
"""
timer_start = time.clock()
# Build the list of composite candidates
ckw_out = {}
skw_as_components = []
for composite_keyword in ckw_db.values():
# Counters for the composite keyword. First count is for the
# number of occurrences in the whole document and second count
# is for the human defined keywords.
ckw_count = 0
matched_spans = []
# First search in the fulltext using the regex pattern of the whole
# composite keyword (including the alternative labels)
for regex in composite_keyword.regex:
for match in regex.finditer(fulltext):
span = list(match.span())
span[1] -= 1
span = tuple(span)
if span not in matched_spans:
ckw_count += 1
matched_spans.append(span)
# Get the single keywords locations.
try:
components = composite_keyword.compositeof
except AttributeError:
current_app.logger.error(
"Cached ontology is corrupted. Please "
"remove the cached ontology in your temporary file."
)
raise OntologyError('Cached ontology is corrupted.')
spans = []
try:
spans = [skw_spans[component][0] for component in components]
except KeyError:
# Some of the keyword components are not to be found in the text.
# Therefore we cannot continue because the match is incomplete.
pass
ckw_spans = []
for index in range(len(spans) - 1):
len_ckw = len(ckw_spans)
if ckw_spans: # cause ckw_spans include the previous
previous_spans = ckw_spans
else:
previous_spans = spans[index]
for new_span in [(span0, colmd1) for span0 in previous_spans
for colmd1 in spans[index + 1]]:
span = _get_ckw_span(fulltext, new_span)
if span is not None:
ckw_spans.append(span)
# the spans must be overlapping to be included
if index > 0 and ckw_spans:
_ckw_spans = []
for _span in ckw_spans[len_ckw:]: # new spans
for _colmd2 in ckw_spans[:len_ckw]:
s = _span_overlapping(_span, _colmd2)
if s:
_ckw_spans.append(s)
ckw_spans = _ckw_spans
for matched_span in [mspan for mspan in ckw_spans
if mspan not in matched_spans]:
ckw_count += 1
matched_spans.append(matched_span)
if ckw_count:
# Gather the component counts.
component_counts = []
for component in components:
skw_as_components.append(component)
# Get the single keyword count.
try:
component_counts.append(len(skw_spans[component][0]))
except KeyError:
component_counts.append(0)
# Store the composite keyword
ckw_out[composite_keyword] = [matched_spans, component_counts]
# Remove the single keywords that appear as components from the list
# of single keywords.
for skw in skw_as_components:
try:
del skw_spans[skw]
except KeyError:
pass
# Remove the composite keywords that are fully present in
# longer composite keywords
_ckw_base = filter(lambda x: len(x.compositeof) == 2, ckw_out.keys())
_ckw_extended = sorted(
filter(lambda x: len(x.compositeof) > 2, ckw_out.keys()),
key=lambda x: len(x.compositeof))
if _ckw_extended:
candidates = []
for kw1 in _ckw_base:
s1 = set(kw1.compositeof)
for kw2 in _ckw_extended:
s2 = set(kw2.compositeof)
if s1.issubset(s2):
candidates.append((kw1, kw2))
# break # don't stop because this keyword may be
# partly contained by kw_x and kw_y
for i in range(len(_ckw_extended)):
kw1 = _ckw_extended[i]
s1 = set(kw1.compositeof)
for ii in range(i + 1, len(_ckw_extended)):
kw2 = _ckw_extended[ii]
s2 = set(kw2.compositeof)
if s1.issubset(s2):
candidates.append((kw1, kw2))
break
if candidates:
for kw1, kw2 in candidates:
try:
match1 = ckw_out[kw1] # subset of the kw2
match2 = ckw_out[kw2]
except KeyError:
continue
positions1 = match1[0]
for pos1 in positions1:
for pos2 in match2[0]:
if _span_overlapping(pos1, pos2):
del positions1[positions1.index(pos1)]
# if we removed all the matches also
# delete the keyword
if len(positions1) == 0:
del ckw_out[kw1]
break
current_app.logger.info(
"Matching composite keywords... %d keywords found "
"in %.1f sec." % (len(ckw_out), time.clock() - timer_start),
)
return ckw_out | ['def', 'get_composite_keywords', '(', 'ckw_db', ',', 'fulltext', ',', 'skw_spans', ')', ':', 'timer_start', '=', 'time', '.', 'clock', '(', ')', '# Build the list of composite candidates', 'ckw_out', '=', '{', '}', 'skw_as_components', '=', '[', ']', 'for', 'composite_keyword', 'in', 'ckw_db', '.', 'values', '(', ')', ':', '# Counters for the composite keyword. First count is for the', '# number of occurrences in the whole document and second count', '# is for the human defined keywords.', 'ckw_count', '=', '0', 'matched_spans', '=', '[', ']', '# First search in the fulltext using the regex pattern of the whole', '# composite keyword (including the alternative labels)', 'for', 'regex', 'in', 'composite_keyword', '.', 'regex', ':', 'for', 'match', 'in', 'regex', '.', 'finditer', '(', 'fulltext', ')', ':', 'span', '=', 'list', '(', 'match', '.', 'span', '(', ')', ')', 'span', '[', '1', ']', '-=', '1', 'span', '=', 'tuple', '(', 'span', ')', 'if', 'span', 'not', 'in', 'matched_spans', ':', 'ckw_count', '+=', '1', 'matched_spans', '.', 'append', '(', 'span', ')', '# Get the single keywords locations.', 'try', ':', 'components', '=', 'composite_keyword', '.', 'compositeof', 'except', 'AttributeError', ':', 'current_app', '.', 'logger', '.', 'error', '(', '"Cached ontology is corrupted. Please "', '"remove the cached ontology in your temporary file."', ')', 'raise', 'OntologyError', '(', "'Cached ontology is corrupted.'", ')', 'spans', '=', '[', ']', 'try', ':', 'spans', '=', '[', 'skw_spans', '[', 'component', ']', '[', '0', ']', 'for', 'component', 'in', 'components', ']', 'except', 'KeyError', ':', '# Some of the keyword components are not to be found in the text.', '# Therefore we cannot continue because the match is incomplete.', 'pass', 'ckw_spans', '=', '[', ']', 'for', 'index', 'in', 'range', '(', 'len', '(', 'spans', ')', '-', '1', ')', ':', 'len_ckw', '=', 'len', '(', 'ckw_spans', ')', 'if', 'ckw_spans', ':', '# cause ckw_spans include the previous', 'previous_spans', '=', 'ckw_spans', 'else', ':', 'previous_spans', '=', 'spans', '[', 'index', ']', 'for', 'new_span', 'in', '[', '(', 'span0', ',', 'colmd1', ')', 'for', 'span0', 'in', 'previous_spans', 'for', 'colmd1', 'in', 'spans', '[', 'index', '+', '1', ']', ']', ':', 'span', '=', '_get_ckw_span', '(', 'fulltext', ',', 'new_span', ')', 'if', 'span', 'is', 'not', 'None', ':', 'ckw_spans', '.', 'append', '(', 'span', ')', '# the spans must be overlapping to be included', 'if', 'index', '>', '0', 'and', 'ckw_spans', ':', '_ckw_spans', '=', '[', ']', 'for', '_span', 'in', 'ckw_spans', '[', 'len_ckw', ':', ']', ':', '# new spans', 'for', '_colmd2', 'in', 'ckw_spans', '[', ':', 'len_ckw', ']', ':', 's', '=', '_span_overlapping', '(', '_span', ',', '_colmd2', ')', 'if', 's', ':', '_ckw_spans', '.', 'append', '(', 's', ')', 'ckw_spans', '=', '_ckw_spans', 'for', 'matched_span', 'in', '[', 'mspan', 'for', 'mspan', 'in', 'ckw_spans', 'if', 'mspan', 'not', 'in', 'matched_spans', ']', ':', 'ckw_count', '+=', '1', 'matched_spans', '.', 'append', '(', 'matched_span', ')', 'if', 'ckw_count', ':', '# Gather the component counts.', 'component_counts', '=', '[', ']', 'for', 'component', 'in', 'components', ':', 'skw_as_components', '.', 'append', '(', 'component', ')', '# Get the single keyword count.', 'try', ':', 'component_counts', '.', 'append', '(', 'len', '(', 'skw_spans', '[', 'component', ']', '[', '0', ']', ')', ')', 'except', 'KeyError', ':', 'component_counts', '.', 'append', '(', '0', ')', '# Store the composite keyword', 
'ckw_out', '[', 'composite_keyword', ']', '=', '[', 'matched_spans', ',', 'component_counts', ']', '# Remove the single keywords that appear as components from the list', '# of single keywords.', 'for', 'skw', 'in', 'skw_as_components', ':', 'try', ':', 'del', 'skw_spans', '[', 'skw', ']', 'except', 'KeyError', ':', 'pass', '# Remove the composite keywords that are fully present in', '# longer composite keywords', '_ckw_base', '=', 'filter', '(', 'lambda', 'x', ':', 'len', '(', 'x', '.', 'compositeof', ')', '==', '2', ',', 'ckw_out', '.', 'keys', '(', ')', ')', '_ckw_extended', '=', 'sorted', '(', 'filter', '(', 'lambda', 'x', ':', 'len', '(', 'x', '.', 'compositeof', ')', '>', '2', ',', 'ckw_out', '.', 'keys', '(', ')', ')', ',', 'key', '=', 'lambda', 'x', ':', 'len', '(', 'x', '.', 'compositeof', ')', ')', 'if', '_ckw_extended', ':', 'candidates', '=', '[', ']', 'for', 'kw1', 'in', '_ckw_base', ':', 's1', '=', 'set', '(', 'kw1', '.', 'compositeof', ')', 'for', 'kw2', 'in', '_ckw_extended', ':', 's2', '=', 'set', '(', 'kw2', '.', 'compositeof', ')', 'if', 's1', '.', 'issubset', '(', 's2', ')', ':', 'candidates', '.', 'append', '(', '(', 'kw1', ',', 'kw2', ')', ')', "# break # don't stop because this keyword may be", '# partly contained by kw_x and kw_y', 'for', 'i', 'in', 'range', '(', 'len', '(', '_ckw_extended', ')', ')', ':', 'kw1', '=', '_ckw_extended', '[', 'i', ']', 's1', '=', 'set', '(', 'kw1', '.', 'compositeof', ')', 'for', 'ii', 'in', 'range', '(', 'i', '+', '1', ',', 'len', '(', '_ckw_extended', ')', ')', ':', 'kw2', '=', '_ckw_extended', '[', 'ii', ']', 's2', '=', 'set', '(', 'kw2', '.', 'compositeof', ')', 'if', 's1', '.', 'issubset', '(', 's2', ')', ':', 'candidates', '.', 'append', '(', '(', 'kw1', ',', 'kw2', ')', ')', 'break', 'if', 'candidates', ':', 'for', 'kw1', ',', 'kw2', 'in', 'candidates', ':', 'try', ':', 'match1', '=', 'ckw_out', '[', 'kw1', ']', '# subset of the kw2', 'match2', '=', 'ckw_out', '[', 'kw2', ']', 'except', 'KeyError', ':', 'continue', 'positions1', '=', 'match1', '[', '0', ']', 'for', 'pos1', 'in', 'positions1', ':', 'for', 'pos2', 'in', 'match2', '[', '0', ']', ':', 'if', '_span_overlapping', '(', 'pos1', ',', 'pos2', ')', ':', 'del', 'positions1', '[', 'positions1', '.', 'index', '(', 'pos1', ')', ']', '# if we removed all the matches also', '# delete the keyword', 'if', 'len', '(', 'positions1', ')', '==', '0', ':', 'del', 'ckw_out', '[', 'kw1', ']', 'break', 'current_app', '.', 'logger', '.', 'info', '(', '"Matching composite keywords... %d keywords found "', '"in %.1f sec."', '%', '(', 'len', '(', 'ckw_out', ')', ',', 'time', '.', 'clock', '(', ')', '-', 'timer_start', ')', ',', ')', 'return', 'ckw_out'] | Return a list of composite keywords bound with number of occurrences.
:param ckw_db: list of KewordToken objects
(they are supposed to be composite ones)
:param fulltext: string to search in
:param skw_spans: dictionary of already identified single keywords
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], [info_about_matches] ],
..
} | ['Return', 'a', 'list', 'of', 'composite', 'keywords', 'bound', 'with', 'number', 'of', 'occurrences', '.'] | train | https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/keyworder.py#L91-L244 |
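
The pruning at the end relies on a span-overlap test (_span_overlapping itself is not shown in this row); a self-contained sketch of that idea:

    def spans_overlap(a, b):
        # spans are (start, end) character offsets with an inclusive end, as used above
        return a[0] <= b[1] and b[0] <= a[1]

    print(spans_overlap((5, 12), (10, 30)))   # True  - the matches share characters
    print(spans_overlap((5, 12), (20, 30)))   # False - disjoint matches
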
9,144 | crate/crate-python | src/crate/client/sqlalchemy/compiler.py | CrateCompiler.visit_update | def visit_update(self, update_stmt, **kw):
"""
used to compile <sql.expression.Update> expressions
Parts are taken from the SQLCompiler base class.
"""
if not update_stmt.parameters and \
not hasattr(update_stmt, '_crate_specific'):
return super(CrateCompiler, self).visit_update(update_stmt, **kw)
self.isupdate = True
extra_froms = update_stmt._extra_froms
text = 'UPDATE '
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
dialect_hints = None
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
crud_params = self._get_crud_params(update_stmt, **kw)
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
set_clauses = []
for k, v in crud_params:
clause = k._compiler_dispatch(self,
include_table=include_table) + \
' = ' + v
set_clauses.append(clause)
for k, v in update_stmt.parameters.items():
if isinstance(k, str) and '[' in k:
bindparam = sa.sql.bindparam(k, v)
set_clauses.append(k + ' = ' + self.process(bindparam))
text += ', '.join(set_clauses)
if self.returning or update_stmt._returning:
if not self.returning:
self.returning = update_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints,
**kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
return text | python | def visit_update(self, update_stmt, **kw):
"""
used to compile <sql.expression.Update> expressions
Parts are taken from the SQLCompiler base class.
"""
if not update_stmt.parameters and \
not hasattr(update_stmt, '_crate_specific'):
return super(CrateCompiler, self).visit_update(update_stmt, **kw)
self.isupdate = True
extra_froms = update_stmt._extra_froms
text = 'UPDATE '
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
dialect_hints = None
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
crud_params = self._get_crud_params(update_stmt, **kw)
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
set_clauses = []
for k, v in crud_params:
clause = k._compiler_dispatch(self,
include_table=include_table) + \
' = ' + v
set_clauses.append(clause)
for k, v in update_stmt.parameters.items():
if isinstance(k, str) and '[' in k:
bindparam = sa.sql.bindparam(k, v)
set_clauses.append(k + ' = ' + self.process(bindparam))
text += ', '.join(set_clauses)
if self.returning or update_stmt._returning:
if not self.returning:
self.returning = update_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints,
**kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
return text | ['def', 'visit_update', '(', 'self', ',', 'update_stmt', ',', '*', '*', 'kw', ')', ':', 'if', 'not', 'update_stmt', '.', 'parameters', 'and', 'not', 'hasattr', '(', 'update_stmt', ',', "'_crate_specific'", ')', ':', 'return', 'super', '(', 'CrateCompiler', ',', 'self', ')', '.', 'visit_update', '(', 'update_stmt', ',', '*', '*', 'kw', ')', 'self', '.', 'isupdate', '=', 'True', 'extra_froms', '=', 'update_stmt', '.', '_extra_froms', 'text', '=', "'UPDATE '", 'if', 'update_stmt', '.', '_prefixes', ':', 'text', '+=', 'self', '.', '_generate_prefixes', '(', 'update_stmt', ',', 'update_stmt', '.', '_prefixes', ',', '*', '*', 'kw', ')', 'table_text', '=', 'self', '.', 'update_tables_clause', '(', 'update_stmt', ',', 'update_stmt', '.', 'table', ',', 'extra_froms', ',', '*', '*', 'kw', ')', 'dialect_hints', '=', 'None', 'if', 'update_stmt', '.', '_hints', ':', 'dialect_hints', ',', 'table_text', '=', 'self', '.', '_setup_crud_hints', '(', 'update_stmt', ',', 'table_text', ')', 'crud_params', '=', 'self', '.', '_get_crud_params', '(', 'update_stmt', ',', '*', '*', 'kw', ')', 'text', '+=', 'table_text', 'text', '+=', "' SET '", 'include_table', '=', 'extra_froms', 'and', 'self', '.', 'render_table_with_column_in_update_from', 'set_clauses', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'crud_params', ':', 'clause', '=', 'k', '.', '_compiler_dispatch', '(', 'self', ',', 'include_table', '=', 'include_table', ')', '+', "' = '", '+', 'v', 'set_clauses', '.', 'append', '(', 'clause', ')', 'for', 'k', ',', 'v', 'in', 'update_stmt', '.', 'parameters', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'k', ',', 'str', ')', 'and', "'['", 'in', 'k', ':', 'bindparam', '=', 'sa', '.', 'sql', '.', 'bindparam', '(', 'k', ',', 'v', ')', 'set_clauses', '.', 'append', '(', 'k', '+', "' = '", '+', 'self', '.', 'process', '(', 'bindparam', ')', ')', 'text', '+=', "', '", '.', 'join', '(', 'set_clauses', ')', 'if', 'self', '.', 'returning', 'or', 'update_stmt', '.', '_returning', ':', 'if', 'not', 'self', '.', 'returning', ':', 'self', '.', 'returning', '=', 'update_stmt', '.', '_returning', 'if', 'self', '.', 'returning_precedes_values', ':', 'text', '+=', '" "', '+', 'self', '.', 'returning_clause', '(', 'update_stmt', ',', 'self', '.', 'returning', ')', 'if', 'extra_froms', ':', 'extra_from_text', '=', 'self', '.', 'update_from_clause', '(', 'update_stmt', ',', 'update_stmt', '.', 'table', ',', 'extra_froms', ',', 'dialect_hints', ',', '*', '*', 'kw', ')', 'if', 'extra_from_text', ':', 'text', '+=', '" "', '+', 'extra_from_text', 'if', 'update_stmt', '.', '_whereclause', 'is', 'not', 'None', ':', 't', '=', 'self', '.', 'process', '(', 'update_stmt', '.', '_whereclause', ')', 'if', 't', ':', 'text', '+=', '" WHERE "', '+', 't', 'limit_clause', '=', 'self', '.', 'update_limit_clause', '(', 'update_stmt', ')', 'if', 'limit_clause', ':', 'text', '+=', '" "', '+', 'limit_clause', 'if', 'self', '.', 'returning', 'and', 'not', 'self', '.', 'returning_precedes_values', ':', 'text', '+=', '" "', '+', 'self', '.', 'returning_clause', '(', 'update_stmt', ',', 'self', '.', 'returning', ')', 'return', 'text'] | used to compile <sql.expression.Update> expressions
Parts are taken from the SQLCompiler base class. | ['used', 'to', 'compile', '<sql', '.', 'expression', '.', 'Update', '>', 'expressions', 'Parts', 'are', 'taken', 'from', 'the', 'SQLCompiler', 'base', 'class', '.'] | train | https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/sqlalchemy/compiler.py#L278-L360 |
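
A hedged sketch of the statement shape this override exists for (table, column and object-key names are invented; compiling it requires the Crate dialect):

    from sqlalchemy import table, column

    characters = table('characters', column('name'), column('details'))
    stmt = characters.update().where(characters.c.name == 'Arthur').values(
        {"details['age']": 43}   # bracketed keys end up as extra "details['age'] = ?" SET clauses
    )
    # with the Crate dialect this compiles to roughly:
    # UPDATE characters SET details['age'] = ? WHERE characters.name = ?
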
9,145 | Robin8Put/pmes | balance/handler.py | Balance.confirmbalance | async def confirmbalance(self, *args, **kwargs):
""" Confirm balance after trading
Accepts:
- message (signed dictionary):
- "txid" - str
- "coinid" - str
- "amount" - int
Returns:
- "address" - str
- "coinid" - str
- "amount" - int
- "uid" - int
- "unconfirmed" - int (0 by default)
- "deposit" - int (0 by default)
Verified: True
"""
# Get data from request
if kwargs.get("message"):
kwargs = json.loads(kwargs.get("message", "{}"))
txid = kwargs.get("txid")
coinid = kwargs.get("coinid")
buyer_address = kwargs.get("buyer_address")
cid = kwargs.get("cid")
address = kwargs.get("buyer_address")
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fields exists
if not all([coinid, cid, buyer_address, txid]):
return {"error":400, "reason": "Confirm balance. Missed required fields"}
if not coinid in settings.bridges.keys():
return await self.error_400("Confirm balance. Invalid coinid: %s" % coinid)
# Get offers price
self.account.blockchain.setendpoint(settings.bridges[coinid])
offer = await self.account.blockchain.getoffer(cid=cid,
buyer_address=buyer_address)
# Get offers price for updating balance
amount = int(offer["price"])
coinid = "PUT"
# Get sellers account
history_database = self.client[settings.HISTORY]
history_collection = history_database[coinid]
history = await history_collection.find_one({"txid":txid})
try:
account = await self.account.getaccountdata(public_key=history["public_key"])
except:
return await self.error_404("Confirm balance. Not found current deal.")
# Connect to balance database
database = self.client[self.collection]
balance_collection = database[coinid]
# Try to update balance if exists
balance = await balance_collection.find_one({"uid":account["id"]})
# Decrement unconfirmed
submitted = int(balance["amount_frozen"]) - int(amount)
if submitted < 0:
return await self.error_400("Not enough frozen amount.")
decremented = await balance_collection.find_one_and_update(
{"uid":account["id"]},
{"$set":{"amount_frozen": str(submitted)}})
difference = int(balance["amount_active"]) + int(amount)
updated = await balance_collection.find_one_and_update(
{"uid":account["id"]},
{"$set":{"amount_active":str(difference)}})
if not updated:
return {"error":404,
"reason":"Confirm balance. Not found current transaction id"}
# Delete transaction id field
await history_collection.find_one_and_update({"txid":txid},
{"$unset":{"txid":1}})
if int(account["level"]) == 2:
await self.account.updatelevel(**{"id":account["id"], "level":3})
return {i:updated[i] for i in updated if i != "_id" and i != "txid"} | python | async def confirmbalance(self, *args, **kwargs):
""" Confirm balance after trading
Accepts:
- message (signed dictionary):
- "txid" - str
- "coinid" - str
- "amount" - int
Returns:
- "address" - str
- "coinid" - str
- "amount" - int
- "uid" - int
- "unconfirmed" - int (0 by default)
- "deposit" - int (0 by default)
Verified: True
"""
# Get data from request
if kwargs.get("message"):
kwargs = json.loads(kwargs.get("message", "{}"))
txid = kwargs.get("txid")
coinid = kwargs.get("coinid")
buyer_address = kwargs.get("buyer_address")
cid = kwargs.get("cid")
address = kwargs.get("buyer_address")
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fields exists
if not all([coinid, cid, buyer_address, txid]):
return {"error":400, "reason": "Confirm balance. Missed required fields"}
if not coinid in settings.bridges.keys():
return await self.error_400("Confirm balance. Invalid coinid: %s" % coinid)
# Get offers price
self.account.blockchain.setendpoint(settings.bridges[coinid])
offer = await self.account.blockchain.getoffer(cid=cid,
buyer_address=buyer_address)
# Get offers price for updating balance
amount = int(offer["price"])
coinid = "PUT"
# Get sellers account
history_database = self.client[settings.HISTORY]
history_collection = history_database[coinid]
history = await history_collection.find_one({"txid":txid})
try:
account = await self.account.getaccountdata(public_key=history["public_key"])
except:
return await self.error_404("Confirm balance. Not found current deal.")
# Connect to balance database
database = self.client[self.collection]
balance_collection = database[coinid]
# Try to update balance if exists
balance = await balance_collection.find_one({"uid":account["id"]})
# Decrement unconfirmed
submitted = int(balance["amount_frozen"]) - int(amount)
if submitted < 0:
return await self.error_400("Not enough frozen amount.")
decremented = await balance_collection.find_one_and_update(
{"uid":account["id"]},
{"$set":{"amount_frozen": str(submitted)}})
difference = int(balance["amount_active"]) + int(amount)
updated = await balance_collection.find_one_and_update(
{"uid":account["id"]},
{"$set":{"amount_active":str(difference)}})
if not updated:
return {"error":404,
"reason":"Confirm balance. Not found current transaction id"}
# Delete transaction id field
await history_collection.find_one_and_update({"txid":txid},
{"$unset":{"txid":1}})
if int(account["level"]) == 2:
await self.account.updatelevel(**{"id":account["id"], "level":3})
return {i:updated[i] for i in updated if i != "_id" and i != "txid"} | ['async', 'def', 'confirmbalance', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Get data from request', 'if', 'kwargs', '.', 'get', '(', '"message"', ')', ':', 'kwargs', '=', 'json', '.', 'loads', '(', 'kwargs', '.', 'get', '(', '"message"', ',', '"{}"', ')', ')', 'txid', '=', 'kwargs', '.', 'get', '(', '"txid"', ')', 'coinid', '=', 'kwargs', '.', 'get', '(', '"coinid"', ')', 'buyer_address', '=', 'kwargs', '.', 'get', '(', '"buyer_address"', ')', 'cid', '=', 'kwargs', '.', 'get', '(', '"cid"', ')', 'address', '=', 'kwargs', '.', 'get', '(', '"buyer_address"', ')', 'try', ':', 'coinid', '=', 'coinid', '.', 'replace', '(', '"TEST"', ',', '""', ')', 'except', ':', 'pass', '# Check if required fields exists', 'if', 'not', 'all', '(', '[', 'coinid', ',', 'cid', ',', 'buyer_address', ',', 'txid', ']', ')', ':', 'return', '{', '"error"', ':', '400', ',', '"reason"', ':', '"Confirm balance. Missed required fields"', '}', 'if', 'not', 'coinid', 'in', 'settings', '.', 'bridges', '.', 'keys', '(', ')', ':', 'return', 'await', 'self', '.', 'error_400', '(', '"Confirm balance. Invalid coinid: %s"', '%', 'coinid', ')', '# Get offers price\t', 'self', '.', 'account', '.', 'blockchain', '.', 'setendpoint', '(', 'settings', '.', 'bridges', '[', 'coinid', ']', ')', 'offer', '=', 'await', 'self', '.', 'account', '.', 'blockchain', '.', 'getoffer', '(', 'cid', '=', 'cid', ',', 'buyer_address', '=', 'buyer_address', ')', '# Get offers price for updating balance', 'amount', '=', 'int', '(', 'offer', '[', '"price"', ']', ')', 'coinid', '=', '"PUT"', '# Get sellers account', 'history_database', '=', 'self', '.', 'client', '[', 'settings', '.', 'HISTORY', ']', 'history_collection', '=', 'history_database', '[', 'coinid', ']', 'history', '=', 'await', 'history_collection', '.', 'find_one', '(', '{', '"txid"', ':', 'txid', '}', ')', 'try', ':', 'account', '=', 'await', 'self', '.', 'account', '.', 'getaccountdata', '(', 'public_key', '=', 'history', '[', '"public_key"', ']', ')', 'except', ':', 'return', 'await', 'self', '.', 'error_404', '(', '"Confirm balance. Not found current deal."', ')', '# Connect to balance database', 'database', '=', 'self', '.', 'client', '[', 'self', '.', 'collection', ']', 'balance_collection', '=', 'database', '[', 'coinid', ']', '# Try to update balance if exists', 'balance', '=', 'await', 'balance_collection', '.', 'find_one', '(', '{', '"uid"', ':', 'account', '[', '"id"', ']', '}', ')', '# Decrement unconfirmed', 'submitted', '=', 'int', '(', 'balance', '[', '"amount_frozen"', ']', ')', '-', 'int', '(', 'amount', ')', 'if', 'submitted', '<', '0', ':', 'return', 'await', 'self', '.', 'error_400', '(', '"Not enough frozen amount."', ')', 'decremented', '=', 'await', 'balance_collection', '.', 'find_one_and_update', '(', '{', '"uid"', ':', 'account', '[', '"id"', ']', '}', ',', '{', '"$set"', ':', '{', '"amount_frozen"', ':', 'str', '(', 'submitted', ')', '}', '}', ')', 'difference', '=', 'int', '(', 'balance', '[', '"amount_active"', ']', ')', '+', 'int', '(', 'amount', ')', 'updated', '=', 'await', 'balance_collection', '.', 'find_one_and_update', '(', '{', '"uid"', ':', 'account', '[', '"id"', ']', '}', ',', '{', '"$set"', ':', '{', '"amount_active"', ':', 'str', '(', 'difference', ')', '}', '}', ')', 'if', 'not', 'updated', ':', 'return', '{', '"error"', ':', '404', ',', '"reason"', ':', '"Confirm balance. 
Not found current transaction id"', '}', '# Delete transaction id field', 'await', 'history_collection', '.', 'find_one_and_update', '(', '{', '"txid"', ':', 'txid', '}', ',', '{', '"$unset"', ':', '{', '"txid"', ':', '1', '}', '}', ')', 'if', 'int', '(', 'account', '[', '"level"', ']', ')', '==', '2', ':', 'await', 'self', '.', 'account', '.', 'updatelevel', '(', '*', '*', '{', '"id"', ':', 'account', '[', '"id"', ']', ',', '"level"', ':', '3', '}', ')', 'return', '{', 'i', ':', 'updated', '[', 'i', ']', 'for', 'i', 'in', 'updated', 'if', 'i', '!=', '"_id"', 'and', 'i', '!=', '"txid"', '}'] | Confirm balance after trading
Accepts:
- message (signed dictionary):
- "txid" - str
- "coinid" - str
- "amount" - int
Returns:
- "address" - str
- "coinid" - str
- "amount" - int
- "uid" - int
- "unconfirmed" - int (0 by default)
- "deposit" - int (0 by default)
Verified: True | ['Confirm', 'balance', 'after', 'trading'] | train | https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/balance/handler.py#L840-L931 |
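
A hedged sketch of driving the handler (all identifiers and values are placeholders, and `balance` stands for an already-wired Balance instance):

    import asyncio
    import json

    message = json.dumps({
        "txid": "f3a9...",             # placeholder transaction id
        "coinid": "PUTTEST",
        "cid": 7,
        "buyer_address": "0xabc123"    # placeholder buyer address
    })
    result = asyncio.get_event_loop().run_until_complete(
        balance.confirmbalance(message=message)   # `balance` is assumed to exist
    )
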
9,146 | dakrauth/django-swingtime | swingtime/views.py | occurrence_view | def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form}) | python | def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form}) | ['def', 'occurrence_view', '(', 'request', ',', 'event_pk', ',', 'pk', ',', 'template', '=', "'swingtime/occurrence_detail.html'", ',', 'form_class', '=', 'forms', '.', 'SingleOccurrenceForm', ')', ':', 'occurrence', '=', 'get_object_or_404', '(', 'Occurrence', ',', 'pk', '=', 'pk', ',', 'event__pk', '=', 'event_pk', ')', 'if', 'request', '.', 'method', '==', "'POST'", ':', 'form', '=', 'form_class', '(', 'request', '.', 'POST', ',', 'instance', '=', 'occurrence', ')', 'if', 'form', '.', 'is_valid', '(', ')', ':', 'form', '.', 'save', '(', ')', 'return', 'http', '.', 'HttpResponseRedirect', '(', 'request', '.', 'path', ')', 'else', ':', 'form', '=', 'form_class', '(', 'instance', '=', 'occurrence', ')', 'return', 'render', '(', 'request', ',', 'template', ',', '{', "'occurrence'", ':', 'occurrence', ',', "'form'", ':', 'form', '}', ')'] | View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence | ['View', 'a', 'specific', 'occurrence', 'and', 'optionally', 'handle', 'any', 'updates', '.'] | train | https://github.com/dakrauth/django-swingtime/blob/d1cdd449bd5c6895c3ff182fd890c4d3452943fe/swingtime/views.py#L91-L118 |
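
A hedged sketch of wiring the view into a URLconf (the path and pattern name are assumptions, not swingtime's own urls module):

    from django.urls import path
    from swingtime import views

    urlpatterns = [
        path('events/<int:event_pk>/occurrences/<int:pk>/',
             views.occurrence_view, name='swingtime-occurrence'),
    ]
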
9,147 | StackStorm/pybind | pybind/slxos/v17s_1_02/interface/ethernet/openflow/__init__.py | openflow._set_protected_vlans | def _set_protected_vlans(self, v, load=False):
"""
Setter method for protected_vlans, mapped from YANG variable /interface/ethernet/openflow/protected_vlans (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_protected_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protected_vlans() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=protected_vlans.protected_vlans, is_container='container', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'protected vlan ', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protected_vlans must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=protected_vlans.protected_vlans, is_container='container', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'protected vlan ', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
})
self.__protected_vlans = t
if hasattr(self, '_set'):
self._set() | python | def _set_protected_vlans(self, v, load=False):
"""
Setter method for protected_vlans, mapped from YANG variable /interface/ethernet/openflow/protected_vlans (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_protected_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protected_vlans() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=protected_vlans.protected_vlans, is_container='container', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'protected vlan ', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protected_vlans must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=protected_vlans.protected_vlans, is_container='container', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'protected vlan ', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
})
self.__protected_vlans = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_protected_vlans', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'protected_vlans', '.', 'protected_vlans', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"protected-vlans"', ',', 'rest_name', '=', '"protected-vlans"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'protected vlan '", ',', "u'cli-full-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-openflow'", ',', 'defining_module', '=', "'brocade-openflow'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""protected_vlans must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=protected_vlans.protected_vlans, is_container=\'container\', presence=False, yang_name="protected-vlans", rest_name="protected-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'protected vlan \', u\'cli-full-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-openflow\', defining_module=\'brocade-openflow\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__protected_vlans', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for protected_vlans, mapped from YANG variable /interface/ethernet/openflow/protected_vlans (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_protected_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protected_vlans() directly. | ['Setter', 'method', 'for', 'protected_vlans', 'mapped', 'from', 'YANG', 'variable', '/', 'interface', '/', 'ethernet', '/', 'openflow', '/', 'protected_vlans', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_protected_vlans', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_protected_vlans', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/interface/ethernet/openflow/__init__.py#L94-L115 |
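
A minimal generic sketch of the validate-then-assign pattern these generated setters follow; this is not the pybind class itself:

    class Container(dict):
        """Stand-in for the generated protected_vlans container type."""

    class Node:
        def _set_value(self, v):
            try:
                v = Container(v)          # coerce/validate, mirroring YANGDynClass(...)
            except (TypeError, ValueError):
                raise ValueError({'error-string': 'value must be compatible with container'})
            self._value = v

    n = Node()
    n._set_value({})                      # ok; a non-container argument raises ValueError
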
9,148 | LIVVkit/LIVVkit | livvkit/components/performance.py | generate_timing_stats | def generate_timing_stats(file_list, var_list):
"""
Parse all of the timing files, and generate some statistics
about the run.
Args:
file_list: A list of timing files to parse
var_list: A list of variables to look for in the timing file
Returns:
A dict containing values that have the form:
[mean, min, max, mean, standard deviation]
"""
timing_result = dict()
timing_summary = dict()
for file in file_list:
timing_result[file] = functions.parse_gptl(file, var_list)
for var in var_list:
var_time = []
for f, data in timing_result.items():
try:
var_time.append(data[var])
except:
continue
if len(var_time):
timing_summary[var] = {'mean': np.mean(var_time),
'max': np.max(var_time),
'min': np.min(var_time),
'std': np.std(var_time)}
return timing_summary | python | def generate_timing_stats(file_list, var_list):
"""
Parse all of the timing files, and generate some statistics
about the run.
Args:
file_list: A list of timing files to parse
var_list: A list of variables to look for in the timing file
Returns:
A dict containing values that have the form:
[mean, min, max, mean, standard deviation]
"""
timing_result = dict()
timing_summary = dict()
for file in file_list:
timing_result[file] = functions.parse_gptl(file, var_list)
for var in var_list:
var_time = []
for f, data in timing_result.items():
try:
var_time.append(data[var])
except:
continue
if len(var_time):
timing_summary[var] = {'mean': np.mean(var_time),
'max': np.max(var_time),
'min': np.min(var_time),
'std': np.std(var_time)}
return timing_summary | ['def', 'generate_timing_stats', '(', 'file_list', ',', 'var_list', ')', ':', 'timing_result', '=', 'dict', '(', ')', 'timing_summary', '=', 'dict', '(', ')', 'for', 'file', 'in', 'file_list', ':', 'timing_result', '[', 'file', ']', '=', 'functions', '.', 'parse_gptl', '(', 'file', ',', 'var_list', ')', 'for', 'var', 'in', 'var_list', ':', 'var_time', '=', '[', ']', 'for', 'f', ',', 'data', 'in', 'timing_result', '.', 'items', '(', ')', ':', 'try', ':', 'var_time', '.', 'append', '(', 'data', '[', 'var', ']', ')', 'except', ':', 'continue', 'if', 'len', '(', 'var_time', ')', ':', 'timing_summary', '[', 'var', ']', '=', '{', "'mean'", ':', 'np', '.', 'mean', '(', 'var_time', ')', ',', "'max'", ':', 'np', '.', 'max', '(', 'var_time', ')', ',', "'min'", ':', 'np', '.', 'min', '(', 'var_time', ')', ',', "'std'", ':', 'np', '.', 'std', '(', 'var_time', ')', '}', 'return', 'timing_summary'] | Parse all of the timing files, and generate some statistics
about the run.
Args:
file_list: A list of timing files to parse
var_list: A list of variables to look for in the timing file
Returns:
A dict containing values that have the form:
[mean, min, max, mean, standard deviation] | ['Parse', 'all', 'of', 'the', 'timing', 'files', 'and', 'generate', 'some', 'statistics', 'about', 'the', 'run', '.'] | train | https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L185-L214 |
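
A hedged usage sketch (the file paths and timer names are invented; they must point at real GPTL timing logs for parse_gptl to read):

    timing_files = ['timing/run0.results', 'timing/run1.results']   # hypothetical GPTL logs
    stats = generate_timing_stats(timing_files, ['Total', 'IO'])
    print(stats['Total']['mean'], stats['Total']['std'])            # per-variable mean/max/min/std
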
9,149 | GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/api/map_job/output_writer.py | OutputWriter.commit_output | def commit_output(cls, shard_ctx, iterator):
"""Saves output references when a shard finishes.
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs.
"""
# We accept an iterator just in case output references get too big.
outs = tuple(iterator)
shard_ctx._state.writer_state["outs"] = outs | python | def commit_output(cls, shard_ctx, iterator):
"""Saves output references when a shard finishes.
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs.
"""
# We accept an iterator just in case output references get too big.
outs = tuple(iterator)
shard_ctx._state.writer_state["outs"] = outs | ['def', 'commit_output', '(', 'cls', ',', 'shard_ctx', ',', 'iterator', ')', ':', '# We accept an iterator just in case output references get too big.', 'outs', '=', 'tuple', '(', 'iterator', ')', 'shard_ctx', '.', '_state', '.', 'writer_state', '[', '"outs"', ']', '=', 'outs'] | Saves output references when a shard finishes.
Inside end_shard(), an output writer can optionally use this method
to persist some references to the outputs from this shard
(e.g a list of filenames)
Args:
shard_ctx: map_job_context.ShardContext for this shard.
iterator: an iterator that yields json serializable
references to the outputs from this shard.
Contents from the iterator can be accessible later via
map_job.Job.get_outputs. | ['Saves', 'output', 'references', 'when', 'a', 'shard', 'finishes', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/api/map_job/output_writer.py#L111-L127 |
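
A hedged sketch of a writer calling this hook from end_shard (the subclass and filenames are invented):

    class MyWriter(output_writer.OutputWriter):
        def end_shard(self, shard_ctx):
            filenames = ['/bucket/shard-0.out']             # hypothetical output references
            self.commit_output(shard_ctx, iter(filenames))  # retrievable later via map_job.Job.get_outputs
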
9,150 | quantumlib/Cirq | cirq/google/engine/engine.py | Engine.get_job | def get_job(self, job_resource_name: str) -> Dict:
"""Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata.
"""
return self.service.projects().programs().jobs().get(
name=job_resource_name).execute() | python | def get_job(self, job_resource_name: str) -> Dict:
"""Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata.
"""
return self.service.projects().programs().jobs().get(
name=job_resource_name).execute() | ['def', 'get_job', '(', 'self', ',', 'job_resource_name', ':', 'str', ')', '->', 'Dict', ':', 'return', 'self', '.', 'service', '.', 'projects', '(', ')', '.', 'programs', '(', ')', '.', 'jobs', '(', ')', '.', 'get', '(', 'name', '=', 'job_resource_name', ')', '.', 'execute', '(', ')'] | Returns metadata about a previously created job.
See get_job_result if you want the results of the job and not just
metadata about the job.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
Returns:
A dictionary containing the metadata. | ['Returns', 'metadata', 'about', 'a', 'previously', 'created', 'job', '.'] | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/engine/engine.py#L414-L428 |
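
A hedged usage sketch, assuming an already-configured Engine instance named `engine`:

    resource = 'projects/my-project/programs/prog-1/jobs/job-1'   # placeholder ids
    job_meta = engine.get_job(resource)
    print(job_meta.get('executionStatus'))                        # response fields depend on the API
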
9,151 | ibis-project/ibis | ibis/pandas/udf.py | udf._grouped | def _grouped(input_type, output_type, base_class, output_type_method):
"""Define a user-defined function that is applied per group.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of the types found in :mod:`~ibis.expr.datatypes`. The
length of this list must match the number of arguments to the
function. Variadic arguments are not yet supported.
output_type : ibis.expr.datatypes.DataType
The return type of the function.
base_class : Type[T]
The base class of the generated Node
output_type_method : Callable
A callable that determines the method to call to get the expression
type of the UDF
See Also
--------
ibis.pandas.udf.reduction
ibis.pandas.udf.analytic
"""
def wrapper(func):
funcsig = valid_function_signature(input_type, func)
UDAFNode = type(
func.__name__,
(base_class,),
{
'signature': sig.TypeSignature.from_dtypes(input_type),
'output_type': output_type_method(output_type),
},
)
# An execution rule for a simple aggregate node
@execute_node.register(
UDAFNode, *udf_signature(input_type, pin=None, klass=pd.Series)
)
def execute_udaf_node(op, *args, **kwargs):
args, kwargs = arguments_from_signature(
funcsig, *args, **kwargs
)
return func(*args, **kwargs)
# An execution rule for a grouped aggregation node. This
# includes aggregates applied over a window.
nargs = len(input_type)
group_by_signatures = [
udf_signature(input_type, pin=pin, klass=SeriesGroupBy)
for pin in range(nargs)
]
@toolz.compose(
*(
execute_node.register(UDAFNode, *types)
for types in group_by_signatures
)
)
def execute_udaf_node_groupby(op, *args, **kwargs):
# construct a generator that yields the next group of data
# for every argument excluding the first (pandas performs
# the iteration for the first argument) for each argument
# that is a SeriesGroupBy.
#
# If the argument is not a SeriesGroupBy then keep
# repeating it until all groups are exhausted.
aggcontext = kwargs.pop('aggcontext', None)
assert aggcontext is not None, 'aggcontext is None'
iters = (
(data for _, data in arg)
if isinstance(arg, SeriesGroupBy)
else itertools.repeat(arg)
for arg in args[1:]
)
funcsig = signature(func)
def aggregator(first, *rest, **kwargs):
# map(next, *rest) gets the inputs for the next group
# TODO: might be inefficient to do this on every call
args, kwargs = arguments_from_signature(
funcsig, first, *map(next, rest), **kwargs
)
return func(*args, **kwargs)
result = aggcontext.agg(args[0], aggregator, *iters, **kwargs)
return result
@functools.wraps(func)
def wrapped(*args):
return UDAFNode(*args).to_expr()
return wrapped
return wrapper | python | def _grouped(input_type, output_type, base_class, output_type_method):
"""Define a user-defined function that is applied per group.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of the types found in :mod:`~ibis.expr.datatypes`. The
length of this list must match the number of arguments to the
function. Variadic arguments are not yet supported.
output_type : ibis.expr.datatypes.DataType
The return type of the function.
base_class : Type[T]
The base class of the generated Node
output_type_method : Callable
A callable that determines the method to call to get the expression
type of the UDF
See Also
--------
ibis.pandas.udf.reduction
ibis.pandas.udf.analytic
"""
def wrapper(func):
funcsig = valid_function_signature(input_type, func)
UDAFNode = type(
func.__name__,
(base_class,),
{
'signature': sig.TypeSignature.from_dtypes(input_type),
'output_type': output_type_method(output_type),
},
)
# An execution rule for a simple aggregate node
@execute_node.register(
UDAFNode, *udf_signature(input_type, pin=None, klass=pd.Series)
)
def execute_udaf_node(op, *args, **kwargs):
args, kwargs = arguments_from_signature(
funcsig, *args, **kwargs
)
return func(*args, **kwargs)
# An execution rule for a grouped aggregation node. This
# includes aggregates applied over a window.
nargs = len(input_type)
group_by_signatures = [
udf_signature(input_type, pin=pin, klass=SeriesGroupBy)
for pin in range(nargs)
]
@toolz.compose(
*(
execute_node.register(UDAFNode, *types)
for types in group_by_signatures
)
)
def execute_udaf_node_groupby(op, *args, **kwargs):
# construct a generator that yields the next group of data
# for every argument excluding the first (pandas performs
# the iteration for the first argument) for each argument
# that is a SeriesGroupBy.
#
# If the argument is not a SeriesGroupBy then keep
# repeating it until all groups are exhausted.
aggcontext = kwargs.pop('aggcontext', None)
assert aggcontext is not None, 'aggcontext is None'
iters = (
(data for _, data in arg)
if isinstance(arg, SeriesGroupBy)
else itertools.repeat(arg)
for arg in args[1:]
)
funcsig = signature(func)
def aggregator(first, *rest, **kwargs):
# map(next, *rest) gets the inputs for the next group
# TODO: might be inefficient to do this on every call
args, kwargs = arguments_from_signature(
funcsig, first, *map(next, rest), **kwargs
)
return func(*args, **kwargs)
result = aggcontext.agg(args[0], aggregator, *iters, **kwargs)
return result
@functools.wraps(func)
def wrapped(*args):
return UDAFNode(*args).to_expr()
return wrapped
return wrapper | ['def', '_grouped', '(', 'input_type', ',', 'output_type', ',', 'base_class', ',', 'output_type_method', ')', ':', 'def', 'wrapper', '(', 'func', ')', ':', 'funcsig', '=', 'valid_function_signature', '(', 'input_type', ',', 'func', ')', 'UDAFNode', '=', 'type', '(', 'func', '.', '__name__', ',', '(', 'base_class', ',', ')', ',', '{', "'signature'", ':', 'sig', '.', 'TypeSignature', '.', 'from_dtypes', '(', 'input_type', ')', ',', "'output_type'", ':', 'output_type_method', '(', 'output_type', ')', ',', '}', ',', ')', '# An execution rule for a simple aggregate node', '@', 'execute_node', '.', 'register', '(', 'UDAFNode', ',', '*', 'udf_signature', '(', 'input_type', ',', 'pin', '=', 'None', ',', 'klass', '=', 'pd', '.', 'Series', ')', ')', 'def', 'execute_udaf_node', '(', 'op', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'args', ',', 'kwargs', '=', 'arguments_from_signature', '(', 'funcsig', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', '# An execution rule for a grouped aggregation node. This', '# includes aggregates applied over a window.', 'nargs', '=', 'len', '(', 'input_type', ')', 'group_by_signatures', '=', '[', 'udf_signature', '(', 'input_type', ',', 'pin', '=', 'pin', ',', 'klass', '=', 'SeriesGroupBy', ')', 'for', 'pin', 'in', 'range', '(', 'nargs', ')', ']', '@', 'toolz', '.', 'compose', '(', '*', '(', 'execute_node', '.', 'register', '(', 'UDAFNode', ',', '*', 'types', ')', 'for', 'types', 'in', 'group_by_signatures', ')', ')', 'def', 'execute_udaf_node_groupby', '(', 'op', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# construct a generator that yields the next group of data', '# for every argument excluding the first (pandas performs', '# the iteration for the first argument) for each argument', '# that is a SeriesGroupBy.', '#', '# If the argument is not a SeriesGroupBy then keep', '# repeating it until all groups are exhausted.', 'aggcontext', '=', 'kwargs', '.', 'pop', '(', "'aggcontext'", ',', 'None', ')', 'assert', 'aggcontext', 'is', 'not', 'None', ',', "'aggcontext is None'", 'iters', '=', '(', '(', 'data', 'for', '_', ',', 'data', 'in', 'arg', ')', 'if', 'isinstance', '(', 'arg', ',', 'SeriesGroupBy', ')', 'else', 'itertools', '.', 'repeat', '(', 'arg', ')', 'for', 'arg', 'in', 'args', '[', '1', ':', ']', ')', 'funcsig', '=', 'signature', '(', 'func', ')', 'def', 'aggregator', '(', 'first', ',', '*', 'rest', ',', '*', '*', 'kwargs', ')', ':', '# map(next, *rest) gets the inputs for the next group', '# TODO: might be inefficient to do this on every call', 'args', ',', 'kwargs', '=', 'arguments_from_signature', '(', 'funcsig', ',', 'first', ',', '*', 'map', '(', 'next', ',', 'rest', ')', ',', '*', '*', 'kwargs', ')', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'result', '=', 'aggcontext', '.', 'agg', '(', 'args', '[', '0', ']', ',', 'aggregator', ',', '*', 'iters', ',', '*', '*', 'kwargs', ')', 'return', 'result', '@', 'functools', '.', 'wraps', '(', 'func', ')', 'def', 'wrapped', '(', '*', 'args', ')', ':', 'return', 'UDAFNode', '(', '*', 'args', ')', '.', 'to_expr', '(', ')', 'return', 'wrapped', 'return', 'wrapper'] | Define a user-defined function that is applied per group.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of the types found in :mod:`~ibis.expr.datatypes`. The
length of this list must match the number of arguments to the
function. Variadic arguments are not yet supported.
output_type : ibis.expr.datatypes.DataType
The return type of the function.
base_class : Type[T]
The base class of the generated Node
output_type_method : Callable
A callable that determines the method to call to get the expression
type of the UDF
See Also
--------
ibis.pandas.udf.reduction
ibis.pandas.udf.analytic | ['Define', 'a', 'user', '-', 'defined', 'function', 'that', 'is', 'applied', 'per', 'group', '.'] | train | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/udf.py#L433-L527 |
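`_grouped` is internal plumbing; it is normally reached through the `reduction` and `analytic` decorators mentioned in the See Also section. A hedged sketch of defining a reduction UDF (exact import paths may vary between ibis versions):
import ibis.expr.datatypes as dt
from ibis.pandas import udf

@udf.reduction(input_type=[dt.double], output_type=dt.double)
def my_mean(series):
    # receives a pandas Series (or one group of a SeriesGroupBy) at execution time
    return series.mean()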
9,152 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/window.py | Window.set_thread | def set_thread(self, thread = None):
"""
Manually set the thread process. Use with care!
@type thread: L{Thread}
@param thread: (Optional) Thread object. Use C{None} to autodetect.
"""
if thread is None:
self.__thread = None
else:
self.__load_Thread_class()
if not isinstance(thread, Thread):
msg = "Parent thread must be a Thread instance, "
msg += "got %s instead" % type(thread)
raise TypeError(msg)
self.dwThreadId = thread.get_tid()
self.__thread = thread | python | def set_thread(self, thread = None):
"""
Manually set the thread process. Use with care!
@type thread: L{Thread}
@param thread: (Optional) Thread object. Use C{None} to autodetect.
"""
if thread is None:
self.__thread = None
else:
self.__load_Thread_class()
if not isinstance(thread, Thread):
msg = "Parent thread must be a Thread instance, "
msg += "got %s instead" % type(thread)
raise TypeError(msg)
self.dwThreadId = thread.get_tid()
self.__thread = thread | ['def', 'set_thread', '(', 'self', ',', 'thread', '=', 'None', ')', ':', 'if', 'thread', 'is', 'None', ':', 'self', '.', '__thread', '=', 'None', 'else', ':', 'self', '.', '__load_Thread_class', '(', ')', 'if', 'not', 'isinstance', '(', 'thread', ',', 'Thread', ')', ':', 'msg', '=', '"Parent thread must be a Thread instance, "', 'msg', '+=', '"got %s instead"', '%', 'type', '(', 'thread', ')', 'raise', 'TypeError', '(', 'msg', ')', 'self', '.', 'dwThreadId', '=', 'thread', '.', 'get_tid', '(', ')', 'self', '.', '__thread', '=', 'thread'] | Manually set the thread process. Use with care!
@type thread: L{Thread}
@param thread: (Optional) Thread object. Use C{None} to autodetect. | ['Manually', 'set', 'the', 'thread', 'process', '.', 'Use', 'with', 'care!'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/window.py#L233-L249 |
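A hedged sketch of pairing a Window with a Thread; both objects are assumed to have been obtained elsewhere (for example from a winappdbg System or Process snapshot):
# 'window' is an existing winappdbg Window instance, 'thread' an existing Thread instance.
window.set_thread(thread)    # associate the window with this thread
print(window.dwThreadId)     # now matches thread.get_tid()
window.set_thread(None)      # revert to autodetection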
9,153 | quodlibet/mutagen | mutagen/_senf/_fsnative.py | _decode_surrogatepass | def _decode_surrogatepass(data, codec):
"""Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work
on Python < 3.4 + Windows
https://bugs.python.org/issue27971
Raises UnicodeDecodeError, LookupError
"""
try:
return data.decode(codec, _surrogatepass)
except UnicodeDecodeError:
if not _codec_can_decode_with_surrogatepass(codec):
if _normalize_codec(codec) == "utf-16-be":
data = _swap_bytes(data)
codec = "utf-16-le"
if _normalize_codec(codec) == "utf-16-le":
buffer_ = ctypes.create_string_buffer(data + b"\x00\x00")
value = ctypes.wstring_at(buffer_, len(data) // 2)
if value.encode("utf-16-le", _surrogatepass) != data:
raise
return value
else:
raise
else:
raise | python | def _decode_surrogatepass(data, codec):
"""Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work
on Python < 3.4 + Windows
https://bugs.python.org/issue27971
Raises UnicodeDecodeError, LookupError
"""
try:
return data.decode(codec, _surrogatepass)
except UnicodeDecodeError:
if not _codec_can_decode_with_surrogatepass(codec):
if _normalize_codec(codec) == "utf-16-be":
data = _swap_bytes(data)
codec = "utf-16-le"
if _normalize_codec(codec) == "utf-16-le":
buffer_ = ctypes.create_string_buffer(data + b"\x00\x00")
value = ctypes.wstring_at(buffer_, len(data) // 2)
if value.encode("utf-16-le", _surrogatepass) != data:
raise
return value
else:
raise
else:
raise | ['def', '_decode_surrogatepass', '(', 'data', ',', 'codec', ')', ':', 'try', ':', 'return', 'data', '.', 'decode', '(', 'codec', ',', '_surrogatepass', ')', 'except', 'UnicodeDecodeError', ':', 'if', 'not', '_codec_can_decode_with_surrogatepass', '(', 'codec', ')', ':', 'if', '_normalize_codec', '(', 'codec', ')', '==', '"utf-16-be"', ':', 'data', '=', '_swap_bytes', '(', 'data', ')', 'codec', '=', '"utf-16-le"', 'if', '_normalize_codec', '(', 'codec', ')', '==', '"utf-16-le"', ':', 'buffer_', '=', 'ctypes', '.', 'create_string_buffer', '(', 'data', '+', 'b"\\x00\\x00"', ')', 'value', '=', 'ctypes', '.', 'wstring_at', '(', 'buffer_', ',', 'len', '(', 'data', ')', '//', '2', ')', 'if', 'value', '.', 'encode', '(', '"utf-16-le"', ',', '_surrogatepass', ')', '!=', 'data', ':', 'raise', 'return', 'value', 'else', ':', 'raise', 'else', ':', 'raise'] | Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work
on Python < 3.4 + Windows
https://bugs.python.org/issue27971
Raises UnicodeDecodeError, LookupError | ['Like', 'data', '.', 'decode', '(', 'codec', 'surrogatepass', ')', 'but', 'makes', 'utf', '-', '16', '-', 'le', '/', 'be', 'work', 'on', 'Python', '<', '3', '.', '4', '+', 'Windows'] | train | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_fsnative.py#L95-L120 |
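`_decode_surrogatepass` is a private helper; the sketch below (Python 3) calls it directly only to illustrate the round-trip behaviour it is meant to guarantee:
data = u"abc\udcff".encode("utf-16-le", "surrogatepass")
text = _decode_surrogatepass(data, "utf-16-le")
assert text == u"abc\udcff"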
9,154 | cloudmesh-cmd3/cmd3 | cmd3/plugins/shell_scope.py | shell_scope.do_loglevel | def do_loglevel(self, args, arguments):
"""
::
Usage:
loglevel
loglevel critical
loglevel error
loglevel warning
loglevel info
loglevel debug
Shows current log level or changes it.
loglevel - shows current log level
critical - shows log message in critical level
error - shows log message in error level including critical
warning - shows log message in warning level including error
info - shows log message in info level including warning
debug - shows log message in debug level including info
"""
if arguments['debug']:
self.loglevel = "DEBUG"
elif arguments['error']:
self.loglevel = "ERROR"
elif arguments['warning']:
self.loglevel = "WARNING"
elif arguments['info']:
self.loglevel = "INFO"
elif arguments['critical']:
self.loglevel = "CRITICAL"
else:
Console.ok("Log level: {0}".format(self.loglevel))
return
Console.ok ("Log level: {0} is set".format(self.loglevel))
filename = path_expand("~/.cloudmesh/cmd3.yaml")
config = ConfigDict(filename=filename)
config["cmd3"]["properties"]["loglevel"] = self.loglevel
config.write(filename=filename, output="yaml", attribute_indent=" ") | python | def do_loglevel(self, args, arguments):
"""
::
Usage:
loglevel
loglevel critical
loglevel error
loglevel warning
loglevel info
loglevel debug
Shows current log level or changes it.
loglevel - shows current log level
critical - shows log message in critical level
error - shows log message in error level including critical
warning - shows log message in warning level including error
info - shows log message in info level including warning
debug - shows log message in debug level including info
"""
if arguments['debug']:
self.loglevel = "DEBUG"
elif arguments['error']:
self.loglevel = "ERROR"
elif arguments['warning']:
self.loglevel = "WARNING"
elif arguments['info']:
self.loglevel = "INFO"
elif arguments['critical']:
self.loglevel = "CRITICAL"
else:
Console.ok("Log level: {0}".format(self.loglevel))
return
Console.ok ("Log level: {0} is set".format(self.loglevel))
filename = path_expand("~/.cloudmesh/cmd3.yaml")
config = ConfigDict(filename=filename)
config["cmd3"]["properties"]["loglevel"] = self.loglevel
config.write(filename=filename, output="yaml", attribute_indent=" ") | ['def', 'do_loglevel', '(', 'self', ',', 'args', ',', 'arguments', ')', ':', 'if', 'arguments', '[', "'debug'", ']', ':', 'self', '.', 'loglevel', '=', '"DEBUG"', 'elif', 'arguments', '[', "'error'", ']', ':', 'self', '.', 'loglevel', '=', '"ERROR"', 'elif', 'arguments', '[', "'warning'", ']', ':', 'self', '.', 'loglevel', '=', '"WARNING"', 'elif', 'arguments', '[', "'info'", ']', ':', 'self', '.', 'loglevel', '=', '"INFO"', 'elif', 'arguments', '[', "'critical'", ']', ':', 'self', '.', 'loglevel', '=', '"CRITICAL"', 'else', ':', 'Console', '.', 'ok', '(', '"Log level: {0}"', '.', 'format', '(', 'self', '.', 'loglevel', ')', ')', 'return', 'Console', '.', 'ok', '(', '"Log level: {0} is set"', '.', 'format', '(', 'self', '.', 'loglevel', ')', ')', 'filename', '=', 'path_expand', '(', '"~/.cloudmesh/cmd3.yaml"', ')', 'config', '=', 'ConfigDict', '(', 'filename', '=', 'filename', ')', 'config', '[', '"cmd3"', ']', '[', '"properties"', ']', '[', '"loglevel"', ']', '=', 'self', '.', 'loglevel', 'config', '.', 'write', '(', 'filename', '=', 'filename', ',', 'output', '=', '"yaml"', ',', 'attribute_indent', '=', '" "', ')'] | ::
Usage:
loglevel
loglevel critical
loglevel error
loglevel warning
loglevel info
loglevel debug
Shows current log level or changes it.
loglevel - shows current log level
critical - shows log message in critical level
error - shows log message in error level including critical
warning - shows log message in warning level including error
info - shows log message in info level including warning
debug - shows log message in debug level including info | ['::'] | train | https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/plugins/shell_scope.py#L279-L319 |
9,155 | saltstack/salt | salt/states/smartos.py | image_vacuum | def image_vacuum(name):
'''
Delete images not in use or installed via image_present
.. warning::
Only image_present states that are included via the
top file will be detected.
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# list of images to keep
images = []
# retrieve image_present state data for host
for state in __salt__['state.show_lowstate']():
# don't throw exceptions when not highstate run
if 'state' not in state:
continue
# skip if not from this state module
if state['state'] != __virtualname__:
continue
# skip if not image_present
if state['fun'] not in ['image_present']:
continue
# keep images installed via image_present
if 'name' in state:
if _is_uuid(state['name']):
images.append(state['name'])
elif _is_docker_uuid(state['name']):
state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
if not state['name']:
continue
images.append(state['name'])
# retrieve images in use by vms
for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
if image_uuid not in images:
images.append(image_uuid)
# purge unused images
ret['result'] = True
for image_uuid in __salt__['imgadm.list']():
if image_uuid in images:
continue
image = __salt__['imgadm.get'](image_uuid)
if image['manifest']['name'] == 'docker-layer':
# NOTE: docker images are made of multiple layers, loop over them
while image:
image_uuid = image['manifest']['uuid']
if image_uuid in __salt__['imgadm.delete'](image_uuid):
ret['changes'][image_uuid] = None
else:
ret['result'] = False
ret['comment'] = 'failed to delete images'
if 'origin' in image['manifest']:
image = __salt__['imgadm.get'](image['manifest']['origin'])
else:
image = None
else:
# NOTE: normal images can just be deleted
if image_uuid in __salt__['imgadm.delete'](image_uuid):
ret['changes'][image_uuid] = None
else:
ret['result'] = False
ret['comment'] = 'failed to delete images'
if ret['result'] and not ret['changes']:
ret['comment'] = 'no images deleted'
elif ret['result'] and ret['changes']:
ret['comment'] = 'images deleted'
return ret | python | def image_vacuum(name):
'''
Delete images not in use or installed via image_present
.. warning::
Only image_present states that are included via the
top file will be detected.
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# list of images to keep
images = []
# retrieve image_present state data for host
for state in __salt__['state.show_lowstate']():
# don't throw exceptions when not highstate run
if 'state' not in state:
continue
# skip if not from this state module
if state['state'] != __virtualname__:
continue
# skip if not image_present
if state['fun'] not in ['image_present']:
continue
# keep images installed via image_present
if 'name' in state:
if _is_uuid(state['name']):
images.append(state['name'])
elif _is_docker_uuid(state['name']):
state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
if not state['name']:
continue
images.append(state['name'])
# retrieve images in use by vms
for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
if image_uuid not in images:
images.append(image_uuid)
# purge unused images
ret['result'] = True
for image_uuid in __salt__['imgadm.list']():
if image_uuid in images:
continue
image = __salt__['imgadm.get'](image_uuid)
if image['manifest']['name'] == 'docker-layer':
# NOTE: docker images are made of multiple layers, loop over them
while image:
image_uuid = image['manifest']['uuid']
if image_uuid in __salt__['imgadm.delete'](image_uuid):
ret['changes'][image_uuid] = None
else:
ret['result'] = False
ret['comment'] = 'failed to delete images'
if 'origin' in image['manifest']:
image = __salt__['imgadm.get'](image['manifest']['origin'])
else:
image = None
else:
# NOTE: normal images can just be deleted
if image_uuid in __salt__['imgadm.delete'](image_uuid):
ret['changes'][image_uuid] = None
else:
ret['result'] = False
ret['comment'] = 'failed to delete images'
if ret['result'] and not ret['changes']:
ret['comment'] = 'no images deleted'
elif ret['result'] and ret['changes']:
ret['comment'] = 'images deleted'
return ret | ['def', 'image_vacuum', '(', 'name', ')', ':', 'name', '=', 'name', '.', 'lower', '(', ')', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'None', ',', "'comment'", ':', "''", '}', '# list of images to keep', 'images', '=', '[', ']', '# retrieve image_present state data for host', 'for', 'state', 'in', '__salt__', '[', "'state.show_lowstate'", ']', '(', ')', ':', "# don't throw exceptions when not highstate run", 'if', "'state'", 'not', 'in', 'state', ':', 'continue', '# skip if not from this state module', 'if', 'state', '[', "'state'", ']', '!=', '__virtualname__', ':', 'continue', '# skip if not image_present', 'if', 'state', '[', "'fun'", ']', 'not', 'in', '[', "'image_present'", ']', ':', 'continue', '# keep images installed via image_present', 'if', "'name'", 'in', 'state', ':', 'if', '_is_uuid', '(', 'state', '[', "'name'", ']', ')', ':', 'images', '.', 'append', '(', 'state', '[', "'name'", ']', ')', 'elif', '_is_docker_uuid', '(', 'state', '[', "'name'", ']', ')', ':', 'state', '[', "'name'", ']', '=', '__salt__', '[', "'imgadm.docker_to_uuid'", ']', '(', 'state', '[', "'name'", ']', ')', 'if', 'not', 'state', '[', "'name'", ']', ':', 'continue', 'images', '.', 'append', '(', 'state', '[', "'name'", ']', ')', '# retrieve images in use by vms', 'for', 'image_uuid', 'in', '__salt__', '[', "'vmadm.list'", ']', '(', 'order', '=', "'image_uuid'", ')', ':', 'if', 'image_uuid', 'not', 'in', 'images', ':', 'images', '.', 'append', '(', 'image_uuid', ')', '# purge unused images', 'ret', '[', "'result'", ']', '=', 'True', 'for', 'image_uuid', 'in', '__salt__', '[', "'imgadm.list'", ']', '(', ')', ':', 'if', 'image_uuid', 'in', 'images', ':', 'continue', 'image', '=', '__salt__', '[', "'imgadm.get'", ']', '(', 'image_uuid', ')', 'if', 'image', '[', "'manifest'", ']', '[', "'name'", ']', '==', "'docker-layer'", ':', '# NOTE: docker images are made of multiple layers, loop over them', 'while', 'image', ':', 'image_uuid', '=', 'image', '[', "'manifest'", ']', '[', "'uuid'", ']', 'if', 'image_uuid', 'in', '__salt__', '[', "'imgadm.delete'", ']', '(', 'image_uuid', ')', ':', 'ret', '[', "'changes'", ']', '[', 'image_uuid', ']', '=', 'None', 'else', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'failed to delete images'", 'if', "'origin'", 'in', 'image', '[', "'manifest'", ']', ':', 'image', '=', '__salt__', '[', "'imgadm.get'", ']', '(', 'image', '[', "'manifest'", ']', '[', "'origin'", ']', ')', 'else', ':', 'image', '=', 'None', 'else', ':', '# NOTE: normal images can just be delete', 'if', 'image_uuid', 'in', '__salt__', '[', "'imgadm.delete'", ']', '(', 'image_uuid', ')', ':', 'ret', '[', "'changes'", ']', '[', 'image_uuid', ']', '=', 'None', 'else', ':', 'ret', '[', "'result'", ']', '=', 'False', 'ret', '[', "'comment'", ']', '=', "'failed to delete images'", 'if', 'ret', '[', "'result'", ']', 'and', 'not', 'ret', '[', "'changes'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'no images deleted'", 'elif', 'ret', '[', "'result'", ']', 'and', 'ret', '[', "'changes'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'images deleted'", 'return', 'ret'] | Delete images not in use or installed via image_present
.. warning::
Only image_present states that are included via the
top file will be detected. | ['Delete', 'images', 'not', 'in', 'use', 'or', 'installed', 'via', 'image_present'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/smartos.py#L607-L685 |
9,156 | OCA/odoorpc | odoorpc/report.py | Report.list | def list(self):
"""List available reports from the server by returning a dictionary
with reports classified by data model:
.. doctest::
:options: +SKIP
>>> odoo.report.list()['account.invoice']
[{'name': u'Duplicates',
'report_name': u'account.account_invoice_report_duplicate_main',
'report_type': u'qweb-pdf'},
{'name': 'Invoices',
'report_type': 'qweb-pdf',
'report_name': 'account.report_invoice'}]
.. doctest::
:hide:
>>> from pprint import pprint as pp
>>> any(data['report_name'] == 'account.report_invoice'
... for data in odoo.report.list()['account.invoice'])
True
*Python 2:*
:return: `list` of dictionaries
:raise: `urllib2.URLError` (connection error)
*Python 3:*
:return: `list` of dictionaries
:raise: `urllib.error.URLError` (connection error)
"""
report_model = 'ir.actions.report'
if v(self._odoo.version)[0] < 11:
report_model = 'ir.actions.report.xml'
IrReport = self._odoo.env[report_model]
report_ids = IrReport.search([])
reports = IrReport.read(
report_ids, ['name', 'model', 'report_name', 'report_type'])
result = {}
for report in reports:
model = report.pop('model')
report.pop('id')
if model not in result:
result[model] = []
result[model].append(report)
return result | python | def list(self):
"""List available reports from the server by returning a dictionary
with reports classified by data model:
.. doctest::
:options: +SKIP
>>> odoo.report.list()['account.invoice']
[{'name': u'Duplicates',
'report_name': u'account.account_invoice_report_duplicate_main',
'report_type': u'qweb-pdf'},
{'name': 'Invoices',
'report_type': 'qweb-pdf',
'report_name': 'account.report_invoice'}]
.. doctest::
:hide:
>>> from pprint import pprint as pp
>>> any(data['report_name'] == 'account.report_invoice'
... for data in odoo.report.list()['account.invoice'])
True
*Python 2:*
:return: `list` of dictionaries
:raise: `urllib2.URLError` (connection error)
*Python 3:*
:return: `list` of dictionaries
:raise: `urllib.error.URLError` (connection error)
"""
report_model = 'ir.actions.report'
if v(self._odoo.version)[0] < 11:
report_model = 'ir.actions.report.xml'
IrReport = self._odoo.env[report_model]
report_ids = IrReport.search([])
reports = IrReport.read(
report_ids, ['name', 'model', 'report_name', 'report_type'])
result = {}
for report in reports:
model = report.pop('model')
report.pop('id')
if model not in result:
result[model] = []
result[model].append(report)
return result | ['def', 'list', '(', 'self', ')', ':', 'report_model', '=', "'ir.actions.report'", 'if', 'v', '(', 'self', '.', '_odoo', '.', 'version', ')', '[', '0', ']', '<', '11', ':', 'report_model', '=', "'ir.actions.report.xml'", 'IrReport', '=', 'self', '.', '_odoo', '.', 'env', '[', 'report_model', ']', 'report_ids', '=', 'IrReport', '.', 'search', '(', '[', ']', ')', 'reports', '=', 'IrReport', '.', 'read', '(', 'report_ids', ',', '[', "'name'", ',', "'model'", ',', "'report_name'", ',', "'report_type'", ']', ')', 'result', '=', '{', '}', 'for', 'report', 'in', 'reports', ':', 'model', '=', 'report', '.', 'pop', '(', "'model'", ')', 'report', '.', 'pop', '(', "'id'", ')', 'if', 'model', 'not', 'in', 'result', ':', 'result', '[', 'model', ']', '=', '[', ']', 'result', '[', 'model', ']', '.', 'append', '(', 'report', ')', 'return', 'result'] | List available reports from the server by returning a dictionary
with reports classified by data model:
.. doctest::
:options: +SKIP
>>> odoo.report.list()['account.invoice']
[{'name': u'Duplicates',
'report_name': u'account.account_invoice_report_duplicate_main',
'report_type': u'qweb-pdf'},
{'name': 'Invoices',
'report_type': 'qweb-pdf',
'report_name': 'account.report_invoice'}]
.. doctest::
:hide:
>>> from pprint import pprint as pp
>>> any(data['report_name'] == 'account.report_invoice'
... for data in odoo.report.list()['account.invoice'])
True
*Python 2:*
:return: `list` of dictionaries
:raise: `urllib2.URLError` (connection error)
*Python 3:*
:return: `list` of dictionaries
:raise: `urllib.error.URLError` (connection error) | ['List', 'available', 'reports', 'from', 'the', 'server', 'by', 'returning', 'a', 'dictionary', 'with', 'reports', 'classified', 'by', 'data', 'model', ':'] | train | https://github.com/OCA/odoorpc/blob/d90aa0b2bc4fafbab8bd8f50d50e3fb0b9ba91f0/odoorpc/report.py#L158-L205 |
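A hedged sketch of listing reports over RPC; host, database name and credentials are placeholders:
import odoorpc

odoo = odoorpc.ODOO('localhost', port=8069)
odoo.login('my_database', 'admin', 'admin')
reports_by_model = odoo.report.list()
print(reports_by_model.get('account.invoice', []))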
9,157 | MolSSI-BSE/basis_set_exchange | basis_set_exchange/curate/compare.py | electron_shells_are_subset | def electron_shells_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
'''
Determine if a list of electron shells is a subset of another
If 'subset' is a subset of the 'superset', True is returned.
The shells are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
for item1 in subset:
for item2 in superset:
if compare_electron_shells(item1, item2, compare_meta, rel_tol):
break
else:
return False
return True | python | def electron_shells_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
'''
Determine if a list of electron shells is a subset of another
If 'subset' is a subset of the 'superset', True is returned.
The shells are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality.
'''
for item1 in subset:
for item2 in superset:
if compare_electron_shells(item1, item2, compare_meta, rel_tol):
break
else:
return False
return True | ['def', 'electron_shells_are_subset', '(', 'subset', ',', 'superset', ',', 'compare_meta', '=', 'False', ',', 'rel_tol', '=', '0.0', ')', ':', 'for', 'item1', 'in', 'subset', ':', 'for', 'item2', 'in', 'superset', ':', 'if', 'compare_electron_shells', '(', 'item1', ',', 'item2', ',', 'compare_meta', ',', 'rel_tol', ')', ':', 'break', 'else', ':', 'return', 'False', 'return', 'True'] | Determine if a list of electron shells is a subset of another
If 'subset' is a subset of the 'superset', True is returned.
The shells are compared approximately (exponents/coefficients are
within a tolerance)
If compare_meta is True, the metadata is also compared for exact equality. | ['Determine', 'if', 'a', 'list', 'of', 'electron', 'shells', 'is', 'a', 'subset', 'of', 'another'] | train | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/curate/compare.py#L147-L166 |
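A hedged sketch; the shell dictionaries follow the project's JSON layout as assumed here (field names and numbers are illustrative):
shell_s = {
    'function_type': 'gto',
    'region': '',
    'angular_momentum': [0],
    'exponents': ['13.01', '1.962'],
    'coefficients': [['0.019685', '0.137977']],
}
shell_p = dict(shell_s, angular_momentum=[1])
print(electron_shells_are_subset([shell_s], [shell_s, shell_p], rel_tol=1e-10))  # True
print(electron_shells_are_subset([shell_p], [shell_s], rel_tol=1e-10))           # False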
9,158 | Kozea/wdb | client/wdb/ext.py | _patch_tcpserver | def _patch_tcpserver():
"""
Patch shutdown_request to open blocking interaction after the end of the
request
"""
shutdown_request = TCPServer.shutdown_request
def shutdown_request_patched(*args, **kwargs):
thread = current_thread()
shutdown_request(*args, **kwargs)
if thread in _exc_cache:
post_mortem_interaction(*_exc_cache.pop(thread))
TCPServer.shutdown_request = shutdown_request_patched | python | def _patch_tcpserver():
"""
Patch shutdown_request to open blocking interaction after the end of the
request
"""
shutdown_request = TCPServer.shutdown_request
def shutdown_request_patched(*args, **kwargs):
thread = current_thread()
shutdown_request(*args, **kwargs)
if thread in _exc_cache:
post_mortem_interaction(*_exc_cache.pop(thread))
TCPServer.shutdown_request = shutdown_request_patched | ['def', '_patch_tcpserver', '(', ')', ':', 'shutdown_request', '=', 'TCPServer', '.', 'shutdown_request', 'def', 'shutdown_request_patched', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'thread', '=', 'current_thread', '(', ')', 'shutdown_request', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'thread', 'in', '_exc_cache', ':', 'post_mortem_interaction', '(', '*', '_exc_cache', '.', 'pop', '(', 'thread', ')', ')', 'TCPServer', '.', 'shutdown_request', '=', 'shutdown_request_patched'] | Patch shutdown_request to open blocking interaction after the end of the
request | ['Patch', 'shutdown_request', 'to', 'open', 'blocking', 'interaction', 'after', 'the', 'end', 'of', 'the', 'request'] | train | https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/ext.py#L33-L46 |
9,159 | cogeotiff/rio-tiler | rio_tiler/utils.py | expression | def expression(sceneid, tile_x, tile_y, tile_z, expr=None, **kwargs):
"""
Apply expression on data.
Attributes
----------
sceneid : str
Landsat id, Sentinel id, CBERS ids or file url.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
expr : str, required
Expression to apply (e.g '(B5+B4)/(B5-B4)')
Band name should start with 'B'.
Returns
-------
out : ndarray
Returns processed pixel value.
"""
if not expr:
raise Exception("Missing expression")
bands_names = tuple(set(re.findall(r"b(?P<bands>[0-9A]{1,2})", expr)))
rgb = expr.split(",")
if sceneid.startswith("L"):
from rio_tiler.landsat8 import tile as l8_tile
arr, mask = l8_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
elif sceneid.startswith("S2"):
from rio_tiler.sentinel2 import tile as s2_tile
arr, mask = s2_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
elif sceneid.startswith("CBERS"):
from rio_tiler.cbers import tile as cbers_tile
arr, mask = cbers_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
else:
from rio_tiler.main import tile as main_tile
bands = tuple(map(int, bands_names))
arr, mask = main_tile(sceneid, tile_x, tile_y, tile_z, indexes=bands, **kwargs)
ctx = {}
for bdx, b in enumerate(bands_names):
ctx["b{}".format(b)] = arr[bdx]
return (
np.array(
[np.nan_to_num(ne.evaluate(bloc.strip(), local_dict=ctx)) for bloc in rgb]
),
mask,
) | python | def expression(sceneid, tile_x, tile_y, tile_z, expr=None, **kwargs):
"""
Apply expression on data.
Attributes
----------
sceneid : str
Landsat id, Sentinel id, CBERS ids or file url.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
expr : str, required
Expression to apply (e.g '(B5+B4)/(B5-B4)')
Band name should start with 'B'.
Returns
-------
out : ndarray
Returns processed pixel value.
"""
if not expr:
raise Exception("Missing expression")
bands_names = tuple(set(re.findall(r"b(?P<bands>[0-9A]{1,2})", expr)))
rgb = expr.split(",")
if sceneid.startswith("L"):
from rio_tiler.landsat8 import tile as l8_tile
arr, mask = l8_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
elif sceneid.startswith("S2"):
from rio_tiler.sentinel2 import tile as s2_tile
arr, mask = s2_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
elif sceneid.startswith("CBERS"):
from rio_tiler.cbers import tile as cbers_tile
arr, mask = cbers_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
else:
from rio_tiler.main import tile as main_tile
bands = tuple(map(int, bands_names))
arr, mask = main_tile(sceneid, tile_x, tile_y, tile_z, indexes=bands, **kwargs)
ctx = {}
for bdx, b in enumerate(bands_names):
ctx["b{}".format(b)] = arr[bdx]
return (
np.array(
[np.nan_to_num(ne.evaluate(bloc.strip(), local_dict=ctx)) for bloc in rgb]
),
mask,
) | ['def', 'expression', '(', 'sceneid', ',', 'tile_x', ',', 'tile_y', ',', 'tile_z', ',', 'expr', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'expr', ':', 'raise', 'Exception', '(', '"Missing expression"', ')', 'bands_names', '=', 'tuple', '(', 'set', '(', 're', '.', 'findall', '(', 'r"b(?P<bands>[0-9A]{1,2})"', ',', 'expr', ')', ')', ')', 'rgb', '=', 'expr', '.', 'split', '(', '","', ')', 'if', 'sceneid', '.', 'startswith', '(', '"L"', ')', ':', 'from', 'rio_tiler', '.', 'landsat8', 'import', 'tile', 'as', 'l8_tile', 'arr', ',', 'mask', '=', 'l8_tile', '(', 'sceneid', ',', 'tile_x', ',', 'tile_y', ',', 'tile_z', ',', 'bands', '=', 'bands_names', ',', '*', '*', 'kwargs', ')', 'elif', 'sceneid', '.', 'startswith', '(', '"S2"', ')', ':', 'from', 'rio_tiler', '.', 'sentinel2', 'import', 'tile', 'as', 's2_tile', 'arr', ',', 'mask', '=', 's2_tile', '(', 'sceneid', ',', 'tile_x', ',', 'tile_y', ',', 'tile_z', ',', 'bands', '=', 'bands_names', ',', '*', '*', 'kwargs', ')', 'elif', 'sceneid', '.', 'startswith', '(', '"CBERS"', ')', ':', 'from', 'rio_tiler', '.', 'cbers', 'import', 'tile', 'as', 'cbers_tile', 'arr', ',', 'mask', '=', 'cbers_tile', '(', 'sceneid', ',', 'tile_x', ',', 'tile_y', ',', 'tile_z', ',', 'bands', '=', 'bands_names', ',', '*', '*', 'kwargs', ')', 'else', ':', 'from', 'rio_tiler', '.', 'main', 'import', 'tile', 'as', 'main_tile', 'bands', '=', 'tuple', '(', 'map', '(', 'int', ',', 'bands_names', ')', ')', 'arr', ',', 'mask', '=', 'main_tile', '(', 'sceneid', ',', 'tile_x', ',', 'tile_y', ',', 'tile_z', ',', 'indexes', '=', 'bands', ',', '*', '*', 'kwargs', ')', 'ctx', '=', '{', '}', 'for', 'bdx', ',', 'b', 'in', 'enumerate', '(', 'bands_names', ')', ':', 'ctx', '[', '"b{}"', '.', 'format', '(', 'b', ')', ']', '=', 'arr', '[', 'bdx', ']', 'return', '(', 'np', '.', 'array', '(', '[', 'np', '.', 'nan_to_num', '(', 'ne', '.', 'evaluate', '(', 'bloc', '.', 'strip', '(', ')', ',', 'local_dict', '=', 'ctx', ')', ')', 'for', 'bloc', 'in', 'rgb', ']', ')', ',', 'mask', ',', ')'] | Apply expression on data.
Attributes
----------
sceneid : str
Landsat id, Sentinel id, CBERS ids or file url.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
expr : str, required
Expression to apply (e.g '(B5+B4)/(B5-B4)')
Band name should start with 'B'.
Returns
-------
out : ndarray
Returns processed pixel value. | ['Apply', 'expression', 'on', 'data', '.'] | train | https://github.com/cogeotiff/rio-tiler/blob/09bb0fc6cee556410477f016abbae172b12c46a6/rio_tiler/utils.py#L570-L634 |
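A hedged sketch computing NDVI for one mercator tile; the scene id and tile indices are placeholders, and note that the regular expression above matches lowercase band names such as `b4`/`b5`:
arr, mask = expression(
    'LC08_L1TP_016037_20170813_20170814_01_RT',  # hypothetical Landsat-8 scene id
    335, 1545, 12,                               # hypothetical tile x, y, zoom
    expr='(b5-b4)/(b5+b4)',                      # NDVI
)
print(arr.shape, mask.shape)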
9,160 | pulumi/pulumi | sdk/python/lib/pulumi/config.py | Config.get_float | def get_float(self, key: str) -> Optional[float]:
"""
Returns an optional configuration value, as a float, by its key, or None if it doesn't exist.
If the configuration value isn't a legal float, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[float]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to float.
"""
v = self.get(key)
if v is None:
return None
try:
return float(v)
except:
raise ConfigTypeError(self.full_key(key), v, 'float') | python | def get_float(self, key: str) -> Optional[float]:
"""
Returns an optional configuration value, as a float, by its key, or None if it doesn't exist.
If the configuration value isn't a legal float, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[float]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to float.
"""
v = self.get(key)
if v is None:
return None
try:
return float(v)
except:
raise ConfigTypeError(self.full_key(key), v, 'float') | ['def', 'get_float', '(', 'self', ',', 'key', ':', 'str', ')', '->', 'Optional', '[', 'float', ']', ':', 'v', '=', 'self', '.', 'get', '(', 'key', ')', 'if', 'v', 'is', 'None', ':', 'return', 'None', 'try', ':', 'return', 'float', '(', 'v', ')', 'except', ':', 'raise', 'ConfigTypeError', '(', 'self', '.', 'full_key', '(', 'key', ')', ',', 'v', ',', "'float'", ')'] | Returns an optional configuration value, as a float, by its key, or None if it doesn't exist.
If the configuration value isn't a legal float, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[float]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to float. | ['Returns', 'an', 'optional', 'configuration', 'value', 'as', 'a', 'float', 'by', 'its', 'key', 'or', 'None', 'if', 'it', 'doesn', 't', 'exist', '.', 'If', 'the', 'configuration', 'value', 'isn', 't', 'a', 'legal', 'float', 'this', 'function', 'will', 'throw', 'an', 'error', '.'] | train | https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/config.py#L97-L113 |
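Typical use inside a Pulumi program; the configuration key is arbitrary:
import pulumi

config = pulumi.Config()
instance_size = config.get_float('instanceSize')
if instance_size is None:
    instance_size = 1.0  # fall back to a default when the key is unset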
9,161 | jic-dtool/dtool-info | dtool_info/dataset.py | ls | def ls(quiet, verbose, uri):
"""List datasets / items in a dataset.
If the URI is a dataset the items in the dataset will be listed.
It is not possible to list the items in a proto dataset.
If the URI is a location containing datasets the datasets will be listed.
Proto datasets are highlighted in red.
"""
if dtoolcore._is_dataset(uri, CONFIG_PATH):
_list_dataset_items(uri, quiet, verbose)
else:
_list_datasets(uri, quiet, verbose) | python | def ls(quiet, verbose, uri):
"""List datasets / items in a dataset.
If the URI is a dataset the items in the dataset will be listed.
It is not possible to list the items in a proto dataset.
If the URI is a location containing datasets the datasets will be listed.
Proto datasets are highlighted in red.
"""
if dtoolcore._is_dataset(uri, CONFIG_PATH):
_list_dataset_items(uri, quiet, verbose)
else:
_list_datasets(uri, quiet, verbose) | ['def', 'ls', '(', 'quiet', ',', 'verbose', ',', 'uri', ')', ':', 'if', 'dtoolcore', '.', '_is_dataset', '(', 'uri', ',', 'CONFIG_PATH', ')', ':', '_list_dataset_items', '(', 'uri', ',', 'quiet', ',', 'verbose', ')', 'else', ':', '_list_datasets', '(', 'uri', ',', 'quiet', ',', 'verbose', ')'] | List datasets / items in a dataset.
If the URI is a dataset the items in the dataset will be listed.
It is not possible to list the items in a proto dataset.
If the URI is a location containing datasets the datasets will be listed.
Proto datasets are highlighted in red. | ['List', 'datasets', '/', 'items', 'in', 'a', 'dataset', '.'] | train | https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L161-L173 |
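`ls` is a Click command; a hedged sketch of driving it programmatically (the URI is a placeholder assumed to point at a directory containing datasets):
from click.testing import CliRunner
from dtool_info.dataset import ls

runner = CliRunner()
result = runner.invoke(ls, ['/tmp/my_datasets'])
print(result.output)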
9,162 | merll/docker-fabric | dockerfabric/cli.py | copy_resources | def copy_resources(src_container, src_resources, storage_dir, dst_directories=None, apply_chown=None, apply_chmod=None):
"""
Copies files and directories from a Docker container. Multiple resources can be copied and additional options are
available compared to :func:`copy_resource`. Unlike in :func:`copy_resource`, resources are copied as they are and not
compressed to a tarball, and they are left on the remote machine.
:param src_container: Container name or id.
:type src_container: unicode
:param src_resources: Resources, as (file or directory) names to copy.
:type src_resources: iterable
:param storage_dir: Remote directory to store the copied objects in.
:type storage_dir: unicode
:param dst_directories: Optional dictionary of destination directories, in the format ``resource: destination``. If
not set, resources will be in the same relative structure to one another as inside the container. For setting a
common default, use ``*`` as the resource key.
:type dst_directories: dict
:param apply_chown: Owner to set for the copied resources. Can be a user name or id, group name or id, both in the
notation ``user:group``, or as a tuple ``(user, group)``.
:type apply_chown: unicode or tuple
:param apply_chmod: File system permissions to set for the copied resources. Can be any notation as accepted by
`chmod`.
:type apply_chmod: unicode
"""
def _copy_resource(resource):
default_dest_path = generic_path if generic_path is not None else resource
dest_path = directories.get(resource, default_dest_path).strip(posixpath.sep)
head, tail = posixpath.split(dest_path)
rel_path = posixpath.join(storage_dir, head)
run(mkdir(rel_path, check_if_exists=True))
run('docker cp {0}:{1} {2}'.format(src_container, resource, rel_path), shell=False)
directories = dst_directories or {}
generic_path = directories.get('*')
for res in src_resources:
_copy_resource(res)
if apply_chmod:
run(chmod(apply_chmod, storage_dir))
if apply_chown:
sudo(chown(apply_chown, storage_dir)) | python | def copy_resources(src_container, src_resources, storage_dir, dst_directories=None, apply_chown=None, apply_chmod=None):
"""
Copies files and directories from a Docker container. Multiple resources can be copied and additional options are
available compared to :func:`copy_resource`. Unlike in :func:`copy_resource`, resources are copied as they are and not
compressed to a tarball, and they are left on the remote machine.
:param src_container: Container name or id.
:type src_container: unicode
:param src_resources: Resources, as (file or directory) names to copy.
:type src_resources: iterable
:param storage_dir: Remote directory to store the copied objects in.
:type storage_dir: unicode
:param dst_directories: Optional dictionary of destination directories, in the format ``resource: destination``. If
not set, resources will be in the same relative structure to one another as inside the container. For setting a
common default, use ``*`` as the resource key.
:type dst_directories: dict
:param apply_chown: Owner to set for the copied resources. Can be a user name or id, group name or id, both in the
notation ``user:group``, or as a tuple ``(user, group)``.
:type apply_chown: unicode or tuple
:param apply_chmod: File system permissions to set for the copied resources. Can be any notation as accepted by
`chmod`.
:type apply_chmod: unicode
"""
def _copy_resource(resource):
default_dest_path = generic_path if generic_path is not None else resource
dest_path = directories.get(resource, default_dest_path).strip(posixpath.sep)
head, tail = posixpath.split(dest_path)
rel_path = posixpath.join(storage_dir, head)
run(mkdir(rel_path, check_if_exists=True))
run('docker cp {0}:{1} {2}'.format(src_container, resource, rel_path), shell=False)
directories = dst_directories or {}
generic_path = directories.get('*')
for res in src_resources:
_copy_resource(res)
if apply_chmod:
run(chmod(apply_chmod, storage_dir))
if apply_chown:
sudo(chown(apply_chown, storage_dir)) | ['def', 'copy_resources', '(', 'src_container', ',', 'src_resources', ',', 'storage_dir', ',', 'dst_directories', '=', 'None', ',', 'apply_chown', '=', 'None', ',', 'apply_chmod', '=', 'None', ')', ':', 'def', '_copy_resource', '(', 'resource', ')', ':', 'default_dest_path', '=', 'generic_path', 'if', 'generic_path', 'is', 'not', 'None', 'else', 'resource', 'dest_path', '=', 'directories', '.', 'get', '(', 'resource', ',', 'default_dest_path', ')', '.', 'strip', '(', 'posixpath', '.', 'sep', ')', 'head', ',', 'tail', '=', 'posixpath', '.', 'split', '(', 'dest_path', ')', 'rel_path', '=', 'posixpath', '.', 'join', '(', 'storage_dir', ',', 'head', ')', 'run', '(', 'mkdir', '(', 'rel_path', ',', 'check_if_exists', '=', 'True', ')', ')', 'run', '(', "'docker cp {0}:{1} {2}'", '.', 'format', '(', 'src_container', ',', 'resource', ',', 'rel_path', ')', ',', 'shell', '=', 'False', ')', 'directories', '=', 'dst_directories', 'or', '{', '}', 'generic_path', '=', 'directories', '.', 'get', '(', "'*'", ')', 'for', 'res', 'in', 'src_resources', ':', '_copy_resource', '(', 'res', ')', 'if', 'apply_chmod', ':', 'run', '(', 'chmod', '(', 'apply_chmod', ',', 'storage_dir', ')', ')', 'if', 'apply_chown', ':', 'sudo', '(', 'chown', '(', 'apply_chown', ',', 'storage_dir', ')', ')'] | Copies files and directories from a Docker container. Multiple resources can be copied and additional options are
available compared to :func:`copy_resource`. Unlike in :func:`copy_resource`, resources are copied as they are and not
compressed to a tarball, and they are left on the remote machine.
:param src_container: Container name or id.
:type src_container: unicode
:param src_resources: Resources, as (file or directory) names to copy.
:type src_resources: iterable
:param storage_dir: Remote directory to store the copied objects in.
:type storage_dir: unicode
:param dst_directories: Optional dictionary of destination directories, in the format ``resource: destination``. If
not set, resources will be in the same relative structure to one another as inside the container. For setting a
common default, use ``*`` as the resource key.
:type dst_directories: dict
:param apply_chown: Owner to set for the copied resources. Can be a user name or id, group name or id, both in the
notation ``user:group``, or as a tuple ``(user, group)``.
:type apply_chown: unicode or tuple
:param apply_chmod: File system permissions to set for the copied resources. Can be any notation as accepted by
`chmod`.
:type apply_chmod: unicode | ['Copies', 'files', 'and', 'directories', 'from', 'a', 'Docker', 'container', '.', 'Multiple', 'resources', 'can', 'be', 'copied', 'and', 'additional', 'options', 'are', 'available', 'than', 'in', ':', 'func', ':', 'copy_resource', '.', 'Unlike', 'in', ':', 'func', ':', 'copy_resource', 'Resources', 'are', 'copied', 'as', 'they', 'are', 'and', 'not', 'compressed', 'to', 'a', 'tarball', 'and', 'they', 'are', 'left', 'on', 'the', 'remote', 'machine', '.'] | train | https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/cli.py#L324-L362 |
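A hedged sketch of a Fabric 1.x task built on this helper; container name, paths and ownership values are placeholders:
from fabric.api import task
from dockerfabric.cli import copy_resources

@task
def backup_app_config():
    copy_resources(
        'my_app_container',
        ['/etc/myapp/config.yml', '/var/lib/myapp/data'],
        '/srv/backups/myapp',
        apply_chown='backup:backup',
        apply_chmod='u+rwX,go-rwx',
    )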
9,163 | intelsdi-x/snap-plugin-lib-py | snap_plugin/v1/config_map.py | ConfigMap.keys | def keys(self):
"Returns a list of ConfigMap keys."
return (list(self._pb.IntMap.keys()) + list(self._pb.StringMap.keys()) +
list(self._pb.FloatMap.keys()) + list(self._pb.BoolMap.keys())) | python | def keys(self):
"Returns a list of ConfigMap keys."
return (list(self._pb.IntMap.keys()) + list(self._pb.StringMap.keys()) +
list(self._pb.FloatMap.keys()) + list(self._pb.BoolMap.keys())) | ['def', 'keys', '(', 'self', ')', ':', 'return', '(', 'list', '(', 'self', '.', '_pb', '.', 'IntMap', '.', 'keys', '(', ')', ')', '+', 'list', '(', 'self', '.', '_pb', '.', 'StringMap', '.', 'keys', '(', ')', ')', '+', 'list', '(', 'self', '.', '_pb', '.', 'FloatMap', '.', 'keys', '(', ')', ')', '+', 'list', '(', 'self', '.', '_pb', '.', 'BoolMap', '.', 'keys', '(', ')', ')', ')'] | Returns a list of ConfigMap keys. | ['Returns', 'a', 'list', 'of', 'ConfigMap', 'keys', '.'] | train | https://github.com/intelsdi-x/snap-plugin-lib-py/blob/8da5d00ac5f9d2b48a7239563ac7788209891ca4/snap_plugin/v1/config_map.py#L159-L162 |
9,164 | gem/oq-engine | openquake/hmtk/strain/strain_utils.py | tapered_gutenberg_richter_cdf | def tapered_gutenberg_richter_cdf(moment, moment_threshold, beta,
corner_moment):
'''
Tapered Gutenberg Richter Cumulative Density Function
:param float or numpy.ndarray moment:
Moment for calculation of rate
:param float or numpy.ndarray moment_threshold:
Threshold Moment of the distribution (moment rate essentially!)
:param float beta:
Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
:param float corner_moment:
Corner moment of the Tapered Gutenberg-Richter Function
:returns:
Cumulative probability of moment release > moment
'''
cdf = np.exp((moment_threshold - moment) / corner_moment)
return ((moment / moment_threshold) ** (-beta)) * cdf | python | def tapered_gutenberg_richter_cdf(moment, moment_threshold, beta,
corner_moment):
'''
Tapered Gutenberg Richter Cumulative Density Function
:param float or numpy.ndarray moment:
Moment for calculation of rate
:param float or numpy.ndarray moment_threshold:
Threshold Moment of the distribution (moment rate essentially!)
:param float beta:
Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
:param float corner_moment:
Corner moment of the Tapered Gutenberg-Richter Function
:returns:
Cumulative probability of moment release > moment
'''
cdf = np.exp((moment_threshold - moment) / corner_moment)
return ((moment / moment_threshold) ** (-beta)) * cdf | ['def', 'tapered_gutenberg_richter_cdf', '(', 'moment', ',', 'moment_threshold', ',', 'beta', ',', 'corner_moment', ')', ':', 'cdf', '=', 'np', '.', 'exp', '(', '(', 'moment_threshold', '-', 'moment', ')', '/', 'corner_moment', ')', 'return', '(', '(', 'moment', '/', 'moment_threshold', ')', '**', '(', '-', 'beta', ')', ')', '*', 'cdf'] | Tapered Gutenberg Richter Cumulative Density Function
:param float or numpy.ndarray moment:
Moment for calculation of rate
:param float or numpy.ndarray moment_threshold:
Threshold Moment of the distribution (moment rate essentially!)
:param float beta:
Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function
:param float corner_moment:
Corner moment of the Tapered Gutenberg-Richter Function
:returns:
Cumulative probability of moment release > moment | ['Tapered', 'Gutenberg', 'Richter', 'Cumulative', 'Density', 'Function'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/strain/strain_utils.py#L111-L134 |
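A worked numerical example; the moment values are illustrative (roughly Mw 6.0 for the event, Mw ~4.6 for the threshold and Mw ~8.0 for the corner magnitude):
rate_fraction = tapered_gutenberg_richter_cdf(
    moment=1.26e18,            # ~ Mw 6.0, in N-m
    moment_threshold=1.12e16,  # ~ Mw 4.6
    beta=0.65,
    corner_moment=1.26e21,     # ~ Mw 8.0
)
print(rate_fraction)  # complementary CDF: fraction of events with moment above 1.26e18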
9,165 | chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | GitEnrich.add_commit_branches | def add_commit_branches(self, git_repo, enrich_backend):
"""Add the information about branches to the documents representing commits in
the enriched index. Branches are obtained using the command `git ls-remote`,
then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and
used to update the corresponding items in the enriched index.
:param git_repo: GitRepository object
:param enrich_backend: the enrich backend
"""
to_process = []
for hash, refname in git_repo._discover_refs(remote=True):
if not refname.startswith('refs/heads/'):
continue
commit_count = 0
branch_name = refname.replace('refs/heads/', '')
try:
commits = git_repo.rev_list([branch_name])
for commit in commits:
to_process.append(commit)
commit_count += 1
if commit_count == MAX_BULK_UPDATE_SIZE:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
# reset the counter
to_process = []
commit_count = 0
if commit_count:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
except Exception as e:
logger.error("Skip adding branch info for repo %s due to %s", git_repo.uri, e)
return | python | def add_commit_branches(self, git_repo, enrich_backend):
"""Add the information about branches to the documents representing commits in
the enriched index. Branches are obtained using the command `git ls-remote`,
then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and
used to update the corresponding items in the enriched index.
:param git_repo: GitRepository object
:param enrich_backend: the enrich backend
"""
to_process = []
for hash, refname in git_repo._discover_refs(remote=True):
if not refname.startswith('refs/heads/'):
continue
commit_count = 0
branch_name = refname.replace('refs/heads/', '')
try:
commits = git_repo.rev_list([branch_name])
for commit in commits:
to_process.append(commit)
commit_count += 1
if commit_count == MAX_BULK_UPDATE_SIZE:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
# reset the counter
to_process = []
commit_count = 0
if commit_count:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
except Exception as e:
logger.error("Skip adding branch info for repo %s due to %s", git_repo.uri, e)
return | ['def', 'add_commit_branches', '(', 'self', ',', 'git_repo', ',', 'enrich_backend', ')', ':', 'to_process', '=', '[', ']', 'for', 'hash', ',', 'refname', 'in', 'git_repo', '.', '_discover_refs', '(', 'remote', '=', 'True', ')', ':', 'if', 'not', 'refname', '.', 'startswith', '(', "'refs/heads/'", ')', ':', 'continue', 'commit_count', '=', '0', 'branch_name', '=', 'refname', '.', 'replace', '(', "'refs/heads/'", ',', "''", ')', 'try', ':', 'commits', '=', 'git_repo', '.', 'rev_list', '(', '[', 'branch_name', ']', ')', 'for', 'commit', 'in', 'commits', ':', 'to_process', '.', 'append', '(', 'commit', ')', 'commit_count', '+=', '1', 'if', 'commit_count', '==', 'MAX_BULK_UPDATE_SIZE', ':', 'self', '.', '__process_commits_in_branch', '(', 'enrich_backend', ',', 'branch_name', ',', 'to_process', ')', '# reset the counter', 'to_process', '=', '[', ']', 'commit_count', '=', '0', 'if', 'commit_count', ':', 'self', '.', '__process_commits_in_branch', '(', 'enrich_backend', ',', 'branch_name', ',', 'to_process', ')', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'error', '(', '"Skip adding branch info for repo %s due to %s"', ',', 'git_repo', '.', 'uri', ',', 'e', ')', 'return'] | Add the information about branches to the documents representing commits in
the enriched index. Branches are obtained using the command `git ls-remote`,
then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and
used to update the corresponding items in the enriched index.
:param git_repo: GitRepository object
:param enrich_backend: the enrich backend | ['Add', 'the', 'information', 'about', 'branches', 'to', 'the', 'documents', 'representing', 'commits', 'in', 'the', 'enriched', 'index', '.', 'Branches', 'are', 'obtained', 'using', 'the', 'command', 'git', 'ls', '-', 'remote', 'then', 'for', 'each', 'branch', 'the', 'list', 'of', 'commits', 'is', 'retrieved', 'via', 'the', 'command', 'git', 'rev', '-', 'list', 'branch', '-', 'name', 'and', 'used', 'to', 'update', 'the', 'corresponding', 'items', 'in', 'the', 'enriched', 'index', '.'] | train | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L770-L807 |
9,166 | jmcarp/robobrowser | robobrowser/cache.py | RoboCache.store | def store(self, response):
"""Store response in cache, skipping if code is forbidden.
:param requests.Response response: HTTP response
"""
if response.status_code not in CACHE_CODES:
return
now = datetime.datetime.now()
self.data[response.url] = {
'date': now,
'response': response,
}
logger.info('Stored response in cache')
self._reduce_age(now)
self._reduce_count() | python | def store(self, response):
"""Store response in cache, skipping if code is forbidden.
:param requests.Response response: HTTP response
"""
if response.status_code not in CACHE_CODES:
return
now = datetime.datetime.now()
self.data[response.url] = {
'date': now,
'response': response,
}
logger.info('Stored response in cache')
self._reduce_age(now)
self._reduce_count() | ['def', 'store', '(', 'self', ',', 'response', ')', ':', 'if', 'response', '.', 'status_code', 'not', 'in', 'CACHE_CODES', ':', 'return', 'now', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')', 'self', '.', 'data', '[', 'response', '.', 'url', ']', '=', '{', "'date'", ':', 'now', ',', "'response'", ':', 'response', ',', '}', 'logger', '.', 'info', '(', "'Stored response in cache'", ')', 'self', '.', '_reduce_age', '(', 'now', ')', 'self', '.', '_reduce_count', '(', ')'] | Store response in cache, skipping if code is forbidden.
:param requests.Response response: HTTP response | ['Store', 'response', 'in', 'cache', 'skipping', 'if', 'code', 'is', 'forbidden', '.'] | train | https://github.com/jmcarp/robobrowser/blob/4284c11d00ae1397983e269aa180e5cf7ee5f4cf/robobrowser/cache.py#L48-L63 |
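A stripped-down, runnable sketch of the same idea — cache a response under its URL only when its status code is cacheable. CACHE_CODES here is an assumed example set, and the age/count pruning done by _reduce_age and _reduce_count is omitted:

import datetime

CACHE_CODES = {200, 203, 300, 301, 410}  # assumed example of cacheable status codes

class TinyResponseCache:
    def __init__(self):
        self.data = {}

    def store(self, url, status_code, body):
        """Store a response keyed by URL, skipping non-cacheable status codes."""
        if status_code not in CACHE_CODES:
            return
        self.data[url] = {'date': datetime.datetime.now(), 'response': body}

cache = TinyResponseCache()
cache.store('http://example.com/', 200, '<html></html>')     # stored
cache.store('http://example.com/missing', 404, 'not found')  # skipped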
9,167 | AoiKuiyuyou/AoikLiveReload | tools/waf/aoikwafutil.py | pip_ins_req | def pip_ins_req(
ctx,
python,
req_path,
venv_path=None,
inputs=None,
outputs=None,
touch=None,
check_import=False,
check_import_module=None,
pip_setup_file=None,
pip_setup_touch=None,
virtualenv_setup_touch=None,
always=False,
):
"""
Create task that uses given virtual environment's `pip` to set up \
packages listed in given requirements file.
:param ctx: BuildContext object.
:param python: Python program path used to set up `pip` and `virtualenv`.
:param req_path: Requirements file relative path relative to top directory.
:param venv_path: Virtual environment directory relative path relative to
top directory.
If given, will create the virtual environment and set up packages
listed in given requirements file in the virtual environment.
If not given, will set up packages listed in given requirements file in
given Python program's environment.
:param inputs: Input items list to add to created task.
See :paramref:`create_cmd_task.inputs` for allowed item types.
:param outputs: Output items list to add to created task.
See :paramref:`create_cmd_task.outputs` for allowed item types.
:param touch: Touch file path for dirty checking.
:param check_import: Whether import module for dirty checking.
:param check_import_module: Module name to import for dirty checking.
:param pip_setup_file: `get-pip.py` file path for `pip_setup` task.
:param pip_setup_touch: Touch file path for `pip_setup` task.
:param virtualenv_setup_touch: Touch file path for `virtualenv_setup` task.
:param always: Whether always run.
:return: Created task.
"""
# Ensure given context object is BuildContext object
_ensure_build_context(ctx)
# If virtual environment directory path is not given
if venv_path is None:
# Use given Python program path
venv_python = python
# If virtual environment directory path is given
else:
# Get Python program path in the virtual environment
venv_python = get_python_path(venv_path)
# Mark the path as input target
venv_python = mark_input(venv_python)
# If virtual environment directory path is not given,
# it means not create virtual environment.
if venv_path is None:
# Create task that sets up `pip`
pip_setup_task = pip_setup(
# Context
ctx=ctx,
# Python program path
python=python,
# `get-pip.py` file path
setup_file=pip_setup_file,
# Touch file path
touch=pip_setup_touch,
# Whether import module for dirty checking
always=always,
)
# Not create virtual environment
venv_task = None
# If virtual environment directory path is given
else:
# Not create task that sets up `pip` here because `create_venv`
# function below will do
pip_setup_task = None
# Create task that sets up virtual environment
venv_task = create_venv(
# Context
ctx=ctx,
# Python program path
python=python,
# Virtual environment directory path
venv_path=venv_path,
# Output items list
outputs=[
# Add the virtual environment's `python` program path as output
# target for dirty checking
get_python_path(venv_path),
# Add the virtual environment's `pip` program path as output
# target for dirty checking
get_pip_path(venv_path),
],
# Whether always run
always=always,
# Task name
task_name='Create venv `{}`'.format(venv_path),
# `get-pip.py` file path for `pip_setup` task
pip_setup_file=pip_setup_file,
# Touch file path for `pip_setup` task
pip_setup_touch=pip_setup_touch,
# Touch file path for `virtualenv_setup` task
virtualenv_setup_touch=virtualenv_setup_touch,
)
# If touch file path is not given
if not touch:
# Not update touch file
touch_node = None
# If touch file path is given
else:
# Update touch file
touch_node, always = update_touch_file(
# Context
ctx=ctx,
# Touch file path
path=touch,
# Whether import module for dirty checking
check_import=check_import,
# Module name to import for dirty checking
check_import_module=check_import_module,
# Python program path for dirty checking
check_import_python=venv_python,
# Whether always run
always=always,
)
# Create task that sets up packages
task = create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
venv_python,
# Run module
'-m',
# Module name
'pip',
# Install package
'install',
# Read package names from requirements file
'-r',
# Requirements file path. Mark as input target.
mark_input(req_path),
],
# Input items list
inputs=inputs,
# Output items list
outputs=[
# Use the touch node as output target for dirty checking
touch_node,
# Given output items list
outputs,
],
# Whether always run
always=always,
)
# Chain these tasks to run one after another
chain_tasks([
pip_setup_task,
venv_task,
task,
])
# Return the created task
return task | python | def pip_ins_req(
ctx,
python,
req_path,
venv_path=None,
inputs=None,
outputs=None,
touch=None,
check_import=False,
check_import_module=None,
pip_setup_file=None,
pip_setup_touch=None,
virtualenv_setup_touch=None,
always=False,
):
"""
Create task that uses given virtual environment's `pip` to set up \
packages listed in given requirements file.
:param ctx: BuildContext object.
:param python: Python program path used to set up `pip` and `virtualenv`.
:param req_path: Requirements file relative path relative to top directory.
:param venv_path: Virtual environment directory relative path relative to
top directory.
If given, will create the virtual environment and set up packages
listed in given requirements file in the virtual environment.
If not given, will set up packages listed in given requirements file in
given Python program's environment.
:param inputs: Input items list to add to created task.
See :paramref:`create_cmd_task.inputs` for allowed item types.
:param outputs: Output items list to add to created task.
See :paramref:`create_cmd_task.outputs` for allowed item types.
:param touch: Touch file path for dirty checking.
:param check_import: Whether import module for dirty checking.
:param check_import_module: Module name to import for dirty checking.
:param pip_setup_file: `get-pip.py` file path for `pip_setup` task.
:param pip_setup_touch: Touch file path for `pip_setup` task.
:param virtualenv_setup_touch: Touch file path for `virtualenv_setup` task.
:param always: Whether always run.
:return: Created task.
"""
# Ensure given context object is BuildContext object
_ensure_build_context(ctx)
# If virtual environment directory path is not given
if venv_path is None:
# Use given Python program path
venv_python = python
# If virtual environment directory path is given
else:
# Get Python program path in the virtual environment
venv_python = get_python_path(venv_path)
# Mark the path as input target
venv_python = mark_input(venv_python)
# If virtual environment directory path is not given,
# it means not create virtual environment.
if venv_path is None:
# Create task that sets up `pip`
pip_setup_task = pip_setup(
# Context
ctx=ctx,
# Python program path
python=python,
# `get-pip.py` file path
setup_file=pip_setup_file,
# Touch file path
touch=pip_setup_touch,
# Whether import module for dirty checking
always=always,
)
# Not create virtual environment
venv_task = None
# If virtual environment directory path is given
else:
# Not create task that sets up `pip` here because `create_venv`
# function below will do
pip_setup_task = None
# Create task that sets up virtual environment
venv_task = create_venv(
# Context
ctx=ctx,
# Python program path
python=python,
# Virtual environment directory path
venv_path=venv_path,
# Output items list
outputs=[
# Add the virtual environment's `python` program path as output
# target for dirty checking
get_python_path(venv_path),
# Add the virtual environment's `pip` program path as output
# target for dirty checking
get_pip_path(venv_path),
],
# Whether always run
always=always,
# Task name
task_name='Create venv `{}`'.format(venv_path),
# `get-pip.py` file path for `pip_setup` task
pip_setup_file=pip_setup_file,
# Touch file path for `pip_setup` task
pip_setup_touch=pip_setup_touch,
# Touch file path for `virtualenv_setup` task
virtualenv_setup_touch=virtualenv_setup_touch,
)
# If touch file path is not given
if not touch:
# Not update touch file
touch_node = None
# If touch file path is given
else:
# Update touch file
touch_node, always = update_touch_file(
# Context
ctx=ctx,
# Touch file path
path=touch,
# Whether import module for dirty checking
check_import=check_import,
# Module name to import for dirty checking
check_import_module=check_import_module,
# Python program path for dirty checking
check_import_python=venv_python,
# Whether always run
always=always,
)
# Create task that sets up packages
task = create_cmd_task(
# Context
ctx=ctx,
# Command parts
parts=[
# Python program path
venv_python,
# Run module
'-m',
# Module name
'pip',
# Install package
'install',
# Read package names from requirements file
'-r',
# Requirements file path. Mark as input target.
mark_input(req_path),
],
# Input items list
inputs=inputs,
# Output items list
outputs=[
# Use the touch node as output target for dirty checking
touch_node,
# Given output items list
outputs,
],
# Whether always run
always=always,
)
# Chain these tasks to run one after another
chain_tasks([
pip_setup_task,
venv_task,
task,
])
# Return the created task
return task | ['def', 'pip_ins_req', '(', 'ctx', ',', 'python', ',', 'req_path', ',', 'venv_path', '=', 'None', ',', 'inputs', '=', 'None', ',', 'outputs', '=', 'None', ',', 'touch', '=', 'None', ',', 'check_import', '=', 'False', ',', 'check_import_module', '=', 'None', ',', 'pip_setup_file', '=', 'None', ',', 'pip_setup_touch', '=', 'None', ',', 'virtualenv_setup_touch', '=', 'None', ',', 'always', '=', 'False', ',', ')', ':', '# Ensure given context object is BuildContext object', '_ensure_build_context', '(', 'ctx', ')', '# If virtual environment directory path is not given', 'if', 'venv_path', 'is', 'None', ':', '# Use given Python program path', 'venv_python', '=', 'python', '# If virtual environment directory path is given', 'else', ':', '# Get Python program path in the virtual environment', 'venv_python', '=', 'get_python_path', '(', 'venv_path', ')', '# Mark the path as input target', 'venv_python', '=', 'mark_input', '(', 'venv_python', ')', '# If virtual environment directory path is not given,', '# it means not create virtual environment.', 'if', 'venv_path', 'is', 'None', ':', '# Create task that sets up `pip`', 'pip_setup_task', '=', 'pip_setup', '(', '# Context', 'ctx', '=', 'ctx', ',', '# Python program path', 'python', '=', 'python', ',', '# `get-pip.py` file path', 'setup_file', '=', 'pip_setup_file', ',', '# Touch file path', 'touch', '=', 'pip_setup_touch', ',', '# Whether import module for dirty checking', 'always', '=', 'always', ',', ')', '# Not create virtual environment', 'venv_task', '=', 'None', '# If virtual environment directory path is given', 'else', ':', '# Not create task that sets up `pip` here because `create_venv`', '# function below will do', 'pip_setup_task', '=', 'None', '# Create task that sets up virtual environment', 'venv_task', '=', 'create_venv', '(', '# Context', 'ctx', '=', 'ctx', ',', '# Python program path', 'python', '=', 'python', ',', '# Virtual environment directory path', 'venv_path', '=', 'venv_path', ',', '# Output items list', 'outputs', '=', '[', "# Add the virtual environment's `python` program path as output", '# target for dirty checking', 'get_python_path', '(', 'venv_path', ')', ',', "# Add the virtual environment's `pip` program path as output", '# target for dirty checking', 'get_pip_path', '(', 'venv_path', ')', ',', ']', ',', '# Whether always run', 'always', '=', 'always', ',', '# Task name', 'task_name', '=', "'Create venv `{}`'", '.', 'format', '(', 'venv_path', ')', ',', '# `get-pip.py` file path for `pip_setup` task', 'pip_setup_file', '=', 'pip_setup_file', ',', '# Touch file path for `pip_setup` task', 'pip_setup_touch', '=', 'pip_setup_touch', ',', '# Touch file path for `virtualenv_setup` task', 'virtualenv_setup_touch', '=', 'virtualenv_setup_touch', ',', ')', '# If touch file path is not given', 'if', 'not', 'touch', ':', '# Not update touch file', 'touch_node', '=', 'None', '# If touch file path is given', 'else', ':', '# Update touch file', 'touch_node', ',', 'always', '=', 'update_touch_file', '(', '# Context', 'ctx', '=', 'ctx', ',', '# Touch file path', 'path', '=', 'touch', ',', '# Whether import module for dirty checking', 'check_import', '=', 'check_import', ',', '# Module name to import for dirty checking', 'check_import_module', '=', 'check_import_module', ',', '# Python program path for dirty checking', 'check_import_python', '=', 'venv_python', ',', '# Whether always run', 'always', '=', 'always', ',', ')', '# Create task that sets up packages', 'task', '=', 'create_cmd_task', '(', '# Context', 
'ctx', '=', 'ctx', ',', '# Command parts', 'parts', '=', '[', '# Python program path', 'venv_python', ',', '# Run module', "'-m'", ',', '# Module name', "'pip'", ',', '# Install package', "'install'", ',', '# Read package names from requirements file', "'-r'", ',', '# Requirements file path. Mark as input target.', 'mark_input', '(', 'req_path', ')', ',', ']', ',', '# Input items list', 'inputs', '=', 'inputs', ',', '# Output items list', 'outputs', '=', '[', '# Use the touch node as output target for dirty checking', 'touch_node', ',', '# Given output items list', 'outputs', ',', ']', ',', '# Whether always run', 'always', '=', 'always', ',', ')', '# Chain these tasks to run one after another', 'chain_tasks', '(', '[', 'pip_setup_task', ',', 'venv_task', ',', 'task', ',', ']', ')', '# Return the created task', 'return', 'task'] | Create task that uses given virtual environment's `pip` to sets up \
packages listed in given requirements file.
:param ctx: BuildContext object.
:param python: Python program path used to set up `pip` and `virtualenv`.
:param req_path: Requirements file relative path relative to top directory.
:param venv_path: Virtual environment directory relative path relative to
top directory.
If given, will create the virtual environment and set up packages
listed in given requirements file in the virtual environment.
If not given, will set up packages listed in given requirements file in
given Python program's environment.
:param inputs: Input items list to add to created task.
See :paramref:`create_cmd_task.inputs` for allowed item types.
:param outputs: Output items list to add to created task.
See :paramref:`create_cmd_task.outputs` for allowed item types.
:param touch: Touch file path for dirty checking.
:param check_import: Whether import module for dirty checking.
:param check_import_module: Module name to import for dirty checking.
:param pip_setup_file: `get-pip.py` file path for `pip_setup` task.
:param pip_setup_touch: Touch file path for `pip_setup` task.
:param virtualenv_setup_touch: Touch file path for `virtualenv_setup` task.
:param always: Whether always run.
:return: Created task. | ['Create', 'task', 'that', 'uses', 'given', 'virtual', 'environment', 's', 'pip', 'to', 'sets', 'up', '\\', 'packages', 'listed', 'in', 'given', 'requirements', 'file', '.'] | train | https://github.com/AoiKuiyuyou/AoikLiveReload/blob/0d5adb12118a33749e6690a8165fdb769cff7d5c/tools/waf/aoikwafutil.py#L1939-L2159 |
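Stripped of the waf task graph (create_cmd_task, touch files, dirty checking), the command that pip_ins_req ultimately builds is `<python> -m pip install -r <requirements file>`. A plain subprocess sketch of that command, with the paths as example arguments:

import subprocess
import sys

def pip_install_requirements(python, req_path):
    """Run `<python> -m pip install -r <req_path>` and raise on failure."""
    subprocess.check_call([python, '-m', 'pip', 'install', '-r', req_path])

# Example: install into the current interpreter's environment (commented out to avoid side effects).
# pip_install_requirements(sys.executable, 'requirements.txt')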
9,168 | globus/globus-cli | globus_cli/helpers/auth_flows.py | exchange_code_and_store_config | def exchange_code_and_store_config(auth_client, auth_code):
"""
Finishes auth flow after code is gotten from command line or local server.
Exchanges code for tokens and gets user info from auth.
Stores tokens and user info in config.
"""
# do a token exchange with the given code
tkn = auth_client.oauth2_exchange_code_for_tokens(auth_code)
tkn = tkn.by_resource_server
# extract access tokens from final response
transfer_at = tkn["transfer.api.globus.org"]["access_token"]
transfer_at_expires = tkn["transfer.api.globus.org"]["expires_at_seconds"]
transfer_rt = tkn["transfer.api.globus.org"]["refresh_token"]
auth_at = tkn["auth.globus.org"]["access_token"]
auth_at_expires = tkn["auth.globus.org"]["expires_at_seconds"]
auth_rt = tkn["auth.globus.org"]["refresh_token"]
# revoke any existing tokens
for token_opt in (
TRANSFER_RT_OPTNAME,
TRANSFER_AT_OPTNAME,
AUTH_RT_OPTNAME,
AUTH_AT_OPTNAME,
):
token = lookup_option(token_opt)
if token:
auth_client.oauth2_revoke_token(token)
# write new tokens to config
write_option(TRANSFER_RT_OPTNAME, transfer_rt)
write_option(TRANSFER_AT_OPTNAME, transfer_at)
write_option(TRANSFER_AT_EXPIRES_OPTNAME, transfer_at_expires)
write_option(AUTH_RT_OPTNAME, auth_rt)
write_option(AUTH_AT_OPTNAME, auth_at)
write_option(AUTH_AT_EXPIRES_OPTNAME, auth_at_expires) | python | def exchange_code_and_store_config(auth_client, auth_code):
"""
Finishes auth flow after code is gotten from command line or local server.
Exchanges code for tokens and gets user info from auth.
Stores tokens and user info in config.
"""
# do a token exchange with the given code
tkn = auth_client.oauth2_exchange_code_for_tokens(auth_code)
tkn = tkn.by_resource_server
# extract access tokens from final response
transfer_at = tkn["transfer.api.globus.org"]["access_token"]
transfer_at_expires = tkn["transfer.api.globus.org"]["expires_at_seconds"]
transfer_rt = tkn["transfer.api.globus.org"]["refresh_token"]
auth_at = tkn["auth.globus.org"]["access_token"]
auth_at_expires = tkn["auth.globus.org"]["expires_at_seconds"]
auth_rt = tkn["auth.globus.org"]["refresh_token"]
# revoke any existing tokens
for token_opt in (
TRANSFER_RT_OPTNAME,
TRANSFER_AT_OPTNAME,
AUTH_RT_OPTNAME,
AUTH_AT_OPTNAME,
):
token = lookup_option(token_opt)
if token:
auth_client.oauth2_revoke_token(token)
# write new tokens to config
write_option(TRANSFER_RT_OPTNAME, transfer_rt)
write_option(TRANSFER_AT_OPTNAME, transfer_at)
write_option(TRANSFER_AT_EXPIRES_OPTNAME, transfer_at_expires)
write_option(AUTH_RT_OPTNAME, auth_rt)
write_option(AUTH_AT_OPTNAME, auth_at)
write_option(AUTH_AT_EXPIRES_OPTNAME, auth_at_expires) | ['def', 'exchange_code_and_store_config', '(', 'auth_client', ',', 'auth_code', ')', ':', '# do a token exchange with the given code', 'tkn', '=', 'auth_client', '.', 'oauth2_exchange_code_for_tokens', '(', 'auth_code', ')', 'tkn', '=', 'tkn', '.', 'by_resource_server', '# extract access tokens from final response', 'transfer_at', '=', 'tkn', '[', '"transfer.api.globus.org"', ']', '[', '"access_token"', ']', 'transfer_at_expires', '=', 'tkn', '[', '"transfer.api.globus.org"', ']', '[', '"expires_at_seconds"', ']', 'transfer_rt', '=', 'tkn', '[', '"transfer.api.globus.org"', ']', '[', '"refresh_token"', ']', 'auth_at', '=', 'tkn', '[', '"auth.globus.org"', ']', '[', '"access_token"', ']', 'auth_at_expires', '=', 'tkn', '[', '"auth.globus.org"', ']', '[', '"expires_at_seconds"', ']', 'auth_rt', '=', 'tkn', '[', '"auth.globus.org"', ']', '[', '"refresh_token"', ']', '# revoke any existing tokens', 'for', 'token_opt', 'in', '(', 'TRANSFER_RT_OPTNAME', ',', 'TRANSFER_AT_OPTNAME', ',', 'AUTH_RT_OPTNAME', ',', 'AUTH_AT_OPTNAME', ',', ')', ':', 'token', '=', 'lookup_option', '(', 'token_opt', ')', 'if', 'token', ':', 'auth_client', '.', 'oauth2_revoke_token', '(', 'token', ')', '# write new tokens to config', 'write_option', '(', 'TRANSFER_RT_OPTNAME', ',', 'transfer_rt', ')', 'write_option', '(', 'TRANSFER_AT_OPTNAME', ',', 'transfer_at', ')', 'write_option', '(', 'TRANSFER_AT_EXPIRES_OPTNAME', ',', 'transfer_at_expires', ')', 'write_option', '(', 'AUTH_RT_OPTNAME', ',', 'auth_rt', ')', 'write_option', '(', 'AUTH_AT_OPTNAME', ',', 'auth_at', ')', 'write_option', '(', 'AUTH_AT_EXPIRES_OPTNAME', ',', 'auth_at_expires', ')'] | Finishes auth flow after code is gotten from command line or local server.
Exchanges code for tokens and gets user info from auth.
Stores tokens and user info in config. | ['Finishes', 'auth', 'flow', 'after', 'code', 'is', 'gotten', 'from', 'command', 'line', 'or', 'local', 'server', '.', 'Exchanges', 'code', 'for', 'tokens', 'and', 'gets', 'user', 'info', 'from', 'auth', '.', 'Stores', 'tokens', 'and', 'user', 'info', 'in', 'config', '.'] | train | https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/helpers/auth_flows.py#L107-L142 |
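The token bookkeeping above reduces to indexing a by-resource-server mapping. A self-contained sketch with a placeholder sample mapping (the dictionary keys mirror the resource servers used above; the token values are placeholders):

def extract_tokens(by_resource_server):
    """Pull the access/refresh tokens for the transfer and auth resource servers."""
    transfer = by_resource_server['transfer.api.globus.org']
    auth = by_resource_server['auth.globus.org']
    return {
        'transfer_at': transfer['access_token'],
        'transfer_rt': transfer['refresh_token'],
        'auth_at': auth['access_token'],
        'auth_rt': auth['refresh_token'],
    }

sample = {
    'transfer.api.globus.org': {'access_token': 'AT-1', 'refresh_token': 'RT-1', 'expires_at_seconds': 0},
    'auth.globus.org': {'access_token': 'AT-2', 'refresh_token': 'RT-2', 'expires_at_seconds': 0},
}
print(extract_tokens(sample))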
9,169 | openvax/isovar | isovar/locus_reads.py | locus_read_generator | def locus_read_generator(
samfile,
chromosome,
base1_position_before_variant,
base1_position_after_variant,
use_duplicate_reads=USE_DUPLICATE_READS,
use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
min_mapping_quality=MIN_READ_MAPPING_QUALITY):
"""
Generator that yields a sequence of ReadAtLocus records for reads which
contain the positions before and after a variant. The actual work to figure
out if what's between those positions matches a variant happens later in
the `variant_reads` module.
Parameters
----------
samfile : pysam.AlignmentFile
chromosome : str
base1_position_before_variant : int
Genomic position of reference nucleotide before a variant
base1_position_after_variant : int
Genomic position of reference nucleotide after a variant
use_duplicate_reads : bool
By default, we're ignoring any duplicate reads
use_secondary_alignments : bool
By default we are using secondary alignments, set this to False to
only use primary alignments of reads.
min_mapping_quality : int
Drop reads below this mapping quality
Yields ReadAtLocus objects
"""
logger.debug(
"Gathering reads at locus %s: %d-%d",
chromosome,
base1_position_before_variant,
base1_position_after_variant)
base0_position_before_variant = base1_position_before_variant - 1
base0_position_after_variant = base1_position_after_variant - 1
count = 0
# We get a pileup at the base before the variant and then check to make sure
# that reads also overlap the reference position after the variant.
#
# TODO: scan over a wider interval of pileups and collect reads that don't
# overlap the bases before/after a variant due to splicing
for pileup_element in pileup_reads_at_position(
samfile=samfile,
chromosome=chromosome,
base0_position=base0_position_before_variant):
read = LocusRead.from_pysam_pileup_element(
pileup_element,
base0_position_before_variant=base0_position_before_variant,
base0_position_after_variant=base0_position_after_variant,
use_secondary_alignments=use_secondary_alignments,
use_duplicate_reads=use_duplicate_reads,
min_mapping_quality=min_mapping_quality)
if read is not None:
count += 1
yield read
logger.info(
"Found %d reads overlapping locus %s: %d-%d",
count,
chromosome,
base1_position_before_variant,
base1_position_after_variant) | python | def locus_read_generator(
samfile,
chromosome,
base1_position_before_variant,
base1_position_after_variant,
use_duplicate_reads=USE_DUPLICATE_READS,
use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
min_mapping_quality=MIN_READ_MAPPING_QUALITY):
"""
Generator that yields a sequence of ReadAtLocus records for reads which
contain the positions before and after a variant. The actual work to figure
out if what's between those positions matches a variant happens later in
the `variant_reads` module.
Parameters
----------
samfile : pysam.AlignmentFile
chromosome : str
base1_position_before_variant : int
Genomic position of reference nucleotide before a variant
base1_position_after_variant : int
Genomic position of reference nucleotide after a variant
use_duplicate_reads : bool
By default, we're ignoring any duplicate reads
use_secondary_alignments : bool
By default we are using secondary alignments, set this to False to
only use primary alignments of reads.
min_mapping_quality : int
Drop reads below this mapping quality
Yields ReadAtLocus objects
"""
logger.debug(
"Gathering reads at locus %s: %d-%d",
chromosome,
base1_position_before_variant,
base1_position_after_variant)
base0_position_before_variant = base1_position_before_variant - 1
base0_position_after_variant = base1_position_after_variant - 1
count = 0
# We get a pileup at the base before the variant and then check to make sure
# that reads also overlap the reference position after the variant.
#
# TODO: scan over a wider interval of pileups and collect reads that don't
# overlap the bases before/after a variant due to splicing
for pileup_element in pileup_reads_at_position(
samfile=samfile,
chromosome=chromosome,
base0_position=base0_position_before_variant):
read = LocusRead.from_pysam_pileup_element(
pileup_element,
base0_position_before_variant=base0_position_before_variant,
base0_position_after_variant=base0_position_after_variant,
use_secondary_alignments=use_secondary_alignments,
use_duplicate_reads=use_duplicate_reads,
min_mapping_quality=min_mapping_quality)
if read is not None:
count += 1
yield read
logger.info(
"Found %d reads overlapping locus %s: %d-%d",
count,
chromosome,
base1_position_before_variant,
base1_position_after_variant) | ['def', 'locus_read_generator', '(', 'samfile', ',', 'chromosome', ',', 'base1_position_before_variant', ',', 'base1_position_after_variant', ',', 'use_duplicate_reads', '=', 'USE_DUPLICATE_READS', ',', 'use_secondary_alignments', '=', 'USE_SECONDARY_ALIGNMENTS', ',', 'min_mapping_quality', '=', 'MIN_READ_MAPPING_QUALITY', ')', ':', 'logger', '.', 'debug', '(', '"Gathering reads at locus %s: %d-%d"', ',', 'chromosome', ',', 'base1_position_before_variant', ',', 'base1_position_after_variant', ')', 'base0_position_before_variant', '=', 'base1_position_before_variant', '-', '1', 'base0_position_after_variant', '=', 'base1_position_after_variant', '-', '1', 'count', '=', '0', '# We get a pileup at the base before the variant and then check to make sure', '# that reads also overlap the reference position after the variant.', '#', "# TODO: scan over a wider interval of pileups and collect reads that don't", '# overlap the bases before/after a variant due to splicing', 'for', 'pileup_element', 'in', 'pileup_reads_at_position', '(', 'samfile', '=', 'samfile', ',', 'chromosome', '=', 'chromosome', ',', 'base0_position', '=', 'base0_position_before_variant', ')', ':', 'read', '=', 'LocusRead', '.', 'from_pysam_pileup_element', '(', 'pileup_element', ',', 'base0_position_before_variant', '=', 'base0_position_before_variant', ',', 'base0_position_after_variant', '=', 'base0_position_after_variant', ',', 'use_secondary_alignments', '=', 'use_secondary_alignments', ',', 'use_duplicate_reads', '=', 'use_duplicate_reads', ',', 'min_mapping_quality', '=', 'min_mapping_quality', ')', 'if', 'read', 'is', 'not', 'None', ':', 'count', '+=', '1', 'yield', 'read', 'logger', '.', 'info', '(', '"Found %d reads overlapping locus %s: %d-%d"', ',', 'count', ',', 'chromosome', ',', 'base1_position_before_variant', ',', 'base1_position_after_variant', ')'] | Generator that yields a sequence of ReadAtLocus records for reads which
contain the positions before and after a variant. The actual work to figure
out if what's between those positions matches a variant happens later in
the `variant_reads` module.
Parameters
----------
samfile : pysam.AlignmentFile
chromosome : str
base1_position_before_variant : int
Genomic position of reference nucleotide before a variant
base1_position_after_variant : int
Genomic position of reference nucleotide after a variant
use_duplicate_reads : bool
By default, we're ignoring any duplicate reads
use_secondary_alignments : bool
By default we are using secondary alignments, set this to False to
only use primary alignments of reads.
min_mapping_quality : int
Drop reads below this mapping quality
Yields ReadAtLocus objects | ['Generator', 'that', 'yields', 'a', 'sequence', 'of', 'ReadAtLocus', 'records', 'for', 'reads', 'which', 'contain', 'the', 'positions', 'before', 'and', 'after', 'a', 'variant', '.', 'The', 'actual', 'work', 'to', 'figure', 'out', 'if', 'what', 's', 'between', 'those', 'positions', 'matches', 'a', 'variant', 'happens', 'later', 'in', 'the', 'variant_reads', 'module', '.'] | train | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L243-L317 |
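The generator above combines two small patterns: converting base-1 genomic coordinates to base-0, and counting how many yielded items pass a filter. A standalone sketch of both, with pysam's pileup machinery replaced by a plain iterable and a hypothetical keep() predicate:

def filter_and_count(items, keep):
    """Yield items for which keep(item) is truthy, then report how many were kept."""
    count = 0
    for item in items:
        if keep(item):
            count += 1
            yield item
    print('kept %d of the scanned items' % count)

base1_position = 1000
base0_position = base1_position - 1   # same base-1 -> base-0 shift as above

evens = list(filter_and_count(range(10), lambda x: x % 2 == 0))  # [0, 2, 4, 6, 8]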
9,170 | pyamg/pyamg | pyamg/aggregation/smooth.py | cg_prolongation_smoothing | def cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol,
weighting='local', Cpt_params=None):
"""Use CG to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using conjugate gradients to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother
"""
# Preallocate
AP = sparse.bsr_matrix((np.zeros(Sparsity_Pattern.data.shape,
dtype=T.dtype),
Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
# CG will be run with diagonal preconditioning
if weighting == 'diagonal':
Dinv = get_diagonal(A, norm_eq=False, inv=True)
elif weighting == 'block':
Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0]+1)),
shape=A.shape)
elif weighting == 'local':
# Based on Gershgorin estimate
D = np.abs(A)*np.ones((A.shape[0], 1), dtype=A.dtype)
Dinv = np.zeros_like(D)
Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
else:
raise ValueError('weighting value is invalid')
# Calculate initial residual
# Equivalent to R = -A*T; R = R.multiply(Sparsity_Pattern)
# with the added constraint that R has an explicit 0 wherever
# R is 0 and Sparsity_Pattern is not
uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
R = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
T.indptr, T.indices,
np.ravel(T.data),
R.indptr, R.indices,
np.ravel(R.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
T.blocksize[1])
R.data *= -1.0
# Enforce R*B = 0
Satisfy_Constraints(R, B, BtBinv)
if R.nnz == 0:
print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
Returning tentative prolongator\n")
return T
# Calculate Frobenius norm of the residual
resid = R.nnz # np.sqrt((R.data.conjugate()*R.data).sum())
# print "Energy Minimization of Prolongator \
# --- Iteration 0 --- r = " + str(resid)
i = 0
while i < maxiter and resid > tol:
# Apply diagonal preconditioner
if weighting == 'local' or weighting == 'diagonal':
Z = scale_rows(R, Dinv)
else:
Z = Dinv*R
# Frobenius inner-product of (R,Z) = sum( np.conjugate(rk).*zk)
newsum = (R.conjugate().multiply(Z)).sum()
if newsum < tol:
# met tolerance, so halt
break
# P is the search direction, not the prolongator, which is T.
if(i == 0):
P = Z
oldsum = newsum
else:
beta = newsum / oldsum
P = Z + beta*P
oldsum = newsum
# Calculate new direction and enforce constraints
# Equivalent to: AP = A*P; AP = AP.multiply(Sparsity_Pattern)
# with the added constraint that explicit zeros are in AP wherever
# AP = 0 and Sparsity_Pattern does not !!!!
AP.data[:] = 0.0
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
P.indptr, P.indices,
np.ravel(P.data),
AP.indptr, AP.indices,
np.ravel(AP.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
P.blocksize[1])
# Enforce AP*B = 0
Satisfy_Constraints(AP, B, BtBinv)
# Frobenius inner-product of (P, AP)
alpha = newsum/(P.conjugate().multiply(AP)).sum()
# Update the prolongator, T
T = T + alpha*P
# Ensure identity at C-pts
if Cpt_params[0]:
T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']
# Update residual
R = R - alpha*AP
i += 1
# Calculate Frobenius norm of the residual
resid = R.nnz # np.sqrt((R.data.conjugate()*R.data).sum())
# print "Energy Minimization of Prolongator \
# --- Iteration " + str(i) + " --- r = " + str(resid)
return T | python | def cg_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter, tol,
weighting='local', Cpt_params=None):
"""Use CG to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using conjugate gradients to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother
"""
# Preallocate
AP = sparse.bsr_matrix((np.zeros(Sparsity_Pattern.data.shape,
dtype=T.dtype),
Sparsity_Pattern.indices, Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
# CG will be run with diagonal preconditioning
if weighting == 'diagonal':
Dinv = get_diagonal(A, norm_eq=False, inv=True)
elif weighting == 'block':
Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0]+1)),
shape=A.shape)
elif weighting == 'local':
# Based on Gershgorin estimate
D = np.abs(A)*np.ones((A.shape[0], 1), dtype=A.dtype)
Dinv = np.zeros_like(D)
Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
else:
raise ValueError('weighting value is invalid')
# Calculate initial residual
# Equivalent to R = -A*T; R = R.multiply(Sparsity_Pattern)
# with the added constraint that R has an explicit 0 wherever
# R is 0 and Sparsity_Pattern is not
uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
R = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
T.indptr, T.indices,
np.ravel(T.data),
R.indptr, R.indices,
np.ravel(R.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
T.blocksize[1])
R.data *= -1.0
# Enforce R*B = 0
Satisfy_Constraints(R, B, BtBinv)
if R.nnz == 0:
print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
Returning tentative prolongator\n")
return T
# Calculate Frobenius norm of the residual
resid = R.nnz # np.sqrt((R.data.conjugate()*R.data).sum())
# print "Energy Minimization of Prolongator \
# --- Iteration 0 --- r = " + str(resid)
i = 0
while i < maxiter and resid > tol:
# Apply diagonal preconditioner
if weighting == 'local' or weighting == 'diagonal':
Z = scale_rows(R, Dinv)
else:
Z = Dinv*R
# Frobenius inner-product of (R,Z) = sum( np.conjugate(rk).*zk)
newsum = (R.conjugate().multiply(Z)).sum()
if newsum < tol:
# met tolerance, so halt
break
# P is the search direction, not the prolongator, which is T.
if(i == 0):
P = Z
oldsum = newsum
else:
beta = newsum / oldsum
P = Z + beta*P
oldsum = newsum
# Calculate new direction and enforce constraints
# Equivalent to: AP = A*P; AP = AP.multiply(Sparsity_Pattern)
# with the added constraint that explicit zeros are in AP wherever
# AP = 0 and Sparsity_Pattern does not !!!!
AP.data[:] = 0.0
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
P.indptr, P.indices,
np.ravel(P.data),
AP.indptr, AP.indices,
np.ravel(AP.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
P.blocksize[1])
# Enforce AP*B = 0
Satisfy_Constraints(AP, B, BtBinv)
# Frobenius inner-product of (P, AP)
alpha = newsum/(P.conjugate().multiply(AP)).sum()
# Update the prolongator, T
T = T + alpha*P
# Ensure identity at C-pts
if Cpt_params[0]:
T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']
# Update residual
R = R - alpha*AP
i += 1
# Calculate Frobenius norm of the residual
resid = R.nnz # np.sqrt((R.data.conjugate()*R.data).sum())
# print "Energy Minimization of Prolongator \
# --- Iteration " + str(i) + " --- r = " + str(resid)
return T | ['def', 'cg_prolongation_smoothing', '(', 'A', ',', 'T', ',', 'B', ',', 'BtBinv', ',', 'Sparsity_Pattern', ',', 'maxiter', ',', 'tol', ',', 'weighting', '=', "'local'", ',', 'Cpt_params', '=', 'None', ')', ':', '# Preallocate', 'AP', '=', 'sparse', '.', 'bsr_matrix', '(', '(', 'np', '.', 'zeros', '(', 'Sparsity_Pattern', '.', 'data', '.', 'shape', ',', 'dtype', '=', 'T', '.', 'dtype', ')', ',', 'Sparsity_Pattern', '.', 'indices', ',', 'Sparsity_Pattern', '.', 'indptr', ')', ',', 'shape', '=', '(', 'Sparsity_Pattern', '.', 'shape', ')', ')', '# CG will be run with diagonal preconditioning', 'if', 'weighting', '==', "'diagonal'", ':', 'Dinv', '=', 'get_diagonal', '(', 'A', ',', 'norm_eq', '=', 'False', ',', 'inv', '=', 'True', ')', 'elif', 'weighting', '==', "'block'", ':', 'Dinv', '=', 'get_block_diag', '(', 'A', ',', 'blocksize', '=', 'A', '.', 'blocksize', '[', '0', ']', ',', 'inv_flag', '=', 'True', ')', 'Dinv', '=', 'sparse', '.', 'bsr_matrix', '(', '(', 'Dinv', ',', 'np', '.', 'arange', '(', 'Dinv', '.', 'shape', '[', '0', ']', ')', ',', 'np', '.', 'arange', '(', 'Dinv', '.', 'shape', '[', '0', ']', '+', '1', ')', ')', ',', 'shape', '=', 'A', '.', 'shape', ')', 'elif', 'weighting', '==', "'local'", ':', '# Based on Gershgorin estimate', 'D', '=', 'np', '.', 'abs', '(', 'A', ')', '*', 'np', '.', 'ones', '(', '(', 'A', '.', 'shape', '[', '0', ']', ',', '1', ')', ',', 'dtype', '=', 'A', '.', 'dtype', ')', 'Dinv', '=', 'np', '.', 'zeros_like', '(', 'D', ')', 'Dinv', '[', 'D', '!=', '0', ']', '=', '1.0', '/', 'np', '.', 'abs', '(', 'D', '[', 'D', '!=', '0', ']', ')', 'else', ':', 'raise', 'ValueError', '(', "'weighting value is invalid'", ')', '# Calculate initial residual', '# Equivalent to R = -A*T; R = R.multiply(Sparsity_Pattern)', '# with the added constraint that R has an explicit 0 wherever', '# R is 0 and Sparsity_Pattern is not', 'uones', '=', 'np', '.', 'zeros', '(', 'Sparsity_Pattern', '.', 'data', '.', 'shape', ',', 'dtype', '=', 'T', '.', 'dtype', ')', 'R', '=', 'sparse', '.', 'bsr_matrix', '(', '(', 'uones', ',', 'Sparsity_Pattern', '.', 'indices', ',', 'Sparsity_Pattern', '.', 'indptr', ')', ',', 'shape', '=', '(', 'Sparsity_Pattern', '.', 'shape', ')', ')', 'pyamg', '.', 'amg_core', '.', 'incomplete_mat_mult_bsr', '(', 'A', '.', 'indptr', ',', 'A', '.', 'indices', ',', 'np', '.', 'ravel', '(', 'A', '.', 'data', ')', ',', 'T', '.', 'indptr', ',', 'T', '.', 'indices', ',', 'np', '.', 'ravel', '(', 'T', '.', 'data', ')', ',', 'R', '.', 'indptr', ',', 'R', '.', 'indices', ',', 'np', '.', 'ravel', '(', 'R', '.', 'data', ')', ',', 'int', '(', 'T', '.', 'shape', '[', '0', ']', '/', 'T', '.', 'blocksize', '[', '0', ']', ')', ',', 'int', '(', 'T', '.', 'shape', '[', '1', ']', '/', 'T', '.', 'blocksize', '[', '1', ']', ')', ',', 'A', '.', 'blocksize', '[', '0', ']', ',', 'A', '.', 'blocksize', '[', '1', ']', ',', 'T', '.', 'blocksize', '[', '1', ']', ')', 'R', '.', 'data', '*=', '-', '1.0', '# Enforce R*B = 0', 'Satisfy_Constraints', '(', 'R', ',', 'B', ',', 'BtBinv', ')', 'if', 'R', '.', 'nnz', '==', '0', ':', 'print', '(', '"Error in sa_energy_min(..). Initial R no nonzeros on a level. 
\\\n Returning tentative prolongator\\n"', ')', 'return', 'T', '# Calculate Frobenius norm of the residual', 'resid', '=', 'R', '.', 'nnz', '# np.sqrt((R.data.conjugate()*R.data).sum())', '# print "Energy Minimization of Prolongator \\', '# --- Iteration 0 --- r = " + str(resid)', 'i', '=', '0', 'while', 'i', '<', 'maxiter', 'and', 'resid', '>', 'tol', ':', '# Apply diagonal preconditioner', 'if', 'weighting', '==', "'local'", 'or', 'weighting', '==', "'diagonal'", ':', 'Z', '=', 'scale_rows', '(', 'R', ',', 'Dinv', ')', 'else', ':', 'Z', '=', 'Dinv', '*', 'R', '# Frobenius inner-product of (R,Z) = sum( np.conjugate(rk).*zk)', 'newsum', '=', '(', 'R', '.', 'conjugate', '(', ')', '.', 'multiply', '(', 'Z', ')', ')', '.', 'sum', '(', ')', 'if', 'newsum', '<', 'tol', ':', '# met tolerance, so halt', 'break', '# P is the search direction, not the prolongator, which is T.', 'if', '(', 'i', '==', '0', ')', ':', 'P', '=', 'Z', 'oldsum', '=', 'newsum', 'else', ':', 'beta', '=', 'newsum', '/', 'oldsum', 'P', '=', 'Z', '+', 'beta', '*', 'P', 'oldsum', '=', 'newsum', '# Calculate new direction and enforce constraints', '# Equivalent to: AP = A*P; AP = AP.multiply(Sparsity_Pattern)', '# with the added constraint that explicit zeros are in AP wherever', '# AP = 0 and Sparsity_Pattern does not !!!!', 'AP', '.', 'data', '[', ':', ']', '=', '0.0', 'pyamg', '.', 'amg_core', '.', 'incomplete_mat_mult_bsr', '(', 'A', '.', 'indptr', ',', 'A', '.', 'indices', ',', 'np', '.', 'ravel', '(', 'A', '.', 'data', ')', ',', 'P', '.', 'indptr', ',', 'P', '.', 'indices', ',', 'np', '.', 'ravel', '(', 'P', '.', 'data', ')', ',', 'AP', '.', 'indptr', ',', 'AP', '.', 'indices', ',', 'np', '.', 'ravel', '(', 'AP', '.', 'data', ')', ',', 'int', '(', 'T', '.', 'shape', '[', '0', ']', '/', 'T', '.', 'blocksize', '[', '0', ']', ')', ',', 'int', '(', 'T', '.', 'shape', '[', '1', ']', '/', 'T', '.', 'blocksize', '[', '1', ']', ')', ',', 'A', '.', 'blocksize', '[', '0', ']', ',', 'A', '.', 'blocksize', '[', '1', ']', ',', 'P', '.', 'blocksize', '[', '1', ']', ')', '# Enforce AP*B = 0', 'Satisfy_Constraints', '(', 'AP', ',', 'B', ',', 'BtBinv', ')', '# Frobenius inner-product of (P, AP)', 'alpha', '=', 'newsum', '/', '(', 'P', '.', 'conjugate', '(', ')', '.', 'multiply', '(', 'AP', ')', ')', '.', 'sum', '(', ')', '# Update the prolongator, T', 'T', '=', 'T', '+', 'alpha', '*', 'P', '# Ensure identity at C-pts', 'if', 'Cpt_params', '[', '0', ']', ':', 'T', '=', 'Cpt_params', '[', '1', ']', '[', "'I_F'", ']', '*', 'T', '+', 'Cpt_params', '[', '1', ']', '[', "'P_I'", ']', '# Update residual', 'R', '=', 'R', '-', 'alpha', '*', 'AP', 'i', '+=', '1', '# Calculate Frobenius norm of the residual', 'resid', '=', 'R', '.', 'nnz', '# np.sqrt((R.data.conjugate()*R.data).sum())', '# print "Energy Minimization of Prolongator \\', '# --- Iteration " + str(i) + " --- r = " + str(resid)', 'return', 'T'] | Use CG to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using conjugate gradients to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother | ['Use', 'CG', 'to', 'smooth', 'T', 'by', 'solving', 'A', 'T', '=', '0', 'subject', 'to', 'nullspace', 'and', 'sparsity', 'constraints', '.'] | train | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/smooth.py#L278-L446 |
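The CG loop above is easier to see without the BSR sparsity pattern, the Satisfy_Constraints projections, and the C-point handling. Below is a dense NumPy sketch of just the diagonally preconditioned iteration for A X = 0 — an illustrative analogue, not the pyamg routine:

import numpy as np

def diag_precond_cg(A, X, Dinv, maxiter=10, tol=1e-8):
    """Dense, unconstrained analogue of the preconditioned CG iteration above."""
    R = -A @ X                        # residual of A X = 0
    P = None
    oldsum = None
    for _ in range(maxiter):
        Z = Dinv[:, None] * R         # diagonal preconditioning
        newsum = np.vdot(R, Z).real   # Frobenius inner product (R, Z)
        if newsum < tol:
            break
        P = Z if P is None else Z + (newsum / oldsum) * P
        oldsum = newsum
        AP = A @ P
        alpha = newsum / np.vdot(AP, P).real
        X = X + alpha * P
        R = R - alpha * AP
    return X

A = np.diag([4.0, 3.0, 2.0])
X0 = np.ones((3, 1))
print(diag_precond_cg(A, X0, 1.0 / np.diag(A)))   # converges toward the zero matrix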
9,171 | PmagPy/PmagPy | pmagpy/ipmag.py | lat_from_pole | def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat) | python | def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat) | ['def', 'lat_from_pole', '(', 'ref_loc_lon', ',', 'ref_loc_lat', ',', 'pole_plon', ',', 'pole_plat', ')', ':', 'ref_loc', '=', '(', 'ref_loc_lon', ',', 'ref_loc_lat', ')', 'pole', '=', '(', 'pole_plon', ',', 'pole_plat', ')', 'paleo_lat', '=', '90', '-', 'pmag', '.', 'angle', '(', 'pole', ',', 'ref_loc', ')', 'return', 'float', '(', 'paleo_lat', ')'] | Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees | ['Calculate', 'paleolatitude', 'for', 'a', 'reference', 'location', 'based', 'on', 'a', 'paleomagnetic', 'pole'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1408-L1423 |
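The relation used above is paleolatitude = 90 degrees minus the great-circle angle between the pole and the reference location. A self-contained check, with a spherical-law-of-cosines angle function standing in for pmag.angle (the explicit argument order and degree units are assumptions of this sketch):

import numpy as np

def angular_distance(lon1, lat1, lon2, lat2):
    """Great-circle angle in degrees between two (lon, lat) points; stand-in for pmag.angle."""
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    cos_a = (np.sin(lat1) * np.sin(lat2)
             + np.cos(lat1) * np.cos(lat2) * np.cos(lon1 - lon2))
    return np.degrees(np.arccos(np.clip(cos_a, -1.0, 1.0)))

# A site at (0 E, 45 N) is 45 degrees from a pole at the geographic north pole,
# so its paleolatitude is 90 - 45 = 45.
print(90 - angular_distance(0.0, 90.0, 0.0, 45.0))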
9,172 | apache/incubator-mxnet | python/mxnet/io/utils.py | _getdata_by_idx | def _getdata_by_idx(data, idx):
"""Shuffle the data."""
shuffle_data = []
for k, v in data:
if (isinstance(v, h5py.Dataset) if h5py else False):
shuffle_data.append((k, v))
elif isinstance(v, CSRNDArray):
shuffle_data.append((k, sparse_array(v.asscipy()[idx], v.context)))
else:
shuffle_data.append((k, array(v.asnumpy()[idx], v.context)))
return shuffle_data | python | def _getdata_by_idx(data, idx):
"""Shuffle the data."""
shuffle_data = []
for k, v in data:
if (isinstance(v, h5py.Dataset) if h5py else False):
shuffle_data.append((k, v))
elif isinstance(v, CSRNDArray):
shuffle_data.append((k, sparse_array(v.asscipy()[idx], v.context)))
else:
shuffle_data.append((k, array(v.asnumpy()[idx], v.context)))
return shuffle_data | ['def', '_getdata_by_idx', '(', 'data', ',', 'idx', ')', ':', 'shuffle_data', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'data', ':', 'if', '(', 'isinstance', '(', 'v', ',', 'h5py', '.', 'Dataset', ')', 'if', 'h5py', 'else', 'False', ')', ':', 'shuffle_data', '.', 'append', '(', '(', 'k', ',', 'v', ')', ')', 'elif', 'isinstance', '(', 'v', ',', 'CSRNDArray', ')', ':', 'shuffle_data', '.', 'append', '(', '(', 'k', ',', 'sparse_array', '(', 'v', '.', 'asscipy', '(', ')', '[', 'idx', ']', ',', 'v', '.', 'context', ')', ')', ')', 'else', ':', 'shuffle_data', '.', 'append', '(', '(', 'k', ',', 'array', '(', 'v', '.', 'asnumpy', '(', ')', '[', 'idx', ']', ',', 'v', '.', 'context', ')', ')', ')', 'return', 'shuffle_data'] | Shuffle the data. | ['Shuffle', 'the', 'data', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/io/utils.py#L74-L86 |
9,173 | mesbahamin/chronophore | chronophore/chronophore.py | set_up_logging | def set_up_logging(log_file, console_log_level):
"""Configure logging settings and return a logger object."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(str(log_file))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(console_log_level)
formatter = logging.Formatter(
"{asctime} {levelname} ({name}): {message}", style='{'
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger | python | def set_up_logging(log_file, console_log_level):
"""Configure logging settings and return a logger object."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(str(log_file))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(console_log_level)
formatter = logging.Formatter(
"{asctime} {levelname} ({name}): {message}", style='{'
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger | ['def', 'set_up_logging', '(', 'log_file', ',', 'console_log_level', ')', ':', 'logger', '=', 'logging', '.', 'getLogger', '(', ')', 'logger', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'fh', '=', 'logging', '.', 'FileHandler', '(', 'str', '(', 'log_file', ')', ')', 'fh', '.', 'setLevel', '(', 'logging', '.', 'DEBUG', ')', 'ch', '=', 'logging', '.', 'StreamHandler', '(', ')', 'ch', '.', 'setLevel', '(', 'console_log_level', ')', 'formatter', '=', 'logging', '.', 'Formatter', '(', '"{asctime} {levelname} ({name}): {message}"', ',', 'style', '=', "'{'", ')', 'fh', '.', 'setFormatter', '(', 'formatter', ')', 'ch', '.', 'setFormatter', '(', 'formatter', ')', 'logger', '.', 'addHandler', '(', 'fh', ')', 'logger', '.', 'addHandler', '(', 'ch', ')', 'return', 'logger'] | Configure logging settings and return a logger object. | ['Configure', 'logging', 'settings', 'and', 'return', 'a', 'logger', 'object', '.'] | train | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/chronophore.py#L48-L64 |
9,174 | maceoutliner/django-fiction-outlines | fiction_outlines/receivers.py | validate_character_for_story_element | def validate_character_for_story_element(sender, instance, action, reverse, pk_set, *args, **kwargs):
'''
Validates that character is from the same outline as the story node.
'''
if action == 'pre_add':
if reverse:
for spk in pk_set:
story_node = StoryElementNode.objects.get(pk=spk)
if instance.outline != story_node.outline:
raise IntegrityError(_('Character Instance must be from the same outline as story node.'))
else:
for cpk in pk_set:
char_instance = CharacterInstance.objects.get(pk=cpk)
if char_instance.outline != instance.outline:
raise IntegrityError(_('Character Instance must be from the same outline as story node.')) | python | def validate_character_for_story_element(sender, instance, action, reverse, pk_set, *args, **kwargs):
'''
Validates that character is from the same outline as the story node.
'''
if action == 'pre_add':
if reverse:
for spk in pk_set:
story_node = StoryElementNode.objects.get(pk=spk)
if instance.outline != story_node.outline:
raise IntegrityError(_('Character Instance must be from the same outline as story node.'))
else:
for cpk in pk_set:
char_instance = CharacterInstance.objects.get(pk=cpk)
if char_instance.outline != instance.outline:
raise IntegrityError(_('Character Instance must be from the same outline as story node.')) | ['def', 'validate_character_for_story_element', '(', 'sender', ',', 'instance', ',', 'action', ',', 'reverse', ',', 'pk_set', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'action', '==', "'pre_add'", ':', 'if', 'reverse', ':', 'for', 'spk', 'in', 'pk_set', ':', 'story_node', '=', 'StoryElementNode', '.', 'objects', '.', 'get', '(', 'pk', '=', 'spk', ')', 'if', 'instance', '.', 'outline', '!=', 'story_node', '.', 'outline', ':', 'raise', 'IntegrityError', '(', '_', '(', "'Character Instance must be from the same outline as story node.'", ')', ')', 'else', ':', 'for', 'cpk', 'in', 'pk_set', ':', 'char_instance', '=', 'CharacterInstance', '.', 'objects', '.', 'get', '(', 'pk', '=', 'cpk', ')', 'if', 'char_instance', '.', 'outline', '!=', 'instance', '.', 'outline', ':', 'raise', 'IntegrityError', '(', '_', '(', "'Character Instance must be from the same outline as story node.'", ')', ')'] | Validates that character is from the same outline as the story node. | ['Validates', 'that', 'character', 'is', 'from', 'the', 'same', 'outline', 'as', 'the', 'story', 'node', '.'] | train | https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/receivers.py#L170-L184 |
9,175 | CEA-COSMIC/ModOpt | modopt/signal/noise.py | thresh | def thresh(data, threshold, threshold_type='hard'):
r"""Threshold data
This method perfoms hard or soft thresholding on the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
threshold : float or np.ndarray
Threshold level(s)
threshold_type : str {'hard', 'soft'}
Type of noise to be added (default is 'hard')
Returns
-------
np.ndarray thresholded data
Raises
------
ValueError
If `threshold_type` is not 'hard' or 'soft'
Notes
-----
Implements one of the following two equations:
* Hard Threshold
.. math::
\mathrm{HT}_\lambda(x) =
\begin{cases}
x & \text{if } |x|\geq\lambda \\
0 & \text{otherwise}
\end{cases}
* Soft Threshold
.. math::
\mathrm{ST}_\lambda(x) =
\begin{cases}
x-\lambda\text{sign}(x) & \text{if } |x|\geq\lambda \\
0 & \text{otherwise}
\end{cases}
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import thresh
>>> np.random.seed(1)
>>> x = np.random.randint(-9, 9, 10)
>>> x
array([-4, 2, 3, -1, 0, 2, -4, 6, -9, 7])
>>> thresh(x, 4)
array([-4, 0, 0, 0, 0, 0, -4, 6, -9, 7])
>>> import numpy as np
>>> from modopt.signal.noise import thresh
>>> np.random.seed(1)
>>> x = np.random.ranf((3, 3))
>>> x
array([[ 4.17022005e-01, 7.20324493e-01, 1.14374817e-04],
[ 3.02332573e-01, 1.46755891e-01, 9.23385948e-02],
[ 1.86260211e-01, 3.45560727e-01, 3.96767474e-01]])
>>> thresh(x, 0.2, threshold_type='soft')
array([[ 0.217022 , 0.52032449, -0. ],
[ 0.10233257, -0. , -0. ],
[-0. , 0.14556073, 0.19676747]])
"""
data = np.array(data)
if threshold_type not in ('hard', 'soft'):
raise ValueError('Invalid threshold type. Options are "hard" or'
'"soft"')
if threshold_type == 'soft':
return np.around(np.maximum((1.0 - threshold /
np.maximum(np.finfo(np.float64).eps, np.abs(data))),
0.0) * data, decimals=15)
else:
return data * (np.abs(data) >= threshold) | python | def thresh(data, threshold, threshold_type='hard'):
r"""Threshold data
This method performs hard or soft thresholding on the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
threshold : float or np.ndarray
Threshold level(s)
threshold_type : str {'hard', 'soft'}
Type of thresholding to be applied (default is 'hard')
Returns
-------
np.ndarray thresholded data
Raises
------
ValueError
If `threshold_type` is not 'hard' or 'soft'
Notes
-----
Implements one of the following two equations:
* Hard Threshold
.. math::
\mathrm{HT}_\lambda(x) =
\begin{cases}
x & \text{if } |x|\geq\lambda \\
0 & \text{otherwise}
\end{cases}
* Soft Threshold
.. math::
\mathrm{ST}_\lambda(x) =
\begin{cases}
x-\lambda\text{sign}(x) & \text{if } |x|\geq\lambda \\
0 & \text{otherwise}
\end{cases}
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import thresh
>>> np.random.seed(1)
>>> x = np.random.randint(-9, 9, 10)
>>> x
array([-4, 2, 3, -1, 0, 2, -4, 6, -9, 7])
>>> thresh(x, 4)
array([-4, 0, 0, 0, 0, 0, -4, 6, -9, 7])
>>> import numpy as np
>>> from modopt.signal.noise import thresh
>>> np.random.seed(1)
>>> x = np.random.ranf((3, 3))
>>> x
array([[ 4.17022005e-01, 7.20324493e-01, 1.14374817e-04],
[ 3.02332573e-01, 1.46755891e-01, 9.23385948e-02],
[ 1.86260211e-01, 3.45560727e-01, 3.96767474e-01]])
>>> thresh(x, 0.2, threshold_type='soft')
array([[ 0.217022 , 0.52032449, -0. ],
[ 0.10233257, -0. , -0. ],
[-0. , 0.14556073, 0.19676747]])
"""
data = np.array(data)
if threshold_type not in ('hard', 'soft'):
raise ValueError('Invalid threshold type. Options are "hard" or'
'"soft"')
if threshold_type == 'soft':
return np.around(np.maximum((1.0 - threshold /
np.maximum(np.finfo(np.float64).eps, np.abs(data))),
0.0) * data, decimals=15)
else:
return data * (np.abs(data) >= threshold) | ['def', 'thresh', '(', 'data', ',', 'threshold', ',', 'threshold_type', '=', "'hard'", ')', ':', 'data', '=', 'np', '.', 'array', '(', 'data', ')', 'if', 'threshold_type', 'not', 'in', '(', "'hard'", ',', "'soft'", ')', ':', 'raise', 'ValueError', '(', '\'Invalid threshold type. Options are "hard" or\'', '\'"soft"\'', ')', 'if', 'threshold_type', '==', "'soft'", ':', 'return', 'np', '.', 'around', '(', 'np', '.', 'maximum', '(', '(', '1.0', '-', 'threshold', '/', 'np', '.', 'maximum', '(', 'np', '.', 'finfo', '(', 'np', '.', 'float64', ')', '.', 'eps', ',', 'np', '.', 'abs', '(', 'data', ')', ')', ')', ',', '0.0', ')', '*', 'data', ',', 'decimals', '=', '15', ')', 'else', ':', 'return', 'data', '*', '(', 'np', '.', 'abs', '(', 'data', ')', '>=', 'threshold', ')'] | r"""Threshold data
This method performs hard or soft thresholding on the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
threshold : float or np.ndarray
Threshold level(s)
threshold_type : str {'hard', 'soft'}
Type of thresholding to be applied (default is 'hard')
Returns
-------
np.ndarray thresholded data
Raises
------
ValueError
If `threshold_type` is not 'hard' or 'soft'
Notes
-----
Implements one of the following two equations:
* Hard Threshold
.. math::
\mathrm{HT}_\lambda(x) =
\begin{cases}
x & \text{if } |x|\geq\lambda \\
0 & \text{otherwise}
\end{cases}
* Soft Threshold
.. math::
\mathrm{ST}_\lambda(x) =
\begin{cases}
x-\lambda\text{sign}(x) & \text{if } |x|\geq\lambda \\
0 & \text{otherwise}
\end{cases}
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import thresh
>>> np.random.seed(1)
>>> x = np.random.randint(-9, 9, 10)
>>> x
array([-4, 2, 3, -1, 0, 2, -4, 6, -9, 7])
>>> thresh(x, 4)
array([-4, 0, 0, 0, 0, 0, -4, 6, -9, 7])
>>> import numpy as np
>>> from modopt.signal.noise import thresh
>>> np.random.seed(1)
>>> x = np.random.ranf((3, 3))
>>> x
array([[ 4.17022005e-01, 7.20324493e-01, 1.14374817e-04],
[ 3.02332573e-01, 1.46755891e-01, 9.23385948e-02],
[ 1.86260211e-01, 3.45560727e-01, 3.96767474e-01]])
>>> thresh(x, 0.2, threshold_type='soft')
array([[ 0.217022 , 0.52032449, -0. ],
[ 0.10233257, -0. , -0. ],
[-0. , 0.14556073, 0.19676747]]) | ['r', 'Threshold', 'data'] | train | https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/signal/noise.py#L91-L173 |
9,176 | saulpw/visidata | visidata/vdtui.py | wrmap | def wrmap(func, iterable, *args):
'Same as map(func, iterable, *args), but ignoring exceptions.'
for it in iterable:
try:
yield func(it, *args)
except Exception as e:
pass | python | def wrmap(func, iterable, *args):
'Same as map(func, iterable, *args), but ignoring exceptions.'
for it in iterable:
try:
yield func(it, *args)
except Exception as e:
pass | ['def', 'wrmap', '(', 'func', ',', 'iterable', ',', '*', 'args', ')', ':', 'for', 'it', 'in', 'iterable', ':', 'try', ':', 'yield', 'func', '(', 'it', ',', '*', 'args', ')', 'except', 'Exception', 'as', 'e', ':', 'pass'] | Same as map(func, iterable, *args), but ignoring exceptions. | ['Same', 'as', 'map', '(', 'func', 'iterable', '*', 'args', ')', 'but', 'ignoring', 'exceptions', '.'] | train | https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L1998-L2004 |
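A small usage sketch of wrmap: items whose conversion raises are silently skipped, so only the successful results are yielded.
values = ['1', '2', 'oops', '4']
print(list(wrmap(int, values)))  # [1, 2, 4] -- the failing 'oops' is dropped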
9,177 | Rapptz/discord.py | discord/ext/commands/core.py | bot_has_permissions | def bot_has_permissions(**perms):
"""Similar to :func:`.has_permissions` except checks if the bot itself has
the permissions listed.
This check raises a special exception, :exc:`.BotMissingPermissions`
that is inherited from :exc:`.CheckFailure`.
"""
def predicate(ctx):
guild = ctx.guild
me = guild.me if guild is not None else ctx.bot.user
permissions = ctx.channel.permissions_for(me)
missing = [perm for perm, value in perms.items() if getattr(permissions, perm, None) != value]
if not missing:
return True
raise BotMissingPermissions(missing)
return check(predicate) | python | def bot_has_permissions(**perms):
"""Similar to :func:`.has_permissions` except checks if the bot itself has
the permissions listed.
This check raises a special exception, :exc:`.BotMissingPermissions`
that is inherited from :exc:`.CheckFailure`.
"""
def predicate(ctx):
guild = ctx.guild
me = guild.me if guild is not None else ctx.bot.user
permissions = ctx.channel.permissions_for(me)
missing = [perm for perm, value in perms.items() if getattr(permissions, perm, None) != value]
if not missing:
return True
raise BotMissingPermissions(missing)
return check(predicate) | ['def', 'bot_has_permissions', '(', '*', '*', 'perms', ')', ':', 'def', 'predicate', '(', 'ctx', ')', ':', 'guild', '=', 'ctx', '.', 'guild', 'me', '=', 'guild', '.', 'me', 'if', 'guild', 'is', 'not', 'None', 'else', 'ctx', '.', 'bot', '.', 'user', 'permissions', '=', 'ctx', '.', 'channel', '.', 'permissions_for', '(', 'me', ')', 'missing', '=', '[', 'perm', 'for', 'perm', ',', 'value', 'in', 'perms', '.', 'items', '(', ')', 'if', 'getattr', '(', 'permissions', ',', 'perm', ',', 'None', ')', '!=', 'value', ']', 'if', 'not', 'missing', ':', 'return', 'True', 'raise', 'BotMissingPermissions', '(', 'missing', ')', 'return', 'check', '(', 'predicate', ')'] | Similar to :func:`.has_permissions` except checks if the bot itself has
the permissions listed.
This check raises a special exception, :exc:`.BotMissingPermissions`
that is inherited from :exc:`.CheckFailure`. | ['Similar', 'to', ':', 'func', ':', '.', 'has_permissions', 'except', 'checks', 'if', 'the', 'bot', 'itself', 'has', 'the', 'permissions', 'listed', '.'] | train | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/core.py#L1494-L1513 |
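A hedged usage sketch of the decorator inside a discord.py command; the command body and the manage_messages permission are illustrative only.
from discord.ext import commands

bot = commands.Bot(command_prefix='!')

@bot.command()
@commands.bot_has_permissions(manage_messages=True)  # raises BotMissingPermissions if the bot lacks it
async def purge(ctx, amount: int = 10):
    # Illustrative command body: delete the last few messages in the channel.
    await ctx.channel.purge(limit=amount)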
9,178 | floydhub/floyd-cli | floyd/client/files.py | ignore_path | def ignore_path(path, ignore_list=None, whitelist=None):
"""
Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns.
"""
if ignore_list is None:
return True
should_ignore = matches_glob_list(path, ignore_list)
if whitelist is None:
return should_ignore
return should_ignore and not matches_glob_list(path, whitelist) | python | def ignore_path(path, ignore_list=None, whitelist=None):
"""
Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns.
"""
if ignore_list is None:
return True
should_ignore = matches_glob_list(path, ignore_list)
if whitelist is None:
return should_ignore
return should_ignore and not matches_glob_list(path, whitelist) | ['def', 'ignore_path', '(', 'path', ',', 'ignore_list', '=', 'None', ',', 'whitelist', '=', 'None', ')', ':', 'if', 'ignore_list', 'is', 'None', ':', 'return', 'True', 'should_ignore', '=', 'matches_glob_list', '(', 'path', ',', 'ignore_list', ')', 'if', 'whitelist', 'is', 'None', ':', 'return', 'should_ignore', 'return', 'should_ignore', 'and', 'not', 'matches_glob_list', '(', 'path', ',', 'whitelist', ')'] | Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns. | ['Returns', 'a', 'boolean', 'indicating', 'if', 'a', 'path', 'should', 'be', 'ignored', 'given', 'an', 'ignore_list', 'and', 'a', 'whitelist', 'of', 'glob', 'patterns', '.'] | train | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L62-L74 |
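A short usage sketch, assuming matches_glob_list does fnmatch-style matching of the path against each pattern: whitelisted paths survive even when an ignore pattern matches.
ignore = ['*.log', 'build/*']
keep = ['important.log']

print(ignore_path('debug.log', ignore, keep))      # True  - matches *.log and is not whitelisted
print(ignore_path('important.log', ignore, keep))  # False - whitelisted
print(ignore_path('src/main.py', ignore, keep))    # False - no ignore pattern matches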
9,179 | materialsproject/pymatgen | pymatgen/analysis/interface_reactions.py | InterfacialReactivity._reverse_convert | def _reverse_convert(x, factor1, factor2):
"""
Converts mixing ratio x in c1 - c2 tie line to that in
comp1 - comp2 tie line.
Args:
x (float): Mixing ratio x in c1 - c2 tie line, a float between
0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
"""
return x * factor1 / ((1-x) * factor2 + x * factor1) | python | def _reverse_convert(x, factor1, factor2):
"""
Converts mixing ratio x in c1 - c2 tie line to that in
comp1 - comp2 tie line.
Args:
x (float): Mixing ratio x in c1 - c2 tie line, a float between
0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
"""
return x * factor1 / ((1-x) * factor2 + x * factor1) | ['def', '_reverse_convert', '(', 'x', ',', 'factor1', ',', 'factor2', ')', ':', 'return', 'x', '*', 'factor1', '/', '(', '(', '1', '-', 'x', ')', '*', 'factor2', '+', 'x', '*', 'factor1', ')'] | Converts mixing ratio x in c1 - c2 tie line to that in
comp1 - comp2 tie line.
Args:
x (float): Mixing ratio x in c1 - c2 tie line, a float between
0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1. | ['Converts', 'mixing', 'ratio', 'x', 'in', 'c1', '-', 'c2', 'tie', 'line', 'to', 'that', 'in', 'comp1', '-', 'comp2', 'tie', 'line', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/interface_reactions.py#L291-L308 |
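A worked number for the formula above: with x = 0.5 and compositional factors 2 and 1, the ratio on the comp1 - comp2 tie line is 0.5*2 / (0.5*1 + 0.5*2), about 0.667.
x, factor1, factor2 = 0.5, 2.0, 1.0
ratio = x * factor1 / ((1 - x) * factor2 + x * factor1)
print(round(ratio, 3))  # 0.667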
9,180 | PythonCharmers/python-future | src/future/backports/http/cookiejar.py | CookieJar.add_cookie_header | def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies() | python | def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies() | ['def', 'add_cookie_header', '(', 'self', ',', 'request', ')', ':', '_debug', '(', '"add_cookie_header"', ')', 'self', '.', '_cookies_lock', '.', 'acquire', '(', ')', 'try', ':', 'self', '.', '_policy', '.', '_now', '=', 'self', '.', '_now', '=', 'int', '(', 'time', '.', 'time', '(', ')', ')', 'cookies', '=', 'self', '.', '_cookies_for_request', '(', 'request', ')', 'attrs', '=', 'self', '.', '_cookie_attrs', '(', 'cookies', ')', 'if', 'attrs', ':', 'if', 'not', 'request', '.', 'has_header', '(', '"Cookie"', ')', ':', 'request', '.', 'add_unredirected_header', '(', '"Cookie"', ',', '"; "', '.', 'join', '(', 'attrs', ')', ')', '# if necessary, advertise that we know RFC 2965', 'if', '(', 'self', '.', '_policy', '.', 'rfc2965', 'and', 'not', 'self', '.', '_policy', '.', 'hide_cookie2', 'and', 'not', 'request', '.', 'has_header', '(', '"Cookie2"', ')', ')', ':', 'for', 'cookie', 'in', 'cookies', ':', 'if', 'cookie', '.', 'version', '!=', '1', ':', 'request', '.', 'add_unredirected_header', '(', '"Cookie2"', ',', '\'$Version="1"\'', ')', 'break', 'finally', ':', 'self', '.', '_cookies_lock', '.', 'release', '(', ')', 'self', '.', 'clear_expired_cookies', '(', ')'] | Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true. | ['Add', 'correct', 'Cookie', ':', 'header', 'to', 'request', '(', 'urllib', '.', 'request', '.', 'Request', 'object', ')', '.'] | train | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/cookiejar.py#L1334-L1365 |
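A hedged usage sketch, assuming CookieJar is imported from the backported cookiejar module above; the standard-library Request is used because the method expects a urllib.request.Request-style object, and an empty jar simply adds nothing.
from urllib.request import Request  # the backported CookieJar mirrors this interface

jar = CookieJar()                    # the class this method belongs to
req = Request('http://example.com/')
jar.add_cookie_header(req)           # adds a Cookie: header only if matching cookies are stored
print(req.get_header('Cookie'))      # None here, since the jar starts out empty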
9,181 | project-rig/rig | rig/machine_control/machine_controller.py | MachineController.write | def write(self, address, data, x, y, p=0):
"""Write a bytestring to an address in memory.
It is strongly encouraged to only read and write to blocks of memory
allocated using :py:meth:`.sdram_alloc`. Additionally,
:py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap
read/write access to memory with a file-like interface and prevent
accidental access to areas outside the allocated block.
Parameters
----------
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands.
"""
# Call the SCPConnection to perform the write on our behalf
connection = self._get_connection(x, y)
return connection.write(self.scp_data_length, self.scp_window_size,
x, y, p, address, data) | python | def write(self, address, data, x, y, p=0):
"""Write a bytestring to an address in memory.
It is strongly encouraged to only read and write to blocks of memory
allocated using :py:meth:`.sdram_alloc`. Additionally,
:py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap
read/write access to memory with a file-like interface and prevent
accidental access to areas outside the allocated block.
Parameters
----------
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands.
"""
# Call the SCPConnection to perform the write on our behalf
connection = self._get_connection(x, y)
return connection.write(self.scp_data_length, self.scp_window_size,
x, y, p, address, data) | ['def', 'write', '(', 'self', ',', 'address', ',', 'data', ',', 'x', ',', 'y', ',', 'p', '=', '0', ')', ':', '# Call the SCPConnection to perform the write on our behalf', 'connection', '=', 'self', '.', '_get_connection', '(', 'x', ',', 'y', ')', 'return', 'connection', '.', 'write', '(', 'self', '.', 'scp_data_length', ',', 'self', '.', 'scp_window_size', ',', 'x', ',', 'y', ',', 'p', ',', 'address', ',', 'data', ')'] | Write a bytestring to an address in memory.
It is strongly encouraged to only read and write to blocks of memory
allocated using :py:meth:`.sdram_alloc`. Additionally,
:py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap
read/write access to memory with a file-like interface and prevent
accidental access to areas outside the allocated block.
Parameters
----------
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands. | ['Write', 'a', 'bytestring', 'to', 'an', 'address', 'in', 'memory', '.'] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L502-L524 |
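A hedged usage sketch; it assumes mc is an already-booted MachineController and, as the docstring recommends, that the target address comes from a prior sdram_alloc call (the exact sdram_alloc keyword names are assumed, not taken from the source above).
addr = mc.sdram_alloc(4, x=0, y=0)             # allocate 4 bytes of SDRAM on chip (0, 0); signature assumed
mc.write(addr, b'\xde\xad\xbe\xef', x=0, y=0)  # write four bytes into the allocated block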
9,182 | PlaidWeb/Publ | publ/markdown.py | render_title | def render_title(text, markup=True, no_smartquotes=False):
""" Convert a Markdown title to HTML """
# HACK: If the title starts with something that looks like a list, save it
# for later
pfx, text = re.match(r'([0-9. ]*)(.*)', text).group(1, 2)
text = pfx + misaka.Markdown(TitleRenderer(),
extensions=TITLE_EXTENSIONS)(text)
if not markup:
strip = HTMLStripper()
strip.feed(text)
text = strip.get_data()
if not no_smartquotes:
text = misaka.smartypants(text)
return flask.Markup(text) | python | def render_title(text, markup=True, no_smartquotes=False):
""" Convert a Markdown title to HTML """
# HACK: If the title starts with something that looks like a list, save it
# for later
pfx, text = re.match(r'([0-9. ]*)(.*)', text).group(1, 2)
text = pfx + misaka.Markdown(TitleRenderer(),
extensions=TITLE_EXTENSIONS)(text)
if not markup:
strip = HTMLStripper()
strip.feed(text)
text = strip.get_data()
if not no_smartquotes:
text = misaka.smartypants(text)
return flask.Markup(text) | ['def', 'render_title', '(', 'text', ',', 'markup', '=', 'True', ',', 'no_smartquotes', '=', 'False', ')', ':', '# HACK: If the title starts with something that looks like a list, save it', '# for later', 'pfx', ',', 'text', '=', 're', '.', 'match', '(', "r'([0-9. ]*)(.*)'", ',', 'text', ')', '.', 'group', '(', '1', ',', '2', ')', 'text', '=', 'pfx', '+', 'misaka', '.', 'Markdown', '(', 'TitleRenderer', '(', ')', ',', 'extensions', '=', 'TITLE_EXTENSIONS', ')', '(', 'text', ')', 'if', 'not', 'markup', ':', 'strip', '=', 'HTMLStripper', '(', ')', 'strip', '.', 'feed', '(', 'text', ')', 'text', '=', 'strip', '.', 'get_data', '(', ')', 'if', 'not', 'no_smartquotes', ':', 'text', '=', 'misaka', '.', 'smartypants', '(', 'text', ')', 'return', 'flask', '.', 'Markup', '(', 'text', ')'] | Convert a Markdown title to HTML | ['Convert', 'a', 'Markdown', 'title', 'to', 'HTML'] | train | https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/markdown.py#L218-L235 |
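A small usage sketch of render_title; the sample title text is illustrative only.
html_title = render_title('3. *Hello* world')                 # Markdown rendered, numeric prefix preserved
plain_title = render_title('3. *Hello* world', markup=False)  # tags stripped via HTMLStripper
print(html_title, plain_title)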
9,183 | saltstack/salt | salt/netapi/__init__.py | NetapiClient.local_async | def local_async(self, *args, **kwargs):
'''
Run :ref:`execution modules <all-salt.modules>` asynchronously
Wraps :py:meth:`salt.client.LocalClient.run_job`.
:return: job ID
'''
local = salt.client.get_local_client(mopts=self.opts)
ret = local.run_job(*args, **kwargs)
return ret | python | def local_async(self, *args, **kwargs):
'''
Run :ref:`execution modules <all-salt.modules>` asynchronously
Wraps :py:meth:`salt.client.LocalClient.run_job`.
:return: job ID
'''
local = salt.client.get_local_client(mopts=self.opts)
ret = local.run_job(*args, **kwargs)
return ret | ['def', 'local_async', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'local', '=', 'salt', '.', 'client', '.', 'get_local_client', '(', 'mopts', '=', 'self', '.', 'opts', ')', 'ret', '=', 'local', '.', 'run_job', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'ret'] | Run :ref:`execution modules <all-salt.modules>` asynchronously
Wraps :py:meth:`salt.client.LocalClient.run_job`.
:return: job ID | ['Run', ':', 'ref', ':', 'execution', 'modules', '<all', '-', 'salt', '.', 'modules', '>', 'asynchronously'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/__init__.py#L82-L92 |
9,184 | titusjan/argos | argos/utils/masks.py | ArrayWithMask.replaceMaskedValue | def replaceMaskedValue(self, replacementValue):
""" Replaces values where the mask is True with the replacement value.
"""
if self.mask is False:
pass
elif self.mask is True:
self.data[:] = replacementValue
else:
self.data[self.mask] = replacementValue | python | def replaceMaskedValue(self, replacementValue):
""" Replaces values where the mask is True with the replacement value.
"""
if self.mask is False:
pass
elif self.mask is True:
self.data[:] = replacementValue
else:
self.data[self.mask] = replacementValue | ['def', 'replaceMaskedValue', '(', 'self', ',', 'replacementValue', ')', ':', 'if', 'self', '.', 'mask', 'is', 'False', ':', 'pass', 'elif', 'self', '.', 'mask', 'is', 'True', ':', 'self', '.', 'data', '[', ':', ']', '=', 'replacementValue', 'else', ':', 'self', '.', 'data', '[', 'self', '.', 'mask', ']', '=', 'replacementValue'] | Replaces values where the mask is True with the replacement value. | ['Replaces', 'values', 'where', 'the', 'mask', 'is', 'True', 'with', 'the', 'replacement', 'value', '.'] | train | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/masks.py#L195-L203 |
9,185 | joeblackwaslike/base58check | base58check/__init__.py | b58decode | def b58decode(val, charset=DEFAULT_CHARSET):
"""Decode base58check encoded input to original raw bytes.
:param bytes val: The value to base58check decode.
:param bytes charset: (optional) The character set to use for decoding.
:return: the decoded bytes.
:rtype: bytes
Usage::
>>> import base58check
>>> base58check.b58decode('\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe'
... '\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde')
b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT'
"""
def _b58decode_int(val):
output = 0
for char in val:
output = output * base + charset.index(char)
return output
if isinstance(val, str):
val = val.encode()
if isinstance(charset, str):
charset = charset.encode()
base = len(charset)
if not base == 58:
raise ValueError('charset base must be 58, not %s' % base)
pad_len = len(val)
val = val.lstrip(bytes([charset[0]]))
pad_len -= len(val)
acc = _b58decode_int(val)
result = deque()
while acc > 0:
acc, mod = divmod(acc, 256)
result.appendleft(mod)
prefix = b'\0' * pad_len
return prefix + bytes(result) | python | def b58decode(val, charset=DEFAULT_CHARSET):
"""Decode base58check encoded input to original raw bytes.
:param bytes val: The value to base58check decode.
:param bytes charset: (optional) The character set to use for decoding.
:return: the decoded bytes.
:rtype: bytes
Usage::
>>> import base58check
>>> base58check.b58decode('\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe'
... '\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde')
b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT'
"""
def _b58decode_int(val):
output = 0
for char in val:
output = output * base + charset.index(char)
return output
if isinstance(val, str):
val = val.encode()
if isinstance(charset, str):
charset = charset.encode()
base = len(charset)
if not base == 58:
raise ValueError('charset base must be 58, not %s' % base)
pad_len = len(val)
val = val.lstrip(bytes([charset[0]]))
pad_len -= len(val)
acc = _b58decode_int(val)
result = deque()
while acc > 0:
acc, mod = divmod(acc, 256)
result.appendleft(mod)
prefix = b'\0' * pad_len
return prefix + bytes(result) | ['def', 'b58decode', '(', 'val', ',', 'charset', '=', 'DEFAULT_CHARSET', ')', ':', 'def', '_b58decode_int', '(', 'val', ')', ':', 'output', '=', '0', 'for', 'char', 'in', 'val', ':', 'output', '=', 'output', '*', 'base', '+', 'charset', '.', 'index', '(', 'char', ')', 'return', 'output', 'if', 'isinstance', '(', 'val', ',', 'str', ')', ':', 'val', '=', 'val', '.', 'encode', '(', ')', 'if', 'isinstance', '(', 'charset', ',', 'str', ')', ':', 'charset', '=', 'charset', '.', 'encode', '(', ')', 'base', '=', 'len', '(', 'charset', ')', 'if', 'not', 'base', '==', '58', ':', 'raise', 'ValueError', '(', "'charset base must be 58, not %s'", '%', 'base', ')', 'pad_len', '=', 'len', '(', 'val', ')', 'val', '=', 'val', '.', 'lstrip', '(', 'bytes', '(', '[', 'charset', '[', '0', ']', ']', ')', ')', 'pad_len', '-=', 'len', '(', 'val', ')', 'acc', '=', '_b58decode_int', '(', 'val', ')', 'result', '=', 'deque', '(', ')', 'while', 'acc', '>', '0', ':', 'acc', ',', 'mod', '=', 'divmod', '(', 'acc', ',', '256', ')', 'result', '.', 'appendleft', '(', 'mod', ')', 'prefix', '=', "b'\\0'", '*', 'pad_len', 'return', 'prefix', '+', 'bytes', '(', 'result', ')'] | Decode base58check encoded input to original raw bytes.
:param bytes val: The value to base58check decode.
:param bytes charset: (optional) The character set to use for decoding.
:return: the decoded bytes.
:rtype: bytes
Usage::
>>> import base58check
>>> base58check.b58decode('\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe'
... '\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde')
b'1BoatSLRHtKNngkdXEeobR76b53LETtpyT' | ['Decode', 'base58check', 'encoded', 'input', 'to', 'original', 'raw', 'bytes', '.'] | train | https://github.com/joeblackwaslike/base58check/blob/417282766e697b8affc926a5f52cb9fcc41978cc/base58check/__init__.py#L96-L141 |
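A short usage sketch decoding a Base58Check-encoded Bitcoin address back to its 25 raw bytes (1 version byte, 20-byte hash, 4-byte checksum).
raw = b58decode('1BoatSLRHtKNngkdXEeobR76b53LETtpyT')
print(len(raw))   # 25
print(raw[:1])    # b'\x00' -- the leading '1' decodes to a zero version byte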
9,186 | Netflix-Skunkworks/historical | historical/security_group/collector.py | describe_group | def describe_group(record, region):
"""Attempts to describe group ids."""
account_id = record['account']
group_name = cloudwatch.filter_request_parameters('groupName', record)
vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True)
# Did this get collected already by the poller?
if cloudwatch.get_collected_details(record):
LOG.debug(f"[<--] Received already collected security group data: {record['detail']['collected']}")
return [record['detail']['collected']]
try:
# Always depend on Group ID first:
if group_id: # pylint: disable=R1705
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
GroupIds=[group_id]
)['SecurityGroups']
elif vpc_id and group_name:
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
Filters=[
{
'Name': 'group-name',
'Values': [group_name]
},
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
)['SecurityGroups']
else:
raise Exception('[X] Did not receive Group ID or VPC/Group Name pairs. '
f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.')
except ClientError as exc:
if exc.response['Error']['Code'] == 'InvalidGroup.NotFound':
return []
raise exc | python | def describe_group(record, region):
"""Attempts to describe group ids."""
account_id = record['account']
group_name = cloudwatch.filter_request_parameters('groupName', record)
vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True)
# Did this get collected already by the poller?
if cloudwatch.get_collected_details(record):
LOG.debug(f"[<--] Received already collected security group data: {record['detail']['collected']}")
return [record['detail']['collected']]
try:
# Always depend on Group ID first:
if group_id: # pylint: disable=R1705
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
GroupIds=[group_id]
)['SecurityGroups']
elif vpc_id and group_name:
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
Filters=[
{
'Name': 'group-name',
'Values': [group_name]
},
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
)['SecurityGroups']
else:
raise Exception('[X] Did not receive Group ID or VPC/Group Name pairs. '
f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.')
except ClientError as exc:
if exc.response['Error']['Code'] == 'InvalidGroup.NotFound':
return []
raise exc | ['def', 'describe_group', '(', 'record', ',', 'region', ')', ':', 'account_id', '=', 'record', '[', "'account'", ']', 'group_name', '=', 'cloudwatch', '.', 'filter_request_parameters', '(', "'groupName'", ',', 'record', ')', 'vpc_id', '=', 'cloudwatch', '.', 'filter_request_parameters', '(', "'vpcId'", ',', 'record', ')', 'group_id', '=', 'cloudwatch', '.', 'filter_request_parameters', '(', "'groupId'", ',', 'record', ',', 'look_in_response', '=', 'True', ')', '# Did this get collected already by the poller?', 'if', 'cloudwatch', '.', 'get_collected_details', '(', 'record', ')', ':', 'LOG', '.', 'debug', '(', 'f"[<--] Received already collected security group data: {record[\'detail\'][\'collected\']}"', ')', 'return', '[', 'record', '[', "'detail'", ']', '[', "'collected'", ']', ']', 'try', ':', '# Always depend on Group ID first:', 'if', 'group_id', ':', '# pylint: disable=R1705', 'return', 'describe_security_groups', '(', 'account_number', '=', 'account_id', ',', 'assume_role', '=', 'HISTORICAL_ROLE', ',', 'region', '=', 'region', ',', 'GroupIds', '=', '[', 'group_id', ']', ')', '[', "'SecurityGroups'", ']', 'elif', 'vpc_id', 'and', 'group_name', ':', 'return', 'describe_security_groups', '(', 'account_number', '=', 'account_id', ',', 'assume_role', '=', 'HISTORICAL_ROLE', ',', 'region', '=', 'region', ',', 'Filters', '=', '[', '{', "'Name'", ':', "'group-name'", ',', "'Values'", ':', '[', 'group_name', ']', '}', ',', '{', "'Name'", ':', "'vpc-id'", ',', "'Values'", ':', '[', 'vpc_id', ']', '}', ']', ')', '[', "'SecurityGroups'", ']', 'else', ':', 'raise', 'Exception', '(', "'[X] Did not receive Group ID or VPC/Group Name pairs. '", "f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.'", ')', 'except', 'ClientError', 'as', 'exc', ':', 'if', 'exc', '.', 'response', '[', "'Error'", ']', '[', "'Code'", ']', '==', "'InvalidGroup.NotFound'", ':', 'return', '[', ']', 'raise', 'exc'] | Attempts to describe group ids. | ['Attempts', 'to', 'describe', 'group', 'ids', '.'] | train | https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/security_group/collector.py#L47-L92 |
9,187 | mushkevych/scheduler | synergy/system/process_helper.py | kill_process | def kill_process(process_name):
""" method is called to kill a running process """
try:
sys.stdout.write('killing: {0} {{ \n'.format(process_name))
pid = get_process_pid(process_name)
if pid is not None and psutil.pid_exists(int(pid)):
p = psutil.Process(pid)
p.kill()
p.wait()
remove_pid_file(process_name)
except Exception as e:
sys.stderr.write('Exception on killing {0} : {1} \n'.format(process_name, e))
finally:
sys.stdout.write('}') | python | def kill_process(process_name):
""" method is called to kill a running process """
try:
sys.stdout.write('killing: {0} {{ \n'.format(process_name))
pid = get_process_pid(process_name)
if pid is not None and psutil.pid_exists(int(pid)):
p = psutil.Process(pid)
p.kill()
p.wait()
remove_pid_file(process_name)
except Exception as e:
sys.stderr.write('Exception on killing {0} : {1} \n'.format(process_name, e))
finally:
sys.stdout.write('}') | ['def', 'kill_process', '(', 'process_name', ')', ':', 'try', ':', 'sys', '.', 'stdout', '.', 'write', '(', "'killing: {0} {{ \\n'", '.', 'format', '(', 'process_name', ')', ')', 'pid', '=', 'get_process_pid', '(', 'process_name', ')', 'if', 'pid', 'is', 'not', 'None', 'and', 'psutil', '.', 'pid_exists', '(', 'int', '(', 'pid', ')', ')', ':', 'p', '=', 'psutil', '.', 'Process', '(', 'pid', ')', 'p', '.', 'kill', '(', ')', 'p', '.', 'wait', '(', ')', 'remove_pid_file', '(', 'process_name', ')', 'except', 'Exception', 'as', 'e', ':', 'sys', '.', 'stderr', '.', 'write', '(', "'Exception on killing {0} : {1} \\n'", '.', 'format', '(', 'process_name', ',', 'e', ')', ')', 'finally', ':', 'sys', '.', 'stdout', '.', 'write', '(', "'}'", ')'] | method is called to kill a running process | ['method', 'is', 'called', 'to', 'kill', 'a', 'running', 'process'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/process_helper.py#L25-L38 |
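A minimal usage sketch; the process names are placeholders for whatever names the scheduler registered PID files under.
for name in ('worker_alpha', 'worker_beta'):  # placeholder process names
    kill_process(name)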
9,188 | jeremymcrae/denovonear | denovonear/ensembl_requester.py | EnsemblRequest.get_protein_seq_for_transcript | def get_protein_seq_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "text/plain"}
self.attempt = 0
ext = "/sequence/id/{}?type=protein".format(transcript_id)
return self.ensembl_request(ext, headers) | python | def get_protein_seq_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "text/plain"}
self.attempt = 0
ext = "/sequence/id/{}?type=protein".format(transcript_id)
return self.ensembl_request(ext, headers) | ['def', 'get_protein_seq_for_transcript', '(', 'self', ',', 'transcript_id', ')', ':', 'headers', '=', '{', '"content-type"', ':', '"text/plain"', '}', 'self', '.', 'attempt', '=', '0', 'ext', '=', '"/sequence/id/{}?type=protein"', '.', 'format', '(', 'transcript_id', ')', 'return', 'self', '.', 'ensembl_request', '(', 'ext', ',', 'headers', ')'] | obtain the sequence for a transcript from ensembl | ['obtain', 'the', 'sequence', 'for', 'a', 'transcript', 'from', 'ensembl'] | train | https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/denovonear/ensembl_requester.py#L286-L295 |
9,189 | onicagroup/runway | runway/module/staticsite.py | ensure_valid_environment_config | def ensure_valid_environment_config(module_name, config):
"""Exit if config is invalid."""
if not config.get('namespace'):
LOGGER.fatal("staticsite: module %s's environment configuration is "
"missing a namespace definition!",
module_name)
sys.exit(1) | python | def ensure_valid_environment_config(module_name, config):
"""Exit if config is invalid."""
if not config.get('namespace'):
LOGGER.fatal("staticsite: module %s's environment configuration is "
"missing a namespace definition!",
module_name)
sys.exit(1) | ['def', 'ensure_valid_environment_config', '(', 'module_name', ',', 'config', ')', ':', 'if', 'not', 'config', '.', 'get', '(', "'namespace'", ')', ':', 'LOGGER', '.', 'fatal', '(', '"staticsite: module %s\'s environment configuration is "', '"missing a namespace definition!"', ',', 'module_name', ')', 'sys', '.', 'exit', '(', '1', ')'] | Exit if config is invalid. | ['Exit', 'if', 'config', 'is', 'invalid', '.'] | train | https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/module/staticsite.py#L16-L22 |
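A small sketch of the check in action: the first call passes silently, the second logs a fatal message and exits the process.
ensure_valid_environment_config('mysite', {'namespace': 'prod'})  # OK
ensure_valid_environment_config('mysite', {})                     # logs fatal, then sys.exit(1)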
9,190 | DomainTools/python_api | domaintools/cli.py | parse | def parse(args=None):
"""Defines how to parse CLI arguments for the DomainTools API"""
parser = argparse.ArgumentParser(description='The DomainTools CLI API Client')
parser.add_argument('-u', '--username', dest='user', default='', help='API Username')
parser.add_argument('-k', '--key', dest='key', default='', help='API Key')
parser.add_argument('-c', '--credfile', dest='credentials', default=os.path.expanduser('~/.dtapi'),
help='Optional file with API username and API key, one per line.')
parser.add_argument('-l', '--rate-limit', dest='rate_limit', action='store_true', default=False,
help='Rate limit API calls against the API based on per minute limits.')
parser.add_argument('-f', '--format', dest='format', choices=['list', 'json', 'xml', 'html'], default='json')
parser.add_argument('-o', '--outfile', dest='out_file', type=argparse.FileType('wbU'), default=sys.stdout,
help='Output file (defaults to stdout)')
parser.add_argument('-v', '--version', action='version', version='DomainTools CLI API Client {0}'.format(version))
parser.add_argument('--no-https', dest='https', action='store_false', default=True,
help='Use HTTP instead of HTTPS.')
parser.add_argument('--no-verify-ssl', dest='verify_ssl', action='store_false', default=True,
help='Skip verification of SSL certificate when making HTTPs API calls')
subparsers = parser.add_subparsers(help='The name of the API call you wish to perform (`whois` for example)',
dest='api_call')
subparsers.required = True
for api_call in API_CALLS:
api_method = getattr(API, api_call)
subparser = subparsers.add_parser(api_call, help=api_method.__name__)
spec = inspect.getargspec(api_method)
for argument_name, default in reversed(list(zip_longest(reversed(spec.args or []),
reversed(spec.defaults or []), fillvalue='EMPTY'))):
if argument_name == 'self':
continue
elif default == 'EMPTY':
subparser.add_argument(argument_name)
else:
subparser.add_argument('--{0}'.format(argument_name.replace('_', '-')), dest=argument_name,
default=default, nargs='*')
arguments = vars(parser.parse_args(args) if args else parser.parse_args())
if not arguments.get('user', None) or not arguments.get('key', None):
try:
with open(arguments.pop('credentials')) as credentials:
arguments['user'], arguments['key'] = credentials.readline().strip(), credentials.readline().strip()
except Exception:
pass
for key, value in arguments.items():
if value in ('-', ['-']):
arguments[key] = (line.strip() for line in sys.stdin.readlines())  # '-' means read values from stdin
elif value == []:
arguments[key] = True
elif type(value) == list and len(value) == 1:
arguments[key] = value[0]
return (arguments.pop('out_file'), arguments.pop('format'), arguments) | python | def parse(args=None):
"""Defines how to parse CLI arguments for the DomainTools API"""
parser = argparse.ArgumentParser(description='The DomainTools CLI API Client')
parser.add_argument('-u', '--username', dest='user', default='', help='API Username')
parser.add_argument('-k', '--key', dest='key', default='', help='API Key')
parser.add_argument('-c', '--credfile', dest='credentials', default=os.path.expanduser('~/.dtapi'),
help='Optional file with API username and API key, one per line.')
parser.add_argument('-l', '--rate-limit', dest='rate_limit', action='store_true', default=False,
help='Rate limit API calls against the API based on per minute limits.')
parser.add_argument('-f', '--format', dest='format', choices=['list', 'json', 'xml', 'html'], default='json')
parser.add_argument('-o', '--outfile', dest='out_file', type=argparse.FileType('wbU'), default=sys.stdout,
help='Output file (defaults to stdout)')
parser.add_argument('-v', '--version', action='version', version='DomainTools CLI API Client {0}'.format(version))
parser.add_argument('--no-https', dest='https', action='store_false', default=True,
help='Use HTTP instead of HTTPS.')
parser.add_argument('--no-verify-ssl', dest='verify_ssl', action='store_false', default=True,
help='Skip verification of SSL certificate when making HTTPs API calls')
subparsers = parser.add_subparsers(help='The name of the API call you wish to perform (`whois` for example)',
dest='api_call')
subparsers.required = True
for api_call in API_CALLS:
api_method = getattr(API, api_call)
subparser = subparsers.add_parser(api_call, help=api_method.__name__)
spec = inspect.getargspec(api_method)
for argument_name, default in reversed(list(zip_longest(reversed(spec.args or []),
reversed(spec.defaults or []), fillvalue='EMPTY'))):
if argument_name == 'self':
continue
elif default == 'EMPTY':
subparser.add_argument(argument_name)
else:
subparser.add_argument('--{0}'.format(argument_name.replace('_', '-')), dest=argument_name,
default=default, nargs='*')
arguments = vars(parser.parse_args(args) if args else parser.parse_args())
if not arguments.get('user', None) or not arguments.get('key', None):
try:
with open(arguments.pop('credentials')) as credentials:
arguments['user'], arguments['key'] = credentials.readline().strip(), credentials.readline().strip()
except Exception:
pass
for key, value in arguments.items():
if value in ('-', ['-']):
arguments[key] = (line.strip() for line in sys.stdin.readlines())  # '-' means read values from stdin
elif value == []:
arguments[key] = True
elif type(value) == list and len(value) == 1:
arguments[key] = value[0]
return (arguments.pop('out_file'), arguments.pop('format'), arguments) | ['def', 'parse', '(', 'args', '=', 'None', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'The DomainTools CLI API Client'", ')', 'parser', '.', 'add_argument', '(', "'-u'", ',', "'--username'", ',', 'dest', '=', "'user'", ',', 'default', '=', "''", ',', 'help', '=', "'API Username'", ')', 'parser', '.', 'add_argument', '(', "'-k'", ',', "'--key'", ',', 'dest', '=', "'key'", ',', 'default', '=', "''", ',', 'help', '=', "'API Key'", ')', 'parser', '.', 'add_argument', '(', "'-c'", ',', "'--credfile'", ',', 'dest', '=', "'credentials'", ',', 'default', '=', 'os', '.', 'path', '.', 'expanduser', '(', "'~/.dtapi'", ')', ',', 'help', '=', "'Optional file with API username and API key, one per line.'", ')', 'parser', '.', 'add_argument', '(', "'-l'", ',', "'--rate-limit'", ',', 'dest', '=', "'rate_limit'", ',', 'action', '=', "'store_true'", ',', 'default', '=', 'False', ',', 'help', '=', "'Rate limit API calls against the API based on per minute limits.'", ')', 'parser', '.', 'add_argument', '(', "'-f'", ',', "'--format'", ',', 'dest', '=', "'format'", ',', 'choices', '=', '[', "'list'", ',', "'json'", ',', "'xml'", ',', "'html'", ']', ',', 'default', '=', "'json'", ')', 'parser', '.', 'add_argument', '(', "'-o'", ',', "'--outfile'", ',', 'dest', '=', "'out_file'", ',', 'type', '=', 'argparse', '.', 'FileType', '(', "'wbU'", ')', ',', 'default', '=', 'sys', '.', 'stdout', ',', 'help', '=', "'Output file (defaults to stdout)'", ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', "'--version'", ',', 'action', '=', "'version'", ',', 'version', '=', "'DomainTools CLI API Client {0}'", '.', 'format', '(', 'version', ')', ')', 'parser', '.', 'add_argument', '(', "'--no-https'", ',', 'dest', '=', "'https'", ',', 'action', '=', "'store_false'", ',', 'default', '=', 'True', ',', 'help', '=', "'Use HTTP instead of HTTPS.'", ')', 'parser', '.', 'add_argument', '(', "'--no-verify-ssl'", ',', 'dest', '=', "'verify_ssl'", ',', 'action', '=', "'store_false'", ',', 'default', '=', 'True', ',', 'help', '=', "'Skip verification of SSL certificate when making HTTPs API calls'", ')', 'subparsers', '=', 'parser', '.', 'add_subparsers', '(', 'help', '=', "'The name of the API call you wish to perform (`whois` for example)'", ',', 'dest', '=', "'api_call'", ')', 'subparsers', '.', 'required', '=', 'True', 'for', 'api_call', 'in', 'API_CALLS', ':', 'api_method', '=', 'getattr', '(', 'API', ',', 'api_call', ')', 'subparser', '=', 'subparsers', '.', 'add_parser', '(', 'api_call', ',', 'help', '=', 'api_method', '.', '__name__', ')', 'spec', '=', 'inspect', '.', 'getargspec', '(', 'api_method', ')', 'for', 'argument_name', ',', 'default', 'in', 'reversed', '(', 'list', '(', 'zip_longest', '(', 'reversed', '(', 'spec', '.', 'args', 'or', '[', ']', ')', ',', 'reversed', '(', 'spec', '.', 'defaults', 'or', '[', ']', ')', ',', 'fillvalue', '=', "'EMPTY'", ')', ')', ')', ':', 'if', 'argument_name', '==', "'self'", ':', 'continue', 'elif', 'default', '==', "'EMPTY'", ':', 'subparser', '.', 'add_argument', '(', 'argument_name', ')', 'else', ':', 'subparser', '.', 'add_argument', '(', "'--{0}'", '.', 'format', '(', 'argument_name', '.', 'replace', '(', "'_'", ',', "'-'", ')', ')', ',', 'dest', '=', 'argument_name', ',', 'default', '=', 'default', ',', 'nargs', '=', "'*'", ')', 'arguments', '=', 'vars', '(', 'parser', '.', 'parse_args', '(', 'args', ')', 'if', 'args', 'else', 'parser', '.', 'parse_args', '(', ')', 
')', 'if', 'not', 'arguments', '.', 'get', '(', "'user'", ',', 'None', ')', 'or', 'not', 'arguments', '.', 'get', '(', "'key'", ',', 'None', ')', ':', 'try', ':', 'with', 'open', '(', 'arguments', '.', 'pop', '(', "'credentials'", ')', ')', 'as', 'credentials', ':', 'arguments', '[', "'user'", ']', ',', 'arguments', '[', "'key'", ']', '=', 'credentials', '.', 'readline', '(', ')', '.', 'strip', '(', ')', ',', 'credentials', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'except', 'Exception', ':', 'pass', 'for', 'key', ',', 'value', 'in', 'arguments', '.', 'items', '(', ')', ':', 'if', 'value', 'in', '(', "'-'", ',', '[', "'-'", ']', ')', ':', 'arguments', '[', 'key', ']', '==', '(', 'line', '.', 'strip', '(', ')', 'for', 'line', 'in', 'sys', '.', 'stdin', '.', 'readlines', '(', ')', ')', 'elif', 'value', '==', '[', ']', ':', 'arguments', '[', 'key', ']', '=', 'True', 'elif', 'type', '(', 'value', ')', '==', 'list', 'and', 'len', '(', 'value', ')', '==', '1', ':', 'arguments', '[', 'key', ']', '=', 'value', '[', '0', ']', 'return', '(', 'arguments', '.', 'pop', '(', "'out_file'", ')', ',', 'arguments', '.', 'pop', '(', "'format'", ')', ',', 'arguments', ')'] | Defines how to parse CLI arguments for the DomainTools API | ['Defines', 'how', 'to', 'parse', 'CLI', 'arguments', 'for', 'the', 'DomainTools', 'API'] | train | https://github.com/DomainTools/python_api/blob/17be85fd4913fbe14d7660a4f4829242f1663e60/domaintools/cli.py#L19-L71 |
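A hedged usage sketch; whois is assumed to be one of the API_CALLS generated from the API class, and the credentials are placeholders.
out_file, fmt, arguments = parse([
    '-u', 'api_user', '-k', 'api_key',   # placeholder credentials
    'whois', 'example.com',              # assumed API call name plus its positional argument
])
print(fmt)                    # 'json' by default
print(arguments['api_call'])  # 'whois'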
9,191 | cloudera/cm_api | python/src/cm_api/endpoints/types.py | ApiCommand.retry | def retry(self):
"""
Retry a failed or aborted command.
@return: A new ApiCommand object with the updated information.
"""
path = self._path() + '/retry'
resp = self._get_resource_root().post(path)
return ApiCommand.from_json_dict(resp, self._get_resource_root()) | python | def retry(self):
"""
Retry a failed or aborted command.
@return: A new ApiCommand object with the updated information.
"""
path = self._path() + '/retry'
resp = self._get_resource_root().post(path)
return ApiCommand.from_json_dict(resp, self._get_resource_root()) | ['def', 'retry', '(', 'self', ')', ':', 'path', '=', 'self', '.', '_path', '(', ')', '+', "'/retry'", 'resp', '=', 'self', '.', '_get_resource_root', '(', ')', '.', 'post', '(', 'path', ')', 'return', 'ApiCommand', '.', 'from_json_dict', '(', 'resp', ',', 'self', '.', '_get_resource_root', '(', ')', ')'] | Retry a failed or aborted command.
@return: A new ApiCommand object with the updated information. | ['Retry', 'a', 'failed', 'or', 'aborted', 'command', '.'] | train | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/types.py#L574-L582 |
9,192 | jbittel/django-mama-cas | mama_cas/utils.py | clean_service_url | def clean_service_url(url):
"""
Return only the scheme, hostname (with optional port) and path
components of the parameter URL.
"""
parts = urlparse(url)
return urlunparse((parts.scheme, parts.netloc, parts.path, '', '', '')) | python | def clean_service_url(url):
"""
Return only the scheme, hostname (with optional port) and path
components of the parameter URL.
"""
parts = urlparse(url)
return urlunparse((parts.scheme, parts.netloc, parts.path, '', '', '')) | ['def', 'clean_service_url', '(', 'url', ')', ':', 'parts', '=', 'urlparse', '(', 'url', ')', 'return', 'urlunparse', '(', '(', 'parts', '.', 'scheme', ',', 'parts', '.', 'netloc', ',', 'parts', '.', 'path', ',', "''", ',', "''", ',', "''", ')', ')'] | Return only the scheme, hostname (with optional port) and path
components of the parameter URL. | ['Return', 'only', 'the', 'scheme', 'hostname', '(', 'with', 'optional', 'port', ')', 'and', 'path', 'components', 'of', 'the', 'parameter', 'URL', '.'] | train | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/utils.py#L49-L55 |
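A short usage sketch showing that query strings and fragments are dropped while scheme, host, port and path are kept.
print(clean_service_url('https://app.example.com:8443/login?next=/home#top'))
# https://app.example.com:8443/login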
9,193 | christophertbrown/bioscripts | ctbBio/transform.py | box_cox | def box_cox(table):
"""
box-cox transform table
"""
from scipy.stats import boxcox as bc
t = []
for i in table:
if min(i) == 0:
scale = min([j for j in i if j != 0]) * 10e-10
else:
scale = 0
t.append(np.ndarray.tolist(bc(np.array([j + scale for j in i]))[0]))
return t | python | def box_cox(table):
"""
box-cox transform table
"""
from scipy.stats import boxcox as bc
t = []
for i in table:
if min(i) == 0:
scale = min([j for j in i if j != 0]) * 10e-10
else:
scale = 0
t.append(np.ndarray.tolist(bc(np.array([j + scale for j in i]))[0]))
return t | ['def', 'box_cox', '(', 'table', ')', ':', 'from', 'scipy', '.', 'stats', 'import', 'boxcox', 'as', 'bc', 't', '=', '[', ']', 'for', 'i', 'in', 'table', ':', 'if', 'min', '(', 'i', ')', '==', '0', ':', 'scale', '=', 'min', '(', '[', 'j', 'for', 'j', 'in', 'i', 'if', 'j', '!=', '0', ']', ')', '*', '10e-10', 'else', ':', 'scale', '=', '0', 't', '.', 'append', '(', 'np', '.', 'ndarray', '.', 'tolist', '(', 'bc', '(', 'np', '.', 'array', '(', '[', 'j', '+', 'scale', 'for', 'j', 'in', 'i', ']', ')', ')', '[', '0', ']', ')', ')', 'return', 't'] | box-cox transform table | ['box', '-', 'cox', 'transform', 'table'] | train | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L121-L133 |
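A minimal usage sketch; each row is transformed independently, and the second row contains a zero so it receives the tiny shift described in the code (numpy must be imported as np, as the function body assumes).
import numpy as np

table = [[1.0, 2.0, 4.0, 8.0],
         [0.0, 1.0, 3.0, 9.0]]
transformed = box_cox(table)
print([len(row) for row in transformed])  # [4, 4]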
9,194 | marrow/mongo | marrow/mongo/param/filter.py | F | def F(Document, __raw__=None, **filters):
"""Generate a MongoDB filter document through parameter interpolation.
Arguments passed by name have their name interpreted as an optional prefix (currently only `not`), a double-
underscore
Because this utility is likely going to be used frequently it has been given a single-character name.
"""
ops = Filter(__raw__)
args = _process_arguments(Document, FILTER_PREFIX_MAP, FILTER_OPERATION_MAP, filters)
for prefix, suffix, field, value in args:
if suffix:
op = suffix(field, value)
else:
op = DEFAULT_FILTER(field, value)
if prefix:
op = prefix(op)
ops &= op
return ops | python | def F(Document, __raw__=None, **filters):
"""Generate a MongoDB filter document through parameter interpolation.
Arguments passed by name have their name interpreted as an optional prefix (currently only `not`), a double-
underscore
Because this utility is likely going to be used frequently it has been given a single-character name.
"""
ops = Filter(__raw__)
args = _process_arguments(Document, FILTER_PREFIX_MAP, FILTER_OPERATION_MAP, filters)
for prefix, suffix, field, value in args:
if suffix:
op = suffix(field, value)
else:
op = DEFAULT_FILTER(field, value)
if prefix:
op = prefix(op)
ops &= op
return ops | ['def', 'F', '(', 'Document', ',', '__raw__', '=', 'None', ',', '*', '*', 'filters', ')', ':', 'ops', '=', 'Filter', '(', '__raw__', ')', 'args', '=', '_process_arguments', '(', 'Document', ',', 'FILTER_PREFIX_MAP', ',', 'FILTER_OPERATION_MAP', ',', 'filters', ')', 'for', 'prefix', ',', 'suffix', ',', 'field', ',', 'value', 'in', 'args', ':', 'if', 'suffix', ':', 'op', '=', 'suffix', '(', 'field', ',', 'value', ')', 'else', ':', 'op', '=', 'DEFAULT_FILTER', '(', 'field', ',', 'value', ')', 'if', 'prefix', ':', 'op', '=', 'prefix', '(', 'op', ')', 'ops', '&=', 'op', 'return', 'ops'] | Generate a MongoDB filter document through parameter interpolation.
Arguments passed by name have their name interpreted as an optional prefix (currently only `not`), a double-
underscore
Because this utility is likely going to be used frequently it has been given a single-character name. | ['Generate', 'a', 'MongoDB', 'filter', 'document', 'through', 'parameter', 'interpolation', '.', 'Arguments', 'passed', 'by', 'name', 'have', 'their', 'name', 'interpreted', 'as', 'an', 'optional', 'prefix', '(', 'currently', 'only', 'not', ')', 'a', 'double', '-', 'underscore', 'Because', 'this', 'utility', 'is', 'likely', 'going', 'to', 'be', 'used', 'frequently', 'it', 'has', 'been', 'given', 'a', 'single', '-', 'character', 'name', '.'] | train | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/param/filter.py#L67-L90 |
9,195 | ergo/ziggurat_foundations | ziggurat_foundations/__init__.py | make_passwordmanager | def make_passwordmanager(schemes=None):
"""
schemes contains a list of hash schemes; replace this list with the hash(es)
you wish to support.
this example sets pbkdf2_sha256 as the default,
with support for legacy bcrypt hashes.
:param schemes:
:return: CryptContext()
"""
from passlib.context import CryptContext
if not schemes:
schemes = ["pbkdf2_sha256", "bcrypt"]
pwd_context = CryptContext(schemes=schemes, deprecated="auto")
return pwd_context | python | def make_passwordmanager(schemes=None):
"""
schemes contains a list of hash schemes; replace this list with the hash(es)
you wish to support.
this example sets pbkdf2_sha256 as the default,
with support for legacy bcrypt hashes.
:param schemes:
:return: CryptContext()
"""
from passlib.context import CryptContext
if not schemes:
schemes = ["pbkdf2_sha256", "bcrypt"]
pwd_context = CryptContext(schemes=schemes, deprecated="auto")
return pwd_context | ['def', 'make_passwordmanager', '(', 'schemes', '=', 'None', ')', ':', 'from', 'passlib', '.', 'context', 'import', 'CryptContext', 'if', 'not', 'schemes', ':', 'schemes', '=', '[', '"pbkdf2_sha256"', ',', '"bcrypt"', ']', 'pwd_context', '=', 'CryptContext', '(', 'schemes', '=', 'schemes', ',', 'deprecated', '=', '"auto"', ')', 'return', 'pwd_context'] | schemes contains a list of replace this list with the hash(es) you wish
to support.
this example sets pbkdf2_sha256 as the default,
with support for legacy bcrypt hashes.
:param schemes:
:return: CryptContext() | ['schemes', 'contains', 'a', 'list', 'of', 'replace', 'this', 'list', 'with', 'the', 'hash', '(', 'es', ')', 'you', 'wish', 'to', 'support', '.', 'this', 'example', 'sets', 'pbkdf2_sha256', 'as', 'the', 'default', 'with', 'support', 'for', 'legacy', 'bcrypt', 'hashes', '.'] | train | https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/__init__.py#L43-L58 |
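A short usage sketch of the returned passlib CryptContext.
pwd_context = make_passwordmanager()
hashed = pwd_context.hash('correct horse battery staple')
print(pwd_context.verify('correct horse battery staple', hashed))  # True
print(pwd_context.verify('wrong password', hashed))                # False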
9,196 | astrocatalogs/astrocats | astrocats/main.py | load_command_line_args | def load_command_line_args(clargs=None):
"""Load and parse command-line arguments.
Arguments
---------
args : str or None
'Faked' commandline arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments.
"""
import argparse
git_vers = get_git()
parser = argparse.ArgumentParser(
prog='astrocats',
description='Generate catalogs for astronomical data.')
parser.add_argument('command', nargs='?', default=None)
parser.add_argument(
'--version',
action='version',
version='AstroCats v{}, SHA: {}'.format(__version__, git_vers))
parser.add_argument(
'--verbose',
'-v',
dest='verbose',
default=False,
action='store_true',
help='Print more messages to the screen.')
parser.add_argument(
'--debug',
'-d',
dest='debug',
default=False,
action='store_true',
help='Print excessive messages to the screen.')
parser.add_argument(
'--include-private',
dest='private',
default=False,
action='store_true',
help='Include private data in import.')
parser.add_argument(
'--travis',
'-t',
dest='travis',
default=False,
action='store_true',
help='Run import script in test mode for Travis.')
parser.add_argument(
'--clone-depth',
dest='clone_depth',
default=0,
type=int,
help=('When cloning git repos, only clone out to this depth '
'(default: 0 = all levels).'))
parser.add_argument(
'--purge-outputs',
dest='purge_outputs',
default=False,
action='store_true',
help=('Purge git outputs after cloning.'))
parser.add_argument(
'--log',
dest='log_filename',
default=None,
help='Filename to which to store logging information.')
# If output files should be written or not
# ----------------------------------------
write_group = parser.add_mutually_exclusive_group()
write_group.add_argument(
'--write',
action='store_true',
dest='write_entries',
default=True,
help='Write entries to files [default].')
write_group.add_argument(
'--no-write',
action='store_false',
dest='write_entries',
default=True,
help='do not write entries to file.')
# If previously cleared output files should be deleted or not
# -----------------------------------------------------------
delete_group = parser.add_mutually_exclusive_group()
delete_group.add_argument(
'--predelete',
action='store_true',
dest='delete_old',
default=True,
help='Delete all old event files to begin [default].')
delete_group.add_argument(
'--no-predelete',
action='store_false',
dest='delete_old',
default=True,
help='Do not delete all old event files to start.')
args, sub_clargs = parser.parse_known_args(args=clargs)
# Print the help information if no command is given
if args.command is None:
parser.print_help()
return None, None
return args, sub_clargs | python | def load_command_line_args(clargs=None):
"""Load and parse command-line arguments.
Arguments
---------
args : str or None
'Faked' commandline arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments.
"""
import argparse
git_vers = get_git()
parser = argparse.ArgumentParser(
prog='astrocats',
description='Generate catalogs for astronomical data.')
parser.add_argument('command', nargs='?', default=None)
parser.add_argument(
'--version',
action='version',
version='AstroCats v{}, SHA: {}'.format(__version__, git_vers))
parser.add_argument(
'--verbose',
'-v',
dest='verbose',
default=False,
action='store_true',
help='Print more messages to the screen.')
parser.add_argument(
'--debug',
'-d',
dest='debug',
default=False,
action='store_true',
help='Print excessive messages to the screen.')
parser.add_argument(
'--include-private',
dest='private',
default=False,
action='store_true',
help='Include private data in import.')
parser.add_argument(
'--travis',
'-t',
dest='travis',
default=False,
action='store_true',
help='Run import script in test mode for Travis.')
parser.add_argument(
'--clone-depth',
dest='clone_depth',
default=0,
type=int,
help=('When cloning git repos, only clone out to this depth '
'(default: 0 = all levels).'))
parser.add_argument(
'--purge-outputs',
dest='purge_outputs',
default=False,
action='store_true',
help=('Purge git outputs after cloning.'))
parser.add_argument(
'--log',
dest='log_filename',
default=None,
help='Filename to which to store logging information.')
# If output files should be written or not
# ----------------------------------------
write_group = parser.add_mutually_exclusive_group()
write_group.add_argument(
'--write',
action='store_true',
dest='write_entries',
default=True,
help='Write entries to files [default].')
write_group.add_argument(
'--no-write',
action='store_false',
dest='write_entries',
default=True,
help='do not write entries to file.')
# If previously cleared output files should be deleted or not
# -----------------------------------------------------------
delete_group = parser.add_mutually_exclusive_group()
delete_group.add_argument(
'--predelete',
action='store_true',
dest='delete_old',
default=True,
help='Delete all old event files to begin [default].')
delete_group.add_argument(
'--no-predelete',
action='store_false',
dest='delete_old',
default=True,
help='Do not delete all old event files to start.')
args, sub_clargs = parser.parse_known_args(args=clargs)
# Print the help information if no command is given
if args.command is None:
parser.print_help()
return None, None
return args, sub_clargs | ['def', 'load_command_line_args', '(', 'clargs', '=', 'None', ')', ':', 'import', 'argparse', 'git_vers', '=', 'get_git', '(', ')', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'prog', '=', "'astrocats'", ',', 'description', '=', "'Generate catalogs for astronomical data.'", ')', 'parser', '.', 'add_argument', '(', "'command'", ',', 'nargs', '=', "'?'", ',', 'default', '=', 'None', ')', 'parser', '.', 'add_argument', '(', "'--version'", ',', 'action', '=', "'version'", ',', 'version', '=', "'AstroCats v{}, SHA: {}'", '.', 'format', '(', '__version__', ',', 'git_vers', ')', ')', 'parser', '.', 'add_argument', '(', "'--verbose'", ',', "'-v'", ',', 'dest', '=', "'verbose'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Print more messages to the screen.'", ')', 'parser', '.', 'add_argument', '(', "'--debug'", ',', "'-d'", ',', 'dest', '=', "'debug'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Print excessive messages to the screen.'", ')', 'parser', '.', 'add_argument', '(', "'--include-private'", ',', 'dest', '=', "'private'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Include private data in import.'", ')', 'parser', '.', 'add_argument', '(', "'--travis'", ',', "'-t'", ',', 'dest', '=', "'travis'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Run import script in test mode for Travis.'", ')', 'parser', '.', 'add_argument', '(', "'--clone-depth'", ',', 'dest', '=', "'clone_depth'", ',', 'default', '=', '0', ',', 'type', '=', 'int', ',', 'help', '=', '(', "'When cloning git repos, only clone out to this depth '", "'(default: 0 = all levels).'", ')', ')', 'parser', '.', 'add_argument', '(', "'--purge-outputs'", ',', 'dest', '=', "'purge_outputs'", ',', 'default', '=', 'False', ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'Purge git outputs after cloning.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--log'", ',', 'dest', '=', "'log_filename'", ',', 'default', '=', 'None', ',', 'help', '=', "'Filename to which to store logging information.'", ')', '# If output files should be written or not', '# ----------------------------------------', 'write_group', '=', 'parser', '.', 'add_mutually_exclusive_group', '(', ')', 'write_group', '.', 'add_argument', '(', "'--write'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'write_entries'", ',', 'default', '=', 'True', ',', 'help', '=', "'Write entries to files [default].'", ')', 'write_group', '.', 'add_argument', '(', "'--no-write'", ',', 'action', '=', "'store_false'", ',', 'dest', '=', "'write_entries'", ',', 'default', '=', 'True', ',', 'help', '=', "'do not write entries to file.'", ')', '# If previously cleared output files should be deleted or not', '# -----------------------------------------------------------', 'delete_group', '=', 'parser', '.', 'add_mutually_exclusive_group', '(', ')', 'delete_group', '.', 'add_argument', '(', "'--predelete'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'delete_old'", ',', 'default', '=', 'True', ',', 'help', '=', "'Delete all old event files to begin [default].'", ')', 'delete_group', '.', 'add_argument', '(', "'--no-predelete'", ',', 'action', '=', "'store_false'", ',', 'dest', '=', "'delete_old'", ',', 'default', '=', 'True', ',', 'help', '=', "'Do not delete all old event files to start.'", ')', 'args', ',', 'sub_clargs', '=', 'parser', '.', 'parse_known_args', '(', 
'args', '=', 'clargs', ')', '# Print the help information if no command is given', 'if', 'args', '.', 'command', 'is', 'None', ':', 'parser', '.', 'print_help', '(', ')', 'return', 'None', ',', 'None', 'return', 'args', ',', 'sub_clargs'] | Load and parse command-line arguments.
Arguments
---------
args : str or None
'Faked' commandline arguments passed to `argparse`.
Returns
-------
args : `argparse.Namespace` object
Namespace in which settings are stored - default values modified by the
given command-line arguments. | ['Load', 'and', 'parse', 'command', '-', 'line', 'arguments', '.'] | train | https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/main.py#L147-L259 |
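A hedged usage sketch for the load_command_line_args function documented above; the "faked" argv list and its values are invented, and the import path is assumed from astrocats/main.py:
# Assumes the astrocats package (and its get_git() dependency) is importable.
from astrocats.main import load_command_line_args
args, sub_clargs = load_command_line_args(
    ["import", "--verbose", "--clone-depth", "1", "--log", "run.log"])
print(args.command)       # "import"
print(args.verbose)       # True
print(args.clone_depth)   # 1
print(args.log_filename)  # "run.log"
print(sub_clargs)         # [] -- unrecognised options would be collected here
# With an empty argument list the parser prints its help text and (None, None) is returned.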
9,197 | numenta/htmresearch | htmresearch/frameworks/grid_cell_learning/DynamicCAN.py | Dynamic1DCAN.calculatePathIntegrationError | def calculatePathIntegrationError(self, time, dt=None, trajectory=None,
envelope=False, inputNoise=None):
"""
Calculate the error of our path integration, relative to an ideal module.
To do this, we track the movement of an individual bump
Note that the network must be trained before this is done.
:param time: How long to simulate for in seconds. We recommend using a
small value, e.g. ~10s.
:param trajectory: An optional trajectory that specifies how the network moves.
:param inputNoise: Whether or not to apply noise, and how much.
:return: A tuple of the true trajectory and the inferred trajectory.
"""
# Set up plotting
if self.plotting:
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(411)
self.ax2 = self.fig.add_subplot(412)
self.ax3 = self.fig.add_subplot(413)
self.ax4 = self.fig.add_subplot(414)
plt.tight_layout()
plt.ion()
self.fig.show()
self.fig.canvas.draw()
mouse = plt.imread(os.path.dirname(os.path.realpath(__file__))
+ "/mouse_graphic.png")
self.ax1.set_xlabel("Excitatory population activity")
self.ax2.set_xlabel("Inhibitory population activity")
self.ax3.set_xlabel("Movement in cells")
self.ax3.set_ylabel("Cost")
self.ax4.set_xlabel("Location")
plt.tight_layout()
if dt is None:
oldDt = self.dt
else:
oldDt = self.dt
self.dt = dt
# Simulate for a second to get nice starting activation bumps.
# Turn plotting off so as not to confuse the viewer
oldPlotting = self.plotting
self.plotting = False
self.simulate(1, 1, 1, 0, envelope=envelope, inputNoise=None)
self.plotting = oldPlotting
estimatedVelocities = []
trueVelocities = []
times = np.arange(0, time, self.dt)
if trajectory is None:
# Sum together two different sinusoidals for a more interesting path.
trajectory = (np.sin((-times*np.pi/10 - np.pi/2.))+1)*2.5
trajectory += (np.cos((-times*np.pi/3 - np.pi/2.))+1)*.75
velocities = np.diff(trajectory)/self.dt
oldActivations = copy.copy(self.activationsI)
oldX = trajectory[0]
for i, t in enumerate(times[:-1]):
v = velocities[i]
x = trajectory[i]
feedforwardInputI = np.ones(self.activationsI.shape)
feedforwardInputE = np.ones(self.activationsEL.shape)
if inputNoise is not None:
noisesI = np.random.random_sample(feedforwardInputI.shape)*inputNoise
noisesE = np.random.random_sample(feedforwardInputE.shape)*inputNoise
else:
noisesE = 1.
noisesI = 1.
self.update(feedforwardInputI*noisesI, feedforwardInputE*noisesE,
v, True, envelope=envelope)
estimationTime = np.abs(np.mod(t, ESTIMATION_INTERVAL))
if estimationTime <= 0.00001 or \
np.abs(estimationTime - ESTIMATION_INTERVAL) <= 0.00001:
rotations = [np.sum(np.abs(np.roll(oldActivations, i) -
self.activationsI))
for i in range(-10, 11, 1)]
shift = np.argmin(rotations) - 10
trueVelocities.append(x - oldX)
oldX = x
oldActivations = copy.copy(self.activationsI)
estimatedVelocities.append(shift)
if self.plotting:
plotTime = np.abs(np.mod(t, PLOT_INTERVAL))
if plotTime <= 0.00001 or np.abs(plotTime - PLOT_INTERVAL) <= 0.00001:
self.ax3.clear()
self.ax3.plot(np.arange(-len(rotations)/2 + 1, len(rotations)/2 + 1, 1),
rotations,
color="g",
label="Shift")
self.ax3.legend(loc="best")
self.ax3.set_xlabel("Movement in cells")
self.ax3.set_ylabel("Cost")
self.ax3.axvline(x=shift)
self.ax4.clear()
self.ax4.set_xlim(np.amin(trajectory), np.amax(trajectory))
self.ax4.set_ylim(0, 1)
mouse_bound = (x - 0.25*np.sign(v), x + 0.25*np.sign(v), .05, .55)
self.ax4.imshow(mouse,
aspect='auto',
extent=mouse_bound,
zorder=-1)
self.ax4.set_xlabel("Location")
self.ax4.axes.get_yaxis().set_visible(False)
self.fig.canvas.draw()
self.plotActivation(time=t, velocity=v, boosting=False)
self.dt = oldDt
return(np.asarray(trueVelocities), np.asarray(estimatedVelocities)) | python | def calculatePathIntegrationError(self, time, dt=None, trajectory=None,
envelope=False, inputNoise=None):
"""
Calculate the error of our path integration, relative to an ideal module.
To do this, we track the movement of an individual bump
Note that the network must be trained before this is done.
:param time: How long to simulate for in seconds. We recommend using a
small value, e.g. ~10s.
:param trajectory: An optional trajectory that specifies how the network moves.
:param inputNoise: Whether or not to apply noise, and how much.
:return: A tuple of the true trajectory and the inferred trajectory.
"""
# Set up plotting
if self.plotting:
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(411)
self.ax2 = self.fig.add_subplot(412)
self.ax3 = self.fig.add_subplot(413)
self.ax4 = self.fig.add_subplot(414)
plt.tight_layout()
plt.ion()
self.fig.show()
self.fig.canvas.draw()
mouse = plt.imread(os.path.dirname(os.path.realpath(__file__))
+ "/mouse_graphic.png")
self.ax1.set_xlabel("Excitatory population activity")
self.ax2.set_xlabel("Inhibitory population activity")
self.ax3.set_xlabel("Movement in cells")
self.ax3.set_ylabel("Cost")
self.ax4.set_xlabel("Location")
plt.tight_layout()
if dt is None:
oldDt = self.dt
else:
oldDt = self.dt
self.dt = dt
# Simulate for a second to get nice starting activation bumps.
# Turn plotting off so as not to confuse the viewer
oldPlotting = self.plotting
self.plotting = False
self.simulate(1, 1, 1, 0, envelope=envelope, inputNoise=None)
self.plotting = oldPlotting
estimatedVelocities = []
trueVelocities = []
times = np.arange(0, time, self.dt)
if trajectory is None:
# Sum together two different sinusoidals for a more interesting path.
trajectory = (np.sin((-times*np.pi/10 - np.pi/2.))+1)*2.5
trajectory += (np.cos((-times*np.pi/3 - np.pi/2.))+1)*.75
velocities = np.diff(trajectory)/self.dt
oldActivations = copy.copy(self.activationsI)
oldX = trajectory[0]
for i, t in enumerate(times[:-1]):
v = velocities[i]
x = trajectory[i]
feedforwardInputI = np.ones(self.activationsI.shape)
feedforwardInputE = np.ones(self.activationsEL.shape)
if inputNoise is not None:
noisesI = np.random.random_sample(feedforwardInputI.shape)*inputNoise
noisesE = np.random.random_sample(feedforwardInputE.shape)*inputNoise
else:
noisesE = 1.
noisesI = 1.
self.update(feedforwardInputI*noisesI, feedforwardInputE*noisesE,
v, True, envelope=envelope)
estimationTime = np.abs(np.mod(t, ESTIMATION_INTERVAL))
if estimationTime <= 0.00001 or \
np.abs(estimationTime - ESTIMATION_INTERVAL) <= 0.00001:
rotations = [np.sum(np.abs(np.roll(oldActivations, i) -
self.activationsI))
for i in range(-10, 11, 1)]
shift = np.argmin(rotations) - 10
trueVelocities.append(x - oldX)
oldX = x
oldActivations = copy.copy(self.activationsI)
estimatedVelocities.append(shift)
if self.plotting:
plotTime = np.abs(np.mod(t, PLOT_INTERVAL))
if plotTime <= 0.00001 or np.abs(plotTime - PLOT_INTERVAL) <= 0.00001:
self.ax3.clear()
self.ax3.plot(np.arange(-len(rotations)/2 + 1, len(rotations)/2 + 1, 1),
rotations,
color="g",
label="Shift")
self.ax3.legend(loc="best")
self.ax3.set_xlabel("Movement in cells")
self.ax3.set_ylabel("Cost")
self.ax3.axvline(x=shift)
self.ax4.clear()
self.ax4.set_xlim(np.amin(trajectory), np.amax(trajectory))
self.ax4.set_ylim(0, 1)
mouse_bound = (x - 0.25*np.sign(v), x + 0.25*np.sign(v), .05, .55)
self.ax4.imshow(mouse,
aspect='auto',
extent=mouse_bound,
zorder=-1)
self.ax4.set_xlabel("Location")
self.ax4.axes.get_yaxis().set_visible(False)
self.fig.canvas.draw()
self.plotActivation(time=t, velocity=v, boosting=False)
self.dt = oldDt
return(np.asarray(trueVelocities), np.asarray(estimatedVelocities)) | ['def', 'calculatePathIntegrationError', '(', 'self', ',', 'time', ',', 'dt', '=', 'None', ',', 'trajectory', '=', 'None', ',', 'envelope', '=', 'False', ',', 'inputNoise', '=', 'None', ')', ':', '# Set up plotting', 'if', 'self', '.', 'plotting', ':', 'self', '.', 'fig', '=', 'plt', '.', 'figure', '(', ')', 'self', '.', 'ax1', '=', 'self', '.', 'fig', '.', 'add_subplot', '(', '411', ')', 'self', '.', 'ax2', '=', 'self', '.', 'fig', '.', 'add_subplot', '(', '412', ')', 'self', '.', 'ax3', '=', 'self', '.', 'fig', '.', 'add_subplot', '(', '413', ')', 'self', '.', 'ax4', '=', 'self', '.', 'fig', '.', 'add_subplot', '(', '414', ')', 'plt', '.', 'tight_layout', '(', ')', 'plt', '.', 'ion', '(', ')', 'self', '.', 'fig', '.', 'show', '(', ')', 'self', '.', 'fig', '.', 'canvas', '.', 'draw', '(', ')', 'mouse', '=', 'plt', '.', 'imread', '(', 'os', '.', 'path', '.', 'dirname', '(', 'os', '.', 'path', '.', 'realpath', '(', '__file__', ')', ')', '+', '"/mouse_graphic.png"', ')', 'self', '.', 'ax1', '.', 'set_xlabel', '(', '"Excitatory population activity"', ')', 'self', '.', 'ax2', '.', 'set_xlabel', '(', '"Inhibitory population activity"', ')', 'self', '.', 'ax3', '.', 'set_xlabel', '(', '"Movement in cells"', ')', 'self', '.', 'ax3', '.', 'set_ylabel', '(', '"Cost"', ')', 'self', '.', 'ax4', '.', 'set_xlabel', '(', '"Location"', ')', 'plt', '.', 'tight_layout', '(', ')', 'if', 'dt', 'is', 'None', ':', 'oldDt', '=', 'self', '.', 'dt', 'else', ':', 'oldDt', '=', 'self', '.', 'dt', 'self', '.', 'dt', '=', 'dt', '# Simulate for a second to get nice starting activation bumps.', '# Turn plotting off so as not to confuse the viewer', 'oldPlotting', '=', 'self', '.', 'plotting', 'self', '.', 'plotting', '=', 'False', 'self', '.', 'simulate', '(', '1', ',', '1', ',', '1', ',', '0', ',', 'envelope', '=', 'envelope', ',', 'inputNoise', '=', 'None', ')', 'self', '.', 'plotting', '=', 'oldPlotting', 'estimatedVelocities', '=', '[', ']', 'trueVelocities', '=', '[', ']', 'times', '=', 'np', '.', 'arange', '(', '0', ',', 'time', ',', 'self', '.', 'dt', ')', 'if', 'trajectory', 'is', 'None', ':', '# Sum together two different sinusoidals for a more interesting path.', 'trajectory', '=', '(', 'np', '.', 'sin', '(', '(', '-', 'times', '*', 'np', '.', 'pi', '/', '10', '-', 'np', '.', 'pi', '/', '2.', ')', ')', '+', '1', ')', '*', '2.5', 'trajectory', '+=', '(', 'np', '.', 'cos', '(', '(', '-', 'times', '*', 'np', '.', 'pi', '/', '3', '-', 'np', '.', 'pi', '/', '2.', ')', ')', '+', '1', ')', '*', '.75', 'velocities', '=', 'np', '.', 'diff', '(', 'trajectory', ')', '/', 'self', '.', 'dt', 'oldActivations', '=', 'copy', '.', 'copy', '(', 'self', '.', 'activationsI', ')', 'oldX', '=', 'trajectory', '[', '0', ']', 'for', 'i', ',', 't', 'in', 'enumerate', '(', 'times', '[', ':', '-', '1', ']', ')', ':', 'v', '=', 'velocities', '[', 'i', ']', 'x', '=', 'trajectory', '[', 'i', ']', 'feedforwardInputI', '=', 'np', '.', 'ones', '(', 'self', '.', 'activationsI', '.', 'shape', ')', 'feedforwardInputE', '=', 'np', '.', 'ones', '(', 'self', '.', 'activationsEL', '.', 'shape', ')', 'if', 'inputNoise', 'is', 'not', 'None', ':', 'noisesI', '=', 'np', '.', 'random', '.', 'random_sample', '(', 'feedforwardInputI', '.', 'shape', ')', '*', 'inputNoise', 'noisesE', '=', 'np', '.', 'random', '.', 'random_sample', '(', 'feedforwardInputE', '.', 'shape', ')', '*', 'inputNoise', 'else', ':', 'noisesE', '=', '1.', 'noisesI', '=', '1.', 'self', '.', 'update', '(', 
'feedforwardInputI', '*', 'noisesI', ',', 'feedforwardInputE', '*', 'noisesE', ',', 'v', ',', 'True', ',', 'envelope', '=', 'envelope', ')', 'estimationTime', '=', 'np', '.', 'abs', '(', 'np', '.', 'mod', '(', 't', ',', 'ESTIMATION_INTERVAL', ')', ')', 'if', 'estimationTime', '<=', '0.00001', 'or', 'np', '.', 'abs', '(', 'estimationTime', '-', 'ESTIMATION_INTERVAL', ')', '<=', '0.00001', ':', 'rotations', '=', '[', 'np', '.', 'sum', '(', 'np', '.', 'abs', '(', 'np', '.', 'roll', '(', 'oldActivations', ',', 'i', ')', '-', 'self', '.', 'activationsI', ')', ')', 'for', 'i', 'in', 'range', '(', '-', '10', ',', '11', ',', '1', ')', ']', 'shift', '=', 'np', '.', 'argmin', '(', 'rotations', ')', '-', '10', 'trueVelocities', '.', 'append', '(', 'x', '-', 'oldX', ')', 'oldX', '=', 'x', 'oldActivations', '=', 'copy', '.', 'copy', '(', 'self', '.', 'activationsI', ')', 'estimatedVelocities', '.', 'append', '(', 'shift', ')', 'if', 'self', '.', 'plotting', ':', 'plotTime', '=', 'np', '.', 'abs', '(', 'np', '.', 'mod', '(', 't', ',', 'PLOT_INTERVAL', ')', ')', 'if', 'plotTime', '<=', '0.00001', 'or', 'np', '.', 'abs', '(', 'plotTime', '-', 'PLOT_INTERVAL', ')', '<=', '0.00001', ':', 'self', '.', 'ax3', '.', 'clear', '(', ')', 'self', '.', 'ax3', '.', 'plot', '(', 'np', '.', 'arange', '(', '-', 'len', '(', 'rotations', ')', '/', '2', '+', '1', ',', 'len', '(', 'rotations', ')', '/', '2', '+', '1', ',', '1', ')', ',', 'rotations', ',', 'color', '=', '"g"', ',', 'label', '=', '"Shift"', ')', 'self', '.', 'ax3', '.', 'legend', '(', 'loc', '=', '"best"', ')', 'self', '.', 'ax3', '.', 'set_xlabel', '(', '"Movement in cells"', ')', 'self', '.', 'ax3', '.', 'set_ylabel', '(', '"Cost"', ')', 'self', '.', 'ax3', '.', 'axvline', '(', 'x', '=', 'shift', ')', 'self', '.', 'ax4', '.', 'clear', '(', ')', 'self', '.', 'ax4', '.', 'set_xlim', '(', 'np', '.', 'amin', '(', 'trajectory', ')', ',', 'np', '.', 'amax', '(', 'trajectory', ')', ')', 'self', '.', 'ax4', '.', 'set_ylim', '(', '0', ',', '1', ')', 'mouse_bound', '=', '(', 'x', '-', '0.25', '*', 'np', '.', 'sign', '(', 'v', ')', ',', 'x', '+', '0.25', '*', 'np', '.', 'sign', '(', 'v', ')', ',', '.05', ',', '.55', ')', 'self', '.', 'ax4', '.', 'imshow', '(', 'mouse', ',', 'aspect', '=', "'auto'", ',', 'extent', '=', 'mouse_bound', ',', 'zorder', '=', '-', '1', ')', 'self', '.', 'ax4', '.', 'set_xlabel', '(', '"Location"', ')', 'self', '.', 'ax4', '.', 'axes', '.', 'get_yaxis', '(', ')', '.', 'set_visible', '(', 'False', ')', 'self', '.', 'fig', '.', 'canvas', '.', 'draw', '(', ')', 'self', '.', 'plotActivation', '(', 'time', '=', 't', ',', 'velocity', '=', 'v', ',', 'boosting', '=', 'False', ')', 'self', '.', 'dt', '=', 'oldDt', 'return', '(', 'np', '.', 'asarray', '(', 'trueVelocities', ')', ',', 'np', '.', 'asarray', '(', 'estimatedVelocities', ')', ')'] | Calculate the error of our path integration, relative to an ideal module.
To do this, we track the movement of an individual bump
Note that the network must be trained before this is done.
:param time: How long to simulate for in seconds. We recommend using a
small value, e.g. ~10s.
:param trajectory: An optional trajectory that specifies how the network moves.
:param inputNoise: Whether or not to apply noise, and how much.
:return: A tuple of the true trajectory and the inferred trajectory. | ['Calculate', 'the', 'error', 'of', 'our', 'path', 'integration', 'relative', 'to', 'an', 'ideal', 'module', '.', 'To', 'do', 'this', 'we', 'track', 'the', 'movement', 'of', 'an', 'individual', 'bump'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/grid_cell_learning/DynamicCAN.py#L298-L421 |
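A hypothetical driver for the calculatePathIntegrationError method shown above; `can` stands for an already-trained Dynamic1DCAN instance (its constructor is not shown in this row) and every argument value is illustrative only:
import numpy as np
# Short simulations (~10 s) are recommended by the docstring; inputNoise and
# envelope are optional, and the trajectory defaults to the built-in sinusoids.
true_v, estimated_v = can.calculatePathIntegrationError(
    time=10, inputNoise=0.05, envelope=False)
# estimated_v holds the bump shift (in cells) measured once per
# ESTIMATION_INTERVAL; comparing it with true_v quantifies the error, e.g.:
print(np.corrcoef(true_v, estimated_v)[0, 1])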
9,198 | wtsi-hgi/gitlab-build-variables | gitlabbuildvariables/executables/gitlab_update_variables.py | _parse_args | def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig:
"""
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
"""
parser = argparse.ArgumentParser(
prog="gitlab-update-variables", description="Tool for setting a GitLab project's build variables")
add_common_arguments(parser)
parser.add_argument("config_location", type=str, help="Location of the configuration file")
parser.add_argument("--setting-repository", dest="setting_repository", nargs="+", type=str,
help="Directory from which variable settings groups may be sourced")
parser.add_argument("--default-setting-extension", dest="default_setting_extensions",nargs="+", type=str,
help="Extensions to try adding to the variable to source location if it does not exist")
arguments = parser.parse_args(args)
return _UpdateArgumentsRunConfig(
arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions,
url=arguments.url, token=arguments.token, debug=arguments.debug) | python | def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig:
"""
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
"""
parser = argparse.ArgumentParser(
prog="gitlab-update-variables", description="Tool for setting a GitLab project's build variables")
add_common_arguments(parser)
parser.add_argument("config_location", type=str, help="Location of the configuration file")
parser.add_argument("--setting-repository", dest="setting_repository", nargs="+", type=str,
help="Directory from which variable settings groups may be sourced")
parser.add_argument("--default-setting-extension", dest="default_setting_extensions",nargs="+", type=str,
help="Extensions to try adding to the variable to source location if it does not exist")
arguments = parser.parse_args(args)
return _UpdateArgumentsRunConfig(
arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions,
url=arguments.url, token=arguments.token, debug=arguments.debug) | ['def', '_parse_args', '(', 'args', ':', 'List', '[', 'str', ']', ')', '->', '_UpdateArgumentsRunConfig', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'prog', '=', '"gitlab-update-variables"', ',', 'description', '=', '"Tool for setting a GitLab project\'s build variables"', ')', 'add_common_arguments', '(', 'parser', ')', 'parser', '.', 'add_argument', '(', '"config_location"', ',', 'type', '=', 'str', ',', 'help', '=', '"Location of the configuration file"', ')', 'parser', '.', 'add_argument', '(', '"--setting-repository"', ',', 'dest', '=', '"setting_repository"', ',', 'nargs', '=', '"+"', ',', 'type', '=', 'str', ',', 'help', '=', '"Directory from which variable settings groups may be sourced"', ')', 'parser', '.', 'add_argument', '(', '"--default-setting-extension"', ',', 'dest', '=', '"default_setting_extensions"', ',', 'nargs', '=', '"+"', ',', 'type', '=', 'str', ',', 'help', '=', '"Extensions to try adding to the variable to source location if it does not exist"', ')', 'arguments', '=', 'parser', '.', 'parse_args', '(', 'args', ')', 'return', '_UpdateArgumentsRunConfig', '(', 'arguments', '.', 'config_location', ',', 'arguments', '.', 'setting_repository', ',', 'arguments', '.', 'default_setting_extensions', ',', 'url', '=', 'arguments', '.', 'url', ',', 'token', '=', 'arguments', '.', 'token', ',', 'debug', '=', 'arguments', '.', 'debug', ')'] | Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments | ['Parses', 'the', 'given', 'CLI', 'arguments', 'to', 'get', 'a', 'run', 'configuration', '.', ':', 'param', 'args', ':', 'CLI', 'arguments', ':', 'return', ':', 'run', 'configuration', 'derived', 'from', 'the', 'given', 'CLI', 'arguments'] | train | https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_update_variables.py#L24-L42 |
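An illustrative call of the private _parse_args helper documented above; every value is invented, and the --url/--token flags are assumed to be registered by add_common_arguments (their use in the function body implies that):
from gitlabbuildvariables.executables.gitlab_update_variables import _parse_args
run_config = _parse_args([
    "--url", "https://gitlab.example.com",        # hypothetical GitLab server
    "--token", "example-api-token",               # hypothetical access token
    "--setting-repository", "/srv/variable-settings",
    "--default-setting-extension", "json",
    "my-project-variables.json",                  # the config_location positional
])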
9,199 | mmp2/megaman | megaman/relaxation/riemannian_relaxation.py | RiemannianRelaxation.calc_loss | def calc_loss(self, embedding):
"""Helper function to calculate rieman loss given new embedding"""
Hnew = self.compute_dual_rmetric(Ynew=embedding)
return self.rieman_loss(Hnew=Hnew) | python | def calc_loss(self, embedding):
"""Helper function to calculate rieman loss given new embedding"""
Hnew = self.compute_dual_rmetric(Ynew=embedding)
return self.rieman_loss(Hnew=Hnew) | ['def', 'calc_loss', '(', 'self', ',', 'embedding', ')', ':', 'Hnew', '=', 'self', '.', 'compute_dual_rmetric', '(', 'Ynew', '=', 'embedding', ')', 'return', 'self', '.', 'rieman_loss', '(', 'Hnew', '=', 'Hnew', ')'] | Helper function to calculate rieman loss given new embedding | ['Helper', 'function', 'to', 'calculate', 'rieman', 'loss', 'given', 'new', 'embedding'] | train | https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/riemannian_relaxation.py#L98-L101 |