| Column | Type | Value range |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string | 1 class (python) |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string | 1 class (train) |
| func_code_url | string | lengths 91 to 315 |

Sample rows 800 to 820 follow. Each row is shown as: index | repository_name | func_path_in_repository | func_name | whole_func_string (the full source, including its docstring), followed by a trailing line with language | split_name | func_code_url.
800 | etingof/pysnmp | pysnmp/smi/rfc1902.py | ObjectIdentity.getMibSymbol | def getMibSymbol(self):
"""Returns MIB variable symbolic identification.
Returns
-------
str
MIB module name
str
MIB variable symbolic name
: :py:class:`~pysnmp.proto.rfc1902.ObjectName`
class instance representing MIB variable instance index.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Examples
--------
>>> objectIdentity = ObjectIdentity('1.3.6.1.2.1.1.1.0')
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getMibSymbol()
('SNMPv2-MIB', 'sysDescr', (0,))
>>>
"""
if self._state & self.ST_CLEAN:
return self._modName, self._symName, self._indices
else:
raise SmiError(
            '%s object not fully initialized' % self.__class__.__name__)
| python | train | https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/rfc1902.py#L98-L128 |
801 | bristosoft/financial | finance.py | fvga | def fvga(a, i, g, n):
""" This function is for the future value of an annuity
with growth rate. It is the future value of a growing
stream of periodic investments.
a = Periodic Investment (1000)
i = interest rate as decimal (.0675)
g = the growth rate (.05)
n = the number of compound periods (20)
Example: fv(1000, .0675, .05, 20)
"""
    return a * ((((1 + i) ** n) - (((1 + g) ** n)))/(i - g))
| python | train | https://github.com/bristosoft/financial/blob/382c4fef610d67777d7109d9d0ae230ab67ca20f/finance.py#L63-L73 |
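A minimal usage sketch for the function above, evaluated at the docstring's own sample inputs. The `from finance import fvga` import path is an assumption based on this row's file name.

```python
# Hypothetical usage sketch for fvga(); assumes finance.py from the row above
# is importable on the local path.
from finance import fvga

# Invest 1000 per period at 6.75% interest, with contributions growing 5% per
# period, over 20 compounding periods (the docstring's sample inputs).
future_value = fvga(1000, 0.0675, 0.05, 20)
print(round(future_value, 2))
```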
802 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_param.py | ParamModule.idle_task | def idle_task(self):
'''handle missing parameters'''
self.check_new_target_system()
sysid = self.get_sysid()
self.pstate[sysid].vehicle_name = self.vehicle_name
        self.pstate[sysid].fetch_check(self.master)
| python | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_param.py#L418-L423 |
803 | Erotemic/utool | utool/util_dict.py | dict_subset | def dict_subset(dict_, keys, default=util_const.NoParam):
r"""
Args:
dict_ (dict):
keys (list):
Returns:
dict: subset dictionary
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}
>>> keys = ['K', 'dcvs_clip_max']
>>> d = tuple([])
>>> subdict_ = dict_subset(dict_, keys)
>>> result = ut.repr4(subdict_, sorted_=True, newlines=False)
>>> print(result)
{'K': 3, 'dcvs_clip_max': 0.2}
"""
if default is util_const.NoParam:
items = dict_take(dict_, keys)
else:
items = dict_take(dict_, keys, default)
subdict_ = OrderedDict(list(zip(keys, items)))
#item_sublist = [(key, dict_[key]) for key in keys]
##subdict_ = type(dict_)(item_sublist) # maintain old dict format
#subdict_ = OrderedDict(item_sublist)
    return subdict_
| python | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L841-L870 |
804 | bcbio/bcbio-nextgen | bcbio/variation/germline.py | fix_germline_samplename | def fix_germline_samplename(in_file, sample_name, data):
"""Replace germline sample names, originally from normal BAM file.
"""
out_file = "%s-fixnames%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
sample_file = "%s-samples.txt" % utils.splitext_plus(tx_out_file)[0]
with open(sample_file, "w") as out_handle:
out_handle.write("%s\n" % sample_name)
cmd = ("bcftools reheader -s {sample_file} {in_file} -o {tx_out_file}")
do.run(cmd.format(**locals()), "Fix germline samplename: %s" % sample_name)
    return vcfutils.bgzip_and_index(out_file, data["config"])
| python | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L133-L144 |
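The core of the function above is a single `bcftools reheader` call; a standalone sketch of that same command, outside bcbio's transaction and indexing helpers, might look like the following. The file names and sample name are placeholders.

```python
# Standalone sketch of the bcftools reheader step used above.
# "input.vcf.gz", "fixed.vcf.gz", and the sample name are placeholder values.
import subprocess

sample_name = "TUMOR_GERMLINE"  # hypothetical replacement sample name
with open("samples.txt", "w") as out_handle:
    out_handle.write("%s\n" % sample_name)

subprocess.check_call([
    "bcftools", "reheader",
    "-s", "samples.txt",   # one sample name per line
    "input.vcf.gz",
    "-o", "fixed.vcf.gz",
])
```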
805 | Infinidat/infi.clickhouse_orm | src/infi/clickhouse_orm/query.py | AggregateQuerySet.group_by | def group_by(self, *args):
"""
This method lets you specify the grouping fields explicitly. The `args` must
be names of grouping fields or calculated fields that this queryset was
created with.
"""
for name in args:
assert name in self._fields or name in self._calculated_fields, \
'Cannot group by `%s` since it is not included in the query' % name
qs = copy(self)
qs._grouping_fields = args
        return qs
| python | train | https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/query.py#L554-L565 |
806 | ageitgey/face_recognition | face_recognition/api.py | face_encodings | def face_encodings(face_image, known_face_locations=None, num_jitters=1):
"""
Given an image, return the 128-dimension face encoding for each face in the image.
:param face_image: The image that contains one or more faces
:param known_face_locations: Optional - the bounding boxes of each face if you already know them.
:param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
:return: A list of 128-dimensional face encodings (one for each face in the image)
"""
raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small")
    return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
| python | train | https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L203-L213 |
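A small end-to-end sketch of how `face_encodings` is typically combined with the rest of the `face_recognition` API; the image file names are placeholders.

```python
# Sketch: encode a known face and compare it against faces found in another image.
# "known.jpg" and "group_photo.jpg" are placeholder file names.
import face_recognition

known_image = face_recognition.load_image_file("known.jpg")
unknown_image = face_recognition.load_image_file("group_photo.jpg")

known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encodings = face_recognition.face_encodings(unknown_image)

for encoding in unknown_encodings:
    # compare_faces returns one boolean per known encoding passed in.
    match = face_recognition.compare_faces([known_encoding], encoding)[0]
    print("match" if match else "no match")
```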
807 | NoneGG/aredis | aredis/commands/streams.py | StreamsCommandMixin.xack | async def xack(self, name: str, group: str, stream_id: str) -> int:
"""
[NOTICE] Not officially released yet
XACK is the command that allows a consumer to mark a pending message as correctly processed.
:param name: name of the stream
:param group: name of the consumer group
:param stream_id: id of the entry the consumer wants to mark
:return: number of entry marked
"""
        return await self.execute_command('XACK', name, group, stream_id)
| python | train | https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/streams.py#L353-L364 |
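A hedged sketch of acknowledging a stream entry with the async client above. The stream name, group name, and entry id are placeholders, and the client construction follows aredis's `StrictRedis` interface.

```python
# Sketch: acknowledge one pending entry in a consumer group with aredis.
# 'events', 'workers', and the entry id below are hypothetical examples.
import asyncio
import aredis

async def main():
    client = aredis.StrictRedis(host="127.0.0.1", port=6379)
    acked = await client.xack("events", "workers", "1526569495631-0")
    print(acked)  # number of entries marked as processed

asyncio.run(main())
```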
808 | StackStorm/pybind | pybind/slxos/v17s_1_02/brocade_mpls_rpc/__init__.py | brocade_mpls._set_mpls_adjust_bandwidth_lsp | def _set_mpls_adjust_bandwidth_lsp(self, v, load=False):
"""
Setter method for mpls_adjust_bandwidth_lsp, mapped from YANG variable /brocade_mpls_rpc/mpls_adjust_bandwidth_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_adjust_bandwidth_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_adjust_bandwidth_lsp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mpls_adjust_bandwidth_lsp.mpls_adjust_bandwidth_lsp, is_leaf=True, yang_name="mpls-adjust-bandwidth-lsp", rest_name="mpls-adjust-bandwidth-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsAdjustBandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_adjust_bandwidth_lsp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=mpls_adjust_bandwidth_lsp.mpls_adjust_bandwidth_lsp, is_leaf=True, yang_name="mpls-adjust-bandwidth-lsp", rest_name="mpls-adjust-bandwidth-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsAdjustBandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__mpls_adjust_bandwidth_lsp = t
if hasattr(self, '_set'):
      self._set()
| python | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/brocade_mpls_rpc/__init__.py#L2934-L2955 |
809 | rgmining/ria | ria/one.py | BipartiteGraph.update | def update(self):
"""Update reviewers' anomalous scores and products' summaries.
Returns:
maximum absolute difference between old summary and new one, and
old anomalous score and new one.
"""
if self.updated:
return 0
res = super(BipartiteGraph, self).update()
self.updated = True
        return res
| python | train | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/one.py#L43-L55 |
r"""Execute operations, returns a string ( '' if the result is None, join='').
This works like :attr:`j` except it returns an empty string if the execution
result is None.
Examples:
>>> echo(None).je
''
"""
text = self._process()
        return self.make_string(text,join_str='',return_if_none='')
| python | train | https://github.com/elapouya/python-textops/blob/5c63b9074a1acd8dd108725f1b370f6684c941ef/textops/base.py#L311-L323 |
811 | RI-imaging/qpformat | qpformat/file_formats/series_zip_tif_phasics.py | SeriesZipTifPhasics.verify | def verify(path):
"""Verify that `path` is a zip file with Phasics TIFF files"""
valid = False
try:
zf = zipfile.ZipFile(path)
except (zipfile.BadZipfile, IsADirectoryError):
pass
else:
names = sorted(zf.namelist())
names = [nn for nn in names if nn.endswith(".tif")]
names = [nn for nn in names if nn.startswith("SID PHA")]
for name in names:
with zf.open(name) as pt:
fd = io.BytesIO(pt.read())
if SingleTifPhasics.verify(fd):
valid = True
break
zf.close()
    return valid
| python | train | https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/series_zip_tif_phasics.py#L74-L92 |
"""Return a copy of this element with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newelem : `DiscretizedSpaceElement`
Version of this element with given data type.
"""
        return self.space.astype(dtype).element(self.tensor.astype(dtype))
| python | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L345-L361 |
813 | piglei/uwsgi-sloth | uwsgi_sloth/commands/analyze.py | load_subcommand | def load_subcommand(subparsers):
"""Load this subcommand
"""
parser_analyze = subparsers.add_parser('analyze', help='Analyze uwsgi log to get report')
parser_analyze.add_argument('-f', '--filepath', type=argparse.FileType('r'), dest='filepath',
help='Path of uwsgi log file', required=True)
parser_analyze.add_argument('--output', dest="output", type=argparse.FileType('w'), default=sys.stdout,
help='HTML report file path')
parser_analyze.add_argument('--min-msecs', dest="min_msecs", type=int, default=200,
help='Request serve time lower than this value will not be counted, default: 200')
parser_analyze.add_argument('--domain', dest="domain", type=str, required=False,
help='Make url in report become a hyper-link by settings a domain')
parser_analyze.add_argument('--url-file', dest="url_file", type=argparse.FileType('r'), required=False,
help='Customized url rules in regular expression')
parser_analyze.add_argument('--limit-url-groups', dest="limit_url_groups", type=int, required=False,
default=LIMIT_URL_GROUPS, help='Number of url groups considered, default: 200')
parser_analyze.add_argument('--limit-per-url-group', dest="limit_per_url_group", type=int,
required=False, default=LIMIT_PER_URL_GROUP,
help='Number of urls per group considered, default: 20')
    parser_analyze.set_defaults(func=analyze)
| python | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/commands/analyze.py#L48-L67 |
"""
Add a field label, errors, and a description (if it exists) to
a field.
"""
@wraps(f)
def wrapped(self, field, *args, **kwargs):
html = "{label}{errors}{original}<small>{description}</small>".format(
label=field.label(class_='control-label'),
original=f(self, field, *args, **kwargs),
errors=render_field_errors(field) or '',
description=render_field_description(field)
)
return HTMLString(html)
    return wrapped
| python | train | https://github.com/nickw444/wtforms-webwidgets/blob/88f224b68c0b0f4f5c97de39fe1428b96e12f8db/wtforms_webwidgets/bootstrap/util.py#L53-L67 |
815 | MAVENSDC/cdflib | cdflib/cdfwrite.py | CDF._md5_compute | def _md5_compute(self, f):
'''
Computes the checksum of the file
'''
md5 = hashlib.md5()
block_size = 16384
f.seek(0, 2)
remaining = f.tell()
f.seek(0)
while (remaining > block_size):
data = f.read(block_size)
remaining = remaining - block_size
md5.update(data)
if remaining > 0:
data = f.read(remaining)
md5.update(data)
        return md5.digest()
| python | train | https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2424-L2443 |
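The method above is the standard block-wise hashing idiom. An equivalent standalone sketch over a file path, using only the standard library and reading until EOF instead of pre-computing the remaining byte count, is:

```python
# Standalone sketch of block-wise MD5 hashing (same idea as _md5_compute above).
import hashlib

def md5_of_file(path, block_size=16384):
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        # Read fixed-size chunks until read() returns an empty bytes object.
        for chunk in iter(lambda: f.read(block_size), b""):
            md5.update(chunk)
    return md5.digest()
```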
816 | delph-in/pydelphin | delphin/lib/pegre.py | choice | def choice(*es):
"""
Create a PEG function to match an ordered choice.
"""
msg = 'Expected one of: {}'.format(', '.join(map(repr, es)))
def match_choice(s, grm=None, pos=0):
errs = []
for e in es:
try:
return e(s, grm, pos)
except PegreError as ex:
errs.append((ex.message, ex.position))
if errs:
raise PegreChoiceError(errs, pos)
    return match_choice
| python | train | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/lib/pegre.py#L184-L198 |
817 | saltstack/salt | salt/modules/selinux.py | fcontext_add_or_delete_policy | def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
.. versionadded:: 2017.7.0
Adds or deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
.. warning::
Use :mod:`selinux.fcontext_add_policy()<salt.modules.selinux.fcontext_add_policy>`,
or :mod:`selinux.fcontext_delete_policy()<salt.modules.selinux.fcontext_delete_policy>`.
.. deprecated:: 2019.2.0
action
The action to perform. Either ``add`` or ``delete``.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
.. code-block:: bash
salt '*' selinux.fcontext_add_or_delete_policy add my-policy
'''
salt.utils.versions.warn_until(
'Sodium',
'The \'selinux.fcontext_add_or_delete_policy\' module has been deprecated. Please use the '
'\'selinux.fcontext_add_policy\' and \'selinux.fcontext_delete_policy\' modules instead. '
'Support for the \'selinux.fcontext_add_or_delete_policy\' module will be removed in Salt '
'{version}.'
)
    return _fcontext_add_or_delete_policy(action, name, filetype, sel_type, sel_user, sel_level)
| python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/selinux.py#L584-L637 |
"""
Compiles temporary dictionaries for metadata. Adds a new entry to an existing dictionary.
:param dict d:
:param str key:
:param any value:
:return dict:
"""
if not value:
d[key] = None
elif len(value) == 1:
d[key] = value[0]
else:
d[key] = value
    return d
| python | train | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/excel.py#L1395-L1409 |
"""his command blocks the current client until all the previous write
commands are successfully transferred and acknowledged by at least the
specified number of slaves. If the timeout, specified in milliseconds,
is reached, the command returns even if the specified number of slaves
were not yet reached.
The command will always return the number of slaves that acknowledged
the write commands sent before the :meth:`~tredis.RedisClient.wait`
command, both in the case where the specified number of slaves are
reached, or when the timeout is reached.
.. note::
**Time complexity**: ``O(1)``
:param int num_slaves: Number of slaves to acknowledge previous writes
:param int timeout: Timeout in milliseconds
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
command = [
b'WAIT',
ascii(num_slaves).encode('ascii'),
ascii(timeout).encode('ascii')
]
        return self._execute(command)
| python | train | https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/keys.py#L659-L686 |
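A hedged usage sketch for `wait`, written as a Tornado coroutine since tredis is a Tornado client; the docstring above refers to the client class as `tredis.RedisClient`, but the construction and connection defaults used here are assumptions.

```python
# Sketch: block until at least one replica acknowledges prior writes,
# waiting at most 500 ms. Connection settings are assumed defaults.
from tornado import gen, ioloop
import tredis

@gen.coroutine
def demo():
    client = tredis.RedisClient()          # assumed default host/port
    yield client.set(b'key', b'value')
    acked = yield client.wait(1, 500)      # 1 replica, 500 ms timeout
    print('replicas acknowledged:', acked)

ioloop.IOLoop.current().run_sync(demo)
```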
820 | saltstack/salt | salt/utils/verify.py | verify_env | def verify_env(
dirs,
user,
permissive=False,
pki_dir='',
skip_extra=False,
root_dir=ROOT_DIR):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if salt.utils.platform.is_windows():
return win_verify_env(root_dir,
dirs,
permissive=permissive,
skip_extra=skip_extra)
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.user.get_gid_list(user, include_default=False)
except KeyError:
err = ('Failed to prepare the Salt environment for user '
'{0}. The user is not available.\n').format(user)
sys.stderr.write(err)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
for dir_ in dirs:
if not dir_:
continue
if not os.path.isdir(dir_):
try:
with salt.utils.files.set_umask(0o022):
os.makedirs(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
os.chown(dir_, uid, gid)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))
sys.exit(err.errno)
mode = os.stat(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
fmode = os.stat(dir_)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
# Allow the directory to be owned by any group root
# belongs to if we say it's ok to be permissive
pass
else:
# chown the file for the new user
os.chown(dir_, uid, gid)
for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
fsubdir = os.path.join(dir_, subdir)
if '{0}jobs'.format(os.path.sep) in fsubdir:
continue
for root, dirs, files in salt.utils.path.os_walk(fsubdir):
for name in files:
if name.startswith('.'):
continue
path = os.path.join(root, name)
try:
fmode = os.stat(path)
except (IOError, OSError):
pass
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
for name in dirs:
path = os.path.join(root, name)
fmode = os.stat(path)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
# Allow the pki dir to be 700 or 750, but nothing else.
# This prevents other users from writing out keys, while
# allowing the use-case of 3rd-party software (like django)
# to read in what it needs to integrate.
#
# If the permissions aren't correct, default to the more secure 700.
# If acls are enabled, the pki_dir needs to remain readable, this
# is still secure because the private keys are still only readable
# by the user running the master
if dir_ == pki_dir:
smode = stat.S_IMODE(mode.st_mode)
if smode != 448 and smode != 488:
if os.access(dir_, os.W_OK):
os.chmod(dir_, 448)
else:
msg = 'Unable to securely set the permissions of "{0}".'
msg = msg.format(dir_)
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write("CRITICAL: {0}\n".format(msg))
if skip_extra is False:
# Run the extra verification checks
        zmq_version()
| python | train |
'path', ')', 'if', 'fmode', '.', 'st_uid', '!=', 'uid', 'or', 'fmode', '.', 'st_gid', '!=', 'gid', ':', 'if', 'permissive', 'and', 'fmode', '.', 'st_gid', 'in', 'groups', ':', 'pass', 'else', ':', '# chown the file for the new user', 'os', '.', 'chown', '(', 'path', ',', 'uid', ',', 'gid', ')', '# Allow the pki dir to be 700 or 750, but nothing else.', '# This prevents other users from writing out keys, while', '# allowing the use-case of 3rd-party software (like django)', '# to read in what it needs to integrate.', '#', "# If the permissions aren't correct, default to the more secure 700.", '# If acls are enabled, the pki_dir needs to remain readable, this', '# is still secure because the private keys are still only readable', '# by the user running the master', 'if', 'dir_', '==', 'pki_dir', ':', 'smode', '=', 'stat', '.', 'S_IMODE', '(', 'mode', '.', 'st_mode', ')', 'if', 'smode', '!=', '448', 'and', 'smode', '!=', '488', ':', 'if', 'os', '.', 'access', '(', 'dir_', ',', 'os', '.', 'W_OK', ')', ':', 'os', '.', 'chmod', '(', 'dir_', ',', '448', ')', 'else', ':', 'msg', '=', '\'Unable to securely set the permissions of "{0}".\'', 'msg', '=', 'msg', '.', 'format', '(', 'dir_', ')', 'if', 'is_console_configured', '(', ')', ':', 'log', '.', 'critical', '(', 'msg', ')', 'else', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"CRITICAL: {0}\\n"', '.', 'format', '(', 'msg', ')', ')', 'if', 'skip_extra', 'is', 'False', ':', '# Run the extra verification checks', 'zmq_version', '(', ')'] | Verify that the named directories are in place and that the environment
can shake the salt | ['Verify', 'that', 'the', 'named', 'directories', 'are', 'in', 'place', 'and', 'that', 'the', 'environment', 'can', 'shake', 'the', 'salt'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/verify.py#L200-L307 |
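The pki_dir branch in verify_env above compares the mode against the decimal literals 448 and 488, which are simply the octal permissions 0o700 and 0o750. A small standalone sketch (independent of Salt; the path argument is illustrative) shows the same check written with octal literals:

import os
import stat

assert 448 == 0o700 and 488 == 0o750  # rwx------ and rwxr-x---

def pki_mode_ok(pki_dir):
    # Same test verify_env applies: only 0o700 or 0o750 are accepted.
    smode = stat.S_IMODE(os.stat(pki_dir).st_mode)
    return smode in (0o700, 0o750)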
821 | joshspeagle/dynesty | dynesty/bounding.py | _friends_bootstrap_radius | def _friends_bootstrap_radius(args):
"""Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
bootstrapping."""
# Unzipping.
points, ftype = args
rstate = np.random
# Resampling.
npoints, ndim = points.shape
idxs = rstate.randint(npoints, size=npoints) # resample
idx_in = np.unique(idxs) # selected objects
sel = np.ones(npoints, dtype='bool')
sel[idx_in] = False
idx_out = np.where(sel)[0] # "missing" objects
if len(idx_out) < 2: # edge case
idx_out = np.append(idx_out, [0, 1])
points_in, points_out = points[idx_in], points[idx_out]
# Construct KDTree to enable quick nearest-neighbor lookup for
# our resampled objects.
kdtree = spatial.KDTree(points_in)
if ftype == 'balls':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "radius" of n-sphere).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=2)
elif ftype == 'cubes':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "half-side-length" of n-cube).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf)
# Conservative upper-bound on radius.
dist = max(dists)
return dist | python | def _friends_bootstrap_radius(args):
"""Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
bootstrapping."""
# Unzipping.
points, ftype = args
rstate = np.random
# Resampling.
npoints, ndim = points.shape
idxs = rstate.randint(npoints, size=npoints) # resample
idx_in = np.unique(idxs) # selected objects
sel = np.ones(npoints, dtype='bool')
sel[idx_in] = False
idx_out = np.where(sel)[0] # "missing" objects
if len(idx_out) < 2: # edge case
idx_out = np.append(idx_out, [0, 1])
points_in, points_out = points[idx_in], points[idx_out]
# Construct KDTree to enable quick nearest-neighbor lookup for
# our resampled objects.
kdtree = spatial.KDTree(points_in)
if ftype == 'balls':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "radius" of n-sphere).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=2)
elif ftype == 'cubes':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "half-side-length" of n-cube).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf)
# Conservative upper-bound on radius.
dist = max(dists)
return dist | ['def', '_friends_bootstrap_radius', '(', 'args', ')', ':', '# Unzipping.', 'points', ',', 'ftype', '=', 'args', 'rstate', '=', 'np', '.', 'random', '# Resampling.', 'npoints', ',', 'ndim', '=', 'points', '.', 'shape', 'idxs', '=', 'rstate', '.', 'randint', '(', 'npoints', ',', 'size', '=', 'npoints', ')', '# resample', 'idx_in', '=', 'np', '.', 'unique', '(', 'idxs', ')', '# selected objects', 'sel', '=', 'np', '.', 'ones', '(', 'npoints', ',', 'dtype', '=', "'bool'", ')', 'sel', '[', 'idx_in', ']', '=', 'False', 'idx_out', '=', 'np', '.', 'where', '(', 'sel', ')', '[', '0', ']', '# "missing" objects', 'if', 'len', '(', 'idx_out', ')', '<', '2', ':', '# edge case', 'idx_out', '=', 'np', '.', 'append', '(', 'idx_out', ',', '[', '0', ',', '1', ']', ')', 'points_in', ',', 'points_out', '=', 'points', '[', 'idx_in', ']', ',', 'points', '[', 'idx_out', ']', '# Construct KDTree to enable quick nearest-neighbor lookup for', '# our resampled objects.', 'kdtree', '=', 'spatial', '.', 'KDTree', '(', 'points_in', ')', 'if', 'ftype', '==', "'balls'", ':', '# Compute distances from our "missing" points its closest neighbor', '# among the resampled points using the Euclidean norm', '# (i.e. "radius" of n-sphere).', 'dists', ',', 'ids', '=', 'kdtree', '.', 'query', '(', 'points_out', ',', 'k', '=', '1', ',', 'eps', '=', '0', ',', 'p', '=', '2', ')', 'elif', 'ftype', '==', "'cubes'", ':', '# Compute distances from our "missing" points its closest neighbor', '# among the resampled points using the Euclidean norm', '# (i.e. "half-side-length" of n-cube).', 'dists', ',', 'ids', '=', 'kdtree', '.', 'query', '(', 'points_out', ',', 'k', '=', '1', ',', 'eps', '=', '0', ',', 'p', '=', 'np', '.', 'inf', ')', '# Conservative upper-bound on radius.', 'dist', '=', 'max', '(', 'dists', ')', 'return', 'dist'] | Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
bootstrapping. | ['Internal', 'method', 'used', 'to', 'compute', 'the', 'radius', '(', 'half', '-', 'side', '-', 'length', ')', 'for', 'each', 'ball', '(', 'cube', ')', 'used', 'in', ':', 'class', ':', 'RadFriends', '(', ':', 'class', ':', 'SupFriends', ')', 'using', 'bootstrapping', '.'] | train | https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/bounding.py#L1467-L1505 |
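A minimal sketch of exercising _friends_bootstrap_radius above with a synthetic point cloud; it assumes the private helper is importable from dynesty.bounding and that numpy and scipy are installed:

import numpy as np
from dynesty.bounding import _friends_bootstrap_radius

points = np.random.rand(200, 3)  # synthetic "live points" in the 3-D unit cube
ball_radius = _friends_bootstrap_radius((points, 'balls'))    # Euclidean (p=2) radius
cube_halfside = _friends_bootstrap_radius((points, 'cubes'))  # Chebyshev (p=inf) half-side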
822 | IDSIA/sacred | sacred/utils.py | convert_camel_case_to_snake_case | def convert_camel_case_to_snake_case(name):
"""Convert CamelCase to snake_case."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | python | def convert_camel_case_to_snake_case(name):
"""Convert CamelCase to snake_case."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | ['def', 'convert_camel_case_to_snake_case', '(', 'name', ')', ':', 's1', '=', 're', '.', 'sub', '(', "'(.)([A-Z][a-z]+)'", ',', "r'\\1_\\2'", ',', 'name', ')', 'return', 're', '.', 'sub', '(', "'([a-z0-9])([A-Z])'", ',', "r'\\1_\\2'", ',', 's1', ')', '.', 'lower', '(', ')'] | Convert CamelCase to snake_case. | ['Convert', 'CamelCase', 'to', 'snake_case', '.'] | train | https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/utils.py#L608-L611 |
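The two regex passes above give the usual camel-to-snake behaviour; a couple of worked inputs (the outputs follow directly from the substitutions shown):

from sacred.utils import convert_camel_case_to_snake_case

convert_camel_case_to_snake_case('CamelCase')        # -> 'camel_case'
convert_camel_case_to_snake_case('HTTPServerError')  # -> 'http_server_error'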
823 | rmax/scrapy-redis | src/scrapy_redis/connection.py | get_redis | def get_redis(**kwargs):
"""Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
"""
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url', None)
if url:
return redis_cls.from_url(url, **kwargs)
else:
return redis_cls(**kwargs) | python | def get_redis(**kwargs):
"""Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
"""
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url', None)
if url:
return redis_cls.from_url(url, **kwargs)
else:
return redis_cls(**kwargs) | ['def', 'get_redis', '(', '*', '*', 'kwargs', ')', ':', 'redis_cls', '=', 'kwargs', '.', 'pop', '(', "'redis_cls'", ',', 'defaults', '.', 'REDIS_CLS', ')', 'url', '=', 'kwargs', '.', 'pop', '(', "'url'", ',', 'None', ')', 'if', 'url', ':', 'return', 'redis_cls', '.', 'from_url', '(', 'url', ',', '*', '*', 'kwargs', ')', 'else', ':', 'return', 'redis_cls', '(', '*', '*', 'kwargs', ')'] | Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance. | ['Returns', 'a', 'redis', 'client', 'instance', '.'] | train | https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/connection.py#L67-L90 |
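A usage sketch for get_redis above; the connection details are illustrative, and the redis package must be installed for the default client class:

from scrapy_redis.connection import get_redis

# URL form: the client is created via redis_cls.from_url(...)
server = get_redis(url='redis://localhost:6379/0')

# Keyword form: extra parameters go straight to the client class
server = get_redis(host='localhost', port=6379, db=0)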
824 | aws/aws-encryption-sdk-python | src/aws_encryption_sdk/streaming_client.py | StreamDecryptor.close | def close(self):
"""Closes out the stream."""
_LOGGER.debug("Closing stream")
if not hasattr(self, "footer"):
raise SerializationError("Footer not read")
super(StreamDecryptor, self).close() | python | def close(self):
"""Closes out the stream."""
_LOGGER.debug("Closing stream")
if not hasattr(self, "footer"):
raise SerializationError("Footer not read")
super(StreamDecryptor, self).close() | ['def', 'close', '(', 'self', ')', ':', '_LOGGER', '.', 'debug', '(', '"Closing stream"', ')', 'if', 'not', 'hasattr', '(', 'self', ',', '"footer"', ')', ':', 'raise', 'SerializationError', '(', '"Footer not read"', ')', 'super', '(', 'StreamDecryptor', ',', 'self', ')', '.', 'close', '(', ')'] | Closes out the stream. | ['Closes', 'out', 'the', 'stream', '.'] | train | https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/streaming_client.py#L923-L928 |
825 | horejsek/python-sqlpuzzle | sqlpuzzle/_queries/query.py | Query.has | def has(self, querypart_name, value=None):
"""Returns True if `querypart_name` with `value` is set.
For example you can check if you already used condition by `sql.has('where')`.
If you want to check for more information, for example if that condition
also contain ID, you can do this by `sql.has('where', 'id')`.
"""
querypart = self._queryparts.get(querypart_name)
if not querypart:
return False
if not querypart.is_set:
return False
if value:
return querypart.has(value)
return True | python | def has(self, querypart_name, value=None):
"""Returns True if `querypart_name` with `value` is set.
For example you can check if you already used condition by `sql.has('where')`.
If you want to check for more information, for example if that condition
also contain ID, you can do this by `sql.has('where', 'id')`.
"""
querypart = self._queryparts.get(querypart_name)
if not querypart:
return False
if not querypart.is_set:
return False
if value:
return querypart.has(value)
return True | ['def', 'has', '(', 'self', ',', 'querypart_name', ',', 'value', '=', 'None', ')', ':', 'querypart', '=', 'self', '.', '_queryparts', '.', 'get', '(', 'querypart_name', ')', 'if', 'not', 'querypart', ':', 'return', 'False', 'if', 'not', 'querypart', '.', 'is_set', ':', 'return', 'False', 'if', 'value', ':', 'return', 'querypart', '.', 'has', '(', 'value', ')', 'return', 'True'] | Returns True if `querypart_name` with `value` is set.
For example you can check if you already used condition by `sql.has('where')`.
If you want to check for more information, for example if that condition
also contain ID, you can do this by `sql.has('where', 'id')`. | ['Returns', 'True', 'if', 'querypart_name', 'with', 'value', 'is', 'set', '.'] | train | https://github.com/horejsek/python-sqlpuzzle/blob/d3a42ed1b339b8eafddb8d2c28a3a5832b3998dd/sqlpuzzle/_queries/query.py#L46-L61 |
826 | vertexproject/synapse | synapse/lib/cmdr.py | runItemCmdr | async def runItemCmdr(item, outp=None, **opts):
'''
Create a cmdr for the given item and run the cmd loop.
Example:
runItemCmdr(foo)
'''
cmdr = await getItemCmdr(item, outp=outp, **opts)
await cmdr.runCmdLoop() | python | async def runItemCmdr(item, outp=None, **opts):
'''
Create a cmdr for the given item and run the cmd loop.
Example:
runItemCmdr(foo)
'''
cmdr = await getItemCmdr(item, outp=outp, **opts)
await cmdr.runCmdLoop() | ['async', 'def', 'runItemCmdr', '(', 'item', ',', 'outp', '=', 'None', ',', '*', '*', 'opts', ')', ':', 'cmdr', '=', 'await', 'getItemCmdr', '(', 'item', ',', 'outp', '=', 'outp', ',', '*', '*', 'opts', ')', 'await', 'cmdr', '.', 'runCmdLoop', '(', ')'] | Create a cmdr for the given item and run the cmd loop.
Example:
runItemCmdr(foo) | ['Create', 'a', 'cmdr', 'for', 'the', 'given', 'item', 'and', 'run', 'the', 'cmd', 'loop', '.'] | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/cmdr.py#L41-L51 |
827 | jupyter-widgets/ipywidgets | ipywidgets/widgets/widget_selection.py | _make_options | def _make_options(x):
"""Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
"""
# Check if x is a mapping of labels to values
if isinstance(x, Mapping):
import warnings
warnings.warn("Support for mapping types has been deprecated and will be dropped in a future release.", DeprecationWarning)
return tuple((unicode_type(k), v) for k, v in x.items())
# only iterate once through the options.
xlist = tuple(x)
# Check if x is an iterable of (label, value) pairs
if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist):
return tuple((unicode_type(k), v) for k, v in xlist)
# Otherwise, assume x is an iterable of values
return tuple((unicode_type(i), i) for i in xlist) | python | def _make_options(x):
"""Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
"""
# Check if x is a mapping of labels to values
if isinstance(x, Mapping):
import warnings
warnings.warn("Support for mapping types has been deprecated and will be dropped in a future release.", DeprecationWarning)
return tuple((unicode_type(k), v) for k, v in x.items())
# only iterate once through the options.
xlist = tuple(x)
# Check if x is an iterable of (label, value) pairs
if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist):
return tuple((unicode_type(k), v) for k, v in xlist)
# Otherwise, assume x is an iterable of values
return tuple((unicode_type(i), i) for i in xlist) | ['def', '_make_options', '(', 'x', ')', ':', '# Check if x is a mapping of labels to values', 'if', 'isinstance', '(', 'x', ',', 'Mapping', ')', ':', 'import', 'warnings', 'warnings', '.', 'warn', '(', '"Support for mapping types has been deprecated and will be dropped in a future release."', ',', 'DeprecationWarning', ')', 'return', 'tuple', '(', '(', 'unicode_type', '(', 'k', ')', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'x', '.', 'items', '(', ')', ')', '# only iterate once through the options.', 'xlist', '=', 'tuple', '(', 'x', ')', '# Check if x is an iterable of (label, value) pairs', 'if', 'all', '(', '(', 'isinstance', '(', 'i', ',', '(', 'list', ',', 'tuple', ')', ')', 'and', 'len', '(', 'i', ')', '==', '2', ')', 'for', 'i', 'in', 'xlist', ')', ':', 'return', 'tuple', '(', '(', 'unicode_type', '(', 'k', ')', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'xlist', ')', '# Otherwise, assume x is an iterable of values', 'return', 'tuple', '(', '(', 'unicode_type', '(', 'i', ')', ',', 'i', ')', 'for', 'i', 'in', 'xlist', ')'] | Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated | ['Standardize', 'the', 'options', 'tuple', 'format', '.'] | train | https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget_selection.py#L108-L131 |
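Following the branches above, both accepted input shapes normalise to the same tuple of (label, value) pairs; a short illustration (labels are coerced to strings):

from ipywidgets.widgets.widget_selection import _make_options

_make_options(['red', 'green'])            # (('red', 'red'), ('green', 'green'))
_make_options([('Low', 0), ('High', 10)])  # (('Low', 0), ('High', 10))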
828 | spyder-ide/spyder | spyder/plugins/variableexplorer/widgets/dataframeeditor.py | DataFrameHeaderModel.rowCount | def rowCount(self, index=None):
"""Get number of rows in the header."""
if self.axis == 0:
return max(1, self._shape[0])
else:
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded | python | def rowCount(self, index=None):
"""Get number of rows in the header."""
if self.axis == 0:
return max(1, self._shape[0])
else:
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded | ['def', 'rowCount', '(', 'self', ',', 'index', '=', 'None', ')', ':', 'if', 'self', '.', 'axis', '==', '0', ':', 'return', 'max', '(', '1', ',', 'self', '.', '_shape', '[', '0', ']', ')', 'else', ':', 'if', 'self', '.', 'total_rows', '<=', 'self', '.', 'rows_loaded', ':', 'return', 'self', '.', 'total_rows', 'else', ':', 'return', 'self', '.', 'rows_loaded'] | Get number of rows in the header. | ['Get', 'number', 'of', 'rows', 'in', 'the', 'header', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L645-L653 |
829 | wummel/dosage | dosagelib/scraper.py | Scraper.getStrips | def getStrips(self, maxstrips=None):
"""Get comic strips."""
if maxstrips:
word = u"strip" if maxstrips == 1 else "strips"
msg = u'Retrieving %d %s' % (maxstrips, word)
else:
msg = u'Retrieving all strips'
if self.indexes:
if len(self.indexes) == 1:
msg += u" for index %s" % self.indexes[0]
else:
msg += u" for indexes %s" % self.indexes
# Always call starter() since it might initialize cookies.
# See for example Oglaf comic.
self.starter()
urls = [self.getIndexStripUrl(index) for index in self.indexes]
else:
urls = [self.getLatestUrl()]
if self.adult:
msg += u" (including adult content)"
out.info(msg)
for url in urls:
for strip in self.getStripsFor(url, maxstrips):
yield strip | python | def getStrips(self, maxstrips=None):
"""Get comic strips."""
if maxstrips:
word = u"strip" if maxstrips == 1 else "strips"
msg = u'Retrieving %d %s' % (maxstrips, word)
else:
msg = u'Retrieving all strips'
if self.indexes:
if len(self.indexes) == 1:
msg += u" for index %s" % self.indexes[0]
else:
msg += u" for indexes %s" % self.indexes
# Always call starter() since it might initialize cookies.
# See for example Oglaf comic.
self.starter()
urls = [self.getIndexStripUrl(index) for index in self.indexes]
else:
urls = [self.getLatestUrl()]
if self.adult:
msg += u" (including adult content)"
out.info(msg)
for url in urls:
for strip in self.getStripsFor(url, maxstrips):
yield strip | ['def', 'getStrips', '(', 'self', ',', 'maxstrips', '=', 'None', ')', ':', 'if', 'maxstrips', ':', 'word', '=', 'u"strip"', 'if', 'maxstrips', '==', '1', 'else', '"strips"', 'msg', '=', "u'Retrieving %d %s'", '%', '(', 'maxstrips', ',', 'word', ')', 'else', ':', 'msg', '=', "u'Retrieving all strips'", 'if', 'self', '.', 'indexes', ':', 'if', 'len', '(', 'self', '.', 'indexes', ')', '==', '1', ':', 'msg', '+=', 'u" for index %s"', '%', 'self', '.', 'indexes', '[', '0', ']', 'else', ':', 'msg', '+=', 'u" for indexes %s"', '%', 'self', '.', 'indexes', '# Always call starter() since it might initialize cookies.', '# See for example Oglaf comic.', 'self', '.', 'starter', '(', ')', 'urls', '=', '[', 'self', '.', 'getIndexStripUrl', '(', 'index', ')', 'for', 'index', 'in', 'self', '.', 'indexes', ']', 'else', ':', 'urls', '=', '[', 'self', '.', 'getLatestUrl', '(', ')', ']', 'if', 'self', '.', 'adult', ':', 'msg', '+=', 'u" (including adult content)"', 'out', '.', 'info', '(', 'msg', ')', 'for', 'url', 'in', 'urls', ':', 'for', 'strip', 'in', 'self', '.', 'getStripsFor', '(', 'url', ',', 'maxstrips', ')', ':', 'yield', 'strip'] | Get comic strips. | ['Get', 'comic', 'strips', '.'] | train | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/scraper.py#L137-L160 |
830 | mlperf/training | object_detection/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py | eval_detection_voc | def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
"""Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results
"""
assert len(gt_boxlists) == len(
pred_boxlists
), "Length of gt and pred lists need to be same."
prec, rec = calc_detection_voc_prec_rec(
pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {"ap": ap, "map": np.nanmean(ap)} | python | def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
"""Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results
"""
assert len(gt_boxlists) == len(
pred_boxlists
), "Length of gt and pred lists need to be same."
prec, rec = calc_detection_voc_prec_rec(
pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {"ap": ap, "map": np.nanmean(ap)} | ['def', 'eval_detection_voc', '(', 'pred_boxlists', ',', 'gt_boxlists', ',', 'iou_thresh', '=', '0.5', ',', 'use_07_metric', '=', 'False', ')', ':', 'assert', 'len', '(', 'gt_boxlists', ')', '==', 'len', '(', 'pred_boxlists', ')', ',', '"Length of gt and pred lists need to be same."', 'prec', ',', 'rec', '=', 'calc_detection_voc_prec_rec', '(', 'pred_boxlists', '=', 'pred_boxlists', ',', 'gt_boxlists', '=', 'gt_boxlists', ',', 'iou_thresh', '=', 'iou_thresh', ')', 'ap', '=', 'calc_detection_voc_ap', '(', 'prec', ',', 'rec', ',', 'use_07_metric', '=', 'use_07_metric', ')', 'return', '{', '"ap"', ':', 'ap', ',', '"map"', ':', 'np', '.', 'nanmean', '(', 'ap', ')', '}'] | Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results | ['Evaluate', 'on', 'voc', 'dataset', '.', 'Args', ':', 'pred_boxlists', '(', 'list', '[', 'BoxList', ']', ')', ':', 'pred', 'boxlist', 'has', 'labels', 'and', 'scores', 'fields', '.', 'gt_boxlists', '(', 'list', '[', 'BoxList', ']', ')', ':', 'ground', 'truth', 'boxlist', 'has', 'labels', 'field', '.', 'iou_thresh', ':', 'iou', 'thresh', 'use_07_metric', ':', 'boolean', 'Returns', ':', 'dict', 'represents', 'the', 'results'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py#L48-L65 |
831 | MisterWil/abodepy | abodepy/devices/lock.py | AbodeLock.lock | def lock(self):
"""Lock the device."""
success = self.set_status(CONST.STATUS_LOCKCLOSED_INT)
if success:
self._json_state['status'] = CONST.STATUS_LOCKCLOSED
return success | python | def lock(self):
"""Lock the device."""
success = self.set_status(CONST.STATUS_LOCKCLOSED_INT)
if success:
self._json_state['status'] = CONST.STATUS_LOCKCLOSED
return success | ['def', 'lock', '(', 'self', ')', ':', 'success', '=', 'self', '.', 'set_status', '(', 'CONST', '.', 'STATUS_LOCKCLOSED_INT', ')', 'if', 'success', ':', 'self', '.', '_json_state', '[', "'status'", ']', '=', 'CONST', '.', 'STATUS_LOCKCLOSED', 'return', 'success'] | Lock the device. | ['Lock', 'the', 'device', '.'] | train | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/lock.py#L10-L17 |
832 | slundberg/shap | shap/benchmark/metrics.py | human_and_00 | def human_and_00(X, y, model_generator, method_name):
""" AND (false/false)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 0
"""
return _human_and(X, model_generator, method_name, False, False) | python | def human_and_00(X, y, model_generator, method_name):
""" AND (false/false)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 0
"""
return _human_and(X, model_generator, method_name, False, False) | ['def', 'human_and_00', '(', 'X', ',', 'y', ',', 'model_generator', ',', 'method_name', ')', ':', 'return', '_human_and', '(', 'X', ',', 'model_generator', ',', 'method_name', ',', 'False', ',', 'False', ')'] | AND (false/false)
This tests how well a feature attribution method agrees with human intuition
for an AND operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever and cough: +6 points
transform = "identity"
sort_order = 0 | ['AND', '(', 'false', '/', 'false', ')'] | train | https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L578-L592 |
833 | vicalloy/lbutils | lbutils/views.py | request_get_next | def request_get_next(request, default_next):
"""
get next url from request
order: POST.next GET.next HTTP_REFERER, default_next
"""
next_url = request.POST.get('next')\
or request.GET.get('next')\
or request.META.get('HTTP_REFERER')\
or default_next
return next_url | python | def request_get_next(request, default_next):
"""
get next url from request
order: POST.next GET.next HTTP_REFERER, default_next
"""
next_url = request.POST.get('next')\
or request.GET.get('next')\
or request.META.get('HTTP_REFERER')\
or default_next
return next_url | ['def', 'request_get_next', '(', 'request', ',', 'default_next', ')', ':', 'next_url', '=', 'request', '.', 'POST', '.', 'get', '(', "'next'", ')', 'or', 'request', '.', 'GET', '.', 'get', '(', "'next'", ')', 'or', 'request', '.', 'META', '.', 'get', '(', "'HTTP_REFERER'", ')', 'or', 'default_next', 'return', 'next_url'] | get next url form request
order: POST.next GET.next HTTP_REFERER, default_next | ['get', 'next', 'url', 'form', 'request'] | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/views.py#L28-L38 |
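A small runnable illustration of the fallback order described above, using a throwaway stand-in rather than a real Django HttpRequest; the class and URL values are hypothetical, and request_get_next is assumed to be in scope as defined above:

class FakeRequest:
    # Hypothetical stand-in exposing only the attributes the helper reads.
    POST = {}
    GET = {'next': '/dashboard/'}
    META = {'HTTP_REFERER': '/home/'}

request_get_next(FakeRequest(), '/fallback/')  # -> '/dashboard/' (GET beats the referer)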
834 | vmware/pyvmomi | pyVmomi/DynamicTypeManagerHelper.py | DynamicTypeConstructor._ConvertMethodType | def _ConvertMethodType(self, methodType):
"""
Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method
definition
"""
if methodType:
name = methodType.name
wsdlName = methodType.wsdlName
version = methodType.version
params = self._Filter(self._ConvertParamType, methodType.paramTypeInfo)
privId = methodType.privId
faults = methodType.fault
# Figure out return info
if methodType.returnTypeInfo:
returnTypeInfo = methodType.returnTypeInfo
retFlags = self._ConvertAnnotations(returnTypeInfo.annotation)
methodRetType = returnTypeInfo.type
else:
retFlags = 0
methodRetType = "void"
if wsdlName.endswith("_Task"):
# TODO: Need a separate task return type for task, instead of
# hardcode vim.Task as return type
retType = "vim.Task"
else:
retType = methodRetType
retInfo = (retFlags, retType, methodRetType)
method = (name, wsdlName, version, params, retInfo, privId, faults)
else:
method = None
return method | python | def _ConvertMethodType(self, methodType):
"""
Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method
definition
"""
if methodType:
name = methodType.name
wsdlName = methodType.wsdlName
version = methodType.version
params = self._Filter(self._ConvertParamType, methodType.paramTypeInfo)
privId = methodType.privId
faults = methodType.fault
# Figure out return info
if methodType.returnTypeInfo:
returnTypeInfo = methodType.returnTypeInfo
retFlags = self._ConvertAnnotations(returnTypeInfo.annotation)
methodRetType = returnTypeInfo.type
else:
retFlags = 0
methodRetType = "void"
if wsdlName.endswith("_Task"):
# TODO: Need a separate task return type for task, instead of
# hardcode vim.Task as return type
retType = "vim.Task"
else:
retType = methodRetType
retInfo = (retFlags, retType, methodRetType)
method = (name, wsdlName, version, params, retInfo, privId, faults)
else:
method = None
return method | ['def', '_ConvertMethodType', '(', 'self', ',', 'methodType', ')', ':', 'if', 'methodType', ':', 'name', '=', 'methodType', '.', 'name', 'wsdlName', '=', 'methodType', '.', 'wsdlName', 'version', '=', 'methodType', '.', 'version', 'params', '=', 'self', '.', '_Filter', '(', 'self', '.', '_ConvertParamType', ',', 'methodType', '.', 'paramTypeInfo', ')', 'privId', '=', 'methodType', '.', 'privId', 'faults', '=', 'methodType', '.', 'fault', '# Figure out reture info', 'if', 'methodType', '.', 'returnTypeInfo', ':', 'returnTypeInfo', '=', 'methodType', '.', 'returnTypeInfo', 'retFlags', '=', 'self', '.', '_ConvertAnnotations', '(', 'returnTypeInfo', '.', 'annotation', ')', 'methodRetType', '=', 'returnTypeInfo', '.', 'type', 'else', ':', 'retFlags', '=', '0', 'methodRetType', '=', '"void"', 'if', 'wsdlName', '.', 'endswith', '(', '"_Task"', ')', ':', '# TODO: Need a seperate task return type for task, instead of', '# hardcode vim.Task as return type', 'retType', '=', '"vim.Task"', 'else', ':', 'retType', '=', 'methodRetType', 'retInfo', '=', '(', 'retFlags', ',', 'retType', ',', 'methodRetType', ')', 'method', '=', '(', 'name', ',', 'wsdlName', ',', 'version', ',', 'params', ',', 'retInfo', ',', 'privId', ',', 'faults', ')', 'else', ':', 'method', '=', 'None', 'return', 'method'] | Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method
definition | ['Convert', 'vmodl', '.', 'reflect', '.', 'DynamicTypeManager', '.', 'MethodTypeInfo', 'to', 'pyVmomi', 'method', 'definition'] | train | https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/DynamicTypeManagerHelper.py#L177-L209 |
835 | apache/incubator-superset | contrib/docker/superset_config.py | get_env_variable | def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = 'The environment variable {} was missing, abort...'\
.format(var_name)
raise EnvironmentError(error_msg) | python | def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = 'The environment variable {} was missing, abort...'\
.format(var_name)
raise EnvironmentError(error_msg) | ['def', 'get_env_variable', '(', 'var_name', ',', 'default', '=', 'None', ')', ':', 'try', ':', 'return', 'os', '.', 'environ', '[', 'var_name', ']', 'except', 'KeyError', ':', 'if', 'default', 'is', 'not', 'None', ':', 'return', 'default', 'else', ':', 'error_msg', '=', "'The environment variable {} was missing, abort...'", '.', 'format', '(', 'var_name', ')', 'raise', 'EnvironmentError', '(', 'error_msg', ')'] | Get the environment variable or raise exception. | ['Get', 'the', 'environment', 'variable', 'or', 'raise', 'exception', '.'] | train | https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/contrib/docker/superset_config.py#L20-L30 |
836 | python-visualization/folium | folium/utilities.py | if_pandas_df_convert_to_numpy | def if_pandas_df_convert_to_numpy(obj):
"""Return a Numpy array from a Pandas dataframe.
Iterating over a DataFrame has weird side effects, such as the first
row being the column names. Converting to Numpy is more safe.
"""
if pd is not None and isinstance(obj, pd.DataFrame):
return obj.values
else:
return obj | python | def if_pandas_df_convert_to_numpy(obj):
"""Return a Numpy array from a Pandas dataframe.
Iterating over a DataFrame has weird side effects, such as the first
row being the column names. Converting to Numpy is more safe.
"""
if pd is not None and isinstance(obj, pd.DataFrame):
return obj.values
else:
return obj | ['def', 'if_pandas_df_convert_to_numpy', '(', 'obj', ')', ':', 'if', 'pd', 'is', 'not', 'None', 'and', 'isinstance', '(', 'obj', ',', 'pd', '.', 'DataFrame', ')', ':', 'return', 'obj', '.', 'values', 'else', ':', 'return', 'obj'] | Return a Numpy array from a Pandas dataframe.
Iterating over a DataFrame has weird side effects, such as the first
row being the column names. Converting to Numpy is more safe. | ['Return', 'a', 'Numpy', 'array', 'from', 'a', 'Pandas', 'dataframe', '.'] | train | https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/utilities.py#L97-L106 |
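A quick check of the behaviour above, assuming pandas is installed; anything that is not a DataFrame passes through unchanged:

import pandas as pd
from folium.utilities import if_pandas_df_convert_to_numpy

df = pd.DataFrame({'lat': [45.5, 45.6], 'lon': [-122.6, -122.7]})
if_pandas_df_convert_to_numpy(df)         # -> df.values (a 2x2 numpy array)
if_pandas_df_convert_to_numpy([1, 2, 3])  # -> [1, 2, 3], returned as-is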
837 | rameshg87/pyremotevbox | pyremotevbox/ZSI/wstools/XMLSchema.py | XMLSchemaComponent.getTargetNamespace | def getTargetNamespace(self):
"""return targetNamespace
"""
parent = self
targetNamespace = 'targetNamespace'
tns = self.attributes.get(targetNamespace)
while not tns and parent and parent._parent is not None:
parent = parent._parent()
tns = parent.attributes.get(targetNamespace)
return tns or '' | python | def getTargetNamespace(self):
"""return targetNamespace
"""
parent = self
targetNamespace = 'targetNamespace'
tns = self.attributes.get(targetNamespace)
while not tns and parent and parent._parent is not None:
parent = parent._parent()
tns = parent.attributes.get(targetNamespace)
return tns or '' | ['def', 'getTargetNamespace', '(', 'self', ')', ':', 'parent', '=', 'self', 'targetNamespace', '=', "'targetNamespace'", 'tns', '=', 'self', '.', 'attributes', '.', 'get', '(', 'targetNamespace', ')', 'while', 'not', 'tns', 'and', 'parent', 'and', 'parent', '.', '_parent', 'is', 'not', 'None', ':', 'parent', '=', 'parent', '.', '_parent', '(', ')', 'tns', '=', 'parent', '.', 'attributes', '.', 'get', '(', 'targetNamespace', ')', 'return', 'tns', 'or', "''"] | return targetNamespace | ['return', 'targetNamespace'] | train | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L536-L545 |
838 | ntoll/uflash | uflash.py | flash | def flash(path_to_python=None, paths_to_microbits=None,
path_to_runtime=None, python_script=None, minify=False):
"""
Given a path to or source of a Python file will attempt to create a hex
file and then flash it onto the referenced BBC micro:bit.
If the path_to_python & python_script are unspecified it will simply flash
the unmodified MicroPython runtime onto the device.
If used, the python_script argument should be a bytes object representing
a UTF-8 encoded string. For example::
script = "from microbit import *\\ndisplay.scroll('Hello, World!')"
uflash.flash(python_script=script.encode('utf-8'))
If paths_to_microbits is unspecified it will attempt to find the device's
path on the filesystem automatically.
If the path_to_runtime is unspecified it will use the built in version of
the MicroPython runtime. This feature is useful if a custom build of
MicroPython is available.
If the automatic discovery fails, then it will raise an IOError.
"""
# Check for the correct version of Python.
if not ((sys.version_info[0] == 3 and sys.version_info[1] >= 3) or
(sys.version_info[0] == 2 and sys.version_info[1] >= 7)):
raise RuntimeError('Will only run on Python 2.7, or 3.3 and later.')
# Grab the Python script (if needed).
python_hex = ''
if path_to_python:
if not path_to_python.endswith('.py'):
raise ValueError('Python files must end in ".py".')
with open(path_to_python, 'rb') as python_script:
python_hex = hexlify(python_script.read(), minify)
elif python_script:
python_hex = hexlify(python_script, minify)
runtime = _RUNTIME
# Load the hex for the runtime.
if path_to_runtime:
with open(path_to_runtime) as runtime_file:
runtime = runtime_file.read()
# Generate the resulting hex file.
micropython_hex = embed_hex(runtime, python_hex)
# Find the micro:bit.
if not paths_to_microbits:
found_microbit = find_microbit()
if found_microbit:
paths_to_microbits = [found_microbit]
# Attempt to write the hex file to the micro:bit.
if paths_to_microbits:
for path in paths_to_microbits:
hex_path = os.path.join(path, 'micropython.hex')
print('Flashing Python to: {}'.format(hex_path))
save_hex(micropython_hex, hex_path)
else:
raise IOError('Unable to find micro:bit. Is it plugged in?') | python | def flash(path_to_python=None, paths_to_microbits=None,
path_to_runtime=None, python_script=None, minify=False):
"""
Given a path to or source of a Python file will attempt to create a hex
file and then flash it onto the referenced BBC micro:bit.
If the path_to_python & python_script are unspecified it will simply flash
the unmodified MicroPython runtime onto the device.
If used, the python_script argument should be a bytes object representing
a UTF-8 encoded string. For example::
script = "from microbit import *\\ndisplay.scroll('Hello, World!')"
uflash.flash(python_script=script.encode('utf-8'))
If paths_to_microbits is unspecified it will attempt to find the device's
path on the filesystem automatically.
If the path_to_runtime is unspecified it will use the built in version of
the MicroPython runtime. This feature is useful if a custom build of
MicroPython is available.
If the automatic discovery fails, then it will raise an IOError.
"""
# Check for the correct version of Python.
if not ((sys.version_info[0] == 3 and sys.version_info[1] >= 3) or
(sys.version_info[0] == 2 and sys.version_info[1] >= 7)):
raise RuntimeError('Will only run on Python 2.7, or 3.3 and later.')
# Grab the Python script (if needed).
python_hex = ''
if path_to_python:
if not path_to_python.endswith('.py'):
raise ValueError('Python files must end in ".py".')
with open(path_to_python, 'rb') as python_script:
python_hex = hexlify(python_script.read(), minify)
elif python_script:
python_hex = hexlify(python_script, minify)
runtime = _RUNTIME
# Load the hex for the runtime.
if path_to_runtime:
with open(path_to_runtime) as runtime_file:
runtime = runtime_file.read()
# Generate the resulting hex file.
micropython_hex = embed_hex(runtime, python_hex)
# Find the micro:bit.
if not paths_to_microbits:
found_microbit = find_microbit()
if found_microbit:
paths_to_microbits = [found_microbit]
# Attempt to write the hex file to the micro:bit.
if paths_to_microbits:
for path in paths_to_microbits:
hex_path = os.path.join(path, 'micropython.hex')
print('Flashing Python to: {}'.format(hex_path))
save_hex(micropython_hex, hex_path)
else:
raise IOError('Unable to find micro:bit. Is it plugged in?') | ['def', 'flash', '(', 'path_to_python', '=', 'None', ',', 'paths_to_microbits', '=', 'None', ',', 'path_to_runtime', '=', 'None', ',', 'python_script', '=', 'None', ',', 'minify', '=', 'False', ')', ':', '# Check for the correct version of Python.', 'if', 'not', '(', '(', 'sys', '.', 'version_info', '[', '0', ']', '==', '3', 'and', 'sys', '.', 'version_info', '[', '1', ']', '>=', '3', ')', 'or', '(', 'sys', '.', 'version_info', '[', '0', ']', '==', '2', 'and', 'sys', '.', 'version_info', '[', '1', ']', '>=', '7', ')', ')', ':', 'raise', 'RuntimeError', '(', "'Will only run on Python 2.7, or 3.3 and later.'", ')', '# Grab the Python script (if needed).', 'python_hex', '=', "''", 'if', 'path_to_python', ':', 'if', 'not', 'path_to_python', '.', 'endswith', '(', "'.py'", ')', ':', 'raise', 'ValueError', '(', '\'Python files must end in ".py".\'', ')', 'with', 'open', '(', 'path_to_python', ',', "'rb'", ')', 'as', 'python_script', ':', 'python_hex', '=', 'hexlify', '(', 'python_script', '.', 'read', '(', ')', ',', 'minify', ')', 'elif', 'python_script', ':', 'python_hex', '=', 'hexlify', '(', 'python_script', ',', 'minify', ')', 'runtime', '=', '_RUNTIME', '# Load the hex for the runtime.', 'if', 'path_to_runtime', ':', 'with', 'open', '(', 'path_to_runtime', ')', 'as', 'runtime_file', ':', 'runtime', '=', 'runtime_file', '.', 'read', '(', ')', '# Generate the resulting hex file.', 'micropython_hex', '=', 'embed_hex', '(', 'runtime', ',', 'python_hex', ')', '# Find the micro:bit.', 'if', 'not', 'paths_to_microbits', ':', 'found_microbit', '=', 'find_microbit', '(', ')', 'if', 'found_microbit', ':', 'paths_to_microbits', '=', '[', 'found_microbit', ']', '# Attempt to write the hex file to the micro:bit.', 'if', 'paths_to_microbits', ':', 'for', 'path', 'in', 'paths_to_microbits', ':', 'hex_path', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', "'micropython.hex'", ')', 'print', '(', "'Flashing Python to: {}'", '.', 'format', '(', 'hex_path', ')', ')', 'save_hex', '(', 'micropython_hex', ',', 'hex_path', ')', 'else', ':', 'raise', 'IOError', '(', "'Unable to find micro:bit. Is it plugged in?'", ')'] | Given a path to or source of a Python file will attempt to create a hex
file and then flash it onto the referenced BBC micro:bit.
If the path_to_python & python_script are unspecified it will simply flash
the unmodified MicroPython runtime onto the device.
If used, the python_script argument should be a bytes object representing
a UTF-8 encoded string. For example::
script = "from microbit import *\\ndisplay.scroll('Hello, World!')"
uflash.flash(python_script=script.encode('utf-8'))
If paths_to_microbits is unspecified it will attempt to find the device's
path on the filesystem automatically.
If the path_to_runtime is unspecified it will use the built in version of
the MicroPython runtime. This feature is useful if a custom build of
MicroPython is available.
If the automatic discovery fails, then it will raise an IOError. | ['Given', 'a', 'path', 'to', 'or', 'source', 'of', 'a', 'Python', 'file', 'will', 'attempt', 'to', 'create', 'a', 'hex', 'file', 'and', 'then', 'flash', 'it', 'onto', 'the', 'referenced', 'BBC', 'micro', ':', 'bit', '.'] | train | https://github.com/ntoll/uflash/blob/867468d386da0aa20212b69a152ce8bfc0972366/uflash.py#L284-L341 |
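A hedged example of calling flash as documented above; the script name and mount point are hypothetical, and leaving paths_to_microbits out falls back to automatic device discovery:

import uflash

# Flash a script to an explicitly named mount point (hypothetical path).
uflash.flash(path_to_python='hello.py',
             paths_to_microbits=['/media/user/MICROBIT'])

# Or let the function locate the device itself (raises IOError if none is found).
uflash.flash(path_to_python='hello.py')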
839 | gem/oq-engine | openquake/hmtk/sources/simple_fault_source.py | mtkSimpleFaultSource.create_geometry | def create_geometry(self, input_geometry, dip, upper_depth, lower_depth,
mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
assert((dip > 0.) and (dip <= 90.))
self.dip = dip
self._check_seismogenic_depths(upper_depth, lower_depth)
if not isinstance(input_geometry, Line):
if not isinstance(input_geometry, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_trace = Line([Point(row[0], row[1]) for row in
input_geometry])
else:
self.fault_trace = input_geometry
# Build fault surface
self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace,
self.upper_depth,
self.lower_depth,
self.dip,
mesh_spacing) | python | def create_geometry(self, input_geometry, dip, upper_depth, lower_depth,
mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
assert((dip > 0.) and (dip <= 90.))
self.dip = dip
self._check_seismogenic_depths(upper_depth, lower_depth)
if not isinstance(input_geometry, Line):
if not isinstance(input_geometry, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_trace = Line([Point(row[0], row[1]) for row in
input_geometry])
else:
self.fault_trace = input_geometry
# Build fault surface
self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace,
self.upper_depth,
self.lower_depth,
self.dip,
mesh_spacing) | ['def', 'create_geometry', '(', 'self', ',', 'input_geometry', ',', 'dip', ',', 'upper_depth', ',', 'lower_depth', ',', 'mesh_spacing', '=', '1.0', ')', ':', 'assert', '(', '(', 'dip', '>', '0.', ')', 'and', '(', 'dip', '<=', '90.', ')', ')', 'self', '.', 'dip', '=', 'dip', 'self', '.', '_check_seismogenic_depths', '(', 'upper_depth', ',', 'lower_depth', ')', 'if', 'not', 'isinstance', '(', 'input_geometry', ',', 'Line', ')', ':', 'if', 'not', 'isinstance', '(', 'input_geometry', ',', 'np', '.', 'ndarray', ')', ':', 'raise', 'ValueError', '(', "'Unrecognised or unsupported geometry '", "'definition'", ')', 'else', ':', 'self', '.', 'fault_trace', '=', 'Line', '(', '[', 'Point', '(', 'row', '[', '0', ']', ',', 'row', '[', '1', ']', ')', 'for', 'row', 'in', 'input_geometry', ']', ')', 'else', ':', 'self', '.', 'fault_trace', '=', 'input_geometry', '# Build fault surface', 'self', '.', 'geometry', '=', 'SimpleFaultSurface', '.', 'from_fault_data', '(', 'self', '.', 'fault_trace', ',', 'self', '.', 'upper_depth', ',', 'self', '.', 'lower_depth', ',', 'self', '.', 'dip', ',', 'mesh_spacing', ')'] | If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0} | ['If', 'geometry', 'is', 'defined', 'as', 'a', 'numpy', 'array', 'then', 'create', 'instance', 'of', 'nhlib', '.', 'geo', '.', 'line', '.', 'Line', 'class', 'otherwise', 'if', 'already', 'instance', 'of', 'class', 'accept', 'class'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/sources/simple_fault_source.py#L147-L189 |
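A sketch of passing a plain numpy trace to create_geometry above; the coordinates, dip and depths are illustrative, and source is assumed to be an already constructed mtkSimpleFaultSource instance:

import numpy as np

# [Longitude, Latitude] pairs tracing the fault at the surface (illustrative values).
trace = np.array([[30.0, 40.0],
                  [30.2, 40.1],
                  [30.4, 40.3]])

source.create_geometry(trace, dip=60.0, upper_depth=0.0,
                       lower_depth=20.0, mesh_spacing=1.0)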
840 | saltstack/salt | salt/cloud/clouds/vmware.py | upgrade_tools_all | def upgrade_tools_all(call=None):
'''
To upgrade VMware Tools on all virtual machines present in
the specified provider
.. note::
If the virtual machine is running Windows OS, this function
will attempt to suppress the automatic reboot caused by a
VMware Tools upgrade.
CLI Example:
.. code-block:: bash
salt-cloud -f upgrade_tools_all my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The upgrade_tools_all function must be called with '
'-f or --function.'
)
ret = {}
vm_properties = ["name"]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
ret[vm['name']] = _upg_tools_helper(vm['object'])
return ret | python | def upgrade_tools_all(call=None):
'''
To upgrade VMware Tools on all virtual machines present in
the specified provider
.. note::
If the virtual machine is running Windows OS, this function
will attempt to suppress the automatic reboot caused by a
VMware Tools upgrade.
CLI Example:
.. code-block:: bash
salt-cloud -f upgrade_tools_all my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The upgrade_tools_all function must be called with '
'-f or --function.'
)
ret = {}
vm_properties = ["name"]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
ret[vm['name']] = _upg_tools_helper(vm['object'])
return ret | ['def', 'upgrade_tools_all', '(', 'call', '=', 'None', ')', ':', 'if', 'call', '!=', "'function'", ':', 'raise', 'SaltCloudSystemExit', '(', "'The upgrade_tools_all function must be called with '", "'-f or --function.'", ')', 'ret', '=', '{', '}', 'vm_properties', '=', '[', '"name"', ']', 'vm_list', '=', 'salt', '.', 'utils', '.', 'vmware', '.', 'get_mors_with_properties', '(', '_get_si', '(', ')', ',', 'vim', '.', 'VirtualMachine', ',', 'vm_properties', ')', 'for', 'vm', 'in', 'vm_list', ':', 'ret', '[', 'vm', '[', "'name'", ']', ']', '=', '_upg_tools_helper', '(', 'vm', '[', "'object'", ']', ')', 'return', 'ret'] | To upgrade VMware Tools on all virtual machines present in
the specified provider
.. note::
If the virtual machine is running Windows OS, this function
will attempt to suppress the automatic reboot caused by a
VMware Tools upgrade.
CLI Example:
.. code-block:: bash
salt-cloud -f upgrade_tools_all my-vmware-config | ['To', 'upgrade', 'VMware', 'Tools', 'on', 'all', 'virtual', 'machines', 'present', 'in', 'the', 'specified', 'provider'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L3326-L3357 |
841 | sorgerlab/indra | indra/sources/eidos/processor.py | EidosProcessor.extract_causal_relations | def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
# save it if its valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt) | python | def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
# save it if it's valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt) | ['def', 'extract_causal_relations', '(', 'self', ')', ':', '# Get the extractions that are labeled as directed and causal', 'relations', '=', '[', 'e', 'for', 'e', 'in', 'self', '.', 'doc', '.', 'extractions', 'if', "'DirectedRelation'", 'in', 'e', '[', "'labels'", ']', 'and', "'Causal'", 'in', 'e', '[', "'labels'", ']', ']', '# For each relation, we try to extract an INDRA Statement and', '# save it if its valid', 'for', 'relation', 'in', 'relations', ':', 'stmt', '=', 'self', '.', 'get_causal_relation', '(', 'relation', ')', 'if', 'stmt', 'is', 'not', 'None', ':', 'self', '.', 'statements', '.', 'append', '(', 'stmt', ')'] | Extract causal relations as Statements. | ['Extract', 'causal', 'relations', 'as', 'Statements', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/eidos/processor.py#L27-L38 |
842 | Esri/ArcREST | src/arcrest/ags/_gpobjects.py | GPString.fromJSON | def fromJSON(value):
"""loads the GP object from a JSON string """
j = json.loads(value)
v = GPString()
if "defaultValue" in j:
v.value = j['defaultValue']
else:
v.value = j['value']
if 'paramName' in j:
v.paramName = j['paramName']
elif 'name' in j:
v.paramName = j['name']
return v | python | def fromJSON(value):
"""loads the GP object from a JSON string """
j = json.loads(value)
v = GPString()
if "defaultValue" in j:
v.value = j['defaultValue']
else:
v.value = j['value']
if 'paramName' in j:
v.paramName = j['paramName']
elif 'name' in j:
v.paramName = j['name']
return v | ['def', 'fromJSON', '(', 'value', ')', ':', 'j', '=', 'json', '.', 'loads', '(', 'value', ')', 'v', '=', 'GPString', '(', ')', 'if', '"defaultValue"', 'in', 'j', ':', 'v', '.', 'value', '=', 'j', '[', "'defaultValue'", ']', 'else', ':', 'v', '.', 'value', '=', 'j', '[', "'value'", ']', 'if', "'paramName'", 'in', 'j', ':', 'v', '.', 'paramName', '=', 'j', '[', "'paramName'", ']', 'elif', "'name'", 'in', 'j', ':', 'v', '.', 'paramName', '=', 'j', '[', "'name'", ']', 'return', 'v'] | loads the GP object from a JSON string | ['loads', 'the', 'GP', 'object', 'from', 'a', 'JSON', 'string'] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_gpobjects.py#L607-L619 |
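The fromJSON helper above prefers 'defaultValue' over 'value' and 'paramName' over 'name'. Below is a minimal sketch of that key handling using only the standard library json module; parse_gp_string and the sample payloads are illustrative and are not part of the ArcREST API.

import json

def parse_gp_string(value):
    # Mirrors the key precedence of GPString.fromJSON: defaultValue wins over
    # value, and paramName wins over name.
    j = json.loads(value)
    parsed = {'value': j['defaultValue'] if 'defaultValue' in j else j['value']}
    if 'paramName' in j:
        parsed['paramName'] = j['paramName']
    elif 'name' in j:
        parsed['paramName'] = j['name']
    return parsed

print(parse_gp_string('{"paramName": "Input_String", "value": "hello"}'))
print(parse_gp_string('{"name": "Input_String", "defaultValue": "fallback"}'))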
843 | LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/core/script_iterator.py | ScriptIterator._estimate_progress | def _estimate_progress(self):
"""
estimates the current progress that is then used in _receive_signal
:return: current progress in percent
"""
estimate = True
# ==== get the current subscript and the time it takes to execute it =====
current_subscript = self._current_subscript_stage['current_subscript']
# ==== get the number of subscripts =====
num_subscripts = len(self.scripts)
# ==== get number of iterations and loop index ======================
if self.iterator_type == 'loop':
num_iterations = self.settings['num_loops']
elif self.iterator_type == 'sweep':
sweep_range = self.settings['sweep_range']
if self.settings['stepping_mode'] == 'value_step':
num_iterations = int((sweep_range['max_value'] - sweep_range['min_value']) / sweep_range['N/value_step']) + 1
# len(np.linspace(sweep_range['min_value'], sweep_range['max_value'],
# (sweep_range['max_value'] - sweep_range['min_value']) /
# sweep_range['N/value_step'] + 1, endpoint=True).tolist())
elif self.settings['stepping_mode'] == 'N':
num_iterations = sweep_range['N/value_step']
else:
raise KeyError('unknown key' + self.settings['stepping_mode'])
else:
print('unknown iterator type in Iterator receive signal - can\'t estimate remaining time')
estimate = False
if estimate:
# get number of loops (completed + 1)
loop_index = self.loop_index
if num_subscripts > 1:
# estimate the progress based on the duration the individual subscripts
loop_execution_time = 0. # time for a single loop execution in s
sub_progress_time = 0. # progress of current loop iteration in s
# ==== get typical duration of current subscript ======================
if current_subscript is not None:
current_subscript_exec_duration = self._current_subscript_stage['subscript_exec_duration'][
current_subscript.name].total_seconds()
else:
current_subscript_exec_duration = 0.0
current_subscript_elapsed_time = (datetime.datetime.now() - current_subscript.start_time).total_seconds()
# estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0
if current_subscript_exec_duration == 0.0:
remaining_time = current_subscript.remaining_time.total_seconds()
current_subscript_exec_duration = remaining_time + current_subscript_elapsed_time
# ==== get typical duration of one loop iteration ======================
remaining_scripts = 0 # script that remain to be executed for the first time
for subscript_name, duration in self._current_subscript_stage['subscript_exec_duration'].items():
if duration.total_seconds() == 0.0:
remaining_scripts += 1
loop_execution_time += duration.total_seconds()
# add the times of the subscripts that have been executed in the current loop
# ignore the current subscript, because that will be taken care of later
if self._current_subscript_stage['subscript_exec_count'][subscript_name] == loop_index \
and subscript_name is not current_subscript.name:
# this subscript has already been executed in this iteration
sub_progress_time += duration.total_seconds()
# add the proportional duration of the current subscript given by the subscript progress
sub_progress_time += current_subscript_elapsed_time
# if there are scripts that have not been executed yet
# assume that all the scripts that have not been executed yet take as long as the average of the other scripts
if remaining_scripts == num_subscripts:
# none of the subscript has been finished. assume that all the scripts take as long as the first
loop_execution_time = num_subscripts * current_subscript_exec_duration
elif remaining_scripts > 1:
loop_execution_time = 1. * num_subscripts / (num_subscripts - remaining_scripts)
elif remaining_scripts == 1:
# there is only one script left which is the current script
loop_execution_time += current_subscript_exec_duration
if loop_execution_time > 0:
progress_subscript = 100. * sub_progress_time / loop_execution_time
else:
progress_subscript = 1. * progress_subscript / num_subscripts
# print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript))
progress = 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations
else:
# if can't estimate the remaining time set to half
progress = 50
return progress | python | def _estimate_progress(self):
"""
estimates the current progress that is then used in _receive_signal
:return: current progress in percent
"""
estimate = True
# ==== get the current subscript and the time it takes to execute it =====
current_subscript = self._current_subscript_stage['current_subscript']
# ==== get the number of subscripts =====
num_subscripts = len(self.scripts)
# ==== get number of iterations and loop index ======================
if self.iterator_type == 'loop':
num_iterations = self.settings['num_loops']
elif self.iterator_type == 'sweep':
sweep_range = self.settings['sweep_range']
if self.settings['stepping_mode'] == 'value_step':
num_iterations = int((sweep_range['max_value'] - sweep_range['min_value']) / sweep_range['N/value_step']) + 1
# len(np.linspace(sweep_range['min_value'], sweep_range['max_value'],
# (sweep_range['max_value'] - sweep_range['min_value']) /
# sweep_range['N/value_step'] + 1, endpoint=True).tolist())
elif self.settings['stepping_mode'] == 'N':
num_iterations = sweep_range['N/value_step']
else:
raise KeyError('unknown key' + self.settings['stepping_mode'])
else:
print('unknown iterator type in Iterator receive signal - can\'t estimate remaining time')
estimate = False
if estimate:
# get number of loops (completed + 1)
loop_index = self.loop_index
if num_subscripts > 1:
# estimate the progress based on the duration the individual subscripts
loop_execution_time = 0. # time for a single loop execution in s
sub_progress_time = 0. # progress of current loop iteration in s
# ==== get typical duration of current subscript ======================
if current_subscript is not None:
current_subscript_exec_duration = self._current_subscript_stage['subscript_exec_duration'][
current_subscript.name].total_seconds()
else:
current_subscript_exec_duration = 0.0
current_subscript_elapsed_time = (datetime.datetime.now() - current_subscript.start_time).total_seconds()
# estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0
if current_subscript_exec_duration == 0.0:
remaining_time = current_subscript.remaining_time.total_seconds()
current_subscript_exec_duration = remaining_time + current_subscript_elapsed_time
# ==== get typical duration of one loop iteration ======================
remaining_scripts = 0 # script that remain to be executed for the first time
for subscript_name, duration in self._current_subscript_stage['subscript_exec_duration'].items():
if duration.total_seconds() == 0.0:
remaining_scripts += 1
loop_execution_time += duration.total_seconds()
# add the times of the subscripts that have been executed in the current loop
# ignore the current subscript, because that will be taken care of later
if self._current_subscript_stage['subscript_exec_count'][subscript_name] == loop_index \
and subscript_name is not current_subscript.name:
# this subscript has already been executed in this iteration
sub_progress_time += duration.total_seconds()
# add the proportional duration of the current subscript given by the subscript progress
sub_progress_time += current_subscript_elapsed_time
# if there are scripts that have not been executed yet
# assume that all the scripts that have not been executed yet take as long as the average of the other scripts
if remaining_scripts == num_subscripts:
# none of the subscript has been finished. assume that all the scripts take as long as the first
loop_execution_time = num_subscripts * current_subscript_exec_duration
elif remaining_scripts > 1:
loop_execution_time = 1. * num_subscripts / (num_subscripts - remaining_scripts)
elif remaining_scripts == 1:
# there is only one script left which is the current script
loop_execution_time += current_subscript_exec_duration
if loop_execution_time > 0:
progress_subscript = 100. * sub_progress_time / loop_execution_time
else:
progress_subscript = 1. * progress_subscript / num_subscripts
# print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript))
progress = 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations
else:
# if can't estimate the remaining time set to half
progress = 50
return progress | ['def', '_estimate_progress', '(', 'self', ')', ':', 'estimate', '=', 'True', '# ==== get the current subscript and the time it takes to execute it =====', 'current_subscript', '=', 'self', '.', '_current_subscript_stage', '[', "'current_subscript'", ']', '# ==== get the number of subscripts =====', 'num_subscripts', '=', 'len', '(', 'self', '.', 'scripts', ')', '# ==== get number of iterations and loop index ======================', 'if', 'self', '.', 'iterator_type', '==', "'loop'", ':', 'num_iterations', '=', 'self', '.', 'settings', '[', "'num_loops'", ']', 'elif', 'self', '.', 'iterator_type', '==', "'sweep'", ':', 'sweep_range', '=', 'self', '.', 'settings', '[', "'sweep_range'", ']', 'if', 'self', '.', 'settings', '[', "'stepping_mode'", ']', '==', "'value_step'", ':', 'num_iterations', '=', 'int', '(', '(', 'sweep_range', '[', "'max_value'", ']', '-', 'sweep_range', '[', "'min_value'", ']', ')', '/', 'sweep_range', '[', "'N/value_step'", ']', ')', '+', '1', "# len(np.linspace(sweep_range['min_value'], sweep_range['max_value'],", "# (sweep_range['max_value'] - sweep_range['min_value']) /", "# sweep_range['N/value_step'] + 1, endpoint=True).tolist())", 'elif', 'self', '.', 'settings', '[', "'stepping_mode'", ']', '==', "'N'", ':', 'num_iterations', '=', 'sweep_range', '[', "'N/value_step'", ']', 'else', ':', 'raise', 'KeyError', '(', "'unknown key'", '+', 'self', '.', 'settings', '[', "'stepping_mode'", ']', ')', 'else', ':', 'print', '(', "'unknown iterator type in Iterator receive signal - can\\'t estimate ramining time'", ')', 'estimate', '=', 'False', 'if', 'estimate', ':', '# get number of loops (completed + 1)', 'loop_index', '=', 'self', '.', 'loop_index', 'if', 'num_subscripts', '>', '1', ':', '# estimate the progress based on the duration the individual subscripts', 'loop_execution_time', '=', '0.', '# time for a single loop execution in s', 'sub_progress_time', '=', '0.', '# progress of current loop iteration in s', '# ==== get typical duration of current subscript ======================', 'if', 'current_subscript', 'is', 'not', 'None', ':', 'current_subscript_exec_duration', '=', 'self', '.', '_current_subscript_stage', '[', "'subscript_exec_duration'", ']', '[', 'current_subscript', '.', 'name', ']', '.', 'total_seconds', '(', ')', 'else', ':', 'current_subscript_exec_duration', '=', '0.0', 'current_subscript_elapsed_time', '=', '(', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '-', 'current_subscript', '.', 'start_time', ')', '.', 'total_seconds', '(', ')', "# estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0", 'if', 'current_subscript_exec_duration', '==', '0.0', ':', 'remaining_time', '=', 'current_subscript', '.', 'remaining_time', '.', 'total_seconds', '(', ')', 'current_subscript_exec_duration', '=', 'remaining_time', '+', 'current_subscript_elapsed_time', '# ==== get typical duration of one loop iteration ======================', 'remaining_scripts', '=', '0', '# script that remain to be executed for the first time', 'for', 'subscript_name', ',', 'duration', 'in', 'self', '.', '_current_subscript_stage', '[', "'subscript_exec_duration'", ']', '.', 'items', '(', ')', ':', 'if', 'duration', '.', 'total_seconds', '(', ')', '==', '0.0', ':', 'remaining_scripts', '+=', '1', 'loop_execution_time', '+=', 'duration', '.', 'total_seconds', '(', ')', '# add the times of the subscripts that have been executed in the current loop', '# ignore the current subscript, because that 
will be taken care of later', 'if', 'self', '.', '_current_subscript_stage', '[', "'subscript_exec_count'", ']', '[', 'subscript_name', ']', '==', 'loop_index', 'and', 'subscript_name', 'is', 'not', 'current_subscript', '.', 'name', ':', '# this subscript has already been executed in this iteration', 'sub_progress_time', '+=', 'duration', '.', 'total_seconds', '(', ')', '# add the proportional duration of the current subscript given by the subscript progress', 'sub_progress_time', '+=', 'current_subscript_elapsed_time', '# if there are scripts that have not been executed yet', '# assume that all the scripts that have not been executed yet take as long as the average of the other scripts', 'if', 'remaining_scripts', '==', 'num_subscripts', ':', '# none of the subscript has been finished. assume that all the scripts take as long as the first', 'loop_execution_time', '=', 'num_subscripts', '*', 'current_subscript_exec_duration', 'elif', 'remaining_scripts', '>', '1', ':', 'loop_execution_time', '=', '1.', '*', 'num_subscripts', '/', '(', 'num_subscripts', '-', 'remaining_scripts', ')', 'elif', 'remaining_scripts', '==', '1', ':', '# there is only one script left which is the current script', 'loop_execution_time', '+=', 'current_subscript_exec_duration', 'if', 'loop_execution_time', '>', '0', ':', 'progress_subscript', '=', '100.', '*', 'sub_progress_time', '/', 'loop_execution_time', 'else', ':', 'progress_subscript', '=', '1.', '*', 'progress_subscript', '/', 'num_subscripts', "# print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript))", 'progress', '=', '100.', '*', '(', 'loop_index', '-', '1.', '+', '0.01', '*', 'progress_subscript', ')', '/', 'num_iterations', 'else', ':', "# if can't estimate the remaining time set to half", 'progress', '=', '50', 'return', 'progress'] | estimates the current progress that is then used in _receive_signal
:return: current progress in percent | ['estimates', 'the', 'current', 'progress', 'that', 'is', 'then', 'used', 'in', '_receive_signal'] | train | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/script_iterator.py#L242-L338 |
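The estimate in the record above ultimately reduces to progress = 100 * (loop_index - 1 + 0.01 * progress_subscript) / num_iterations. A small numeric illustration with assumed values (third iteration of ten, current iteration 40% done):

loop_index = 3            # currently on the third loop iteration
num_iterations = 10       # total iterations configured
progress_subscript = 40.  # percent completed within the current iteration

progress = 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations
print(progress)  # 24.0 -> two full iterations plus 40% of the third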
844 | sprockets/sprockets-influxdb | sprockets_influxdb.py | _on_timeout | def _on_timeout():
"""Invoked periodically to ensure that metrics that have been collected
are submitted to InfluxDB.
:rtype: tornado.concurrent.Future or None
"""
global _buffer_size
LOGGER.debug('No metrics submitted in the last %.2f seconds',
_timeout_interval / 1000.0)
_buffer_size = _pending_measurements()
if _buffer_size:
return _trigger_batch_write()
_start_timeout() | python | def _on_timeout():
"""Invoked periodically to ensure that metrics that have been collected
are submitted to InfluxDB.
:rtype: tornado.concurrent.Future or None
"""
global _buffer_size
LOGGER.debug('No metrics submitted in the last %.2f seconds',
_timeout_interval / 1000.0)
_buffer_size = _pending_measurements()
if _buffer_size:
return _trigger_batch_write()
_start_timeout() | ['def', '_on_timeout', '(', ')', ':', 'global', '_buffer_size', 'LOGGER', '.', 'debug', '(', "'No metrics submitted in the last %.2f seconds'", ',', '_timeout_interval', '/', '1000.0', ')', '_buffer_size', '=', '_pending_measurements', '(', ')', 'if', '_buffer_size', ':', 'return', '_trigger_batch_write', '(', ')', '_start_timeout', '(', ')'] | Invoked periodically to ensure that metrics that have been collected
are submitted to InfluxDB.
:rtype: tornado.concurrent.Future or None | ['Invoked', 'periodically', 'to', 'ensure', 'that', 'metrics', 'that', 'have', 'been', 'collected', 'are', 'submitted', 'to', 'InfluxDB', '.'] | train | https://github.com/sprockets/sprockets-influxdb/blob/cce73481b8f26b02e65e3f9914a9a22eceff3063/sprockets_influxdb.py#L604-L618 |
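The callback above either flushes pending measurements or re-arms the timeout so the check repeats. Below is a stdlib-only sketch of that re-arming pattern using threading.Timer; PeriodicFlusher and its 10-second default are illustrative and simplify away the real module's batch-write path.

import threading

class PeriodicFlusher(object):
    """Flush buffered items when the timer fires, then schedule the next check."""

    def __init__(self, interval=10.0):
        self.interval = interval
        self.buffer = []

    def start(self):
        threading.Timer(self.interval, self._on_timeout).start()

    def _on_timeout(self):
        if self.buffer:
            batch, self.buffer = self.buffer, []
            print('flushing %d measurements' % len(batch))
        self.start()  # re-arm, mirroring _start_timeout() above

# PeriodicFlusher(interval=1.0).start()  # would check the buffer once per second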
845 | learningequality/iceqube | src/iceqube/worker/backends/base.py | BaseWorkerBackend.process_messages | def process_messages(self):
"""
Read from the incoming_message_mailbox and report to the storage backend
based on the first message found there.
Returns: None
"""
try:
msg = self.msgbackend.pop(self.incoming_message_mailbox)
self.handle_incoming_message(msg)
except queue.Empty:
logger.debug("Worker message queue currently empty.") | python | def process_messages(self):
"""
Read from the incoming_message_mailbox and report to the storage backend
based on the first message found there.
Returns: None
"""
try:
msg = self.msgbackend.pop(self.incoming_message_mailbox)
self.handle_incoming_message(msg)
except queue.Empty:
logger.debug("Worker message queue currently empty.") | ['def', 'process_messages', '(', 'self', ')', ':', 'try', ':', 'msg', '=', 'self', '.', 'msgbackend', '.', 'pop', '(', 'self', '.', 'incoming_message_mailbox', ')', 'self', '.', 'handle_incoming_message', '(', 'msg', ')', 'except', 'queue', '.', 'Empty', ':', 'logger', '.', 'debug', '(', '"Worker message queue currently empty."', ')'] | Read from the incoming_message_mailbox and report to the storage backend
based on the first message found there.
Returns: None | ['Read', 'from', 'the', 'incoming_message_mailbox', 'and', 'report', 'to', 'the', 'storage', 'backend', 'based', 'on', 'the', 'first', 'message', 'found', 'there', '.', 'Returns', ':', 'None'] | train | https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/worker/backends/base.py#L58-L68 |
846 | wonambi-python/wonambi | wonambi/widgets/notes.py | ExportEventsDialog.button_clicked | def button_clicked(self, button):
"""Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed
"""
if button is self.idx_ok:
fn = Path(self.filename)
xp_format = self.xp_format.get_value()
if self.all_types.get_value():
evt_type = self.event_types
else:
evt_type = [
x.text() for x in self.idx_evt_type.selectedItems()]
if 'CSV' == xp_format:
self.parent.notes.annot.export_events(fn, evt_type)
elif 'Brain Vision' == xp_format:
events = []
for et in evt_type:
events.extend(self.parent.notes.annot.get_events(name=et))
if not events:
self.parent.statusBar.showMessage('No events found.')
return
events = sorted(events, key=lambda x: x['start'])
dataset = self.parent.info.dataset
data = ChanTime()
data.start_time = dataset.header['start_time']
data.s_freq = int(dataset.header['s_freq'])
with fn.with_suffix('.vmrk').open('w') as f:
lg.info('Writing to ' + str(fn) + '.vmrk')
f.write(_write_vmrk(data, fn, events))
self.accept()
if button is self.idx_cancel:
self.reject() | python | def button_clicked(self, button):
"""Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed
"""
if button is self.idx_ok:
fn = Path(self.filename)
xp_format = self.xp_format.get_value()
if self.all_types.get_value():
evt_type = self.event_types
else:
evt_type = [
x.text() for x in self.idx_evt_type.selectedItems()]
if 'CSV' == xp_format:
self.parent.notes.annot.export_events(fn, evt_type)
elif 'Brain Vision' == xp_format:
events = []
for et in evt_type:
events.extend(self.parent.notes.annot.get_events(name=et))
if not events:
self.parent.statusBar.showMessage('No events found.')
return
events = sorted(events, key=lambda x: x['start'])
dataset = self.parent.info.dataset
data = ChanTime()
data.start_time = dataset.header['start_time']
data.s_freq = int(dataset.header['s_freq'])
with fn.with_suffix('.vmrk').open('w') as f:
lg.info('Writing to ' + str(fn) + '.vmrk')
f.write(_write_vmrk(data, fn, events))
self.accept()
if button is self.idx_cancel:
self.reject() | ['def', 'button_clicked', '(', 'self', ',', 'button', ')', ':', 'if', 'button', 'is', 'self', '.', 'idx_ok', ':', 'fn', '=', 'Path', '(', 'self', '.', 'filename', ')', 'xp_format', '=', 'self', '.', 'xp_format', '.', 'get_value', '(', ')', 'if', 'self', '.', 'all_types', '.', 'get_value', '(', ')', ':', 'evt_type', '=', 'self', '.', 'event_types', 'else', ':', 'evt_type', '=', '[', 'x', '.', 'text', '(', ')', 'for', 'x', 'in', 'self', '.', 'idx_evt_type', '.', 'selectedItems', '(', ')', ']', 'if', "'CSV'", '==', 'xp_format', ':', 'self', '.', 'parent', '.', 'notes', '.', 'annot', '.', 'export_events', '(', 'fn', ',', 'evt_type', ')', 'elif', "'Brain Vision'", '==', 'xp_format', ':', 'events', '=', '[', ']', 'for', 'et', 'in', 'evt_type', ':', 'events', '.', 'extend', '(', 'self', '.', 'parent', '.', 'notes', '.', 'annot', '.', 'get_events', '(', 'name', '=', 'et', ')', ')', 'if', 'not', 'events', ':', 'self', '.', 'parent', '.', 'statusBar', '.', 'showMessage', '(', "'No events found.'", ')', 'return', 'events', '=', 'sorted', '(', 'events', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', "'start'", ']', ')', 'dataset', '=', 'self', '.', 'parent', '.', 'info', '.', 'dataset', 'data', '=', 'ChanTime', '(', ')', 'data', '.', 'start_time', '=', 'dataset', '.', 'header', '[', "'start_time'", ']', 'data', '.', 's_freq', '=', 'int', '(', 'dataset', '.', 'header', '[', "'s_freq'", ']', ')', 'with', 'fn', '.', 'with_suffix', '(', "'.vmrk'", ')', '.', 'open', '(', "'w'", ')', 'as', 'f', ':', 'lg', '.', 'info', '(', "'Writing to '", '+', 'str', '(', 'fn', ')', '+', "'.vmrk'", ')', 'f', '.', 'write', '(', '_write_vmrk', '(', 'data', ',', 'fn', ',', 'events', ')', ')', 'self', '.', 'accept', '(', ')', 'if', 'button', 'is', 'self', '.', 'idx_cancel', ':', 'self', '.', 'reject', '(', ')'] | Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed | ['Action', 'when', 'button', 'was', 'clicked', '.'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1929-L1974 |
847 | goerz/clusterjob | clusterjob/__init__.py | AsyncResult.get | def get(self, timeout=None):
"""Return status"""
status = self.status
if status >= COMPLETED:
return status
else:
self.wait(timeout)
return self.status | python | def get(self, timeout=None):
"""Return status"""
status = self.status
if status >= COMPLETED:
return status
else:
self.wait(timeout)
return self.status | ['def', 'get', '(', 'self', ',', 'timeout', '=', 'None', ')', ':', 'status', '=', 'self', '.', 'status', 'if', 'status', '>=', 'COMPLETED', ':', 'return', 'status', 'else', ':', 'self', '.', 'wait', '(', 'timeout', ')', 'return', 'self', '.', 'status'] | Return status | ['Return', 'status'] | train | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L954-L961 |
848 | dpkp/kafka-python | kafka/producer/record_accumulator.py | RecordAccumulator.abort_expired_batches | def abort_expired_batches(self, request_timeout_ms, cluster):
"""Abort the batches that have been sitting in RecordAccumulator for
more than the configured request_timeout due to metadata being
unavailable.
Arguments:
request_timeout_ms (int): milliseconds to timeout
cluster (ClusterMetadata): current metadata for kafka cluster
Returns:
list of ProducerBatch that were expired
"""
expired_batches = []
to_remove = []
count = 0
for tp in list(self._batches.keys()):
assert tp in self._tp_locks, 'TopicPartition not in locks dict'
# We only check if the batch should be expired if the partition
# does not have a batch in flight. This is to avoid the later
# batches get expired when an earlier batch is still in progress.
# This protection only takes effect when user sets
# max.in.flight.request.per.connection=1. Otherwise the expiration
# order is not guaranteed.
if tp in self.muted:
continue
with self._tp_locks[tp]:
# iterate over the batches and expire them if they have stayed
# in accumulator for more than request_timeout_ms
dq = self._batches[tp]
for batch in dq:
is_full = bool(bool(batch != dq[-1]) or batch.records.is_full())
# check if the batch is expired
if batch.maybe_expire(request_timeout_ms,
self.config['retry_backoff_ms'],
self.config['linger_ms'],
is_full):
expired_batches.append(batch)
to_remove.append(batch)
count += 1
self.deallocate(batch)
else:
# Stop at the first batch that has not expired.
break
# Python does not allow us to mutate the dq during iteration
# Assuming expired batches are infrequent, this is better than
# creating a new copy of the deque for iteration on every loop
if to_remove:
for batch in to_remove:
dq.remove(batch)
to_remove = []
if expired_batches:
log.warning("Expired %d batches in accumulator", count) # trace
return expired_batches | python | def abort_expired_batches(self, request_timeout_ms, cluster):
"""Abort the batches that have been sitting in RecordAccumulator for
more than the configured request_timeout due to metadata being
unavailable.
Arguments:
request_timeout_ms (int): milliseconds to timeout
cluster (ClusterMetadata): current metadata for kafka cluster
Returns:
list of ProducerBatch that were expired
"""
expired_batches = []
to_remove = []
count = 0
for tp in list(self._batches.keys()):
assert tp in self._tp_locks, 'TopicPartition not in locks dict'
# We only check if the batch should be expired if the partition
# does not have a batch in flight. This is to avoid the later
# batches get expired when an earlier batch is still in progress.
# This protection only takes effect when user sets
# max.in.flight.request.per.connection=1. Otherwise the expiration
# order is not guaranteed.
if tp in self.muted:
continue
with self._tp_locks[tp]:
# iterate over the batches and expire them if they have stayed
# in accumulator for more than request_timeout_ms
dq = self._batches[tp]
for batch in dq:
is_full = bool(bool(batch != dq[-1]) or batch.records.is_full())
# check if the batch is expired
if batch.maybe_expire(request_timeout_ms,
self.config['retry_backoff_ms'],
self.config['linger_ms'],
is_full):
expired_batches.append(batch)
to_remove.append(batch)
count += 1
self.deallocate(batch)
else:
# Stop at the first batch that has not expired.
break
# Python does not allow us to mutate the dq during iteration
# Assuming expired batches are infrequent, this is better than
# creating a new copy of the deque for iteration on every loop
if to_remove:
for batch in to_remove:
dq.remove(batch)
to_remove = []
if expired_batches:
log.warning("Expired %d batches in accumulator", count) # trace
return expired_batches | ['def', 'abort_expired_batches', '(', 'self', ',', 'request_timeout_ms', ',', 'cluster', ')', ':', 'expired_batches', '=', '[', ']', 'to_remove', '=', '[', ']', 'count', '=', '0', 'for', 'tp', 'in', 'list', '(', 'self', '.', '_batches', '.', 'keys', '(', ')', ')', ':', 'assert', 'tp', 'in', 'self', '.', '_tp_locks', ',', "'TopicPartition not in locks dict'", '# We only check if the batch should be expired if the partition', '# does not have a batch in flight. This is to avoid the later', '# batches get expired when an earlier batch is still in progress.', '# This protection only takes effect when user sets', '# max.in.flight.request.per.connection=1. Otherwise the expiration', '# order is not guranteed.', 'if', 'tp', 'in', 'self', '.', 'muted', ':', 'continue', 'with', 'self', '.', '_tp_locks', '[', 'tp', ']', ':', '# iterate over the batches and expire them if they have stayed', '# in accumulator for more than request_timeout_ms', 'dq', '=', 'self', '.', '_batches', '[', 'tp', ']', 'for', 'batch', 'in', 'dq', ':', 'is_full', '=', 'bool', '(', 'bool', '(', 'batch', '!=', 'dq', '[', '-', '1', ']', ')', 'or', 'batch', '.', 'records', '.', 'is_full', '(', ')', ')', '# check if the batch is expired', 'if', 'batch', '.', 'maybe_expire', '(', 'request_timeout_ms', ',', 'self', '.', 'config', '[', "'retry_backoff_ms'", ']', ',', 'self', '.', 'config', '[', "'linger_ms'", ']', ',', 'is_full', ')', ':', 'expired_batches', '.', 'append', '(', 'batch', ')', 'to_remove', '.', 'append', '(', 'batch', ')', 'count', '+=', '1', 'self', '.', 'deallocate', '(', 'batch', ')', 'else', ':', '# Stop at the first batch that has not expired.', 'break', '# Python does not allow us to mutate the dq during iteration', '# Assuming expired batches are infrequent, this is better than', '# creating a new copy of the deque for iteration on every loop', 'if', 'to_remove', ':', 'for', 'batch', 'in', 'to_remove', ':', 'dq', '.', 'remove', '(', 'batch', ')', 'to_remove', '=', '[', ']', 'if', 'expired_batches', ':', 'log', '.', 'warning', '(', '"Expired %d batches in accumulator"', ',', 'count', ')', '# trace', 'return', 'expired_batches'] | Abort the batches that have been sitting in RecordAccumulator for
more than the configured request_timeout due to metadata being
unavailable.
Arguments:
request_timeout_ms (int): milliseconds to timeout
cluster (ClusterMetadata): current metadata for kafka cluster
Returns:
list of ProducerBatch that were expired | ['Abort', 'the', 'batches', 'that', 'have', 'been', 'sitting', 'in', 'RecordAccumulator', 'for', 'more', 'than', 'the', 'configured', 'request_timeout', 'due', 'to', 'metadata', 'being', 'unavailable', '.'] | train | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/producer/record_accumulator.py#L277-L334 |
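The method above walks each partition's deque oldest-first, stops at the first batch that has not expired, and only removes entries after iteration. A simplified, self-contained sketch of that pattern follows; the batch dictionaries and the 60-second timeout are made-up stand-ins for the real ProducerBatch objects.

import time
from collections import deque

def expire_old_batches(batches_by_partition, timeout_s):
    expired = []
    now = time.time()
    for tp, dq in batches_by_partition.items():
        to_remove = []
        for batch in dq:
            if now - batch['created'] > timeout_s:
                to_remove.append(batch)
            else:
                break  # batches are ordered, so the rest are still fresh
        for batch in to_remove:
            dq.remove(batch)  # mutate the deque only after iterating over it
        expired.extend(to_remove)
    return expired

batches = {'topic-0': deque([{'id': 1, 'created': time.time() - 120},
                             {'id': 2, 'created': time.time()}])}
print([b['id'] for b in expire_old_batches(batches, timeout_s=60)])  # [1]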
849 | iotile/coretools | iotilesensorgraph/iotile/sg/graph.py | SensorGraph.get_tick | def get_tick(self, name):
"""Check the config variables to see if there is a configurable tick.
Sensor Graph has a built-in 10 second tick that is sent every 10
seconds to allow for triggering timed events. There is a second
'user' tick that is generated internally by the sensorgraph compiler
and used for fast operations and finally there are several field
configurable ticks that can be used for setting up configurable
timers.
This is done by setting a config variable on the controller with the
desired tick interval, which is then interpreted by this function.
The appropriate config_id to use is listed in `known_constants.py`
Returns:
int: 0 if the tick is disabled, otherwise the number of seconds
between each tick
"""
name_map = {
'fast': config_fast_tick_secs,
'user1': config_tick1_secs,
'user2': config_tick2_secs
}
config = name_map.get(name)
if config is None:
raise ArgumentError("Unknown tick requested", name=name)
slot = SlotIdentifier.FromString('controller')
try:
var = self.get_config(slot, config)
return var[1]
except ArgumentError:
return 0 | python | def get_tick(self, name):
"""Check the config variables to see if there is a configurable tick.
Sensor Graph has a built-in 10 second tick that is sent every 10
seconds to allow for triggering timed events. There is a second
'user' tick that is generated internally by the sensorgraph compiler
and used for fast operations and finally there are several field
configurable ticks that can be used for setting up configurable
timers.
This is done by setting a config variable on the controller with the
desired tick interval, which is then interpreted by this function.
The appropriate config_id to use is listed in `known_constants.py`
Returns:
int: 0 if the tick is disabled, otherwise the number of seconds
between each tick
"""
name_map = {
'fast': config_fast_tick_secs,
'user1': config_tick1_secs,
'user2': config_tick2_secs
}
config = name_map.get(name)
if config is None:
raise ArgumentError("Unknown tick requested", name=name)
slot = SlotIdentifier.FromString('controller')
try:
var = self.get_config(slot, config)
return var[1]
except ArgumentError:
return 0 | ['def', 'get_tick', '(', 'self', ',', 'name', ')', ':', 'name_map', '=', '{', "'fast'", ':', 'config_fast_tick_secs', ',', "'user1'", ':', 'config_tick1_secs', ',', "'user2'", ':', 'config_tick2_secs', '}', 'config', '=', 'name_map', '.', 'get', '(', 'name', ')', 'if', 'config', 'is', 'None', ':', 'raise', 'ArgumentError', '(', '"Unknown tick requested"', ',', 'name', '=', 'name', ')', 'slot', '=', 'SlotIdentifier', '.', 'FromString', '(', "'controller'", ')', 'try', ':', 'var', '=', 'self', '.', 'get_config', '(', 'slot', ',', 'config', ')', 'return', 'var', '[', '1', ']', 'except', 'ArgumentError', ':', 'return', '0'] | Check the config variables to see if there is a configurable tick.
Sensor Graph has a built-in 10 second tick that is sent every 10
seconds to allow for triggering timed events. There is a second
'user' tick that is generated internally by the sensorgraph compiler
and used for fast operations and finally there are several field
configurable ticks that can be used for setting up configurable
timers.
This is done by setting a config variable on the controller with the
desired tick interval, which is then interpreted by this function.
The appropriate config_id to use is listed in `known_constants.py`
Returns:
int: 0 if the tick is disabled, otherwise the number of seconds
between each tick | ['Check', 'the', 'config', 'variables', 'to', 'see', 'if', 'there', 'is', 'a', 'configurable', 'tick', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/graph.py#L282-L318 |
850 | iotile/coretools | transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py | NativeBLEDeviceAdapter._on_interface_opened | def _on_interface_opened(self, success, result, failure_reason, context, next_characteristic=None):
"""Callback function called when the notification related to an interface has been enabled.
It is executed in the baBLE working thread: should not be blocking.
Args:
success (bool): A bool indicating that the operation is successful or not
result (dict): Information (if successful)
failure_reason (any): An object indicating the reason why the operation is not successful (else None)
context (dict): The connection context
next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable
notification.
"""
if not success:
self.connections.finish_operation(context['connection_id'], False, failure_reason)
return
if next_characteristic is not None:
self.bable.set_notification(
enabled=True,
connection_handle=context['connection_handle'],
characteristic=next_characteristic,
on_notification_set=[self._on_interface_opened, context],
on_notification_received=self._on_notification_received,
sync=False
)
else:
self.connections.finish_operation(context['connection_id'], True, None) | python | def _on_interface_opened(self, success, result, failure_reason, context, next_characteristic=None):
"""Callback function called when the notification related to an interface has been enabled.
It is executed in the baBLE working thread: should not be blocking.
Args:
success (bool): A bool indicating that the operation is successful or not
result (dict): Information (if successful)
failure_reason (any): An object indicating the reason why the operation is not successful (else None)
context (dict): The connection context
next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable
notification.
"""
if not success:
self.connections.finish_operation(context['connection_id'], False, failure_reason)
return
if next_characteristic is not None:
self.bable.set_notification(
enabled=True,
connection_handle=context['connection_handle'],
characteristic=next_characteristic,
on_notification_set=[self._on_interface_opened, context],
on_notification_received=self._on_notification_received,
sync=False
)
else:
self.connections.finish_operation(context['connection_id'], True, None) | ['def', '_on_interface_opened', '(', 'self', ',', 'success', ',', 'result', ',', 'failure_reason', ',', 'context', ',', 'next_characteristic', '=', 'None', ')', ':', 'if', 'not', 'success', ':', 'self', '.', 'connections', '.', 'finish_operation', '(', 'context', '[', "'connection_id'", ']', ',', 'False', ',', 'failure_reason', ')', 'return', 'if', 'next_characteristic', 'is', 'not', 'None', ':', 'self', '.', 'bable', '.', 'set_notification', '(', 'enabled', '=', 'True', ',', 'connection_handle', '=', 'context', '[', "'connection_handle'", ']', ',', 'characteristic', '=', 'next_characteristic', ',', 'on_notification_set', '=', '[', 'self', '.', '_on_interface_opened', ',', 'context', ']', ',', 'on_notification_received', '=', 'self', '.', '_on_notification_received', ',', 'sync', '=', 'False', ')', 'else', ':', 'self', '.', 'connections', '.', 'finish_operation', '(', 'context', '[', "'connection_id'", ']', ',', 'True', ',', 'None', ')'] | Callback function called when the notification related to an interface has been enabled.
It is executed in the baBLE working thread: should not be blocking.
Args:
success (bool): A bool indicating that the operation is successful or not
result (dict): Information (if successful)
failure_reason (any): An object indicating the reason why the operation is not successful (else None)
context (dict): The connection context
next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable
notification. | ['Callback', 'function', 'called', 'when', 'the', 'notification', 'related', 'to', 'an', 'interface', 'has', 'been', 'enabled', '.', 'It', 'is', 'executed', 'in', 'the', 'baBLE', 'working', 'thread', ':', 'should', 'not', 'be', 'blocking', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py#L649-L675 |
851 | saltstack/salt | salt/modules/solr.py | is_replication_enabled | def is_replication_enabled(host=None, core_name=None):
'''
SLAVE CALL
Check for errors, and determine if a slave is replicating or not.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.is_replication_enabled music
'''
ret = _get_return_dict()
success = True
# since only slaves can call this let's check the config:
if _is_master() and host is None:
errors = ['Only "slave" minions can run "is_replication_enabled"']
return ret.update({'success': False, 'errors': errors})
# define a convenience method so we don't duplicate code
def _checks(ret, success, resp, core):
if response['success']:
slave = resp['data']['details']['slave']
# we need to initialize this to false in case there is an error
# on the master and we can't get this info.
enabled = 'false'
master_url = slave['masterUrl']
# check for errors on the slave
if 'ERROR' in slave:
success = False
err = "{0}: {1} - {2}".format(core, slave['ERROR'], master_url)
resp['errors'].append(err)
# if there is an error return everything
data = slave if core is None else {core: {'data': slave}}
else:
enabled = slave['masterDetails']['master'][
'replicationEnabled']
# if replication is turned off on the master, or polling is
# disabled we need to return false. These may not be errors,
# but the purpose of this call is to check to see if the slaves
# can replicate.
if enabled == 'false':
resp['warnings'].append("Replication is disabled on master.")
success = False
if slave['isPollingDisabled'] == 'true':
success = False
resp['warning'].append("Polling is disabled")
# update the return
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return (ret, success)
if _get_none_or_value(core_name) is None and _check_for_cores():
for name in __opts__['solr.cores']:
response = _replication_request('details', host=host,
core_name=name)
ret, success = _checks(ret, success, response, name)
else:
response = _replication_request('details', host=host,
core_name=core_name)
ret, success = _checks(ret, success, response, core_name)
return ret | python | def is_replication_enabled(host=None, core_name=None):
'''
SLAVE CALL
Check for errors, and determine if a slave is replicating or not.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.is_replication_enabled music
'''
ret = _get_return_dict()
success = True
# since only slaves can call this let's check the config:
if _is_master() and host is None:
errors = ['Only "slave" minions can run "is_replication_enabled"']
return ret.update({'success': False, 'errors': errors})
# define a convenience method so we don't duplicate code
def _checks(ret, success, resp, core):
if response['success']:
slave = resp['data']['details']['slave']
# we need to initialize this to false in case there is an error
# on the master and we can't get this info.
enabled = 'false'
master_url = slave['masterUrl']
# check for errors on the slave
if 'ERROR' in slave:
success = False
err = "{0}: {1} - {2}".format(core, slave['ERROR'], master_url)
resp['errors'].append(err)
# if there is an error return everything
data = slave if core is None else {core: {'data': slave}}
else:
enabled = slave['masterDetails']['master'][
'replicationEnabled']
# if replication is turned off on the master, or polling is
# disabled we need to return false. These may not be errors,
# but the purpose of this call is to check to see if the slaves
# can replicate.
if enabled == 'false':
resp['warnings'].append("Replication is disabled on master.")
success = False
if slave['isPollingDisabled'] == 'true':
success = False
resp['warning'].append("Polling is disabled")
# update the return
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
return (ret, success)
if _get_none_or_value(core_name) is None and _check_for_cores():
for name in __opts__['solr.cores']:
response = _replication_request('details', host=host,
core_name=name)
ret, success = _checks(ret, success, response, name)
else:
response = _replication_request('details', host=host,
core_name=core_name)
ret, success = _checks(ret, success, response, core_name)
return ret | ['def', 'is_replication_enabled', '(', 'host', '=', 'None', ',', 'core_name', '=', 'None', ')', ':', 'ret', '=', '_get_return_dict', '(', ')', 'success', '=', 'True', "# since only slaves can call this let's check the config:", 'if', '_is_master', '(', ')', 'and', 'host', 'is', 'None', ':', 'errors', '=', '[', '\'Only "slave" minions can run "is_replication_enabled"\'', ']', 'return', 'ret', '.', 'update', '(', '{', "'success'", ':', 'False', ',', "'errors'", ':', 'errors', '}', ')', "# define a convenience method so we don't duplicate code", 'def', '_checks', '(', 'ret', ',', 'success', ',', 'resp', ',', 'core', ')', ':', 'if', 'response', '[', "'success'", ']', ':', 'slave', '=', 'resp', '[', "'data'", ']', '[', "'details'", ']', '[', "'slave'", ']', '# we need to initialize this to false in case there is an error', "# on the master and we can't get this info.", 'enabled', '=', "'false'", 'master_url', '=', 'slave', '[', "'masterUrl'", ']', '# check for errors on the slave', 'if', "'ERROR'", 'in', 'slave', ':', 'success', '=', 'False', 'err', '=', '"{0}: {1} - {2}"', '.', 'format', '(', 'core', ',', 'slave', '[', "'ERROR'", ']', ',', 'master_url', ')', 'resp', '[', "'errors'", ']', '.', 'append', '(', 'err', ')', '# if there is an error return everything', 'data', '=', 'slave', 'if', 'core', 'is', 'None', 'else', '{', 'core', ':', '{', "'data'", ':', 'slave', '}', '}', 'else', ':', 'enabled', '=', 'slave', '[', "'masterDetails'", ']', '[', "'master'", ']', '[', "'replicationEnabled'", ']', '# if replication is turned off on the master, or polling is', '# disabled we need to return false. These may not be errors,', '# but the purpose of this call is to check to see if the slaves', '# can replicate.', 'if', 'enabled', '==', "'false'", ':', 'resp', '[', "'warnings'", ']', '.', 'append', '(', '"Replication is disabled on master."', ')', 'success', '=', 'False', 'if', 'slave', '[', "'isPollingDisabled'", ']', '==', "'true'", ':', 'success', '=', 'False', 'resp', '[', "'warning'", ']', '.', 'append', '(', '"Polling is disabled"', ')', '# update the return', 'ret', '=', '_update_return_dict', '(', 'ret', ',', 'success', ',', 'data', ',', 'resp', '[', "'errors'", ']', ',', 'resp', '[', "'warnings'", ']', ')', 'return', '(', 'ret', ',', 'success', ')', 'if', '_get_none_or_value', '(', 'core_name', ')', 'is', 'None', 'and', '_check_for_cores', '(', ')', ':', 'for', 'name', 'in', '__opts__', '[', "'solr.cores'", ']', ':', 'response', '=', '_replication_request', '(', "'details'", ',', 'host', '=', 'host', ',', 'core_name', '=', 'name', ')', 'ret', ',', 'success', '=', '_checks', '(', 'ret', ',', 'success', ',', 'response', ',', 'name', ')', 'else', ':', 'response', '=', '_replication_request', '(', "'details'", ',', 'host', '=', 'host', ',', 'core_name', '=', 'core_name', ')', 'ret', ',', 'success', '=', '_checks', '(', 'ret', ',', 'success', ',', 'response', ',', 'core_name', ')', 'return', 'ret'] | SLAVE CALL
Check for errors, and determine if a slave is replicating or not.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.is_replication_enabled music | ['SLAVE', 'CALL', 'Check', 'for', 'errors', 'and', 'determine', 'if', 'a', 'slave', 'is', 'replicating', 'or', 'not', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L637-L708 |
852 | dalloriam/engel | engel/libraries/bootstrap4/widgets/structure.py | ImageCard.build | def build(self, title, text, img_url):
"""
:param title: Title of the card
:param text: Description of the card
:param img_url: Image of the card
"""
super(ImageCard, self).build()
self.title = Title(id=self.id + "-title", text=title, classname="card-title", size=3, parent=self)
self.block = Panel(id=self.id + "-block", classname="card-block", parent=self)
self.image = Image(id=self.id + "-image", img_url=img_url, classname="card-image-top img-fluid", parent=self.block)
self.text = Paragraph(id=self.id + "-text", text=text, classname="card-text", parent=self.block) | python | def build(self, title, text, img_url):
"""
:param title: Title of the card
:param text: Description of the card
:param img_url: Image of the card
"""
super(ImageCard, self).build()
self.title = Title(id=self.id + "-title", text=title, classname="card-title", size=3, parent=self)
self.block = Panel(id=self.id + "-block", classname="card-block", parent=self)
self.image = Image(id=self.id + "-image", img_url=img_url, classname="card-image-top img-fluid", parent=self.block)
self.text = Paragraph(id=self.id + "-text", text=text, classname="card-text", parent=self.block) | ['def', 'build', '(', 'self', ',', 'title', ',', 'text', ',', 'img_url', ')', ':', 'super', '(', 'ImageCard', ',', 'self', ')', '.', 'build', '(', ')', 'self', '.', 'title', '=', 'Title', '(', 'id', '=', 'self', '.', 'id', '+', '"-title"', ',', 'text', '=', 'title', ',', 'classname', '=', '"card-title"', ',', 'size', '=', '3', ',', 'parent', '=', 'self', ')', 'self', '.', 'block', '=', 'Panel', '(', 'id', '=', 'self', '.', 'id', '+', '"-block"', ',', 'classname', '=', '"card-block"', ',', 'parent', '=', 'self', ')', 'self', '.', 'image', '=', 'Image', '(', 'id', '=', 'self', '.', 'id', '+', '"-image"', ',', 'img_url', '=', 'img_url', ',', 'classname', '=', '"card-image-top img-fluid"', ',', 'parent', '=', 'self', '.', 'block', ')', 'self', '.', 'text', '=', 'Paragraph', '(', 'id', '=', 'self', '.', 'id', '+', '"-text"', ',', 'text', '=', 'text', ',', 'classname', '=', '"card-text"', ',', 'parent', '=', 'self', '.', 'block', ')'] | :param title: Title of the card
:param text: Description of the card
:param img_url: Image of the card | [':', 'param', 'title', ':', 'Title', 'of', 'the', 'card', ':', 'param', 'text', ':', 'Description', 'of', 'the', 'card', ':', 'param', 'img_url', ':', 'Image', 'of', 'the', 'card'] | train | https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/libraries/bootstrap4/widgets/structure.py#L32-L44 |
853 | bububa/pyTOP | pyTOP/wangwang.py | EService.evals_get | def evals_get(self, service_staff_id, start_date, end_date, session):
'''taobao.wangwang.eservice.evals.get 获取评价详细
根据用户id查询用户对应的评价详细情况, 主账号id可以查询店铺内子账号的评价 组管理员可以查询组内账号的评价 非管理员的子账号可以查自己的评价'''
request = TOPRequest('taobao.wangwang.eservice.evals.get')
request['service_staff_id'] = service_staff_id
request['start_date'] = start_date
request['end_date'] = end_date
self.create(self.execute(request, session))
return self.staff_eval_details | python | def evals_get(self, service_staff_id, start_date, end_date, session):
'''taobao.wangwang.eservice.evals.get 获取评价详细
根据用户id查询用户对应的评价详细情况, 主账号id可以查询店铺内子账号的评价 组管理员可以查询组内账号的评价 非管理员的子账号可以查自己的评价'''
request = TOPRequest('taobao.wangwang.eservice.evals.get')
request['service_staff_id'] = service_staff_id
request['start_date'] = start_date
request['end_date'] = end_date
self.create(self.execute(request, session))
return self.staff_eval_details | ['def', 'evals_get', '(', 'self', ',', 'service_staff_id', ',', 'start_date', ',', 'end_date', ',', 'session', ')', ':', 'request', '=', 'TOPRequest', '(', "'taobao.wangwang.eservice.evals.get'", ')', 'request', '[', "'service_staff_id'", ']', '=', 'service_staff_id', 'request', '[', "'start_date'", ']', '=', 'start_date', 'request', '[', "'end_date'", ']', '=', 'end_date', 'self', '.', 'create', '(', 'self', '.', 'execute', '(', 'request', ',', 'session', ')', ')', 'return', 'self', '.', 'staff_eval_details'] | taobao.wangwang.eservice.evals.get 获取评价详细
根据用户id查询用户对应的评价详细情况, 主账号id可以查询店铺内子账号的评价 组管理员可以查询组内账号的评价 非管理员的子账号可以查自己的评价 | ['taobao', '.', 'wangwang', '.', 'eservice', '.', 'evals', '.', 'get', '获取评价详细', '根据用户id查询用户对应的评价详细情况,', '主账号id可以查询店铺内子账号的评价', '组管理员可以查询组内账号的评价', '非管理员的子账号可以查自己的评价'] | train | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/wangwang.py#L83-L92 |
854 | pyamg/pyamg | pyamg/classical/cr.py | _CRsweep | def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method):
"""Perform CR sweeps on a target vector.
Internal function called by CR. Performs habituated or concurrent
relaxation sweeps on target vector. Stops when either (i) very fast
convergence, CF < 0.1*thetacr, are observed, or at least a given number
of sweeps have been performed and the relative change in CF < 0.1.
Parameters
----------
A : csr_matrix
B : array like
Target near null space mode
Findex : array like
List of F indices in current splitting
Cindex : array like
List of C indices in current splitting
nu : int
minimum number of relaxation sweeps to do
thetacr
Desired convergence factor
Returns
-------
rho : float
Convergence factor of last iteration
e : array like
Smoothed error vector
"""
n = A.shape[0] # problem size
numax = nu
z = np.zeros((n,))
e = deepcopy(B[:, 0])
e[Cindex] = 0.0
enorm = norm(e)
rhok = 1
it = 0
while True:
if method == 'habituated':
gauss_seidel(A, e, z, iterations=1)
e[Cindex] = 0.0
elif method == 'concurrent':
gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
else:
raise NotImplementedError('method not recognized: need habituated '
'or concurrent')
enorm_old = enorm
enorm = norm(e)
rhok_old = rhok
rhok = enorm / enorm_old
it += 1
# criteria 1 -- fast convergence
if rhok < 0.1 * thetacr:
break
# criteria 2 -- at least nu iters, small relative change in CF (<0.1)
elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu):
break
return rhok, e | python | def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method):
"""Perform CR sweeps on a target vector.
Internal function called by CR. Performs habituated or concurrent
relaxation sweeps on target vector. Stops when either (i) very fast
convergence, CF < 0.1*thetacr, are observed, or at least a given number
of sweeps have been performed and the relative change in CF < 0.1.
Parameters
----------
A : csr_matrix
B : array like
Target near null space mode
Findex : array like
List of F indices in current splitting
Cindex : array like
List of C indices in current splitting
nu : int
minimum number of relaxation sweeps to do
thetacr
Desired convergence factor
Returns
-------
rho : float
Convergence factor of last iteration
e : array like
Smoothed error vector
"""
n = A.shape[0] # problem size
numax = nu
z = np.zeros((n,))
e = deepcopy(B[:, 0])
e[Cindex] = 0.0
enorm = norm(e)
rhok = 1
it = 0
while True:
if method == 'habituated':
gauss_seidel(A, e, z, iterations=1)
e[Cindex] = 0.0
elif method == 'concurrent':
gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
else:
raise NotImplementedError('method not recognized: need habituated '
'or concurrent')
enorm_old = enorm
enorm = norm(e)
rhok_old = rhok
rhok = enorm / enorm_old
it += 1
# criteria 1 -- fast convergence
if rhok < 0.1 * thetacr:
break
# criteria 2 -- at least nu iters, small relative change in CF (<0.1)
elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu):
break
return rhok, e | ['def', '_CRsweep', '(', 'A', ',', 'B', ',', 'Findex', ',', 'Cindex', ',', 'nu', ',', 'thetacr', ',', 'method', ')', ':', 'n', '=', 'A', '.', 'shape', '[', '0', ']', '# problem size', 'numax', '=', 'nu', 'z', '=', 'np', '.', 'zeros', '(', '(', 'n', ',', ')', ')', 'e', '=', 'deepcopy', '(', 'B', '[', ':', ',', '0', ']', ')', 'e', '[', 'Cindex', ']', '=', '0.0', 'enorm', '=', 'norm', '(', 'e', ')', 'rhok', '=', '1', 'it', '=', '0', 'while', 'True', ':', 'if', 'method', '==', "'habituated'", ':', 'gauss_seidel', '(', 'A', ',', 'e', ',', 'z', ',', 'iterations', '=', '1', ')', 'e', '[', 'Cindex', ']', '=', '0.0', 'elif', 'method', '==', "'concurrent'", ':', 'gauss_seidel_indexed', '(', 'A', ',', 'e', ',', 'z', ',', 'indices', '=', 'Findex', ',', 'iterations', '=', '1', ')', 'else', ':', 'raise', 'NotImplementedError', '(', "'method not recognized: need habituated '", "'or concurrent'", ')', 'enorm_old', '=', 'enorm', 'enorm', '=', 'norm', '(', 'e', ')', 'rhok_old', '=', 'rhok', 'rhok', '=', 'enorm', '/', 'enorm_old', 'it', '+=', '1', '# criteria 1 -- fast convergence', 'if', 'rhok', '<', '0.1', '*', 'thetacr', ':', 'break', '# criteria 2 -- at least nu iters, small relative change in CF (<0.1)', 'elif', '(', '(', 'abs', '(', 'rhok', '-', 'rhok_old', ')', '/', 'rhok', ')', '<', '0.1', ')', 'and', '(', 'it', '>=', 'nu', ')', ':', 'break', 'return', 'rhok', ',', 'e'] | Perform CR sweeps on a target vector.
Internal function called by CR. Performs habituated or concurrent
relaxation sweeps on target vector. Stops when either (i) very fast
convergence, CF < 0.1*thetacr, are observed, or at least a given number
of sweeps have been performed and the relative change in CF < 0.1.
Parameters
----------
A : csr_matrix
B : array like
Target near null space mode
Findex : array like
List of F indices in current splitting
Cindex : array like
List of C indices in current splitting
nu : int
minimum number of relaxation sweeps to do
thetacr
Desired convergence factor
Returns
-------
rho : float
Convergence factor of last iteration
e : array like
Smoothed error vector | ['Perform', 'CR', 'sweeps', 'on', 'a', 'target', 'vector', '.'] | train | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/cr.py#L16-L78 |
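The sweep loop above stops on either of two conditions: a very small convergence factor (rhok < 0.1 * thetacr) or, after at least nu sweeps, a convergence factor that has stopped changing. A toy illustration of just those stopping rules, where a fixed scaling factor stands in for one relaxation sweep:

def run_sweeps(factor, nu=3, thetacr=0.7):
    enorm, rhok, it = 1.0, 1.0, 0
    while True:
        enorm_old, rhok_old = enorm, rhok
        enorm *= factor               # stand-in for one relaxation sweep
        rhok = enorm / enorm_old      # convergence factor of this sweep
        it += 1
        if rhok < 0.1 * thetacr:      # criterion 1: very fast convergence
            return rhok, it
        if abs(rhok - rhok_old) / rhok < 0.1 and it >= nu:
            return rhok, it           # criterion 2: CF has settled after nu sweeps

print(run_sweeps(0.05))  # (0.05, 1) -> stops immediately via criterion 1
print(run_sweeps(0.5))   # (0.5, 3)  -> stops after nu sweeps via criterion 2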
855 | shichao-an/115wangpan | u115/api.py | API._load_torrents_directory | def _load_torrents_directory(self):
"""
Load torrents directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=True)
self._downloads_directory = self._load_directory(r['cid']) | python | def _load_torrents_directory(self):
"""
Load torrents directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=True)
self._downloads_directory = self._load_directory(r['cid']) | ['def', '_load_torrents_directory', '(', 'self', ')', ':', 'r', '=', 'self', '.', '_req_lixian_get_id', '(', 'torrent', '=', 'True', ')', 'self', '.', '_downloads_directory', '=', 'self', '.', '_load_directory', '(', 'r', '[', "'cid'", ']', ')'] | Load torrents directory
If it does not exist yet, this request will cause the system to create
one | ['Load', 'torrents', 'directory'] | train | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1023-L1031 |
856 | ets-labs/python-dependency-injector | examples/providers/dependency.py | UsersService.get_by_id | def get_by_id(self, id):
"""Return user info by user id."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))
return cursor.fetchone() | python | def get_by_id(self, id):
"""Return user info by user id."""
with contextlib.closing(self.database.cursor()) as cursor:
cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))
return cursor.fetchone() | ['def', 'get_by_id', '(', 'self', ',', 'id', ')', ':', 'with', 'contextlib', '.', 'closing', '(', 'self', '.', 'database', '.', 'cursor', '(', ')', ')', 'as', 'cursor', ':', 'cursor', '.', 'execute', '(', "'SELECT id, name FROM users WHERE id=?'", ',', '(', 'id', ',', ')', ')', 'return', 'cursor', '.', 'fetchone', '(', ')'] | Return user info by user id. | ['Return', 'user', 'info', 'by', 'user', 'id', '.'] | train | https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/dependency.py#L40-L44 |
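The contextlib.closing pattern in this record (close the cursor even if the query raises) can be tried with nothing but the standard library; the in-memory database and the row below are made up for illustration.

import contextlib
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)')
db.execute('INSERT INTO users (id, name) VALUES (?, ?)', (1, 'alice'))

# closing() guarantees cursor.close() runs when the with-block exits
with contextlib.closing(db.cursor()) as cursor:
    cursor.execute('SELECT id, name FROM users WHERE id=?', (1,))
    print(cursor.fetchone())   # -> (1, 'alice')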
857 | pandas-dev/pandas | pandas/core/arrays/categorical.py | _recode_for_categories | def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes | python | def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes | ['def', '_recode_for_categories', '(', 'codes', ',', 'old_categories', ',', 'new_categories', ')', ':', 'from', 'pandas', '.', 'core', '.', 'algorithms', 'import', 'take_1d', 'if', 'len', '(', 'old_categories', ')', '==', '0', ':', '# All null anyway, so just retain the nulls', 'return', 'codes', '.', 'copy', '(', ')', 'elif', 'new_categories', '.', 'equals', '(', 'old_categories', ')', ':', '# Same categories, so no need to actually recode', 'return', 'codes', '.', 'copy', '(', ')', 'indexer', '=', 'coerce_indexer_dtype', '(', 'new_categories', '.', 'get_indexer', '(', 'old_categories', ')', ',', 'new_categories', ')', 'new_codes', '=', 'take_1d', '(', 'indexer', ',', 'codes', '.', 'copy', '(', ')', ',', 'fill_value', '=', '-', '1', ')', 'return', 'new_codes'] | Convert a set of codes for to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1]) | ['Convert', 'a', 'set', 'of', 'codes', 'for', 'to', 'a', 'new', 'set', 'of', 'categories'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2585-L2617 |
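The recoding in this record reduces to building an old-to-new indexer and mapping codes through it. A rough sketch of the same idea without pandas' internal take_1d helper (public pandas/NumPy calls only) reproduces the doctest result:

import numpy as np
import pandas as pd

old_cat = pd.Index(['b', 'a', 'c'])
new_cat = pd.Index(['a', 'b'])
codes = np.array([0, 1, 1, 2])

# position of each old category among the new categories (-1 if dropped)
indexer = new_cat.get_indexer(old_cat)            # array([ 1,  0, -1])
# map old codes through the indexer, keeping missing (-1) codes as -1
new_codes = np.where(codes >= 0, indexer[codes], -1)
print(new_codes)                                   # [ 1  0  0 -1]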
858 | pypa/pipenv | pipenv/vendor/tomlkit/parser.py | Parser.inc_n | def inc_n(self, n, exception=None): # type: (int, Optional[ParseError]) -> bool
"""
Increments the parser by n characters
if the end of the input has not been reached.
"""
return self._src.inc_n(n=n, exception=exception) | python | def inc_n(self, n, exception=None): # type: (int, Optional[ParseError]) -> bool
"""
Increments the parser by n characters
if the end of the input has not been reached.
"""
return self._src.inc_n(n=n, exception=exception) | ['def', 'inc_n', '(', 'self', ',', 'n', ',', 'exception', '=', 'None', ')', ':', '# type: (int, Optional[ParseError]) -> bool', 'return', 'self', '.', '_src', '.', 'inc_n', '(', 'n', '=', 'n', ',', 'exception', '=', 'exception', ')'] | Increments the parser by n characters
if the end of the input has not been reached. | ['Increments', 'the', 'parser', 'by', 'n', 'characters', 'if', 'the', 'end', 'of', 'the', 'input', 'has', 'not', 'been', 'reached', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/tomlkit/parser.py#L91-L96 |
859 | simonvh/genomepy | genomepy/provider.py | ProviderBase.register_provider | def register_provider(cls, provider):
"""Register method to keep list of providers."""
def decorator(subclass):
"""Register as decorator function."""
cls._providers[provider] = subclass
subclass.name = provider
return subclass
return decorator | python | def register_provider(cls, provider):
"""Register method to keep list of providers."""
def decorator(subclass):
"""Register as decorator function."""
cls._providers[provider] = subclass
subclass.name = provider
return subclass
return decorator | ['def', 'register_provider', '(', 'cls', ',', 'provider', ')', ':', 'def', 'decorator', '(', 'subclass', ')', ':', '"""Register as decorator function."""', 'cls', '.', '_providers', '[', 'provider', ']', '=', 'subclass', 'subclass', '.', 'name', '=', 'provider', 'return', 'subclass', 'return', 'decorator'] | Register method to keep list of providers. | ['Register', 'method', 'to', 'keep', 'list', 'of', 'providers', '.'] | train | https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/provider.py#L73-L80 |
860 | UCL-INGI/INGInious | inginious/frontend/template_helper.py | TemplateHelper.get_renderer | def get_renderer(self, with_layout=True):
""" Get the default renderer """
if with_layout and self.is_lti():
return self._default_renderer_lti
elif with_layout:
return self._default_renderer
else:
return self._default_renderer_nolayout | python | def get_renderer(self, with_layout=True):
""" Get the default renderer """
if with_layout and self.is_lti():
return self._default_renderer_lti
elif with_layout:
return self._default_renderer
else:
return self._default_renderer_nolayout | ['def', 'get_renderer', '(', 'self', ',', 'with_layout', '=', 'True', ')', ':', 'if', 'with_layout', 'and', 'self', '.', 'is_lti', '(', ')', ':', 'return', 'self', '.', '_default_renderer_lti', 'elif', 'with_layout', ':', 'return', 'self', '.', '_default_renderer', 'else', ':', 'return', 'self', '.', '_default_renderer_nolayout'] | Get the default renderer | ['Get', 'the', 'default', 'renderer'] | train | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/template_helper.py#L63-L70 |
861 | singularityhub/singularity-cli | spython/main/parse/recipe.py | Recipe.parse | def parse(self):
'''parse is the base function for parsing the recipe, whether it be
a Dockerfile or Singularity recipe. The recipe is read in as lines,
and saved to a list if needed for the future. If the client has
it, the recipe type specific _parse function is called.
Instructions for making a client subparser:
It should have a main function _parse that parses a list of lines
from some recipe text file into the appropriate sections, e.g.,
self.fromHeader
self.environ
self.labels
self.install
self.files
self.test
self.entrypoint
'''
self.cmd = None
self.comments = []
self.entrypoint = None
self.environ = []
self.files = []
self.install = []
self.labels = []
self.ports = []
self.test = None
self.volumes = []
if self.recipe:
# Read in the raw lines of the file
self.lines = read_file(self.recipe)
# If properly instantiated by Docker or Singularity Recipe, parse
if hasattr(self, '_parse'):
self._parse() | python | def parse(self):
'''parse is the base function for parsing the recipe, whether it be
a Dockerfile or Singularity recipe. The recipe is read in as lines,
and saved to a list if needed for the future. If the client has
it, the recipe type specific _parse function is called.
Instructions for making a client subparser:
It should have a main function _parse that parses a list of lines
from some recipe text file into the appropriate sections, e.g.,
self.fromHeader
self.environ
self.labels
self.install
self.files
self.test
self.entrypoint
'''
self.cmd = None
self.comments = []
self.entrypoint = None
self.environ = []
self.files = []
self.install = []
self.labels = []
self.ports = []
self.test = None
self.volumes = []
if self.recipe:
# Read in the raw lines of the file
self.lines = read_file(self.recipe)
# If properly instantiated by Docker or Singularity Recipe, parse
if hasattr(self, '_parse'):
self._parse() | ['def', 'parse', '(', 'self', ')', ':', 'self', '.', 'cmd', '=', 'None', 'self', '.', 'comments', '=', '[', ']', 'self', '.', 'entrypoint', '=', 'None', 'self', '.', 'environ', '=', '[', ']', 'self', '.', 'files', '=', '[', ']', 'self', '.', 'install', '=', '[', ']', 'self', '.', 'labels', '=', '[', ']', 'self', '.', 'ports', '=', '[', ']', 'self', '.', 'test', '=', 'None', 'self', '.', 'volumes', '=', '[', ']', 'if', 'self', '.', 'recipe', ':', '# Read in the raw lines of the file', 'self', '.', 'lines', '=', 'read_file', '(', 'self', '.', 'recipe', ')', '# If properly instantiated by Docker or Singularity Recipe, parse', 'if', 'hasattr', '(', 'self', ',', "'_parse'", ')', ':', 'self', '.', '_parse', '(', ')'] | parse is the base function for parsing the recipe, whether it be
a Dockerfile or Singularity recipe. The recipe is read in as lines,
and saved to a list if needed for the future. If the client has
it, the recipe type specific _parse function is called.
Instructions for making a client subparser:
It should have a main function _parse that parses a list of lines
from some recipe text file into the appropriate sections, e.g.,
self.fromHeader
self.environ
self.labels
self.install
self.files
self.test
self.entrypoint | ['parse', 'is', 'the', 'base', 'function', 'for', 'parsing', 'the', 'recipe', 'whether', 'it', 'be', 'a', 'Dockerfile', 'or', 'Singularity', 'recipe', '.', 'The', 'recipe', 'is', 'read', 'in', 'as', 'lines', 'and', 'saved', 'to', 'a', 'list', 'if', 'needed', 'for', 'the', 'future', '.', 'If', 'the', 'client', 'has', 'it', 'the', 'recipe', 'type', 'specific', '_parse', 'function', 'is', 'called', '.'] | train | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/recipe.py#L89-L128 |
862 | lago-project/lago | lago/guestfs_tools.py | guestfs_conn_mount_ro | def guestfs_conn_mount_ro(disk_path, disk_root, retries=5, wait=1):
"""
Open a GuestFS handle with `disk_path` and try mounting the root
filesystem. `disk_root` is a hint where it should be looked and will
only be used if GuestFS will not be able to deduce it independently.
Note that mounting a live guest can lead to filesystem inconsistencies,
causing the mount operation to fail. As we use readonly mode, this is
safe, but the operation itself can still fail. Therefore, this method
will watch for mount failures and retry 5 times before throwing
an exception.
Args:
disk_path(str): Path to the disk.
disk_root(str): Hint what is the root device with the OS filesystem.
retries(int): Number of retries for :func:`~guestfs.GuestFS.mount_ro`
operation. Note that on each retry a new GuestFS handle will
be used.
wait(int): Time to wait between retries.
Yields:
guestfs.GuestFS: An open GuestFS handle.
Raises:
:exc:`GuestFSError`: On any guestfs operation error, including
exceeding retries for the :func:`~guestfs.GuestFS.mount_ro`
operation.
"""
for attempt in range(retries):
with guestfs_conn_ro(disk_path) as conn:
rootfs = find_rootfs(conn, disk_root)
try:
conn.mount_ro(rootfs, '/')
except RuntimeError as err:
LOGGER.debug(err)
if attempt < retries - 1:
LOGGER.debug(
(
'failed mounting %s:%s using guestfs, '
'attempt %s/%s'
), disk_path, rootfs, attempt + 1, retries
)
time.sleep(wait)
continue
else:
raise GuestFSError(
'failed mounting {0}:{1} using guestfs'.format(
disk_path, rootfs
)
)
yield conn
try:
conn.umount(rootfs)
except RuntimeError as err:
LOGGER.debug(err)
raise GuestFSError(
('failed unmounting {0}:{1} using'
'guestfs').format(disk_path, rootfs)
)
break | python | def guestfs_conn_mount_ro(disk_path, disk_root, retries=5, wait=1):
"""
Open a GuestFS handle with `disk_path` and try mounting the root
filesystem. `disk_root` is a hint where it should be looked and will
only be used if GuestFS will not be able to deduce it independently.
Note that mounting a live guest can lead to filesystem inconsistencies,
causing the mount operation to fail. As we use readonly mode, this is
safe, but the operation itself can still fail. Therefore, this method
will watch for mount failures and retry 5 times before throwing
an exception.
Args:
disk_path(str): Path to the disk.
disk_root(str): Hint what is the root device with the OS filesystem.
retries(int): Number of retries for :func:`~guestfs.GuestFS.mount_ro`
operation. Note that on each retry a new GuestFS handle will
be used.
wait(int): Time to wait between retries.
Yields:
guestfs.GuestFS: An open GuestFS handle.
Raises:
:exc:`GuestFSError`: On any guestfs operation error, including
exceeding retries for the :func:`~guestfs.GuestFS.mount_ro`
operation.
"""
for attempt in range(retries):
with guestfs_conn_ro(disk_path) as conn:
rootfs = find_rootfs(conn, disk_root)
try:
conn.mount_ro(rootfs, '/')
except RuntimeError as err:
LOGGER.debug(err)
if attempt < retries - 1:
LOGGER.debug(
(
'failed mounting %s:%s using guestfs, '
'attempt %s/%s'
), disk_path, rootfs, attempt + 1, retries
)
time.sleep(wait)
continue
else:
raise GuestFSError(
'failed mounting {0}:{1} using guestfs'.format(
disk_path, rootfs
)
)
yield conn
try:
conn.umount(rootfs)
except RuntimeError as err:
LOGGER.debug(err)
raise GuestFSError(
('failed unmounting {0}:{1} using'
'guestfs').format(disk_path, rootfs)
)
break | ['def', 'guestfs_conn_mount_ro', '(', 'disk_path', ',', 'disk_root', ',', 'retries', '=', '5', ',', 'wait', '=', '1', ')', ':', 'for', 'attempt', 'in', 'range', '(', 'retries', ')', ':', 'with', 'guestfs_conn_ro', '(', 'disk_path', ')', 'as', 'conn', ':', 'rootfs', '=', 'find_rootfs', '(', 'conn', ',', 'disk_root', ')', 'try', ':', 'conn', '.', 'mount_ro', '(', 'rootfs', ',', "'/'", ')', 'except', 'RuntimeError', 'as', 'err', ':', 'LOGGER', '.', 'debug', '(', 'err', ')', 'if', 'attempt', '<', 'retries', '-', '1', ':', 'LOGGER', '.', 'debug', '(', '(', "'failed mounting %s:%s using guestfs, '", "'attempt %s/%s'", ')', ',', 'disk_path', ',', 'rootfs', ',', 'attempt', '+', '1', ',', 'retries', ')', 'time', '.', 'sleep', '(', 'wait', ')', 'continue', 'else', ':', 'raise', 'GuestFSError', '(', "'failed mounting {0}:{1} using guestfs'", '.', 'format', '(', 'disk_path', ',', 'rootfs', ')', ')', 'yield', 'conn', 'try', ':', 'conn', '.', 'umount', '(', 'rootfs', ')', 'except', 'RuntimeError', 'as', 'err', ':', 'LOGGER', '.', 'debug', '(', 'err', ')', 'raise', 'GuestFSError', '(', '(', "'failed unmounting {0}:{1} using'", "'guestfs'", ')', '.', 'format', '(', 'disk_path', ',', 'rootfs', ')', ')', 'break'] | Open a GuestFS handle with `disk_path` and try mounting the root
filesystem. `disk_root` is a hint where it should be looked and will
only be used if GuestFS will not be able to deduce it independently.
Note that mounting a live guest can lead to filesystem inconsistencies,
causing the mount operation to fail. As we use readonly mode, this is
safe, but the operation itself can still fail. Therefore, this method
will watch for mount failures and retry 5 times before throwing
an exception.
Args:
disk_path(str): Path to the disk.
disk_root(str): Hint what is the root device with the OS filesystem.
retries(int): Number of retries for :func:`~guestfs.GuestFS.mount_ro`
operation. Note that on each retry a new GuestFS handle will
be used.
wait(int): Time to wait between retries.
Yields:
guestfs.GuestFS: An open GuestFS handle.
Raises:
:exc:`GuestFSError`: On any guestfs operation error, including
exceeding retries for the :func:`~guestfs.GuestFS.mount_ro`
operation. | ['Open', 'a', 'GuestFS', 'handle', 'with', 'disk_path', 'and', 'try', 'mounting', 'the', 'root', 'filesystem', '.', 'disk_root', 'is', 'a', 'hint', 'where', 'it', 'should', 'be', 'looked', 'and', 'will', 'only', 'be', 'used', 'if', 'GuestFS', 'will', 'not', 'be', 'able', 'to', 'deduce', 'it', 'independently', '.'] | train | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/guestfs_tools.py#L71-L133 |
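Read together with its docstring, the function above is consumed as a retrying read-only mount context manager. A hypothetical usage sketch follows; the disk path and device hint are placeholders, the libguestfs Python bindings must be installed, and the contextmanager wrapping (like the guestfs_conn_ro helper it calls) sits outside this excerpt.

from lago.guestfs_tools import guestfs_conn_mount_ro

with guestfs_conn_mount_ro('/var/lib/lago/vm-disk.qcow2', '/dev/sda1') as conn:
    # the guest filesystem is mounted read-only, so inspecting a live VM is safe
    print(conn.cat('/etc/hostname'))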
863 | yograterol/zoort | zoort.py | compress_folder_dump | def compress_folder_dump(path, target):
'''
Compress folder dump to tar.gz file
'''
import tarfile
if not path or not os.path.isdir(path):
raise SystemExit(_error_codes.get(105))
name_out_file = (target + 'dump-' +
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
tar = tarfile.open(name_out_file + '.tar.gz', 'w:gz')
tar.add(path, arcname='dump')
tar.close()
return (name_out_file, name_out_file + '.tar.gz') | python | def compress_folder_dump(path, target):
'''
Compress folder dump to tar.gz file
'''
import tarfile
if not path or not os.path.isdir(path):
raise SystemExit(_error_codes.get(105))
name_out_file = (target + 'dump-' +
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
tar = tarfile.open(name_out_file + '.tar.gz', 'w:gz')
tar.add(path, arcname='dump')
tar.close()
return (name_out_file, name_out_file + '.tar.gz') | ['def', 'compress_folder_dump', '(', 'path', ',', 'target', ')', ':', 'import', 'tarfile', 'if', 'not', 'path', 'or', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'path', ')', ':', 'raise', 'SystemExit', '(', '_error_codes', '.', 'get', '(', '105', ')', ')', 'name_out_file', '=', '(', 'target', '+', "'dump-'", '+', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '.', 'strftime', '(', "'%Y-%m-%d-%H-%M-%S'", ')', ')', 'tar', '=', 'tarfile', '.', 'open', '(', 'name_out_file', '+', "'.tar.gz'", ',', "'w:gz'", ')', 'tar', '.', 'add', '(', 'path', ',', 'arcname', '=', "'dump'", ')', 'tar', '.', 'close', '(', ')', 'return', '(', 'name_out_file', ',', 'name_out_file', '+', "'.tar.gz'", ')'] | Compress folder dump to tar.gz file | ['Compress', 'folder', 'dump', 'to', 'tar', '.', 'gz', 'file'] | train | https://github.com/yograterol/zoort/blob/ed6669ab945007c20a83f6d468856c4eb585c752/zoort.py#L665-L677 |
864 | carljm/django-adminfiles | adminfiles/flickr.py | Photo.setTags | def setTags(self, tags):
"""Set the tags for current photo to list tags.
(flickr.photos.settags)
"""
method = 'flickr.photos.setTags'
tags = uniq(tags)
_dopost(method, auth=True, photo_id=self.id, tags=tags)
self._load_properties() | python | def setTags(self, tags):
"""Set the tags for current photo to list tags.
(flickr.photos.settags)
"""
method = 'flickr.photos.setTags'
tags = uniq(tags)
_dopost(method, auth=True, photo_id=self.id, tags=tags)
self._load_properties() | ['def', 'setTags', '(', 'self', ',', 'tags', ')', ':', 'method', '=', "'flickr.photos.setTags'", 'tags', '=', 'uniq', '(', 'tags', ')', '_dopost', '(', 'method', ',', 'auth', '=', 'True', ',', 'photo_id', '=', 'self', '.', 'id', ',', 'tags', '=', 'tags', ')', 'self', '.', '_load_properties', '(', ')'] | Set the tags for current photo to list tags.
(flickr.photos.settags) | ['Set', 'the', 'tags', 'for', 'current', 'photo', 'to', 'list', 'tags', '.', '(', 'flickr', '.', 'photos', '.', 'settags', ')'] | train | https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L171-L178 |
865 | sentinel-hub/eo-learn | mask/eolearn/mask/cloud_mask.py | AddCloudMaskTask._make_request | def _make_request(self, bbox, meta_info, timestamps):
""" Make OGC request to create input for cloud detector classifier
:param bbox: Bounding box
:param meta_info: Meta-info dictionary of input eopatch
:return: Requested data
"""
service_type = ServiceType(meta_info['service_type'])
# Raise error if resolutions are not specified
if self.cm_size_x is None and self.cm_size_y is None:
raise ValueError("Specify size_x and size_y for data request")
# If WCS request, make sure both resolutions are set
if service_type == ServiceType.WCS:
if self.cm_size_y is None:
self.cm_size_y = self.cm_size_x
elif self.cm_size_x is None:
self.cm_size_x = self.cm_size_y
custom_url_params = {CustomUrlParam.SHOWLOGO: False,
CustomUrlParam.TRANSPARENT: False,
CustomUrlParam.EVALSCRIPT: self.model_evalscript}
request = {ServiceType.WMS: self._get_wms_request,
ServiceType.WCS: self._get_wcs_request}[service_type](bbox,
meta_info['time_interval'],
self.cm_size_x,
self.cm_size_y,
meta_info['maxcc'],
meta_info['time_difference'],
custom_url_params)
request_dates = request.get_dates()
download_frames = get_common_timestamps(request_dates, timestamps)
request_return = request.get_data(raise_download_errors=False, data_filter=download_frames)
bad_data = [idx for idx, value in enumerate(request_return) if value is None]
for idx in reversed(sorted(bad_data)):
LOGGER.warning('Data from %s could not be downloaded for %s!', str(request_dates[idx]), self.data_feature)
del request_return[idx]
del request_dates[idx]
return np.asarray(request_return), request_dates | python | def _make_request(self, bbox, meta_info, timestamps):
""" Make OGC request to create input for cloud detector classifier
:param bbox: Bounding box
:param meta_info: Meta-info dictionary of input eopatch
:return: Requested data
"""
service_type = ServiceType(meta_info['service_type'])
# Raise error if resolutions are not specified
if self.cm_size_x is None and self.cm_size_y is None:
raise ValueError("Specify size_x and size_y for data request")
# If WCS request, make sure both resolutions are set
if service_type == ServiceType.WCS:
if self.cm_size_y is None:
self.cm_size_y = self.cm_size_x
elif self.cm_size_x is None:
self.cm_size_x = self.cm_size_y
custom_url_params = {CustomUrlParam.SHOWLOGO: False,
CustomUrlParam.TRANSPARENT: False,
CustomUrlParam.EVALSCRIPT: self.model_evalscript}
request = {ServiceType.WMS: self._get_wms_request,
ServiceType.WCS: self._get_wcs_request}[service_type](bbox,
meta_info['time_interval'],
self.cm_size_x,
self.cm_size_y,
meta_info['maxcc'],
meta_info['time_difference'],
custom_url_params)
request_dates = request.get_dates()
download_frames = get_common_timestamps(request_dates, timestamps)
request_return = request.get_data(raise_download_errors=False, data_filter=download_frames)
bad_data = [idx for idx, value in enumerate(request_return) if value is None]
for idx in reversed(sorted(bad_data)):
LOGGER.warning('Data from %s could not be downloaded for %s!', str(request_dates[idx]), self.data_feature)
del request_return[idx]
del request_dates[idx]
return np.asarray(request_return), request_dates | ['def', '_make_request', '(', 'self', ',', 'bbox', ',', 'meta_info', ',', 'timestamps', ')', ':', 'service_type', '=', 'ServiceType', '(', 'meta_info', '[', "'service_type'", ']', ')', '# Raise error if resolutions are not specified', 'if', 'self', '.', 'cm_size_x', 'is', 'None', 'and', 'self', '.', 'cm_size_y', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Specify size_x and size_y for data request"', ')', '# If WCS request, make sure both resolutions are set', 'if', 'service_type', '==', 'ServiceType', '.', 'WCS', ':', 'if', 'self', '.', 'cm_size_y', 'is', 'None', ':', 'self', '.', 'cm_size_y', '=', 'self', '.', 'cm_size_x', 'elif', 'self', '.', 'cm_size_x', 'is', 'None', ':', 'self', '.', 'cm_size_x', '=', 'self', '.', 'cm_size_y', 'custom_url_params', '=', '{', 'CustomUrlParam', '.', 'SHOWLOGO', ':', 'False', ',', 'CustomUrlParam', '.', 'TRANSPARENT', ':', 'False', ',', 'CustomUrlParam', '.', 'EVALSCRIPT', ':', 'self', '.', 'model_evalscript', '}', 'request', '=', '{', 'ServiceType', '.', 'WMS', ':', 'self', '.', '_get_wms_request', ',', 'ServiceType', '.', 'WCS', ':', 'self', '.', '_get_wcs_request', '}', '[', 'service_type', ']', '(', 'bbox', ',', 'meta_info', '[', "'time_interval'", ']', ',', 'self', '.', 'cm_size_x', ',', 'self', '.', 'cm_size_y', ',', 'meta_info', '[', "'maxcc'", ']', ',', 'meta_info', '[', "'time_difference'", ']', ',', 'custom_url_params', ')', 'request_dates', '=', 'request', '.', 'get_dates', '(', ')', 'download_frames', '=', 'get_common_timestamps', '(', 'request_dates', ',', 'timestamps', ')', 'request_return', '=', 'request', '.', 'get_data', '(', 'raise_download_errors', '=', 'False', ',', 'data_filter', '=', 'download_frames', ')', 'bad_data', '=', '[', 'idx', 'for', 'idx', ',', 'value', 'in', 'enumerate', '(', 'request_return', ')', 'if', 'value', 'is', 'None', ']', 'for', 'idx', 'in', 'reversed', '(', 'sorted', '(', 'bad_data', ')', ')', ':', 'LOGGER', '.', 'warning', '(', "'Data from %s could not be downloaded for %s!'", ',', 'str', '(', 'request_dates', '[', 'idx', ']', ')', ',', 'self', '.', 'data_feature', ')', 'del', 'request_return', '[', 'idx', ']', 'del', 'request_dates', '[', 'idx', ']', 'return', 'np', '.', 'asarray', '(', 'request_return', ')', ',', 'request_dates'] | Make OGC request to create input for cloud detector classifier
:param bbox: Bounding box
:param meta_info: Meta-info dictionary of input eopatch
:return: Requested data | ['Make', 'OGC', 'request', 'to', 'create', 'input', 'for', 'cloud', 'detector', 'classifier'] | train | https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/mask/eolearn/mask/cloud_mask.py#L201-L244 |
866 | xolox/python-qpass | qpass/__init__.py | AbstractPasswordStore.select_entry | def select_entry(self, *arguments):
"""
Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`).
"""
matches = self.smart_search(*arguments)
if len(matches) > 1:
logger.info("More than one match, prompting for choice ..")
labels = [entry.name for entry in matches]
return matches[labels.index(prompt_for_choice(labels))]
else:
logger.info("Matched one entry: %s", matches[0].name)
return matches[0] | python | def select_entry(self, *arguments):
"""
Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`).
"""
matches = self.smart_search(*arguments)
if len(matches) > 1:
logger.info("More than one match, prompting for choice ..")
labels = [entry.name for entry in matches]
return matches[labels.index(prompt_for_choice(labels))]
else:
logger.info("Matched one entry: %s", matches[0].name)
return matches[0] | ['def', 'select_entry', '(', 'self', ',', '*', 'arguments', ')', ':', 'matches', '=', 'self', '.', 'smart_search', '(', '*', 'arguments', ')', 'if', 'len', '(', 'matches', ')', '>', '1', ':', 'logger', '.', 'info', '(', '"More than one match, prompting for choice .."', ')', 'labels', '=', '[', 'entry', '.', 'name', 'for', 'entry', 'in', 'matches', ']', 'return', 'matches', '[', 'labels', '.', 'index', '(', 'prompt_for_choice', '(', 'labels', ')', ')', ']', 'else', ':', 'logger', '.', 'info', '(', '"Matched one entry: %s"', ',', 'matches', '[', '0', ']', '.', 'name', ')', 'return', 'matches', '[', '0', ']'] | Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`). | ['Select', 'a', 'password', 'from', 'the', 'available', 'choices', '.'] | train | https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L132-L147 |
867 | LIVVkit/LIVVkit | livvkit/components/performance.py | generate_scaling_plot | def generate_scaling_plot(timing_data, title, ylabel, description, plot_file):
"""
Generate a scaling plot.
Args:
timing_data: data returned from a `*_scaling` method
title: the title of the plot
ylabel: the y-axis label of the plot
description: a description of the plot
plot_file: the file to write out to
Returns:
an image element containing the plot file and metadata
"""
proc_counts = timing_data['proc_counts']
if len(proc_counts) > 2:
plt.figure(figsize=(10, 8), dpi=150)
plt.title(title)
plt.xlabel("Number of processors")
plt.ylabel(ylabel)
for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']):
case_data = timing_data[case]
means = case_data['means']
mins = case_data['mins']
maxs = case_data['maxs']
plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5)
plt.plot(proc_counts, means, 'o-', color=case_color, label=case)
plt.legend(loc='best')
else:
plt.figure(figsize=(5, 3))
plt.axis('off')
plt.text(0.4, 0.8, "ERROR:")
plt.text(0.0, 0.6, "Not enough data points to draw scaling plot")
plt.text(0.0, 0.44, "To generate this data rerun BATS with the")
plt.text(0.0, 0.36, "performance option enabled.")
if livvkit.publish:
plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
plt.savefig(plot_file)
plt.close()
return elements.image(title, description, os.path.basename(plot_file)) | python | def generate_scaling_plot(timing_data, title, ylabel, description, plot_file):
"""
Generate a scaling plot.
Args:
timing_data: data returned from a `*_scaling` method
title: the title of the plot
ylabel: the y-axis label of the plot
description: a description of the plot
plot_file: the file to write out to
Returns:
an image element containing the plot file and metadata
"""
proc_counts = timing_data['proc_counts']
if len(proc_counts) > 2:
plt.figure(figsize=(10, 8), dpi=150)
plt.title(title)
plt.xlabel("Number of processors")
plt.ylabel(ylabel)
for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']):
case_data = timing_data[case]
means = case_data['means']
mins = case_data['mins']
maxs = case_data['maxs']
plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5)
plt.plot(proc_counts, means, 'o-', color=case_color, label=case)
plt.legend(loc='best')
else:
plt.figure(figsize=(5, 3))
plt.axis('off')
plt.text(0.4, 0.8, "ERROR:")
plt.text(0.0, 0.6, "Not enough data points to draw scaling plot")
plt.text(0.0, 0.44, "To generate this data rerun BATS with the")
plt.text(0.0, 0.36, "performance option enabled.")
if livvkit.publish:
plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
plt.savefig(plot_file)
plt.close()
return elements.image(title, description, os.path.basename(plot_file)) | ['def', 'generate_scaling_plot', '(', 'timing_data', ',', 'title', ',', 'ylabel', ',', 'description', ',', 'plot_file', ')', ':', 'proc_counts', '=', 'timing_data', '[', "'proc_counts'", ']', 'if', 'len', '(', 'proc_counts', ')', '>', '2', ':', 'plt', '.', 'figure', '(', 'figsize', '=', '(', '10', ',', '8', ')', ',', 'dpi', '=', '150', ')', 'plt', '.', 'title', '(', 'title', ')', 'plt', '.', 'xlabel', '(', '"Number of processors"', ')', 'plt', '.', 'ylabel', '(', 'ylabel', ')', 'for', 'case', ',', 'case_color', 'in', 'zip', '(', '[', "'bench'", ',', "'model'", ']', ',', '[', "'#91bfdb'", ',', "'#fc8d59'", ']', ')', ':', 'case_data', '=', 'timing_data', '[', 'case', ']', 'means', '=', 'case_data', '[', "'means'", ']', 'mins', '=', 'case_data', '[', "'mins'", ']', 'maxs', '=', 'case_data', '[', "'maxs'", ']', 'plt', '.', 'fill_between', '(', 'proc_counts', ',', 'mins', ',', 'maxs', ',', 'facecolor', '=', 'case_color', ',', 'alpha', '=', '0.5', ')', 'plt', '.', 'plot', '(', 'proc_counts', ',', 'means', ',', "'o-'", ',', 'color', '=', 'case_color', ',', 'label', '=', 'case', ')', 'plt', '.', 'legend', '(', 'loc', '=', "'best'", ')', 'else', ':', 'plt', '.', 'figure', '(', 'figsize', '=', '(', '5', ',', '3', ')', ')', 'plt', '.', 'axis', '(', "'off'", ')', 'plt', '.', 'text', '(', '0.4', ',', '0.8', ',', '"ERROR:"', ')', 'plt', '.', 'text', '(', '0.0', ',', '0.6', ',', '"Not enough data points to draw scaling plot"', ')', 'plt', '.', 'text', '(', '0.0', ',', '0.44', ',', '"To generate this data rerun BATS with the"', ')', 'plt', '.', 'text', '(', '0.0', ',', '0.36', ',', '"performance option enabled."', ')', 'if', 'livvkit', '.', 'publish', ':', 'plt', '.', 'savefig', '(', 'os', '.', 'path', '.', 'splitext', '(', 'plot_file', ')', '[', '0', ']', '+', "'.eps'", ',', 'dpi', '=', '600', ')', 'plt', '.', 'savefig', '(', 'plot_file', ')', 'plt', '.', 'close', '(', ')', 'return', 'elements', '.', 'image', '(', 'title', ',', 'description', ',', 'os', '.', 'path', '.', 'basename', '(', 'plot_file', ')', ')'] | Generate a scaling plot.
Args:
timing_data: data returned from a `*_scaling` method
title: the title of the plot
ylabel: the y-axis label of the plot
description: a description of the plot
plot_file: the file to write out to
Returns:
an image element containing the plot file and metadata | ['Generate', 'a', 'scaling', 'plot', '.'] | train | https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L312-L354 |
868 | JonathonReinhart/scuba | scuba/__main__.py | ScubaDive.copy_scubadir_file | def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name) | python | def copy_scubadir_file(self, name, source):
'''Copies source into the scubadir
Returns the container-path of the copied file
'''
dest = os.path.join(self.__scubadir_hostpath, name)
assert not os.path.exists(dest)
shutil.copy2(source, dest)
return os.path.join(self.__scubadir_contpath, name) | ['def', 'copy_scubadir_file', '(', 'self', ',', 'name', ',', 'source', ')', ':', 'dest', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '__scubadir_hostpath', ',', 'name', ')', 'assert', 'not', 'os', '.', 'path', '.', 'exists', '(', 'dest', ')', 'shutil', '.', 'copy2', '(', 'source', ',', 'dest', ')', 'return', 'os', '.', 'path', '.', 'join', '(', 'self', '.', '__scubadir_contpath', ',', 'name', ')'] | Copies source into the scubadir
Returns the container-path of the copied file | ['Copies', 'source', 'into', 'the', 'scubadir'] | train | https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/__main__.py#L347-L356 |
869 | abilian/abilian-core | abilian/web/views/images.py | BaseImageView.make_response | def make_response(self, image, size, mode, filename=None, *args, **kwargs):
"""
:param image: image as bytes
:param size: requested maximum width/height size
:param mode: one of 'scale', 'fit' or 'crop'
:param filename: filename
"""
try:
fmt = get_format(image)
except IOError:
# not a known image file
raise NotFound()
self.content_type = "image/png" if fmt == "PNG" else "image/jpeg"
ext = "." + str(fmt.lower())
if not filename:
filename = "image"
if not filename.lower().endswith(ext):
filename += ext
self.filename = filename
if size:
image = resize(image, size, size, mode=mode)
if mode == CROP:
assert get_size(image) == (size, size)
else:
image = image.read()
return make_response(image) | python | def make_response(self, image, size, mode, filename=None, *args, **kwargs):
"""
:param image: image as bytes
:param size: requested maximum width/height size
:param mode: one of 'scale', 'fit' or 'crop'
:param filename: filename
"""
try:
fmt = get_format(image)
except IOError:
# not a known image file
raise NotFound()
self.content_type = "image/png" if fmt == "PNG" else "image/jpeg"
ext = "." + str(fmt.lower())
if not filename:
filename = "image"
if not filename.lower().endswith(ext):
filename += ext
self.filename = filename
if size:
image = resize(image, size, size, mode=mode)
if mode == CROP:
assert get_size(image) == (size, size)
else:
image = image.read()
return make_response(image) | ['def', 'make_response', '(', 'self', ',', 'image', ',', 'size', ',', 'mode', ',', 'filename', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'fmt', '=', 'get_format', '(', 'image', ')', 'except', 'IOError', ':', '# not a known image file', 'raise', 'NotFound', '(', ')', 'self', '.', 'content_type', '=', '"image/png"', 'if', 'fmt', '==', '"PNG"', 'else', '"image/jpeg"', 'ext', '=', '"."', '+', 'str', '(', 'fmt', '.', 'lower', '(', ')', ')', 'if', 'not', 'filename', ':', 'filename', '=', '"image"', 'if', 'not', 'filename', '.', 'lower', '(', ')', '.', 'endswith', '(', 'ext', ')', ':', 'filename', '+=', 'ext', 'self', '.', 'filename', '=', 'filename', 'if', 'size', ':', 'image', '=', 'resize', '(', 'image', ',', 'size', ',', 'size', ',', 'mode', '=', 'mode', ')', 'if', 'mode', '==', 'CROP', ':', 'assert', 'get_size', '(', 'image', ')', '==', '(', 'size', ',', 'size', ')', 'else', ':', 'image', '=', 'image', '.', 'read', '(', ')', 'return', 'make_response', '(', 'image', ')'] | :param image: image as bytes
:param size: requested maximum width/height size
:param mode: one of 'scale', 'fit' or 'crop'
:param filename: filename | [':', 'param', 'image', ':', 'image', 'as', 'bytes', ':', 'param', 'size', ':', 'requested', 'maximum', 'width', '/', 'height', 'size', ':', 'param', 'mode', ':', 'one', 'of', 'scale', 'fit', 'or', 'crop', ':', 'param', 'filename', ':', 'filename'] | train | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/views/images.py#L60-L89 |
870 | OpenTreeOfLife/peyotl | peyotl/nexson_syntax/helper.py | _index_list_of_values | def _index_list_of_values(d, k):
"""Returns d[k] or [d[k]] if the value is not a list"""
v = d[k]
if isinstance(v, list):
return v
return [v] | python | def _index_list_of_values(d, k):
"""Returns d[k] or [d[k]] if the value is not a list"""
v = d[k]
if isinstance(v, list):
return v
return [v] | ['def', '_index_list_of_values', '(', 'd', ',', 'k', ')', ':', 'v', '=', 'd', '[', 'k', ']', 'if', 'isinstance', '(', 'v', ',', 'list', ')', ':', 'return', 'v', 'return', '[', 'v', ']'] | Returns d[k] or [d[k]] if the value is not a list | ['Returns', 'd', '[', 'k', ']', 'or', '[', 'd', '[', 'k', ']]', 'if', 'the', 'value', 'is', 'not', 'a', 'list'] | train | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/helper.py#L90-L95 |
871 | oubiga/respect | respect/utils.py | sanitize_qualifiers | def sanitize_qualifiers(repos=None, followers=None, language=None):
'''
qualifiers = c repos:+42 followers:+1000 language:
params = {'q': 'tom repos:>42 followers:>1000'}
'''
qualifiers = ''
if repos:
qualifiers += 'repos:{0} '.format(repos)
qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers)
qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers)
if followers:
qualifiers += 'followers:{0} '.format(followers)
qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers)
qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers)
try:
if language in ALLOWED_LANGUAGES and not language == '':
qualifiers += 'language:{0} '.format(language)
elif language == '':
qualifiers += ''
else:
raise AllowedLanguagesException
except AllowedLanguagesException as e:
print(e)
return qualifiers | python | def sanitize_qualifiers(repos=None, followers=None, language=None):
'''
qualifiers = c repos:+42 followers:+1000 language:
params = {'q': 'tom repos:>42 followers:>1000'}
'''
qualifiers = ''
if repos:
qualifiers += 'repos:{0} '.format(repos)
qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers)
qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers)
if followers:
qualifiers += 'followers:{0} '.format(followers)
qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers)
qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers)
try:
if language in ALLOWED_LANGUAGES and not language == '':
qualifiers += 'language:{0} '.format(language)
elif language == '':
qualifiers += ''
else:
raise AllowedLanguagesException
except AllowedLanguagesException as e:
print(e)
return qualifiers | ['def', 'sanitize_qualifiers', '(', 'repos', '=', 'None', ',', 'followers', '=', 'None', ',', 'language', '=', 'None', ')', ':', 'qualifiers', '=', "''", 'if', 'repos', ':', 'qualifiers', '+=', "'repos:{0} '", '.', 'format', '(', 'repos', ')', 'qualifiers', '=', 're', '.', 'sub', '(', 'r"([+])([=a-zA-Z0-9]+)"', ',', 'r">\\2"', ',', 'qualifiers', ')', 'qualifiers', '=', 're', '.', 'sub', '(', 'r"([-])([=a-zA-Z0-9]+)"', ',', 'r"<\\2"', ',', 'qualifiers', ')', 'if', 'followers', ':', 'qualifiers', '+=', "'followers:{0} '", '.', 'format', '(', 'followers', ')', 'qualifiers', '=', 're', '.', 'sub', '(', 'r"([+])([=a-zA-Z0-9]+)"', ',', 'r">\\2"', ',', 'qualifiers', ')', 'qualifiers', '=', 're', '.', 'sub', '(', 'r"([-])([=a-zA-Z0-9]+)"', ',', 'r"<\\2"', ',', 'qualifiers', ')', 'try', ':', 'if', 'language', 'in', 'ALLOWED_LANGUAGES', 'and', 'not', 'language', '==', "''", ':', 'qualifiers', '+=', "'language:{0} '", '.', 'format', '(', 'language', ')', 'elif', 'language', '==', "''", ':', 'qualifiers', '+=', "''", 'else', ':', 'raise', 'AllowedLanguagesException', 'except', 'AllowedLanguagesException', 'as', 'e', ':', 'print', '(', 'e', ')', 'return', 'qualifiers'] | qualifiers = c repos:+42 followers:+1000 language:
params = {'q': 'tom repos:>42 followers:>1000'} | ['qualifiers', '=', 'c', 'repos', ':', '+', '42', 'followers', ':', '+', '1000', 'language', ':', 'params', '=', '{', 'q', ':', 'tom', 'repos', ':', '>', '42', 'followers', ':', '>', '1000', '}'] | train | https://github.com/oubiga/respect/blob/550554ec4d3139379d03cb8f82a8cd2d80c3ad62/respect/utils.py#L74-L101 |
872 | pydata/xarray | xarray/core/computation.py | apply_ufunc | def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Optional[Sequence[Sequence]] = None,
output_core_dims: Optional[Sequence[Sequence]] = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: str = 'exact',
dataset_join: str = 'exact',
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool = False,
kwargs: Mapping = None,
dask: str = 'forbidden',
output_dtypes: Optional[Sequence] = None,
output_sizes: Optional[Mapping[Any, int]] = None
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : Sequence[Sequence], optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : List[tuple], optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs: boolean, Optional
Whether to copy attributes from the first argument to the output.
kwargs: dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask: 'forbidden', 'allowed' or 'parallelized', optional
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array. If used, the ``output_dtypes`` argument must
also be provided. Multiple output arguments are not yet supported.
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
... return xr.apply_ufunc(func, a, b)
You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.414214, 2.828427, 4.242641])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects is also
supported:
>>> magnitude(4, 5)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension::
def mean(obj, dim):
# note: apply always moves core dimensions to the end
return apply_ufunc(np.mean, obj,
input_core_dims=[[dim]],
kwargs={'axis': -1})
Inner product over a specific dimension (like ``xr.dot``)::
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(a, b, dim):
return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
Stack objects along a new dimension (like ``xr.concat``)::
def stack(objects, dim, new_coord):
# note: this version does not stack coordinates
func = lambda *x: np.stack(x, axis=-1)
result = apply_ufunc(func, *objects,
output_core_dims=[[dim]],
join='outer',
dataset_fill_value=np.nan)
result[dim] = new_coord
return result
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors::
import scipy.stats
def earth_mover_distance(first_samples,
second_samples,
dim='ensemble'):
return apply_ufunc(scipy.stats.wasserstein_distance,
first_samples, second_samples,
input_core_dims=[[dim], [dim]],
vectorize=True)
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in `apply`. You may find helper functions such as
numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
works well with numba's vectorize and guvectorize. Further explanation with
examples are provided in the xarray documentation [3].
See also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
References
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
.. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
""" # noqa: E501 # don't error on that URL one line up
from .groupby import GroupBy
from .dataarray import DataArray
from .variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
'input_core_dims must be None or a tuple with the length same to '
'the number of arguments. Given input_core_dims: {}, '
'number of args: {}.'.format(input_core_dims, len(args)))
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims and not exclude_dims <= signature.all_core_dims:
raise ValueError('each dimension in `exclude_dims` must also be a '
'core dimension in the function signature')
if kwargs:
func = functools.partial(func, **kwargs)
if vectorize:
if signature.all_core_dims:
# we need the signature argument
if LooseVersion(np.__version__) < '1.12': # pragma: no cover
raise NotImplementedError(
'numpy 1.12 or newer required when using vectorize=True '
'in xarray.apply_ufunc with non-scalar output core '
'dimensions.')
func = np.vectorize(func,
otypes=output_dtypes,
signature=signature.to_gufunc_string())
else:
func = np.vectorize(func, otypes=output_dtypes)
variables_vfunc = functools.partial(apply_variable_ufunc, func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
output_dtypes=output_dtypes,
output_sizes=output_sizes)
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(apply_ufunc, func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask)
return apply_groupby_func(this_apply, *args)
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(variables_vfunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs)
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(variables_vfunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs)
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
return apply_array_ufunc(func, *args, dask=dask) | python | def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Optional[Sequence[Sequence]] = None,
output_core_dims: Optional[Sequence[Sequence]] = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: str = 'exact',
dataset_join: str = 'exact',
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool = False,
kwargs: Mapping = None,
dask: str = 'forbidden',
output_dtypes: Optional[Sequence] = None,
output_sizes: Optional[Mapping[Any, int]] = None
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : Sequence[Sequence], optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : List[tuple], optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs: boolean, Optional
Whether to copy attributes from the first argument to the output.
kwargs: dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask: 'forbidden', 'allowed' or 'parallelized', optional
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array. If used, the ``output_dtypes`` argument must
also be provided. Multiple output arguments are not yet supported.
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
... return xr.apply_ufunc(func, a, b)
You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.414214, 2.828427, 4.242641])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects is also
supported:
>>> magnitude(4, 5)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension::
def mean(obj, dim):
# note: apply always moves core dimensions to the end
return apply_ufunc(np.mean, obj,
input_core_dims=[[dim]],
kwargs={'axis': -1})
Inner product over a specific dimension (like ``xr.dot``)::
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(a, b, dim):
return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
Stack objects along a new dimension (like ``xr.concat``)::
def stack(objects, dim, new_coord):
# note: this version does not stack coordinates
func = lambda *x: np.stack(x, axis=-1)
result = apply_ufunc(func, *objects,
output_core_dims=[[dim]],
join='outer',
dataset_fill_value=np.nan)
result[dim] = new_coord
return result
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors::
import scipy.stats
def earth_mover_distance(first_samples,
second_samples,
dim='ensemble'):
return apply_ufunc(scipy.stats.wasserstein_distance,
first_samples, second_samples,
input_core_dims=[[dim], [dim]],
vectorize=True)
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in `apply`. You may find helper functions such as
numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
works well with numba's vectorize and guvectorize. Further explanation with
examples are provided in the xarray documentation [3].
See also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
References
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
.. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
""" # noqa: E501 # don't error on that URL one line up
from .groupby import GroupBy
from .dataarray import DataArray
from .variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
'input_core_dims must be None or a tuple with the length same to '
'the number of arguments. Given input_core_dims: {}, '
'number of args: {}.'.format(input_core_dims, len(args)))
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims and not exclude_dims <= signature.all_core_dims:
raise ValueError('each dimension in `exclude_dims` must also be a '
'core dimension in the function signature')
if kwargs:
func = functools.partial(func, **kwargs)
if vectorize:
if signature.all_core_dims:
# we need the signature argument
if LooseVersion(np.__version__) < '1.12': # pragma: no cover
raise NotImplementedError(
'numpy 1.12 or newer required when using vectorize=True '
'in xarray.apply_ufunc with non-scalar output core '
'dimensions.')
func = np.vectorize(func,
otypes=output_dtypes,
signature=signature.to_gufunc_string())
else:
func = np.vectorize(func, otypes=output_dtypes)
variables_vfunc = functools.partial(apply_variable_ufunc, func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
output_dtypes=output_dtypes,
output_sizes=output_sizes)
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(apply_ufunc, func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask)
return apply_groupby_func(this_apply, *args)
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(variables_vfunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs)
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(variables_vfunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs)
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
return apply_array_ufunc(func, *args, dask=dask) | ['def', 'apply_ufunc', '(', 'func', ':', 'Callable', ',', '*', 'args', ':', 'Any', ',', 'input_core_dims', ':', 'Optional', '[', 'Sequence', '[', 'Sequence', ']', ']', '=', 'None', ',', 'output_core_dims', ':', 'Optional', '[', 'Sequence', '[', 'Sequence', ']', ']', '=', '(', '(', ')', ',', ')', ',', 'exclude_dims', ':', 'AbstractSet', '=', 'frozenset', '(', ')', ',', 'vectorize', ':', 'bool', '=', 'False', ',', 'join', ':', 'str', '=', "'exact'", ',', 'dataset_join', ':', 'str', '=', "'exact'", ',', 'dataset_fill_value', ':', 'object', '=', '_NO_FILL_VALUE', ',', 'keep_attrs', ':', 'bool', '=', 'False', ',', 'kwargs', ':', 'Mapping', '=', 'None', ',', 'dask', ':', 'str', '=', "'forbidden'", ',', 'output_dtypes', ':', 'Optional', '[', 'Sequence', ']', '=', 'None', ',', 'output_sizes', ':', 'Optional', '[', 'Mapping', '[', 'Any', ',', 'int', ']', ']', '=', 'None', ')', '->', 'Any', ':', "# noqa: E501 # don't error on that URL one line up", 'from', '.', 'groupby', 'import', 'GroupBy', 'from', '.', 'dataarray', 'import', 'DataArray', 'from', '.', 'variable', 'import', 'Variable', 'if', 'input_core_dims', 'is', 'None', ':', 'input_core_dims', '=', '(', '(', ')', ',', ')', '*', '(', 'len', '(', 'args', ')', ')', 'elif', 'len', '(', 'input_core_dims', ')', '!=', 'len', '(', 'args', ')', ':', 'raise', 'ValueError', '(', "'input_core_dims must be None or a tuple with the length same to '", "'the number of arguments. Given input_core_dims: {}, '", "'number of args: {}.'", '.', 'format', '(', 'input_core_dims', ',', 'len', '(', 'args', ')', ')', ')', 'if', 'kwargs', 'is', 'None', ':', 'kwargs', '=', '{', '}', 'signature', '=', '_UFuncSignature', '(', 'input_core_dims', ',', 'output_core_dims', ')', 'if', 'exclude_dims', 'and', 'not', 'exclude_dims', '<=', 'signature', '.', 'all_core_dims', ':', 'raise', 'ValueError', '(', "'each dimension in `exclude_dims` must also be a '", "'core dimension in the function signature'", ')', 'if', 'kwargs', ':', 'func', '=', 'functools', '.', 'partial', '(', 'func', ',', '*', '*', 'kwargs', ')', 'if', 'vectorize', ':', 'if', 'signature', '.', 'all_core_dims', ':', '# we need the signature argument', 'if', 'LooseVersion', '(', 'np', '.', '__version__', ')', '<', "'1.12'", ':', '# pragma: no cover', 'raise', 'NotImplementedError', '(', "'numpy 1.12 or newer required when using vectorize=True '", "'in xarray.apply_ufunc with non-scalar output core '", "'dimensions.'", ')', 'func', '=', 'np', '.', 'vectorize', '(', 'func', ',', 'otypes', '=', 'output_dtypes', ',', 'signature', '=', 'signature', '.', 'to_gufunc_string', '(', ')', ')', 'else', ':', 'func', '=', 'np', '.', 'vectorize', '(', 'func', ',', 'otypes', '=', 'output_dtypes', ')', 'variables_vfunc', '=', 'functools', '.', 'partial', '(', 'apply_variable_ufunc', ',', 'func', ',', 'signature', '=', 'signature', ',', 'exclude_dims', '=', 'exclude_dims', ',', 'keep_attrs', '=', 'keep_attrs', ',', 'dask', '=', 'dask', ',', 'output_dtypes', '=', 'output_dtypes', ',', 'output_sizes', '=', 'output_sizes', ')', 'if', 'any', '(', 'isinstance', '(', 'a', ',', 'GroupBy', ')', 'for', 'a', 'in', 'args', ')', ':', 'this_apply', '=', 'functools', '.', 'partial', '(', 'apply_ufunc', ',', 'func', ',', 'input_core_dims', '=', 'input_core_dims', ',', 'output_core_dims', '=', 'output_core_dims', ',', 'exclude_dims', '=', 'exclude_dims', ',', 'join', '=', 'join', ',', 'dataset_join', '=', 'dataset_join', ',', 'dataset_fill_value', '=', 'dataset_fill_value', ',', 'keep_attrs', '=', 
'keep_attrs', ',', 'dask', '=', 'dask', ')', 'return', 'apply_groupby_func', '(', 'this_apply', ',', '*', 'args', ')', 'elif', 'any', '(', 'is_dict_like', '(', 'a', ')', 'for', 'a', 'in', 'args', ')', ':', 'return', 'apply_dataset_vfunc', '(', 'variables_vfunc', ',', '*', 'args', ',', 'signature', '=', 'signature', ',', 'join', '=', 'join', ',', 'exclude_dims', '=', 'exclude_dims', ',', 'dataset_join', '=', 'dataset_join', ',', 'fill_value', '=', 'dataset_fill_value', ',', 'keep_attrs', '=', 'keep_attrs', ')', 'elif', 'any', '(', 'isinstance', '(', 'a', ',', 'DataArray', ')', 'for', 'a', 'in', 'args', ')', ':', 'return', 'apply_dataarray_vfunc', '(', 'variables_vfunc', ',', '*', 'args', ',', 'signature', '=', 'signature', ',', 'join', '=', 'join', ',', 'exclude_dims', '=', 'exclude_dims', ',', 'keep_attrs', '=', 'keep_attrs', ')', 'elif', 'any', '(', 'isinstance', '(', 'a', ',', 'Variable', ')', 'for', 'a', 'in', 'args', ')', ':', 'return', 'variables_vfunc', '(', '*', 'args', ')', 'else', ':', 'return', 'apply_array_ufunc', '(', 'func', ',', '*', 'args', ',', 'dask', '=', 'dask', ')'] | Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : Sequence[Sequence], optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : List[tuple], optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs: boolean, Optional
Whether to copy attributes from the first argument to the output.
kwargs: dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask: 'forbidden', 'allowed' or 'parallelized', optional
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array. If used, the ``output_dtypes`` argument must
also be provided. Multiple output arguments are not yet supported.
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
... return xr.apply_ufunc(func, a, b)
You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.414214, 2.828427, 4.242641])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects is also
supported:
>>> magnitude(4, 5)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension::
def mean(obj, dim):
# note: apply always moves core dimensions to the end
return apply_ufunc(np.mean, obj,
input_core_dims=[[dim]],
kwargs={'axis': -1})
Inner product over a specific dimension (like ``xr.dot``)::
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(a, b, dim):
return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
Stack objects along a new dimension (like ``xr.concat``)::
def stack(objects, dim, new_coord):
# note: this version does not stack coordinates
func = lambda *x: np.stack(x, axis=-1)
result = apply_ufunc(func, *objects,
output_core_dims=[[dim]],
join='outer',
dataset_fill_value=np.nan)
result[dim] = new_coord
return result
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors::
import scipy.stats
def earth_mover_distance(first_samples,
second_samples,
dim='ensemble'):
return apply_ufunc(scipy.stats.wasserstein_distance,
first_samples, second_samples,
input_core_dims=[[dim], [dim]],
vectorize=True)
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in `apply`. You may find helper functions such as
numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
works well with numba's vectorize and guvectorize. Further explanation with
examples are provided in the xarray documentation [3].
See also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
References
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
.. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation | ['Apply', 'a', 'vectorized', 'function', 'for', 'unlabeled', 'arrays', 'on', 'xarray', 'objects', '.'] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/computation.py#L683-L973 |
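Illustration for the apply_ufunc row above: a minimal, runnable sketch (needs only numpy and xarray installed) that applies the docstring's own mean-over-a-dimension recipe; the helper and variable names below are made up for the example.

import numpy as np
import xarray as xr

def dim_mean(obj, dim):
    # apply_ufunc moves the named core dimension to the last axis,
    # so np.mean(..., axis=-1) reduces over it, as the docstring describes.
    return xr.apply_ufunc(np.mean, obj,
                          input_core_dims=[[dim]],
                          kwargs={'axis': -1})

da = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=('x', 'time'))
print(dim_mean(da, 'time'))  # DataArray with only the 'x' dimension left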
873 | jwhitlock/drf-cached-instances | sample_poll_app/cache.py | SampleCache.choice_default_invalidator | def choice_default_invalidator(self, obj):
"""Invalidated cached items when the Choice changes."""
invalid = [('Question', obj.question_id, True)]
for pk in obj.voters.values_list('pk', flat=True):
invalid.append(('User', pk, False))
return invalid | python | def choice_default_invalidator(self, obj):
"""Invalidated cached items when the Choice changes."""
invalid = [('Question', obj.question_id, True)]
for pk in obj.voters.values_list('pk', flat=True):
invalid.append(('User', pk, False))
return invalid | ['def', 'choice_default_invalidator', '(', 'self', ',', 'obj', ')', ':', 'invalid', '=', '[', '(', "'Question'", ',', 'obj', '.', 'question_id', ',', 'True', ')', ']', 'for', 'pk', 'in', 'obj', '.', 'voters', '.', 'values_list', '(', "'pk'", ',', 'flat', '=', 'True', ')', ':', 'invalid', '.', 'append', '(', '(', "'User'", ',', 'pk', ',', 'False', ')', ')', 'return', 'invalid'] | Invalidated cached items when the Choice changes. | ['Invalidated', 'cached', 'items', 'when', 'the', 'Choice', 'changes', '.'] | train | https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L121-L126 |
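Illustration for the invalidator row above: a self-contained sketch of the same (model name, primary key, flag) triples, with a stand-in object instead of a Django model; the exact meaning of the boolean flag is taken from context, not verified against drf-cached-instances.

class FakeChoice:
    # Hypothetical stand-in for the Choice model used above.
    question_id = 7
    voter_pks = [1, 2, 3]

def choice_invalidator(choice):
    # Build (model name, primary key, flag) triples like the method above.
    invalid = [('Question', choice.question_id, True)]
    invalid.extend(('User', pk, False) for pk in choice.voter_pks)
    return invalid

print(choice_invalidator(FakeChoice()))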
874 | computational-metabolomics/msp2db | msp2db/parse.py | LibraryData._parse_files | def _parse_files(self, msp_pth, chunk, db_type, celery_obj=False):
"""Parse the MSP files and insert into database
Args:
msp_pth (str): path to msp file or directory [required]
db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required]
chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required]
celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks
[default False]
"""
if os.path.isdir(msp_pth):
c = 0
for folder, subs, files in sorted(os.walk(msp_pth)):
for msp_file in sorted(files):
msp_file_pth = os.path.join(folder, msp_file)
if os.path.isdir(msp_file_pth) or not msp_file_pth.lower().endswith(('txt', 'msp')):
continue
print('MSP FILE PATH', msp_file_pth)
self.num_lines = line_count(msp_file_pth)
# each file is processed separately but we want to still process in chunks so we save the number
# of spectra currently being processed with the c variable
with open(msp_file_pth, "r") as f:
c = self._parse_lines(f, chunk, db_type, celery_obj, c)
else:
self.num_lines = line_count(msp_pth)
with open(msp_pth, "r") as f:
self._parse_lines(f, chunk, db_type, celery_obj)
self.insert_data(remove_data=True, db_type=db_type) | python | def _parse_files(self, msp_pth, chunk, db_type, celery_obj=False):
"""Parse the MSP files and insert into database
Args:
msp_pth (str): path to msp file or directory [required]
db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required]
chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required]
celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks
[default False]
"""
if os.path.isdir(msp_pth):
c = 0
for folder, subs, files in sorted(os.walk(msp_pth)):
for msp_file in sorted(files):
msp_file_pth = os.path.join(folder, msp_file)
if os.path.isdir(msp_file_pth) or not msp_file_pth.lower().endswith(('txt', 'msp')):
continue
print('MSP FILE PATH', msp_file_pth)
self.num_lines = line_count(msp_file_pth)
# each file is processed separately but we want to still process in chunks so we save the number
# of spectra currently being processed with the c variable
with open(msp_file_pth, "r") as f:
c = self._parse_lines(f, chunk, db_type, celery_obj, c)
else:
self.num_lines = line_count(msp_pth)
with open(msp_pth, "r") as f:
self._parse_lines(f, chunk, db_type, celery_obj)
self.insert_data(remove_data=True, db_type=db_type) | ['def', '_parse_files', '(', 'self', ',', 'msp_pth', ',', 'chunk', ',', 'db_type', ',', 'celery_obj', '=', 'False', ')', ':', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'msp_pth', ')', ':', 'c', '=', '0', 'for', 'folder', ',', 'subs', ',', 'files', 'in', 'sorted', '(', 'os', '.', 'walk', '(', 'msp_pth', ')', ')', ':', 'for', 'msp_file', 'in', 'sorted', '(', 'files', ')', ':', 'msp_file_pth', '=', 'os', '.', 'path', '.', 'join', '(', 'folder', ',', 'msp_file', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'msp_file_pth', ')', 'or', 'not', 'msp_file_pth', '.', 'lower', '(', ')', '.', 'endswith', '(', '(', "'txt'", ',', "'msp'", ')', ')', ':', 'continue', 'print', '(', "'MSP FILE PATH'", ',', 'msp_file_pth', ')', 'self', '.', 'num_lines', '=', 'line_count', '(', 'msp_file_pth', ')', '# each file is processed separately but we want to still process in chunks so we save the number', '# of spectra currently being processed with the c variable', 'with', 'open', '(', 'msp_file_pth', ',', '"r"', ')', 'as', 'f', ':', 'c', '=', 'self', '.', '_parse_lines', '(', 'f', ',', 'chunk', ',', 'db_type', ',', 'celery_obj', ',', 'c', ')', 'else', ':', 'self', '.', 'num_lines', '=', 'line_count', '(', 'msp_pth', ')', 'with', 'open', '(', 'msp_pth', ',', '"r"', ')', 'as', 'f', ':', 'self', '.', '_parse_lines', '(', 'f', ',', 'chunk', ',', 'db_type', ',', 'celery_obj', ')', 'self', '.', 'insert_data', '(', 'remove_data', '=', 'True', ',', 'db_type', '=', 'db_type', ')'] | Parse the MSP files and insert into database
Args:
msp_pth (str): path to msp file or directory [required]
db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required]
chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required]
celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks
[default False] | ['Parse', 'the', 'MSP', 'files', 'and', 'insert', 'into', 'database'] | train | https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/parse.py#L166-L196 |
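Illustration for the _parse_files row above: a standalone sketch of the directory walk and extension filter it uses (sorted traversal, only .msp/.txt files); no msp2db import is required.

import os

def iter_msp_files(root):
    # Walk folders in sorted order and yield only .msp/.txt files,
    # skipping directories and other extensions, as _parse_files does above.
    for folder, subs, files in sorted(os.walk(root)):
        for name in sorted(files):
            path = os.path.join(folder, name)
            if os.path.isdir(path) or not path.lower().endswith(('txt', 'msp')):
                continue
            yield path

for p in iter_msp_files('.'):  # any directory works for a dry run
    print(p)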
875 | inasafe/inasafe | extras/system_tools.py | tar_file | def tar_file(files, tarname):
'''Compress a file or directory into a tar file.'''
if isinstance(files, basestring):
files = [files]
o = tarfile.open(tarname, 'w:gz')
for file in files:
o.add(file)
o.close() | python | def tar_file(files, tarname):
'''Compress a file or directory into a tar file.'''
if isinstance(files, basestring):
files = [files]
o = tarfile.open(tarname, 'w:gz')
for file in files:
o.add(file)
o.close() | ['def', 'tar_file', '(', 'files', ',', 'tarname', ')', ':', 'if', 'isinstance', '(', 'files', ',', 'basestring', ')', ':', 'files', '=', '[', 'files', ']', 'o', '=', 'tarfile', '.', 'open', '(', 'tarname', ',', "'w:gz'", ')', 'for', 'file', 'in', 'files', ':', 'o', '.', 'add', '(', 'file', ')', 'o', '.', 'close', '(', ')'] | Compress a file or directory into a tar file. | ['Compress', 'a', 'file', 'or', 'directory', 'into', 'a', 'tar', 'file', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/extras/system_tools.py#L323-L332 |
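Illustration for the tar_file row above: the helper is Python 2 (it relies on basestring); below is a Python 3 sketch of the same gzipped-tar idea using a context manager. The paths in the commented call are hypothetical.

import tarfile

def tar_paths(paths, tarname):
    # Accept a single path or a list of paths, then write a gzipped tar.
    if isinstance(paths, str):
        paths = [paths]
    with tarfile.open(tarname, 'w:gz') as archive:
        for path in paths:
            archive.add(path)

# tar_paths('some_dir_or_file', 'backup.tar.gz')  # hypothetical input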
876 | globus/globus-cli | globus_cli/safeio/get_option_vals.py | is_verbose | def is_verbose():
"""
Only safe to call within a click context.
"""
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.is_verbose() | python | def is_verbose():
"""
Only safe to call within a click context.
"""
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.is_verbose() | ['def', 'is_verbose', '(', ')', ':', 'ctx', '=', 'click', '.', 'get_current_context', '(', ')', 'state', '=', 'ctx', '.', 'ensure_object', '(', 'CommandState', ')', 'return', 'state', '.', 'is_verbose', '(', ')'] | Only safe to call within a click context. | ['Only', 'safe', 'to', 'call', 'within', 'a', 'click', 'context', '.'] | train | https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/safeio/get_option_vals.py#L51-L57 |
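Illustration for the is_verbose row above: a self-contained sketch of the click pattern it relies on, stashing shared state on the context with ensure_object; the CommandState class below is a made-up stand-in for globus-cli's real one.

import click

class CommandState:
    # Minimal stand-in for globus-cli's state object.
    verbosity = 0
    def is_verbose(self):
        return self.verbosity > 0

def is_verbose():
    # Only safe inside a click command, exactly as the docstring above warns.
    ctx = click.get_current_context()
    return ctx.ensure_object(CommandState).is_verbose()

@click.command()
@click.option('-v', 'verbose', is_flag=True)
def cli(verbose):
    click.get_current_context().ensure_object(CommandState).verbosity = int(verbose)
    click.echo('verbose: {}'.format(is_verbose()))

if __name__ == '__main__':
    cli()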
877 | necaris/python3-openid | openid/consumer/consumer.py | Consumer.begin | def begin(self, user_url, anonymous=False):
"""Start the OpenID authentication process. See steps 1-2 in
the overview at the top of this file.
@param user_url: Identity URL given by the user. This method
performs a textual transformation of the URL to try and
make sure it is normalized. For example, a user_url of
example.com will be normalized to http://example.com/
normalizing and resolving any redirects the server might
issue.
@type user_url: unicode
@param anonymous: Whether to make an anonymous request of the OpenID
provider. Such a request does not ask for an authorization
assertion for an OpenID identifier, but may be used with
extensions to pass other data. e.g. "I don't care who you are,
but I'd like to know your time zone."
@type anonymous: bool
@returns: An object containing the discovered information will
be returned, with a method for building a redirect URL to
the server, as described in step 3 of the overview. This
object may also be used to add extension arguments to the
request, using its
L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>}
method.
@returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>}
@raises openid.consumer.discover.DiscoveryFailure: when I fail to
find an OpenID server for this URL. If the C{yadis} package
is available, L{openid.consumer.discover.DiscoveryFailure} is
an alias for C{yadis.discover.DiscoveryFailure}.
"""
disco = Discovery(self.session, user_url, self.session_key_prefix)
try:
service = disco.getNextService(self._discover)
except fetchers.HTTPFetchingError as why:
raise DiscoveryFailure('Error fetching XRDS document: %s' %
(why.why, ), None)
if service is None:
raise DiscoveryFailure('No usable OpenID services found for %s' %
(user_url, ), None)
else:
return self.beginWithoutDiscovery(service, anonymous) | python | def begin(self, user_url, anonymous=False):
"""Start the OpenID authentication process. See steps 1-2 in
the overview at the top of this file.
@param user_url: Identity URL given by the user. This method
performs a textual transformation of the URL to try and
make sure it is normalized. For example, a user_url of
example.com will be normalized to http://example.com/
normalizing and resolving any redirects the server might
issue.
@type user_url: unicode
@param anonymous: Whether to make an anonymous request of the OpenID
provider. Such a request does not ask for an authorization
assertion for an OpenID identifier, but may be used with
extensions to pass other data. e.g. "I don't care who you are,
but I'd like to know your time zone."
@type anonymous: bool
@returns: An object containing the discovered information will
be returned, with a method for building a redirect URL to
the server, as described in step 3 of the overview. This
object may also be used to add extension arguments to the
request, using its
L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>}
method.
@returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>}
@raises openid.consumer.discover.DiscoveryFailure: when I fail to
find an OpenID server for this URL. If the C{yadis} package
is available, L{openid.consumer.discover.DiscoveryFailure} is
an alias for C{yadis.discover.DiscoveryFailure}.
"""
disco = Discovery(self.session, user_url, self.session_key_prefix)
try:
service = disco.getNextService(self._discover)
except fetchers.HTTPFetchingError as why:
raise DiscoveryFailure('Error fetching XRDS document: %s' %
(why.why, ), None)
if service is None:
raise DiscoveryFailure('No usable OpenID services found for %s' %
(user_url, ), None)
else:
return self.beginWithoutDiscovery(service, anonymous) | ['def', 'begin', '(', 'self', ',', 'user_url', ',', 'anonymous', '=', 'False', ')', ':', 'disco', '=', 'Discovery', '(', 'self', '.', 'session', ',', 'user_url', ',', 'self', '.', 'session_key_prefix', ')', 'try', ':', 'service', '=', 'disco', '.', 'getNextService', '(', 'self', '.', '_discover', ')', 'except', 'fetchers', '.', 'HTTPFetchingError', 'as', 'why', ':', 'raise', 'DiscoveryFailure', '(', "'Error fetching XRDS document: %s'", '%', '(', 'why', '.', 'why', ',', ')', ',', 'None', ')', 'if', 'service', 'is', 'None', ':', 'raise', 'DiscoveryFailure', '(', "'No usable OpenID services found for %s'", '%', '(', 'user_url', ',', ')', ',', 'None', ')', 'else', ':', 'return', 'self', '.', 'beginWithoutDiscovery', '(', 'service', ',', 'anonymous', ')'] | Start the OpenID authentication process. See steps 1-2 in
the overview at the top of this file.
@param user_url: Identity URL given by the user. This method
performs a textual transformation of the URL to try and
make sure it is normalized. For example, a user_url of
example.com will be normalized to http://example.com/
normalizing and resolving any redirects the server might
issue.
@type user_url: unicode
@param anonymous: Whether to make an anonymous request of the OpenID
provider. Such a request does not ask for an authorization
assertion for an OpenID identifier, but may be used with
extensions to pass other data. e.g. "I don't care who you are,
but I'd like to know your time zone."
@type anonymous: bool
@returns: An object containing the discovered information will
be returned, with a method for building a redirect URL to
the server, as described in step 3 of the overview. This
object may also be used to add extension arguments to the
request, using its
L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>}
method.
@returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>}
@raises openid.consumer.discover.DiscoveryFailure: when I fail to
find an OpenID server for this URL. If the C{yadis} package
is available, L{openid.consumer.discover.DiscoveryFailure} is
an alias for C{yadis.discover.DiscoveryFailure}. | ['Start', 'the', 'OpenID', 'authentication', 'process', '.', 'See', 'steps', '1', '-', '2', 'in', 'the', 'overview', 'at', 'the', 'top', 'of', 'this', 'file', '.'] | train | https://github.com/necaris/python3-openid/blob/4911bbc196dfd6f9eda7155df9903d668720ecbf/openid/consumer/consumer.py#L312-L359 |
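Illustration for the Consumer.begin row above: a hedged sketch of how begin() is typically driven from relying-party code (store and session handling are simplified; the identity URL, realm, and return_to values are hypothetical, and the example performs real discovery when run).

from openid.consumer import consumer
from openid.store.memstore import MemoryStore  # non-persistent store, fine for a sketch

session = {}  # any dict-like per-user session works
oidc = consumer.Consumer(session, MemoryStore())
try:
    auth_request = oidc.begin('https://example.com/openid-identity')
except consumer.DiscoveryFailure as exc:
    print('discovery failed:', exc)
else:
    # Build the URL to redirect the user agent to (step 3 of the overview).
    print(auth_request.redirectURL(realm='https://rp.example/',
                                   return_to='https://rp.example/complete'))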
878 | PaloAltoNetworks/pancloud | pancloud/directorysync.py | DirectorySyncService.attributes | def attributes(self, **kwargs): # pragma: no cover
"""Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example.
"""
path = "/directory-sync-service/v1/attributes"
r = self._httpclient.request(
method="GET",
path=path,
url=self.url,
**kwargs
)
return r | python | def attributes(self, **kwargs): # pragma: no cover
"""Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example.
"""
path = "/directory-sync-service/v1/attributes"
r = self._httpclient.request(
method="GET",
path=path,
url=self.url,
**kwargs
)
return r | ['def', 'attributes', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', '# pragma: no cover', 'path', '=', '"/directory-sync-service/v1/attributes"', 'r', '=', 'self', '.', '_httpclient', '.', 'request', '(', 'method', '=', '"GET"', ',', 'path', '=', 'path', ',', 'url', '=', 'self', '.', 'url', ',', '*', '*', 'kwargs', ')', 'return', 'r'] | Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example. | ['Retrieve', 'the', 'attribute', 'configuration', 'object', '.'] | train | https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/directorysync.py#L61-L86 |
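Illustration for the attributes row above: a hedged usage sketch; the constructor arguments and credential wiring are assumptions based on this docstring and the pancloud examples it mentions, not a verified recipe.

from pancloud import Credentials, DirectorySyncService

# url and credential handling are assumed; adjust to your environment.
dss = DirectorySyncService(
    url='https://api.us.paloaltonetworks.com',
    credentials=Credentials(),
)
response = dss.attributes()  # GET /directory-sync-service/v1/attributes
print(response.status_code)
print(response.text)         # the attribute configuration object as JSON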
879 | mapillary/mapillary_tools | mapillary_tools/exif_read.py | ExifRead.mapillary_tag_exists | def mapillary_tag_exists(self):
'''
Check existence of required Mapillary tags
'''
description_tag = "Image ImageDescription"
if description_tag not in self.tags:
return False
for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
return False
return True | python | def mapillary_tag_exists(self):
'''
Check existence of required Mapillary tags
'''
description_tag = "Image ImageDescription"
if description_tag not in self.tags:
return False
for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
return False
return True | ['def', 'mapillary_tag_exists', '(', 'self', ')', ':', 'description_tag', '=', '"Image ImageDescription"', 'if', 'description_tag', 'not', 'in', 'self', '.', 'tags', ':', 'return', 'False', 'for', 'requirement', 'in', '[', '"MAPSequenceUUID"', ',', '"MAPSettingsUserKey"', ',', '"MAPCaptureTime"', ',', '"MAPLongitude"', ',', '"MAPLatitude"', ']', ':', 'if', 'requirement', 'not', 'in', 'self', '.', 'tags', '[', 'description_tag', ']', '.', 'values', 'or', 'json', '.', 'loads', '(', 'self', '.', 'tags', '[', 'description_tag', ']', '.', 'values', ')', '[', 'requirement', ']', 'in', '[', '""', ',', 'None', ',', '" "', ']', ':', 'return', 'False', 'return', 'True'] | Check existence of required Mapillary tags | ['Check', 'existence', 'of', 'required', 'Mapillary', 'tags'] | train | https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L375-L385 |
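Illustration for the mapillary_tag_exists row above: a standalone sketch of the same required-key check applied to a plain JSON description string (no exifread involved; the key names are copied from the method).

import json

REQUIRED = ("MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime",
            "MAPLongitude", "MAPLatitude")

def mapillary_description_ok(description_json):
    # Mirror the validation above: every required key must exist and be non-empty.
    try:
        data = json.loads(description_json)
    except ValueError:
        return False
    return all(data.get(key) not in ("", None, " ") for key in REQUIRED)

print(mapillary_description_ok('{"MAPLatitude": 1.0}'))  # False: most keys missing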
880 | openstack/swauth | swauth/middleware.py | Swauth.get_groups | def get_groups(self, env, token):
"""Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
"""
groups = None
memcache_client = cache_from_env(env)
if memcache_client:
memcache_key = '%s/auth/%s' % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
s3_auth_details = env.get('swift3.auth_details')
if s3_auth_details:
if not self.s3_support:
self.logger.warning('S3 support is disabled in swauth.')
return None
if self.swauth_remote:
# TODO(gholt): Support S3-style authorization with
# swauth_remote mode
self.logger.warning('S3-style authorization not supported yet '
'with swauth_remote mode.')
return None
try:
account, user = s3_auth_details['access_key'].split(':', 1)
signature_from_user = s3_auth_details['signature']
msg = s3_auth_details['string_to_sign']
except Exception:
self.logger.debug(
'Swauth cannot parse swift3.auth_details value %r' %
(s3_auth_details, ))
return None
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
env, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
if 'x-object-meta-account-id' in resp.headers:
account_id = resp.headers['x-object-meta-account-id']
else:
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp2 = self.make_pre_authed_request(
env, 'HEAD', path).get_response(self.app)
if resp2.status_int // 100 != 2:
return None
account_id = resp2.headers['x-container-meta-account-id']
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace("%s:%s" % (account, user),
account_id, 1)
detail = json.loads(resp.body)
if detail:
creds = detail.get('auth')
try:
auth_encoder, creds_dict = \
swauth.authtypes.validate_creds(creds)
except ValueError as e:
self.logger.error('%s' % e.args[0])
return None
password = creds_dict['hash']
# https://bugs.python.org/issue5285
if isinstance(password, six.text_type):
password = password.encode('utf-8')
if isinstance(msg, six.text_type):
msg = msg.encode('utf-8')
valid_signature = base64.encodestring(hmac.new(
password, msg, sha1).digest()).strip()
if signature_from_user != valid_signature:
return None
groups = [g['name'] for g in detail['groups']]
if '.admin' in groups:
groups.remove('.admin')
groups.append(account_id)
groups = ','.join(groups)
return groups
if not groups:
if self.swauth_remote:
with Timeout(self.swauth_remote_timeout):
conn = http_connect(self.swauth_remote_parsed.hostname,
self.swauth_remote_parsed.port, 'GET',
'%s/v2/.token/%s' % (self.swauth_remote_parsed.path,
quote(token)),
ssl=(self.swauth_remote_parsed.scheme == 'https'))
resp = conn.getresponse()
resp.read()
conn.close()
if resp.status // 100 != 2:
return None
expires_from_now = float(resp.getheader('x-auth-ttl'))
groups = resp.getheader('x-auth-groups')
if memcache_client:
memcache_client.set(
memcache_key, (time() + expires_from_now, groups),
time=expires_from_now)
else:
object_name = self._get_concealed_token(token)
path = quote('/v1/%s/.token_%s/%s' %
(self.auth_account, object_name[-1], object_name))
resp = self.make_pre_authed_request(
env, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
detail = json.loads(resp.body)
if detail['expires'] < time():
self.make_pre_authed_request(
env, 'DELETE', path).get_response(self.app)
return None
groups = [g['name'] for g in detail['groups']]
if '.admin' in groups:
groups.remove('.admin')
groups.append(detail['account_id'])
groups = ','.join(groups)
if memcache_client:
memcache_client.set(
memcache_key,
(detail['expires'], groups),
time=float(detail['expires'] - time()))
return groups | python | def get_groups(self, env, token):
"""Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
"""
groups = None
memcache_client = cache_from_env(env)
if memcache_client:
memcache_key = '%s/auth/%s' % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
s3_auth_details = env.get('swift3.auth_details')
if s3_auth_details:
if not self.s3_support:
self.logger.warning('S3 support is disabled in swauth.')
return None
if self.swauth_remote:
# TODO(gholt): Support S3-style authorization with
# swauth_remote mode
self.logger.warning('S3-style authorization not supported yet '
'with swauth_remote mode.')
return None
try:
account, user = s3_auth_details['access_key'].split(':', 1)
signature_from_user = s3_auth_details['signature']
msg = s3_auth_details['string_to_sign']
except Exception:
self.logger.debug(
'Swauth cannot parse swift3.auth_details value %r' %
(s3_auth_details, ))
return None
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
env, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
if 'x-object-meta-account-id' in resp.headers:
account_id = resp.headers['x-object-meta-account-id']
else:
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp2 = self.make_pre_authed_request(
env, 'HEAD', path).get_response(self.app)
if resp2.status_int // 100 != 2:
return None
account_id = resp2.headers['x-container-meta-account-id']
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace("%s:%s" % (account, user),
account_id, 1)
detail = json.loads(resp.body)
if detail:
creds = detail.get('auth')
try:
auth_encoder, creds_dict = \
swauth.authtypes.validate_creds(creds)
except ValueError as e:
self.logger.error('%s' % e.args[0])
return None
password = creds_dict['hash']
# https://bugs.python.org/issue5285
if isinstance(password, six.text_type):
password = password.encode('utf-8')
if isinstance(msg, six.text_type):
msg = msg.encode('utf-8')
valid_signature = base64.encodestring(hmac.new(
password, msg, sha1).digest()).strip()
if signature_from_user != valid_signature:
return None
groups = [g['name'] for g in detail['groups']]
if '.admin' in groups:
groups.remove('.admin')
groups.append(account_id)
groups = ','.join(groups)
return groups
if not groups:
if self.swauth_remote:
with Timeout(self.swauth_remote_timeout):
conn = http_connect(self.swauth_remote_parsed.hostname,
self.swauth_remote_parsed.port, 'GET',
'%s/v2/.token/%s' % (self.swauth_remote_parsed.path,
quote(token)),
ssl=(self.swauth_remote_parsed.scheme == 'https'))
resp = conn.getresponse()
resp.read()
conn.close()
if resp.status // 100 != 2:
return None
expires_from_now = float(resp.getheader('x-auth-ttl'))
groups = resp.getheader('x-auth-groups')
if memcache_client:
memcache_client.set(
memcache_key, (time() + expires_from_now, groups),
time=expires_from_now)
else:
object_name = self._get_concealed_token(token)
path = quote('/v1/%s/.token_%s/%s' %
(self.auth_account, object_name[-1], object_name))
resp = self.make_pre_authed_request(
env, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
detail = json.loads(resp.body)
if detail['expires'] < time():
self.make_pre_authed_request(
env, 'DELETE', path).get_response(self.app)
return None
groups = [g['name'] for g in detail['groups']]
if '.admin' in groups:
groups.remove('.admin')
groups.append(detail['account_id'])
groups = ','.join(groups)
if memcache_client:
memcache_client.set(
memcache_key,
(detail['expires'], groups),
time=float(detail['expires'] - time()))
return groups | ['def', 'get_groups', '(', 'self', ',', 'env', ',', 'token', ')', ':', 'groups', '=', 'None', 'memcache_client', '=', 'cache_from_env', '(', 'env', ')', 'if', 'memcache_client', ':', 'memcache_key', '=', "'%s/auth/%s'", '%', '(', 'self', '.', 'reseller_prefix', ',', 'token', ')', 'cached_auth_data', '=', 'memcache_client', '.', 'get', '(', 'memcache_key', ')', 'if', 'cached_auth_data', ':', 'expires', ',', 'groups', '=', 'cached_auth_data', 'if', 'expires', '<', 'time', '(', ')', ':', 'groups', '=', 'None', 's3_auth_details', '=', 'env', '.', 'get', '(', "'swift3.auth_details'", ')', 'if', 's3_auth_details', ':', 'if', 'not', 'self', '.', 's3_support', ':', 'self', '.', 'logger', '.', 'warning', '(', "'S3 support is disabled in swauth.'", ')', 'return', 'None', 'if', 'self', '.', 'swauth_remote', ':', '# TODO(gholt): Support S3-style authorization with', '# swauth_remote mode', 'self', '.', 'logger', '.', 'warning', '(', "'S3-style authorization not supported yet '", "'with swauth_remote mode.'", ')', 'return', 'None', 'try', ':', 'account', ',', 'user', '=', 's3_auth_details', '[', "'access_key'", ']', '.', 'split', '(', "':'", ',', '1', ')', 'signature_from_user', '=', 's3_auth_details', '[', "'signature'", ']', 'msg', '=', 's3_auth_details', '[', "'string_to_sign'", ']', 'except', 'Exception', ':', 'self', '.', 'logger', '.', 'debug', '(', "'Swauth cannot parse swift3.auth_details value %r'", '%', '(', 's3_auth_details', ',', ')', ')', 'return', 'None', 'path', '=', 'quote', '(', "'/v1/%s/%s/%s'", '%', '(', 'self', '.', 'auth_account', ',', 'account', ',', 'user', ')', ')', 'resp', '=', 'self', '.', 'make_pre_authed_request', '(', 'env', ',', "'GET'", ',', 'path', ')', '.', 'get_response', '(', 'self', '.', 'app', ')', 'if', 'resp', '.', 'status_int', '//', '100', '!=', '2', ':', 'return', 'None', 'if', "'x-object-meta-account-id'", 'in', 'resp', '.', 'headers', ':', 'account_id', '=', 'resp', '.', 'headers', '[', "'x-object-meta-account-id'", ']', 'else', ':', 'path', '=', 'quote', '(', "'/v1/%s/%s'", '%', '(', 'self', '.', 'auth_account', ',', 'account', ')', ')', 'resp2', '=', 'self', '.', 'make_pre_authed_request', '(', 'env', ',', "'HEAD'", ',', 'path', ')', '.', 'get_response', '(', 'self', '.', 'app', ')', 'if', 'resp2', '.', 'status_int', '//', '100', '!=', '2', ':', 'return', 'None', 'account_id', '=', 'resp2', '.', 'headers', '[', "'x-container-meta-account-id'", ']', 'path', '=', 'env', '[', "'PATH_INFO'", ']', 'env', '[', "'PATH_INFO'", ']', '=', 'path', '.', 'replace', '(', '"%s:%s"', '%', '(', 'account', ',', 'user', ')', ',', 'account_id', ',', '1', ')', 'detail', '=', 'json', '.', 'loads', '(', 'resp', '.', 'body', ')', 'if', 'detail', ':', 'creds', '=', 'detail', '.', 'get', '(', "'auth'", ')', 'try', ':', 'auth_encoder', ',', 'creds_dict', '=', 'swauth', '.', 'authtypes', '.', 'validate_creds', '(', 'creds', ')', 'except', 'ValueError', 'as', 'e', ':', 'self', '.', 'logger', '.', 'error', '(', "'%s'", '%', 'e', '.', 'args', '[', '0', ']', ')', 'return', 'None', 'password', '=', 'creds_dict', '[', "'hash'", ']', '# https://bugs.python.org/issue5285', 'if', 'isinstance', '(', 'password', ',', 'six', '.', 'text_type', ')', ':', 'password', '=', 'password', '.', 'encode', '(', "'utf-8'", ')', 'if', 'isinstance', '(', 'msg', ',', 'six', '.', 'text_type', ')', ':', 'msg', '=', 'msg', '.', 'encode', '(', "'utf-8'", ')', 'valid_signature', '=', 'base64', '.', 'encodestring', '(', 'hmac', '.', 'new', '(', 'password', ',', 'msg', ',', 'sha1', ')', '.', 'digest', 
'(', ')', ')', '.', 'strip', '(', ')', 'if', 'signature_from_user', '!=', 'valid_signature', ':', 'return', 'None', 'groups', '=', '[', 'g', '[', "'name'", ']', 'for', 'g', 'in', 'detail', '[', "'groups'", ']', ']', 'if', "'.admin'", 'in', 'groups', ':', 'groups', '.', 'remove', '(', "'.admin'", ')', 'groups', '.', 'append', '(', 'account_id', ')', 'groups', '=', "','", '.', 'join', '(', 'groups', ')', 'return', 'groups', 'if', 'not', 'groups', ':', 'if', 'self', '.', 'swauth_remote', ':', 'with', 'Timeout', '(', 'self', '.', 'swauth_remote_timeout', ')', ':', 'conn', '=', 'http_connect', '(', 'self', '.', 'swauth_remote_parsed', '.', 'hostname', ',', 'self', '.', 'swauth_remote_parsed', '.', 'port', ',', "'GET'", ',', "'%s/v2/.token/%s'", '%', '(', 'self', '.', 'swauth_remote_parsed', '.', 'path', ',', 'quote', '(', 'token', ')', ')', ',', 'ssl', '=', '(', 'self', '.', 'swauth_remote_parsed', '.', 'scheme', '==', "'https'", ')', ')', 'resp', '=', 'conn', '.', 'getresponse', '(', ')', 'resp', '.', 'read', '(', ')', 'conn', '.', 'close', '(', ')', 'if', 'resp', '.', 'status', '//', '100', '!=', '2', ':', 'return', 'None', 'expires_from_now', '=', 'float', '(', 'resp', '.', 'getheader', '(', "'x-auth-ttl'", ')', ')', 'groups', '=', 'resp', '.', 'getheader', '(', "'x-auth-groups'", ')', 'if', 'memcache_client', ':', 'memcache_client', '.', 'set', '(', 'memcache_key', ',', '(', 'time', '(', ')', '+', 'expires_from_now', ',', 'groups', ')', ',', 'time', '=', 'expires_from_now', ')', 'else', ':', 'object_name', '=', 'self', '.', '_get_concealed_token', '(', 'token', ')', 'path', '=', 'quote', '(', "'/v1/%s/.token_%s/%s'", '%', '(', 'self', '.', 'auth_account', ',', 'object_name', '[', '-', '1', ']', ',', 'object_name', ')', ')', 'resp', '=', 'self', '.', 'make_pre_authed_request', '(', 'env', ',', "'GET'", ',', 'path', ')', '.', 'get_response', '(', 'self', '.', 'app', ')', 'if', 'resp', '.', 'status_int', '//', '100', '!=', '2', ':', 'return', 'None', 'detail', '=', 'json', '.', 'loads', '(', 'resp', '.', 'body', ')', 'if', 'detail', '[', "'expires'", ']', '<', 'time', '(', ')', ':', 'self', '.', 'make_pre_authed_request', '(', 'env', ',', "'DELETE'", ',', 'path', ')', '.', 'get_response', '(', 'self', '.', 'app', ')', 'return', 'None', 'groups', '=', '[', 'g', '[', "'name'", ']', 'for', 'g', 'in', 'detail', '[', "'groups'", ']', ']', 'if', "'.admin'", 'in', 'groups', ':', 'groups', '.', 'remove', '(', "'.admin'", ')', 'groups', '.', 'append', '(', 'detail', '[', "'account_id'", ']', ')', 'groups', '=', "','", '.', 'join', '(', 'groups', ')', 'if', 'memcache_client', ':', 'memcache_client', '.', 'set', '(', 'memcache_key', ',', '(', 'detail', '[', "'expires'", ']', ',', 'groups', ')', ',', 'time', '=', 'float', '(', 'detail', '[', "'expires'", ']', '-', 'time', '(', ')', ')', ')', 'return', 'groups'] | Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user. | ['Get', 'groups', 'for', 'the', 'given', 'token', '.'] | train | https://github.com/openstack/swauth/blob/0c8eaf50a9e2b3317f3eba62f205546904bc6d74/swauth/middleware.py#L305-L436 |
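Illustration for the get_groups row above: the S3 branch boils down to recomputing an HMAC-SHA1 signature over the string-to-sign and comparing it with the one the client supplied. A minimal standalone sketch follows (base64.b64encode replaces the deprecated encodestring spelling kept in the code above; the secret and string-to-sign are hypothetical).

import base64
import hmac
from hashlib import sha1

def s3_style_signature(secret, string_to_sign):
    # Same construction as the valid_signature computation above.
    if isinstance(secret, str):
        secret = secret.encode('utf-8')
    if isinstance(string_to_sign, str):
        string_to_sign = string_to_sign.encode('utf-8')
    return base64.b64encode(hmac.new(secret, string_to_sign, sha1).digest()).strip()

msg = 'GET\n\n\n2019-01-01T00:00:00Z\n/bucket/key'
supplied = s3_style_signature('top-secret', msg)
print(hmac.compare_digest(supplied, s3_style_signature('top-secret', msg)))  # True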
881 | glut23/webvtt-py | webvtt/cli.py | segment | def segment(f, output, target_duration, mpegts):
"""Segment command."""
try:
target_duration = int(target_duration)
except ValueError:
exit('Error: Invalid target duration.')
try:
mpegts = int(mpegts)
except ValueError:
exit('Error: Invalid MPEGTS value.')
WebVTTSegmenter().segment(f, output, target_duration, mpegts) | python | def segment(f, output, target_duration, mpegts):
"""Segment command."""
try:
target_duration = int(target_duration)
except ValueError:
exit('Error: Invalid target duration.')
try:
mpegts = int(mpegts)
except ValueError:
exit('Error: Invalid MPEGTS value.')
WebVTTSegmenter().segment(f, output, target_duration, mpegts) | ['def', 'segment', '(', 'f', ',', 'output', ',', 'target_duration', ',', 'mpegts', ')', ':', 'try', ':', 'target_duration', '=', 'int', '(', 'target_duration', ')', 'except', 'ValueError', ':', 'exit', '(', "'Error: Invalid target duration.'", ')', 'try', ':', 'mpegts', '=', 'int', '(', 'mpegts', ')', 'except', 'ValueError', ':', 'exit', '(', "'Error: Invalid MPEGTS value.'", ')', 'WebVTTSegmenter', '(', ')', '.', 'segment', '(', 'f', ',', 'output', ',', 'target_duration', ',', 'mpegts', ')'] | Segment command. | ['Segment', 'command', '.'] | train | https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/cli.py#L35-L47 |
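Illustration for the segment command row above: a hedged sketch of driving the same segmenter directly from Python; the WebVTTSegmenter import path is assumed from this package's public API at the time, and the input file path is hypothetical.

from webvtt import WebVTTSegmenter  # assumed export, as used by the CLI above

# Positional arguments mirror the CLI call above:
# input file, output directory, target segment duration (s), MPEGTS offset.
WebVTTSegmenter().segment('captions.vtt', 'segments/', 10, 900000)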
882 | quintusdias/glymur | glymur/codestream.py | Codestream._parse_unrecognized_segment | def _parse_unrecognized_segment(self, fptr):
"""Looks like a valid marker, but not sure from reading the specs.
"""
msg = ("Unrecognized codestream marker 0x{marker_id:x} encountered at "
"byte offset {offset}.")
msg = msg.format(marker_id=self._marker_id, offset=fptr.tell())
warnings.warn(msg, UserWarning)
cpos = fptr.tell()
read_buffer = fptr.read(2)
next_item, = struct.unpack('>H', read_buffer)
fptr.seek(cpos)
if ((next_item & 0xff00) >> 8) == 255:
# No segment associated with this marker, so reset
# to two bytes after it.
segment = Segment(id='0x{0:x}'.format(self._marker_id),
offset=self._offset, length=0)
else:
segment = self._parse_reserved_segment(fptr)
return segment | python | def _parse_unrecognized_segment(self, fptr):
"""Looks like a valid marker, but not sure from reading the specs.
"""
msg = ("Unrecognized codestream marker 0x{marker_id:x} encountered at "
"byte offset {offset}.")
msg = msg.format(marker_id=self._marker_id, offset=fptr.tell())
warnings.warn(msg, UserWarning)
cpos = fptr.tell()
read_buffer = fptr.read(2)
next_item, = struct.unpack('>H', read_buffer)
fptr.seek(cpos)
if ((next_item & 0xff00) >> 8) == 255:
# No segment associated with this marker, so reset
# to two bytes after it.
segment = Segment(id='0x{0:x}'.format(self._marker_id),
offset=self._offset, length=0)
else:
segment = self._parse_reserved_segment(fptr)
return segment | ['def', '_parse_unrecognized_segment', '(', 'self', ',', 'fptr', ')', ':', 'msg', '=', '(', '"Unrecognized codestream marker 0x{marker_id:x} encountered at "', '"byte offset {offset}."', ')', 'msg', '=', 'msg', '.', 'format', '(', 'marker_id', '=', 'self', '.', '_marker_id', ',', 'offset', '=', 'fptr', '.', 'tell', '(', ')', ')', 'warnings', '.', 'warn', '(', 'msg', ',', 'UserWarning', ')', 'cpos', '=', 'fptr', '.', 'tell', '(', ')', 'read_buffer', '=', 'fptr', '.', 'read', '(', '2', ')', 'next_item', ',', '=', 'struct', '.', 'unpack', '(', "'>H'", ',', 'read_buffer', ')', 'fptr', '.', 'seek', '(', 'cpos', ')', 'if', '(', '(', 'next_item', '&', '0xff00', ')', '>>', '8', ')', '==', '255', ':', '# No segment associated with this marker, so reset', '# to two bytes after it.', 'segment', '=', 'Segment', '(', 'id', '=', "'0x{0:x}'", '.', 'format', '(', 'self', '.', '_marker_id', ')', ',', 'offset', '=', 'self', '.', '_offset', ',', 'length', '=', '0', ')', 'else', ':', 'segment', '=', 'self', '.', '_parse_reserved_segment', '(', 'fptr', ')', 'return', 'segment'] | Looks like a valid marker, but not sure from reading the specs. | ['Looks', 'like', 'a', 'valid', 'marker', 'but', 'not', 'sure', 'from', 'reading', 'the', 'specs', '.'] | train | https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/codestream.py#L224-L242 |
883 | janpipek/physt | physt/plotting/vega.py | _create_scales | def _create_scales(hist: HistogramBase, vega: dict, kwargs: dict):
"""Find proper scales for axes."""
if hist.ndim == 1:
bins0 = hist.bins.astype(float)
else:
bins0 = hist.bins[0].astype(float)
xlim = kwargs.pop("xlim", "auto")
ylim = kwargs.pop("ylim", "auto")
if xlim is "auto":
nice_x = True
else:
nice_x = False
if ylim is "auto":
nice_y = True
else:
nice_y = False
# TODO: Unify xlim & ylim parameters with matplotlib
# TODO: Apply xscale & yscale parameters
vega["scales"] = [
{
"name": "xscale",
"type": "linear",
"range": "width",
"nice": nice_x,
"zero": None,
"domain": [bins0[0, 0], bins0[-1, 1]] if xlim == "auto" else [float(xlim[0]), float(xlim[1])],
# "domain": {"data": "table", "field": "x"}
},
{
"name": "yscale",
"type": "linear",
"range": "height",
"nice": nice_y,
"zero": True if hist.ndim == 1 else None,
"domain": {"data": "table", "field": "y"} if ylim == "auto" else [float(ylim[0]), float(ylim[1])]
}
]
if hist.ndim >= 2:
bins1 = hist.bins[1].astype(float)
vega["scales"][1]["domain"] = [bins1[0, 0], bins1[-1, 1]] | python | def _create_scales(hist: HistogramBase, vega: dict, kwargs: dict):
"""Find proper scales for axes."""
if hist.ndim == 1:
bins0 = hist.bins.astype(float)
else:
bins0 = hist.bins[0].astype(float)
xlim = kwargs.pop("xlim", "auto")
ylim = kwargs.pop("ylim", "auto")
if xlim is "auto":
nice_x = True
else:
nice_x = False
if ylim is "auto":
nice_y = True
else:
nice_y = False
# TODO: Unify xlim & ylim parameters with matplotlib
# TODO: Apply xscale & yscale parameters
vega["scales"] = [
{
"name": "xscale",
"type": "linear",
"range": "width",
"nice": nice_x,
"zero": None,
"domain": [bins0[0, 0], bins0[-1, 1]] if xlim == "auto" else [float(xlim[0]), float(xlim[1])],
# "domain": {"data": "table", "field": "x"}
},
{
"name": "yscale",
"type": "linear",
"range": "height",
"nice": nice_y,
"zero": True if hist.ndim == 1 else None,
"domain": {"data": "table", "field": "y"} if ylim == "auto" else [float(ylim[0]), float(ylim[1])]
}
]
if hist.ndim >= 2:
bins1 = hist.bins[1].astype(float)
vega["scales"][1]["domain"] = [bins1[0, 0], bins1[-1, 1]] | ['def', '_create_scales', '(', 'hist', ':', 'HistogramBase', ',', 'vega', ':', 'dict', ',', 'kwargs', ':', 'dict', ')', ':', 'if', 'hist', '.', 'ndim', '==', '1', ':', 'bins0', '=', 'hist', '.', 'bins', '.', 'astype', '(', 'float', ')', 'else', ':', 'bins0', '=', 'hist', '.', 'bins', '[', '0', ']', '.', 'astype', '(', 'float', ')', 'xlim', '=', 'kwargs', '.', 'pop', '(', '"xlim"', ',', '"auto"', ')', 'ylim', '=', 'kwargs', '.', 'pop', '(', '"ylim"', ',', '"auto"', ')', 'if', 'xlim', 'is', '"auto"', ':', 'nice_x', '=', 'True', 'else', ':', 'nice_x', '=', 'False', 'if', 'ylim', 'is', '"auto"', ':', 'nice_y', '=', 'True', 'else', ':', 'nice_y', '=', 'False', '# TODO: Unify xlim & ylim parameters with matplotlib', '# TODO: Apply xscale & yscale parameters', 'vega', '[', '"scales"', ']', '=', '[', '{', '"name"', ':', '"xscale"', ',', '"type"', ':', '"linear"', ',', '"range"', ':', '"width"', ',', '"nice"', ':', 'nice_x', ',', '"zero"', ':', 'None', ',', '"domain"', ':', '[', 'bins0', '[', '0', ',', '0', ']', ',', 'bins0', '[', '-', '1', ',', '1', ']', ']', 'if', 'xlim', '==', '"auto"', 'else', '[', 'float', '(', 'xlim', '[', '0', ']', ')', ',', 'float', '(', 'xlim', '[', '1', ']', ')', ']', ',', '# "domain": {"data": "table", "field": "x"}', '}', ',', '{', '"name"', ':', '"yscale"', ',', '"type"', ':', '"linear"', ',', '"range"', ':', '"height"', ',', '"nice"', ':', 'nice_y', ',', '"zero"', ':', 'True', 'if', 'hist', '.', 'ndim', '==', '1', 'else', 'None', ',', '"domain"', ':', '{', '"data"', ':', '"table"', ',', '"field"', ':', '"y"', '}', 'if', 'ylim', '==', '"auto"', 'else', '[', 'float', '(', 'ylim', '[', '0', ']', ')', ',', 'float', '(', 'ylim', '[', '1', ']', ')', ']', '}', ']', 'if', 'hist', '.', 'ndim', '>=', '2', ':', 'bins1', '=', 'hist', '.', 'bins', '[', '1', ']', '.', 'astype', '(', 'float', ')', 'vega', '[', '"scales"', ']', '[', '1', ']', '[', '"domain"', ']', '=', '[', 'bins1', '[', '0', ',', '0', ']', ',', 'bins1', '[', '-', '1', ',', '1', ']', ']'] | Find proper scales for axes. | ['Find', 'proper', 'scales', 'for', 'axes', '.'] | train | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/vega.py#L568-L613 |
884 | delph-in/pydelphin | delphin/mrs/components.py | Pred.surface_or_abstract | def surface_or_abstract(cls, predstr):
"""Instantiate a Pred from either its surface or abstract symbol."""
if predstr.strip('"').lstrip("'").startswith('_'):
return cls.surface(predstr)
else:
return cls.abstract(predstr) | python | def surface_or_abstract(cls, predstr):
"""Instantiate a Pred from either its surface or abstract symbol."""
if predstr.strip('"').lstrip("'").startswith('_'):
return cls.surface(predstr)
else:
return cls.abstract(predstr) | ['def', 'surface_or_abstract', '(', 'cls', ',', 'predstr', ')', ':', 'if', 'predstr', '.', 'strip', '(', '\'"\'', ')', '.', 'lstrip', '(', '"\'"', ')', '.', 'startswith', '(', "'_'", ')', ':', 'return', 'cls', '.', 'surface', '(', 'predstr', ')', 'else', ':', 'return', 'cls', '.', 'abstract', '(', 'predstr', ')'] | Instantiate a Pred from either its surface or abstract symbol. | ['Instantiate', 'a', 'Pred', 'from', 'either', 'its', 'surface', 'or', 'abstract', 'symbol', '.'] | train | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/components.py#L548-L553 |
885 | hubo1016/vlcp | vlcp/event/runnable.py | RoutineContainer.syscall_noreturn | def syscall_noreturn(self, func):
'''
Call a syscall method. A syscall method is executed outside of any routines, directly
in the scheduler loop, which gives it chances to directly operate the event loop.
See :py:method::`vlcp.event.core.Scheduler.syscall`.
'''
matcher = self.scheduler.syscall(func)
while not matcher:
yield
matcher = self.scheduler.syscall(func)
ev, _ = yield (matcher,)
return ev | python | def syscall_noreturn(self, func):
'''
Call a syscall method. A syscall method is executed outside of any routines, directly
in the scheduler loop, which gives it chances to directly operate the event loop.
See :py:method::`vlcp.event.core.Scheduler.syscall`.
'''
matcher = self.scheduler.syscall(func)
while not matcher:
yield
matcher = self.scheduler.syscall(func)
ev, _ = yield (matcher,)
return ev | ['def', 'syscall_noreturn', '(', 'self', ',', 'func', ')', ':', 'matcher', '=', 'self', '.', 'scheduler', '.', 'syscall', '(', 'func', ')', 'while', 'not', 'matcher', ':', 'yield', 'matcher', '=', 'self', '.', 'scheduler', '.', 'syscall', '(', 'func', ')', 'ev', ',', '_', '=', 'yield', '(', 'matcher', ',', ')', 'return', 'ev'] | Call a syscall method. A syscall method is executed outside of any routines, directly
in the scheduler loop, which gives it chances to directly operate the event loop.
See :py:method::`vlcp.event.core.Scheduler.syscall`. | ['Call', 'a', 'syscall', 'method', '.', 'A', 'syscall', 'method', 'is', 'executed', 'outside', 'of', 'any', 'routines', 'directly', 'in', 'the', 'scheduler', 'loop', 'which', 'gives', 'it', 'chances', 'to', 'directly', 'operate', 'the', 'event', 'loop', '.', 'See', ':', 'py', ':', 'method', '::', 'vlcp', '.', 'event', '.', 'core', '.', 'Scheduler', '.', 'syscall', '.'] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/runnable.py#L650-L661 |
886 | riggsd/davies | davies/compass/__init__.py | Survey.included_length | def included_length(self):
"""Surveyed length, not including "excluded" shots"""
return sum([shot.length for shot in self.shots if shot.is_included]) | python | def included_length(self):
"""Surveyed length, not including "excluded" shots"""
return sum([shot.length for shot in self.shots if shot.is_included]) | ['def', 'included_length', '(', 'self', ')', ':', 'return', 'sum', '(', '[', 'shot', '.', 'length', 'for', 'shot', 'in', 'self', '.', 'shots', 'if', 'shot', '.', 'is_included', ']', ')'] | Surveyed length, not including "excluded" shots | ['Surveyed', 'length', 'not', 'including', 'excluded', 'shots'] | train | https://github.com/riggsd/davies/blob/8566c626202a875947ad01c087300108c68d80b5/davies/compass/__init__.py#L196-L198 |
887 | has2k1/plotnine | plotnine/data/__init__.py | _ordered_categories | def _ordered_categories(df, categories):
"""
Make the columns in df categorical
Parameters:
-----------
categories: dict
Of the form {str: list},
where the key the column name and the value is
the ordered category list
"""
for col, cats in categories.items():
df[col] = df[col].astype(CategoricalDtype(cats, ordered=True))
return df | python | def _ordered_categories(df, categories):
"""
Make the columns in df categorical
Parameters:
-----------
categories: dict
Of the form {str: list},
where the key the column name and the value is
the ordered category list
"""
for col, cats in categories.items():
df[col] = df[col].astype(CategoricalDtype(cats, ordered=True))
return df | ['def', '_ordered_categories', '(', 'df', ',', 'categories', ')', ':', 'for', 'col', ',', 'cats', 'in', 'categories', '.', 'items', '(', ')', ':', 'df', '[', 'col', ']', '=', 'df', '[', 'col', ']', '.', 'astype', '(', 'CategoricalDtype', '(', 'cats', ',', 'ordered', '=', 'True', ')', ')', 'return', 'df'] | Make the columns in df categorical
Parameters:
-----------
categories: dict
Of the form {str: list},
where the key the column name and the value is
the ordered category list | ['Make', 'the', 'columns', 'in', 'df', 'categorical'] | train | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/data/__init__.py#L34-L47 |
888 | wtolson/gnsq | gnsq/producer.py | Producer.multipublish | def multipublish(self, topic, messages, block=True, timeout=None,
raise_error=True):
"""Publish an iterable of messages to the given topic.
:param topic: the topic to publish to
:param messages: iterable of bytestrings to publish
:param block: wait for a connection to become available before
publishing the message. If block is `False` and no connections
are available, :class:`~gnsq.errors.NSQNoConnections` is raised
:param timeout: if timeout is a positive number, it blocks at most
``timeout`` seconds before raising
:class:`~gnsq.errors.NSQNoConnections`
:param raise_error: if ``True``, it blocks until a response is received
from the nsqd server, and any error response is raised. Otherwise
an :class:`~gevent.event.AsyncResult` is returned
"""
result = AsyncResult()
conn = self._get_connection(block=block, timeout=timeout)
try:
self._response_queues[conn].append(result)
conn.multipublish(topic, messages)
finally:
self._put_connection(conn)
if raise_error:
return result.get()
return result | python | def multipublish(self, topic, messages, block=True, timeout=None,
raise_error=True):
"""Publish an iterable of messages to the given topic.
:param topic: the topic to publish to
:param messages: iterable of bytestrings to publish
:param block: wait for a connection to become available before
publishing the message. If block is `False` and no connections
are available, :class:`~gnsq.errors.NSQNoConnections` is raised
:param timeout: if timeout is a positive number, it blocks at most
``timeout`` seconds before raising
:class:`~gnsq.errors.NSQNoConnections`
:param raise_error: if ``True``, it blocks until a response is received
from the nsqd server, and any error response is raised. Otherwise
an :class:`~gevent.event.AsyncResult` is returned
"""
result = AsyncResult()
conn = self._get_connection(block=block, timeout=timeout)
try:
self._response_queues[conn].append(result)
conn.multipublish(topic, messages)
finally:
self._put_connection(conn)
if raise_error:
return result.get()
return result | ['def', 'multipublish', '(', 'self', ',', 'topic', ',', 'messages', ',', 'block', '=', 'True', ',', 'timeout', '=', 'None', ',', 'raise_error', '=', 'True', ')', ':', 'result', '=', 'AsyncResult', '(', ')', 'conn', '=', 'self', '.', '_get_connection', '(', 'block', '=', 'block', ',', 'timeout', '=', 'timeout', ')', 'try', ':', 'self', '.', '_response_queues', '[', 'conn', ']', '.', 'append', '(', 'result', ')', 'conn', '.', 'multipublish', '(', 'topic', ',', 'messages', ')', 'finally', ':', 'self', '.', '_put_connection', '(', 'conn', ')', 'if', 'raise_error', ':', 'return', 'result', '.', 'get', '(', ')', 'return', 'result'] | Publish an iterable of messages to the given topic.
:param topic: the topic to publish to
:param messages: iterable of bytestrings to publish
:param block: wait for a connection to become available before
publishing the message. If block is `False` and no connections
are available, :class:`~gnsq.errors.NSQNoConnections` is raised
:param timeout: if timeout is a positive number, it blocks at most
``timeout`` seconds before raising
:class:`~gnsq.errors.NSQNoConnections`
:param raise_error: if ``True``, it blocks until a response is received
from the nsqd server, and any error response is raised. Otherwise
an :class:`~gevent.event.AsyncResult` is returned | ['Publish', 'an', 'iterable', 'of', 'messages', 'to', 'the', 'given', 'topic', '.'] | train | https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/producer.py#L289-L321 |
889 | dossier/dossier.fc | python/dossier/fc/feature_collection.py | FeatureCollection.from_dict | def from_dict(cls, data, read_only=False):
'''Recreate a feature collection from a dictionary.
The dictionary is of the format dumped by :meth:`to_dict`.
Additional information, such as whether the feature collection
should be read-only, is not included in this dictionary, and
is instead passed as parameters to this function.
'''
fc = cls(read_only=read_only)
fc._features = {}
fc._from_dict_update(data)
return fc | python | def from_dict(cls, data, read_only=False):
'''Recreate a feature collection from a dictionary.
The dictionary is of the format dumped by :meth:`to_dict`.
Additional information, such as whether the feature collection
should be read-only, is not included in this dictionary, and
is instead passed as parameters to this function.
'''
fc = cls(read_only=read_only)
fc._features = {}
fc._from_dict_update(data)
return fc | ['def', 'from_dict', '(', 'cls', ',', 'data', ',', 'read_only', '=', 'False', ')', ':', 'fc', '=', 'cls', '(', 'read_only', '=', 'read_only', ')', 'fc', '.', '_features', '=', '{', '}', 'fc', '.', '_from_dict_update', '(', 'data', ')', 'return', 'fc'] | Recreate a feature collection from a dictionary.
The dictionary is of the format dumped by :meth:`to_dict`.
Additional information, such as whether the feature collection
should be read-only, is not included in this dictionary, and
is instead passed as parameters to this function. | ['Recreate', 'a', 'feature', 'collection', 'from', 'a', 'dictionary', '.'] | train | https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/feature_collection.py#L212-L224 |
890 | casacore/python-casacore | casacore/measures/__init__.py | measures.measure | def measure(self, v, rf, off=None):
"""Create/convert a measure using the frame state set on the measures
server instance (via :meth:`do_frame`)
:param v: The measure to convert
:param rf: The frame reference to convert to
:param off: The optional offset for the measure
"""
if off is None:
off = {}
keys = ["m0", "m1", "m2"]
for key in keys:
if key in v:
if dq.is_quantity(v[key]):
v[key] = v[key].to_dict()
return _measures.measure(self, v, rf, off) | python | def measure(self, v, rf, off=None):
"""Create/convert a measure using the frame state set on the measures
server instance (via :meth:`do_frame`)
:param v: The measure to convert
:param rf: The frame reference to convert to
:param off: The optional offset for the measure
"""
if off is None:
off = {}
keys = ["m0", "m1", "m2"]
for key in keys:
if key in v:
if dq.is_quantity(v[key]):
v[key] = v[key].to_dict()
return _measures.measure(self, v, rf, off) | ['def', 'measure', '(', 'self', ',', 'v', ',', 'rf', ',', 'off', '=', 'None', ')', ':', 'if', 'off', 'is', 'None', ':', 'off', '=', '{', '}', 'keys', '=', '[', '"m0"', ',', '"m1"', ',', '"m2"', ']', 'for', 'key', 'in', 'keys', ':', 'if', 'key', 'in', 'v', ':', 'if', 'dq', '.', 'is_quantity', '(', 'v', '[', 'key', ']', ')', ':', 'v', '[', 'key', ']', '=', 'v', '[', 'key', ']', '.', 'to_dict', '(', ')', 'return', '_measures', '.', 'measure', '(', 'self', ',', 'v', ',', 'rf', ',', 'off', ')'] | Create/convert a measure using the frame state set on the measures
server instance (via :meth:`do_frame`)
:param v: The measure to convert
:param rf: The frame reference to convert to
:param off: The optional offset for the measure | ['Create', '/', 'convert', 'a', 'measure', 'using', 'the', 'frame', 'state', 'set', 'on', 'the', 'measures', 'server', 'instance', '(', 'via', ':', 'meth', ':', 'do_frame', ')'] | train | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L102-L120 |
891 | treethought/flask-assistant | flask_assistant/core.py | Assistant.init_blueprint | def init_blueprint(self, blueprint, path="templates.yaml"):
"""Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize
(Default: {None})
path {str} -- path to templates yaml file, relative to Blueprint
(Default: {'templates.yaml'})
"""
if self._route is not None:
raise TypeError("route cannot be set when using blueprints!")
# we need to tuck our reference to this Assistant instance
# into the blueprint object and find it later!
blueprint.assist = self
# BlueprintSetupState.add_url_rule gets called underneath the covers and
# concats the rule string, so we should set to an empty string to allow
# Blueprint('blueprint_api', __name__, url_prefix="/assist") to result in
# exposing the rule at "/assist" and not "/assist/".
blueprint.add_url_rule(
"", view_func=self._flask_assitant_view_func, methods=["POST"]
) | python | def init_blueprint(self, blueprint, path="templates.yaml"):
"""Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize
(Default: {None})
path {str} -- path to templates yaml file, relative to Blueprint
(Default: {'templates.yaml'})
"""
if self._route is not None:
raise TypeError("route cannot be set when using blueprints!")
# we need to tuck our reference to this Assistant instance
# into the blueprint object and find it later!
blueprint.assist = self
# BlueprintSetupState.add_url_rule gets called underneath the covers and
# concats the rule string, so we should set to an empty string to allow
# Blueprint('blueprint_api', __name__, url_prefix="/assist") to result in
# exposing the rule at "/assist" and not "/assist/".
blueprint.add_url_rule(
"", view_func=self._flask_assitant_view_func, methods=["POST"]
) | ['def', 'init_blueprint', '(', 'self', ',', 'blueprint', ',', 'path', '=', '"templates.yaml"', ')', ':', 'if', 'self', '.', '_route', 'is', 'not', 'None', ':', 'raise', 'TypeError', '(', '"route cannot be set when using blueprints!"', ')', '# we need to tuck our reference to this Assistant instance', '# into the blueprint object and find it later!', 'blueprint', '.', 'assist', '=', 'self', '# BlueprintSetupState.add_url_rule gets called underneath the covers and', '# concats the rule string, so we should set to an empty string to allow', '# Blueprint(\'blueprint_api\', __name__, url_prefix="/assist") to result in', '# exposing the rule at "/assist" and not "/assist/".', 'blueprint', '.', 'add_url_rule', '(', '""', ',', 'view_func', '=', 'self', '.', '_flask_assitant_view_func', ',', 'methods', '=', '[', '"POST"', ']', ')'] | Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize
(Default: {None})
path {str} -- path to templates yaml file, relative to Blueprint
(Default: {'templates.yaml'}) | ['Initialize', 'a', 'Flask', 'Blueprint', 'similar', 'to', 'init_app', 'but', 'without', 'the', 'access', 'to', 'the', 'application', 'config', '.'] | train | https://github.com/treethought/flask-assistant/blob/9331b9796644dfa987bcd97a13e78e9ab62923d3/flask_assistant/core.py#L132-L155 |
892 | acutesoftware/AIKIF | aikif/dataTools/cls_sql_code_generator.py | SQLCodeGenerator.populate_from_staging | def populate_from_staging(self, staging_table, from_column_list, output_table):
"""
generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially)
"""
self.sql_text += 'INSERT INTO ' + output_table + ' (\n'
for c in self.col_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' ' + self.date_updated_col + ') (\n'
self.sql_text += ' SELECT \n'
for c in from_column_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' SYSDATE \n FROM ' + staging_table
self.sql_text += '\n);\n' | python | def populate_from_staging(self, staging_table, from_column_list, output_table):
"""
generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially)
"""
self.sql_text += 'INSERT INTO ' + output_table + ' (\n'
for c in self.col_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' ' + self.date_updated_col + ') (\n'
self.sql_text += ' SELECT \n'
for c in from_column_list:
if c != '':
self.sql_text += ' ' + c + ',\n'
self.sql_text += ' SYSDATE \n FROM ' + staging_table
self.sql_text += '\n);\n' | ['def', 'populate_from_staging', '(', 'self', ',', 'staging_table', ',', 'from_column_list', ',', 'output_table', ')', ':', 'self', '.', 'sql_text', '+=', "'INSERT INTO '", '+', 'output_table', '+', "' (\\n'", 'for', 'c', 'in', 'self', '.', 'col_list', ':', 'if', 'c', '!=', "''", ':', 'self', '.', 'sql_text', '+=', "' '", '+', 'c', '+', "',\\n'", 'self', '.', 'sql_text', '+=', "' '", '+', 'self', '.', 'date_updated_col', '+', "') (\\n'", 'self', '.', 'sql_text', '+=', "' SELECT \\n'", 'for', 'c', 'in', 'from_column_list', ':', 'if', 'c', '!=', "''", ':', 'self', '.', 'sql_text', '+=', "' '", '+', 'c', '+', "',\\n'", 'self', '.', 'sql_text', '+=', "' SYSDATE \\n FROM '", '+', 'staging_table', 'self', '.', 'sql_text', '+=', "'\\n);\\n'"] | generate SQL to insert staging table records into
the core table based on column_list (If no column list
then insert sequentially) | ['generate', 'SQL', 'to', 'insert', 'staging', 'table', 'records', 'into', 'the', 'core', 'table', 'based', 'on', 'column_list', '(', 'If', 'no', 'column', 'list', 'then', 'insert', 'sequentially', ')'] | train | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_sql_code_generator.py#L181-L197 |
893 | Robpol86/colorclass | colorclass/windows.py | Windows.disable | def disable(cls):
"""Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values.
:return: If streams restored successfully.
:rtype: bool
"""
# Skip if not on Windows.
if not IS_WINDOWS:
return False
# Restore default colors.
if hasattr(sys.stderr, '_original_stream'):
getattr(sys, 'stderr').color = None
if hasattr(sys.stdout, '_original_stream'):
getattr(sys, 'stdout').color = None
# Restore original streams.
changed = False
if hasattr(sys.stderr, '_original_stream'):
changed = True
sys.stderr = getattr(sys.stderr, '_original_stream')
if hasattr(sys.stdout, '_original_stream'):
changed = True
sys.stdout = getattr(sys.stdout, '_original_stream')
return changed | python | def disable(cls):
"""Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values.
:return: If streams restored successfully.
:rtype: bool
"""
# Skip if not on Windows.
if not IS_WINDOWS:
return False
# Restore default colors.
if hasattr(sys.stderr, '_original_stream'):
getattr(sys, 'stderr').color = None
if hasattr(sys.stdout, '_original_stream'):
getattr(sys, 'stdout').color = None
# Restore original streams.
changed = False
if hasattr(sys.stderr, '_original_stream'):
changed = True
sys.stderr = getattr(sys.stderr, '_original_stream')
if hasattr(sys.stdout, '_original_stream'):
changed = True
sys.stdout = getattr(sys.stdout, '_original_stream')
return changed | ['def', 'disable', '(', 'cls', ')', ':', '# Skip if not on Windows.', 'if', 'not', 'IS_WINDOWS', ':', 'return', 'False', '# Restore default colors.', 'if', 'hasattr', '(', 'sys', '.', 'stderr', ',', "'_original_stream'", ')', ':', 'getattr', '(', 'sys', ',', "'stderr'", ')', '.', 'color', '=', 'None', 'if', 'hasattr', '(', 'sys', '.', 'stdout', ',', "'_original_stream'", ')', ':', 'getattr', '(', 'sys', ',', "'stdout'", ')', '.', 'color', '=', 'None', '# Restore original streams.', 'changed', '=', 'False', 'if', 'hasattr', '(', 'sys', '.', 'stderr', ',', "'_original_stream'", ')', ':', 'changed', '=', 'True', 'sys', '.', 'stderr', '=', 'getattr', '(', 'sys', '.', 'stderr', ',', "'_original_stream'", ')', 'if', 'hasattr', '(', 'sys', '.', 'stdout', ',', "'_original_stream'", ')', ':', 'changed', '=', 'True', 'sys', '.', 'stdout', '=', 'getattr', '(', 'sys', '.', 'stdout', ',', "'_original_stream'", ')', 'return', 'changed'] | Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values.
:return: If streams restored successfully.
:rtype: bool | ['Restore', 'sys', '.', 'stderr', 'and', 'sys', '.', 'stdout', 'to', 'their', 'original', 'objects', '.', 'Resets', 'colors', 'to', 'their', 'original', 'values', '.'] | train | https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/windows.py#L298-L323 |
894 | Kronuz/pyScss | scss/compiler.py | Compilation._at_dump_functions | def _at_dump_functions(self, calculator, rule, scope, block):
"""
Implements @dump_functions
"""
sys.stderr.write("%s\n" % repr(rule.namespace._functions)) | python | def _at_dump_functions(self, calculator, rule, scope, block):
"""
Implements @dump_functions
"""
sys.stderr.write("%s\n" % repr(rule.namespace._functions)) | ['def', '_at_dump_functions', '(', 'self', ',', 'calculator', ',', 'rule', ',', 'scope', ',', 'block', ')', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"%s\\n"', '%', 'repr', '(', 'rule', '.', 'namespace', '.', '_functions', ')', ')'] | Implements @dump_functions | ['Implements'] | train | https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/compiler.py#L427-L431 |
895 | berkerpeksag/astor | astor/file_util.py | CodeToAst.parse_file | def parse_file(fname):
"""Parse a python file into an AST.
This is a very thin wrapper around ast.parse
TODO: Handle encodings other than the default for Python 2
(issue #26)
"""
try:
with fopen(fname) as f:
fstr = f.read()
except IOError:
if fname != 'stdin':
raise
sys.stdout.write('\nReading from stdin:\n\n')
fstr = sys.stdin.read()
fstr = fstr.replace('\r\n', '\n').replace('\r', '\n')
if not fstr.endswith('\n'):
fstr += '\n'
return ast.parse(fstr, filename=fname) | python | def parse_file(fname):
"""Parse a python file into an AST.
This is a very thin wrapper around ast.parse
TODO: Handle encodings other than the default for Python 2
(issue #26)
"""
try:
with fopen(fname) as f:
fstr = f.read()
except IOError:
if fname != 'stdin':
raise
sys.stdout.write('\nReading from stdin:\n\n')
fstr = sys.stdin.read()
fstr = fstr.replace('\r\n', '\n').replace('\r', '\n')
if not fstr.endswith('\n'):
fstr += '\n'
return ast.parse(fstr, filename=fname) | ['def', 'parse_file', '(', 'fname', ')', ':', 'try', ':', 'with', 'fopen', '(', 'fname', ')', 'as', 'f', ':', 'fstr', '=', 'f', '.', 'read', '(', ')', 'except', 'IOError', ':', 'if', 'fname', '!=', "'stdin'", ':', 'raise', 'sys', '.', 'stdout', '.', 'write', '(', "'\\nReading from stdin:\\n\\n'", ')', 'fstr', '=', 'sys', '.', 'stdin', '.', 'read', '(', ')', 'fstr', '=', 'fstr', '.', 'replace', '(', "'\\r\\n'", ',', "'\\n'", ')', '.', 'replace', '(', "'\\r'", ',', "'\\n'", ')', 'if', 'not', 'fstr', '.', 'endswith', '(', "'\\n'", ')', ':', 'fstr', '+=', "'\\n'", 'return', 'ast', '.', 'parse', '(', 'fstr', ',', 'filename', '=', 'fname', ')'] | Parse a python file into an AST.
This is a very thin wrapper around ast.parse
TODO: Handle encodings other than the default for Python 2
(issue #26) | ['Parse', 'a', 'python', 'file', 'into', 'an', 'AST', '.'] | train | https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/file_util.py#L55-L74 |
896 | saltstack/salt | salt/states/boto_vpc.py | vpc_peering_connection_present | def vpc_peering_connection_present(name, requester_vpc_id=None, requester_vpc_name=None,
peer_vpc_id=None, peer_vpc_name=None, conn_name=None,
peer_owner_id=None, peer_region=None, region=None,
key=None, keyid=None, profile=None):
'''
name
Name of the state
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
ID of the VPC tp crete VPC peering connection with. This can be a VPC in
another account. Exclusive with peer_vpc_name.
peer_vpc_name
Name tag of the VPC tp crete VPC peering connection with. This can only
be a VPC in the same account, else resolving it into a vpc ID will fail.
Exclusive with peer_vpc_id.
conn_name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.11.0
Example:
.. code-block:: yaml
ensure peering twixt local vpc and the other guys:
boto_vpc.vpc_peering_connection_present:
- requester_vpc_name: my_local_vpc
- peer_vpc_name: some_other_guys_vpc
- conn_name: peering_from_here_to_there
- peer_owner_id: 012345654321
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
if __salt__['boto_vpc.is_peering_connection_pending'](conn_name=conn_name, region=region,
key=key, keyid=keyid, profile=profile):
if __salt__['boto_vpc.peering_connection_pending_from_vpc'](conn_name=conn_name,
vpc_id=requester_vpc_id,
vpc_name=requester_vpc_name,
region=region, key=key,
keyid=keyid, profile=profile):
ret['comment'] = ('VPC peering {0} already requested - pending '
'acceptance by {1}'.format(conn_name, peer_owner_id
or peer_vpc_name or peer_vpc_id))
log.info(ret['comment'])
return ret
return accept_vpc_peering_connection(name=name, conn_name=conn_name,
region=region, key=key, keyid=keyid,
profile=profile)
return request_vpc_peering_connection(name=name, requester_vpc_id=requester_vpc_id,
requester_vpc_name=requester_vpc_name,
peer_vpc_id=peer_vpc_id, peer_vpc_name=peer_vpc_name,
conn_name=conn_name, peer_owner_id=peer_owner_id,
peer_region=peer_region, region=region, key=key,
keyid=keyid, profile=profile) | python | def vpc_peering_connection_present(name, requester_vpc_id=None, requester_vpc_name=None,
peer_vpc_id=None, peer_vpc_name=None, conn_name=None,
peer_owner_id=None, peer_region=None, region=None,
key=None, keyid=None, profile=None):
'''
name
Name of the state
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
ID of the VPC tp crete VPC peering connection with. This can be a VPC in
another account. Exclusive with peer_vpc_name.
peer_vpc_name
Name tag of the VPC tp crete VPC peering connection with. This can only
be a VPC in the same account, else resolving it into a vpc ID will fail.
Exclusive with peer_vpc_id.
conn_name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.11.0
Example:
.. code-block:: yaml
ensure peering twixt local vpc and the other guys:
boto_vpc.vpc_peering_connection_present:
- requester_vpc_name: my_local_vpc
- peer_vpc_name: some_other_guys_vpc
- conn_name: peering_from_here_to_there
- peer_owner_id: 012345654321
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
if __salt__['boto_vpc.is_peering_connection_pending'](conn_name=conn_name, region=region,
key=key, keyid=keyid, profile=profile):
if __salt__['boto_vpc.peering_connection_pending_from_vpc'](conn_name=conn_name,
vpc_id=requester_vpc_id,
vpc_name=requester_vpc_name,
region=region, key=key,
keyid=keyid, profile=profile):
ret['comment'] = ('VPC peering {0} already requested - pending '
'acceptance by {1}'.format(conn_name, peer_owner_id
or peer_vpc_name or peer_vpc_id))
log.info(ret['comment'])
return ret
return accept_vpc_peering_connection(name=name, conn_name=conn_name,
region=region, key=key, keyid=keyid,
profile=profile)
return request_vpc_peering_connection(name=name, requester_vpc_id=requester_vpc_id,
requester_vpc_name=requester_vpc_name,
peer_vpc_id=peer_vpc_id, peer_vpc_name=peer_vpc_name,
conn_name=conn_name, peer_owner_id=peer_owner_id,
peer_region=peer_region, region=region, key=key,
keyid=keyid, profile=profile) | ['def', 'vpc_peering_connection_present', '(', 'name', ',', 'requester_vpc_id', '=', 'None', ',', 'requester_vpc_name', '=', 'None', ',', 'peer_vpc_id', '=', 'None', ',', 'peer_vpc_name', '=', 'None', ',', 'conn_name', '=', 'None', ',', 'peer_owner_id', '=', 'None', ',', 'peer_region', '=', 'None', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'result'", ':', 'True', ',', "'comment'", ':', "''", ',', "'changes'", ':', '{', '}', '}', 'if', '__salt__', '[', "'boto_vpc.is_peering_connection_pending'", ']', '(', 'conn_name', '=', 'conn_name', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', ':', 'if', '__salt__', '[', "'boto_vpc.peering_connection_pending_from_vpc'", ']', '(', 'conn_name', '=', 'conn_name', ',', 'vpc_id', '=', 'requester_vpc_id', ',', 'vpc_name', '=', 'requester_vpc_name', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', ':', 'ret', '[', "'comment'", ']', '=', '(', "'VPC peering {0} already requested - pending '", "'acceptance by {1}'", '.', 'format', '(', 'conn_name', ',', 'peer_owner_id', 'or', 'peer_vpc_name', 'or', 'peer_vpc_id', ')', ')', 'log', '.', 'info', '(', 'ret', '[', "'comment'", ']', ')', 'return', 'ret', 'return', 'accept_vpc_peering_connection', '(', 'name', '=', 'name', ',', 'conn_name', '=', 'conn_name', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'return', 'request_vpc_peering_connection', '(', 'name', '=', 'name', ',', 'requester_vpc_id', '=', 'requester_vpc_id', ',', 'requester_vpc_name', '=', 'requester_vpc_name', ',', 'peer_vpc_id', '=', 'peer_vpc_id', ',', 'peer_vpc_name', '=', 'peer_vpc_name', ',', 'conn_name', '=', 'conn_name', ',', 'peer_owner_id', '=', 'peer_owner_id', ',', 'peer_region', '=', 'peer_region', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')'] | name
Name of the state
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
ID of the VPC tp crete VPC peering connection with. This can be a VPC in
another account. Exclusive with peer_vpc_name.
peer_vpc_name
Name tag of the VPC tp crete VPC peering connection with. This can only
be a VPC in the same account, else resolving it into a vpc ID will fail.
Exclusive with peer_vpc_id.
conn_name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.11.0
Example:
.. code-block:: yaml
ensure peering twixt local vpc and the other guys:
boto_vpc.vpc_peering_connection_present:
- requester_vpc_name: my_local_vpc
- peer_vpc_name: some_other_guys_vpc
- conn_name: peering_from_here_to_there
- peer_owner_id: 012345654321 | ['name', 'Name', 'of', 'the', 'state'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_vpc.py#L1649-L1736 |
897 | captin411/ofxclient | ofxclient/account.py | Account.statement | def statement(self, days=60):
"""Download the :py:class:`ofxparse.Statement` given the time range
:param days: Number of days to look back at
:type days: integer
:rtype: :py:class:`ofxparser.Statement`
"""
parsed = self.download_parsed(days=days)
return parsed.account.statement | python | def statement(self, days=60):
"""Download the :py:class:`ofxparse.Statement` given the time range
:param days: Number of days to look back at
:type days: integer
:rtype: :py:class:`ofxparser.Statement`
"""
parsed = self.download_parsed(days=days)
return parsed.account.statement | ['def', 'statement', '(', 'self', ',', 'days', '=', '60', ')', ':', 'parsed', '=', 'self', '.', 'download_parsed', '(', 'days', '=', 'days', ')', 'return', 'parsed', '.', 'account', '.', 'statement'] | Download the :py:class:`ofxparse.Statement` given the time range
:param days: Number of days to look back at
:type days: integer
:rtype: :py:class:`ofxparser.Statement` | ['Download', 'the', ':', 'py', ':', 'class', ':', 'ofxparse', '.', 'Statement', 'given', 'the', 'time', 'range'] | train | https://github.com/captin411/ofxclient/blob/4da2719f0ecbbf5eee62fb82c1b3b34ec955ee5e/ofxclient/account.py#L123-L131 |
898 | grycap/RADL | radl/radl_parse.py | RADLParser.p_ansible_sentence | def p_ansible_sentence(self, t):
"""ansible_sentence : ANSIBLE VAR LPAREN features RPAREN"""
t[0] = ansible(t[2], t[4], line=t.lineno(1)) | python | def p_ansible_sentence(self, t):
"""ansible_sentence : ANSIBLE VAR LPAREN features RPAREN"""
t[0] = ansible(t[2], t[4], line=t.lineno(1)) | ['def', 'p_ansible_sentence', '(', 'self', ',', 't', ')', ':', 't', '[', '0', ']', '=', 'ansible', '(', 't', '[', '2', ']', ',', 't', '[', '4', ']', ',', 'line', '=', 't', '.', 'lineno', '(', '1', ')', ')'] | ansible_sentence : ANSIBLE VAR LPAREN features RPAREN | ['ansible_sentence', ':', 'ANSIBLE', 'VAR', 'LPAREN', 'features', 'RPAREN'] | train | https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl_parse.py#L269-L272 |
899 | gwastro/pycbc-glue | pycbc_glue/gpstime.py | gpsFromUTC | def gpsFromUTC(year, month, day, hour, min, sec, leapSecs=14):
"""converts UTC to: gpsWeek, secsOfWeek, gpsDay, secsOfDay
a good reference is: http://www.oc.nps.navy.mil/~jclynch/timsys.html
This is based on the following facts (see reference above):
GPS time is basically measured in (atomic) seconds since
January 6, 1980, 00:00:00.0 (the GPS Epoch)
The GPS week starts on Saturday midnight (Sunday morning), and runs
for 604800 seconds.
Currently, GPS time is 13 seconds ahead of UTC (see above reference).
While GPS SVs transmit this difference and the date when another leap
second takes effect, the use of leap seconds cannot be predicted. This
routine is precise until the next leap second is introduced and has to be
updated after that.
SOW = Seconds of Week
SOD = Seconds of Day
Note: Python represents time in integer seconds, fractions are lost!!!
"""
secFract = sec % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple)
t = time.mktime((year, month, day, hour, min, sec, -1, -1, 0))
# Note: time.mktime strictly works in localtime and to yield UTC, it should be
# corrected with time.timezone
# However, since we use the difference, this correction is unnecessary.
# Warning: trouble if daylight savings flag is set to -1 or 1 !!!
t = t + leapSecs
tdiff = t - t0
gpsSOW = (tdiff % secsInWeek) + secFract
gpsWeek = int(math.floor(tdiff/secsInWeek))
gpsDay = int(math.floor(gpsSOW/secsInDay))
gpsSOD = (gpsSOW % secsInDay)
return (gpsWeek, gpsSOW, gpsDay, gpsSOD) | python | def gpsFromUTC(year, month, day, hour, min, sec, leapSecs=14):
"""converts UTC to: gpsWeek, secsOfWeek, gpsDay, secsOfDay
a good reference is: http://www.oc.nps.navy.mil/~jclynch/timsys.html
This is based on the following facts (see reference above):
GPS time is basically measured in (atomic) seconds since
January 6, 1980, 00:00:00.0 (the GPS Epoch)
The GPS week starts on Saturday midnight (Sunday morning), and runs
for 604800 seconds.
Currently, GPS time is 13 seconds ahead of UTC (see above reference).
While GPS SVs transmit this difference and the date when another leap
second takes effect, the use of leap seconds cannot be predicted. This
routine is precise until the next leap second is introduced and has to be
updated after that.
SOW = Seconds of Week
SOD = Seconds of Day
Note: Python represents time in integer seconds, fractions are lost!!!
"""
secFract = sec % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple)
t = time.mktime((year, month, day, hour, min, sec, -1, -1, 0))
# Note: time.mktime strictly works in localtime and to yield UTC, it should be
# corrected with time.timezone
# However, since we use the difference, this correction is unnecessary.
# Warning: trouble if daylight savings flag is set to -1 or 1 !!!
t = t + leapSecs
tdiff = t - t0
gpsSOW = (tdiff % secsInWeek) + secFract
gpsWeek = int(math.floor(tdiff/secsInWeek))
gpsDay = int(math.floor(gpsSOW/secsInDay))
gpsSOD = (gpsSOW % secsInDay)
return (gpsWeek, gpsSOW, gpsDay, gpsSOD) | ['def', 'gpsFromUTC', '(', 'year', ',', 'month', ',', 'day', ',', 'hour', ',', 'min', ',', 'sec', ',', 'leapSecs', '=', '14', ')', ':', 'secFract', '=', 'sec', '%', '1', 'epochTuple', '=', 'gpsEpoch', '+', '(', '-', '1', ',', '-', '1', ',', '0', ')', 't0', '=', 'time', '.', 'mktime', '(', 'epochTuple', ')', 't', '=', 'time', '.', 'mktime', '(', '(', 'year', ',', 'month', ',', 'day', ',', 'hour', ',', 'min', ',', 'sec', ',', '-', '1', ',', '-', '1', ',', '0', ')', ')', '# Note: time.mktime strictly works in localtime and to yield UTC, it should be', '# corrected with time.timezone', '# However, since we use the difference, this correction is unnecessary.', '# Warning: trouble if daylight savings flag is set to -1 or 1 !!!', 't', '=', 't', '+', 'leapSecs', 'tdiff', '=', 't', '-', 't0', 'gpsSOW', '=', '(', 'tdiff', '%', 'secsInWeek', ')', '+', 'secFract', 'gpsWeek', '=', 'int', '(', 'math', '.', 'floor', '(', 'tdiff', '/', 'secsInWeek', ')', ')', 'gpsDay', '=', 'int', '(', 'math', '.', 'floor', '(', 'gpsSOW', '/', 'secsInDay', ')', ')', 'gpsSOD', '=', '(', 'gpsSOW', '%', 'secsInDay', ')', 'return', '(', 'gpsWeek', ',', 'gpsSOW', ',', 'gpsDay', ',', 'gpsSOD', ')'] | converts UTC to: gpsWeek, secsOfWeek, gpsDay, secsOfDay
a good reference is: http://www.oc.nps.navy.mil/~jclynch/timsys.html
This is based on the following facts (see reference above):
GPS time is basically measured in (atomic) seconds since
January 6, 1980, 00:00:00.0 (the GPS Epoch)
The GPS week starts on Saturday midnight (Sunday morning), and runs
for 604800 seconds.
Currently, GPS time is 13 seconds ahead of UTC (see above reference).
While GPS SVs transmit this difference and the date when another leap
second takes effect, the use of leap seconds cannot be predicted. This
routine is precise until the next leap second is introduced and has to be
updated after that.
SOW = Seconds of Week
SOD = Seconds of Day
Note: Python represents time in integer seconds, fractions are lost!!! | ['converts', 'UTC', 'to', ':', 'gpsWeek', 'secsOfWeek', 'gpsDay', 'secsOfDay'] | train | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/gpstime.py#L93-L131 |