Dataset schema: Unnamed: 0 (int64, 0 to 10k) | repository_name (string, 7 to 54 chars) | func_path_in_repository (string, 5 to 223 chars) | func_name (string, 1 to 134 chars) | whole_func_string (string, 100 to 30.3k chars) | language (1 class) | func_code_string (string, 100 to 30.3k chars) | func_code_tokens (string, 138 to 33.2k chars) | func_documentation_string (string, 1 to 15k chars) | func_documentation_tokens (string, 5 to 5.14k chars) | split_name (1 class) | func_code_url (string, 91 to 315 chars)

Each row below gives: row index | repository_name | func_path_in_repository | func_name | language | split_name | func_code_url, followed by the function source (whole_func_string), which contains the documentation string.

9,500 | StanfordVL/robosuite | robosuite/utils/transform_utils.py | random_quat | python | train | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/utils/transform_utils.py#L147-L171

def random_quat(rand=None):
    """Return uniform random unit quaternion.

    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1.

    >>> q = random_quat()
    >>> np.allclose(1.0, vector_norm(q))
    True
    >>> q = random_quat(np.random.random(3))
    >>> q.shape
    (4,)
    """
    if rand is None:
        rand = np.random.rand(3)
    else:
        assert len(rand) == 3
    r1 = np.sqrt(1.0 - rand[0])
    r2 = np.sqrt(rand[0])
    pi2 = math.pi * 2.0
    t1 = pi2 * rand[1]
    t2 = pi2 * rand[2]
    return np.array(
        (np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2, np.cos(t2) * r2),
        dtype=np.float32,
    )

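A minimal usage sketch for the function above (assumes NumPy as np and math are importable alongside it; np.linalg.norm stands in for the vector_norm helper referenced in the doctest, which is an assumption):

import numpy as np

# The returned quaternion should have unit norm; passing an explicit
# 3-vector of uniform samples makes the draw deterministic.
q = random_quat()
assert np.isclose(np.linalg.norm(q), 1.0, atol=1e-6)

q2 = random_quat(np.array([0.25, 0.5, 0.75]))
assert q2.shape == (4,)
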
9,501 | saltstack/salt | salt/client/__init__.py | LocalClient.get_cli_event_returns | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1514-L1594

def get_cli_event_returns(
        self,
        jid,
        minions,
        timeout=None,
        tgt='*',
        tgt_type='glob',
        verbose=False,
        progress=False,
        show_timeout=False,
        show_jid=False,
        **kwargs):
    '''
    Get the returns for the command line interface via the event system
    '''
    log.trace('func get_cli_event_returns()')

    if verbose:
        msg = 'Executing job with jid {0}'.format(jid)
        print(msg)
        print('-' * len(msg) + '\n')
    elif show_jid:
        print('jid: {0}'.format(jid))

    # lazy load the connected minions
    connected_minions = None
    return_count = 0

    for ret in self.get_iter_returns(jid,
                                     minions,
                                     timeout=timeout,
                                     tgt=tgt,
                                     tgt_type=tgt_type,
                                     # (gtmanfred) expect_minions is popped here incase it is passed from a client
                                     # call. If this is not popped, then it would be passed twice to
                                     # get_iter_returns.
                                     expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout),
                                     **kwargs
                                     ):
        log.debug('return event: %s', ret)
        return_count = return_count + 1
        if progress:
            for id_, min_ret in six.iteritems(ret):
                if not min_ret.get('failed') is True:
                    yield {'minion_count': len(minions), 'return_count': return_count}
        # replace the return structure for missing minions
        for id_, min_ret in six.iteritems(ret):
            if min_ret.get('failed') is True:
                if connected_minions is None:
                    connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids()
                if self.opts['minion_data_cache'] \
                        and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \
                        and connected_minions \
                        and id_ not in connected_minions:
                    yield {
                        id_: {
                            'out': 'no_return',
                            'ret': 'Minion did not return. [Not connected]',
                            'retcode': salt.defaults.exitcodes.EX_GENERIC
                        }
                    }
                else:
                    # don't report syndics as unresponsive minions
                    if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)):
                        yield {
                            id_: {
                                'out': 'no_return',
                                'ret': 'Minion did not return. [No response]'
                                       '\nThe minions may not have all finished running and any '
                                       'remaining minions will return upon completion. To look '
                                       'up the return data for this job later, run the following '
                                       'command:\n\n'
                                       'salt-run jobs.lookup_jid {0}'.format(jid),
                                'retcode': salt.defaults.exitcodes.EX_GENERIC
                            }
                        }
            else:
                yield {id_: min_ret}

    self._clean_up_subscriptions(jid)

9,502 | michael-lazar/rtv | rtv/page.py | Page.prompt_and_select_link | python | train | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/page.py#L481-L512

def prompt_and_select_link(self):
    """
    Prompt the user to select a link from a list to open.

    Return the link that was selected, or ``None`` if no link was selected.
    """
    data = self.get_selected_item()
    url_full = data.get('url_full')
    permalink = data.get('permalink')
    if url_full and url_full != permalink:
        # The item is a link-only submission that won't contain text
        link = url_full
    else:
        html = data.get('html')
        if html:
            extracted_links = self.content.extract_links(html)
            if not extracted_links:
                # Only one selection to choose from, so just pick it
                link = permalink
            else:
                # Let the user decide which link to open
                links = []
                if permalink:
                    links += [{'text': 'Permalink', 'href': permalink}]
                links += extracted_links
                link = self.term.prompt_user_to_select_link(links)
        else:
            # Some items like hidden comments don't have any HTML to parse
            link = permalink
    return link

9,503 | JensAstrup/pyOutlook | pyOutlook/internal/utils.py | check_response | python | train | https://github.com/JensAstrup/pyOutlook/blob/f4ca9d4a8629c0a41f78102ce84fab702a841167/pyOutlook/internal/utils.py#L30-L48

def check_response(response):
    """ Checks that a response is successful, raising the appropriate Exceptions otherwise. """
    status_code = response.status_code
    if 100 < status_code < 299:
        return True
    elif status_code == 401 or status_code == 403:
        message = get_response_data(response)
        raise AuthError('Access Token Error, Received ' + str(status_code) +
                        ' from Outlook REST Endpoint with the message: {}'.format(message))
    elif status_code == 400:
        message = get_response_data(response)
        raise RequestError('The request made to the Outlook API was invalid. Received the following message: {}'.
                           format(message))
    else:
        message = get_response_data(response)
        raise APIError('Encountered an unknown error from the Outlook API: {}'.format(message))

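A quick sketch of exercising the success path with a stand-in response object (the stub class is hypothetical; only the status_code attribute is read on this path):

class _StubResponse:
    """Hypothetical stand-in carrying just the attribute the happy path reads."""
    def __init__(self, status_code):
        self.status_code = status_code

assert check_response(_StubResponse(200)) is True
# Status 401/403 raises AuthError, 400 raises RequestError, and any other
# code raises APIError, each carrying the body extracted by get_response_data().
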
9,504 | ioos/cc-plugin-ncei | cc_plugin_ncei/util.py | get_z_variable | python | train | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/util.py#L95-L108

def get_z_variable(nc):
    '''
    Returns the name of the variable that defines the Z axis or height/depth

    :param netCDF4.Dataset nc: netCDF dataset
    '''
    axis_z = nc.get_variables_by_attributes(axis='Z')
    if axis_z:
        return axis_z[0].name
    valid_standard_names = ('depth', 'height', 'altitude')
    z = nc.get_variables_by_attributes(standard_name=lambda x: x in valid_standard_names)
    if z:
        return z[0].name
    return

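A small sketch of calling the helper on an in-memory netCDF dataset (file name and variable layout are illustrative; assumes the netCDF4 package with get_variables_by_attributes support):

import netCDF4

nc = netCDF4.Dataset('example.nc', 'w', diskless=True)
nc.createDimension('z', 10)
depth = nc.createVariable('depth', 'f4', ('z',))
depth.axis = 'Z'               # matched by the first branch
depth.standard_name = 'depth'  # would also match the fallback branch

print(get_z_variable(nc))      # -> 'depth'
nc.close()
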
9,505 | LettError/MutatorMath | Lib/mutatorMath/ufo/instance.py | InstanceWriter.setGroups | python | train | https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/instance.py#L80-L100

def setGroups(self, groups, kerningGroupConversionRenameMaps=None):
    """ Copy the groups into our font. """
    skipping = []
    for name, members in groups.items():
        checked = []
        for m in members:
            if m in self.font:
                checked.append(m)
            else:
                skipping.append(m)
        if checked:
            self.font.groups[name] = checked
    if skipping:
        if self.verbose and self.logger:
            self.logger.info("\tNote: some glyphnames were removed from groups: %s (unavailable in the font)", ", ".join(skipping))
    if kerningGroupConversionRenameMaps:
        # in case the sources were UFO2,
        # and defcon upconverted them to UFO3
        # and now we have to down convert them again,
        # we don't want the UFO3 public prefixes in the group names
        self.font.kerningGroupConversionRenameMaps = kerningGroupConversionRenameMaps

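The glyph-filtering behaviour can be exercised with throwaway stand-ins (the stub classes below are illustrative, not the defcon/MutatorMath API):

class _StubFont:
    """Stand-in exposing only what setGroups() touches: membership and a groups dict."""
    def __init__(self, glyphs):
        self._glyphs = set(glyphs)
        self.groups = {}
    def __contains__(self, name):
        return name in self._glyphs

class _StubWriter:
    setGroups = setGroups  # reuse the function above as a method
    def __init__(self):
        self.font = _StubFont({'a', 'b'})
        self.verbose = False
        self.logger = None

w = _StubWriter()
w.setGroups({'public.kern1.letters': ['a', 'b', 'missing']})
print(w.font.groups)  # {'public.kern1.letters': ['a', 'b']}, 'missing' was skipped
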
9,506 | rsmuc/health_monitoring_plugins | health_monitoring_plugins/check_snmp_eaton_ups/check_snmp_eaton_ups.py | check_ups_output_current | python | train | https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/check_snmp_eaton_ups/check_snmp_eaton_ups.py#L121-L134

def check_ups_output_current(the_session, the_helper, the_snmp_value):
    """
    OID .1.3.6.1.2.1.33.1.4.4.1.3.1
    MIB excerpt
    The present output current.
    """
    a_current = calc_output_current_from_snmpvalue(the_snmp_value)

    the_helper.add_metric(
        label=the_helper.options.type,
        value=a_current,
        uom='A')

    the_helper.set_summary("Output Current is {} A".format(a_current))

9,507 | pytroll/satpy | satpy/readers/grib.py | GRIBFileHandler.get_dataset | python | train | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/grib.py#L250-L266

def get_dataset(self, dataset_id, ds_info):
    """Read a GRIB message into an xarray DataArray."""
    msg = self._get_message(ds_info)
    ds_info = self.get_metadata(msg, ds_info)
    fill = msg['missingValue']
    data = msg.values.astype(np.float32)

    if msg.valid_key('jScansPositively') and msg['jScansPositively'] == 1:
        data = data[::-1]

    if isinstance(data, np.ma.MaskedArray):
        data = data.filled(np.nan)
        data = da.from_array(data, chunks=CHUNK_SIZE)
    else:
        data[data == fill] = np.nan
        data = da.from_array(data, chunks=CHUNK_SIZE)
    return xr.DataArray(data, attrs=ds_info, dims=('y', 'x'))

9,508 | rmax/scrapy-redis | src/scrapy_redis/spiders.py | RedisMixin.schedule_next_requests | python | train | https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/spiders.py#L112-L116

def schedule_next_requests(self):
    """Schedules a request if available"""
    # TODO: While there is capacity, schedule a batch of redis requests.
    for req in self.next_requests():
        self.crawler.engine.crawl(req, spider=self)

9,509 | beerfactory/hbmqtt | hbmqtt/adapters.py | WebSocketsWriter.drain | python | train | https://github.com/beerfactory/hbmqtt/blob/4aa6fe982141abc3c54e9f4d7b981ab3eba0a13c/hbmqtt/adapters.py#L114-L121

def drain(self):
    """
    Let the write buffer of the underlying transport a chance to be flushed.
    """
    data = self._stream.getvalue()
    if len(data):
        yield from self._protocol.send(data)
    self._stream = io.BytesIO(b'')

9,510 | manns/pyspread | pyspread/src/actions/_grid_actions.py | SelectionActions.delete | python | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L1553-L1570

def delete(self):
    """Deletes a selection if any else deletes the cursor cell

    Refreshes grid after deletion
    """
    if self.grid.IsSelection():
        # Delete selection
        self.grid.actions.delete_selection()
    else:
        # Delete cell at cursor
        cursor = self.grid.actions.cursor
        self.grid.actions.delete_cell(cursor)

    # Update grid
    self.grid.ForceRefresh()

9,511 | osrg/ryu | ryu/lib/ofctl_string.py | ofp_instruction_from_str | python | train | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ofctl_string.py#L23-L73

def ofp_instruction_from_str(ofproto, action_str):
    """
    Parse an ovs-ofctl style action string and return a list of
    jsondict representations of OFPInstructionActions, which
    can then be passed to ofproto_parser.ofp_instruction_from_jsondict.

    Please note that this is for making transition from ovs-ofctl
    easier. Please consider using OFPAction constructors when writing
    new codes.

    This function takes the following arguments.

    =========== =================================================
    Argument    Description
    =========== =================================================
    ofproto     An ofproto module.
    action_str  An action string.
    =========== =================================================
    """
    action_re = re.compile(r"([a-z_]+)(\([^)]*\)|[^a-z_,()][^,()]*)*")
    result = []
    while len(action_str):
        m = action_re.match(action_str)
        if not m:
            raise ryu.exception.OFPInvalidActionString(action_str=action_str)
        action_name = m.group(1)
        this_action = m.group(0)
        paren_level = this_action.count('(') - this_action.count(')')
        assert paren_level >= 0
        try:
            # Parens can be nested. Look for as many ')'s as '('s.
            if paren_level > 0:
                this_action, rest = _tokenize_paren_block(action_str, m.end(0))
            else:
                rest = action_str[m.end(0):]
                if len(rest):
                    assert rest[0] == ','
                    rest = rest[1:]
        except Exception:
            raise ryu.exception.OFPInvalidActionString(action_str=action_str)
        if action_name == 'drop':
            assert this_action == 'drop'
            assert len(result) == 0 and rest == ''
            return []
        converter = getattr(OfctlActionConverter, action_name, None)
        if converter is None or not callable(converter):
            raise ryu.exception.OFPInvalidActionString(action_str=action_name)
        result.append(converter(ofproto, this_action))
        action_str = rest
    return result

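The tokenizing regex can be tried on its own; the ovs-ofctl-style action string below is illustrative:

import re

action_re = re.compile(r"([a-z_]+)(\([^)]*\)|[^a-z_,()][^,()]*)*")

# One match per comma-separated action: group(1) is the action name,
# group(0) is the whole action including its argument.
s = "set_field:10.0.0.1->ip_src,output:2"
while s:
    m = action_re.match(s)
    print(m.group(1), '=>', m.group(0))
    s = s[m.end(0):].lstrip(',')
# set_field => set_field:10.0.0.1->ip_src
# output => output:2
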
9,512 | numan/py-analytics | analytics/backends/redis.py | Redis._get_closest_week | python | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L58-L65

def _get_closest_week(self, metric_date):
    """
    Gets the closest monday to the date provided.
    """
    #find the offset to the closest monday
    days_after_monday = metric_date.isoweekday() - 1

    return metric_date - datetime.timedelta(days=days_after_monday)

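Worked example of the same arithmetic without the Redis backend (dates chosen arbitrarily); isoweekday() is 1 for Monday through 7 for Sunday, so subtracting the offset lands on the Monday of that ISO week:

import datetime

def closest_week(metric_date):
    days_after_monday = metric_date.isoweekday() - 1
    return metric_date - datetime.timedelta(days=days_after_monday)

print(closest_week(datetime.date(2019, 3, 7)))  # Thursday -> 2019-03-04 (a Monday)
print(closest_week(datetime.date(2019, 3, 4)))  # a Monday maps to itself
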
9,513 | MostAwesomeDude/gentleman | gentleman/base.py | AddNodeTags | python | train | https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/base.py#L1114-L1135

def AddNodeTags(r, node, tags, dry_run=False):
    """
    Adds tags to a node.

    @type node: str
    @param node: node to add tags to
    @type tags: list of str
    @param tags: tags to add to the node
    @type dry_run: bool
    @param dry_run: whether to perform a dry run

    @rtype: int
    @return: job id
    """
    query = {
        "tag": tags,
        "dry-run": dry_run,
    }

    return r.request("put", "/2/nodes/%s/tags" % node, query=query,
                     content=tags)

9,514 | secdev/scapy | scapy/automaton.py | SelectableSelector._exit_door | python | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/automaton.py#L136-L143

def _exit_door(self, _input):
    """This function is passed to each SelectableObject as a callback
    The SelectableObjects have to call it once there are ready"""
    self.results.append(_input)
    if self._ended:
        return
    self._ended = True
    self._release_all()

9,515 | tariqdaouda/pyGeno | pyGeno/tools/parsers/CSVTools.py | CSVFile.parse | python | train | https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/parsers/CSVTools.py#L231-L266

def parse(self, filePath, skipLines=0, separator = ',', stringSeparator = '"', lineSeparator = '\n') :
    """Loads a CSV file"""
    self.filename = filePath
    f = open(filePath)
    if lineSeparator == '\n' :
        lines = f.readlines()
    else :
        lines = f.read().split(lineSeparator)

    f.flush()
    f.close()

    lines = lines[skipLines:]

    self.lines = []
    self.comments = []
    for l in lines :
        # print l
        if len(l) != 0 and l[0] != "#" :
            self.lines.append(l)
        elif l[0] == "#" :
            self.comments.append(l)

    self.separator = separator
    self.lineSeparator = lineSeparator
    self.stringSeparator = stringSeparator

    self.legend = collections.OrderedDict()
    i = 0
    for c in self.lines[0].lower().replace(stringSeparator, '').split(separator) :
        legendElement = c.strip()
        if legendElement not in self.legend :
            self.legend[legendElement] = i
        i+=1

    self.strLegend = self.lines[0].replace('\r', '\n').replace('\n', '')

    self.lines = self.lines[1:]

"""Ready-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
"""
# loop through image fields and grab the first non-none one
for model_field in self._meta.fields:
if isinstance(model_field, ImageField):
if model_field.name is not 'thumbnail_override':
field_value = getattr(self, model_field.name)
if field_value.id is not None:
return field_value
# no non-none images, return None
return None | python | def first_image(self):
"""Ready-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
"""
# loop through image fields and grab the first non-none one
for model_field in self._meta.fields:
if isinstance(model_field, ImageField):
if model_field.name is not 'thumbnail_override':
field_value = getattr(self, model_field.name)
if field_value.id is not None:
return field_value
# no non-none images, return None
return None | ['def', 'first_image', '(', 'self', ')', ':', '# loop through image fields and grab the first non-none one', 'for', 'model_field', 'in', 'self', '.', '_meta', '.', 'fields', ':', 'if', 'isinstance', '(', 'model_field', ',', 'ImageField', ')', ':', 'if', 'model_field', '.', 'name', 'is', 'not', "'thumbnail_override'", ':', 'field_value', '=', 'getattr', '(', 'self', ',', 'model_field', '.', 'name', ')', 'if', 'field_value', '.', 'id', 'is', 'not', 'None', ':', 'return', 'field_value', '# no non-none images, return None', 'return', 'None'] | Ready-only attribute that provides the value of the first non-none image that's
not the thumbnail override field. | ['Ready', '-', 'only', 'attribute', 'that', 'provides', 'the', 'value', 'of', 'the', 'first', 'non', '-', 'none', 'image', 'that', 's', 'not', 'the', 'thumbnail', 'override', 'field', '.'] | train | https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/models.py#L278-L291 |
9,517 | spyder-ide/spyder | spyder/preferences/shortcuts.py | ShortcutsConfigPage.reset_to_default | python | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L895-L907

def reset_to_default(self):
    """Reset to default values of the shortcuts making a confirmation."""
    reset = QMessageBox.warning(self, _("Shortcuts reset"),
                                _("Do you want to reset "
                                  "to default values?"),
                                QMessageBox.Yes | QMessageBox.No)
    if reset == QMessageBox.No:
        return
    reset_shortcuts()
    self.main.apply_shortcuts()
    self.table.load_shortcuts()
    self.load_from_conf()
    self.set_modified(False)

9,518 | ProjetPP/PPP-Core | ppp_core/config.py | Module.should_send | python | train | https://github.com/ProjetPP/PPP-Core/blob/49ee5b16325aa7134e2e423cf75e7b2609df96a0/ppp_core/config.py#L28-L36

def should_send(self, request):
    """Returns whether or not the request should be sent to the
    modules, based on the filters."""
    if self.filters.get('whitelist', None):
        return request.tree.type in self.filters['whitelist']
    elif self.filters.get('blacklist', None):
        return request.tree.type not in self.filters['blacklist']
    else:
        return True

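The whitelist/blacklist precedence can be checked with the decision logic flattened to a plain function (the filter dicts and type names are illustrative):

def should_send_filtered(filters, tree_type):
    # Same decision order as the method above: whitelist wins, then blacklist,
    # otherwise everything is allowed.
    if filters.get('whitelist'):
        return tree_type in filters['whitelist']
    elif filters.get('blacklist'):
        return tree_type not in filters['blacklist']
    return True

print(should_send_filtered({'whitelist': ['triple']}, 'triple'))      # True
print(should_send_filtered({'blacklist': ['sentence']}, 'sentence'))  # False
print(should_send_filtered({}, 'anything'))                           # True
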
9,519 | HewlettPackard/python-hpOneView | hpOneView/oneview_client.py | OneViewClient.storage_volume_attachments | python | train | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L869-L878

def storage_volume_attachments(self):
    """
    Gets the StorageVolumeAttachments API client.

    Returns:
        StorageVolumeAttachments:
    """
    if not self.__storage_volume_attachments:
        self.__storage_volume_attachments = StorageVolumeAttachments(self.__connection)
    return self.__storage_volume_attachments

9,520 | mk-fg/graphite-metrics | graphite_metrics/collectors/cron_log.py | file_follow_durable | def file_follow_durable( path,
min_dump_interval=10,
xattr_name='user.collectd.logtail.pos', xattr_update=True,
**follow_kwz ):
'''Records log position into xattrs after reading line every
min_dump_interval seconds.
Checksum of the last line at the position
is also recorded (so line itself don't have to fit into xattr) to make sure
file wasn't truncated between last xattr dump and re-open.'''
from xattr import xattr
from io import open
from hashlib import sha1
from time import time
import struct
# Try to restore position
src = open(path, mode='rb')
src_xattr = xattr(src)
try:
if not xattr_name: raise KeyError
pos = src_xattr[xattr_name]
except KeyError: pos = None
if pos:
data_len = struct.calcsize('=I')
(pos,), chksum = struct.unpack('=I', pos[:data_len]), pos[data_len:]
(data_len,), chksum = struct.unpack('=I', chksum[:data_len]), chksum[data_len:]
try:
src.seek(pos - data_len)
if sha1(src.read(data_len)).digest() != chksum:
raise IOError('Last log line doesnt match checksum')
except (OSError, IOError) as err:
collectd.info('Failed to restore log position: {}'.format(err))
src.seek(0)
tailer = file_follow(src, yield_file=True, **follow_kwz)
# ...and keep it updated
pos_dump_ts_get = lambda ts=None: (ts or time()) + min_dump_interval
pos_dump_ts = pos_dump_ts_get()
while True:
line, src_chk = next(tailer)
if not line: pos_dump_ts = 0 # force-write xattr
ts = time()
if ts > pos_dump_ts:
if src is not src_chk:
src, src_xattr = src_chk, xattr(src_chk)
pos_new = src.tell()
if pos != pos_new:
pos = pos_new
if xattr_update:
src_xattr[xattr_name] =\
struct.pack('=I', pos)\
+ struct.pack('=I', len(line))\
+ sha1(line).digest()
pos_dump_ts = pos_dump_ts_get(ts)
if (yield line.decode('utf-8', 'replace')):
tailer.send(StopIteration)
break | python | def file_follow_durable( path,
min_dump_interval=10,
xattr_name='user.collectd.logtail.pos', xattr_update=True,
**follow_kwz ):
'''Records log position into xattrs after reading line every
min_dump_interval seconds.
Checksum of the last line at the position
is also recorded (so line itself don't have to fit into xattr) to make sure
file wasn't truncated between last xattr dump and re-open.'''
from xattr import xattr
from io import open
from hashlib import sha1
from time import time
import struct
# Try to restore position
src = open(path, mode='rb')
src_xattr = xattr(src)
try:
if not xattr_name: raise KeyError
pos = src_xattr[xattr_name]
except KeyError: pos = None
if pos:
data_len = struct.calcsize('=I')
(pos,), chksum = struct.unpack('=I', pos[:data_len]), pos[data_len:]
(data_len,), chksum = struct.unpack('=I', chksum[:data_len]), chksum[data_len:]
try:
src.seek(pos - data_len)
if sha1(src.read(data_len)).digest() != chksum:
raise IOError('Last log line doesnt match checksum')
except (OSError, IOError) as err:
collectd.info('Failed to restore log position: {}'.format(err))
src.seek(0)
tailer = file_follow(src, yield_file=True, **follow_kwz)
# ...and keep it updated
pos_dump_ts_get = lambda ts=None: (ts or time()) + min_dump_interval
pos_dump_ts = pos_dump_ts_get()
while True:
line, src_chk = next(tailer)
if not line: pos_dump_ts = 0 # force-write xattr
ts = time()
if ts > pos_dump_ts:
if src is not src_chk:
src, src_xattr = src_chk, xattr(src_chk)
pos_new = src.tell()
if pos != pos_new:
pos = pos_new
if xattr_update:
src_xattr[xattr_name] =\
struct.pack('=I', pos)\
+ struct.pack('=I', len(line))\
+ sha1(line).digest()
pos_dump_ts = pos_dump_ts_get(ts)
if (yield line.decode('utf-8', 'replace')):
tailer.send(StopIteration)
break | ['def', 'file_follow_durable', '(', 'path', ',', 'min_dump_interval', '=', '10', ',', 'xattr_name', '=', "'user.collectd.logtail.pos'", ',', 'xattr_update', '=', 'True', ',', '*', '*', 'follow_kwz', ')', ':', 'from', 'xattr', 'import', 'xattr', 'from', 'io', 'import', 'open', 'from', 'hashlib', 'import', 'sha1', 'from', 'time', 'import', 'time', 'import', 'struct', '# Try to restore position', 'src', '=', 'open', '(', 'path', ',', 'mode', '=', "'rb'", ')', 'src_xattr', '=', 'xattr', '(', 'src', ')', 'try', ':', 'if', 'not', 'xattr_name', ':', 'raise', 'KeyError', 'pos', '=', 'src_xattr', '[', 'xattr_name', ']', 'except', 'KeyError', ':', 'pos', '=', 'None', 'if', 'pos', ':', 'data_len', '=', 'struct', '.', 'calcsize', '(', "'=I'", ')', '(', 'pos', ',', ')', ',', 'chksum', '=', 'struct', '.', 'unpack', '(', "'=I'", ',', 'pos', '[', ':', 'data_len', ']', ')', ',', 'pos', '[', 'data_len', ':', ']', '(', 'data_len', ',', ')', ',', 'chksum', '=', 'struct', '.', 'unpack', '(', "'=I'", ',', 'chksum', '[', ':', 'data_len', ']', ')', ',', 'chksum', '[', 'data_len', ':', ']', 'try', ':', 'src', '.', 'seek', '(', 'pos', '-', 'data_len', ')', 'if', 'sha1', '(', 'src', '.', 'read', '(', 'data_len', ')', ')', '.', 'digest', '(', ')', '!=', 'chksum', ':', 'raise', 'IOError', '(', "'Last log line doesnt match checksum'", ')', 'except', '(', 'OSError', ',', 'IOError', ')', 'as', 'err', ':', 'collectd', '.', 'info', '(', "'Failed to restore log position: {}'", '.', 'format', '(', 'err', ')', ')', 'src', '.', 'seek', '(', '0', ')', 'tailer', '=', 'file_follow', '(', 'src', ',', 'yield_file', '=', 'True', ',', '*', '*', 'follow_kwz', ')', '# ...and keep it updated', 'pos_dump_ts_get', '=', 'lambda', 'ts', '=', 'None', ':', '(', 'ts', 'or', 'time', '(', ')', ')', '+', 'min_dump_interval', 'pos_dump_ts', '=', 'pos_dump_ts_get', '(', ')', 'while', 'True', ':', 'line', ',', 'src_chk', '=', 'next', '(', 'tailer', ')', 'if', 'not', 'line', ':', 'pos_dump_ts', '=', '0', '# force-write xattr', 'ts', '=', 'time', '(', ')', 'if', 'ts', '>', 'pos_dump_ts', ':', 'if', 'src', 'is', 'not', 'src_chk', ':', 'src', ',', 'src_xattr', '=', 'src_chk', ',', 'xattr', '(', 'src_chk', ')', 'pos_new', '=', 'src', '.', 'tell', '(', ')', 'if', 'pos', '!=', 'pos_new', ':', 'pos', '=', 'pos_new', 'if', 'xattr_update', ':', 'src_xattr', '[', 'xattr_name', ']', '=', 'struct', '.', 'pack', '(', "'=I'", ',', 'pos', ')', '+', 'struct', '.', 'pack', '(', "'=I'", ',', 'len', '(', 'line', ')', ')', '+', 'sha1', '(', 'line', ')', '.', 'digest', '(', ')', 'pos_dump_ts', '=', 'pos_dump_ts_get', '(', 'ts', ')', 'if', '(', 'yield', 'line', '.', 'decode', '(', "'utf-8'", ',', "'replace'", ')', ')', ':', 'tailer', '.', 'send', '(', 'StopIteration', ')', 'break'] | Records log position into xattrs after reading line every
min_dump_interval seconds.
Checksum of the last line at the position
is also recorded (so line itself doesn't have to fit into xattr) to make sure
file wasn't truncated between last xattr dump and re-open. | ['Records', 'log', 'position', 'into', 'xattrs', 'after', 'reading', 'line', 'every', 'min_dump_interval', 'seconds', '.', 'Checksum', 'of', 'the', 'last', 'line', 'at', 'the', 'position', 'is', 'also', 'recorded', '(', 'so', 'line', 'itself', 'don', 't', 'have', 'to', 'fit', 'into', 'xattr', ')', 'to', 'make', 'sure', 'file', 'wasn', 't', 'truncated', 'between', 'last', 'xattr', 'dump', 'and', 're', '-', 'open', '.'] | train | https://github.com/mk-fg/graphite-metrics/blob/f0ba28d1ed000b2316d3c403206eba78dd7b4c50/graphite_metrics/collectors/cron_log.py#L78-L135 |
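The xattr payload used above is just a fixed-size header plus a SHA-1 digest. A minimal sketch of packing and re-checking such a record with only the standard library (xattr handling and collectd are out of scope here; the toy reader below is an assumption for illustration):

```python
import struct
from hashlib import sha1

def pack_position(pos, line):
    # 4-byte offset after the line, 4-byte line length, SHA-1 of the line
    return struct.pack('=I', pos) + struct.pack('=I', len(line)) + sha1(line).digest()

def position_is_valid(payload, read_at):
    # read_at(offset, length) must return the bytes stored at that offset
    pos = struct.unpack('=I', payload[:4])[0]
    line_len = struct.unpack('=I', payload[4:8])[0]
    chksum = payload[8:]
    return sha1(read_at(pos - line_len, line_len)).digest() == chksum

line = b'example log line\n'
payload = pack_position(120, line)
print(position_is_valid(payload, lambda off, n: line))  # True for this toy reader
```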
9,521 | secdev/scapy | scapy/layers/tls/record_tls13.py | TLS13._tls_auth_decrypt | def _tls_auth_decrypt(self, s):
"""
Provided with the record header and AEAD-ciphered data, return the
sliced and clear tuple (TLSInnerPlaintext, tag). Note that
we still return the slicing of the original input in case of decryption
failure. Also, if the integrity check fails, a warning will be issued,
but we still return the sliced (unauthenticated) plaintext.
"""
rcs = self.tls_session.rcs
read_seq_num = struct.pack("!Q", rcs.seq_num)
rcs.seq_num += 1
try:
return rcs.cipher.auth_decrypt(b"", s, read_seq_num)
except CipherError as e:
return e.args
except AEADTagError as e:
pkt_info = self.firstlayer().summary()
log_runtime.info("TLS: record integrity check failed [%s]", pkt_info) # noqa: E501
return e.args | python | def _tls_auth_decrypt(self, s):
"""
Provided with the record header and AEAD-ciphered data, return the
sliced and clear tuple (TLSInnerPlaintext, tag). Note that
we still return the slicing of the original input in case of decryption
failure. Also, if the integrity check fails, a warning will be issued,
but we still return the sliced (unauthenticated) plaintext.
"""
rcs = self.tls_session.rcs
read_seq_num = struct.pack("!Q", rcs.seq_num)
rcs.seq_num += 1
try:
return rcs.cipher.auth_decrypt(b"", s, read_seq_num)
except CipherError as e:
return e.args
except AEADTagError as e:
pkt_info = self.firstlayer().summary()
log_runtime.info("TLS: record integrity check failed [%s]", pkt_info) # noqa: E501
return e.args | ['def', '_tls_auth_decrypt', '(', 'self', ',', 's', ')', ':', 'rcs', '=', 'self', '.', 'tls_session', '.', 'rcs', 'read_seq_num', '=', 'struct', '.', 'pack', '(', '"!Q"', ',', 'rcs', '.', 'seq_num', ')', 'rcs', '.', 'seq_num', '+=', '1', 'try', ':', 'return', 'rcs', '.', 'cipher', '.', 'auth_decrypt', '(', 'b""', ',', 's', ',', 'read_seq_num', ')', 'except', 'CipherError', 'as', 'e', ':', 'return', 'e', '.', 'args', 'except', 'AEADTagError', 'as', 'e', ':', 'pkt_info', '=', 'self', '.', 'firstlayer', '(', ')', '.', 'summary', '(', ')', 'log_runtime', '.', 'info', '(', '"TLS: record integrity check failed [%s]"', ',', 'pkt_info', ')', '# noqa: E501', 'return', 'e', '.', 'args'] | Provided with the record header and AEAD-ciphered data, return the
sliced and clear tuple (TLSInnerPlaintext, tag). Note that
we still return the slicing of the original input in case of decryption
failure. Also, if the integrity check fails, a warning will be issued,
but we still return the sliced (unauthenticated) plaintext. | ['Provided', 'with', 'the', 'record', 'header', 'and', 'AEAD', '-', 'ciphered', 'data', 'return', 'the', 'sliced', 'and', 'clear', 'tuple', '(', 'TLSInnerPlaintext', 'tag', ')', '.', 'Note', 'that', 'we', 'still', 'return', 'the', 'slicing', 'of', 'the', 'original', 'input', 'in', 'case', 'of', 'decryption', 'failure', '.', 'Also', 'if', 'the', 'integrity', 'check', 'fails', 'a', 'warning', 'will', 'be', 'issued', 'but', 'we', 'still', 'return', 'the', 'sliced', '(', 'unauthenticated', ')', 'plaintext', '.'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/record_tls13.py#L104-L122 |
9,522 | gamechanger/schemer | schemer/validators.py | gte | def gte(min_value):
"""
Validates that a field value is greater than or equal to the
value given to this validator.
"""
def validate(value):
if value < min_value:
return e("{} is not greater than or equal to {}", value, min_value)
return validate | python | def gte(min_value):
"""
Validates that a field value is greater than or equal to the
value given to this validator.
"""
def validate(value):
if value < min_value:
return e("{} is not greater than or equal to {}", value, min_value)
return validate | ['def', 'gte', '(', 'min_value', ')', ':', 'def', 'validate', '(', 'value', ')', ':', 'if', 'value', '<', 'min_value', ':', 'return', 'e', '(', '"{} is not greater than or equal to {}"', ',', 'value', ',', 'min_value', ')', 'return', 'validate'] | Validates that a field value is greater than or equal to the
value given to this validator. | ['Validates', 'that', 'a', 'field', 'value', 'is', 'greater', 'than', 'or', 'equal', 'to', 'the', 'value', 'given', 'to', 'this', 'validator', '.'] | train | https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/validators.py#L24-L32 |
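A short usage sketch of the validator above. The `e` helper is assumed to be a plain message formatter, so a stand-in is defined to keep the example self-contained:

```python
def e(fmt, *args):
    # stand-in for schemer's error helper: just format the message
    return fmt.format(*args)

def gte(min_value):
    def validate(value):
        if value < min_value:
            return e("{} is not greater than or equal to {}", value, min_value)
    return validate

at_least_18 = gte(18)
print(at_least_18(21))  # None, the value passes
print(at_least_18(16))  # 16 is not greater than or equal to 18
```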
9,523 | iskandr/fancyimpute | fancyimpute/dictionary_helpers.py | nested_key_indices | def nested_key_indices(nested_dict):
"""
Give an ordering to the outer and inner keys used in a dictionary that
maps to dictionaries.
"""
outer_keys, inner_keys = collect_nested_keys(nested_dict)
outer_key_indices = {k: i for (i, k) in enumerate(outer_keys)}
inner_key_indices = {k: i for (i, k) in enumerate(inner_keys)}
return outer_key_indices, inner_key_indices | python | def nested_key_indices(nested_dict):
"""
Give an ordering to the outer and inner keys used in a dictionary that
maps to dictionaries.
"""
outer_keys, inner_keys = collect_nested_keys(nested_dict)
outer_key_indices = {k: i for (i, k) in enumerate(outer_keys)}
inner_key_indices = {k: i for (i, k) in enumerate(inner_keys)}
return outer_key_indices, inner_key_indices | ['def', 'nested_key_indices', '(', 'nested_dict', ')', ':', 'outer_keys', ',', 'inner_keys', '=', 'collect_nested_keys', '(', 'nested_dict', ')', 'outer_key_indices', '=', '{', 'k', ':', 'i', 'for', '(', 'i', ',', 'k', ')', 'in', 'enumerate', '(', 'outer_keys', ')', '}', 'inner_key_indices', '=', '{', 'k', ':', 'i', 'for', '(', 'i', ',', 'k', ')', 'in', 'enumerate', '(', 'inner_keys', ')', '}', 'return', 'outer_key_indices', ',', 'inner_key_indices'] | Give an ordering to the outer and inner keys used in a dictionary that
maps to dictionaries. | ['Give', 'an', 'ordering', 'to', 'the', 'outer', 'and', 'inner', 'keys', 'used', 'in', 'a', 'dictionary', 'that', 'maps', 'to', 'dictionaries', '.'] | train | https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/dictionary_helpers.py#L39-L47 |
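A self-contained sketch of the same idea; `collect_nested_keys` is not shown in this record, so the ordering below (sorted keys) is an assumption made only for illustration:

```python
def nested_key_indices_sketch(nested_dict):
    # give each outer key and each inner key a stable integer position
    outer_keys = sorted(nested_dict)
    inner_keys = sorted({k for inner in nested_dict.values() for k in inner})
    return ({k: i for i, k in enumerate(outer_keys)},
            {k: i for i, k in enumerate(inner_keys)})

scores = {"row_a": {"col_1": 0.2, "col_2": 0.8},
          "row_b": {"col_2": 0.5, "col_3": 0.1}}
outer_idx, inner_idx = nested_key_indices_sketch(scores)
print(outer_idx)  # {'row_a': 0, 'row_b': 1}
print(inner_idx)  # {'col_1': 0, 'col_2': 1, 'col_3': 2}
```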
9,524 | jedie/DragonPy | dragonpy/Dragon32/MC6821_PIA.py | PIA.read_PIA0_A_control | def read_PIA0_A_control(self, cpu_cycles, op_address, address):
"""
read from 0xff01 -> PIA 0 A side control register
"""
value = 0xb3
log.error(
"%04x| read $%04x (PIA 0 A side Control reg.) send $%02x (%s) back.\t|%s",
op_address, address, value, byte2bit_string(value),
self.cfg.mem_info.get_shortest(op_address)
)
return value | python | def read_PIA0_A_control(self, cpu_cycles, op_address, address):
"""
read from 0xff01 -> PIA 0 A side control register
"""
value = 0xb3
log.error(
"%04x| read $%04x (PIA 0 A side Control reg.) send $%02x (%s) back.\t|%s",
op_address, address, value, byte2bit_string(value),
self.cfg.mem_info.get_shortest(op_address)
)
return value | ['def', 'read_PIA0_A_control', '(', 'self', ',', 'cpu_cycles', ',', 'op_address', ',', 'address', ')', ':', 'value', '=', '0xb3', 'log', '.', 'error', '(', '"%04x| read $%04x (PIA 0 A side Control reg.) send $%02x (%s) back.\\t|%s"', ',', 'op_address', ',', 'address', ',', 'value', ',', 'byte2bit_string', '(', 'value', ')', ',', 'self', '.', 'cfg', '.', 'mem_info', '.', 'get_shortest', '(', 'op_address', ')', ')', 'return', 'value'] | read from 0xff01 -> PIA 0 A side control register | ['read', 'from', '0xff01', '-', '>', 'PIA', '0', 'A', 'side', 'control', 'register'] | train | https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/Dragon32/MC6821_PIA.py#L309-L319 |
9,525 | ev3dev/ev3dev-lang-python | ev3dev2/motor.py | Motor.driver_name | def driver_name(self):
"""
Returns the name of the driver that provides this tacho motor device.
"""
(self._driver_name, value) = self.get_cached_attr_string(self._driver_name, 'driver_name')
return value | python | def driver_name(self):
"""
Returns the name of the driver that provides this tacho motor device.
"""
(self._driver_name, value) = self.get_cached_attr_string(self._driver_name, 'driver_name')
return value | ['def', 'driver_name', '(', 'self', ')', ':', '(', 'self', '.', '_driver_name', ',', 'value', ')', '=', 'self', '.', 'get_cached_attr_string', '(', 'self', '.', '_driver_name', ',', "'driver_name'", ')', 'return', 'value'] | Returns the name of the driver that provides this tacho motor device. | ['Returns', 'the', 'name', 'of', 'the', 'driver', 'that', 'provides', 'this', 'tacho', 'motor', 'device', '.'] | train | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L469-L474 |
9,526 | bitesofcode/projexui | projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py | XMenuTemplateWidget.renameMenu | def renameMenu( self ):
"""
Prompts the user to supply a new name for the menu.
"""
item = self.uiMenuTREE.currentItem()
name, accepted = QInputDialog.getText( self,
'Rename Menu',
'Name:',
QLineEdit.Normal,
item.text(0))
if ( accepted ):
item.setText(0, name) | python | def renameMenu( self ):
"""
Prompts the user to supply a new name for the menu.
"""
item = self.uiMenuTREE.currentItem()
name, accepted = QInputDialog.getText( self,
'Rename Menu',
'Name:',
QLineEdit.Normal,
item.text(0))
if ( accepted ):
item.setText(0, name) | ['def', 'renameMenu', '(', 'self', ')', ':', 'item', '=', 'self', '.', 'uiMenuTREE', '.', 'currentItem', '(', ')', 'name', ',', 'accepted', '=', 'QInputDialog', '.', 'getText', '(', 'self', ',', "'Rename Menu'", ',', "'Name:'", ',', 'QLineEdit', '.', 'Normal', ',', 'item', '.', 'text', '(', '0', ')', ')', 'if', '(', 'accepted', ')', ':', 'item', '.', 'setText', '(', '0', ',', 'name', ')'] | Prompts the user to supply a new name for the menu. | ['Prompts', 'the', 'user', 'to', 'supply', 'a', 'new', 'name', 'for', 'the', 'menu', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmenutemplatewidget/xmenutemplatewidget.py#L271-L283 |
9,527 | secdev/scapy | scapy/contrib/pnio_rpc.py | IODWriteReq.get_response | def get_response(self):
"""Generate the response block of this request.
Careful: it only sets the fields which can be set from the request
"""
res = IODWriteRes()
for field in ["seqNum", "ARUUID", "API", "slotNumber",
"subslotNumber", "index"]:
res.setfieldval(field, self.getfieldval(field))
return res | python | def get_response(self):
"""Generate the response block of this request.
Careful: it only sets the fields which can be set from the request
"""
res = IODWriteRes()
for field in ["seqNum", "ARUUID", "API", "slotNumber",
"subslotNumber", "index"]:
res.setfieldval(field, self.getfieldval(field))
return res | ['def', 'get_response', '(', 'self', ')', ':', 'res', '=', 'IODWriteRes', '(', ')', 'for', 'field', 'in', '[', '"seqNum"', ',', '"ARUUID"', ',', '"API"', ',', '"slotNumber"', ',', '"subslotNumber"', ',', '"index"', ']', ':', 'res', '.', 'setfieldval', '(', 'field', ',', 'self', '.', 'getfieldval', '(', 'field', ')', ')', 'return', 'res'] | Generate the response block of this request.
Careful: it only sets the fields which can be set from the request | ['Generate', 'the', 'response', 'block', 'of', 'this', 'request', '.', 'Careful', ':', 'it', 'only', 'sets', 'the', 'fields', 'which', 'can', 'be', 'set', 'from', 'the', 'request'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/pnio_rpc.py#L445-L453 |
9,528 | zeromake/aiko | aiko/response.py | Response.handel_default | def handel_default(self) -> None:
"""
Handle default headers for the data set on the body
"""
raw_body = self._body
body = cast(Optional[bytes], None)
default_type = 2
charset = self._charset or self._default_charset
if raw_body is None:
pass
elif isinstance(raw_body, bytes):
# body is bytes
default_type = 2
body = raw_body
elif isinstance(raw_body, str):
# body 为字符串
default_type = 2
body = encode_str(raw_body, charset)
elif isinstance(raw_body, (list, dict)):
# body 为json
default_type = 3
body = encode_str(json.dumps(raw_body, ensure_ascii=False), charset)
elif isinstance(raw_body, RawIOBase):
# body is a file
default_type = 1
body = raw_body.read()
raw_body.close()
if "Content-Length" not in self._headers and \
"Transfer-Encoding" not in self._headers \
or self._headers["Transfer-Encoding"] != "chunked":
if self.length is None:
if body is not None:
self.length = len(body)
else:
self.length = 0
# set the default Content-Length
self.set("Content-Length", str(self.length))
# print(body[0], body[1])
if body is not None and body.startswith(encode_str("<", charset)):
default_type = 4
if "Content-Type" not in self._headers.keys():
type_str = self.type
if type_str is None:
temp = DEFAULT_TYPE.get(default_type)
if temp is not None:
if default_type != 1:
temp += "; charset=%s" % charset
type_str = temp
if type_str is not None:
# set the default Content-Type
self.set("Content-Type", type_str)
self._body = body | python | def handel_default(self) -> None:
"""
Handle default headers for the data set on the body
"""
raw_body = self._body
body = cast(Optional[bytes], None)
default_type = 2
charset = self._charset or self._default_charset
if raw_body is None:
pass
elif isinstance(raw_body, bytes):
# body is bytes
default_type = 2
body = raw_body
elif isinstance(raw_body, str):
# body is a string
default_type = 2
body = encode_str(raw_body, charset)
elif isinstance(raw_body, (list, dict)):
# body is JSON
default_type = 3
body = encode_str(json.dumps(raw_body, ensure_ascii=False), charset)
elif isinstance(raw_body, RawIOBase):
# body is a file
default_type = 1
body = raw_body.read()
raw_body.close()
if "Content-Length" not in self._headers and \
"Transfer-Encoding" not in self._headers \
or self._headers["Transfer-Encoding"] != "chunked":
if self.length is None:
if body is not None:
self.length = len(body)
else:
self.length = 0
# set the default Content-Length
self.set("Content-Length", str(self.length))
# print(body[0], body[1])
if body is not None and body.startswith(encode_str("<", charset)):
default_type = 4
if "Content-Type" not in self._headers.keys():
type_str = self.type
if type_str is None:
temp = DEFAULT_TYPE.get(default_type)
if temp is not None:
if default_type != 1:
temp += "; charset=%s" % charset
type_str = temp
if type_str is not None:
# set the default Content-Type
self.set("Content-Type", type_str)
self._body = body | ['def', 'handel_default', '(', 'self', ')', '->', 'None', ':', 'raw_body', '=', 'self', '.', '_body', 'body', '=', 'cast', '(', 'Optional', '[', 'bytes', ']', ',', 'None', ')', 'default_type', '=', '2', 'charset', '=', 'self', '.', '_charset', 'or', 'self', '.', '_default_charset', 'if', 'raw_body', 'is', 'None', ':', 'pass', 'elif', 'isinstance', '(', 'raw_body', ',', 'bytes', ')', ':', '# body为bytes', 'default_type', '=', '2', 'body', '=', 'raw_body', 'elif', 'isinstance', '(', 'raw_body', ',', 'str', ')', ':', '# body 为字符串', 'default_type', '=', '2', 'body', '=', 'encode_str', '(', 'raw_body', ',', 'charset', ')', 'elif', 'isinstance', '(', 'raw_body', ',', '(', 'list', ',', 'dict', ')', ')', ':', '# body 为json', 'default_type', '=', '3', 'body', '=', 'encode_str', '(', 'json', '.', 'dumps', '(', 'raw_body', ',', 'ensure_ascii', '=', 'False', ')', ',', 'charset', ')', 'elif', 'isinstance', '(', 'raw_body', ',', 'RawIOBase', ')', ':', '# body 为文件', 'default_type', '=', '1', 'body', '=', 'raw_body', '.', 'read', '(', ')', 'raw_body', '.', 'close', '(', ')', 'if', '"Content-Length"', 'not', 'in', 'self', '.', '_headers', 'and', '"Transfer-Encoding"', 'not', 'in', 'self', '.', '_headers', 'or', 'self', '.', '_headers', '[', '"Transfer-Encoding"', ']', '!=', '"chunked"', ':', 'if', 'self', '.', 'length', 'is', 'None', ':', 'if', 'body', 'is', 'not', 'None', ':', 'self', '.', 'length', '=', 'len', '(', 'body', ')', 'else', ':', 'self', '.', 'length', '=', '0', '# 设置默认 Content-Length', 'self', '.', 'set', '(', '"Content-Length"', ',', 'str', '(', 'self', '.', 'length', ')', ')', '# print(body[0], body[1])', 'if', 'body', 'is', 'not', 'None', 'and', 'body', '.', 'startswith', '(', 'encode_str', '(', '"<"', ',', 'charset', ')', ')', ':', 'default_type', '=', '4', 'if', '"Content-Type"', 'not', 'in', 'self', '.', '_headers', '.', 'keys', '(', ')', ':', 'type_str', '=', 'self', '.', 'type', 'if', 'type_str', 'is', 'None', ':', 'temp', '=', 'DEFAULT_TYPE', '.', 'get', '(', 'default_type', ')', 'if', 'temp', 'is', 'not', 'None', ':', 'if', 'default_type', '!=', '1', ':', 'temp', '+=', '"; charset=%s"', '%', 'charset', 'type_str', '=', 'temp', 'if', 'type_str', 'is', 'not', 'None', ':', '# 设置默认 Content-Type', 'self', '.', 'set', '(', '"Content-Type"', ',', 'type_str', ')', 'self', '.', '_body', '=', 'body'] | 处理设置到body上的数据默认 headers | ['处理设置到body上的数据默认', 'headers'] | train | https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/response.py#L261-L312 |
9,529 | jasonrbriggs/stomp.py | stomp/protocol.py | Protocol10.begin | def begin(self, transaction=None, headers=None, **keyword_headers):
"""
Begin a transaction.
:param str transaction: the identifier for the transaction (optional - if not specified
a unique transaction id will be generated)
:param dict headers: a map of any additional headers the broker requires
:param keyword_headers: any additional headers the broker requires
:return: the transaction id
:rtype: str
"""
headers = utils.merge_headers([headers, keyword_headers])
if not transaction:
transaction = utils.get_uuid()
headers[HDR_TRANSACTION] = transaction
self.send_frame(CMD_BEGIN, headers)
return transaction | python | def begin(self, transaction=None, headers=None, **keyword_headers):
"""
Begin a transaction.
:param str transaction: the identifier for the transaction (optional - if not specified
a unique transaction id will be generated)
:param dict headers: a map of any additional headers the broker requires
:param keyword_headers: any additional headers the broker requires
:return: the transaction id
:rtype: str
"""
headers = utils.merge_headers([headers, keyword_headers])
if not transaction:
transaction = utils.get_uuid()
headers[HDR_TRANSACTION] = transaction
self.send_frame(CMD_BEGIN, headers)
return transaction | ['def', 'begin', '(', 'self', ',', 'transaction', '=', 'None', ',', 'headers', '=', 'None', ',', '*', '*', 'keyword_headers', ')', ':', 'headers', '=', 'utils', '.', 'merge_headers', '(', '[', 'headers', ',', 'keyword_headers', ']', ')', 'if', 'not', 'transaction', ':', 'transaction', '=', 'utils', '.', 'get_uuid', '(', ')', 'headers', '[', 'HDR_TRANSACTION', ']', '=', 'transaction', 'self', '.', 'send_frame', '(', 'CMD_BEGIN', ',', 'headers', ')', 'return', 'transaction'] | Begin a transaction.
:param str transaction: the identifier for the transaction (optional - if not specified
a unique transaction id will be generated)
:param dict headers: a map of any additional headers the broker requires
:param keyword_headers: any additional headers the broker requires
:return: the transaction id
:rtype: str | ['Begin', 'a', 'transaction', '.'] | train | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/protocol.py#L68-L85 |
9,530 | johnwheeler/flask-ask | flask_ask/core.py | Ask._parse_timestamp | def _parse_timestamp(timestamp):
"""
Parse a given timestamp value, raising ValueError if None or Falsey
"""
if timestamp:
try:
return aniso8601.parse_datetime(timestamp)
except AttributeError:
# raised by aniso8601 if raw_timestamp is not valid string
# in ISO8601 format
try:
return datetime.utcfromtimestamp(timestamp)
except:
# relax the timestamp a bit in case it was sent in millis
return datetime.utcfromtimestamp(timestamp/1000)
raise ValueError('Invalid timestamp value! Cannot parse from either ISO8601 string or UTC timestamp.') | python | def _parse_timestamp(timestamp):
"""
Parse a given timestamp value, raising ValueError if None or Falsey
"""
if timestamp:
try:
return aniso8601.parse_datetime(timestamp)
except AttributeError:
# raised by aniso8601 if raw_timestamp is not valid string
# in ISO8601 format
try:
return datetime.utcfromtimestamp(timestamp)
except:
# relax the timestamp a bit in case it was sent in millis
return datetime.utcfromtimestamp(timestamp/1000)
raise ValueError('Invalid timestamp value! Cannot parse from either ISO8601 string or UTC timestamp.') | ['def', '_parse_timestamp', '(', 'timestamp', ')', ':', 'if', 'timestamp', ':', 'try', ':', 'return', 'aniso8601', '.', 'parse_datetime', '(', 'timestamp', ')', 'except', 'AttributeError', ':', '# raised by aniso8601 if raw_timestamp is not valid string', '# in ISO8601 format', 'try', ':', 'return', 'datetime', '.', 'utcfromtimestamp', '(', 'timestamp', ')', 'except', ':', '# relax the timestamp a bit in case it was sent in millis', 'return', 'datetime', '.', 'utcfromtimestamp', '(', 'timestamp', '/', '1000', ')', 'raise', 'ValueError', '(', "'Invalid timestamp value! Cannot parse from either ISO8601 string or UTC timestamp.'", ')'] | Parse a given timestamp value, raising ValueError if None or Flasey | ['Parse', 'a', 'given', 'timestamp', 'value', 'raising', 'ValueError', 'if', 'None', 'or', 'Flasey'] | train | https://github.com/johnwheeler/flask-ask/blob/fe407646ae404a8c90b363c86d5c4c201b6a5580/flask_ask/core.py#L724-L740 |
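The seconds-versus-milliseconds fallback can be shown with the standard library alone (the aniso8601 branch is omitted; the sample value is arbitrary):

```python
from datetime import datetime

def parse_epoch(timestamp):
    try:
        return datetime.utcfromtimestamp(timestamp)
    except (ValueError, OSError, OverflowError):
        # relax the timestamp in case it was sent in milliseconds
        return datetime.utcfromtimestamp(timestamp / 1000)

print(parse_epoch(1500000000))     # 2017-07-14 02:40:00
print(parse_epoch(1500000000000))  # same instant, supplied in milliseconds
```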
9,531 | GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/handlers.py | KickOffJobHandler._get_input_readers | def _get_input_readers(self, state):
"""Get input readers.
Args:
state: a MapreduceState model.
Returns:
A tuple: (a list of input readers, a model._HugeTaskPayload entity).
The payload entity contains the json serialized input readers.
(None, None) when input reader splitting returned no data to process.
"""
serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %
state.key().id_or_name())
serialized_input_readers = model._HugeTaskPayload.get_by_key_name(
serialized_input_readers_key, parent=state)
# Initialize input readers.
input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
split_param = state.mapreduce_spec.mapper
if issubclass(input_reader_class, map_job.InputReader):
split_param = map_job.JobConfig._to_map_job_config(
state.mapreduce_spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
if serialized_input_readers is None:
readers = input_reader_class.split_input(split_param)
else:
readers = [input_reader_class.from_json_str(_json) for _json in
json.loads(zlib.decompress(
serialized_input_readers.payload))]
if not readers:
return None, None
# Update state and spec with actual shard count.
state.mapreduce_spec.mapper.shard_count = len(readers)
state.active_shards = len(readers)
# Prepare to save serialized input readers.
if serialized_input_readers is None:
# Use mr_state as parent so it can be easily cleaned up later.
serialized_input_readers = model._HugeTaskPayload(
key_name=serialized_input_readers_key, parent=state)
readers_json_str = [i.to_json_str() for i in readers]
serialized_input_readers.payload = zlib.compress(json.dumps(
readers_json_str))
return readers, serialized_input_readers | python | def _get_input_readers(self, state):
"""Get input readers.
Args:
state: a MapreduceState model.
Returns:
A tuple: (a list of input readers, a model._HugeTaskPayload entity).
The payload entity contains the json serialized input readers.
(None, None) when input reader splitting returned no data to process.
"""
serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %
state.key().id_or_name())
serialized_input_readers = model._HugeTaskPayload.get_by_key_name(
serialized_input_readers_key, parent=state)
# Initialize input readers.
input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
split_param = state.mapreduce_spec.mapper
if issubclass(input_reader_class, map_job.InputReader):
split_param = map_job.JobConfig._to_map_job_config(
state.mapreduce_spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
if serialized_input_readers is None:
readers = input_reader_class.split_input(split_param)
else:
readers = [input_reader_class.from_json_str(_json) for _json in
json.loads(zlib.decompress(
serialized_input_readers.payload))]
if not readers:
return None, None
# Update state and spec with actual shard count.
state.mapreduce_spec.mapper.shard_count = len(readers)
state.active_shards = len(readers)
# Prepare to save serialized input readers.
if serialized_input_readers is None:
# Use mr_state as parent so it can be easily cleaned up later.
serialized_input_readers = model._HugeTaskPayload(
key_name=serialized_input_readers_key, parent=state)
readers_json_str = [i.to_json_str() for i in readers]
serialized_input_readers.payload = zlib.compress(json.dumps(
readers_json_str))
return readers, serialized_input_readers | ['def', '_get_input_readers', '(', 'self', ',', 'state', ')', ':', 'serialized_input_readers_key', '=', '(', 'self', '.', '_SERIALIZED_INPUT_READERS_KEY', '%', 'state', '.', 'key', '(', ')', '.', 'id_or_name', '(', ')', ')', 'serialized_input_readers', '=', 'model', '.', '_HugeTaskPayload', '.', 'get_by_key_name', '(', 'serialized_input_readers_key', ',', 'parent', '=', 'state', ')', '# Initialize input readers.', 'input_reader_class', '=', 'state', '.', 'mapreduce_spec', '.', 'mapper', '.', 'input_reader_class', '(', ')', 'split_param', '=', 'state', '.', 'mapreduce_spec', '.', 'mapper', 'if', 'issubclass', '(', 'input_reader_class', ',', 'map_job', '.', 'InputReader', ')', ':', 'split_param', '=', 'map_job', '.', 'JobConfig', '.', '_to_map_job_config', '(', 'state', '.', 'mapreduce_spec', ',', 'os', '.', 'environ', '.', 'get', '(', '"HTTP_X_APPENGINE_QUEUENAME"', ')', ')', 'if', 'serialized_input_readers', 'is', 'None', ':', 'readers', '=', 'input_reader_class', '.', 'split_input', '(', 'split_param', ')', 'else', ':', 'readers', '=', '[', 'input_reader_class', '.', 'from_json_str', '(', '_json', ')', 'for', '_json', 'in', 'json', '.', 'loads', '(', 'zlib', '.', 'decompress', '(', 'serialized_input_readers', '.', 'payload', ')', ')', ']', 'if', 'not', 'readers', ':', 'return', 'None', ',', 'None', '# Update state and spec with actual shard count.', 'state', '.', 'mapreduce_spec', '.', 'mapper', '.', 'shard_count', '=', 'len', '(', 'readers', ')', 'state', '.', 'active_shards', '=', 'len', '(', 'readers', ')', '# Prepare to save serialized input readers.', 'if', 'serialized_input_readers', 'is', 'None', ':', '# Use mr_state as parent so it can be easily cleaned up later.', 'serialized_input_readers', '=', 'model', '.', '_HugeTaskPayload', '(', 'key_name', '=', 'serialized_input_readers_key', ',', 'parent', '=', 'state', ')', 'readers_json_str', '=', '[', 'i', '.', 'to_json_str', '(', ')', 'for', 'i', 'in', 'readers', ']', 'serialized_input_readers', '.', 'payload', '=', 'zlib', '.', 'compress', '(', 'json', '.', 'dumps', '(', 'readers_json_str', ')', ')', 'return', 'readers', ',', 'serialized_input_readers'] | Get input readers.
Args:
state: a MapreduceState model.
Returns:
A tuple: (a list of input readers, a model._HugeTaskPayload entity).
The payload entity contains the json serialized input readers.
(None, None) when input reader splitting returned no data to process. | ['Get', 'input', 'readers', '.'] | train | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/handlers.py#L1435-L1480 |
9,532 | bitesofcode/projexui | projexui/widgets/xsplitter.py | XSplitterHandle.toggleCollapseAfter | def toggleCollapseAfter( self ):
"""
Collapses the splitter after this handle.
"""
if ( self.isCollapsed() ):
self.uncollapse()
else:
self.collapse( XSplitterHandle.CollapseDirection.After ) | python | def toggleCollapseAfter( self ):
"""
Collapses the splitter after this handle.
"""
if ( self.isCollapsed() ):
self.uncollapse()
else:
self.collapse( XSplitterHandle.CollapseDirection.After ) | ['def', 'toggleCollapseAfter', '(', 'self', ')', ':', 'if', '(', 'self', '.', 'isCollapsed', '(', ')', ')', ':', 'self', '.', 'uncollapse', '(', ')', 'else', ':', 'self', '.', 'collapse', '(', 'XSplitterHandle', '.', 'CollapseDirection', '.', 'After', ')'] | Collapses the splitter after this handle. | ['Collapses', 'the', 'splitter', 'after', 'this', 'handle', '.'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xsplitter.py#L287-L294 |
9,533 | dpmcmlxxvi/pixelscan | pixelscan/pixelscan.py | scale.next | def next(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xr = self.sx * x
yr = self.sy * y
return xr, yr | python | def next(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xr = self.sx * x
yr = self.sy * y
return xr, yr | ['def', 'next', '(', 'self', ')', ':', 'x', ',', 'y', '=', 'next', '(', 'self', '.', 'scan', ')', 'xr', '=', 'self', '.', 'sx', '*', 'x', 'yr', '=', 'self', '.', 'sy', '*', 'y', 'return', 'xr', ',', 'yr'] | Next point in iteration | ['Next', 'point', 'in', 'iteration'] | train | https://github.com/dpmcmlxxvi/pixelscan/blob/d641207b13a8fc5bf7ac9964b982971652bb0a7e/pixelscan/pixelscan.py#L274-L280 |
9,534 | eternnoir/pyTelegramBotAPI | telebot/__init__.py | TeleBot.send_video_note | def send_video_note(self, chat_id, data, duration=None, length=None, reply_to_message_id=None, reply_markup=None,
disable_notification=None, timeout=None):
"""
Use this method to send video files, Telegram clients support mp4 videos.
:param chat_id: Integer : Unique identifier for the message recipient — User or GroupChat id
:param data: InputFile or String : Video note to send. You can either pass a file_id as String to resend a video that is already on the Telegram server
:param duration: Integer : Duration of sent video in seconds
:param length: Integer : Video width and height, Can't be None and should be in range of (0, 640)
:param reply_to_message_id:
:param reply_markup:
:return:
"""
return types.Message.de_json(
apihelper.send_video_note(self.token, chat_id, data, duration, length, reply_to_message_id, reply_markup,
disable_notification, timeout)) | python | def send_video_note(self, chat_id, data, duration=None, length=None, reply_to_message_id=None, reply_markup=None,
disable_notification=None, timeout=None):
"""
Use this method to send video files, Telegram clients support mp4 videos.
:param chat_id: Integer : Unique identifier for the message recipient — User or GroupChat id
:param data: InputFile or String : Video note to send. You can either pass a file_id as String to resend a video that is already on the Telegram server
:param duration: Integer : Duration of sent video in seconds
:param length: Integer : Video width and height, Can't be None and should be in range of (0, 640)
:param reply_to_message_id:
:param reply_markup:
:return:
"""
return types.Message.de_json(
apihelper.send_video_note(self.token, chat_id, data, duration, length, reply_to_message_id, reply_markup,
disable_notification, timeout)) | ['def', 'send_video_note', '(', 'self', ',', 'chat_id', ',', 'data', ',', 'duration', '=', 'None', ',', 'length', '=', 'None', ',', 'reply_to_message_id', '=', 'None', ',', 'reply_markup', '=', 'None', ',', 'disable_notification', '=', 'None', ',', 'timeout', '=', 'None', ')', ':', 'return', 'types', '.', 'Message', '.', 'de_json', '(', 'apihelper', '.', 'send_video_note', '(', 'self', '.', 'token', ',', 'chat_id', ',', 'data', ',', 'duration', ',', 'length', ',', 'reply_to_message_id', ',', 'reply_markup', ',', 'disable_notification', ',', 'timeout', ')', ')'] | Use this method to send video files, Telegram clients support mp4 videos.
:param chat_id: Integer : Unique identifier for the message recipient — User or GroupChat id
:param data: InputFile or String : Video note to send. You can either pass a file_id as String to resend a video that is already on the Telegram server
:param duration: Integer : Duration of sent video in seconds
:param length: Integer : Video width and height, Can't be None and should be in range of (0, 640)
:param reply_to_message_id:
:param reply_markup:
:return: | ['Use', 'this', 'method', 'to', 'send', 'video', 'files', 'Telegram', 'clients', 'support', 'mp4', 'videos', '.', ':', 'param', 'chat_id', ':', 'Integer', ':', 'Unique', 'identifier', 'for', 'the', 'message', 'recipient', '—', 'User', 'or', 'GroupChat', 'id', ':', 'param', 'data', ':', 'InputFile', 'or', 'String', ':', 'Video', 'note', 'to', 'send', '.', 'You', 'can', 'either', 'pass', 'a', 'file_id', 'as', 'String', 'to', 'resend', 'a', 'video', 'that', 'is', 'already', 'on', 'the', 'Telegram', 'server', ':', 'param', 'duration', ':', 'Integer', ':', 'Duration', 'of', 'sent', 'video', 'in', 'seconds', ':', 'param', 'length', ':', 'Integer', ':', 'Video', 'width', 'and', 'height', 'Can', 't', 'be', 'None', 'and', 'should', 'be', 'in', 'range', 'of', '(', '0', '640', ')', ':', 'param', 'reply_to_message_id', ':', ':', 'param', 'reply_markup', ':', ':', 'return', ':'] | train | https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L722-L736 |
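A hedged usage sketch based on the signature above; the token, chat id and file name are placeholders, and Telegram expects a short square mp4, so `length` is the frame edge in pixels:

```python
import telebot

bot = telebot.TeleBot("YOUR_BOT_TOKEN")  # placeholder token
with open("note.mp4", "rb") as video:    # hypothetical square mp4 clip
    bot.send_video_note(123456789, video, duration=10, length=360)
```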
9,535 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/_package.py | analyze | def analyze(output_dir, dataset, cloud=False, project_id=None):
"""Blocking version of analyze_async. See documentation of analyze_async."""
job = analyze_async(
output_dir=output_dir,
dataset=dataset,
cloud=cloud,
project_id=project_id)
job.wait()
print('Analyze: ' + str(job.state)) | python | def analyze(output_dir, dataset, cloud=False, project_id=None):
"""Blocking version of analyze_async. See documentation of analyze_async."""
job = analyze_async(
output_dir=output_dir,
dataset=dataset,
cloud=cloud,
project_id=project_id)
job.wait()
print('Analyze: ' + str(job.state)) | ['def', 'analyze', '(', 'output_dir', ',', 'dataset', ',', 'cloud', '=', 'False', ',', 'project_id', '=', 'None', ')', ':', 'job', '=', 'analyze_async', '(', 'output_dir', '=', 'output_dir', ',', 'dataset', '=', 'dataset', ',', 'cloud', '=', 'cloud', ',', 'project_id', '=', 'project_id', ')', 'job', '.', 'wait', '(', ')', 'print', '(', "'Analyze: '", '+', 'str', '(', 'job', '.', 'state', ')', ')'] | Blocking version of analyze_async. See documentation of analyze_async. | ['Blocking', 'version', 'of', 'analyze_async', '.', 'See', 'documentation', 'of', 'analyze_async', '.'] | train | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L136-L144 |
9,536 | aestrivex/bctpy | bct/algorithms/similarity.py | corr_flat_dir | def corr_flat_dir(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
directed matrix 1
A2 : NxN np.ndarray
directed matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
ix = np.logical_not(np.eye(n))
return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1] | python | def corr_flat_dir(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
directed matrix 1
A2 : NxN np.ndarray
directed matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
ix = np.logical_not(np.eye(n))
return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1] | ['def', 'corr_flat_dir', '(', 'a1', ',', 'a2', ')', ':', 'n', '=', 'len', '(', 'a1', ')', 'if', 'len', '(', 'a2', ')', '!=', 'n', ':', 'raise', 'BCTParamError', '(', '"Cannot calculate flattened correlation on "', '"matrices of different size"', ')', 'ix', '=', 'np', '.', 'logical_not', '(', 'np', '.', 'eye', '(', 'n', ')', ')', 'return', 'np', '.', 'corrcoef', '(', 'a1', '[', 'ix', ']', '.', 'flat', ',', 'a2', '[', 'ix', ']', '.', 'flat', ')', '[', '0', ']', '[', '1', ']'] | Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
directed matrix 1
A2 : NxN np.ndarray
directed matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2 | ['Returns', 'the', 'correlation', 'coefficient', 'between', 'two', 'flattened', 'adjacency', 'matrices', '.', 'Similarity', 'metric', 'for', 'weighted', 'matrices', '.'] | train | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/similarity.py#L355-L377 |
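A quick NumPy check of the metric above on two similar random directed matrices (values are synthetic):

```python
import numpy as np

rng = np.random.default_rng(0)
a1 = rng.random((5, 5))
a2 = 0.9 * a1 + 0.1 * rng.random((5, 5))      # a noisy copy of a1

ix = np.logical_not(np.eye(5, dtype=bool))    # drop the diagonal entries
r = np.corrcoef(a1[ix], a2[ix])[0][1]
print(round(r, 3))                            # close to 1.0 for similar matrices
```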
9,537 | chibisov/drf-extensions | docs/backdoc.py | Markdown._do_smart_punctuation | def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text | python | def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text | ['def', '_do_smart_punctuation', '(', 'self', ',', 'text', ')', ':', 'if', '"\'"', 'in', 'text', ':', '# guard for perf', 'text', '=', 'self', '.', '_do_smart_contractions', '(', 'text', ')', 'text', '=', 'self', '.', '_opening_single_quote_re', '.', 'sub', '(', '"‘"', ',', 'text', ')', 'text', '=', 'self', '.', '_closing_single_quote_re', '.', 'sub', '(', '"’"', ',', 'text', ')', 'if', '\'"\'', 'in', 'text', ':', '# guard for perf', 'text', '=', 'self', '.', '_opening_double_quote_re', '.', 'sub', '(', '"“"', ',', 'text', ')', 'text', '=', 'self', '.', '_closing_double_quote_re', '.', 'sub', '(', '"”"', ',', 'text', ')', 'text', '=', 'text', '.', 'replace', '(', '"---"', ',', '"—"', ')', 'text', '=', 'text', '.', 'replace', '(', '"--"', ',', '"–"', ')', 'text', '=', 'text', '.', 'replace', '(', '"..."', ',', '"…"', ')', 'text', '=', 'text', '.', 'replace', '(', '" . . . "', ',', '"…"', ')', 'text', '=', 'text', '.', 'replace', '(', '". . ."', ',', '"…"', ')', 'return', 'text'] | Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants. | ['Fancifies', 'single', 'quotes', 'double', 'quotes', 'and', 'apostrophes', '.', 'Converts', '--', '---', 'and', '...', 'into', 'en', 'dashes', 'em', 'dashes', 'and', 'ellipses', '.'] | train | https://github.com/chibisov/drf-extensions/blob/1d28a4b28890eab5cd19e93e042f8590c8c2fb8b/docs/backdoc.py#L1653-L1677 |
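The dash and ellipsis conversions are plain substring replacements; quote curling needs the regexes defined elsewhere in the class and is left out. A sketch of just the replacement part, using the same HTML entities as the source:

```python
def smart_dashes_and_ellipses(text):
    text = text.replace("---", "&#8212;")   # em dash entity
    text = text.replace("--", "&#8211;")    # en dash entity
    text = text.replace("...", "&#8230;")
    text = text.replace(". . .", "&#8230;")
    return text

print(smart_dashes_and_ellipses("wait--no---really..."))
# wait&#8211;no&#8212;really&#8230;
```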
9,538 | watson-developer-cloud/python-sdk | ibm_watson/compare_comply_v1.py | UnalignedElement._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict | python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_label') and self.document_label is not None:
_dict['document_label'] = self.document_label
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'types') and self.types is not None:
_dict['types'] = [x._to_dict() for x in self.types]
if hasattr(self, 'categories') and self.categories is not None:
_dict['categories'] = [x._to_dict() for x in self.categories]
if hasattr(self, 'attributes') and self.attributes is not None:
_dict['attributes'] = [x._to_dict() for x in self.attributes]
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'document_label'", ')', 'and', 'self', '.', 'document_label', 'is', 'not', 'None', ':', '_dict', '[', "'document_label'", ']', '=', 'self', '.', 'document_label', 'if', 'hasattr', '(', 'self', ',', "'location'", ')', 'and', 'self', '.', 'location', 'is', 'not', 'None', ':', '_dict', '[', "'location'", ']', '=', 'self', '.', 'location', '.', '_to_dict', '(', ')', 'if', 'hasattr', '(', 'self', ',', "'text'", ')', 'and', 'self', '.', 'text', 'is', 'not', 'None', ':', '_dict', '[', "'text'", ']', '=', 'self', '.', 'text', 'if', 'hasattr', '(', 'self', ',', "'types'", ')', 'and', 'self', '.', 'types', 'is', 'not', 'None', ':', '_dict', '[', "'types'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'types', ']', 'if', 'hasattr', '(', 'self', ',', "'categories'", ')', 'and', 'self', '.', 'categories', 'is', 'not', 'None', ':', '_dict', '[', "'categories'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'categories', ']', 'if', 'hasattr', '(', 'self', ',', "'attributes'", ')', 'and', 'self', '.', 'attributes', 'is', 'not', 'None', ':', '_dict', '[', "'attributes'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'attributes', ']', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L4999-L5014 |
9,539 | hatemile/hatemile-for-python | hatemile/implementation/assoc.py | AccessibleAssociationImplementation._get_model_table | def _get_model_table(self, part):
"""
Returns a list that represents the table.
:param part: The table header, table footer or table body.
:type part: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The list that represents the table.
:rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
"""
rows = self.parser.find(part).find_children('tr').list_results()
table = []
for row in rows:
table.append(self._get_model_row(self.parser.find(
row
).find_children('td,th').list_results()))
return self._get_valid_model_table(table) | python | def _get_model_table(self, part):
"""
Returns a list that represents the table.
:param part: The table header, table footer or table body.
:type part: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The list that represents the table.
:rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
"""
rows = self.parser.find(part).find_children('tr').list_results()
table = []
for row in rows:
table.append(self._get_model_row(self.parser.find(
row
).find_children('td,th').list_results()))
return self._get_valid_model_table(table) | ['def', '_get_model_table', '(', 'self', ',', 'part', ')', ':', 'rows', '=', 'self', '.', 'parser', '.', 'find', '(', 'part', ')', '.', 'find_children', '(', "'tr'", ')', '.', 'list_results', '(', ')', 'table', '=', '[', ']', 'for', 'row', 'in', 'rows', ':', 'table', '.', 'append', '(', 'self', '.', '_get_model_row', '(', 'self', '.', 'parser', '.', 'find', '(', 'row', ')', '.', 'find_children', '(', "'td,th'", ')', '.', 'list_results', '(', ')', ')', ')', 'return', 'self', '.', '_get_valid_model_table', '(', 'table', ')'] | Returns a list that represents the table.
:param part: The table header, table footer or table body.
:type part: hatemile.util.html.htmldomelement.HTMLDOMElement
:return: The list that represents the table.
:rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) | ['Returns', 'a', 'list', 'that', 'represents', 'the', 'table', '.'] | train | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/assoc.py#L46-L62 |
9,540 | dhermes/bezier | src/bezier/surface.py | Surface._get_degree | def _get_degree(num_nodes):
"""Get the degree of the current surface.
Args:
num_nodes (int): The number of control points for a
B |eacute| zier surface.
Returns:
int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2`
equals ``num_nodes``.
Raises:
ValueError: If ``num_nodes`` isn't a triangular number.
"""
# 8 * num_nodes = 4(d + 1)(d + 2)
# = 4d^2 + 12d + 8
# = (2d + 3)^2 - 1
d_float = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)
d_int = int(np.round(d_float))
if (d_int + 1) * (d_int + 2) == 2 * num_nodes:
return d_int
else:
raise ValueError(num_nodes, "not a triangular number") | python | def _get_degree(num_nodes):
"""Get the degree of the current surface.
Args:
num_nodes (int): The number of control points for a
B |eacute| zier surface.
Returns:
int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2`
equals ``num_nodes``.
Raises:
ValueError: If ``num_nodes`` isn't a triangular number.
"""
# 8 * num_nodes = 4(d + 1)(d + 2)
# = 4d^2 + 12d + 8
# = (2d + 3)^2 - 1
d_float = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)
d_int = int(np.round(d_float))
if (d_int + 1) * (d_int + 2) == 2 * num_nodes:
return d_int
else:
raise ValueError(num_nodes, "not a triangular number") | ['def', '_get_degree', '(', 'num_nodes', ')', ':', '# 8 * num_nodes = 4(d + 1)(d + 2)', '# = 4d^2 + 12d + 8', '# = (2d + 3)^2 - 1', 'd_float', '=', '0.5', '*', '(', 'np', '.', 'sqrt', '(', '8.0', '*', 'num_nodes', '+', '1.0', ')', '-', '3.0', ')', 'd_int', '=', 'int', '(', 'np', '.', 'round', '(', 'd_float', ')', ')', 'if', '(', 'd_int', '+', '1', ')', '*', '(', 'd_int', '+', '2', ')', '==', '2', '*', 'num_nodes', ':', 'return', 'd_int', 'else', ':', 'raise', 'ValueError', '(', 'num_nodes', ',', '"not a triangular number"', ')'] | Get the degree of the current surface.
Args:
num_nodes (int): The number of control points for a
B |eacute| zier surface.
Returns:
int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2`
equals ``num_nodes``.
Raises:
ValueError: If ``num_nodes`` isn't a triangular number. | ['Get', 'the', 'degree', 'of', 'the', 'current', 'surface', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/surface.py#L214-L237 |
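The degree is recovered by inverting (d + 1)(d + 2)/2 = num_nodes; a small round-trip check of that formula:

```python
import numpy as np

def degree_from_nodes(num_nodes):
    d = int(np.round(0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)))
    if (d + 1) * (d + 2) != 2 * num_nodes:
        raise ValueError(num_nodes, "not a triangular number")
    return d

# triangular node counts 3, 6, 10, 15, 21 map back to degrees 1..5
print([degree_from_nodes((d + 1) * (d + 2) // 2) for d in range(1, 6)])
```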
9,541 | materialsproject/pymatgen | pymatgen/core/bonds.py | get_bond_length | def get_bond_length(sp1, sp2, bond_order=1):
"""
Get the bond length between two species.
Args:
sp1 (Specie): First specie.
sp2 (Specie): Second specie.
bond_order: For species with different possible bond orders,
this allows one to obtain the bond length for a particular bond
order. For example, to get the C=C bond length instead of the
C-C bond length, this should be set to 2. Defaults to 1.
Returns:
Bond length in Angstrom. If no data is available, the sum of the atomic
radius is used.
"""
sp1 = Element(sp1) if isinstance(sp1, str) else sp1
sp2 = Element(sp2) if isinstance(sp2, str) else sp2
try:
all_lengths = obtain_all_bond_lengths(sp1, sp2)
return all_lengths[bond_order]
# The ValueError is raised in `obtain_all_bond_lengths` where no bond
# data for both elements is found. The KeyError is raised in
# `__getitem__` method of `dict` builtin class where although bond data
# for both elements is found, the data for specified bond order does
# not exist. In both cases, sum of atomic radius is returned.
except (ValueError, KeyError):
warnings.warn("No order %d bond lengths between %s and %s found in "
"database. Returning sum of atomic radius."
% (bond_order, sp1, sp2))
return sp1.atomic_radius + sp2.atomic_radius | python | def get_bond_length(sp1, sp2, bond_order=1):
"""
Get the bond length between two species.
Args:
sp1 (Specie): First specie.
sp2 (Specie): Second specie.
bond_order: For species with different possible bond orders,
this allows one to obtain the bond length for a particular bond
order. For example, to get the C=C bond length instead of the
C-C bond length, this should be set to 2. Defaults to 1.
Returns:
Bond length in Angstrom. If no data is available, the sum of the atomic
radius is used.
"""
sp1 = Element(sp1) if isinstance(sp1, str) else sp1
sp2 = Element(sp2) if isinstance(sp2, str) else sp2
try:
all_lengths = obtain_all_bond_lengths(sp1, sp2)
return all_lengths[bond_order]
# The ValueError is raised in `obtain_all_bond_lengths` where no bond
# data for both elements is found. The KeyError is raised in
# `__getitem__` method of `dict` builtin class where although bond data
# for both elements is found, the data for specified bond order does
# not exist. In both cases, sum of atomic radius is returned.
except (ValueError, KeyError):
warnings.warn("No order %d bond lengths between %s and %s found in "
"database. Returning sum of atomic radius."
% (bond_order, sp1, sp2))
return sp1.atomic_radius + sp2.atomic_radius | ['def', 'get_bond_length', '(', 'sp1', ',', 'sp2', ',', 'bond_order', '=', '1', ')', ':', 'sp1', '=', 'Element', '(', 'sp1', ')', 'if', 'isinstance', '(', 'sp1', ',', 'str', ')', 'else', 'sp1', 'sp2', '=', 'Element', '(', 'sp2', ')', 'if', 'isinstance', '(', 'sp2', ',', 'str', ')', 'else', 'sp2', 'try', ':', 'all_lengths', '=', 'obtain_all_bond_lengths', '(', 'sp1', ',', 'sp2', ')', 'return', 'all_lengths', '[', 'bond_order', ']', '# The ValueError is raised in `obtain_all_bond_lengths` where no bond', '# data for both elements is found. The KeyError is raised in', '# `__getitem__` method of `dict` builtin class where although bond data', '# for both elements is found, the data for specified bond order does', '# not exist. In both cases, sum of atomic radius is returned.', 'except', '(', 'ValueError', ',', 'KeyError', ')', ':', 'warnings', '.', 'warn', '(', '"No order %d bond lengths between %s and %s found in "', '"database. Returning sum of atomic radius."', '%', '(', 'bond_order', ',', 'sp1', ',', 'sp2', ')', ')', 'return', 'sp1', '.', 'atomic_radius', '+', 'sp2', '.', 'atomic_radius'] | Get the bond length between two species.
Args:
sp1 (Specie): First specie.
sp2 (Specie): Second specie.
bond_order: For species with different possible bond orders,
this allows one to obtain the bond length for a particular bond
order. For example, to get the C=C bond length instead of the
C-C bond length, this should be set to 2. Defaults to 1.
Returns:
Bond length in Angstrom. If no data is available, the sum of the atomic
radius is used. | ['Get', 'the', 'bond', 'length', 'between', 'two', 'species', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/bonds.py#L199-L229 |
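The lookup-with-fallback behaviour can be sketched without pymatgen; the table and radii below are placeholder numbers used only to show the control flow, not authoritative data:

```python
import warnings

TOY_LENGTHS = {("C", "C"): {1: 1.54, 2: 1.34}}   # placeholder bond-length table
TOY_RADII = {"C": 0.77, "Si": 1.11}              # placeholder atomic radii

def toy_bond_length(sp1, sp2, bond_order=1):
    lengths = TOY_LENGTHS.get(tuple(sorted((sp1, sp2))), {})
    try:
        return lengths[bond_order]
    except KeyError:
        warnings.warn("No tabulated length; returning sum of atomic radii.")
        return TOY_RADII[sp1] + TOY_RADII[sp2]

print(toy_bond_length("C", "C", 2))  # 1.34, found in the table
print(toy_bond_length("C", "Si"))    # 1.88, via the radius fallback
```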
9,542 | bfontaine/p7magma | magma/courses.py | CoursesList._populate | def _populate(self, soup):
"""
Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object.
"""
tables = soup.select('table[rules=all]')
if not tables:
return
trs = tables[0].select('tr')[1:]
if len(trs[0]) == 5:
# M1
self._populate_small_table(trs)
else:
# M2
self._populate_large_table(trs) | python | def _populate(self, soup):
"""
Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object.
"""
tables = soup.select('table[rules=all]')
if not tables:
return
trs = tables[0].select('tr')[1:]
if len(trs[0]) == 5:
# M1
self._populate_small_table(trs)
else:
# M2
self._populate_large_table(trs) | ['def', '_populate', '(', 'self', ',', 'soup', ')', ':', 'tables', '=', 'soup', '.', 'select', '(', "'table[rules=all]'", ')', 'if', 'not', 'tables', ':', 'return', 'trs', '=', 'tables', '[', '0', ']', '.', 'select', '(', "'tr'", ')', '[', '1', ':', ']', 'if', 'len', '(', 'trs', '[', '0', ']', ')', '==', '5', ':', '# M1', 'self', '.', '_populate_small_table', '(', 'trs', ')', 'else', ':', '# M2', 'self', '.', '_populate_large_table', '(', 'trs', ')'] | Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object. | ['Populate', 'the', 'list', 'assuming', 'soup', 'is', 'a', 'BeautifulSoup', 'object', '.'] | train | https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/courses.py#L70-L84 |
9,543 | lacava/few | few/population.py | PopMixin.stacks_2_eqns | def stacks_2_eqns(self,stacks):
"""returns equation strings from stacks"""
if stacks:
return list(map(lambda p: self.stack_2_eqn(p), stacks))
else:
return [] | python | def stacks_2_eqns(self,stacks):
"""returns equation strings from stacks"""
if stacks:
return list(map(lambda p: self.stack_2_eqn(p), stacks))
else:
return [] | ['def', 'stacks_2_eqns', '(', 'self', ',', 'stacks', ')', ':', 'if', 'stacks', ':', 'return', 'list', '(', 'map', '(', 'lambda', 'p', ':', 'self', '.', 'stack_2_eqn', '(', 'p', ')', ',', 'stacks', ')', ')', 'else', ':', 'return', '[', ']'] | returns equation strings from stacks | ['returns', 'equation', 'strings', 'from', 'stacks'] | train | https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L199-L204 |
9,544 | Azure/azure-cli-extensions | src/interactive/azext_interactive/azclishell/app.py | space_toolbar | def space_toolbar(settings_items, empty_space):
""" formats the toolbar """
counter = 0
for part in settings_items:
counter += len(part)
if len(settings_items) == 1:
spacing = ''
else:
spacing = empty_space[
:int(math.floor((len(empty_space) - counter) / (len(settings_items) - 1)))]
settings = spacing.join(settings_items)
empty_space = empty_space[len(NOTIFICATIONS) + len(settings) + 1:]
return settings, empty_space | python | def space_toolbar(settings_items, empty_space):
""" formats the toolbar """
counter = 0
for part in settings_items:
counter += len(part)
if len(settings_items) == 1:
spacing = ''
else:
spacing = empty_space[
:int(math.floor((len(empty_space) - counter) / (len(settings_items) - 1)))]
settings = spacing.join(settings_items)
empty_space = empty_space[len(NOTIFICATIONS) + len(settings) + 1:]
return settings, empty_space | ['def', 'space_toolbar', '(', 'settings_items', ',', 'empty_space', ')', ':', 'counter', '=', '0', 'for', 'part', 'in', 'settings_items', ':', 'counter', '+=', 'len', '(', 'part', ')', 'if', 'len', '(', 'settings_items', ')', '==', '1', ':', 'spacing', '=', "''", 'else', ':', 'spacing', '=', 'empty_space', '[', ':', 'int', '(', 'math', '.', 'floor', '(', '(', 'len', '(', 'empty_space', ')', '-', 'counter', ')', '/', '(', 'len', '(', 'settings_items', ')', '-', '1', ')', ')', ')', ']', 'settings', '=', 'spacing', '.', 'join', '(', 'settings_items', ')', 'empty_space', '=', 'empty_space', '[', 'len', '(', 'NOTIFICATIONS', ')', '+', 'len', '(', 'settings', ')', '+', '1', ':', ']', 'return', 'settings', ',', 'empty_space'] | formats the toolbar | ['formats', 'the', 'toolbar'] | train | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/interactive/azext_interactive/azclishell/app.py#L62-L77 |
9,545 | boriel/zxbasic | arch/zx48k/optimizer.py | MemCell.destroys | def destroys(self):
""" Returns which single registers (including f, flag)
this instruction changes.
Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r
LD a, X => Destroys a
LD a, a => Destroys nothing
INC a => Destroys a, f
POP af => Destroys a, f, sp
PUSH af => Destroys sp
ret => Destroys SP
"""
if self.asm in arch.zx48k.backend.ASMS:
return ALL_REGS
res = set([])
i = self.inst
o = self.opers
if i in {'push', 'ret', 'call', 'rst', 'reti', 'retn'}:
return ['sp']
if i == 'pop':
res.update('sp', single_registers(o[:1]))
elif i in {'ldi', 'ldir', 'ldd', 'lddr'}:
res.update('a', 'b', 'c', 'd', 'e', 'f')
elif i in {'otir', 'otdr', 'oti', 'otd', 'inir', 'indr', 'ini', 'ind'}:
res.update('h', 'l', 'b')
elif i in {'cpir', 'cpi', 'cpdr', 'cpd'}:
res.update('h', 'l', 'b', 'c', 'f')
elif i in ('ld', 'in'):
res.update(single_registers(o[:1]))
elif i in ('inc', 'dec'):
res.update('f', single_registers(o[:1]))
elif i == 'exx':
res.update('b', 'c', 'd', 'e', 'h', 'l')
elif i == 'ex':
res.update(single_registers(o[0]))
res.update(single_registers(o[1]))
elif i in {'ccf', 'scf', 'bit', 'cp'}:
res.add('f')
elif i in {'or', 'and', 'xor', 'add', 'adc', 'sub', 'sbc'}:
if len(o) > 1:
res.update(single_registers(o[0]))
else:
res.add('a')
res.add('f')
elif i in {'neg', 'cpl', 'daa', 'rra', 'rla', 'rrca', 'rlca', 'rrd', 'rld'}:
res.update('a', 'f')
elif i == 'djnz':
res.update('b', 'f')
elif i in {'rr', 'rl', 'rrc', 'rlc', 'srl', 'sra', 'sll', 'sla'}:
res.update(single_registers(o[0]))
res.add('f')
elif i in ('set', 'res'):
res.update(single_registers(o[1]))
return list(res) | python | def destroys(self):
""" Returns which single registers (including f, flag)
this instruction changes.
Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r
LD a, X => Destroys a
LD a, a => Destroys nothing
INC a => Destroys a, f
POP af => Destroys a, f, sp
PUSH af => Destroys sp
ret => Destroys SP
"""
if self.asm in arch.zx48k.backend.ASMS:
return ALL_REGS
res = set([])
i = self.inst
o = self.opers
if i in {'push', 'ret', 'call', 'rst', 'reti', 'retn'}:
return ['sp']
if i == 'pop':
res.update('sp', single_registers(o[:1]))
elif i in {'ldi', 'ldir', 'ldd', 'lddr'}:
res.update('a', 'b', 'c', 'd', 'e', 'f')
elif i in {'otir', 'otdr', 'oti', 'otd', 'inir', 'indr', 'ini', 'ind'}:
res.update('h', 'l', 'b')
elif i in {'cpir', 'cpi', 'cpdr', 'cpd'}:
res.update('h', 'l', 'b', 'c', 'f')
elif i in ('ld', 'in'):
res.update(single_registers(o[:1]))
elif i in ('inc', 'dec'):
res.update('f', single_registers(o[:1]))
elif i == 'exx':
res.update('b', 'c', 'd', 'e', 'h', 'l')
elif i == 'ex':
res.update(single_registers(o[0]))
res.update(single_registers(o[1]))
elif i in {'ccf', 'scf', 'bit', 'cp'}:
res.add('f')
elif i in {'or', 'and', 'xor', 'add', 'adc', 'sub', 'sbc'}:
if len(o) > 1:
res.update(single_registers(o[0]))
else:
res.add('a')
res.add('f')
elif i in {'neg', 'cpl', 'daa', 'rra', 'rla', 'rrca', 'rlca', 'rrd', 'rld'}:
res.update('a', 'f')
elif i == 'djnz':
res.update('b', 'f')
elif i in {'rr', 'rl', 'rrc', 'rlc', 'srl', 'sra', 'sll', 'sla'}:
res.update(single_registers(o[0]))
res.add('f')
elif i in ('set', 'res'):
res.update(single_registers(o[1]))
return list(res) | ['def', 'destroys', '(', 'self', ')', ':', 'if', 'self', '.', 'asm', 'in', 'arch', '.', 'zx48k', '.', 'backend', '.', 'ASMS', ':', 'return', 'ALL_REGS', 'res', '=', 'set', '(', '[', ']', ')', 'i', '=', 'self', '.', 'inst', 'o', '=', 'self', '.', 'opers', 'if', 'i', 'in', '{', "'push'", ',', "'ret'", ',', "'call'", ',', "'rst'", ',', "'reti'", ',', "'retn'", '}', ':', 'return', '[', "'sp'", ']', 'if', 'i', '==', "'pop'", ':', 'res', '.', 'update', '(', "'sp'", ',', 'single_registers', '(', 'o', '[', ':', '1', ']', ')', ')', 'elif', 'i', 'in', '{', "'ldi'", ',', "'ldir'", ',', "'ldd'", ',', "'lddr'", '}', ':', 'res', '.', 'update', '(', "'a'", ',', "'b'", ',', "'c'", ',', "'d'", ',', "'e'", ',', "'f'", ')', 'elif', 'i', 'in', '{', "'otir'", ',', "'otdr'", ',', "'oti'", ',', "'otd'", ',', "'inir'", ',', "'indr'", ',', "'ini'", ',', "'ind'", '}', ':', 'res', '.', 'update', '(', "'h'", ',', "'l'", ',', "'b'", ')', 'elif', 'i', 'in', '{', "'cpir'", ',', "'cpi'", ',', "'cpdr'", ',', "'cpd'", '}', ':', 'res', '.', 'update', '(', "'h'", ',', "'l'", ',', "'b'", ',', "'c'", ',', "'f'", ')', 'elif', 'i', 'in', '(', "'ld'", ',', "'in'", ')', ':', 'res', '.', 'update', '(', 'single_registers', '(', 'o', '[', ':', '1', ']', ')', ')', 'elif', 'i', 'in', '(', "'inc'", ',', "'dec'", ')', ':', 'res', '.', 'update', '(', "'f'", ',', 'single_registers', '(', 'o', '[', ':', '1', ']', ')', ')', 'elif', 'i', '==', "'exx'", ':', 'res', '.', 'update', '(', "'b'", ',', "'c'", ',', "'d'", ',', "'e'", ',', "'h'", ',', "'l'", ')', 'elif', 'i', '==', "'ex'", ':', 'res', '.', 'update', '(', 'single_registers', '(', 'o', '[', '0', ']', ')', ')', 'res', '.', 'update', '(', 'single_registers', '(', 'o', '[', '1', ']', ')', ')', 'elif', 'i', 'in', '{', "'ccf'", ',', "'scf'", ',', "'bit'", ',', "'cp'", '}', ':', 'res', '.', 'add', '(', "'f'", ')', 'elif', 'i', 'in', '{', "'or'", ',', "'and'", ',', "'xor'", ',', "'add'", ',', "'adc'", ',', "'sub'", ',', "'sbc'", '}', ':', 'if', 'len', '(', 'o', ')', '>', '1', ':', 'res', '.', 'update', '(', 'single_registers', '(', 'o', '[', '0', ']', ')', ')', 'else', ':', 'res', '.', 'add', '(', "'a'", ')', 'res', '.', 'add', '(', "'f'", ')', 'elif', 'i', 'in', '{', "'neg'", ',', "'cpl'", ',', "'daa'", ',', "'rra'", ',', "'rla'", ',', "'rrca'", ',', "'rlca'", ',', "'rrd'", ',', "'rld'", '}', ':', 'res', '.', 'update', '(', "'a'", ',', "'f'", ')', 'elif', 'i', '==', "'djnz'", ':', 'res', '.', 'update', '(', "'b'", ',', "'f'", ')', 'elif', 'i', 'in', '{', "'rr'", ',', "'rl'", ',', "'rrc'", ',', "'rlc'", ',', "'srl'", ',', "'sra'", ',', "'sll'", ',', "'sla'", '}', ':', 'res', '.', 'update', '(', 'single_registers', '(', 'o', '[', '0', ']', ')', ')', 'res', '.', 'add', '(', "'f'", ')', 'elif', 'i', 'in', '(', "'set'", ',', "'res'", ')', ':', 'res', '.', 'update', '(', 'single_registers', '(', 'o', '[', '1', ']', ')', ')', 'return', 'list', '(', 'res', ')'] | Returns which single registers (including f, flag)
this instruction changes.
Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r
LD a, X => Destroys a
LD a, a => Destroys nothing
INC a => Destroys a, f
POP af => Destroys a, f, sp
PUSH af => Destroys sp
ret => Destroys SP | ['Returns', 'which', 'single', 'registers', '(', 'including', 'f', 'flag', ')', 'this', 'instruction', 'changes', '.'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L966-L1027 |
9,546 | MacHu-GWU/loggerFactory-project | loggerFactory/logger.py | BaseLogger.recover_all_handler | def recover_all_handler(self):
"""
Relink the file handler association you just removed.
"""
for handler in self._handler_cache:
self.logger.addHandler(handler)
self._handler_cache = list() | python | def recover_all_handler(self):
"""
Relink the file handler association you just removed.
"""
for handler in self._handler_cache:
self.logger.addHandler(handler)
self._handler_cache = list() | ['def', 'recover_all_handler', '(', 'self', ')', ':', 'for', 'handler', 'in', 'self', '.', '_handler_cache', ':', 'self', '.', 'logger', '.', 'addHandler', '(', 'handler', ')', 'self', '.', '_handler_cache', '=', 'list', '(', ')'] | Relink the file handler association you just removed. | ['Relink', 'the', 'file', 'handler', 'association', 'you', 'just', 'removed', '.'] | train | https://github.com/MacHu-GWU/loggerFactory-project/blob/4de19e275e01dc583b1af9ceeacef0c6084cd6e0/loggerFactory/logger.py#L119-L125 |
9,547 | MonashBI/arcana | arcana/study/base.py | Study.data | def data(self, name, subject_ids=None, visit_ids=None, session_ids=None,
**kwargs):
"""
Returns the Fileset(s) or Field(s) associated with the provided spec
name(s), generating derived filesets as required. Multiple names in a
list can be provided, to allow their workflows to be combined into a
single workflow.
Parameters
----------
name : str | List[str]
The name of the FilesetSpec|FieldSpec to retrieve the
filesets for
subject_id : str | None
The subject ID of the data to return. If provided (including None
values) the data will be returned as a single item instead of a
collection
visit_id : str | None
The visit ID of the data to return. If provided (including None
values) the data will be returned as a single item instead of a
collection
subject_ids : list[str]
The subject IDs to include in the returned collection
visit_ids : list[str]
The visit IDs to include in the returned collection
session_ids : list[str]
The session IDs (i.e. 2-tuples of the form
(<subject-id>, <visit-id>) to include in the returned collection
Returns
-------
data : BaseItem | BaseCollection | list[BaseItem | BaseCollection]
If 'subject_id' or 'visit_id' is provided then the data returned is
a single Fileset or Field. Otherwise a collection of Filesets or
Fields are returned. If multiple spec names are provided then a
list of items or collections corresponding to each spec name.
"""
if isinstance(name, basestring):
single_name = True
names = [name]
else:
names = name
single_name = False
single_item = 'subject_id' in kwargs or 'visit_id' in kwargs
filter_items = (subject_ids, visit_ids, session_ids) != (None, None,
None)
specs = [self.spec(n) for n in names]
if single_item:
if filter_items:
raise ArcanaUsageError(
"Cannot provide 'subject_id' and/or 'visit_id' in "
"combination with 'subject_ids', 'visit_ids' or "
"'session_ids'")
subject_id = kwargs.pop('subject_id', None)
visit_id = kwargs.pop('visit_id', None)
iterators = set(chain(self.FREQUENCIES[s.frequency]
for s in specs))
if subject_id is not None and visit_id is not None:
session_ids = [(subject_id, visit_id)]
elif subject_id is not None:
if self.VISIT_ID in iterators:
raise ArcanaUsageError(
"Non-None values for visit IDs need to be "
"provided to select a single item for each of '{}'"
.format("', '".join(names)))
subject_ids = [subject_id]
elif visit_id is not None:
if self.SUBJECT_ID in iterators:
raise ArcanaUsageError(
"Non-None values for subject IDs need to be "
"provided to select a single item for each of '{}'"
.format("', '".join(names)))
visit_ids = [visit_id]
elif iterators:
raise ArcanaUsageError(
"Non-None values for subject and/or visit IDs need to be "
"provided to select a single item for each of '{}'"
.format("', '".join(names)))
# Work out which pipelines need to be run
pipeline_getters = defaultdict(set)
for spec in specs:
if spec.derived or spec.derivable: # Filter out Study inputs
# Add name of spec to set of required outputs
pipeline_getters[spec.pipeline_getter].add(spec.name)
# Run required pipelines
if pipeline_getters:
kwargs = copy(kwargs)
kwargs.update({'subject_ids': subject_ids,
'visit_ids': visit_ids,
'session_ids': session_ids})
pipelines, required_outputs = zip(*(
(self.pipeline(k), v) for k, v in pipeline_getters.items()))
kwargs['required_outputs'] = required_outputs
self.processor.run(*pipelines, **kwargs)
# Find and return Item/Collection corresponding to requested spec
# names
all_data = []
for name in names:
spec = self.bound_spec(name)
data = spec.collection
if single_item:
data = data.item(subject_id=subject_id, visit_id=visit_id)
elif filter_items and spec.frequency != 'per_study':
if subject_ids is None:
subject_ids = []
if visit_ids is None:
visit_ids = []
if session_ids is None:
session_ids = []
if spec.frequency == 'per_session':
data = [d for d in data
if (d.subject_id in subject_ids or
d.visit_id in visit_ids or
d.session_id in session_ids)]
elif spec.frequency == 'per_subject':
data = [d for d in data
if (d.subject_id in subject_ids or
d.subject_id in [s[0] for s in session_ids])]
elif spec.frequency == 'per_visit':
data = [d for d in data
if (d.visit_id in visit_ids or
d.visit_id in [s[1] for s in session_ids])]
if not data:
raise ArcanaUsageError(
"No matching data found (subject_ids={}, visit_ids={} "
", session_ids={})"
.format(subject_ids, visit_ids, session_ids))
data = spec.CollectionClass(spec.name, data)
if single_name:
return data
else:
all_data.append(data)
return all_data | python | def data(self, name, subject_ids=None, visit_ids=None, session_ids=None,
**kwargs):
"""
Returns the Fileset(s) or Field(s) associated with the provided spec
name(s), generating derived filesets as required. Multiple names in a
list can be provided, to allow their workflows to be combined into a
single workflow.
Parameters
----------
name : str | List[str]
The name of the FilesetSpec|FieldSpec to retrieve the
filesets for
subject_id : str | None
The subject ID of the data to return. If provided (including None
values) the data will be returned as a single item instead of a
collection
visit_id : str | None
The visit ID of the data to return. If provided (including None
values) the data will be returned as a single item instead of a
collection
subject_ids : list[str]
The subject IDs to include in the returned collection
visit_ids : list[str]
The visit IDs to include in the returned collection
session_ids : list[str]
The session IDs (i.e. 2-tuples of the form
(<subject-id>, <visit-id>) to include in the returned collection
Returns
-------
data : BaseItem | BaseCollection | list[BaseItem | BaseCollection]
If 'subject_id' or 'visit_id' is provided then the data returned is
a single Fileset or Field. Otherwise a collection of Filesets or
Fields are returned. If multiple spec names are provided then a
list of items or collections corresponding to each spec name.
"""
if isinstance(name, basestring):
single_name = True
names = [name]
else:
names = name
single_name = False
single_item = 'subject_id' in kwargs or 'visit_id' in kwargs
filter_items = (subject_ids, visit_ids, session_ids) != (None, None,
None)
specs = [self.spec(n) for n in names]
if single_item:
if filter_items:
raise ArcanaUsageError(
"Cannot provide 'subject_id' and/or 'visit_id' in "
"combination with 'subject_ids', 'visit_ids' or "
"'session_ids'")
subject_id = kwargs.pop('subject_id', None)
visit_id = kwargs.pop('visit_id', None)
iterators = set(chain(self.FREQUENCIES[s.frequency]
for s in specs))
if subject_id is not None and visit_id is not None:
session_ids = [(subject_id, visit_id)]
elif subject_id is not None:
if self.VISIT_ID in iterators:
raise ArcanaUsageError(
"Non-None values for visit IDs need to be "
"provided to select a single item for each of '{}'"
.format("', '".join(names)))
subject_ids = [subject_id]
elif visit_id is not None:
if self.SUBJECT_ID in iterators:
raise ArcanaUsageError(
"Non-None values for subject IDs need to be "
"provided to select a single item for each of '{}'"
.format("', '".join(names)))
visit_ids = [visit_id]
elif iterators:
raise ArcanaUsageError(
"Non-None values for subject and/or visit IDs need to be "
"provided to select a single item for each of '{}'"
.format("', '".join(names)))
# Work out which pipelines need to be run
pipeline_getters = defaultdict(set)
for spec in specs:
if spec.derived or spec.derivable: # Filter out Study inputs
# Add name of spec to set of required outputs
pipeline_getters[spec.pipeline_getter].add(spec.name)
# Run required pipelines
if pipeline_getters:
kwargs = copy(kwargs)
kwargs.update({'subject_ids': subject_ids,
'visit_ids': visit_ids,
'session_ids': session_ids})
pipelines, required_outputs = zip(*(
(self.pipeline(k), v) for k, v in pipeline_getters.items()))
kwargs['required_outputs'] = required_outputs
self.processor.run(*pipelines, **kwargs)
# Find and return Item/Collection corresponding to requested spec
# names
all_data = []
for name in names:
spec = self.bound_spec(name)
data = spec.collection
if single_item:
data = data.item(subject_id=subject_id, visit_id=visit_id)
elif filter_items and spec.frequency != 'per_study':
if subject_ids is None:
subject_ids = []
if visit_ids is None:
visit_ids = []
if session_ids is None:
session_ids = []
if spec.frequency == 'per_session':
data = [d for d in data
if (d.subject_id in subject_ids or
d.visit_id in visit_ids or
d.session_id in session_ids)]
elif spec.frequency == 'per_subject':
data = [d for d in data
if (d.subject_id in subject_ids or
d.subject_id in [s[0] for s in session_ids])]
elif spec.frequency == 'per_visit':
data = [d for d in data
if (d.visit_id in visit_ids or
d.visit_id in [s[1] for s in session_ids])]
if not data:
raise ArcanaUsageError(
"No matching data found (subject_ids={}, visit_ids={} "
", session_ids={})"
.format(subject_ids, visit_ids, session_ids))
data = spec.CollectionClass(spec.name, data)
if single_name:
return data
else:
all_data.append(data)
return all_data | ['def', 'data', '(', 'self', ',', 'name', ',', 'subject_ids', '=', 'None', ',', 'visit_ids', '=', 'None', ',', 'session_ids', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'isinstance', '(', 'name', ',', 'basestring', ')', ':', 'single_name', '=', 'True', 'names', '=', '[', 'name', ']', 'else', ':', 'names', '=', 'name', 'single_name', '=', 'False', 'single_item', '=', "'subject_id'", 'in', 'kwargs', 'or', "'visit_id'", 'in', 'kwargs', 'filter_items', '=', '(', 'subject_ids', ',', 'visit_ids', ',', 'session_ids', ')', '!=', '(', 'None', ',', 'None', ',', 'None', ')', 'specs', '=', '[', 'self', '.', 'spec', '(', 'n', ')', 'for', 'n', 'in', 'names', ']', 'if', 'single_item', ':', 'if', 'filter_items', ':', 'raise', 'ArcanaUsageError', '(', '"Cannot provide \'subject_id\' and/or \'visit_id\' in "', '"combination with \'subject_ids\', \'visit_ids\' or "', '"\'session_ids\'"', ')', 'subject_id', '=', 'kwargs', '.', 'pop', '(', "'subject_id'", ',', 'None', ')', 'visit_id', '=', 'kwargs', '.', 'pop', '(', "'visit_id'", ',', 'None', ')', 'iterators', '=', 'set', '(', 'chain', '(', 'self', '.', 'FREQUENCIES', '[', 's', '.', 'frequency', ']', 'for', 's', 'in', 'specs', ')', ')', 'if', 'subject_id', 'is', 'not', 'None', 'and', 'visit_id', 'is', 'not', 'None', ':', 'session_ids', '=', '[', '(', 'subject_id', ',', 'visit_id', ')', ']', 'elif', 'subject_id', 'is', 'not', 'None', ':', 'if', 'self', '.', 'VISIT_ID', 'in', 'iterators', ':', 'raise', 'ArcanaUsageError', '(', '"Non-None values for visit IDs need to be "', '"provided to select a single item for each of \'{}\'"', '.', 'format', '(', '"\', \'"', '.', 'join', '(', 'names', ')', ')', ')', 'subject_ids', '=', '[', 'subject_id', ']', 'elif', 'visit_id', 'is', 'not', 'None', ':', 'if', 'self', '.', 'SUBJECT_ID', 'in', 'iterators', ':', 'raise', 'ArcanaUsageError', '(', '"Non-None values for subject IDs need to be "', '"provided to select a single item for each of \'{}\'"', '.', 'format', '(', '"\', \'"', '.', 'join', '(', 'names', ')', ')', ')', 'visit_ids', '=', '[', 'visit_id', ']', 'elif', 'iterators', ':', 'raise', 'ArcanaUsageError', '(', '"Non-None values for subject and/or visit IDs need to be "', '"provided to select a single item for each of \'{}\'"', '.', 'format', '(', '"\', \'"', '.', 'join', '(', 'names', ')', ')', ')', '# Work out which pipelines need to be run', 'pipeline_getters', '=', 'defaultdict', '(', 'set', ')', 'for', 'spec', 'in', 'specs', ':', 'if', 'spec', '.', 'derived', 'or', 'spec', '.', 'derivable', ':', '# Filter out Study inputs', '# Add name of spec to set of required outputs', 'pipeline_getters', '[', 'spec', '.', 'pipeline_getter', ']', '.', 'add', '(', 'spec', '.', 'name', ')', '# Run required pipelines', 'if', 'pipeline_getters', ':', 'kwargs', '=', 'copy', '(', 'kwargs', ')', 'kwargs', '.', 'update', '(', '{', "'subject_ids'", ':', 'subject_ids', ',', "'visit_ids'", ':', 'visit_ids', ',', "'session_ids'", ':', 'session_ids', '}', ')', 'pipelines', ',', 'required_outputs', '=', 'zip', '(', '*', '(', '(', 'self', '.', 'pipeline', '(', 'k', ')', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'pipeline_getters', '.', 'items', '(', ')', ')', ')', 'kwargs', '[', "'required_outputs'", ']', '=', 'required_outputs', 'self', '.', 'processor', '.', 'run', '(', '*', 'pipelines', ',', '*', '*', 'kwargs', ')', '# Find and return Item/Collection corresponding to requested spec', '# names', 'all_data', '=', '[', ']', 'for', 'name', 'in', 'names', ':', 'spec', '=', 'self', '.', 'bound_spec', '(', 'name', 
')', 'data', '=', 'spec', '.', 'collection', 'if', 'single_item', ':', 'data', '=', 'data', '.', 'item', '(', 'subject_id', '=', 'subject_id', ',', 'visit_id', '=', 'visit_id', ')', 'elif', 'filter_items', 'and', 'spec', '.', 'frequency', '!=', "'per_study'", ':', 'if', 'subject_ids', 'is', 'None', ':', 'subject_ids', '=', '[', ']', 'if', 'visit_ids', 'is', 'None', ':', 'visit_ids', '=', '[', ']', 'if', 'session_ids', 'is', 'None', ':', 'session_ids', '=', '[', ']', 'if', 'spec', '.', 'frequency', '==', "'per_session'", ':', 'data', '=', '[', 'd', 'for', 'd', 'in', 'data', 'if', '(', 'd', '.', 'subject_id', 'in', 'subject_ids', 'or', 'd', '.', 'visit_id', 'in', 'visit_ids', 'or', 'd', '.', 'session_id', 'in', 'session_ids', ')', ']', 'elif', 'spec', '.', 'frequency', '==', "'per_subject'", ':', 'data', '=', '[', 'd', 'for', 'd', 'in', 'data', 'if', '(', 'd', '.', 'subject_id', 'in', 'subject_ids', 'or', 'd', '.', 'subject_id', 'in', '[', 's', '[', '0', ']', 'for', 's', 'in', 'session_ids', ']', ')', ']', 'elif', 'spec', '.', 'frequency', '==', "'per_visit'", ':', 'data', '=', '[', 'd', 'for', 'd', 'in', 'data', 'if', '(', 'd', '.', 'visit_id', 'in', 'visit_ids', 'or', 'd', '.', 'visit_id', 'in', '[', 's', '[', '1', ']', 'for', 's', 'in', 'session_ids', ']', ')', ']', 'if', 'not', 'data', ':', 'raise', 'ArcanaUsageError', '(', '"No matching data found (subject_ids={}, visit_ids={} "', '", session_ids={})"', '.', 'format', '(', 'subject_ids', ',', 'visit_ids', ',', 'session_ids', ')', ')', 'data', '=', 'spec', '.', 'CollectionClass', '(', 'spec', '.', 'name', ',', 'data', ')', 'if', 'single_name', ':', 'return', 'data', 'else', ':', 'all_data', '.', 'append', '(', 'data', ')', 'return', 'all_data'] | Returns the Fileset(s) or Field(s) associated with the provided spec
name(s), generating derived filesets as required. Multiple names in a
list can be provided, to allow their workflows to be combined into a
single workflow.
Parameters
----------
name : str | List[str]
The name of the FilesetSpec|FieldSpec to retrieve the
filesets for
subject_id : str | None
The subject ID of the data to return. If provided (including None
values) the data will be returned as a single item instead of a
collection
visit_id : str | None
The visit ID of the data to return. If provided (including None
values) the data will be returned as a single item instead of a
collection
subject_ids : list[str]
The subject IDs to include in the returned collection
visit_ids : list[str]
The visit IDs to include in the returned collection
session_ids : list[str]
The session IDs (i.e. 2-tuples of the form
(<subject-id>, <visit-id>) to include in the returned collection
Returns
-------
data : BaseItem | BaseCollection | list[BaseItem | BaseCollection]
If 'subject_id' or 'visit_id' is provided then the data returned is
a single Fileset or Field. Otherwise a collection of Filesets or
Fields are returned. If multiple spec names are provided then a
list of items or collections corresponding to each spec name. | ['Returns', 'the', 'Fileset', '(', 's', ')', 'or', 'Field', '(', 's', ')', 'associated', 'with', 'the', 'provided', 'spec', 'name', '(', 's', ')', 'generating', 'derived', 'filesets', 'as', 'required', '.', 'Multiple', 'names', 'in', 'a', 'list', 'can', 'be', 'provided', 'to', 'allow', 'their', 'workflows', 'to', 'be', 'combined', 'into', 'a', 'single', 'workflow', '.'] | train | https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L244-L376 |
9,548 | elifesciences/elife-tools | elifetools/parseJATS.py | pub_date | def pub_date(soup):
"""
Return the publishing date in struct format
pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp
Default date_type is pub
"""
pub_date = first(raw_parser.pub_date(soup, date_type="pub"))
if pub_date is None:
pub_date = first(raw_parser.pub_date(soup, date_type="publication"))
if pub_date is None:
return None
(day, month, year) = ymd(pub_date)
return date_struct(year, month, day) | python | def pub_date(soup):
"""
Return the publishing date in struct format
pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp
Default date_type is pub
"""
pub_date = first(raw_parser.pub_date(soup, date_type="pub"))
if pub_date is None:
pub_date = first(raw_parser.pub_date(soup, date_type="publication"))
if pub_date is None:
return None
(day, month, year) = ymd(pub_date)
return date_struct(year, month, day) | ['def', 'pub_date', '(', 'soup', ')', ':', 'pub_date', '=', 'first', '(', 'raw_parser', '.', 'pub_date', '(', 'soup', ',', 'date_type', '=', '"pub"', ')', ')', 'if', 'pub_date', 'is', 'None', ':', 'pub_date', '=', 'first', '(', 'raw_parser', '.', 'pub_date', '(', 'soup', ',', 'date_type', '=', '"publication"', ')', ')', 'if', 'pub_date', 'is', 'None', ':', 'return', 'None', '(', 'day', ',', 'month', ',', 'year', ')', '=', 'ymd', '(', 'pub_date', ')', 'return', 'date_struct', '(', 'year', ',', 'month', ',', 'day', ')'] | Return the publishing date in struct format
pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp
Default date_type is pub | ['Return', 'the', 'publishing', 'date', 'in', 'struct', 'format', 'pub_date_date', 'pub_date_day', 'pub_date_month', 'pub_date_year', 'pub_date_timestamp', 'Default', 'date_type', 'is', 'pub'] | train | https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L354-L366 |
9,549 | daboth/pagan | pagan/generator.py | generate_by_hash | def generate_by_hash(hashcode):
"""Generates an PIL image avatar based on the given
hash String. Acts as the main accessor to pagan."""
img = Image.new(IMAGE_MODE, IMAGE_SIZE, BACKGROUND_COLOR)
if len(hashcode) < 32:
print ("hashcode must have lenght >= 32, %s" % hashcode)
raise FalseHashError
allowed = "0123456789abcdef"
hashcheck = [c in allowed for c in hashcode]
if False in hashcheck:
print ("hashcode has not allowed structure %s" % hashcode)
raise FalseHashError
pixelmap = setup_pixelmap(hashcode)
draw_image(pixelmap, img)
return img | python | def generate_by_hash(hashcode):
"""Generates an PIL image avatar based on the given
hash String. Acts as the main accessor to pagan."""
img = Image.new(IMAGE_MODE, IMAGE_SIZE, BACKGROUND_COLOR)
if len(hashcode) < 32:
print ("hashcode must have lenght >= 32, %s" % hashcode)
raise FalseHashError
allowed = "0123456789abcdef"
hashcheck = [c in allowed for c in hashcode]
if False in hashcheck:
print ("hashcode has not allowed structure %s" % hashcode)
raise FalseHashError
pixelmap = setup_pixelmap(hashcode)
draw_image(pixelmap, img)
return img | ['def', 'generate_by_hash', '(', 'hashcode', ')', ':', 'img', '=', 'Image', '.', 'new', '(', 'IMAGE_MODE', ',', 'IMAGE_SIZE', ',', 'BACKGROUND_COLOR', ')', 'if', 'len', '(', 'hashcode', ')', '<', '32', ':', 'print', '(', '"hashcode must have lenght >= 32, %s"', '%', 'hashcode', ')', 'raise', 'FalseHashError', 'allowed', '=', '"0123456789abcdef"', 'hashcheck', '=', '[', 'c', 'in', 'allowed', 'for', 'c', 'in', 'hashcode', ']', 'if', 'False', 'in', 'hashcheck', ':', 'print', '(', '"hashcode has not allowed structure %s"', '%', 'hashcode', ')', 'raise', 'FalseHashError', 'pixelmap', '=', 'setup_pixelmap', '(', 'hashcode', ')', 'draw_image', '(', 'pixelmap', ',', 'img', ')', 'return', 'img'] | Generates an PIL image avatar based on the given
hash String. Acts as the main accessor to pagan. | ['Generates', 'an', 'PIL', 'image', 'avatar', 'based', 'on', 'the', 'given', 'hash', 'String', '.', 'Acts', 'as', 'the', 'main', 'accessor', 'to', 'pagan', '.'] | train | https://github.com/daboth/pagan/blob/1e6d31f78e312d242751e70566ca9a6278784915/pagan/generator.py#L282-L298 |
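A minimal usage sketch for generate_by_hash above (illustrative only; the import path is inferred from pagan/generator.py in this row, and the md5 input and output filename are assumptions):
import hashlib
from pagan import generator  # import path assumed from pagan/generator.py above
# Any string of 32+ lowercase hex characters is accepted; an md5 hexdigest is a convenient source.
hashcode = hashlib.md5(b"user@example.com").hexdigest()
img = generator.generate_by_hash(hashcode)  # returns a PIL Image
img.save("avatar.png")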
9,550 | stephanepechard/projy | projy/cmdline.py | run_info | def run_info(template):
""" Print information about a specific template. """
template.project_name = 'TowelStuff' # fake project name, always the same
name = template_name_from_class_name(template.__class__.__name__)
term = TerminalView()
term.print_info("Content of template {} with an example project " \
"named 'TowelStuff':".format(term.text_in_color(name, TERM_GREEN)))
dir_name = None
for file_info in sorted(template.files(), key=lambda dir: dir[0]):
directory = file_name = template_name = ''
if file_info[0]:
directory = file_info[0]
if file_info[1]:
file_name = file_info[1]
if file_info[2]:
template_name = '\t\t - ' + file_info[2]
if (directory != dir_name):
term.print_info('\n\t' + term.text_in_color(directory + '/', TERM_PINK))
dir_name = directory
term.print_info('\t\t' + term.text_in_color(file_name, TERM_YELLOW) + template_name)
# print substitutions
try:
subs = template.substitutes().keys()
if len(subs) > 0:
subs.sort()
term.print_info("\nSubstitutions of this template are: ")
max_len = 0
for key in subs:
if max_len < len(key):
max_len = len(key)
for key in subs:
term.print_info(u"\t{0:{1}} -> {2}".
format(key, max_len, template.substitutes()[key]))
except AttributeError:
pass | python | def run_info(template):
""" Print information about a specific template. """
template.project_name = 'TowelStuff' # fake project name, always the same
name = template_name_from_class_name(template.__class__.__name__)
term = TerminalView()
term.print_info("Content of template {} with an example project " \
"named 'TowelStuff':".format(term.text_in_color(name, TERM_GREEN)))
dir_name = None
for file_info in sorted(template.files(), key=lambda dir: dir[0]):
directory = file_name = template_name = ''
if file_info[0]:
directory = file_info[0]
if file_info[1]:
file_name = file_info[1]
if file_info[2]:
template_name = '\t\t - ' + file_info[2]
if (directory != dir_name):
term.print_info('\n\t' + term.text_in_color(directory + '/', TERM_PINK))
dir_name = directory
term.print_info('\t\t' + term.text_in_color(file_name, TERM_YELLOW) + template_name)
# print substitutions
try:
subs = template.substitutes().keys()
if len(subs) > 0:
subs.sort()
term.print_info("\nSubstitutions of this template are: ")
max_len = 0
for key in subs:
if max_len < len(key):
max_len = len(key)
for key in subs:
term.print_info(u"\t{0:{1}} -> {2}".
format(key, max_len, template.substitutes()[key]))
except AttributeError:
pass | ['def', 'run_info', '(', 'template', ')', ':', 'template', '.', 'project_name', '=', "'TowelStuff'", '# fake project name, always the same', 'name', '=', 'template_name_from_class_name', '(', 'template', '.', '__class__', '.', '__name__', ')', 'term', '=', 'TerminalView', '(', ')', 'term', '.', 'print_info', '(', '"Content of template {} with an example project "', '"named \'TowelStuff\':"', '.', 'format', '(', 'term', '.', 'text_in_color', '(', 'name', ',', 'TERM_GREEN', ')', ')', ')', 'dir_name', '=', 'None', 'for', 'file_info', 'in', 'sorted', '(', 'template', '.', 'files', '(', ')', ',', 'key', '=', 'lambda', 'dir', ':', 'dir', '[', '0', ']', ')', ':', 'directory', '=', 'file_name', '=', 'template_name', '=', "''", 'if', 'file_info', '[', '0', ']', ':', 'directory', '=', 'file_info', '[', '0', ']', 'if', 'file_info', '[', '1', ']', ':', 'file_name', '=', 'file_info', '[', '1', ']', 'if', 'file_info', '[', '2', ']', ':', 'template_name', '=', "'\\t\\t - '", '+', 'file_info', '[', '2', ']', 'if', '(', 'directory', '!=', 'dir_name', ')', ':', 'term', '.', 'print_info', '(', "'\\n\\t'", '+', 'term', '.', 'text_in_color', '(', 'directory', '+', "'/'", ',', 'TERM_PINK', ')', ')', 'dir_name', '=', 'directory', 'term', '.', 'print_info', '(', "'\\t\\t'", '+', 'term', '.', 'text_in_color', '(', 'file_name', ',', 'TERM_YELLOW', ')', '+', 'template_name', ')', '# print substitutions', 'try', ':', 'subs', '=', 'template', '.', 'substitutes', '(', ')', '.', 'keys', '(', ')', 'if', 'len', '(', 'subs', ')', '>', '0', ':', 'subs', '.', 'sort', '(', ')', 'term', '.', 'print_info', '(', '"\\nSubstitutions of this template are: "', ')', 'max_len', '=', '0', 'for', 'key', 'in', 'subs', ':', 'if', 'max_len', '<', 'len', '(', 'key', ')', ':', 'max_len', '=', 'len', '(', 'key', ')', 'for', 'key', 'in', 'subs', ':', 'term', '.', 'print_info', '(', 'u"\\t{0:{1}} -> {2}"', '.', 'format', '(', 'key', ',', 'max_len', ',', 'template', '.', 'substitutes', '(', ')', '[', 'key', ']', ')', ')', 'except', 'AttributeError', ':', 'pass'] | Print information about a specific template. | ['Print', 'information', 'about', 'a', 'specific', 'template', '.'] | train | https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L54-L91 |
9,551 | rbarrois/xworkflows | src/xworkflows/base.py | ImplementationList.get_custom_implementations | def get_custom_implementations(self):
"""Retrieve a list of cutom implementations.
Yields:
(str, str, ImplementationProperty) tuples: The name of the attribute
an implementation lives at, the name of the related transition,
and the related implementation.
"""
for trname in self.custom_implems:
attr = self.transitions_at[trname]
implem = self.implementations[trname]
yield (trname, attr, implem) | python | def get_custom_implementations(self):
"""Retrieve a list of cutom implementations.
Yields:
(str, str, ImplementationProperty) tuples: The name of the attribute
an implementation lives at, the name of the related transition,
and the related implementation.
"""
for trname in self.custom_implems:
attr = self.transitions_at[trname]
implem = self.implementations[trname]
yield (trname, attr, implem) | ['def', 'get_custom_implementations', '(', 'self', ')', ':', 'for', 'trname', 'in', 'self', '.', 'custom_implems', ':', 'attr', '=', 'self', '.', 'transitions_at', '[', 'trname', ']', 'implem', '=', 'self', '.', 'implementations', '[', 'trname', ']', 'yield', '(', 'trname', ',', 'attr', ',', 'implem', ')'] | Retrieve a list of cutom implementations.
Yields:
(str, str, ImplementationProperty) tuples: The name of the attribute
an implementation lives at, the name of the related transition,
and the related implementation. | ['Retrieve', 'a', 'list', 'of', 'cutom', 'implementations', '.'] | train | https://github.com/rbarrois/xworkflows/blob/4a94b04ba83cb43f61d4b0f7db6964a667c86b5b/src/xworkflows/base.py#L734-L745 |
9,552 | mediawiki-utilities/python-mwxml | mwxml/iteration/dump.py | Dump.from_file | def from_file(cls, f):
"""
Constructs a :class:`~mwxml.iteration.dump.Dump` from a `file` pointer.
:Parameters:
f : `file`
A plain text file pointer containing XML to process
"""
element = ElementIterator.from_file(f)
assert element.tag == "mediawiki"
return cls.from_element(element) | python | def from_file(cls, f):
"""
Constructs a :class:`~mwxml.iteration.dump.Dump` from a `file` pointer.
:Parameters:
f : `file`
A plain text file pointer containing XML to process
"""
element = ElementIterator.from_file(f)
assert element.tag == "mediawiki"
return cls.from_element(element) | ['def', 'from_file', '(', 'cls', ',', 'f', ')', ':', 'element', '=', 'ElementIterator', '.', 'from_file', '(', 'f', ')', 'assert', 'element', '.', 'tag', '==', '"mediawiki"', 'return', 'cls', '.', 'from_element', '(', 'element', ')'] | Constructs a :class:`~mwxml.iteration.dump.Dump` from a `file` pointer.
:Parameters:
f : `file`
A plain text file pointer containing XML to process | ['Constructs', 'a', ':', 'class', ':', '~mwxml', '.', 'iteration', '.', 'dump', '.', 'Dump', 'from', 'a', 'file', 'pointer', '.'] | train | https://github.com/mediawiki-utilities/python-mwxml/blob/6a8c18be99cd0bcee9c496e607f08bf4dfe5b510/mwxml/iteration/dump.py#L136-L146 |
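A short usage sketch for Dump.from_file above (a sketch only; the dump filename is an assumption):
import mwxml
# Iterate pages and revisions from an uncompressed MediaWiki XML dump.
with open("enwiki-sample.xml") as f:  # hypothetical filename
    dump = mwxml.Dump.from_file(f)
    for page in dump:
        for revision in page:
            print(page.title, revision.id)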
9,553 | saltstack/salt | salt/modules/cmdmod.py | _check_avail | def _check_avail(cmd):
'''
Check to see if the given command can be run
'''
if isinstance(cmd, list):
cmd = ' '.join([six.text_type(x) if not isinstance(x, six.string_types) else x
for x in cmd])
bret = True
wret = False
if __salt__['config.get']('cmd_blacklist_glob'):
blist = __salt__['config.get']('cmd_blacklist_glob', [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# BAD! you are blacklisted
bret = False
if __salt__['config.get']('cmd_whitelist_glob', []):
blist = __salt__['config.get']('cmd_whitelist_glob', [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# GOOD! You are whitelisted
wret = True
break
else:
# If no whitelist set then alls good!
wret = True
return bret and wret | python | def _check_avail(cmd):
'''
Check to see if the given command can be run
'''
if isinstance(cmd, list):
cmd = ' '.join([six.text_type(x) if not isinstance(x, six.string_types) else x
for x in cmd])
bret = True
wret = False
if __salt__['config.get']('cmd_blacklist_glob'):
blist = __salt__['config.get']('cmd_blacklist_glob', [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# BAD! you are blacklisted
bret = False
if __salt__['config.get']('cmd_whitelist_glob', []):
blist = __salt__['config.get']('cmd_whitelist_glob', [])
for comp in blist:
if fnmatch.fnmatch(cmd, comp):
# GOOD! You are whitelisted
wret = True
break
else:
# If no whitelist set then alls good!
wret = True
return bret and wret | ['def', '_check_avail', '(', 'cmd', ')', ':', 'if', 'isinstance', '(', 'cmd', ',', 'list', ')', ':', 'cmd', '=', "' '", '.', 'join', '(', '[', 'six', '.', 'text_type', '(', 'x', ')', 'if', 'not', 'isinstance', '(', 'x', ',', 'six', '.', 'string_types', ')', 'else', 'x', 'for', 'x', 'in', 'cmd', ']', ')', 'bret', '=', 'True', 'wret', '=', 'False', 'if', '__salt__', '[', "'config.get'", ']', '(', "'cmd_blacklist_glob'", ')', ':', 'blist', '=', '__salt__', '[', "'config.get'", ']', '(', "'cmd_blacklist_glob'", ',', '[', ']', ')', 'for', 'comp', 'in', 'blist', ':', 'if', 'fnmatch', '.', 'fnmatch', '(', 'cmd', ',', 'comp', ')', ':', '# BAD! you are blacklisted', 'bret', '=', 'False', 'if', '__salt__', '[', "'config.get'", ']', '(', "'cmd_whitelist_glob'", ',', '[', ']', ')', ':', 'blist', '=', '__salt__', '[', "'config.get'", ']', '(', "'cmd_whitelist_glob'", ',', '[', ']', ')', 'for', 'comp', 'in', 'blist', ':', 'if', 'fnmatch', '.', 'fnmatch', '(', 'cmd', ',', 'comp', ')', ':', '# GOOD! You are whitelisted', 'wret', '=', 'True', 'break', 'else', ':', '# If no whitelist set then alls good!', 'wret', '=', 'True', 'return', 'bret', 'and', 'wret'] | Check to see if the given command can be run | ['Check', 'to', 'see', 'if', 'the', 'given', 'command', 'can', 'be', 'run'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cmdmod.py#L222-L247 |
9,554 | tanghaibao/goatools | goatools/nt_utils.py | wr_py_nts | def wr_py_nts(fout_py, nts, docstring=None, varname="nts"):
"""Save namedtuples into a Python module."""
if nts:
with open(fout_py, 'w') as prt:
prt.write('"""{DOCSTRING}"""\n\n'.format(DOCSTRING=docstring))
prt.write("# Created: {DATE}\n".format(DATE=str(datetime.date.today())))
prt_nts(prt, nts, varname)
sys.stdout.write(" {N:7,} items WROTE: {PY}\n".format(N=len(nts), PY=fout_py)) | python | def wr_py_nts(fout_py, nts, docstring=None, varname="nts"):
"""Save namedtuples into a Python module."""
if nts:
with open(fout_py, 'w') as prt:
prt.write('"""{DOCSTRING}"""\n\n'.format(DOCSTRING=docstring))
prt.write("# Created: {DATE}\n".format(DATE=str(datetime.date.today())))
prt_nts(prt, nts, varname)
sys.stdout.write(" {N:7,} items WROTE: {PY}\n".format(N=len(nts), PY=fout_py)) | ['def', 'wr_py_nts', '(', 'fout_py', ',', 'nts', ',', 'docstring', '=', 'None', ',', 'varname', '=', '"nts"', ')', ':', 'if', 'nts', ':', 'with', 'open', '(', 'fout_py', ',', "'w'", ')', 'as', 'prt', ':', 'prt', '.', 'write', '(', '\'"""{DOCSTRING}"""\\n\\n\'', '.', 'format', '(', 'DOCSTRING', '=', 'docstring', ')', ')', 'prt', '.', 'write', '(', '"# Created: {DATE}\\n"', '.', 'format', '(', 'DATE', '=', 'str', '(', 'datetime', '.', 'date', '.', 'today', '(', ')', ')', ')', ')', 'prt_nts', '(', 'prt', ',', 'nts', ',', 'varname', ')', 'sys', '.', 'stdout', '.', 'write', '(', '" {N:7,} items WROTE: {PY}\\n"', '.', 'format', '(', 'N', '=', 'len', '(', 'nts', ')', ',', 'PY', '=', 'fout_py', ')', ')'] | Save namedtuples into a Python module. | ['Save', 'namedtuples', 'into', 'a', 'Python', 'module', '.'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/nt_utils.py#L54-L61 |
9,555 | dhocker/udmx-pyusb | example.py | main | def main():
"""
How to control a DMX light through an Anyma USB controller
"""
# Channel value list for channels 1-512
cv = [0 for v in range(0, 512)]
# Create an instance of the DMX controller and open it
print("Opening DMX controller...")
dev = pyudmx.uDMXDevice()
# This will automagically find a single Anyma-type USB DMX controller
dev.open()
# For informational purpose, display what we know about the DMX controller
print(dev.Device)
# Send messages to the light changing it to red, then green, then blue
# This is the "hard way" to do it, but illustrates how it's done
print("Setting to red...")
cv[0] = 255 # red
cv[6] = 128 # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to red")
sleep(3.0)
print("Setting to green...")
cv[0] = 0 # red
cv[1] = 255 # green
cv[6] = 128 # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to green")
sleep(3.0)
print("Setting to blue...")
cv[0] = 0 # red
cv[1] = 0 # green
cv[2] = 255 # blue
cv[6] = 128 # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to blue")
sleep(3.0)
# Here's an easier way to do it
print("And, again the easier way")
send_rgb(dev, 255, 0, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 255, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 0, 255, 128)
sleep(3.0)
print("Reset all channels and close..")
# Turns the light off
cv = [0 for v in range(0, 512)]
dev.send_multi_value(1, cv)
dev.close() | python | def main():
"""
How to control a DMX light through an Anyma USB controller
"""
# Channel value list for channels 1-512
cv = [0 for v in range(0, 512)]
# Create an instance of the DMX controller and open it
print("Opening DMX controller...")
dev = pyudmx.uDMXDevice()
# This will automagically find a single Anyma-type USB DMX controller
dev.open()
# For informational purpose, display what we know about the DMX controller
print(dev.Device)
# Send messages to the light changing it to red, then green, then blue
# This is the "hard way" to do it, but illustrates how it's done
print("Setting to red...")
cv[0] = 255 # red
cv[6] = 128 # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to red")
sleep(3.0)
print("Setting to green...")
cv[0] = 0 # red
cv[1] = 255 # green
cv[6] = 128 # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to green")
sleep(3.0)
print("Setting to blue...")
cv[0] = 0 # red
cv[1] = 0 # green
cv[2] = 255 # blue
cv[6] = 128 # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to blue")
sleep(3.0)
# Here's an easier way to do it
print("And, again the easier way")
send_rgb(dev, 255, 0, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 255, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 0, 255, 128)
sleep(3.0)
print("Reset all channels and close..")
# Turns the light off
cv = [0 for v in range(0, 512)]
dev.send_multi_value(1, cv)
dev.close() | ['def', 'main', '(', ')', ':', '# Channel value list for channels 1-512', 'cv', '=', '[', '0', 'for', 'v', 'in', 'range', '(', '0', ',', '512', ')', ']', '# Create an instance of the DMX controller and open it ', 'print', '(', '"Opening DMX controller..."', ')', 'dev', '=', 'pyudmx', '.', 'uDMXDevice', '(', ')', '# This will automagically find a single Anyma-type USB DMX controller', 'dev', '.', 'open', '(', ')', '# For informational purpose, display what we know about the DMX controller', 'print', '(', 'dev', '.', 'Device', ')', '# Send messages to the light changing it to red, then green, then blue', '# This is the "hard way" to do it, but illustrates how it\'s done', 'print', '(', '"Setting to red..."', ')', 'cv', '[', '0', ']', '=', '255', '# red', 'cv', '[', '6', ']', '=', '128', '# dimmer to half value', 'sent', '=', 'dev', '.', 'send_multi_value', '(', '1', ',', 'cv', ')', 'print', '(', '"Set to red"', ')', 'sleep', '(', '3.0', ')', 'print', '(', '"Setting to green..."', ')', 'cv', '[', '0', ']', '=', '0', '# red', 'cv', '[', '1', ']', '=', '255', '# green', 'cv', '[', '6', ']', '=', '128', '# dimmer to half value', 'sent', '=', 'dev', '.', 'send_multi_value', '(', '1', ',', 'cv', ')', 'print', '(', '"Set to green"', ')', 'sleep', '(', '3.0', ')', 'print', '(', '"Setting to blue..."', ')', 'cv', '[', '0', ']', '=', '0', '# red', 'cv', '[', '1', ']', '=', '0', '# green', 'cv', '[', '2', ']', '=', '255', '# blue', 'cv', '[', '6', ']', '=', '128', '# dimmer to half value', 'sent', '=', 'dev', '.', 'send_multi_value', '(', '1', ',', 'cv', ')', 'print', '(', '"Set to blue"', ')', 'sleep', '(', '3.0', ')', "# Here's an easier way to do it", 'print', '(', '"And, again the easier way"', ')', 'send_rgb', '(', 'dev', ',', '255', ',', '0', ',', '0', ',', '128', ')', 'sleep', '(', '3.0', ')', 'send_rgb', '(', 'dev', ',', '0', ',', '255', ',', '0', ',', '128', ')', 'sleep', '(', '3.0', ')', 'send_rgb', '(', 'dev', ',', '0', ',', '0', ',', '255', ',', '128', ')', 'sleep', '(', '3.0', ')', 'print', '(', '"Reset all channels and close.."', ')', '# Turns the light off', 'cv', '=', '[', '0', 'for', 'v', 'in', 'range', '(', '0', ',', '512', ')', ']', 'dev', '.', 'send_multi_value', '(', '1', ',', 'cv', ')', 'dev', '.', 'close', '(', ')'] | How to control a DMX light through an Anyma USB controller | ['How', 'to', 'control', 'a', 'DMX', 'light', 'through', 'an', 'Anyma', 'USB', 'controller'] | train | https://github.com/dhocker/udmx-pyusb/blob/ee7d10604ecd83857154ed6739793de3b7bd5fc1/example.py#L39-L96 |
9,556 | DomainTools/python_api | domaintools/api.py | API.reverse_ip_whois | def reverse_ip_whois(self, query=None, ip=None, country=None, server=None, include_total_count=False, page=1,
**kwargs):
"""Pass in an IP address or a list of free text query terms."""
if (ip and query) or not (ip or query):
raise ValueError('Query or IP Address (but not both) must be defined')
return self._results('reverse-ip-whois', '/v1/reverse-ip-whois', query=query, ip=ip, country=country,
server=server, include_total_count=include_total_count, page=page, items_path=('records', ),
**kwargs) | python | def reverse_ip_whois(self, query=None, ip=None, country=None, server=None, include_total_count=False, page=1,
**kwargs):
"""Pass in an IP address or a list of free text query terms."""
if (ip and query) or not (ip or query):
raise ValueError('Query or IP Address (but not both) must be defined')
return self._results('reverse-ip-whois', '/v1/reverse-ip-whois', query=query, ip=ip, country=country,
server=server, include_total_count=include_total_count, page=page, items_path=('records', ),
**kwargs) | ['def', 'reverse_ip_whois', '(', 'self', ',', 'query', '=', 'None', ',', 'ip', '=', 'None', ',', 'country', '=', 'None', ',', 'server', '=', 'None', ',', 'include_total_count', '=', 'False', ',', 'page', '=', '1', ',', '*', '*', 'kwargs', ')', ':', 'if', '(', 'ip', 'and', 'query', ')', 'or', 'not', '(', 'ip', 'or', 'query', ')', ':', 'raise', 'ValueError', '(', "'Query or IP Address (but not both) must be defined'", ')', 'return', 'self', '.', '_results', '(', "'reverse-ip-whois'", ',', "'/v1/reverse-ip-whois'", ',', 'query', '=', 'query', ',', 'ip', '=', 'ip', ',', 'country', '=', 'country', ',', 'server', '=', 'server', ',', 'include_total_count', '=', 'include_total_count', ',', 'page', '=', 'page', ',', 'items_path', '=', '(', "'records'", ',', ')', ',', '*', '*', 'kwargs', ')'] | Pass in an IP address or a list of free text query terms. | ['Pass', 'in', 'an', 'IP', 'address', 'or', 'a', 'list', 'of', 'free', 'text', 'query', 'terms', '.'] | train | https://github.com/DomainTools/python_api/blob/17be85fd4913fbe14d7660a4f4829242f1663e60/domaintools/api.py#L148-L156 |
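A hedged call sketch for reverse_ip_whois above (the credentials and IP are placeholders; the API constructor usage is an assumption based on the domaintools python_api package):
from domaintools import API
api = API("your_username", "your_api_key")  # placeholder credentials
# Either an IP address or a free-text query may be passed, but not both.
for record in api.reverse_ip_whois(ip="192.0.2.1"):  # documentation-range IP, illustrative only
    print(record)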
9,557 | rlisagor/pynetlinux | pynetlinux/ifconfig.py | Interface.get_mac | def get_mac(self):
''' Obtain the device's mac address. '''
ifreq = struct.pack('16sH14s', self.name, AF_UNIX, b'\x00'*14)
res = fcntl.ioctl(sockfd, SIOCGIFHWADDR, ifreq)
address = struct.unpack('16sH14s', res)[2]
mac = struct.unpack('6B8x', address)
return ":".join(['%02X' % i for i in mac]) | python | def get_mac(self):
''' Obtain the device's mac address. '''
ifreq = struct.pack('16sH14s', self.name, AF_UNIX, b'\x00'*14)
res = fcntl.ioctl(sockfd, SIOCGIFHWADDR, ifreq)
address = struct.unpack('16sH14s', res)[2]
mac = struct.unpack('6B8x', address)
return ":".join(['%02X' % i for i in mac]) | ['def', 'get_mac', '(', 'self', ')', ':', 'ifreq', '=', 'struct', '.', 'pack', '(', "'16sH14s'", ',', 'self', '.', 'name', ',', 'AF_UNIX', ',', "b'\\x00'", '*', '14', ')', 'res', '=', 'fcntl', '.', 'ioctl', '(', 'sockfd', ',', 'SIOCGIFHWADDR', ',', 'ifreq', ')', 'address', '=', 'struct', '.', 'unpack', '(', "'16sH14s'", ',', 'res', ')', '[', '2', ']', 'mac', '=', 'struct', '.', 'unpack', '(', "'6B8x'", ',', 'address', ')', 'return', '":"', '.', 'join', '(', '[', "'%02X'", '%', 'i', 'for', 'i', 'in', 'mac', ']', ')'] | Obtain the device's mac address. | ['Obtain', 'the', 'device', 's', 'mac', 'address', '.'] | train | https://github.com/rlisagor/pynetlinux/blob/e3f16978855c6649685f0c43d4c3fcf768427ae5/pynetlinux/ifconfig.py#L179-L186 |
9,558 | hsolbrig/PyShEx | pyshex/utils/url_utils.py | generate_base | def generate_base(path: str) -> str:
""" Convert path, which can be a URL or a file path into a base URI
:param path: file location or url
:return: file location or url sans actual name
"""
if ':' in path:
parts = urlparse(path)
parts_dict = parts._asdict()
parts_dict['path'] = os.path.split(parts.path)[0] if '/' in parts.path else ''
return urlunparse(ParseResult(**parts_dict)) + '/'
else:
return (os.path.split(path)[0] if '/' in path else '') + '/' | python | def generate_base(path: str) -> str:
""" Convert path, which can be a URL or a file path into a base URI
:param path: file location or url
:return: file location or url sans actual name
"""
if ':' in path:
parts = urlparse(path)
parts_dict = parts._asdict()
parts_dict['path'] = os.path.split(parts.path)[0] if '/' in parts.path else ''
return urlunparse(ParseResult(**parts_dict)) + '/'
else:
return (os.path.split(path)[0] if '/' in path else '') + '/' | ['def', 'generate_base', '(', 'path', ':', 'str', ')', '->', 'str', ':', 'if', "':'", 'in', 'path', ':', 'parts', '=', 'urlparse', '(', 'path', ')', 'parts_dict', '=', 'parts', '.', '_asdict', '(', ')', 'parts_dict', '[', "'path'", ']', '=', 'os', '.', 'path', '.', 'split', '(', 'parts', '.', 'path', ')', '[', '0', ']', 'if', "'/'", 'in', 'parts', '.', 'path', 'else', "''", 'return', 'urlunparse', '(', 'ParseResult', '(', '*', '*', 'parts_dict', ')', ')', '+', "'/'", 'else', ':', 'return', '(', 'os', '.', 'path', '.', 'split', '(', 'path', ')', '[', '0', ']', 'if', "'/'", 'in', 'path', 'else', "''", ')', '+', "'/'"] | Convert path, which can be a URL or a file path into a base URI
:param path: file location or url
:return: file location or url sans actual name | ['Convert', 'path', 'which', 'can', 'be', 'a', 'URL', 'or', 'a', 'file', 'path', 'into', 'a', 'base', 'URI'] | train | https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/utils/url_utils.py#L6-L18 |
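A quick illustration of generate_base above on both kinds of input (a sketch; the example URL and path are made up):
from pyshex.utils.url_utils import generate_base
# URL input: the trailing file name is dropped and a '/' is appended.
print(generate_base("http://example.org/shapes/schema.shex"))  # -> http://example.org/shapes/
# Plain file path input behaves the same way.
print(generate_base("/data/schemas/schema.shex"))              # -> /data/schemas/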
9,559 | google/prettytensor | prettytensor/pretty_tensor_class.py | _conversion_function | def _conversion_function(pt_wrapper, dtype=None, name=None, as_ref=False):
"""Allows PrettyTensors and Loss to work as a tensor."""
# Ignore as_ref to not create backward compatibility issues.
_ = name, as_ref
t = pt_wrapper.tensor
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
'Tensor conversion requested dtype %s for Tensor with dtype %s: %r' %
(dtype, t.dtype, t))
return t | python | def _conversion_function(pt_wrapper, dtype=None, name=None, as_ref=False):
"""Allows PrettyTensors and Loss to work as a tensor."""
# Ignore as_ref to not create backward compatibility issues.
_ = name, as_ref
t = pt_wrapper.tensor
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
'Tensor conversion requested dtype %s for Tensor with dtype %s: %r' %
(dtype, t.dtype, t))
return t | ['def', '_conversion_function', '(', 'pt_wrapper', ',', 'dtype', '=', 'None', ',', 'name', '=', 'None', ',', 'as_ref', '=', 'False', ')', ':', '# Ignore as_ref to not create backward compatibility issues.', '_', '=', 'name', ',', 'as_ref', 't', '=', 'pt_wrapper', '.', 'tensor', 'if', 'dtype', 'and', 'not', 'dtype', '.', 'is_compatible_with', '(', 't', '.', 'dtype', ')', ':', 'raise', 'ValueError', '(', "'Tensor conversion requested dtype %s for Tensor with dtype %s: %r'", '%', '(', 'dtype', ',', 't', '.', 'dtype', ',', 't', ')', ')', 'return', 't'] | Allows PrettyTensors and Loss to work as a tensor. | ['Allows', 'PrettyTensors', 'and', 'Loss', 'to', 'work', 'as', 'a', 'tensor', '.'] | train | https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_class.py#L2016-L2025 |
9,560 | iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Variables/BoolVariable.py | _text2bool | def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false' respectively.
This is usable as 'converter' for SCons' Variables.
"""
lval = val.lower()
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val) | python | def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false' respectively.
This is usable as 'converter' for SCons' Variables.
"""
lval = val.lower()
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val) | ['def', '_text2bool', '(', 'val', ')', ':', 'lval', '=', 'val', '.', 'lower', '(', ')', 'if', 'lval', 'in', '__true_strings', ':', 'return', 'True', 'if', 'lval', 'in', '__false_strings', ':', 'return', 'False', 'raise', 'ValueError', '(', '"Invalid value for boolean option: %s"', '%', 'val', ')'] | Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false' respectively.
This is usable as 'converter' for SCons' Variables. | ['Converts', 'strings', 'to', 'True', '/', 'False', 'depending', 'on', 'the', 'truth', 'expressed', 'by', 'the', 'string', '.', 'If', 'the', 'string', 'can', 't', 'be', 'converted', 'the', 'original', 'value', 'will', 'be', 'returned', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Variables/BoolVariable.py#L47-L61 |
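The conversion behaviour above can be exercised directly (a sketch; _text2bool is an internal helper and the import assumes a standard SCons installation rather than the vendored copy referenced in this row):
from SCons.Variables.BoolVariable import _text2bool
print(_text2bool("Yes"))  # True: matched case-insensitively against the 'true' strings
print(_text2bool("0"))    # False
try:
    _text2bool("maybe")   # anything unrecognised raises ValueError
except ValueError as exc:
    print(exc)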
9,561 | glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/util/filter.py | gaussian_filter | def gaussian_filter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = float(sigma[ax])
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1, ] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax], None, None)
filtered = filtered[sl]
return filtered + baseline | python | def gaussian_filter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = float(sigma[ax])
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1, ] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax], None, None)
filtered = filtered[sl]
return filtered + baseline | ['def', 'gaussian_filter', '(', 'data', ',', 'sigma', ')', ':', 'if', 'np', '.', 'isscalar', '(', 'sigma', ')', ':', 'sigma', '=', '(', 'sigma', ',', ')', '*', 'data', '.', 'ndim', 'baseline', '=', 'data', '.', 'mean', '(', ')', 'filtered', '=', 'data', '-', 'baseline', 'for', 'ax', 'in', 'range', '(', 'data', '.', 'ndim', ')', ':', 's', '=', 'float', '(', 'sigma', '[', 'ax', ']', ')', 'if', 's', '==', '0', ':', 'continue', '# generate 1D gaussian kernel', 'ksize', '=', 'int', '(', 's', '*', '6', ')', 'x', '=', 'np', '.', 'arange', '(', '-', 'ksize', ',', 'ksize', ')', 'kernel', '=', 'np', '.', 'exp', '(', '-', 'x', '**', '2', '/', '(', '2', '*', 's', '**', '2', ')', ')', 'kshape', '=', '[', '1', ',', ']', '*', 'data', '.', 'ndim', 'kshape', '[', 'ax', ']', '=', 'len', '(', 'kernel', ')', 'kernel', '=', 'kernel', '.', 'reshape', '(', 'kshape', ')', '# convolve as product of FFTs', 'shape', '=', 'data', '.', 'shape', '[', 'ax', ']', '+', 'ksize', 'scale', '=', '1.0', '/', '(', 'abs', '(', 's', ')', '*', '(', '2', '*', 'np', '.', 'pi', ')', '**', '0.5', ')', 'filtered', '=', 'scale', '*', 'np', '.', 'fft', '.', 'irfft', '(', 'np', '.', 'fft', '.', 'rfft', '(', 'filtered', ',', 'shape', ',', 'axis', '=', 'ax', ')', '*', 'np', '.', 'fft', '.', 'rfft', '(', 'kernel', ',', 'shape', ',', 'axis', '=', 'ax', ')', ',', 'axis', '=', 'ax', ')', '# clip off extra data', 'sl', '=', '[', 'slice', '(', 'None', ')', ']', '*', 'data', '.', 'ndim', 'sl', '[', 'ax', ']', '=', 'slice', '(', 'filtered', '.', 'shape', '[', 'ax', ']', '-', 'data', '.', 'shape', '[', 'ax', ']', ',', 'None', ',', 'None', ')', 'filtered', '=', 'filtered', '[', 'sl', ']', 'return', 'filtered', '+', 'baseline'] | Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter) | ['Drop', '-', 'in', 'replacement', 'for', 'scipy', '.', 'ndimage', '.', 'gaussian_filter', '.'] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/filter.py#L8-L44 |
9,562 | Fantomas42/django-blog-zinnia | zinnia/templatetags/zinnia.py | get_tag_cloud | def get_tag_cloud(context, steps=6, min_count=None,
template='zinnia/tags/tag_cloud.html'):
"""
Return a cloud of published tags.
"""
tags = Tag.objects.usage_for_queryset(
Entry.published.all(), counts=True,
min_count=min_count)
return {'template': template,
'tags': calculate_cloud(tags, steps),
'context_tag': context.get('tag')} | python | def get_tag_cloud(context, steps=6, min_count=None,
template='zinnia/tags/tag_cloud.html'):
"""
Return a cloud of published tags.
"""
tags = Tag.objects.usage_for_queryset(
Entry.published.all(), counts=True,
min_count=min_count)
return {'template': template,
'tags': calculate_cloud(tags, steps),
'context_tag': context.get('tag')} | ['def', 'get_tag_cloud', '(', 'context', ',', 'steps', '=', '6', ',', 'min_count', '=', 'None', ',', 'template', '=', "'zinnia/tags/tag_cloud.html'", ')', ':', 'tags', '=', 'Tag', '.', 'objects', '.', 'usage_for_queryset', '(', 'Entry', '.', 'published', '.', 'all', '(', ')', ',', 'counts', '=', 'True', ',', 'min_count', '=', 'min_count', ')', 'return', '{', "'template'", ':', 'template', ',', "'tags'", ':', 'calculate_cloud', '(', 'tags', ',', 'steps', ')', ',', "'context_tag'", ':', 'context', '.', 'get', '(', "'tag'", ')', '}'] | Return a cloud of published tags. | ['Return', 'a', 'cloud', 'of', 'published', 'tags', '.'] | train | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L378-L388 |
9,563 | sibirrer/lenstronomy | lenstronomy/LensModel/Profiles/spp.py | SPP.mass_3d | def mass_3d(self, r, rho0, gamma):
"""
mass enclosed within a 3d sphere of radius r
:param r:
:param a:
:param s:
:return:
"""
mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3)
return mass_3d | python | def mass_3d(self, r, rho0, gamma):
"""
mass enclosed within a 3d sphere of radius r
:param r:
:param a:
:param s:
:return:
"""
mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3)
return mass_3d | ['def', 'mass_3d', '(', 'self', ',', 'r', ',', 'rho0', ',', 'gamma', ')', ':', 'mass_3d', '=', '4', '*', 'np', '.', 'pi', '*', 'rho0', '/', '(', '-', 'gamma', '+', '3', ')', '*', 'r', '**', '(', '-', 'gamma', '+', '3', ')', 'return', 'mass_3d'] | mass enclosed a 3d sphere or radius r
:param r:
:param a:
:param s:
:return: | ['mass', 'enclosed', 'a', '3d', 'sphere', 'or', 'radius', 'r', ':', 'param', 'r', ':', ':', 'param', 'a', ':', ':', 'param', 's', ':', ':', 'return', ':'] | train | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/spp.py#L123-L132 |
9,564 | lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.find_n50 | def find_n50(self):
"""
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
"""
for sample in self.metadata:
# Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop
sample[self.analysistype].n50 = '-'
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in sample[self.analysistype].contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= sample[self.analysistype].genome_length * 0.5:
# Populate the dictionary, and break the loop
sample[self.analysistype].n50 = contig_length
break | python | def find_n50(self):
"""
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
"""
for sample in self.metadata:
# Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop
sample[self.analysistype].n50 = '-'
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in sample[self.analysistype].contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= sample[self.analysistype].genome_length * 0.5:
# Populate the dictionary, and break the loop
sample[self.analysistype].n50 = contig_length
break | ['def', 'find_n50', '(', 'self', ')', ':', 'for', 'sample', 'in', 'self', '.', 'metadata', ':', '# Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'n50', '=', "'-'", '# Initialise a variable to store a running total of contig lengths', 'currentlength', '=', '0', 'for', 'contig_length', 'in', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'contig_lengths', ':', '# Increment the current length with the length of the current contig', 'currentlength', '+=', 'contig_length', '# If the current length is now greater than the total genome / 2, the current contig length is the N50', 'if', 'currentlength', '>=', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'genome_length', '*', '0.5', ':', '# Populate the dictionary, and break the loop', 'sample', '[', 'self', '.', 'analysistype', ']', '.', 'n50', '=', 'contig_length', 'break'] | Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig | ['Calculate', 'the', 'N50', 'for', 'each', 'strain', '.', 'N50', 'is', 'defined', 'as', 'the', 'largest', 'contig', 'such', 'that', 'at', 'least', 'half', 'of', 'the', 'total', 'genome', 'size', 'is', 'contained', 'in', 'contigs', 'equal', 'to', 'or', 'larger', 'than', 'this', 'contig'] | train | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L634-L651 |
9,565 | Rediker-Software/doac | doac/views.py | OAuthView.handle_exception | def handle_exception(self, exception):
"""
Handle an unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
"""
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception) | python | def handle_exception(self, exception):
"""
Handle an unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
"""
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception) | ['def', 'handle_exception', '(', 'self', ',', 'exception', ')', ':', 'can_redirect', '=', 'getattr', '(', 'exception', ',', '"can_redirect"', ',', 'True', ')', 'redirect_uri', '=', 'getattr', '(', 'self', ',', '"redirect_uri"', ',', 'None', ')', 'if', 'can_redirect', 'and', 'redirect_uri', ':', 'return', 'self', '.', 'redirect_exception', '(', 'exception', ')', 'else', ':', 'return', 'self', '.', 'render_exception', '(', 'exception', ')'] | Handle a unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request. | ['Handle', 'a', 'unspecified', 'exception', 'and', 'return', 'the', 'correct', 'method', 'that', 'should', 'be', 'used', 'for', 'handling', 'it', '.'] | train | https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/views.py#L22-L38 |
9,566 | rwl/pylon | pylon/io/excel.py | ExcelWriter.write_branch_data | def write_branch_data(self, file):
""" Writes branch data to an Excel spreadsheet.
"""
branch_sheet = self.book.add_sheet("Branches")
for i, branch in enumerate(self.case.branches):
for j, attr in enumerate(BRANCH_ATTRS):
branch_sheet.write(i, j, getattr(branch, attr)) | python | def write_branch_data(self, file):
""" Writes branch data to an Excel spreadsheet.
"""
branch_sheet = self.book.add_sheet("Branches")
for i, branch in enumerate(self.case.branches):
for j, attr in enumerate(BRANCH_ATTRS):
branch_sheet.write(i, j, getattr(branch, attr)) | ['def', 'write_branch_data', '(', 'self', ',', 'file', ')', ':', 'branch_sheet', '=', 'self', '.', 'book', '.', 'add_sheet', '(', '"Branches"', ')', 'for', 'i', ',', 'branch', 'in', 'enumerate', '(', 'self', '.', 'case', '.', 'branches', ')', ':', 'for', 'j', ',', 'attr', 'in', 'enumerate', '(', 'BRANCH_ATTRS', ')', ':', 'branch_sheet', '.', 'write', '(', 'i', ',', 'j', ',', 'getattr', '(', 'branch', ',', 'attr', ')', ')'] | Writes branch data to an Excel spreadsheet. | ['Writes', 'branch', 'data', 'to', 'an', 'Excel', 'spreadsheet', '.'] | train | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/excel.py#L66-L73 |
9,567 | onecodex/onecodex | onecodex/models/collection.py | SampleCollection._collate_results | def _collate_results(self, field=None):
"""For a list of objects associated with a classification result, return the results as a
DataFrame and dict of taxa info.
Parameters
----------
field : {'readcount_w_children', 'readcount', 'abundance'}
Which field to use for the abundance/count of a particular taxon in a sample.
- 'readcount_w_children': total reads of this taxon and all its descendants
- 'readcount': total reads of this taxon
- 'abundance': genome size-normalized relative abundances, from shotgun sequencing
Returns
-------
None, but stores a result in self._cached.
"""
import pandas as pd
field = field if field else self._kwargs["field"]
if field not in ("auto", "abundance", "readcount", "readcount_w_children"):
raise OneCodexException("Specified field ({}) not valid.".format(field))
# we'll fill these dicts that eventually turn into DataFrames
df = {"classification_id": [c.id for c in self._classifications]}
tax_info = {"tax_id": [], "name": [], "rank": [], "parent_tax_id": []}
if field == "auto":
field = "readcount_w_children"
self._cached["field"] = field
for c_idx, c in enumerate(self._classifications):
# pulling results from mainline is the slowest part of the function
result = c.results()["table"]
# d contains info about a taxon in result, including name, id, counts, rank, etc.
for d in result:
d_tax_id = d["tax_id"]
if d_tax_id not in tax_info["tax_id"]:
for k in ("tax_id", "name", "rank", "parent_tax_id"):
tax_info[k].append(d[k])
# first time we've seen this taxon, so make a vector for it
df[d_tax_id] = [0] * len(self._classifications)
df[d_tax_id][c_idx] = d[field]
# format as a Pandas DataFrame
df = pd.DataFrame(df).set_index("classification_id").fillna(0)
df.columns.name = "tax_id"
tax_info = pd.DataFrame(tax_info).set_index("tax_id")
self._cached["results"] = df
self._cached["taxonomy"] = tax_info | python | def _collate_results(self, field=None):
"""For a list of objects associated with a classification result, return the results as a
DataFrame and dict of taxa info.
Parameters
----------
field : {'readcount_w_children', 'readcount', 'abundance'}
Which field to use for the abundance/count of a particular taxon in a sample.
- 'readcount_w_children': total reads of this taxon and all its descendants
- 'readcount': total reads of this taxon
- 'abundance': genome size-normalized relative abundances, from shotgun sequencing
Returns
-------
None, but stores a result in self._cached.
"""
import pandas as pd
field = field if field else self._kwargs["field"]
if field not in ("auto", "abundance", "readcount", "readcount_w_children"):
raise OneCodexException("Specified field ({}) not valid.".format(field))
# we'll fill these dicts that eventually turn into DataFrames
df = {"classification_id": [c.id for c in self._classifications]}
tax_info = {"tax_id": [], "name": [], "rank": [], "parent_tax_id": []}
if field == "auto":
field = "readcount_w_children"
self._cached["field"] = field
for c_idx, c in enumerate(self._classifications):
# pulling results from mainline is the slowest part of the function
result = c.results()["table"]
# d contains info about a taxon in result, including name, id, counts, rank, etc.
for d in result:
d_tax_id = d["tax_id"]
if d_tax_id not in tax_info["tax_id"]:
for k in ("tax_id", "name", "rank", "parent_tax_id"):
tax_info[k].append(d[k])
# first time we've seen this taxon, so make a vector for it
df[d_tax_id] = [0] * len(self._classifications)
df[d_tax_id][c_idx] = d[field]
# format as a Pandas DataFrame
df = pd.DataFrame(df).set_index("classification_id").fillna(0)
df.columns.name = "tax_id"
tax_info = pd.DataFrame(tax_info).set_index("tax_id")
self._cached["results"] = df
self._cached["taxonomy"] = tax_info | ['def', '_collate_results', '(', 'self', ',', 'field', '=', 'None', ')', ':', 'import', 'pandas', 'as', 'pd', 'field', '=', 'field', 'if', 'field', 'else', 'self', '.', '_kwargs', '[', '"field"', ']', 'if', 'field', 'not', 'in', '(', '"auto"', ',', '"abundance"', ',', '"readcount"', ',', '"readcount_w_children"', ')', ':', 'raise', 'OneCodexException', '(', '"Specified field ({}) not valid."', '.', 'format', '(', 'field', ')', ')', "# we'll fill these dicts that eventually turn into DataFrames", 'df', '=', '{', '"classification_id"', ':', '[', 'c', '.', 'id', 'for', 'c', 'in', 'self', '.', '_classifications', ']', '}', 'tax_info', '=', '{', '"tax_id"', ':', '[', ']', ',', '"name"', ':', '[', ']', ',', '"rank"', ':', '[', ']', ',', '"parent_tax_id"', ':', '[', ']', '}', 'if', 'field', '==', '"auto"', ':', 'field', '=', '"readcount_w_children"', 'self', '.', '_cached', '[', '"field"', ']', '=', 'field', 'for', 'c_idx', ',', 'c', 'in', 'enumerate', '(', 'self', '.', '_classifications', ')', ':', '# pulling results from mainline is the slowest part of the function', 'result', '=', 'c', '.', 'results', '(', ')', '[', '"table"', ']', '# d contains info about a taxon in result, including name, id, counts, rank, etc.', 'for', 'd', 'in', 'result', ':', 'd_tax_id', '=', 'd', '[', '"tax_id"', ']', 'if', 'd_tax_id', 'not', 'in', 'tax_info', '[', '"tax_id"', ']', ':', 'for', 'k', 'in', '(', '"tax_id"', ',', '"name"', ',', '"rank"', ',', '"parent_tax_id"', ')', ':', 'tax_info', '[', 'k', ']', '.', 'append', '(', 'd', '[', 'k', ']', ')', "# first time we've seen this taxon, so make a vector for it", 'df', '[', 'd_tax_id', ']', '=', '[', '0', ']', '*', 'len', '(', 'self', '.', '_classifications', ')', 'df', '[', 'd_tax_id', ']', '[', 'c_idx', ']', '=', 'd', '[', 'field', ']', '# format as a Pandas DataFrame', 'df', '=', 'pd', '.', 'DataFrame', '(', 'df', ')', '.', 'set_index', '(', '"classification_id"', ')', '.', 'fillna', '(', '0', ')', 'df', '.', 'columns', '.', 'name', '=', '"tax_id"', 'tax_info', '=', 'pd', '.', 'DataFrame', '(', 'tax_info', ')', '.', 'set_index', '(', '"tax_id"', ')', 'self', '.', '_cached', '[', '"results"', ']', '=', 'df', 'self', '.', '_cached', '[', '"taxonomy"', ']', '=', 'tax_info'] | For a list of objects associated with a classification result, return the results as a
DataFrame and dict of taxa info.
Parameters
----------
field : {'readcount_w_children', 'readcount', 'abundance'}
Which field to use for the abundance/count of a particular taxon in a sample.
- 'readcount_w_children': total reads of this taxon and all its descendants
- 'readcount': total reads of this taxon
- 'abundance': genome size-normalized relative abundances, from shotgun sequencing
Returns
-------
None, but stores a result in self._cached. | ['For', 'a', 'list', 'of', 'objects', 'associated', 'with', 'a', 'classification', 'result', 'return', 'the', 'results', 'as', 'a', 'DataFrame', 'and', 'dict', 'of', 'taxa', 'info', '.'] | train | https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/models/collection.py#L212-L271 |
9,568 | fulfilio/python-magento | magento/sales.py | CreditMemo.addcomment | def addcomment(self, creditmemo_increment_id,
comment, email=True, include_in_email=False):
"""
Add new comment to credit memo
:param creditmemo_increment_id: Credit memo increment ID
:return: bool
"""
return bool(
self.call(
'sales_order_creditmemo.addComment',
[creditmemo_increment_id, comment, email, include_in_email]
)
) | python | def addcomment(self, creditmemo_increment_id,
comment, email=True, include_in_email=False):
"""
Add new comment to credit memo
:param creditmemo_increment_id: Credit memo increment ID
:return: bool
"""
return bool(
self.call(
'sales_order_creditmemo.addComment',
[creditmemo_increment_id, comment, email, include_in_email]
)
) | ['def', 'addcomment', '(', 'self', ',', 'creditmemo_increment_id', ',', 'comment', ',', 'email', '=', 'True', ',', 'include_in_email', '=', 'False', ')', ':', 'return', 'bool', '(', 'self', '.', 'call', '(', "'sales_order_creditmemo.addComment'", ',', '[', 'creditmemo_increment_id', ',', 'comment', ',', 'email', ',', 'include_in_email', ']', ')', ')'] | Add new comment to credit memo
:param creditmemo_increment_id: Credit memo increment ID
:return: bool | ['Add', 'new', 'comment', 'to', 'credit', 'memo'] | train | https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/sales.py#L201-L215 |
9,569 | sbarham/dsrt | build/lib/dsrt/application/Application.py | Application.load_corpus | def load_corpus(self, path, config):
'''Load a dialogue corpus; eventually, support pickles and potentially other formats'''
# use the default dataset if no path is provided
# TODO -- change this to use a pre-saved dataset
if path == '':
path = self.default_path_to_corpus
self.data = Corpus(path=path, config=self.data_config) | python | def load_corpus(self, path, config):
'''Load a dialogue corpus; eventually, support pickles and potentially other formats'''
# use the default dataset if no path is provided
# TODO -- change this to use a pre-saved dataset
if path == '':
path = self.default_path_to_corpus
self.data = Corpus(path=path, config=self.data_config) | ['def', 'load_corpus', '(', 'self', ',', 'path', ',', 'config', ')', ':', '# use the default dataset if no path is provided', '# TODO -- change this to use a pre-saved dataset', 'if', 'path', '==', "''", ':', 'path', '=', 'self', '.', 'default_path_to_corpus', 'self', '.', 'data', '=', 'Corpus', '(', 'path', '=', 'path', ',', 'config', '=', 'self', '.', 'data_config', ')'] | Load a dialogue corpus; eventually, support pickles and potentially other formats | ['Load', 'a', 'dialogue', 'corpus', ';', 'eventually', 'support', 'pickles', 'and', 'potentially', 'other', 'formats'] | train | https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/build/lib/dsrt/application/Application.py#L197-L205 |
9,570 | sebp/scikit-survival | sksurv/svm/survival_svm.py | FastKernelSurvivalSVM.predict | def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
kernel_mat = self._get_kernel(X, self.fit_X_)
val = numpy.dot(kernel_mat, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val | python | def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
kernel_mat = self._get_kernel(X, self.fit_X_)
val = numpy.dot(kernel_mat, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val | ['def', 'predict', '(', 'self', ',', 'X', ')', ':', 'kernel_mat', '=', 'self', '.', '_get_kernel', '(', 'X', ',', 'self', '.', 'fit_X_', ')', 'val', '=', 'numpy', '.', 'dot', '(', 'kernel_mat', ',', 'self', '.', 'coef_', ')', 'if', 'hasattr', '(', 'self', ',', '"intercept_"', ')', ':', 'val', '+=', 'self', '.', 'intercept_', '# Order by increasing survival time if objective is pure ranking', 'if', 'self', '.', 'rank_ratio', '==', '1', ':', 'val', '*=', '-', '1', 'else', ':', '# model was fitted on log(time), transform to original scale', 'val', '=', 'numpy', '.', 'exp', '(', 'val', ')', 'return', 'val'] | Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks. | ['Rank', 'samples', 'according', 'to', 'survival', 'times'] | train | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L1000-L1028 |
9,571 | StanfordVL/robosuite | robosuite/environments/sawyer.py | SawyerEnv._get_reference | def _get_reference(self):
"""
Sets up necessary reference for robots, grippers, and objects.
"""
super()._get_reference()
# indices for joints in qpos, qvel
self.robot_joints = list(self.mujoco_robot.joints)
self._ref_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints
]
self._ref_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints
]
if self.use_indicator_object:
ind_qpos = self.sim.model.get_joint_qpos_addr("pos_indicator")
self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos
ind_qvel = self.sim.model.get_joint_qvel_addr("pos_indicator")
self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel
self.indicator_id = self.sim.model.body_name2id("pos_indicator")
# indices for grippers in qpos, qvel
if self.has_gripper:
self.gripper_joints = list(self.gripper.joints)
self._ref_gripper_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints
]
self._ref_gripper_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints
]
# indices for joint pos actuation, joint vel actuation, gripper actuation
self._ref_joint_pos_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("pos")
]
self._ref_joint_vel_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("vel")
]
if self.has_gripper:
self._ref_joint_gripper_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("gripper")
]
# IDs of sites for gripper visualization
self.eef_site_id = self.sim.model.site_name2id("grip_site")
self.eef_cylinder_id = self.sim.model.site_name2id("grip_site_cylinder") | python | def _get_reference(self):
"""
Sets up necessary reference for robots, grippers, and objects.
"""
super()._get_reference()
# indices for joints in qpos, qvel
self.robot_joints = list(self.mujoco_robot.joints)
self._ref_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints
]
self._ref_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints
]
if self.use_indicator_object:
ind_qpos = self.sim.model.get_joint_qpos_addr("pos_indicator")
self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos
ind_qvel = self.sim.model.get_joint_qvel_addr("pos_indicator")
self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel
self.indicator_id = self.sim.model.body_name2id("pos_indicator")
# indices for grippers in qpos, qvel
if self.has_gripper:
self.gripper_joints = list(self.gripper.joints)
self._ref_gripper_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints
]
self._ref_gripper_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints
]
# indices for joint pos actuation, joint vel actuation, gripper actuation
self._ref_joint_pos_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("pos")
]
self._ref_joint_vel_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("vel")
]
if self.has_gripper:
self._ref_joint_gripper_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in self.sim.model.actuator_names
if actuator.startswith("gripper")
]
# IDs of sites for gripper visualization
self.eef_site_id = self.sim.model.site_name2id("grip_site")
self.eef_cylinder_id = self.sim.model.site_name2id("grip_site_cylinder") | ['def', '_get_reference', '(', 'self', ')', ':', 'super', '(', ')', '.', '_get_reference', '(', ')', '# indices for joints in qpos, qvel', 'self', '.', 'robot_joints', '=', 'list', '(', 'self', '.', 'mujoco_robot', '.', 'joints', ')', 'self', '.', '_ref_joint_pos_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'get_joint_qpos_addr', '(', 'x', ')', 'for', 'x', 'in', 'self', '.', 'robot_joints', ']', 'self', '.', '_ref_joint_vel_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'get_joint_qvel_addr', '(', 'x', ')', 'for', 'x', 'in', 'self', '.', 'robot_joints', ']', 'if', 'self', '.', 'use_indicator_object', ':', 'ind_qpos', '=', 'self', '.', 'sim', '.', 'model', '.', 'get_joint_qpos_addr', '(', '"pos_indicator"', ')', 'self', '.', '_ref_indicator_pos_low', ',', 'self', '.', '_ref_indicator_pos_high', '=', 'ind_qpos', 'ind_qvel', '=', 'self', '.', 'sim', '.', 'model', '.', 'get_joint_qvel_addr', '(', '"pos_indicator"', ')', 'self', '.', '_ref_indicator_vel_low', ',', 'self', '.', '_ref_indicator_vel_high', '=', 'ind_qvel', 'self', '.', 'indicator_id', '=', 'self', '.', 'sim', '.', 'model', '.', 'body_name2id', '(', '"pos_indicator"', ')', '# indices for grippers in qpos, qvel', 'if', 'self', '.', 'has_gripper', ':', 'self', '.', 'gripper_joints', '=', 'list', '(', 'self', '.', 'gripper', '.', 'joints', ')', 'self', '.', '_ref_gripper_joint_pos_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'get_joint_qpos_addr', '(', 'x', ')', 'for', 'x', 'in', 'self', '.', 'gripper_joints', ']', 'self', '.', '_ref_gripper_joint_vel_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'get_joint_qvel_addr', '(', 'x', ')', 'for', 'x', 'in', 'self', '.', 'gripper_joints', ']', '# indices for joint pos actuation, joint vel actuation, gripper actuation', 'self', '.', '_ref_joint_pos_actuator_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'actuator_name2id', '(', 'actuator', ')', 'for', 'actuator', 'in', 'self', '.', 'sim', '.', 'model', '.', 'actuator_names', 'if', 'actuator', '.', 'startswith', '(', '"pos"', ')', ']', 'self', '.', '_ref_joint_vel_actuator_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'actuator_name2id', '(', 'actuator', ')', 'for', 'actuator', 'in', 'self', '.', 'sim', '.', 'model', '.', 'actuator_names', 'if', 'actuator', '.', 'startswith', '(', '"vel"', ')', ']', 'if', 'self', '.', 'has_gripper', ':', 'self', '.', '_ref_joint_gripper_actuator_indexes', '=', '[', 'self', '.', 'sim', '.', 'model', '.', 'actuator_name2id', '(', 'actuator', ')', 'for', 'actuator', 'in', 'self', '.', 'sim', '.', 'model', '.', 'actuator_names', 'if', 'actuator', '.', 'startswith', '(', '"gripper"', ')', ']', '# IDs of sites for gripper visualization', 'self', '.', 'eef_site_id', '=', 'self', '.', 'sim', '.', 'model', '.', 'site_name2id', '(', '"grip_site"', ')', 'self', '.', 'eef_cylinder_id', '=', 'self', '.', 'sim', '.', 'model', '.', 'site_name2id', '(', '"grip_site_cylinder"', ')'] | Sets up necessary reference for robots, grippers, and objects. | ['Sets', 'up', 'necessary', 'reference', 'for', 'robots', 'grippers', 'and', 'objects', '.'] | train | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/sawyer.py#L118-L174 |
9,572 | praekeltfoundation/molo | molo/core/utils.py | attach_image | def attach_image(field, nested_fields, page, record_keeper=None):
'''
Returns a function that attaches an image to page if it exists
Currently assumes that images have already been imported and info
has been stored in record_keeper
'''
if (field in nested_fields) and nested_fields[field]:
foreign_image_id = nested_fields[field]["id"]
# Handle the following
# record keeper may not exist
# record keeper may not have image ref
if record_keeper:
try:
local_image_id = record_keeper.get_local_image(
foreign_image_id)
local_image = Image.objects.get(id=local_image_id)
setattr(page, field, local_image)
except ObjectDoesNotExist:
raise ObjectDoesNotExist(
("executing attach_image: local image referenced"
"in record_keeper does not actually exist."),
None)
except Exception:
raise
else:
raise Exception(
("Attempted to attach image without record_keeper. "
"This functionality is not yet implemented")) | python | def attach_image(field, nested_fields, page, record_keeper=None):
'''
Returns a function that attaches an image to page if it exists
Currently assumes that images have already been imported and info
has been stored in record_keeper
'''
if (field in nested_fields) and nested_fields[field]:
foreign_image_id = nested_fields[field]["id"]
# Handle the following
# record keeper may not exist
# record keeper may not have image ref
if record_keeper:
try:
local_image_id = record_keeper.get_local_image(
foreign_image_id)
local_image = Image.objects.get(id=local_image_id)
setattr(page, field, local_image)
except ObjectDoesNotExist:
raise ObjectDoesNotExist(
("executing attach_image: local image referenced"
"in record_keeper does not actually exist."),
None)
except Exception:
raise
else:
raise Exception(
("Attempted to attach image without record_keeper. "
"This functionality is not yet implemented")) | ['def', 'attach_image', '(', 'field', ',', 'nested_fields', ',', 'page', ',', 'record_keeper', '=', 'None', ')', ':', 'if', '(', 'field', 'in', 'nested_fields', ')', 'and', 'nested_fields', '[', 'field', ']', ':', 'foreign_image_id', '=', 'nested_fields', '[', 'field', ']', '[', '"id"', ']', '# Handle the following', '# record keeper may not exist', '# record keeper may not have image ref', 'if', 'record_keeper', ':', 'try', ':', 'local_image_id', '=', 'record_keeper', '.', 'get_local_image', '(', 'foreign_image_id', ')', 'local_image', '=', 'Image', '.', 'objects', '.', 'get', '(', 'id', '=', 'local_image_id', ')', 'setattr', '(', 'page', ',', 'field', ',', 'local_image', ')', 'except', 'ObjectDoesNotExist', ':', 'raise', 'ObjectDoesNotExist', '(', '(', '"executing attach_image: local image referenced"', '"in record_keeper does not actually exist."', ')', ',', 'None', ')', 'except', 'Exception', ':', 'raise', 'else', ':', 'raise', 'Exception', '(', '(', '"Attempted to attach image without record_keeper. "', '"This functionality is not yet implemented"', ')', ')'] | Returns a function that attaches an image to page if it exists
Currently assumes that images have already been imported and info
has been stored in record_keeper | ['Returns', 'a', 'function', 'that', 'attaches', 'an', 'image', 'to', 'page', 'if', 'it', 'exists'] | train | https://github.com/praekeltfoundation/molo/blob/57702fda4fab261d67591415f7d46bc98fa38525/molo/core/utils.py#L295-L323 |
9,573 | CamDavidsonPilon/lifetimes | lifetimes/plotting.py | plot_frequency_recency_matrix | def plot_frequency_recency_matrix(
model,
T=1,
max_frequency=None,
max_recency=None,
title=None,
xlabel="Customer's Historical Frequency",
ylabel="Customer's Recency",
**kwargs
):
"""
Plot recency frequency matrix as heatmap.
Plot a figure of expected transactions in T next units of time by a customer's frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
T: float, optional
Next units of time to make predictions for
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if max_frequency is None:
max_frequency = int(model.data["frequency"].max())
if max_recency is None:
max_recency = int(model.data["T"].max())
Z = np.zeros((max_recency + 1, max_frequency + 1))
for i, recency in enumerate(np.arange(max_recency + 1)):
for j, frequency in enumerate(np.arange(max_frequency + 1)):
Z[i, j] = model.conditional_expected_number_of_purchases_up_to_time(T, frequency, recency, max_recency)
interpolation = kwargs.pop("interpolation", "none")
ax = plt.subplot(111)
pcm = ax.imshow(Z, interpolation=interpolation, **kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
title = (
"Expected Number of Future Purchases for {} Unit{} of Time,".format(T, "s"[T == 1 :])
+ "\nby Frequency and Recency of a Customer"
)
plt.title(title)
# turn matrix into square
forceAspect(ax)
# plot colorbar beside matrix
plt.colorbar(pcm, ax=ax)
return ax | python | def plot_frequency_recency_matrix(
model,
T=1,
max_frequency=None,
max_recency=None,
title=None,
xlabel="Customer's Historical Frequency",
ylabel="Customer's Recency",
**kwargs
):
"""
Plot recency frequency matrix as heatmap.
Plot a figure of expected transactions in T next units of time by a customer's frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
T: float, optional
Next units of time to make predictions for
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if max_frequency is None:
max_frequency = int(model.data["frequency"].max())
if max_recency is None:
max_recency = int(model.data["T"].max())
Z = np.zeros((max_recency + 1, max_frequency + 1))
for i, recency in enumerate(np.arange(max_recency + 1)):
for j, frequency in enumerate(np.arange(max_frequency + 1)):
Z[i, j] = model.conditional_expected_number_of_purchases_up_to_time(T, frequency, recency, max_recency)
interpolation = kwargs.pop("interpolation", "none")
ax = plt.subplot(111)
pcm = ax.imshow(Z, interpolation=interpolation, **kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
title = (
"Expected Number of Future Purchases for {} Unit{} of Time,".format(T, "s"[T == 1 :])
+ "\nby Frequency and Recency of a Customer"
)
plt.title(title)
# turn matrix into square
forceAspect(ax)
# plot colorbar beside matrix
plt.colorbar(pcm, ax=ax)
return ax | ['def', 'plot_frequency_recency_matrix', '(', 'model', ',', 'T', '=', '1', ',', 'max_frequency', '=', 'None', ',', 'max_recency', '=', 'None', ',', 'title', '=', 'None', ',', 'xlabel', '=', '"Customer\'s Historical Frequency"', ',', 'ylabel', '=', '"Customer\'s Recency"', ',', '*', '*', 'kwargs', ')', ':', 'from', 'matplotlib', 'import', 'pyplot', 'as', 'plt', 'if', 'max_frequency', 'is', 'None', ':', 'max_frequency', '=', 'int', '(', 'model', '.', 'data', '[', '"frequency"', ']', '.', 'max', '(', ')', ')', 'if', 'max_recency', 'is', 'None', ':', 'max_recency', '=', 'int', '(', 'model', '.', 'data', '[', '"T"', ']', '.', 'max', '(', ')', ')', 'Z', '=', 'np', '.', 'zeros', '(', '(', 'max_recency', '+', '1', ',', 'max_frequency', '+', '1', ')', ')', 'for', 'i', ',', 'recency', 'in', 'enumerate', '(', 'np', '.', 'arange', '(', 'max_recency', '+', '1', ')', ')', ':', 'for', 'j', ',', 'frequency', 'in', 'enumerate', '(', 'np', '.', 'arange', '(', 'max_frequency', '+', '1', ')', ')', ':', 'Z', '[', 'i', ',', 'j', ']', '=', 'model', '.', 'conditional_expected_number_of_purchases_up_to_time', '(', 'T', ',', 'frequency', ',', 'recency', ',', 'max_recency', ')', 'interpolation', '=', 'kwargs', '.', 'pop', '(', '"interpolation"', ',', '"none"', ')', 'ax', '=', 'plt', '.', 'subplot', '(', '111', ')', 'pcm', '=', 'ax', '.', 'imshow', '(', 'Z', ',', 'interpolation', '=', 'interpolation', ',', '*', '*', 'kwargs', ')', 'plt', '.', 'xlabel', '(', 'xlabel', ')', 'plt', '.', 'ylabel', '(', 'ylabel', ')', 'if', 'title', 'is', 'None', ':', 'title', '=', '(', '"Expected Number of Future Purchases for {} Unit{} of Time,"', '.', 'format', '(', 'T', ',', '"s"', '[', 'T', '==', '1', ':', ']', ')', '+', '"\\nby Frequency and Recency of a Customer"', ')', 'plt', '.', 'title', '(', 'title', ')', '# turn matrix into square', 'forceAspect', '(', 'ax', ')', '# plot colorbar beside matrix', 'plt', '.', 'colorbar', '(', 'pcm', ',', 'ax', '=', 'ax', ')', 'return', 'ax'] | Plot recency frequecy matrix as heatmap.
Plot a figure of expected transactions in T next units of time by a customer's frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
T: float, optional
Next units of time to make predictions for
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot | ['Plot', 'recency', 'frequecy', 'matrix', 'as', 'heatmap', '.'] | train | https://github.com/CamDavidsonPilon/lifetimes/blob/f926308bc03c17c1d12fead729de43885cf13321/lifetimes/plotting.py#L136-L208 |
9,574 | PBR/MQ2 | MQ2/add_qtl_to_map.py | add_qtl_to_map | def add_qtl_to_map(qtlfile, mapfile, outputfile='map_with_qtls.csv'):
""" This function adds to a genetic map for each marker the number
of significant QTLs found.
:arg qtlfile, the output from MapQTL transformed to a csv file via
'parse_mapqtl_file' which contains the closest markers.
:arg mapfile, the genetic map with all the markers.
:kwarg outputfile, the name of the output file in which the map will
be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
map_list[0].append('# QTLs')
markers = []
markers.append(map_list[0])
qtl_cnt = 0
for marker in map_list[1:]:
markers.append(add_qtl_to_marker(marker, qtl_list[1:]))
qtl_cnt = qtl_cnt + int(markers[-1][-1])
LOG.info('- %s markers processed in %s' % (len(markers), mapfile))
LOG.info('- %s QTLs located in the map: %s' % (qtl_cnt, outputfile))
write_matrix(outputfile, markers) | python | def add_qtl_to_map(qtlfile, mapfile, outputfile='map_with_qtls.csv'):
""" This function adds to a genetic map for each marker the number
of significant QTLs found.
:arg qtlfile, the output from MapQTL transformed to a csv file via
'parse_mapqtl_file' which contains the closest markers.
:arg mapfile, the genetic map with all the markers.
:kwarg outputfile, the name of the output file in which the map will
be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
map_list[0].append('# QTLs')
markers = []
markers.append(map_list[0])
qtl_cnt = 0
for marker in map_list[1:]:
markers.append(add_qtl_to_marker(marker, qtl_list[1:]))
qtl_cnt = qtl_cnt + int(markers[-1][-1])
LOG.info('- %s markers processed in %s' % (len(markers), mapfile))
LOG.info('- %s QTLs located in the map: %s' % (qtl_cnt, outputfile))
write_matrix(outputfile, markers) | ['def', 'add_qtl_to_map', '(', 'qtlfile', ',', 'mapfile', ',', 'outputfile', '=', "'map_with_qtls.csv'", ')', ':', 'qtl_list', '=', 'read_input_file', '(', 'qtlfile', ',', "','", ')', 'map_list', '=', 'read_input_file', '(', 'mapfile', ',', "','", ')', 'map_list', '[', '0', ']', '.', 'append', '(', "'# QTLs'", ')', 'markers', '=', '[', ']', 'markers', '.', 'append', '(', 'map_list', '[', '0', ']', ')', 'qtl_cnt', '=', '0', 'for', 'marker', 'in', 'map_list', '[', '1', ':', ']', ':', 'markers', '.', 'append', '(', 'add_qtl_to_marker', '(', 'marker', ',', 'qtl_list', '[', '1', ':', ']', ')', ')', 'qtl_cnt', '=', 'qtl_cnt', '+', 'int', '(', 'markers', '[', '-', '1', ']', '[', '-', '1', ']', ')', 'LOG', '.', 'info', '(', "'- %s markers processed in %s'", '%', '(', 'len', '(', 'markers', ')', ',', 'mapfile', ')', ')', 'LOG', '.', 'info', '(', "'- %s QTLs located in the map: %s'", '%', '(', 'qtl_cnt', ',', 'outputfile', ')', ')', 'write_matrix', '(', 'outputfile', ',', 'markers', ')'] | This function adds to a genetic map for each marker the number
of significant QTLs found.
:arg qtlfile, the output from MapQTL transformed to a csv file via
'parse_mapqtl_file' which contains the closest markers.
:arg mapfile, the genetic map with all the markers.
:kwarg outputfile, the name of the output file in which the map will
be written. | ['This', 'function', 'adds', 'to', 'a', 'genetic', 'map', 'for', 'each', 'marker', 'the', 'number', 'of', 'significant', 'QTLs', 'found', '.'] | train | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/add_qtl_to_map.py#L54-L76 |
9,575 | HumanCellAtlas/dcp-cli | hca/dss/__init__.py | DSSClient.download | def download(self, bundle_uuid, replica, version="", download_dir="",
metadata_files=('*',), data_files=('*',),
num_retries=10, min_delay_seconds=0.25):
"""
Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download, else if not specified, download the latest. The version is a
timestamp of bundle creation in RFC3339
:param str dest_name: The destination file path for the download
:param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be
matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is
set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.
:param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched
case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The
file will be downloaded only if a data file matches any of the patterns in `data_files`.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Download a bundle and save it to the local filesystem as a directory.
By default, all data and metadata files are downloaded. To disable the downloading of data files,
use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).
Likewise for metadata files.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure.
"""
errors = 0
with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
futures_to_dss_file = {executor.submit(task): dss_file
for dss_file, task in self._download_tasks(bundle_uuid,
replica,
version,
download_dir,
metadata_files,
data_files,
num_retries,
min_delay_seconds)}
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors)) | python | def download(self, bundle_uuid, replica, version="", download_dir="",
metadata_files=('*',), data_files=('*',),
num_retries=10, min_delay_seconds=0.25):
"""
Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download, else if not specified, download the latest. The version is a
timestamp of bundle creation in RFC3339
:param str dest_name: The destination file path for the download
:param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be
matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is
set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.
:param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched
case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The
file will be downloaded only if a data file matches any of the patterns in `data_files`.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Download a bundle and save it to the local filesystem as a directory.
By default, all data and metadata files are downloaded. To disable the downloading of data files,
use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).
Likewise for metadata files.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure.
"""
errors = 0
with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
futures_to_dss_file = {executor.submit(task): dss_file
for dss_file, task in self._download_tasks(bundle_uuid,
replica,
version,
download_dir,
metadata_files,
data_files,
num_retries,
min_delay_seconds)}
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors)) | ['def', 'download', '(', 'self', ',', 'bundle_uuid', ',', 'replica', ',', 'version', '=', '""', ',', 'download_dir', '=', '""', ',', 'metadata_files', '=', '(', "'*'", ',', ')', ',', 'data_files', '=', '(', "'*'", ',', ')', ',', 'num_retries', '=', '10', ',', 'min_delay_seconds', '=', '0.25', ')', ':', 'errors', '=', '0', 'with', 'concurrent', '.', 'futures', '.', 'ThreadPoolExecutor', '(', 'self', '.', 'threads', ')', 'as', 'executor', ':', 'futures_to_dss_file', '=', '{', 'executor', '.', 'submit', '(', 'task', ')', ':', 'dss_file', 'for', 'dss_file', ',', 'task', 'in', 'self', '.', '_download_tasks', '(', 'bundle_uuid', ',', 'replica', ',', 'version', ',', 'download_dir', ',', 'metadata_files', ',', 'data_files', ',', 'num_retries', ',', 'min_delay_seconds', ')', '}', 'for', 'future', 'in', 'concurrent', '.', 'futures', '.', 'as_completed', '(', 'futures_to_dss_file', ')', ':', 'dss_file', '=', 'futures_to_dss_file', '[', 'future', ']', 'try', ':', 'future', '.', 'result', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'errors', '+=', '1', 'logger', '.', 'warning', '(', "'Failed to download file %s version %s from replica %s'", ',', 'dss_file', '.', 'uuid', ',', 'dss_file', '.', 'version', ',', 'dss_file', '.', 'replica', ',', 'exc_info', '=', 'e', ')', 'if', 'errors', ':', 'raise', 'RuntimeError', '(', "'{} file(s) failed to download'", '.', 'format', '(', 'errors', ')', ')'] | Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download, else if not specified, download the latest. The version is a
timestamp of bundle creation in RFC3339
:param str dest_name: The destination file path for the download
:param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be
matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is
set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded.
:param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched
case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The
file will be downloaded only if a data file matches any of the patterns in `data_files`.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Download a bundle and save it to the local filesystem as a directory.
By default, all data and metadata files are downloaded. To disable the downloading of data files,
use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically).
Likewise for metadata files.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure. | ['Download', 'a', 'bundle', 'and', 'save', 'it', 'to', 'the', 'local', 'filesystem', 'as', 'a', 'directory', '.'] | train | https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/dss/__init__.py#L87-L140 |
9,576 | dswah/pyGAM | pygam/utils.py | check_y | def check_y(y, link, dist, min_samples=1, verbose=True):
"""
tool to ensure that the targets:
- are in the domain of the link function
- are numerical
- have at least min_samples
- are finite
Parameters
----------
y : array-like
link : Link object
dist : Distribution object
min_samples : int, default: 1
verbose : bool, default: True
whether to print warnings
Returns
-------
y : array containing validated y-data
"""
y = np.ravel(y)
y = check_array(y, force_2d=False, min_samples=min_samples, ndim=1,
name='y data', verbose=verbose)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if np.any(np.isnan(link.link(y, dist))):
raise ValueError('y data is not in domain of {} link function. ' \
'Expected domain: {}, but found {}' \
.format(link, get_link_domain(link, dist),
[float('%.2f'%np.min(y)),
float('%.2f'%np.max(y))]))
return y | python | def check_y(y, link, dist, min_samples=1, verbose=True):
"""
tool to ensure that the targets:
- are in the domain of the link function
- are numerical
- have at least min_samples
- are finite
Parameters
----------
y : array-like
link : Link object
dist : Distribution object
min_samples : int, default: 1
verbose : bool, default: True
whether to print warnings
Returns
-------
y : array containing validated y-data
"""
y = np.ravel(y)
y = check_array(y, force_2d=False, min_samples=min_samples, ndim=1,
name='y data', verbose=verbose)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if np.any(np.isnan(link.link(y, dist))):
raise ValueError('y data is not in domain of {} link function. ' \
'Expected domain: {}, but found {}' \
.format(link, get_link_domain(link, dist),
[float('%.2f'%np.min(y)),
float('%.2f'%np.max(y))]))
return y | ['def', 'check_y', '(', 'y', ',', 'link', ',', 'dist', ',', 'min_samples', '=', '1', ',', 'verbose', '=', 'True', ')', ':', 'y', '=', 'np', '.', 'ravel', '(', 'y', ')', 'y', '=', 'check_array', '(', 'y', ',', 'force_2d', '=', 'False', ',', 'min_samples', '=', 'min_samples', ',', 'ndim', '=', '1', ',', 'name', '=', "'y data'", ',', 'verbose', '=', 'verbose', ')', 'with', 'warnings', '.', 'catch_warnings', '(', ')', ':', 'warnings', '.', 'simplefilter', '(', '"ignore"', ')', 'if', 'np', '.', 'any', '(', 'np', '.', 'isnan', '(', 'link', '.', 'link', '(', 'y', ',', 'dist', ')', ')', ')', ':', 'raise', 'ValueError', '(', "'y data is not in domain of {} link function. '", "'Expected domain: {}, but found {}'", '.', 'format', '(', 'link', ',', 'get_link_domain', '(', 'link', ',', 'dist', ')', ',', '[', 'float', '(', "'%.2f'", '%', 'np', '.', 'min', '(', 'y', ')', ')', ',', 'float', '(', "'%.2f'", '%', 'np', '.', 'max', '(', 'y', ')', ')', ']', ')', ')', 'return', 'y'] | tool to ensure that the targets:
- are in the domain of the link function
- are numerical
- have at least min_samples
    - are finite
Parameters
----------
y : array-like
link : Link object
dist : Distribution object
min_samples : int, default: 1
verbose : bool, default: True
whether to print warnings
Returns
-------
y : array containing validated y-data | ['tool', 'to', 'ensure', 'that', 'the', 'targets', ':', '-', 'are', 'in', 'the', 'domain', 'of', 'the', 'link', 'function', '-', 'are', 'numerical', '-', 'have', 'at', 'least', 'min_samples', '-', 'is', 'finite'] | train | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L195-L230 |
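
The essence of the domain check in check_y can be shown without pyGAM: apply the link function to the targets and reject values whose transform comes back NaN. The log link used here is a stand-in assumption, not pyGAM's Link class.

    import numpy as np

    def assert_in_link_domain(y, link=np.log):
        # Values outside the link's domain (e.g. y < 0 for a log link) transform to NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            transformed = link(np.asarray(y, dtype=float))
        if np.any(np.isnan(transformed)):
            raise ValueError('y data is not in the domain of the link function')
        return y

    assert_in_link_domain([0.5, 1.0, 2.0])    # passes
    # assert_in_link_domain([-1.0, 1.0])      # would raise ValueError
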
9,577 | maas/python-libmaas | maas/client/facade.py | facade | def facade(factory):
"""Declare a method as a facade factory."""
wrapper = FacadeDescriptor(factory.__name__, factory)
return update_wrapper(wrapper, factory) | python | def facade(factory):
"""Declare a method as a facade factory."""
wrapper = FacadeDescriptor(factory.__name__, factory)
return update_wrapper(wrapper, factory) | ['def', 'facade', '(', 'factory', ')', ':', 'wrapper', '=', 'FacadeDescriptor', '(', 'factory', '.', '__name__', ',', 'factory', ')', 'return', 'update_wrapper', '(', 'wrapper', ',', 'factory', ')'] | Declare a method as a facade factory. | ['Declare', 'a', 'method', 'as', 'a', 'facade', 'factory', '.'] | train | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/facade.py#L61-L64 |
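
The facade decorator above wraps a factory in a descriptor defined elsewhere in python-libmaas; the stand-in FacadeDescriptor below is a guess at the minimal behaviour (build on first access, cache on the instance) so the pattern can be run end to end.

    from functools import update_wrapper

    class FacadeDescriptor:
        # Hypothetical minimal descriptor: call the factory on first access and cache the result.
        def __init__(self, name, factory):
            self.name, self.factory = name, factory

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            value = self.factory(obj)
            obj.__dict__[self.name] = value
            return value

    def facade(factory):
        wrapper = FacadeDescriptor(factory.__name__, factory)
        return update_wrapper(wrapper, factory)

    class Client:
        @facade
        def machines(self):
            return ['machine-1', 'machine-2']   # stand-in for building a real facade object

    print(Client().machines)                    # ['machine-1', 'machine-2']
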
9,578 | ParthKolekar/django-answerdiff | answerdiff/models.py | question_image_filepath | def question_image_filepath(instance, filename):
"""
Function DocString
"""
return '/'.join(['images', str(instance.question_level), str(instance.question_level_id), binascii.b2a_hex(os.urandom(15)), filename]) | python | def question_image_filepath(instance, filename):
"""
Function DocString
"""
return '/'.join(['images', str(instance.question_level), str(instance.question_level_id), binascii.b2a_hex(os.urandom(15)), filename]) | ['def', 'question_image_filepath', '(', 'instance', ',', 'filename', ')', ':', 'return', "'/'", '.', 'join', '(', '[', "'images'", ',', 'str', '(', 'instance', '.', 'question_level', ')', ',', 'str', '(', 'instance', '.', 'question_level_id', ')', ',', 'binascii', '.', 'b2a_hex', '(', 'os', '.', 'urandom', '(', '15', ')', ')', ',', 'filename', ']', ')'] | Function DocString | ['Function', 'DocString'] | train | https://github.com/ParthKolekar/django-answerdiff/blob/af4dd31db04431e76384581c3d6c8fbdfba0faf9/answerdiff/models.py#L13-L17 |
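
One caveat with the path builder above: under Python 3, binascii.b2a_hex returns bytes, so joining it with str segments raises a TypeError; the sketch below gets the same randomised component from secrets.token_hex instead. It is illustrative only and not part of django-answerdiff.

    import posixpath
    import secrets

    def upload_path(level, level_id, filename):
        # 30 hex characters of randomness, the same amount as os.urandom(15) above.
        return posixpath.join('images', str(level), str(level_id),
                              secrets.token_hex(15), filename)

    print(upload_path(2, 17, 'diagram.png'))
    # e.g. images/2/17/4f9c1e.../diagram.png
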
9,579 | arne-cl/discoursegraphs | src/discoursegraphs/readwrite/tiger.py | TigerSentenceGraph.__repair_unconnected_nodes | def __repair_unconnected_nodes(self):
"""
Adds a (``dominance_relation``) edge from the sentence root node to all
previously unconnected nodes (token nodes, that either represent a
punctuation mark or are part of a headline 'sentence' that has no
full syntax structure annotation).
"""
unconnected_node_ids = get_unconnected_nodes(self)
if dg.istoken(self, self.root):
# This sentence has no hierarchical structure, i.e. the root
# node is also a terminal / token node.
# We will add a virtual root node to compensate for this.
self.root = self.ns+':VROOT'
self.add_node(self.root,
layers={'tiger', 'tiger:syntax', 'tiger:sentence',
'tiger:sentence:root'})
for unconnected_node_id in unconnected_node_ids:
self.add_edge(self.root, unconnected_node_id,
layers={self.ns, self.ns+':sentence',
self.ns+':unconnected'},
edge_type=EdgeTypes.dominance_relation) | python | def __repair_unconnected_nodes(self):
"""
Adds a (``dominance_relation``) edge from the sentence root node to all
previously unconnected nodes (token nodes, that either represent a
punctuation mark or are part of a headline 'sentence' that has no
full syntax structure annotation).
"""
unconnected_node_ids = get_unconnected_nodes(self)
if dg.istoken(self, self.root):
# This sentence has no hierarchical structure, i.e. the root
# node is also a terminal / token node.
# We will add a virtual root node to compensate for this.
self.root = self.ns+':VROOT'
self.add_node(self.root,
layers={'tiger', 'tiger:syntax', 'tiger:sentence',
'tiger:sentence:root'})
for unconnected_node_id in unconnected_node_ids:
self.add_edge(self.root, unconnected_node_id,
layers={self.ns, self.ns+':sentence',
self.ns+':unconnected'},
edge_type=EdgeTypes.dominance_relation) | ['def', '__repair_unconnected_nodes', '(', 'self', ')', ':', 'unconnected_node_ids', '=', 'get_unconnected_nodes', '(', 'self', ')', 'if', 'dg', '.', 'istoken', '(', 'self', ',', 'self', '.', 'root', ')', ':', '# This sentence has no hierarchical structure, i.e. the root', '# node is also a terminal / token node.', '# We will add a virtual root node to compensate for this.', 'self', '.', 'root', '=', 'self', '.', 'ns', '+', "':VROOT'", 'self', '.', 'add_node', '(', 'self', '.', 'root', ',', 'layers', '=', '{', "'tiger'", ',', "'tiger:syntax'", ',', "'tiger:sentence'", ',', "'tiger:sentence:root'", '}', ')', 'for', 'unconnected_node_id', 'in', 'unconnected_node_ids', ':', 'self', '.', 'add_edge', '(', 'self', '.', 'root', ',', 'unconnected_node_id', ',', 'layers', '=', '{', 'self', '.', 'ns', ',', 'self', '.', 'ns', '+', "':sentence'", ',', 'self', '.', 'ns', '+', "':unconnected'", '}', ',', 'edge_type', '=', 'EdgeTypes', '.', 'dominance_relation', ')'] | Adds a (``dominance_relation``) edge from the sentence root node to all
previously unconnected nodes (token nodes, that either represent a
punctuation mark or are part of a headline 'sentence' that has no
full syntax structure annotation). | ['Adds', 'a', '(', 'dominance_relation', ')', 'edge', 'from', 'the', 'sentence', 'root', 'node', 'to', 'all', 'previously', 'unconnected', 'nodes', '(', 'token', 'nodes', 'that', 'either', 'represent', 'a', 'punctuation', 'mark', 'or', 'are', 'part', 'of', 'a', 'headline', 'sentence', 'that', 'has', 'no', 'full', 'syntax', 'structure', 'annotation', ')', '.'] | train | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/tiger.py#L266-L287 |
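
The repair step above can be pictured with a plain networkx digraph: any node without an incoming dominance edge gets attached to the (possibly virtual) root. The sketch only mirrors the idea; get_unconnected_nodes and the layer bookkeeping are specific to discoursegraphs.

    import networkx as nx

    def attach_unconnected(graph, root):
        # Link every non-root node that has no incoming edge to the root node.
        for node in list(graph.nodes()):
            if node != root and graph.in_degree(node) == 0:
                graph.add_edge(root, node, edge_type='dominance_relation')
        return graph

    g = nx.DiGraph()
    g.add_edge('S', 'NP')
    g.add_node('PUNCT')          # e.g. a stray punctuation token
    attach_unconnected(g, 'S')
    print(list(g.edges()))       # now also contains ('S', 'PUNCT')
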
9,580 | nugget/python-insteonplm | insteonplm/messages/standardSend.py | StandardSend.from_raw_message | def from_raw_message(cls, rawmessage):
"""Create a message from a raw byte stream."""
if (rawmessage[5] &
MESSAGE_FLAG_EXTENDED_0X10) == MESSAGE_FLAG_EXTENDED_0X10:
if len(rawmessage) >= ExtendedSend.receivedSize:
msg = ExtendedSend.from_raw_message(rawmessage)
else:
msg = None
else:
msg = StandardSend(rawmessage[2:5],
{'cmd1': rawmessage[6],
'cmd2': rawmessage[7]},
flags=rawmessage[5],
acknak=rawmessage[8:9])
return msg | python | def from_raw_message(cls, rawmessage):
"""Create a message from a raw byte stream."""
if (rawmessage[5] &
MESSAGE_FLAG_EXTENDED_0X10) == MESSAGE_FLAG_EXTENDED_0X10:
if len(rawmessage) >= ExtendedSend.receivedSize:
msg = ExtendedSend.from_raw_message(rawmessage)
else:
msg = None
else:
msg = StandardSend(rawmessage[2:5],
{'cmd1': rawmessage[6],
'cmd2': rawmessage[7]},
flags=rawmessage[5],
acknak=rawmessage[8:9])
return msg | ['def', 'from_raw_message', '(', 'cls', ',', 'rawmessage', ')', ':', 'if', '(', 'rawmessage', '[', '5', ']', '&', 'MESSAGE_FLAG_EXTENDED_0X10', ')', '==', 'MESSAGE_FLAG_EXTENDED_0X10', ':', 'if', 'len', '(', 'rawmessage', ')', '>=', 'ExtendedSend', '.', 'receivedSize', ':', 'msg', '=', 'ExtendedSend', '.', 'from_raw_message', '(', 'rawmessage', ')', 'else', ':', 'msg', '=', 'None', 'else', ':', 'msg', '=', 'StandardSend', '(', 'rawmessage', '[', '2', ':', '5', ']', ',', '{', "'cmd1'", ':', 'rawmessage', '[', '6', ']', ',', "'cmd2'", ':', 'rawmessage', '[', '7', ']', '}', ',', 'flags', '=', 'rawmessage', '[', '5', ']', ',', 'acknak', '=', 'rawmessage', '[', '8', ':', '9', ']', ')', 'return', 'msg'] | Create a message from a raw byte stream. | ['Create', 'a', 'message', 'from', 'a', 'raw', 'byte', 'stream', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messages/standardSend.py#L47-L61 |
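
The dispatch above turns on one flag bit of the raw byte stream; a standalone sketch of that style of parsing, reusing the field offsets shown in the record but with an invented sample frame, looks like this.

    MESSAGE_FLAG_EXTENDED_0X10 = 0x10

    def parse_send_message(raw: bytes):
        # Field layout mirrored from the record: address, flags, cmd1, cmd2, ack/nak.
        flags = raw[5]
        if flags & MESSAGE_FLAG_EXTENDED_0X10:
            return ('extended', raw)            # would be handed to the extended parser
        return {'address': raw[2:5], 'flags': flags,
                'cmd1': raw[6], 'cmd2': raw[7], 'acknak': raw[8:9]}

    sample = bytes([0x02, 0x62, 0x11, 0x22, 0x33, 0x00, 0x11, 0xFF, 0x06])  # made-up frame
    print(parse_send_message(sample))
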
9,581 | modin-project/modin | modin/experimental/engines/pandas_on_ray/sql.py | is_distributed | def is_distributed(partition_column, lower_bound, upper_bound):
    """ Check if it is possible to distribute a query given these args
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not
"""
if (
(partition_column is not None)
and (lower_bound is not None)
and (upper_bound is not None)
):
if upper_bound > lower_bound:
return True
else:
raise InvalidArguments("upper_bound must be greater than lower_bound.")
elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):
return False
else:
raise InvalidArguments(
"Invalid combination of partition_column, lower_bound, upper_bound."
"All these arguments should be passed (distributed) or none of them (standard pandas)."
        ) | python | def is_distributed(partition_column, lower_bound, upper_bound):
    """ Check if it is possible to distribute a query given these args
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not
"""
if (
(partition_column is not None)
and (lower_bound is not None)
and (upper_bound is not None)
):
if upper_bound > lower_bound:
return True
else:
raise InvalidArguments("upper_bound must be greater than lower_bound.")
elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):
return False
else:
raise InvalidArguments(
"Invalid combination of partition_column, lower_bound, upper_bound."
"All these arguments should be passed (distributed) or none of them (standard pandas)."
) | ['def', 'is_distributed', '(', 'partition_column', ',', 'lower_bound', ',', 'upper_bound', ')', ':', 'if', '(', '(', 'partition_column', 'is', 'not', 'None', ')', 'and', '(', 'lower_bound', 'is', 'not', 'None', ')', 'and', '(', 'upper_bound', 'is', 'not', 'None', ')', ')', ':', 'if', 'upper_bound', '>', 'lower_bound', ':', 'return', 'True', 'else', ':', 'raise', 'InvalidArguments', '(', '"upper_bound must be greater than lower_bound."', ')', 'elif', '(', 'partition_column', 'is', 'None', ')', 'and', '(', 'lower_bound', 'is', 'None', ')', 'and', '(', 'upper_bound', 'is', 'None', ')', ':', 'return', 'False', 'else', ':', 'raise', 'InvalidArguments', '(', '"Invalid combination of partition_column, lower_bound, upper_bound."', '"All these arguments should be passed (distributed) or none of them (standard pandas)."', ')'] | Check if is possible distribute a query given that args
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not | ['Check', 'if', 'is', 'possible', 'distribute', 'a', 'query', 'given', 'that', 'args'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/experimental/engines/pandas_on_ray/sql.py#L5-L31 |
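
Usage is all-or-nothing: either all three bounds are supplied (distributed read) or none (plain pandas). The logic is restated below so the example runs standalone; ValueError stands in for modin's InvalidArguments.

    def is_distributed(partition_column, lower_bound, upper_bound):
        # Same decision logic as the record, reproduced for a self-contained demo.
        given = [partition_column is not None, lower_bound is not None, upper_bound is not None]
        if all(given):
            if upper_bound > lower_bound:
                return True
            raise ValueError('upper_bound must be greater than lower_bound.')
        if not any(given):
            return False
        raise ValueError('Pass partition_column, lower_bound and upper_bound together, or none of them.')

    print(is_distributed('id', 0, 1000))        # True  -> split the query across workers
    print(is_distributed(None, None, None))     # False -> fall back to a single pandas read
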
9,582 | pgxcentre/geneparse | geneparse/extract/__main__.py | vcf_writer | def vcf_writer(parser, keep, extract, args):
"""Writes the data in VCF format."""
# The output
output = sys.stdout if args.output == "-" else open(args.output, "w")
try:
# Getting the samples
samples = np.array(parser.get_samples(), dtype=str)
k = _get_sample_select(samples=samples, keep=keep)
# Writing the VCF header
output.write(_VCF_HEADER.format(
date=datetime.today().strftime("%Y%m%d"),
version=__version__,
samples="\t".join(samples[k]),
))
# The data generator
generator = _get_generator(parser=parser, extract=extract, keep=k,
check_maf=args.maf)
# The number of markers extracted
nb_extracted = 0
for data in generator:
# Keeping only the required genotypes
genotypes = data.genotypes
# Computing the alternative allele frequency
af = np.nanmean(genotypes) / 2
print(data.variant.chrom, data.variant.pos, data.variant.name,
data.reference, data.coded, ".", "PASS", "AF={}".format(af),
"GT:DS", sep="\t", end="", file=output)
for geno in genotypes:
if np.isnan(geno):
output.write("\t./.:.")
else:
rounded_geno = int(round(geno, 0))
output.write("\t{}:{}".format(
_VCF_GT_MAP[rounded_geno], geno,
))
output.write("\n")
nb_extracted += 1
if nb_extracted == 0:
logger.warning("No markers matched the extract list")
finally:
output.close() | python | def vcf_writer(parser, keep, extract, args):
"""Writes the data in VCF format."""
# The output
output = sys.stdout if args.output == "-" else open(args.output, "w")
try:
# Getting the samples
samples = np.array(parser.get_samples(), dtype=str)
k = _get_sample_select(samples=samples, keep=keep)
# Writing the VCF header
output.write(_VCF_HEADER.format(
date=datetime.today().strftime("%Y%m%d"),
version=__version__,
samples="\t".join(samples[k]),
))
# The data generator
generator = _get_generator(parser=parser, extract=extract, keep=k,
check_maf=args.maf)
# The number of markers extracted
nb_extracted = 0
for data in generator:
# Keeping only the required genotypes
genotypes = data.genotypes
# Computing the alternative allele frequency
af = np.nanmean(genotypes) / 2
print(data.variant.chrom, data.variant.pos, data.variant.name,
data.reference, data.coded, ".", "PASS", "AF={}".format(af),
"GT:DS", sep="\t", end="", file=output)
for geno in genotypes:
if np.isnan(geno):
output.write("\t./.:.")
else:
rounded_geno = int(round(geno, 0))
output.write("\t{}:{}".format(
_VCF_GT_MAP[rounded_geno], geno,
))
output.write("\n")
nb_extracted += 1
if nb_extracted == 0:
logger.warning("No markers matched the extract list")
finally:
output.close() | ['def', 'vcf_writer', '(', 'parser', ',', 'keep', ',', 'extract', ',', 'args', ')', ':', '# The output', 'output', '=', 'sys', '.', 'stdout', 'if', 'args', '.', 'output', '==', '"-"', 'else', 'open', '(', 'args', '.', 'output', ',', '"w"', ')', 'try', ':', '# Getting the samples', 'samples', '=', 'np', '.', 'array', '(', 'parser', '.', 'get_samples', '(', ')', ',', 'dtype', '=', 'str', ')', 'k', '=', '_get_sample_select', '(', 'samples', '=', 'samples', ',', 'keep', '=', 'keep', ')', '# Writing the VCF header', 'output', '.', 'write', '(', '_VCF_HEADER', '.', 'format', '(', 'date', '=', 'datetime', '.', 'today', '(', ')', '.', 'strftime', '(', '"%Y%m%d"', ')', ',', 'version', '=', '__version__', ',', 'samples', '=', '"\\t"', '.', 'join', '(', 'samples', '[', 'k', ']', ')', ',', ')', ')', '# The data generator', 'generator', '=', '_get_generator', '(', 'parser', '=', 'parser', ',', 'extract', '=', 'extract', ',', 'keep', '=', 'k', ',', 'check_maf', '=', 'args', '.', 'maf', ')', '# The number of markers extracted', 'nb_extracted', '=', '0', 'for', 'data', 'in', 'generator', ':', '# Keeping only the required genotypes', 'genotypes', '=', 'data', '.', 'genotypes', '# Computing the alternative allele frequency', 'af', '=', 'np', '.', 'nanmean', '(', 'genotypes', ')', '/', '2', 'print', '(', 'data', '.', 'variant', '.', 'chrom', ',', 'data', '.', 'variant', '.', 'pos', ',', 'data', '.', 'variant', '.', 'name', ',', 'data', '.', 'reference', ',', 'data', '.', 'coded', ',', '"."', ',', '"PASS"', ',', '"AF={}"', '.', 'format', '(', 'af', ')', ',', '"GT:DS"', ',', 'sep', '=', '"\\t"', ',', 'end', '=', '""', ',', 'file', '=', 'output', ')', 'for', 'geno', 'in', 'genotypes', ':', 'if', 'np', '.', 'isnan', '(', 'geno', ')', ':', 'output', '.', 'write', '(', '"\\t./.:."', ')', 'else', ':', 'rounded_geno', '=', 'int', '(', 'round', '(', 'geno', ',', '0', ')', ')', 'output', '.', 'write', '(', '"\\t{}:{}"', '.', 'format', '(', '_VCF_GT_MAP', '[', 'rounded_geno', ']', ',', 'geno', ',', ')', ')', 'output', '.', 'write', '(', '"\\n"', ')', 'nb_extracted', '+=', '1', 'if', 'nb_extracted', '==', '0', ':', 'logger', '.', 'warning', '(', '"No markers matched the extract list"', ')', 'finally', ':', 'output', '.', 'close', '(', ')'] | Writes the data in VCF format. | ['Writes', 'the', 'data', 'in', 'VCF', 'format', '.'] | train | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/extract/__main__.py#L133-L184 |
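
The heart of the writer above is converting one (possibly missing) dosage into a GT:DS sample column. A stripped-down sketch follows; the 0 -> 0/0, 1 -> 0/1, 2 -> 1/1 genotype map is an assumption, since the record references _VCF_GT_MAP without showing it.

    import numpy as np

    GT_MAP = {0: '0/0', 1: '0/1', 2: '1/1'}    # assumed mapping

    def format_sample(dosage):
        # Missing genotypes become './.:.', otherwise GT comes from the rounded dosage.
        if np.isnan(dosage):
            return './.:.'
        return '{}:{}'.format(GT_MAP[int(round(dosage))], dosage)

    print([format_sample(d) for d in (0.0, 0.96, float('nan'), 2.0)])
    # ['0/0:0.0', '0/1:0.96', './.:.', '1/1:2.0']
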
9,583 | yvesalexandre/bandicoot | bandicoot/network.py | network_sampling | def network_sampling(n, filename, directory=None, snowball=False, user=None):
"""
Selects a few users and exports a CSV of indicators for them.
TODO: Returns the network/graph between the selected users.
Parameters
----------
n : int
Number of users to select.
filename : string
File to export to.
directory: string
Directory to select users from if using the default random selection.
snowball: starts from a specified user, iterates over neighbors, and does a
BFS until n neighbors are reached
"""
if snowball:
if user is None:
raise ValueError("Must specify a starting user from whom to initiate the snowball")
else:
users, agenda = [user], [user]
while len(agenda) > 0:
parent = agenda.pop()
dealphebetized_network = sorted(parent.network.items(), key=lambda k: random.random())
for neighbor in dealphebetized_network:
if neighbor[1] not in users and neighbor[1] is not None and len(users) < n:
users.append(neighbor[1])
if neighbor[1].network:
                        agenda.append(neighbor[1])
else:
files = [x for x in os.listdir(directory) if os.path.isfile(os.path.join(directory, x))]
shuffled_files = sorted(files, key=lambda k: random.random())
user_names = shuffled_files[:n]
users = [bc.read_csv(u[:-4], directory) for u in user_names]
if len(users) < n:
raise ValueError("Specified more users than records that exist, only {} records available".format(len(users)))
bc.to_csv([bc.utils.all(u) for u in users], filename) | python | def network_sampling(n, filename, directory=None, snowball=False, user=None):
"""
Selects a few users and exports a CSV of indicators for them.
TODO: Returns the network/graph between the selected users.
Parameters
----------
n : int
Number of users to select.
filename : string
File to export to.
directory: string
Directory to select users from if using the default random selection.
snowball: starts from a specified user, iterates over neighbors, and does a
BFS until n neighbors are reached
"""
if snowball:
if user is None:
raise ValueError("Must specify a starting user from whom to initiate the snowball")
else:
users, agenda = [user], [user]
while len(agenda) > 0:
parent = agenda.pop()
dealphebetized_network = sorted(parent.network.items(), key=lambda k: random.random())
for neighbor in dealphebetized_network:
if neighbor[1] not in users and neighbor[1] is not None and len(users) < n:
users.append(neighbor[1])
if neighbor[1].network:
                        agenda.append(neighbor[1])
else:
files = [x for x in os.listdir(directory) if os.path.isfile(os.path.join(directory, x))]
shuffled_files = sorted(files, key=lambda k: random.random())
user_names = shuffled_files[:n]
users = [bc.read_csv(u[:-4], directory) for u in user_names]
if len(users) < n:
raise ValueError("Specified more users than records that exist, only {} records available".format(len(users)))
bc.to_csv([bc.utils.all(u) for u in users], filename) | ['def', 'network_sampling', '(', 'n', ',', 'filename', ',', 'directory', '=', 'None', ',', 'snowball', '=', 'False', ',', 'user', '=', 'None', ')', ':', 'if', 'snowball', ':', 'if', 'user', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Must specify a starting user from whom to initiate the snowball"', ')', 'else', ':', 'users', ',', 'agenda', '=', '[', 'user', ']', ',', '[', 'user', ']', 'while', 'len', '(', 'agenda', ')', '>', '0', ':', 'parent', '=', 'agenda', '.', 'pop', '(', ')', 'dealphebetized_network', '=', 'sorted', '(', 'parent', '.', 'network', '.', 'items', '(', ')', ',', 'key', '=', 'lambda', 'k', ':', 'random', '.', 'random', '(', ')', ')', 'for', 'neighbor', 'in', 'dealphebetized_network', ':', 'if', 'neighbor', '[', '1', ']', 'not', 'in', 'users', 'and', 'neighbor', '[', '1', ']', 'is', 'not', 'None', 'and', 'len', '(', 'users', ')', '<', 'n', ':', 'users', '.', 'append', '(', 'neighbor', '[', '1', ']', ')', 'if', 'neighbor', '[', '1', ']', '.', 'network', ':', 'agenda', '.', 'push', '(', 'neighbor', '[', '1', ']', ')', 'else', ':', 'files', '=', '[', 'x', 'for', 'x', 'in', 'os', '.', 'listdir', '(', 'directory', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', 'x', ')', ')', ']', 'shuffled_files', '=', 'sorted', '(', 'files', ',', 'key', '=', 'lambda', 'k', ':', 'random', '.', 'random', '(', ')', ')', 'user_names', '=', 'shuffled_files', '[', ':', 'n', ']', 'users', '=', '[', 'bc', '.', 'read_csv', '(', 'u', '[', ':', '-', '4', ']', ',', 'directory', ')', 'for', 'u', 'in', 'user_names', ']', 'if', 'len', '(', 'users', ')', '<', 'n', ':', 'raise', 'ValueError', '(', '"Specified more users than records that exist, only {} records available"', '.', 'format', '(', 'len', '(', 'users', ')', ')', ')', 'bc', '.', 'to_csv', '(', '[', 'bc', '.', 'utils', '.', 'all', '(', 'u', ')', 'for', 'u', 'in', 'users', ']', ',', 'filename', ')'] | Selects a few users and exports a CSV of indicators for them.
TODO: Returns the network/graph between the selected users.
Parameters
----------
n : int
Number of users to select.
filename : string
File to export to.
directory: string
Directory to select users from if using the default random selection.
snowball: starts from a specified user, iterates over neighbors, and does a
BFS until n neighbors are reached | ['Selects', 'a', 'few', 'users', 'and', 'exports', 'a', 'CSV', 'of', 'indicators', 'for', 'them', '.'] | train | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/network.py#L317-L355 |
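
The snowball branch above is a breadth-first crawl over each user's network. A self-contained version over a plain dict of neighbours, with collections.deque as the frontier and strings standing in for bandicoot User objects, looks roughly like this.

    import random
    from collections import deque

    def snowball_sample(network, start, n):
        # network: dict mapping a user to an iterable of neighbours.
        users, frontier = [start], deque([start])
        while frontier and len(users) < n:
            parent = frontier.popleft()
            neighbours = sorted(network.get(parent, ()), key=lambda _: random.random())
            for neighbour in neighbours:
                if neighbour not in users and len(users) < n:
                    users.append(neighbour)
                    frontier.append(neighbour)
        return users

    net = {'a': ['b', 'c'], 'b': ['d'], 'c': ['a'], 'd': []}
    print(snowball_sample(net, 'a', 3))   # e.g. ['a', 'c', 'b']
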
9,584 | 3ll3d00d/vibe | backend/src/analyser/common/measurementcontroller.py | MeasurementController._deleteCompletedMeasurement | def _deleteCompletedMeasurement(self, measurementId):
"""
Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted
"""
message, count, deleted = self.deleteFrom(measurementId, self.completeMeasurements)
if count is 0:
message, count, deleted = self.deleteFrom(measurementId, self.failedMeasurements)
return message, count, deleted | python | def _deleteCompletedMeasurement(self, measurementId):
"""
Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted
"""
message, count, deleted = self.deleteFrom(measurementId, self.completeMeasurements)
if count is 0:
message, count, deleted = self.deleteFrom(measurementId, self.failedMeasurements)
return message, count, deleted | ['def', '_deleteCompletedMeasurement', '(', 'self', ',', 'measurementId', ')', ':', 'message', ',', 'count', ',', 'deleted', '=', 'self', '.', 'deleteFrom', '(', 'measurementId', ',', 'self', '.', 'completeMeasurements', ')', 'if', 'count', 'is', '0', ':', 'message', ',', 'count', ',', 'deleted', '=', 'self', '.', 'deleteFrom', '(', 'measurementId', ',', 'self', '.', 'failedMeasurements', ')', 'return', 'message', ',', 'count', ',', 'deleted'] | Deletes the named measurement from the completed measurement store if it exists.
:param measurementId:
:return:
String: error messages
Integer: count of measurements deleted | ['Deletes', 'the', 'named', 'measurement', 'from', 'the', 'completed', 'measurement', 'store', 'if', 'it', 'exists', '.', ':', 'param', 'measurementId', ':', ':', 'return', ':', 'String', ':', 'error', 'messages', 'Integer', ':', 'count', 'of', 'measurements', 'deleted'] | train | https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/common/measurementcontroller.py#L434-L445 |
9,585 | python-diamond/Diamond | src/collectors/tokumx/tokumx.py | TokuMXCollector._publish_metrics | def _publish_metrics(self, prev_keys, key, data, publishfn=None):
"""Recursively publish keys"""
if key not in data:
return
value = data[key]
keys = prev_keys + [key]
if not publishfn:
publishfn = self.publish
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(keys, new_key, value)
elif isinstance(value, int) or isinstance(value, float):
publishfn('.'.join(keys), value)
elif isinstance(value, long):
publishfn('.'.join(keys), float(value)) | python | def _publish_metrics(self, prev_keys, key, data, publishfn=None):
"""Recursively publish keys"""
if key not in data:
return
value = data[key]
keys = prev_keys + [key]
if not publishfn:
publishfn = self.publish
if isinstance(value, dict):
for new_key in value:
self._publish_metrics(keys, new_key, value)
elif isinstance(value, int) or isinstance(value, float):
publishfn('.'.join(keys), value)
elif isinstance(value, long):
publishfn('.'.join(keys), float(value)) | ['def', '_publish_metrics', '(', 'self', ',', 'prev_keys', ',', 'key', ',', 'data', ',', 'publishfn', '=', 'None', ')', ':', 'if', 'key', 'not', 'in', 'data', ':', 'return', 'value', '=', 'data', '[', 'key', ']', 'keys', '=', 'prev_keys', '+', '[', 'key', ']', 'if', 'not', 'publishfn', ':', 'publishfn', '=', 'self', '.', 'publish', 'if', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'for', 'new_key', 'in', 'value', ':', 'self', '.', '_publish_metrics', '(', 'keys', ',', 'new_key', ',', 'value', ')', 'elif', 'isinstance', '(', 'value', ',', 'int', ')', 'or', 'isinstance', '(', 'value', ',', 'float', ')', ':', 'publishfn', '(', "'.'", '.', 'join', '(', 'keys', ')', ',', 'value', ')', 'elif', 'isinstance', '(', 'value', ',', 'long', ')', ':', 'publishfn', '(', "'.'", '.', 'join', '(', 'keys', ')', ',', 'float', '(', 'value', ')', ')'] | Recursively publish keys | ['Recursively', 'publish', 'keys'] | train | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/tokumx/tokumx.py#L251-L265 |
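
Outside of Diamond, the recursive walk above is just flattening a nested dict into dotted metric names while keeping numeric leaves. A standalone equivalent:

    def flatten_metrics(data, prefix=()):
        # Yield ('a.b.c', value) pairs for every numeric leaf of a nested dict.
        for key, value in data.items():
            path = prefix + (str(key),)
            if isinstance(value, dict):
                yield from flatten_metrics(value, path)
            elif isinstance(value, (int, float)):
                yield '.'.join(path), value

    status = {'connections': {'current': 12, 'available': 500}, 'ok': 1.0, 'host': 'db1'}
    print(dict(flatten_metrics(status)))
    # {'connections.current': 12, 'connections.available': 500, 'ok': 1.0}
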
9,586 | wmayner/pyphi | pyphi/macro.py | all_groupings | def all_groupings(partition):
"""Return all possible groupings of states for a particular coarse graining
(partition) of a network.
Args:
partition (tuple[tuple]): A partition of micro-elements into macro
elements.
Yields:
tuple[tuple[tuple]]: A grouping of micro-states into macro states of
system.
TODO: document exactly how to interpret the grouping.
"""
if not all(partition):
raise ValueError('Each part of the partition must have at least one '
'element.')
micro_groupings = [_partitions_list(len(part) + 1) if len(part) > 1
else [[[0], [1]]] for part in partition]
for grouping in itertools.product(*micro_groupings):
if all(len(element) < 3 for element in grouping):
yield tuple(tuple(tuple(tuple(state) for state in states)
for states in grouping)) | python | def all_groupings(partition):
"""Return all possible groupings of states for a particular coarse graining
(partition) of a network.
Args:
partition (tuple[tuple]): A partition of micro-elements into macro
elements.
Yields:
tuple[tuple[tuple]]: A grouping of micro-states into macro states of
system.
TODO: document exactly how to interpret the grouping.
"""
if not all(partition):
raise ValueError('Each part of the partition must have at least one '
'element.')
micro_groupings = [_partitions_list(len(part) + 1) if len(part) > 1
else [[[0], [1]]] for part in partition]
for grouping in itertools.product(*micro_groupings):
if all(len(element) < 3 for element in grouping):
yield tuple(tuple(tuple(tuple(state) for state in states)
for states in grouping)) | ['def', 'all_groupings', '(', 'partition', ')', ':', 'if', 'not', 'all', '(', 'partition', ')', ':', 'raise', 'ValueError', '(', "'Each part of the partition must have at least one '", "'element.'", ')', 'micro_groupings', '=', '[', '_partitions_list', '(', 'len', '(', 'part', ')', '+', '1', ')', 'if', 'len', '(', 'part', ')', '>', '1', 'else', '[', '[', '[', '0', ']', ',', '[', '1', ']', ']', ']', 'for', 'part', 'in', 'partition', ']', 'for', 'grouping', 'in', 'itertools', '.', 'product', '(', '*', 'micro_groupings', ')', ':', 'if', 'all', '(', 'len', '(', 'element', ')', '<', '3', 'for', 'element', 'in', 'grouping', ')', ':', 'yield', 'tuple', '(', 'tuple', '(', 'tuple', '(', 'tuple', '(', 'state', ')', 'for', 'state', 'in', 'states', ')', 'for', 'states', 'in', 'grouping', ')', ')'] | Return all possible groupings of states for a particular coarse graining
(partition) of a network.
Args:
partition (tuple[tuple]): A partition of micro-elements into macro
elements.
Yields:
tuple[tuple[tuple]]: A grouping of micro-states into macro states of
system.
TODO: document exactly how to interpret the grouping. | ['Return', 'all', 'possible', 'groupings', 'of', 'states', 'for', 'a', 'particular', 'coarse', 'graining', '(', 'partition', ')', 'of', 'a', 'network', '.'] | train | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L707-L731 |
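
The generator above pairs one grouping choice per part via itertools.product and keeps only combinations whose parts use fewer than three state groups. The combination step can be seen in isolation with hand-written options standing in for _partitions_list output.

    import itertools

    micro_groupings = [
        [[[0], [1]], [[0, 1], [2]]],   # two candidate groupings for a two-element part (made up)
        [[[0], [1]]],                  # a one-element part keeps its two states apart
    ]

    for grouping in itertools.product(*micro_groupings):
        # Keep combinations where every part has fewer than three state groups.
        if all(len(element) < 3 for element in grouping):
            print(grouping)
    # Prints the two admissible combinations of the options above.
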
9,587 | heikomuller/sco-datastore | scodata/experiment.py | DefaultExperimentManager.to_dict | def to_dict(self, experiment):
"""Create a Json-like object for an experiment. Extends the basic
object with subject, image group, and (optional) functional data
identifiers.
Parameters
----------
experiment : ExperimentHandle
Returns
-------
Json Object
Json-like object, i.e., dictionary.
"""
# Get the basic Json object from the super class
json_obj = super(DefaultExperimentManager, self).to_dict(experiment)
# Add associated object references
json_obj['subject'] = experiment.subject_id
json_obj['images'] = experiment.image_group_id
if not experiment.fmri_data_id is None:
json_obj['fmri'] = experiment.fmri_data_id
return json_obj | python | def to_dict(self, experiment):
"""Create a Json-like object for an experiment. Extends the basic
object with subject, image group, and (optional) functional data
identifiers.
Parameters
----------
experiment : ExperimentHandle
Returns
-------
Json Object
Json-like object, i.e., dictionary.
"""
# Get the basic Json object from the super class
json_obj = super(DefaultExperimentManager, self).to_dict(experiment)
# Add associated object references
json_obj['subject'] = experiment.subject_id
json_obj['images'] = experiment.image_group_id
if not experiment.fmri_data_id is None:
json_obj['fmri'] = experiment.fmri_data_id
return json_obj | ['def', 'to_dict', '(', 'self', ',', 'experiment', ')', ':', '# Get the basic Json object from the super class', 'json_obj', '=', 'super', '(', 'DefaultExperimentManager', ',', 'self', ')', '.', 'to_dict', '(', 'experiment', ')', '# Add associated object references', 'json_obj', '[', "'subject'", ']', '=', 'experiment', '.', 'subject_id', 'json_obj', '[', "'images'", ']', '=', 'experiment', '.', 'image_group_id', 'if', 'not', 'experiment', '.', 'fmri_data_id', 'is', 'None', ':', 'json_obj', '[', "'fmri'", ']', '=', 'experiment', '.', 'fmri_data_id', 'return', 'json_obj'] | Create a Json-like object for an experiment. Extends the basic
object with subject, image group, and (optional) functional data
identifiers.
Parameters
----------
experiment : ExperimentHandle
Returns
-------
Json Object
Json-like object, i.e., dictionary. | ['Create', 'a', 'Json', '-', 'like', 'object', 'for', 'an', 'experiment', '.', 'Extends', 'the', 'basic', 'object', 'with', 'subject', 'image', 'group', 'and', '(', 'optional', ')', 'functional', 'data', 'identifiers', '.'] | train | https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/experiment.py#L226-L247 |
9,588 | zerwes/hiyapyco | hiyapyco/odyldo.py | safe_dump | def safe_dump(data, stream=None, **kwds):
"""implementation of safe dumper using Ordered Dict Yaml Dumper"""
return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds) | python | def safe_dump(data, stream=None, **kwds):
"""implementation of safe dumper using Ordered Dict Yaml Dumper"""
return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds) | ['def', 'safe_dump', '(', 'data', ',', 'stream', '=', 'None', ',', '*', '*', 'kwds', ')', ':', 'return', 'yaml', '.', 'dump', '(', 'data', ',', 'stream', '=', 'stream', ',', 'Dumper', '=', 'ODYD', ',', '*', '*', 'kwds', ')'] | implementation of safe dumper using Ordered Dict Yaml Dumper | ['implementation', 'of', 'safe', 'dumper', 'using', 'Ordered', 'Dict', 'Yaml', 'Dumper'] | train | https://github.com/zerwes/hiyapyco/blob/b0b42724cc13b1412f5bb5d92fd4c637d6615edb/hiyapyco/odyldo.py#L76-L78 |
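
ODYD above is an ordered-dict-aware Dumper defined elsewhere in hiyapyco. The usual PyYAML recipe for such a dumper, assumed here rather than imported, registers a representer that emits OrderedDicts as ordinary mappings in insertion order.

    from collections import OrderedDict
    import yaml

    class OrderedDumper(yaml.SafeDumper):
        pass   # illustrative stand-in, not hiyapyco's ODYD itself

    def _represent_ordereddict(dumper, data):
        # Emit the OrderedDict as a normal YAML mapping, preserving key order.
        return dumper.represent_dict(data.items())

    OrderedDumper.add_representer(OrderedDict, _represent_ordereddict)

    doc = OrderedDict([('first', 1), ('second', 2)])
    print(yaml.dump(doc, Dumper=OrderedDumper, default_flow_style=False))
    # first: 1
    # second: 2
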
9,589 | dls-controls/pymalcolm | malcolm/core/loggable.py | Loggable.set_logger | def set_logger(self, **fields):
"""Change the name of the logger that log.* should call
Args:
**fields: Extra fields to be logged. Logger name will be:
".".join([<module_name>, <cls_name>] + fields_sorted_on_key)
"""
names = [self.__module__, self.__class__.__name__]
for field, value in sorted(fields.items()):
names.append(value)
# names should be something like this for one field:
# ["malcolm.modules.scanning.controllers.runnablecontroller",
# "RunnableController", "BL45P-ML-SCAN-01"]
self.log = logging.getLogger(".".join(names))
if fields:
self.log.addFilter(FieldFilter(fields))
return self.log | python | def set_logger(self, **fields):
"""Change the name of the logger that log.* should call
Args:
**fields: Extra fields to be logged. Logger name will be:
".".join([<module_name>, <cls_name>] + fields_sorted_on_key)
"""
names = [self.__module__, self.__class__.__name__]
for field, value in sorted(fields.items()):
names.append(value)
# names should be something like this for one field:
# ["malcolm.modules.scanning.controllers.runnablecontroller",
# "RunnableController", "BL45P-ML-SCAN-01"]
self.log = logging.getLogger(".".join(names))
if fields:
self.log.addFilter(FieldFilter(fields))
return self.log | ['def', 'set_logger', '(', 'self', ',', '*', '*', 'fields', ')', ':', 'names', '=', '[', 'self', '.', '__module__', ',', 'self', '.', '__class__', '.', '__name__', ']', 'for', 'field', ',', 'value', 'in', 'sorted', '(', 'fields', '.', 'items', '(', ')', ')', ':', 'names', '.', 'append', '(', 'value', ')', '# names should be something like this for one field:', '# ["malcolm.modules.scanning.controllers.runnablecontroller",', '# "RunnableController", "BL45P-ML-SCAN-01"]', 'self', '.', 'log', '=', 'logging', '.', 'getLogger', '(', '"."', '.', 'join', '(', 'names', ')', ')', 'if', 'fields', ':', 'self', '.', 'log', '.', 'addFilter', '(', 'FieldFilter', '(', 'fields', ')', ')', 'return', 'self', '.', 'log'] | Change the name of the logger that log.* should call
Args:
**fields: Extra fields to be logged. Logger name will be:
".".join([<module_name>, <cls_name>] + fields_sorted_on_key) | ['Change', 'the', 'name', 'of', 'the', 'logger', 'that', 'log', '.', '*', 'should', 'call'] | train | https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/loggable.py#L20-L36 |
9,590 | alvarogzp/telegram-bot-framework | bot/multithreading/scheduler.py | SchedulerApi.set_callbacks | def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
"""
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
"""
# We are setting self.worker_start_callback and self.worker_end_callback
# to lambdas instead of saving them in private vars and moving the lambda logic
# to a member function for, among other reasons, making callback updates atomic,
# ie. once a callback has been posted, it will be executed as it was in that
# moment, any call to set_callbacks will only affect callbacks posted since they
# were updated, but not to any pending callback.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
)) | python | def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False):
"""
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
"""
# We are setting self.worker_start_callback and self.worker_end_callback
# to lambdas instead of saving them in private vars and moving the lambda logic
# to a member function for, among other reasons, making callback updates atomic,
# ie. once a callback has been posted, it will be executed as it was in that
# moment, any call to set_callbacks will only affect callbacks posted since they
# were updated, but not to any pending callback.
# If callback is async, execute the start callback in the calling thread
scheduler = self.immediate if are_async else self.background
self.worker_start_callback = lambda worker: scheduler(Work(
lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name
))
# As the end callback is called *just* before the thread dies,
# there is no problem running it on the thread
self.worker_end_callback = lambda worker: self.immediate(Work(
lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name
)) | ['def', 'set_callbacks', '(', 'self', ',', 'worker_start_callback', ':', 'callable', ',', 'worker_end_callback', ':', 'callable', ',', 'are_async', ':', 'bool', '=', 'False', ')', ':', '# We are setting self.worker_start_callback and self.worker_end_callback', '# to lambdas instead of saving them in private vars and moving the lambda logic', '# to a member function for, among other reasons, making callback updates atomic,', '# ie. once a callback has been posted, it will be executed as it was in that', '# moment, any call to set_callbacks will only affect callbacks posted since they', '# were updated, but not to any pending callback.', '# If callback is async, execute the start callback in the calling thread', 'scheduler', '=', 'self', '.', 'immediate', 'if', 'are_async', 'else', 'self', '.', 'background', 'self', '.', 'worker_start_callback', '=', 'lambda', 'worker', ':', 'scheduler', '(', 'Work', '(', 'lambda', ':', 'worker_start_callback', '(', 'worker', ')', ',', '"worker_start_callback:"', '+', 'worker', '.', 'name', ')', ')', '# As the end callback is called *just* before the thread dies,', '# there is no problem running it on the thread', 'self', '.', 'worker_end_callback', '=', 'lambda', 'worker', ':', 'self', '.', 'immediate', '(', 'Work', '(', 'lambda', ':', 'worker_end_callback', '(', 'worker', ')', ',', '"worker_end_callback:"', '+', 'worker', '.', 'name', ')', ')'] | :param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread. | [':', 'param', 'are_async', ':', 'True', 'if', 'the', 'callbacks', 'execute', 'asynchronously', 'posting', 'any', 'heavy', 'work', 'to', 'another', 'thread', '.'] | train | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/scheduler.py#L59-L80 |
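
The comments above are about making the callback swap atomic: each posted job captures the lambda that existed when it was queued, so a later set_callbacks never rewrites pending work. A small standalone illustration of that capture behaviour, with Work and the schedulers reduced to stand-ins:

    class Work:
        # Simplified stand-in for the scheduler's Work unit: a named callable.
        def __init__(self, func, name):
            self.func, self.name = func, name
        def run(self):
            return self.func()

    queue = []

    def make_start_poster(callback):
        # Freeze the current callback inside a lambda; replacing the poster later
        # never changes Work items that were already queued.
        return lambda worker: queue.append(Work(lambda: callback(worker), 'start:' + worker))

    poster = make_start_poster(lambda w: print('v1 saw', w))
    poster('worker-a')
    poster = make_start_poster(lambda w: print('v2 saw', w))
    poster('worker-b')

    for work in queue:
        work.run()   # v1 saw worker-a, then v2 saw worker-b
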
9,591 | emc-openstack/storops | storops/unity/resource/port.py | UnityIpPort.get_physical_port | def get_physical_port(self):
"""Returns the link aggregation object or the ethernet port object."""
obj = None
if self.is_link_aggregation():
obj = UnityLinkAggregation.get(self._cli, self.get_id())
else:
obj = UnityEthernetPort.get(self._cli, self.get_id())
return obj | python | def get_physical_port(self):
"""Returns the link aggregation object or the ethernet port object."""
obj = None
if self.is_link_aggregation():
obj = UnityLinkAggregation.get(self._cli, self.get_id())
else:
obj = UnityEthernetPort.get(self._cli, self.get_id())
return obj | ['def', 'get_physical_port', '(', 'self', ')', ':', 'obj', '=', 'None', 'if', 'self', '.', 'is_link_aggregation', '(', ')', ':', 'obj', '=', 'UnityLinkAggregation', '.', 'get', '(', 'self', '.', '_cli', ',', 'self', '.', 'get_id', '(', ')', ')', 'else', ':', 'obj', '=', 'UnityEthernetPort', '.', 'get', '(', 'self', '.', '_cli', ',', 'self', '.', 'get_id', '(', ')', ')', 'return', 'obj'] | Returns the link aggregation object or the ethernet port object. | ['Returns', 'the', 'link', 'aggregation', 'object', 'or', 'the', 'ethernet', 'port', 'object', '.'] | train | https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/port.py#L51-L58 |
9,592 | mitsei/dlkit | dlkit/handcar/repository/managers.py | RepositoryProxyManager.get_asset_notification_session | def get_asset_notification_session(self, asset_receiver, proxy):
"""Gets the notification session for notifications pertaining to
asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetNotificationSession) - an
AssetNotificationSession
raise: NullArgument - asset_receiver is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_notification() is false
compliance: optional - This method must be implemented if
supports_asset_notification() is true.
"""
if asset_receiver is None:
raise NullArgument()
if not self.supports_asset_notification():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetNotificationSession(asset_receiver, proxy, runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session | python | def get_asset_notification_session(self, asset_receiver, proxy):
"""Gets the notification session for notifications pertaining to
asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetNotificationSession) - an
AssetNotificationSession
raise: NullArgument - asset_receiver is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_notification() is false
compliance: optional - This method must be implemented if
supports_asset_notification() is true.
"""
if asset_receiver is None:
raise NullArgument()
if not self.supports_asset_notification():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetNotificationSession(asset_receiver, proxy, runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session | ['def', 'get_asset_notification_session', '(', 'self', ',', 'asset_receiver', ',', 'proxy', ')', ':', 'if', 'asset_receiver', 'is', 'None', ':', 'raise', 'NullArgument', '(', ')', 'if', 'not', 'self', '.', 'supports_asset_notification', '(', ')', ':', 'raise', 'Unimplemented', '(', ')', 'try', ':', 'from', '.', 'import', 'sessions', 'except', 'ImportError', ':', 'raise', '# OperationFailed()', 'proxy', '=', 'self', '.', '_convert_proxy', '(', 'proxy', ')', 'try', ':', 'session', '=', 'sessions', '.', 'AssetNotificationSession', '(', 'asset_receiver', ',', 'proxy', ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'except', 'AttributeError', ':', 'raise', '# OperationFailed()', 'return', 'session'] | Gets the notification session for notifications pertaining to
asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetNotificationSession) - an
AssetNotificationSession
raise: NullArgument - asset_receiver is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_notification() is false
compliance: optional - This method must be implemented if
supports_asset_notification() is true. | ['Gets', 'the', 'notification', 'session', 'for', 'notifications', 'pertaining', 'to', 'asset', 'changes', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L2156-L2185 |
9,593 | 4Catalyzer/flask-resty | flask_resty/api.py | Api.add_resource | def add_resource(
self,
base_rule,
base_view,
alternate_view=None,
alternate_rule=None,
id_rule=None,
app=None,
):
"""Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present.
"""
if alternate_view:
if not alternate_rule:
id_rule = id_rule or DEFAULT_ID_RULE
alternate_rule = posixpath.join(base_rule, id_rule)
else:
assert id_rule is None
else:
assert alternate_rule is None
assert id_rule is None
app = self._get_app(app)
endpoint = self._get_endpoint(base_view, alternate_view)
# Store the view rules for reference. Doesn't support multiple routes
# mapped to same view.
views = app.extensions['resty'].views
base_rule_full = '{}{}'.format(self.prefix, base_rule)
base_view_func = base_view.as_view(endpoint)
if not alternate_view:
app.add_url_rule(base_rule_full, view_func=base_view_func)
views[base_view] = Resource(base_view, base_rule_full)
return
alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule)
alternate_view_func = alternate_view.as_view(endpoint)
@functools.wraps(base_view_func)
def view_func(*args, **kwargs):
if flask.request.url_rule.rule == base_rule_full:
return base_view_func(*args, **kwargs)
else:
return alternate_view_func(*args, **kwargs)
app.add_url_rule(
base_rule_full, view_func=view_func, endpoint=endpoint,
methods=base_view.methods,
)
app.add_url_rule(
alternate_rule_full, view_func=view_func, endpoint=endpoint,
methods=alternate_view.methods,
)
views[base_view] = Resource(base_view, base_rule_full)
views[alternate_view] = Resource(alternate_view, alternate_rule_full) | python | def add_resource(
self,
base_rule,
base_view,
alternate_view=None,
alternate_rule=None,
id_rule=None,
app=None,
):
"""Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present.
"""
if alternate_view:
if not alternate_rule:
id_rule = id_rule or DEFAULT_ID_RULE
alternate_rule = posixpath.join(base_rule, id_rule)
else:
assert id_rule is None
else:
assert alternate_rule is None
assert id_rule is None
app = self._get_app(app)
endpoint = self._get_endpoint(base_view, alternate_view)
# Store the view rules for reference. Doesn't support multiple routes
# mapped to same view.
views = app.extensions['resty'].views
base_rule_full = '{}{}'.format(self.prefix, base_rule)
base_view_func = base_view.as_view(endpoint)
if not alternate_view:
app.add_url_rule(base_rule_full, view_func=base_view_func)
views[base_view] = Resource(base_view, base_rule_full)
return
alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule)
alternate_view_func = alternate_view.as_view(endpoint)
@functools.wraps(base_view_func)
def view_func(*args, **kwargs):
if flask.request.url_rule.rule == base_rule_full:
return base_view_func(*args, **kwargs)
else:
return alternate_view_func(*args, **kwargs)
app.add_url_rule(
base_rule_full, view_func=view_func, endpoint=endpoint,
methods=base_view.methods,
)
app.add_url_rule(
alternate_rule_full, view_func=view_func, endpoint=endpoint,
methods=alternate_view.methods,
)
views[base_view] = Resource(base_view, base_rule_full)
views[alternate_view] = Resource(alternate_view, alternate_rule_full) | ['def', 'add_resource', '(', 'self', ',', 'base_rule', ',', 'base_view', ',', 'alternate_view', '=', 'None', ',', 'alternate_rule', '=', 'None', ',', 'id_rule', '=', 'None', ',', 'app', '=', 'None', ',', ')', ':', 'if', 'alternate_view', ':', 'if', 'not', 'alternate_rule', ':', 'id_rule', '=', 'id_rule', 'or', 'DEFAULT_ID_RULE', 'alternate_rule', '=', 'posixpath', '.', 'join', '(', 'base_rule', ',', 'id_rule', ')', 'else', ':', 'assert', 'id_rule', 'is', 'None', 'else', ':', 'assert', 'alternate_rule', 'is', 'None', 'assert', 'id_rule', 'is', 'None', 'app', '=', 'self', '.', '_get_app', '(', 'app', ')', 'endpoint', '=', 'self', '.', '_get_endpoint', '(', 'base_view', ',', 'alternate_view', ')', "# Store the view rules for reference. Doesn't support multiple routes", '# mapped to same view.', 'views', '=', 'app', '.', 'extensions', '[', "'resty'", ']', '.', 'views', 'base_rule_full', '=', "'{}{}'", '.', 'format', '(', 'self', '.', 'prefix', ',', 'base_rule', ')', 'base_view_func', '=', 'base_view', '.', 'as_view', '(', 'endpoint', ')', 'if', 'not', 'alternate_view', ':', 'app', '.', 'add_url_rule', '(', 'base_rule_full', ',', 'view_func', '=', 'base_view_func', ')', 'views', '[', 'base_view', ']', '=', 'Resource', '(', 'base_view', ',', 'base_rule_full', ')', 'return', 'alternate_rule_full', '=', "'{}{}'", '.', 'format', '(', 'self', '.', 'prefix', ',', 'alternate_rule', ')', 'alternate_view_func', '=', 'alternate_view', '.', 'as_view', '(', 'endpoint', ')', '@', 'functools', '.', 'wraps', '(', 'base_view_func', ')', 'def', 'view_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'flask', '.', 'request', '.', 'url_rule', '.', 'rule', '==', 'base_rule_full', ':', 'return', 'base_view_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'else', ':', 'return', 'alternate_view_func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'app', '.', 'add_url_rule', '(', 'base_rule_full', ',', 'view_func', '=', 'view_func', ',', 'endpoint', '=', 'endpoint', ',', 'methods', '=', 'base_view', '.', 'methods', ',', ')', 'app', '.', 'add_url_rule', '(', 'alternate_rule_full', ',', 'view_func', '=', 'view_func', ',', 'endpoint', '=', 'endpoint', ',', 'methods', '=', 'alternate_view', '.', 'methods', ',', ')', 'views', '[', 'base_view', ']', '=', 'Resource', '(', 'base_view', ',', 'base_rule_full', ')', 'views', '[', 'alternate_view', ']', '=', 'Resource', '(', 'alternate_view', ',', 'alternate_rule_full', ')'] | Add route or routes for a resource.
:param str base_rule: The URL rule for the resource. This will be
prefixed by the API prefix.
:param base_view: Class-based view for the resource.
:param alternate_view: If specified, an alternate class-based view for
the resource. Usually, this will be a detail view, when the base
view is a list view.
:param alternate_rule: If specified, the URL rule for the alternate
view. This will be prefixed by the API prefix. This is mutually
exclusive with id_rule, and must not be specified if alternate_view
is not specified.
:type alternate_rule: str or None
:param id_rule: If specified, a suffix to append to base_rule to get
the alternate view URL rule. If alternate_view is specified, and
alternate_rule is not, then this defaults to '<id>'. This is
mutually exclusive with alternate_rule, and must not be specified
if alternate_view is not specified.
:type id_rule: str or None
:param app: If specified, the application to which to add the route(s).
Otherwise, this will be the bound application, if present. | ['Add', 'route', 'or', 'routes', 'for', 'a', 'resource', '.'] | train | https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/api.py#L61-L137 |
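
What add_resource ultimately does is register two URL rules against one endpoint and branch on flask.request.url_rule inside a shared view function. That wiring can be reproduced with plain Flask; the widget views below are invented stand-ins, not flask-resty classes.

    import flask

    app = flask.Flask(__name__)

    def list_view():
        return flask.jsonify(items=['widget-1', 'widget-2'])

    def detail_view(id):
        return flask.jsonify(id=id)

    def dispatch(*args, **kwargs):
        # Both rules share this endpoint; branch on which rule actually matched.
        if flask.request.url_rule.rule == '/api/widgets/':
            return list_view(*args, **kwargs)
        return detail_view(*args, **kwargs)

    app.add_url_rule('/api/widgets/', view_func=dispatch, endpoint='widgets', methods=['GET'])
    app.add_url_rule('/api/widgets/<id>', view_func=dispatch, endpoint='widgets', methods=['GET'])

    with app.test_client() as client:
        print(client.get('/api/widgets/').get_json())    # {'items': ['widget-1', 'widget-2']}
        print(client.get('/api/widgets/7').get_json())   # {'id': '7'}
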
9,594 | estnltk/estnltk | estnltk/mw_verbs/basic_verbchain_detection.py | _isVerbExpansible | def _isVerbExpansible( verbObj, clauseTokens, clauseID ):
'''
Kontrollib, kas tavaline verb on laiendatav etteantud osalauses:
*) verbi kontekstis (osalauses) on veel teisi verbe;
*) verb kuulub etteantud osalausesse;
*) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi);
*) tegemist pole maks|mas|mast|mata-verbiga;
*) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas;
Tagastab True, kui k6ik tingimused t2idetud;
'''
global _verbInfNonExpansible
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and \
re.match('^(verb)$', verbObj[PATTERN][-1], re.I):
# Leiame viimasele s6nale vastava token'i
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
if not lastToken:
raise Exception(' Last token not found for '+str(verbObj)+' in '+str( getJsonAsTextString(clauseTokens) ))
lastToken = lastToken[0]
# Leiame, ega tegu pole maks/mas/mast/mata verbidega (neid esialgu ei laienda edasi)
# NB! Tegelikult peaks v2hemalt -mas verbe saama siiski laiendada:
# Ma ei_0 käinud_0 teda palumas_0 ümber otsustada_0 .
# Aga kuidas seda teha v6imalikult v2heste vigadega, vajab edasist uurimist ...
if not _verbInfNonExpansible.matches(lastToken):
# Kontrollime, et fraasi l6pus poleks ja/ning/ega/v6i fraasi:
# kui on, siis esialgu targu seda fraasi laiendama ei hakka:
if len(verbObj[PATTERN]) >=3 and verbObj[PATTERN][-2] == '&':
return False
return True
#
# TODO: siin tuleks ilmselt keelata ka 'saama + Verb_tud' konstruktsioonide laiendused,
# kuna need kipuvad olema pigem vigased (kuigi haruldased); Nt.
#
# ringi hääletades sai_0 rongidega jänest sõita_0 ja vagunisaatjatest neidudega öösiti napsu võetud_0 .
#
return False | python | def _isVerbExpansible( verbObj, clauseTokens, clauseID ):
'''
Kontrollib, kas tavaline verb on laiendatav etteantud osalauses:
*) verbi kontekstis (osalauses) on veel teisi verbe;
*) verb kuulub etteantud osalausesse;
*) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi);
*) tegemist pole maks|mas|mast|mata-verbiga;
*) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas;
Tagastab True, kui k6ik tingimused t2idetud;
'''
global _verbInfNonExpansible
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and \
re.match('^(verb)$', verbObj[PATTERN][-1], re.I):
# Leiame viimasele s6nale vastava token'i
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
if not lastToken:
raise Exception(' Last token not found for '+str(verbObj)+' in '+str( getJsonAsTextString(clauseTokens) ))
lastToken = lastToken[0]
# Leiame, ega tegu pole maks/mas/mast/mata verbidega (neid esialgu ei laienda edasi)
# NB! Tegelikult peaks v2hemalt -mas verbe saama siiski laiendada:
# Ma ei_0 käinud_0 teda palumas_0 ümber otsustada_0 .
# Aga kuidas seda teha v6imalikult v2heste vigadega, vajab edasist uurimist ...
if not _verbInfNonExpansible.matches(lastToken):
# Kontrollime, et fraasi l6pus poleks ja/ning/ega/v6i fraasi:
# kui on, siis esialgu targu seda fraasi laiendama ei hakka:
if len(verbObj[PATTERN]) >=3 and verbObj[PATTERN][-2] == '&':
return False
return True
#
# TODO: siin tuleks ilmselt keelata ka 'saama + Verb_tud' konstruktsioonide laiendused,
# kuna need kipuvad olema pigem vigased (kuigi haruldased); Nt.
#
# ringi hääletades sai_0 rongidega jänest sõita_0 ja vagunisaatjatest neidudega öösiti napsu võetud_0 .
#
return False | ['def', '_isVerbExpansible', '(', 'verbObj', ',', 'clauseTokens', ',', 'clauseID', ')', ':', 'global', '_verbInfNonExpansible', '# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav\r', 'if', 'verbObj', '[', 'OTHER_VERBS', ']', 'and', 'verbObj', '[', 'CLAUSE_IDX', ']', '==', 'clauseID', 'and', 're', '.', 'match', '(', "'^(verb)$'", ',', 'verbObj', '[', 'PATTERN', ']', '[', '-', '1', ']', ',', 're', '.', 'I', ')', ':', "# Leiame viimasele s6nale vastava token'i\r", 'lastToken', '=', '[', 'token', 'for', 'token', 'in', 'clauseTokens', 'if', 'token', '[', 'WORD_ID', ']', '==', 'verbObj', '[', 'PHRASE', ']', '[', '-', '1', ']', ']', 'if', 'not', 'lastToken', ':', 'raise', 'Exception', '(', "' Last token not found for '", '+', 'str', '(', 'verbObj', ')', '+', "' in '", '+', 'str', '(', 'getJsonAsTextString', '(', 'clauseTokens', ')', ')', ')', 'lastToken', '=', 'lastToken', '[', '0', ']', '# Leiame, ega tegu pole maks/mas/mast/mata verbidega (neid esialgu ei laienda edasi)\r', '# NB! Tegelikult peaks v2hemalt -mas verbe saama siiski laiendada:\r', '# Ma ei_0 käinud_0 teda palumas_0 ümber otsustada_0 .\r', '# Aga kuidas seda teha v6imalikult v2heste vigadega, vajab edasist uurimist ...\r', 'if', 'not', '_verbInfNonExpansible', '.', 'matches', '(', 'lastToken', ')', ':', '# Kontrollime, et fraasi l6pus poleks ja/ning/ega/v6i fraasi:\r', '# kui on, siis esialgu targu seda fraasi laiendama ei hakka:\r', 'if', 'len', '(', 'verbObj', '[', 'PATTERN', ']', ')', '>=', '3', 'and', 'verbObj', '[', 'PATTERN', ']', '[', '-', '2', ']', '==', "'&'", ':', 'return', 'False', 'return', 'True', '#\r', "# TODO: siin tuleks ilmselt keelata ka 'saama + Verb_tud' konstruktsioonide laiendused,\r", '# kuna need kipuvad olema pigem vigased (kuigi haruldased); Nt.\r', '#\r', '# ringi hääletades sai_0 rongidega jänest sõita_0 ja vagunisaatjatest neidudega öösiti napsu võetud_0 .\r', '#\r', 'return', 'False'] | Kontrollib, kas tavaline verb on laiendatav etteantud osalauses:
*) verbi kontekstis (osalauses) on veel teisi verbe;
*) verb kuulub etteantud osalausesse;
*) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi);
*) tegemist pole maks|mas|mast|mata-verbiga;
*) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas;
Tagastab True, kui k6ik tingimused t2idetud; | ['Kontrollib', 'kas', 'tavaline', 'verb', 'on', 'laiendatav', 'etteantud', 'osalauses', ':', '*', ')', 'verbi', 'kontekstis', '(', 'osalauses', ')', 'on', 'veel', 'teisi', 'verbe', ';', '*', ')', 'verb', 'kuulub', 'etteantud', 'osalausesse', ';', '*', ')', 'tegemist', 'ei', 'ole', 'olema', '-', 'verbiga', '(', 'neid', 'vaatame', 'mujal', 'eraldi', ')', ';', '*', ')', 'tegemist', 'pole', 'maks|mas|mast|mata', '-', 'verbiga', ';', '*', ')', 'tegemist', 'pole', 'verbiahelaga', 'mille', 'l6pus', 'on', 'ja', '/', 'ning', '/', 'ega', '/', 'v6i', '-', 'fraas', ';', 'Tagastab', 'True', 'kui', 'k6ik', 'tingimused', 't2idetud', ';'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/mw_verbs/basic_verbchain_detection.py#L1015-L1050 |
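In English, the Estonian docstring of _isVerbExpansible above reads roughly: checks whether an ordinary verb is expandable within the given clause, i.e. (1) there are other verbs in the verb's context (the clause); (2) the verb belongs to the given clause; (3) it is not an olema ('to be') verb (those are handled separately elsewhere); (4) it is not a maks/mas/mast/mata verb form; (5) it is not a verb chain ending in a ja/ning/ega/või ('and/nor/or') conjunction phrase; returns True when all conditions hold. (Rough gloss; the row itself is quoted verbatim from the source.)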
9,595 | titusjan/argos | argos/inspector/debug.py | DebugInspector._createConfig | def _createConfig(self):
""" Creates a config tree item (CTI) hierarchy containing default children.
"""
rootItem = MainGroupCti('debug inspector')
if DEBUGGING:
# Some test config items.
import numpy as np
from argos.config.untypedcti import UntypedCti
from argos.config.stringcti import StringCti
from argos.config.intcti import IntCti
from argos.config.floatcti import FloatCti, SnFloatCti
from argos.config.boolcti import BoolCti, BoolGroupCti
from argos.config.choicecti import ChoiceCti
from argos.config.qtctis import PenCti
grpItem = GroupCti("group")
rootItem.insertChild(grpItem)
lcItem = UntypedCti('line color', 123)
grpItem.insertChild(lcItem)
disabledItem = rootItem.insertChild(StringCti('disabled', "Can't touch me"))
disabledItem.enabled=False
grpItem.insertChild(IntCti('line-1 color', 7, minValue = -5, stepSize=2,
prefix="@", suffix="%", specialValueText="I'm special"))
rootItem.insertChild(StringCti('letter', 'aa', maxLength = 1))
grpItem.insertChild(FloatCti('width', 2, minValue =5, stepSize=0.45, decimals=3,
prefix="@", suffix="%", specialValueText="so very special"))
grpItem.insertChild(SnFloatCti('scientific', defaultData=-np.inf))
gridItem = rootItem.insertChild(BoolGroupCti('grid', True))
gridItem.insertChild(BoolCti('X-Axis', True))
gridItem.insertChild(BoolCti('Y-Axis', False))
rootItem.insertChild(ChoiceCti('hobbit', 2, editable=True,
configValues=['Frodo', 'Sam', 'Pippin', 'Merry']))
myPen = QtGui.QPen(QtGui.QColor('#1C8857'))
myPen.setWidth(2)
myPen.setStyle(Qt.DashDotDotLine)
rootItem.insertChild(PenCti('line', False, resetTo=myPen))
return rootItem | python | def _createConfig(self):
""" Creates a config tree item (CTI) hierarchy containing default children.
"""
rootItem = MainGroupCti('debug inspector')
if DEBUGGING:
# Some test config items.
import numpy as np
from argos.config.untypedcti import UntypedCti
from argos.config.stringcti import StringCti
from argos.config.intcti import IntCti
from argos.config.floatcti import FloatCti, SnFloatCti
from argos.config.boolcti import BoolCti, BoolGroupCti
from argos.config.choicecti import ChoiceCti
from argos.config.qtctis import PenCti
grpItem = GroupCti("group")
rootItem.insertChild(grpItem)
lcItem = UntypedCti('line color', 123)
grpItem.insertChild(lcItem)
disabledItem = rootItem.insertChild(StringCti('disabled', "Can't touch me"))
disabledItem.enabled=False
grpItem.insertChild(IntCti('line-1 color', 7, minValue = -5, stepSize=2,
prefix="@", suffix="%", specialValueText="I'm special"))
rootItem.insertChild(StringCti('letter', 'aa', maxLength = 1))
grpItem.insertChild(FloatCti('width', 2, minValue =5, stepSize=0.45, decimals=3,
prefix="@", suffix="%", specialValueText="so very special"))
grpItem.insertChild(SnFloatCti('scientific', defaultData=-np.inf))
gridItem = rootItem.insertChild(BoolGroupCti('grid', True))
gridItem.insertChild(BoolCti('X-Axis', True))
gridItem.insertChild(BoolCti('Y-Axis', False))
rootItem.insertChild(ChoiceCti('hobbit', 2, editable=True,
configValues=['Frodo', 'Sam', 'Pippin', 'Merry']))
myPen = QtGui.QPen(QtGui.QColor('#1C8857'))
myPen.setWidth(2)
myPen.setStyle(Qt.DashDotDotLine)
rootItem.insertChild(PenCti('line', False, resetTo=myPen))
return rootItem | ['def', '_createConfig', '(', 'self', ')', ':', 'rootItem', '=', 'MainGroupCti', '(', "'debug inspector'", ')', 'if', 'DEBUGGING', ':', '# Some test config items.', 'import', 'numpy', 'as', 'np', 'from', 'argos', '.', 'config', '.', 'untypedcti', 'import', 'UntypedCti', 'from', 'argos', '.', 'config', '.', 'stringcti', 'import', 'StringCti', 'from', 'argos', '.', 'config', '.', 'intcti', 'import', 'IntCti', 'from', 'argos', '.', 'config', '.', 'floatcti', 'import', 'FloatCti', ',', 'SnFloatCti', 'from', 'argos', '.', 'config', '.', 'boolcti', 'import', 'BoolCti', ',', 'BoolGroupCti', 'from', 'argos', '.', 'config', '.', 'choicecti', 'import', 'ChoiceCti', 'from', 'argos', '.', 'config', '.', 'qtctis', 'import', 'PenCti', 'grpItem', '=', 'GroupCti', '(', '"group"', ')', 'rootItem', '.', 'insertChild', '(', 'grpItem', ')', 'lcItem', '=', 'UntypedCti', '(', "'line color'", ',', '123', ')', 'grpItem', '.', 'insertChild', '(', 'lcItem', ')', 'disabledItem', '=', 'rootItem', '.', 'insertChild', '(', 'StringCti', '(', "'disabled'", ',', '"Can\'t touch me"', ')', ')', 'disabledItem', '.', 'enabled', '=', 'False', 'grpItem', '.', 'insertChild', '(', 'IntCti', '(', "'line-1 color'", ',', '7', ',', 'minValue', '=', '-', '5', ',', 'stepSize', '=', '2', ',', 'prefix', '=', '"@"', ',', 'suffix', '=', '"%"', ',', 'specialValueText', '=', '"I\'m special"', ')', ')', 'rootItem', '.', 'insertChild', '(', 'StringCti', '(', "'letter'", ',', "'aa'", ',', 'maxLength', '=', '1', ')', ')', 'grpItem', '.', 'insertChild', '(', 'FloatCti', '(', "'width'", ',', '2', ',', 'minValue', '=', '5', ',', 'stepSize', '=', '0.45', ',', 'decimals', '=', '3', ',', 'prefix', '=', '"@"', ',', 'suffix', '=', '"%"', ',', 'specialValueText', '=', '"so very special"', ')', ')', 'grpItem', '.', 'insertChild', '(', 'SnFloatCti', '(', "'scientific'", ',', 'defaultData', '=', '-', 'np', '.', 'inf', ')', ')', 'gridItem', '=', 'rootItem', '.', 'insertChild', '(', 'BoolGroupCti', '(', "'grid'", ',', 'True', ')', ')', 'gridItem', '.', 'insertChild', '(', 'BoolCti', '(', "'X-Axis'", ',', 'True', ')', ')', 'gridItem', '.', 'insertChild', '(', 'BoolCti', '(', "'Y-Axis'", ',', 'False', ')', ')', 'rootItem', '.', 'insertChild', '(', 'ChoiceCti', '(', "'hobbit'", ',', '2', ',', 'editable', '=', 'True', ',', 'configValues', '=', '[', "'Frodo'", ',', "'Sam'", ',', "'Pippin'", ',', "'Merry'", ']', ')', ')', 'myPen', '=', 'QtGui', '.', 'QPen', '(', 'QtGui', '.', 'QColor', '(', "'#1C8857'", ')', ')', 'myPen', '.', 'setWidth', '(', '2', ')', 'myPen', '.', 'setStyle', '(', 'Qt', '.', 'DashDotDotLine', ')', 'rootItem', '.', 'insertChild', '(', 'PenCti', '(', "'line'", ',', 'False', ',', 'resetTo', '=', 'myPen', ')', ')', 'return', 'rootItem'] | Creates a config tree item (CTI) hierarchy containing default children. | ['Creates', 'a', 'config', 'tree', 'item', '(', 'CTI', ')', 'hierarchy', 'containing', 'default', 'children', '.'] | train | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/debug.py#L52-L95 |
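The _createConfig method above populates a configuration tree by nesting group items and typed leaf items. The snippet below is a minimal, Qt-free sketch of that composite-tree idea; the class and method names are illustrative stand-ins, not the argos CTI API.

# Minimal composite config-tree sketch (stand-in names, not the argos API).
class ConfigItem:
    def __init__(self, name, value=None):
        self.name, self.value, self.children = name, value, []

    def insert_child(self, item):
        self.children.append(item)
        return item  # returning the child enables the nested-building style used above

    def dump(self, indent=0):
        print("  " * indent + "{}: {!r}".format(self.name, self.value))
        for child in self.children:
            child.dump(indent + 1)

root = ConfigItem("debug inspector")
grid = root.insert_child(ConfigItem("grid", True))
grid.insert_child(ConfigItem("X-Axis", True))
grid.insert_child(ConfigItem("Y-Axis", False))
root.dump()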
9,596 | BerkeleyAutomation/autolab_core | autolab_core/tensor_dataset.py | TensorDataset.generate_tensor_filename | def generate_tensor_filename(self, field_name, file_num, compressed=True):
""" Generate a filename for a tensor. """
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext))
return filename | python | def generate_tensor_filename(self, field_name, file_num, compressed=True):
""" Generate a filename for a tensor. """
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext))
return filename | ['def', 'generate_tensor_filename', '(', 'self', ',', 'field_name', ',', 'file_num', ',', 'compressed', '=', 'True', ')', ':', 'file_ext', '=', 'TENSOR_EXT', 'if', 'compressed', ':', 'file_ext', '=', 'COMPRESSED_TENSOR_EXT', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'filename', ',', "'tensors'", ',', "'%s_%05d%s'", '%', '(', 'field_name', ',', 'file_num', ',', 'file_ext', ')', ')', 'return', 'filename'] | Generate a filename for a tensor. | ['Generate', 'a', 'filename', 'for', 'a', 'tensor', '.'] | train | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L427-L433 |
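The filename convention above is straightforward to reproduce. Below is a standalone sketch; the .npy/.npz extensions are assumed stand-ins for the TENSOR_EXT and COMPRESSED_TENSOR_EXT constants, which are defined elsewhere in autolab_core and not shown in this row.

import os

TENSOR_EXT = ".npy"              # assumption: uncompressed tensor extension
COMPRESSED_TENSOR_EXT = ".npz"   # assumption: compressed tensor extension

def generate_tensor_filename(dataset_dir, field_name, file_num, compressed=True):
    """Build '<dataset>/tensors/<field>_<zero-padded index><ext>' as in the row above."""
    file_ext = COMPRESSED_TENSOR_EXT if compressed else TENSOR_EXT
    return os.path.join(dataset_dir, "tensors", "%s_%05d%s" % (field_name, file_num, file_ext))

print(generate_tensor_filename("my_dataset", "images", 7))   # my_dataset/tensors/images_00007.npz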
9,597 | PMBio/limix-backup | limix/stats/chi2mixture.py | Chi2mixture.estimate_chi2mixture | def estimate_chi2mixture(self, lrt):
"""
estimates the parameters of a mixture of a chi-squared random variable of degree
0 and a scaled chi-squared random variable of degree d
(1-mixture)*chi2(0) + (mixture)*scale*chi2(dof),
where
scale is the scaling parameter for the scales chi-square distribution
dof are the degrees of freedom of the second component
mixture is the probability of beeing in the first component
input:
lrt [Ntests] vector of test statistics
"""
"""
step 1: estimate the probability of being in component one
"""
self.mixture = 1-(lrt<=self.tol).mean()
n_false = SP.sum(lrt>self.tol)
"""
step 2: only use the largest qmax fraction of test statistics to estimate the
remaining parameters
"""
n_fitting = SP.ceil(self.qmax * n_false)
lrt_sorted = -SP.sort(-lrt)[:n_fitting]
q = SP.linspace(0, 1,n_false)[1:n_fitting+1]
log_q = SP.log10(q)
"""
step 3: fitting scale and dof by minimizing the squared error of the log10 p-values
with their theorietical values [uniform distribution]
"""
MSE_opt = SP.inf
MSE = SP.zeros((self.n_intervals,self.n_intervals))
for i,scale in enumerate(SP.linspace(self.scale_min,self.scale_max,self.n_intervals)):
for j,dof in enumerate(SP.linspace(self.dof_min,self.dof_max,self.n_intervals)):
p = STATS.chi2.sf(lrt_sorted/scale,dof)
log_p = SP.log10(p)
MSE[i,j] = SP.mean((log_q - log_p)**2)
if MSE[i,j] < MSE_opt:
MSE_opt = MSE[i,j]
self.scale = scale
self.dof = dof | python | def estimate_chi2mixture(self, lrt):
"""
estimates the parameters of a mixture of a chi-squared random variable of degree
0 and a scaled chi-squared random variable of degree d
(1-mixture)*chi2(0) + (mixture)*scale*chi2(dof),
where
scale is the scaling parameter for the scales chi-square distribution
dof are the degrees of freedom of the second component
mixture is the probability of beeing in the first component
input:
lrt [Ntests] vector of test statistics
"""
"""
step 1: estimate the probability of being in component one
"""
self.mixture = 1-(lrt<=self.tol).mean()
n_false = SP.sum(lrt>self.tol)
"""
step 2: only use the largest qmax fraction of test statistics to estimate the
remaining parameters
"""
n_fitting = SP.ceil(self.qmax * n_false)
lrt_sorted = -SP.sort(-lrt)[:n_fitting]
q = SP.linspace(0, 1,n_false)[1:n_fitting+1]
log_q = SP.log10(q)
"""
step 3: fitting scale and dof by minimizing the squared error of the log10 p-values
with their theorietical values [uniform distribution]
"""
MSE_opt = SP.inf
MSE = SP.zeros((self.n_intervals,self.n_intervals))
for i,scale in enumerate(SP.linspace(self.scale_min,self.scale_max,self.n_intervals)):
for j,dof in enumerate(SP.linspace(self.dof_min,self.dof_max,self.n_intervals)):
p = STATS.chi2.sf(lrt_sorted/scale,dof)
log_p = SP.log10(p)
MSE[i,j] = SP.mean((log_q - log_p)**2)
if MSE[i,j] < MSE_opt:
MSE_opt = MSE[i,j]
self.scale = scale
self.dof = dof | ['def', 'estimate_chi2mixture', '(', 'self', ',', 'lrt', ')', ':', '"""\n step 1: estimate the probability of being in component one\n """', 'self', '.', 'mixture', '=', '1', '-', '(', 'lrt', '<=', 'self', '.', 'tol', ')', '.', 'mean', '(', ')', 'n_false', '=', 'SP', '.', 'sum', '(', 'lrt', '>', 'self', '.', 'tol', ')', '"""\n step 2: only use the largest qmax fraction of test statistics to estimate the\n remaining parameters\n """', 'n_fitting', '=', 'SP', '.', 'ceil', '(', 'self', '.', 'qmax', '*', 'n_false', ')', 'lrt_sorted', '=', '-', 'SP', '.', 'sort', '(', '-', 'lrt', ')', '[', ':', 'n_fitting', ']', 'q', '=', 'SP', '.', 'linspace', '(', '0', ',', '1', ',', 'n_false', ')', '[', '1', ':', 'n_fitting', '+', '1', ']', 'log_q', '=', 'SP', '.', 'log10', '(', 'q', ')', '"""\n step 3: fitting scale and dof by minimizing the squared error of the log10 p-values\n with their theorietical values [uniform distribution]\n """', 'MSE_opt', '=', 'SP', '.', 'inf', 'MSE', '=', 'SP', '.', 'zeros', '(', '(', 'self', '.', 'n_intervals', ',', 'self', '.', 'n_intervals', ')', ')', 'for', 'i', ',', 'scale', 'in', 'enumerate', '(', 'SP', '.', 'linspace', '(', 'self', '.', 'scale_min', ',', 'self', '.', 'scale_max', ',', 'self', '.', 'n_intervals', ')', ')', ':', 'for', 'j', ',', 'dof', 'in', 'enumerate', '(', 'SP', '.', 'linspace', '(', 'self', '.', 'dof_min', ',', 'self', '.', 'dof_max', ',', 'self', '.', 'n_intervals', ')', ')', ':', 'p', '=', 'STATS', '.', 'chi2', '.', 'sf', '(', 'lrt_sorted', '/', 'scale', ',', 'dof', ')', 'log_p', '=', 'SP', '.', 'log10', '(', 'p', ')', 'MSE', '[', 'i', ',', 'j', ']', '=', 'SP', '.', 'mean', '(', '(', 'log_q', '-', 'log_p', ')', '**', '2', ')', 'if', 'MSE', '[', 'i', ',', 'j', ']', '<', 'MSE_opt', ':', 'MSE_opt', '=', 'MSE', '[', 'i', ',', 'j', ']', 'self', '.', 'scale', '=', 'scale', 'self', '.', 'dof', '=', 'dof'] | estimates the parameters of a mixture of a chi-squared random variable of degree
0 and a scaled chi-squared random variable of degree d
(1-mixture)*chi2(0) + (mixture)*scale*chi2(dof),
where
scale is the scaling parameter for the scales chi-square distribution
dof are the degrees of freedom of the second component
mixture is the probability of beeing in the first component
input:
lrt [Ntests] vector of test statistics | ['estimates', 'the', 'parameters', 'of', 'a', 'mixture', 'of', 'a', 'chi', '-', 'squared', 'random', 'variable', 'of', 'degree', '0', 'and', 'a', 'scaled', 'chi', '-', 'squared', 'random', 'variable', 'of', 'degree', 'd', '(', '1', '-', 'mixture', ')', '*', 'chi2', '(', '0', ')', '+', '(', 'mixture', ')', '*', 'scale', '*', 'chi2', '(', 'dof', ')'] | train | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/stats/chi2mixture.py#L36-L83 |
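The docstring above spells out the whole fitting procedure, so a compact standalone version is easy to give. The grid bounds, grid size and the closing p-value comment below are illustrative choices, not limix's defaults.

import numpy as np
from scipy import stats

def fit_chi2_mixture(lrt, tol=0.0, qmax=0.1, n_grid=50,
                     scale_range=(0.1, 5.0), dof_range=(0.1, 5.0)):
    """Fit (1 - pi)*chi2(0) + pi*scale*chi2(dof) to a vector of LRT statistics."""
    lrt = np.asarray(lrt, dtype=float)
    mixture = 1.0 - (lrt <= tol).mean()            # step 1: weight of the non-null component
    n_false = int((lrt > tol).sum())
    n_fit = int(np.ceil(qmax * n_false))           # step 2: keep only the largest statistics
    lrt_sorted = -np.sort(-lrt)[:n_fit]
    log_q = np.log10(np.linspace(0.0, 1.0, n_false)[1:n_fit + 1])
    best_mse, best_scale, best_dof = np.inf, None, None
    for scale in np.linspace(*scale_range, n_grid):     # step 3: grid search on (scale, dof)
        for dof in np.linspace(*dof_range, n_grid):
            log_p = np.log10(stats.chi2.sf(lrt_sorted / scale, dof))
            mse = np.mean((log_q - log_p) ** 2)
            if mse < best_mse:
                best_mse, best_scale, best_dof = mse, scale, dof
    return mixture, best_scale, best_dof

# A p-value for a new statistic under the fitted mixture could then be computed as
# mixture * stats.chi2.sf(stat / scale, dof)  -- a usage assumption, not shown in the row.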
9,598 | blockstack/virtualchain | virtualchain/lib/blockchain/bitcoin_blockchain/blocks.py | BlockchainDownloader.begin | def begin(self):
"""
This method will implement the handshake of the
Bitcoin protocol. It will send the Version message,
and block until it receives a VerAck.
Once we receive the version, we'll send the verack,
and begin downloading.
"""
log.debug("handshake (version %s)" % PROTOCOL_VERSION)
version = Version()
version.services = 0 # can't send blocks
log.debug("send Version")
self.send_message(version) | python | def begin(self):
"""
This method will implement the handshake of the
Bitcoin protocol. It will send the Version message,
and block until it receives a VerAck.
Once we receive the version, we'll send the verack,
and begin downloading.
"""
log.debug("handshake (version %s)" % PROTOCOL_VERSION)
version = Version()
version.services = 0 # can't send blocks
log.debug("send Version")
self.send_message(version) | ['def', 'begin', '(', 'self', ')', ':', 'log', '.', 'debug', '(', '"handshake (version %s)"', '%', 'PROTOCOL_VERSION', ')', 'version', '=', 'Version', '(', ')', 'version', '.', 'services', '=', '0', "# can't send blocks", 'log', '.', 'debug', '(', '"send Version"', ')', 'self', '.', 'send_message', '(', 'version', ')'] | This method will implement the handshake of the
Bitcoin protocol. It will send the Version message,
and block until it receives a VerAck.
Once we receive the version, we'll send the verack,
and begin downloading. | ['This', 'method', 'will', 'implement', 'the', 'handshake', 'of', 'the', 'Bitcoin', 'protocol', '.', 'It', 'will', 'send', 'the', 'Version', 'message', 'and', 'block', 'until', 'it', 'receives', 'a', 'VerAck', '.', 'Once', 'we', 'receive', 'the', 'version', 'we', 'll', 'send', 'the', 'verack', 'and', 'begin', 'downloading', '.'] | train | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/blocks.py#L305-L317 |
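The entry above only shows the first half of the exchange its docstring describes (send Version, wait for the peer's Version/VerAck, acknowledge, then start downloading). The toy state machine below illustrates that ordering with stand-in message strings; it is not virtualchain's protocol code and performs no networking.

class HandshakeState:
    """Tracks the version/verack ordering described in the docstring above (illustrative only)."""
    def __init__(self):
        self.got_version = False
        self.got_verack = False
        self.downloading = False

    def start(self):
        return ["version"]                     # step 1: announce ourselves to the peer

    def on_message(self, msg):
        replies = []
        if msg == "version":
            self.got_version = True
            replies.append("verack")           # acknowledge the peer's version
        elif msg == "verack":
            self.got_verack = True             # the peer accepted our version
        if self.got_version and self.got_verack and not self.downloading:
            self.downloading = True
            replies.append("getblocks")        # handshake complete: request block inventory
        return replies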
9,599 | edx/XBlock | xblock/runtime.py | Runtime.load_block_type | def load_block_type(self, block_type):
"""
Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`.
"""
return XBlock.load_class(block_type, self.default_class, self.select) | python | def load_block_type(self, block_type):
"""
Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`.
"""
return XBlock.load_class(block_type, self.default_class, self.select) | ['def', 'load_block_type', '(', 'self', ',', 'block_type', ')', ':', 'return', 'XBlock', '.', 'load_class', '(', 'block_type', ',', 'self', '.', 'default_class', ',', 'self', '.', 'select', ')'] | Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`. | ['Returns', 'a', 'subclass', 'of', ':', 'class', ':', '.', 'XBlock', 'that', 'corresponds', 'to', 'the', 'specified', 'block_type', '.'] | train | https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/runtime.py#L602-L606 |
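load_block_type above delegates to XBlock.load_class, which resolves a block type name to an XBlock subclass. XBlock plugins are conventionally registered through setuptools entry points, so a hedged sketch of that kind of lookup follows; the 'xblock.v1' group name and the fallback behaviour are assumptions, and this is not the actual load_class implementation.

from importlib.metadata import entry_points   # Python 3.10+ for the keyword form below

def load_plugin_class(block_type, default_class=None, group="xblock.v1"):
    """Resolve a block type name to a registered class via entry points (sketch)."""
    matches = [ep for ep in entry_points(group=group) if ep.name == block_type]
    if matches:
        return matches[0].load()               # import and return the registered class
    if default_class is not None:
        return default_class                   # fall back to the runtime's default class
    raise KeyError("no class registered for block type %r" % block_type)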