Dataset schema (columns and value summaries from the dataset viewer):

| column | dtype | value summary |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string | 1 class ("python") |
| func_code_string | string | lengths 100 to 30.3k (duplicates whole_func_string) |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string | 1 class ("train") |
| func_code_url | string | lengths 91 to 315 |

Rows 900 to 919 follow. Each record is shown as one metadata line (index | repository_name | func_path_in_repository | func_name | language | split_name | func_code_url) followed by the function source (whole_func_string), which also contains the documentation string.
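This column layout matches the CodeSearchNet-style schema published on the Hugging Face Hub. As a minimal, hedged sketch (the dataset id, config name, and row index below are assumptions, not part of this dump), one way to load and inspect a row with these columns:

```python
# Illustrative only: assumes this dump comes from the "code_search_net"
# dataset (Python config) on the Hugging Face Hub.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")

row = ds[900]                               # index chosen to mirror the first record below
print(row["repository_name"], row["func_name"])
print(row["func_code_url"])
print(row["whole_func_string"][:200])       # func_code_string carries the same text
print(len(row["func_code_tokens"]))         # pre-tokenized form of the code
```
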
900 | Microsoft/nni | src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py | IncrementalGaussianProcess.incremental_fit | python | train | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py#L160-L190

    def incremental_fit(self, train_x, train_y):
        """ Incrementally fit the regressor. """
        if not self._first_fitted:
            raise ValueError("The first_fit function needs to be called first.")

        train_x, train_y = np.array(train_x), np.array(train_y)

        # Incrementally compute K
        up_right_k = edit_distance_matrix(self._x, train_x)
        down_left_k = np.transpose(up_right_k)
        down_right_k = edit_distance_matrix(train_x)
        up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
        down_k = np.concatenate((down_left_k, down_right_k), axis=1)
        temp_distance_matrix = np.concatenate((up_k, down_k), axis=0)
        k_matrix = bourgain_embedding_matrix(temp_distance_matrix)
        diagonal = np.diag_indices_from(k_matrix)
        diagonal = (diagonal[0][-len(train_x) :], diagonal[1][-len(train_x) :])
        k_matrix[diagonal] += self.alpha

        try:
            self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
        except LinAlgError:
            return self

        self._x = np.concatenate((self._x, train_x), axis=0)
        self._y = np.concatenate((self._y, train_y), axis=0)
        self._distance_matrix = temp_distance_matrix

        self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3

        return self

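The bordered update in the record above (append a block row/column to the stored distance matrix, regularize the new diagonal entries, then redo the Cholesky factorization) can be sketched standalone with NumPy/SciPy. The kernel below is an illustrative stand-in for NNI's edit_distance_matrix and bourgain_embedding_matrix helpers, not their implementation:

```python
import numpy as np
from scipy.linalg import cholesky, cho_solve

def toy_kernel(a, b=None):
    # Stand-in kernel; NNI uses edit distances plus a Bourgain embedding instead.
    b = a if b is None else b
    d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2)

rng = np.random.default_rng(0)
old_x, new_x = rng.normal(size=(5, 3)), rng.normal(size=(2, 3))
y = rng.normal(size=7)
alpha = 1e-6

# Grow the kernel matrix by one block row/column, as in incremental_fit.
k = np.block([[toy_kernel(old_x),          toy_kernel(old_x, new_x)],
              [toy_kernel(old_x, new_x).T, toy_kernel(new_x)]])
idx = np.diag_indices_from(k)
k[(idx[0][-len(new_x):], idx[1][-len(new_x):])] += alpha   # jitter only the new entries

l_matrix = cholesky(k, lower=True)               # refactor the grown matrix
alpha_vector = cho_solve((l_matrix, True), y)    # solve K a = y, as in the record
print(alpha_vector.shape)                        # (7,)
```
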
901 | aamalev/aiohttp_apiset | aiohttp_apiset/compat.py | CompatRouter.add_post | python | train | https://github.com/aamalev/aiohttp_apiset/blob/ba3492ce929e39be1325d506b727a8bfb34e7b33/aiohttp_apiset/compat.py#L358-L362

    def add_post(self, *args, **kwargs):
        """
        Shortcut for add_route with method POST
        """
        return self.add_route(hdrs.METH_POST, *args, **kwargs)

902 | saltstack/salt | salt/states/schedule.py | disabled | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/schedule.py#L344-L379

    def disabled(name, **kwargs):
        '''
        Ensure a job is disabled in the schedule

        name
            The unique name that is given to the scheduled job.

        persist
            Whether the job should persist between minion restarts, defaults to True.
        '''
        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': []}

        current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
        if name in current_schedule:
            if 'test' in __opts__ and __opts__['test']:
                kwargs['test'] = True
                result = __salt__['schedule.disable_job'](name, **kwargs)
                ret['comment'].append(result['comment'])
            else:
                result = __salt__['schedule.disable_job'](name, **kwargs)
                if not result['result']:
                    ret['result'] = result['result']
                    ret['comment'] = result['comment']
                    return ret
                else:
                    ret['comment'].append('Disabled job {0} from schedule'.format(name))
        else:
            ret['comment'].append('Job {0} not present in schedule'.format(name))

        ret['comment'] = '\n'.join(ret['comment'])
        return ret

903 | basho/riak-python-client | riak/client/operations.py | RiakClientOperations.paginate_stream_index | python | train | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L303-L373

    def paginate_stream_index(self, bucket, index, startkey, endkey=None,
                              max_results=1000, return_terms=None,
                              continuation=None, timeout=None,
                              term_regex=None):
        """
        Iterates over a streaming paginated index query. This is equivalent to
        calling :meth:`stream_index` and then successively calling
        :meth:`~riak.client.index_page.IndexPage.next_page` until all
        results are exhausted.

        Because limiting the result set is necessary to invoke
        pagination, the ``max_results`` option has a default of ``1000``.

        The caller should explicitly close each yielded page, either using
        :func:`contextlib.closing` or calling ``close()`` explicitly. Consuming
        the entire page will also close the stream. If it does not, the
        associated connection might not be returned to the pool. Example::

            from contextlib import closing

            # Using contextlib.closing
            for page in client.paginate_stream_index(mybucket, 'name_bin',
                                                     'Smith'):
                with closing(page):
                    for key in page:
                        do_something(key)

            # Explicit close()
            for page in client.paginate_stream_index(mybucket, 'name_bin',
                                                     'Smith'):
                for key in page:
                    do_something(key)
                page.close()

        :param bucket: the bucket whose index will be queried
        :type bucket: RiakBucket
        :param index: the index to query
        :type index: string
        :param startkey: the sole key to query, or beginning of the query range
        :type startkey: string, integer
        :param endkey: the end of the query range (optional if equality)
        :type endkey: string, integer
        :param return_terms: whether to include the secondary index value
        :type return_terms: boolean
        :param max_results: the maximum number of results to return (page
            size), defaults to 1000
        :type max_results: integer
        :param continuation: the opaque continuation returned from a
            previous paginated request
        :type continuation: string
        :param timeout: a timeout value in milliseconds, or 'infinity'
        :type timeout: int
        :param term_regex: a regular expression used to filter index terms
        :type term_regex: string
        :rtype: generator over instances of
            :class:`~riak.client.index_page.IndexPage`
        """
        # TODO FUTURE: implement "retry on connection closed"
        # as in stream_mapred
        page = self.stream_index(bucket, index, startkey,
                                 endkey=endkey,
                                 max_results=max_results,
                                 return_terms=return_terms,
                                 continuation=continuation,
                                 timeout=timeout,
                                 term_regex=term_regex)
        yield page
        while page.has_next_page():
            page = page.next_page()
            yield page

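The control flow of that generator (yield the first page, then keep yielding next_page() results while has_next_page() is true) is easy to exercise with a toy page class; the class below is hypothetical and is not Riak's IndexPage API:

```python
class ToyPage:
    """Hypothetical stand-in for an IndexPage-like object."""
    def __init__(self, number, last):
        self.number, self._last = number, last
    def has_next_page(self):
        return self.number < self._last
    def next_page(self):
        return ToyPage(self.number + 1, self._last)

def paginate(first_page):
    # Same shape as paginate_stream_index: emit the first page, then walk onward.
    page = first_page
    yield page
    while page.has_next_page():
        page = page.next_page()
        yield page

print([p.number for p in paginate(ToyPage(1, 3))])   # [1, 2, 3]
```
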
904 | openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.filter_networks | python | train | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1130-L1157

    def filter_networks(self, predicate):
        """
        Return a new Class1AffinityPredictor containing a subset of this
        predictor's neural networks.

        Parameters
        ----------
        predicate : Class1NeuralNetwork -> boolean
            Function specifying which neural networks to include

        Returns
        -------
        Class1AffinityPredictor
        """
        allele_to_allele_specific_models = {}
        for (allele, models) in self.allele_to_allele_specific_models.items():
            allele_to_allele_specific_models[allele] = [
                m for m in models if predicate(m)
            ]
        class1_pan_allele_models = [
            m for m in self.class1_pan_allele_models if predicate(m)
        ]

        return Class1AffinityPredictor(
            allele_to_allele_specific_models=allele_to_allele_specific_models,
            class1_pan_allele_models=class1_pan_allele_models,
            allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence,
        )

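Stripped of the mhcflurry classes, the record above is a filter-by-predicate over a dict of per-allele model lists plus a flat list; a toy version with strings standing in for networks:

```python
models_by_allele = {"HLA-A*02:01": ["net-a", "net-b"], "HLA-B*07:02": ["net-c"]}
pan_allele_models = ["net-d", "net-b"]
keep = lambda m: m != "net-b"   # example predicate

filtered_by_allele = {allele: [m for m in nets if keep(m)]
                      for allele, nets in models_by_allele.items()}
filtered_pan = [m for m in pan_allele_models if keep(m)]
print(filtered_by_allele)   # {'HLA-A*02:01': ['net-a'], 'HLA-B*07:02': ['net-c']}
print(filtered_pan)         # ['net-d']
```
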
905 | scidash/sciunit | sciunit/__main__.py | make_nb | python | train | https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/__main__.py#L180-L205

    def make_nb(config, path=None, stop_on_error=True, just_tests=False):
        """Create a Jupyter notebook sciunit tests for the given configuration."""
        root, nb_name = nb_name_from_path(config, path)
        clean = lambda varStr: re.sub('\W|^(?=\d)', '_', varStr)
        name = clean(nb_name)
        mpl_style = config.get('misc', 'matplotlib', fallback='inline')
        cells = [new_markdown_cell('## Sciunit Testing Notebook for %s' % nb_name)]
        add_code_cell(cells, (
            "%%matplotlib %s\n"
            "from IPython.display import display\n"
            "from importlib.machinery import SourceFileLoader\n"
            "%s = SourceFileLoader('scidash', '%s/__init__.py').load_module()") %
            (mpl_style, name, root))
        if just_tests:
            add_code_cell(cells, (
                "for test in %s.tests.tests:\n"
                "    score_array = test.judge(%s.models.models, stop_on_error=%r)\n"
                "    display(score_array)") % (name, name, stop_on_error))
        else:
            add_code_cell(cells, (
                "for suite in %s.suites.suites:\n"
                "    score_matrix = suite.judge("
                "%s.models.models, stop_on_error=%r)\n"
                "    display(score_matrix)") % (name, name, stop_on_error))

        write_nb(root, nb_name, cells)

906 | softlayer/softlayer-python | SoftLayer/CLI/order/place.py | cli | python | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/order/place.py#L41-L120

    def cli(env, package_keyname, location, preset, verify, billing, complex_type,
            quantity, extras, order_items):
        """Place or verify an order.

        This CLI command is used for placing/verifying an order of the specified package in
        the given location (denoted by a datacenter's long name). Orders made via the CLI
        can then be converted to be made programmatically by calling
        SoftLayer.OrderingManager.place_order() with the same keynames.

        Packages for ordering can be retrieved from `slcli order package-list`
        Presets for ordering can be retrieved from `slcli order preset-list` (not all packages
        have presets)

        Items can be retrieved from `slcli order item-list`. In order to find required
        items for the order, use `slcli order category-list`, and then provide the
        --category option for each category code in `slcli order item-list`.

        Example::
            # Order an hourly VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk,
            # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13
            slcli order place --billing hourly CLOUD_SERVER DALLAS13 \\
                GUEST_CORES_4 \\
                RAM_16_GB \\
                REBOOT_REMOTE_CONSOLE \\
                1_GBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS \\
                BANDWIDTH_0_GB_2 \\
                1_IP_ADDRESS \\
                GUEST_DISK_100_GB_SAN \\
                OS_UBUNTU_16_04_LTS_XENIAL_XERUS_MINIMAL_64_BIT_FOR_VSI \\
                MONITORING_HOST_PING \\
                NOTIFICATION_EMAIL_AND_TICKET \\
                AUTOMATED_NOTIFICATION \\
                UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT \\
                NESSUS_VULNERABILITY_ASSESSMENT_REPORTING \\
                --extras '{"virtualGuests": [{"hostname": "test", "domain": "softlayer.com"}]}' \\
                --complex-type SoftLayer_Container_Product_Order_Virtual_Guest
        """
        manager = ordering.OrderingManager(env.client)

        if extras:
            try:
                extras = json.loads(extras)
            except ValueError as err:
                raise exceptions.CLIAbort("There was an error when parsing the --extras value: {}".format(err))

        args = (package_keyname, location, order_items)
        kwargs = {'preset_keyname': preset,
                  'extras': extras,
                  'quantity': quantity,
                  'complex_type': complex_type,
                  'hourly': bool(billing == 'hourly')}

        if verify:
            table = formatting.Table(COLUMNS)
            order_to_place = manager.verify_order(*args, **kwargs)
            for price in order_to_place['orderContainers'][0]['prices']:
                cost_key = 'hourlyRecurringFee' if billing == 'hourly' else 'recurringFee'
                table.add_row([
                    price['item']['keyName'],
                    price['item']['description'],
                    price[cost_key] if cost_key in price else formatting.blank()
                ])
        else:
            if not (env.skip_confirmations or formatting.confirm(
                    "This action will incur charges on your account. Continue?")):
                raise exceptions.CLIAbort("Aborting order.")

            order = manager.place_order(*args, **kwargs)

            table = formatting.KeyValueTable(['name', 'value'])
            table.align['name'] = 'r'
            table.align['value'] = 'l'
            table.add_row(['id', order['orderId']])
            table.add_row(['created', order['orderDate']])
            table.add_row(['status', order['placedOrder']['status']])
        env.fout(table)

907 | zapier/django-rest-hooks | rest_hooks/models.py | custom_action | python | train | https://github.com/zapier/django-rest-hooks/blob/cf4f9588cd9f2d4696f2f0654a205722ee19b80e/rest_hooks/models.py#L158-L167

    def custom_action(sender, action,
                      instance,
                      user=None,
                      **kwargs):
        """
        Manually trigger a custom action (or even a standard action).
        """
        opts = get_opts(instance)
        model = '.'.join([opts.app_label, opts.object_name])
        distill_model_event(instance, model, action, user_override=user)

908 | istresearch/scrapy-cluster | kafka-monitor/kafka_monitor.py | KafkaMonitor._dump_stats | python | train | https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/kafka-monitor/kafka_monitor.py#L367-L392

    def _dump_stats(self):
        '''
        Dumps the stats out
        '''
        extras = {}
        if 'total' in self.stats_dict:
            self.logger.debug("Compiling total/fail dump stats")
            for key in self.stats_dict['total']:
                final = 'total_{t}'.format(t=key)
                extras[final] = self.stats_dict['total'][key].value()
            for key in self.stats_dict['fail']:
                final = 'fail_{t}'.format(t=key)
                extras[final] = self.stats_dict['fail'][key].value()

        if 'plugins' in self.stats_dict:
            self.logger.debug("Compiling plugin dump stats")
            for name in self.stats_dict['plugins']:
                for key in self.stats_dict['plugins'][name]:
                    final = 'plugin_{n}_{t}'.format(n=name, t=key)
                    extras[final] = self.stats_dict['plugins'][name][key].value()

        if not self.logger.json:
            self.logger.info('Kafka Monitor Stats Dump:\n{0}'.format(
                json.dumps(extras, indent=4, sort_keys=True)))
        else:
            self.logger.info('Kafka Monitor Stats Dump', extra=extras)

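In isolation, the flattening step above (nested stat counters collapsed into prefixed top-level keys before logging) reduces to a couple of loops; plain ints stand in for the .value() counter objects:

```python
import json

stats = {"total": {"5min": 12, "lifetime": 340},
         "fail": {"5min": 1},
         "plugins": {"scraper_handler": {"5min": 7}}}

extras = {}
if "total" in stats:
    for key, val in stats["total"].items():
        extras["total_{t}".format(t=key)] = val
    for key, val in stats["fail"].items():
        extras["fail_{t}".format(t=key)] = val
if "plugins" in stats:
    for name, counters in stats["plugins"].items():
        for key, val in counters.items():
            extras["plugin_{n}_{t}".format(n=name, t=key)] = val

print(json.dumps(extras, indent=4, sort_keys=True))
```
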
909 | joealcorn/xbox | xbox/resource.py | Clip.get | python | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/resource.py#L201-L234

    def get(cls, xuid, scid, clip_id):
        '''
        Gets a specific game clip

        :param xuid: xuid of an xbox live user
        :param scid: scid of a clip
        :param clip_id: id of a clip
        '''
        url = (
            'https://gameclipsmetadata.xboxlive.com/users'
            '/xuid(%(xuid)s)/scids/%(scid)s/clips/%(clip_id)s' % {
                'xuid': xuid,
                'scid': scid,
                'clip_id': clip_id,
            }
        )
        resp = xbox.client._get(url)

        # scid does not seem to matter when fetching clips,
        # as long as it looks like a uuid it should be fine.
        # perhaps we'll raise an exception in future
        if resp.status_code == 404:
            msg = 'Could not find clip: xuid=%s, scid=%s, clip_id=%s' % (
                xuid, scid, clip_id,
            )
            raise ClipNotFound(msg)

        data = resp.json()
        # as we don't have the user object let's
        # create a lazily evaluated proxy object
        # that will fetch it only when required
        user = UserProxy(xuid)
        return cls(user, data['gameClip'])

910 | quantmind/ccy | ccy/core/currency.py | currency_pair | python | train | https://github.com/quantmind/ccy/blob/068cf6887489087cd26657a937a932e82106b47f/ccy/core/currency.py#L211-L216

    def currency_pair(code):
        '''Construct a :class:`ccy_pair` from a six letter string.'''
        c = str(code)
        c1 = currency(c[:3])
        c2 = currency(c[3:])
        return ccy_pair(c1, c2)

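Assuming the ccy package is installed and re-exports currency_pair and currency at the top level (an assumption; only the function body above comes from the dump), usage would look like:

```python
# Hedged usage sketch for the record above; requires `pip install ccy`.
import ccy

pair = ccy.currency_pair('EURUSD')   # six-letter code -> ccy_pair of EUR and USD
print(pair)
print(ccy.currency('EUR'))           # the single-currency lookup used internally
```
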
911 | hydpy-dev/hydpy | hydpy/auxs/xmltools.py | XMLSelector.devices | python | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/xmltools.py#L875-L904

    def devices(self) -> selectiontools.Selection:
        """The additional devices defined for the respective `reader`
        or `writer` element contained within a |Selection| object. ToDo

        If the `reader` or `writer` element does not define its own additional
        devices, |XMLInterface.devices| of |XMLInterface| is used.

        >>> from hydpy.core.examples import prepare_full_example_1
        >>> prepare_full_example_1()
        >>> from hydpy import HydPy, TestIO, XMLInterface
        >>> hp = HydPy('LahnH')
        >>> with TestIO():
        ...     hp.prepare_network()
        ...     interface = XMLInterface('single_run.xml')
        >>> series_io = interface.series_io
        >>> for seq in (series_io.readers + series_io.writers):
        ...     print(seq.info, seq.devices.nodes, seq.devices.elements)
        all input data Nodes() \
    Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3")
        precipitation Nodes() Elements("land_lahn_1", "land_lahn_2")
        soilmoisture Nodes("dill") Elements("land_dill", "land_lahn_1")
        averaged Nodes() Elements()
        """
        devices = self.find('devices')
        master = self
        while devices is None:
            master = master.master
            devices = master.find('devices')
        return _query_devices(devices)

912 | JelleAalbers/multihist | multihist.py | Histdd.std | python | train | https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L560-L580

    def std(self, axis):
        """Returns d-1 dimensional histogram of (estimated) std value along axis
        NB this is very different from just std of the histogram values (which describe bin counts)
        """
        def weighted_std(values, weights, axis):
            # Stolen from http://stackoverflow.com/questions/2413522
            average = np.average(values, weights=weights, axis=axis)
            average = average[self._simsalabim_slice(axis)]
            variance = np.average((values-average)**2, weights=weights, axis=axis)
            return np.sqrt(variance)

        axis = self.get_axis_number(axis)
        std_hist = weighted_std(self.all_axis_bin_centers(axis),
                                weights=self.histogram, axis=axis)

        if self.dimensions == 2:
            new_hist = Hist1d
        else:
            new_hist = Histdd

        return new_hist.from_histogram(histogram=std_hist,
                                       bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
                                       axis_names=self.axis_names_without(axis))

913 | broadinstitute/fiss | firecloud/method.py | Method.inputs_outputs | python | train | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/method.py#L62-L67

    def inputs_outputs(self):
        """Get information on method inputs & outputs."""
        r = fapi.get_inputs_outputs(self.namespace, self.name,
                                    self.snapshot_id, self.api_url)
        fapi._check_response_code(r, 200)
        return r.json()

914 | AbletonAG/abl.vpath | abl/vpath/base/fs.py | BaseUri.open | python | train | https://github.com/AbletonAG/abl.vpath/blob/a57491347f6e7567afa047216e5b6f6035226eaf/abl/vpath/base/fs.py#L593-L598

    def open(self, options=None, mimetype='application/octet-stream'):
        """
        open: return a file like object for self.
        The method can be used with the 'with' statment.
        """
        return self.connection.open(self, options, mimetype)

915 | Unidata/siphon | siphon/cdmr/dataset.py | Group.path | python | train | https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/dataset.py#L53-L59

    def path(self):
        """Return the full path to the Group, including any parent Groups."""
        # If root, return '/'
        if self.dataset is self:
            return ''
        else:  # Otherwise recurse
            return self.dataset.path + '/' + self.name

916 | spyder-ide/spyder | spyder/plugins/editor/panels/linenumber.py | LineNumberArea.mouseMoveEvent | python | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/linenumber.py#L114-L134

    def mouseMoveEvent(self, event):
        """Override Qt method.

        Show code analisis, if left button pressed select lines.
        """
        line_number = self.editor.get_linenumber_from_mouse_event(event)
        block = self.editor.document().findBlockByNumber(line_number-1)
        data = block.userData()

        # this disables pyflakes messages if there is an active drag/selection
        # operation
        check = self._released == -1

        if data and data.code_analysis and check:
            self.editor.show_code_analysis_results(line_number,
                                                   data)
        else:
            self.editor.hide_tooltip()

        if event.buttons() == Qt.LeftButton:
            self._released = line_number
            self.editor.select_lines(self._pressed, self._released)

917 | adamrehn/slidingwindow | slidingwindow/SlidingWindow.py | generate | def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
"""
Generates a set of sliding windows for the specified dataset.
"""
# Determine the dimensions of the input data
width = data.shape[dimOrder.index('w')]
height = data.shape[dimOrder.index('h')]
# Generate the windows
return generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms) | python | def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
"""
Generates a set of sliding windows for the specified dataset.
"""
# Determine the dimensions of the input data
width = data.shape[dimOrder.index('w')]
height = data.shape[dimOrder.index('h')]
# Generate the windows
return generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms) | ['def', 'generate', '(', 'data', ',', 'dimOrder', ',', 'maxWindowSize', ',', 'overlapPercent', ',', 'transforms', '=', '[', ']', ')', ':', '# Determine the dimensions of the input data', 'width', '=', 'data', '.', 'shape', '[', 'dimOrder', '.', 'index', '(', "'w'", ')', ']', 'height', '=', 'data', '.', 'shape', '[', 'dimOrder', '.', 'index', '(', "'h'", ')', ']', '# Generate the windows', 'return', 'generateForSize', '(', 'width', ',', 'height', ',', 'dimOrder', ',', 'maxWindowSize', ',', 'overlapPercent', ',', 'transforms', ')'] | Generates a set of sliding windows for the specified dataset. | ['Generates', 'a', 'set', 'of', 'sliding', 'windows', 'for', 'the', 'specified', 'dataset', '.'] | train | https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L87-L97 |
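
generate() in the row above only resolves the pixel width/height from dimOrder before handing off to generateForSize (not shown in this row). A hedged sketch of that lookup plus a much-simplified tiling loop of my own, not the library's actual window math:

import numpy as np

def simple_windows(data, dim_order, max_window, overlap):
    # Resolve width/height from the axis-order string, as generate() does.
    width = data.shape[dim_order.index('w')]
    height = data.shape[dim_order.index('h')]
    step = max(1, int(max_window * (1.0 - overlap)))
    wins = []
    for y in range(0, height, step):
        for x in range(0, width, step):
            wins.append((x, y, min(max_window, width - x), min(max_window, height - y)))
    return wins

image = np.zeros((128, 256, 3))          # h, w, channels
print(len(simple_windows(image, 'hwc', max_window=64, overlap=0.5)))   # 32 windows
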
918 | apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | relaxNgValidCtxtCore.setValidityErrorHandler | def setValidityErrorHandler(self, err_func, warn_func, arg=None):
"""
Register error and warning handlers for RelaxNG validation.
These will be called back as f(msg,arg)
"""
libxml2mod.xmlRelaxNGSetValidErrors(self._o, err_func, warn_func, arg) | python | def setValidityErrorHandler(self, err_func, warn_func, arg=None):
"""
Register error and warning handlers for RelaxNG validation.
These will be called back as f(msg,arg)
"""
libxml2mod.xmlRelaxNGSetValidErrors(self._o, err_func, warn_func, arg) | ['def', 'setValidityErrorHandler', '(', 'self', ',', 'err_func', ',', 'warn_func', ',', 'arg', '=', 'None', ')', ':', 'libxml2mod', '.', 'xmlRelaxNGSetValidErrors', '(', 'self', '.', '_o', ',', 'err_func', ',', 'warn_func', ',', 'arg', ')'] | Register error and warning handlers for RelaxNG validation.
These will be called back as f(msg,arg) | ['Register', 'error', 'and', 'warning', 'handlers', 'for', 'RelaxNG', 'validation', '.', 'These', 'will', 'be', 'called', 'back', 'as', 'f', '(', 'msg', 'arg', ')'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L705-L710 |
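
A sketch of the callback contract documented above: each handler is invoked as f(msg, arg). FakeValidCtxt is a hypothetical stand-in so the snippet runs without libxml2; with the real binding you would call setValidityErrorHandler on the RelaxNG validation context in the same way.

errors = []

def on_error(msg, arg):
    # arg is whatever was passed as the third argument at registration time.
    errors.append(('ERROR', arg, msg.strip()))

def on_warning(msg, arg):
    errors.append(('WARNING', arg, msg.strip()))

class FakeValidCtxt(object):
    """Hypothetical stand-in for a RelaxNG validation context."""
    def setValidityErrorHandler(self, err_func, warn_func, arg=None):
        self._err, self._warn, self._arg = err_func, warn_func, arg
    def report(self, msg):
        self._err(msg, self._arg)

ctxt = FakeValidCtxt()
ctxt.setValidityErrorHandler(on_error, on_warning, arg='my-schema')
ctxt.report('Expecting element title, got chapter\n')
print(errors)
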
919 | lsbardel/python-stdnet | stdnet/odm/session.py | SessionModel.expunge | def expunge(self, instance):
'''Remove *instance* from the :class:`Session`. Instance could be a
:class:`Model` or an id.
:parameter instance: a :class:`Model` or an *id*
:rtype: the :class:`Model` removed from session or ``None`` if
it was not in the session.
'''
instance = self.pop(instance)
instance.session = None
return instance | python | def expunge(self, instance):
'''Remove *instance* from the :class:`Session`. Instance could be a
:class:`Model` or an id.
:parameter instance: a :class:`Model` or an *id*
:rtype: the :class:`Model` removed from session or ``None`` if
it was not in the session.
'''
instance = self.pop(instance)
instance.session = None
return instance | ['def', 'expunge', '(', 'self', ',', 'instance', ')', ':', 'instance', '=', 'self', '.', 'pop', '(', 'instance', ')', 'instance', '.', 'session', '=', 'None', 'return', 'instance'] | Remove *instance* from the :class:`Session`. Instance could be a
:class:`Model` or an id.
:parameter instance: a :class:`Model` or an *id*
:rtype: the :class:`Model` removed from session or ``None`` if
it was not in the session. | ['Remove', '*', 'instance', '*', 'from', 'the', ':', 'class', ':', 'Session', '.', 'Instance', 'could', 'be', 'a', ':', 'class', ':', 'Model', 'or', 'an', 'id', '.', ':', 'parameter', 'instance', ':', 'a', ':', 'class', ':', 'Model', 'or', 'an', '*', 'id', '*', ':', 'rtype', ':', 'the', ':', 'class', ':', 'Model', 'removed', 'from', 'session', 'or', 'None', 'if', 'it', 'was', 'not', 'in', 'the', 'session', '.'] | train | https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L192-L202 |
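
A toy, dict-backed illustration of the expunge contract above (detach the instance, clear its session, hand it back, or None when it was never tracked); TinySession and Model are made-up stand-ins, not stdnet classes:

class Model(object):
    def __init__(self, pk):
        self.pk = pk
        self.session = None

class TinySession(object):
    """Minimal stand-in for the session bookkeeping described above."""
    def __init__(self):
        self._instances = {}

    def add(self, instance):
        self._instances[instance.pk] = instance
        instance.session = self

    def expunge(self, instance_or_pk):
        # Accepts either a model instance or a bare id, like the real method.
        pk = getattr(instance_or_pk, 'pk', instance_or_pk)
        instance = self._instances.pop(pk, None)
        if instance is not None:
            instance.session = None
        return instance

session = TinySession()
obj = Model(1)
session.add(obj)
removed = session.expunge(1)
print(removed is obj, removed.session)   # True None
print(session.expunge(99))               # None: was not in the session
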
920 | gem/oq-engine | openquake/baselib/datastore.py | get_calc_ids | def get_calc_ids(datadir=None):
"""
Extract the available calculation IDs from the datadir, in order.
"""
datadir = datadir or get_datadir()
if not os.path.exists(datadir):
return []
calc_ids = set()
for f in os.listdir(datadir):
mo = re.match(CALC_REGEX, f)
if mo:
calc_ids.add(int(mo.group(2)))
return sorted(calc_ids) | python | def get_calc_ids(datadir=None):
"""
Extract the available calculation IDs from the datadir, in order.
"""
datadir = datadir or get_datadir()
if not os.path.exists(datadir):
return []
calc_ids = set()
for f in os.listdir(datadir):
mo = re.match(CALC_REGEX, f)
if mo:
calc_ids.add(int(mo.group(2)))
return sorted(calc_ids) | ['def', 'get_calc_ids', '(', 'datadir', '=', 'None', ')', ':', 'datadir', '=', 'datadir', 'or', 'get_datadir', '(', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'datadir', ')', ':', 'return', '[', ']', 'calc_ids', '=', 'set', '(', ')', 'for', 'f', 'in', 'os', '.', 'listdir', '(', 'datadir', ')', ':', 'mo', '=', 're', '.', 'match', '(', 'CALC_REGEX', ',', 'f', ')', 'if', 'mo', ':', 'calc_ids', '.', 'add', '(', 'int', '(', 'mo', '.', 'group', '(', '2', ')', ')', ')', 'return', 'sorted', '(', 'calc_ids', ')'] | Extract the available calculation IDs from the datadir, in order. | ['Extract', 'the', 'available', 'calculation', 'IDs', 'from', 'the', 'datadir', 'in', 'order', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/datastore.py#L46-L58 |
921 | OSSOS/MOP | src/ossos/core/ossos/ssos.py | TrackTarget.query_ssos | def query_ssos(self, target_name, lunation_count=None):
"""Send a query to the SSOS web service, looking for available observations using the given track.
:param target_name: name of target to query against SSOIS db
:param lunation_count: ignored
:rtype: SSOSData
"""
# we observe ~ a week either side of new moon
# but we don't know when in the dark run the discovery happened
# so be generous with the search boundaries, add extra 2 weeks
# current date just has to be the night of the triplet,
from mp_ephem import horizons
search_start_date = Time('1999-01-01', scale='utc')
search_end_date = Time(datetime.datetime.now().strftime('%Y-%m-%d'), scale='utc')
logger.info("Sending query to SSOS start_date: {} end_data: {}\n".format(search_start_date, search_end_date))
query = Query(target_name,
search_start_date=search_start_date,
search_end_date=search_end_date)
logger.debug("Parsing query results...")
tracks_data = self.ssos_parser.parse(query.get())
tracks_data.mpc_observations = {}
start_time = Time(search_start_date)
stop_time = Time(search_end_date)
step_size = 5 * units.hour
self.orbit = horizons.Body(target_name, start_time, stop_time, step_size)
ref_sky_coord = None
for source in tracks_data.get_sources():
astrom_observations = tracks_data.observations
source_readings = source.get_readings()
for idx in range(len(source_readings)):
source_reading = source_readings[idx]
assert isinstance(source_reading, SourceReading)
if ref_sky_coord is None or source_reading.sky_coord.separation(ref_sky_coord) > 40 * units.arcsec:
ref_sky_coord = source_reading.sky_coord
source_reading.reference_sky_coord = ref_sky_coord
astrom_observation = astrom_observations[idx]
self.orbit.predict(Time(astrom_observation.mjd, format='mjd', scale='utc'))
source_reading.pa = self.orbit.pa
# why are these being recorded just in pixels? Because the error ellipse is drawn in pixels.
# TODO: Modify error ellipse drawing routine to use WCS but be sure
# that this does not cause trouble with the use of dra/ddec for cutout computer
source_reading.dx = self.orbit.dra
source_reading.dy = self.orbit.ddec
logger.debug("Sending back set of observations that might contain the target: {}".format(tracks_data))
return tracks_data | python | def query_ssos(self, target_name, lunation_count=None):
"""Send a query to the SSOS web service, looking for available observations using the given track.
:param target_name: name of target to query against SSOIS db
:param lunation_count: ignored
:rtype: SSOSData
"""
# we observe ~ a week either side of new moon
# but we don't know when in the dark run the discovery happened
# so be generous with the search boundaries, add extra 2 weeks
# current date just has to be the night of the triplet,
from mp_ephem import horizons
search_start_date = Time('1999-01-01', scale='utc')
search_end_date = Time(datetime.datetime.now().strftime('%Y-%m-%d'), scale='utc')
logger.info("Sending query to SSOS start_date: {} end_data: {}\n".format(search_start_date, search_end_date))
query = Query(target_name,
search_start_date=search_start_date,
search_end_date=search_end_date)
logger.debug("Parsing query results...")
tracks_data = self.ssos_parser.parse(query.get())
tracks_data.mpc_observations = {}
start_time = Time(search_start_date)
stop_time = Time(search_end_date)
step_size = 5 * units.hour
self.orbit = horizons.Body(target_name, start_time, stop_time, step_size)
ref_sky_coord = None
for source in tracks_data.get_sources():
astrom_observations = tracks_data.observations
source_readings = source.get_readings()
for idx in range(len(source_readings)):
source_reading = source_readings[idx]
assert isinstance(source_reading, SourceReading)
if ref_sky_coord is None or source_reading.sky_coord.separation(ref_sky_coord) > 40 * units.arcsec:
ref_sky_coord = source_reading.sky_coord
source_reading.reference_sky_coord = ref_sky_coord
astrom_observation = astrom_observations[idx]
self.orbit.predict(Time(astrom_observation.mjd, format='mjd', scale='utc'))
source_reading.pa = self.orbit.pa
# why are these being recorded just in pixels? Because the error ellipse is drawn in pixels.
# TODO: Modify error ellipse drawing routine to use WCS but be sure
# that this does not cause trouble with the use of dra/ddec for cutout computer
source_reading.dx = self.orbit.dra
source_reading.dy = self.orbit.ddec
logger.debug("Sending back set of observations that might contain the target: {}".format(tracks_data))
return tracks_data | ['def', 'query_ssos', '(', 'self', ',', 'target_name', ',', 'lunation_count', '=', 'None', ')', ':', '# we observe ~ a week either side of new moon', "# but we don't know when in the dark run the discovery happened", '# so be generous with the search boundaries, add extra 2 weeks', '# current date just has to be the night of the triplet,', 'from', 'mp_ephem', 'import', 'horizons', 'search_start_date', '=', 'Time', '(', "'1999-01-01'", ',', 'scale', '=', "'utc'", ')', 'search_end_date', '=', 'Time', '(', 'datetime', '.', 'datetime', '.', 'now', '(', ')', '.', 'strftime', '(', "'%Y-%m-%d'", ')', ',', 'scale', '=', "'utc'", ')', 'logger', '.', 'info', '(', '"Sending query to SSOS start_date: {} end_data: {}\\n"', '.', 'format', '(', 'search_start_date', ',', 'search_end_date', ')', ')', 'query', '=', 'Query', '(', 'target_name', ',', 'search_start_date', '=', 'search_start_date', ',', 'search_end_date', '=', 'search_end_date', ')', 'logger', '.', 'debug', '(', '"Parsing query results..."', ')', 'tracks_data', '=', 'self', '.', 'ssos_parser', '.', 'parse', '(', 'query', '.', 'get', '(', ')', ')', 'tracks_data', '.', 'mpc_observations', '=', '{', '}', 'start_time', '=', 'Time', '(', 'search_start_date', ')', 'stop_time', '=', 'Time', '(', 'search_end_date', ')', 'step_size', '=', '5', '*', 'units', '.', 'hour', 'self', '.', 'orbit', '=', 'horizons', '.', 'Body', '(', 'target_name', ',', 'start_time', ',', 'stop_time', ',', 'step_size', ')', 'ref_sky_coord', '=', 'None', 'for', 'source', 'in', 'tracks_data', '.', 'get_sources', '(', ')', ':', 'astrom_observations', '=', 'tracks_data', '.', 'observations', 'source_readings', '=', 'source', '.', 'get_readings', '(', ')', 'for', 'idx', 'in', 'range', '(', 'len', '(', 'source_readings', ')', ')', ':', 'source_reading', '=', 'source_readings', '[', 'idx', ']', 'assert', 'isinstance', '(', 'source_reading', ',', 'SourceReading', ')', 'if', 'ref_sky_coord', 'is', 'None', 'or', 'source_reading', '.', 'sky_coord', '.', 'separation', '(', 'ref_sky_coord', ')', '>', '40', '*', 'units', '.', 'arcsec', ':', 'ref_sky_coord', '=', 'source_reading', '.', 'sky_coord', 'source_reading', '.', 'reference_sky_coord', '=', 'ref_sky_coord', 'astrom_observation', '=', 'astrom_observations', '[', 'idx', ']', 'self', '.', 'orbit', '.', 'predict', '(', 'Time', '(', 'astrom_observation', '.', 'mjd', ',', 'format', '=', "'mjd'", ',', 'scale', '=', "'utc'", ')', ')', 'source_reading', '.', 'pa', '=', 'self', '.', 'orbit', '.', 'pa', '# why are these being recorded just in pixels? Because the error ellipse is drawn in pixels.', '# TODO: Modify error ellipse drawing routine to use WCS but be sure', '# that this does not cause trouble with the use of dra/ddec for cutout computer', 'source_reading', '.', 'dx', '=', 'self', '.', 'orbit', '.', 'dra', 'source_reading', '.', 'dy', '=', 'self', '.', 'orbit', '.', 'ddec', 'logger', '.', 'debug', '(', '"Sending back set of observations that might contain the target: {}"', '.', 'format', '(', 'tracks_data', ')', ')', 'return', 'tracks_data'] | Send a query to the SSOS web service, looking for available observations using the given track.
:param target_name: name of target to query against SSOIS db
:param lunation_count: ignored
:rtype: SSOSData | ['Send', 'a', 'query', 'to', 'the', 'SSOS', 'web', 'service', 'looking', 'for', 'available', 'observations', 'using', 'the', 'given', 'track', '.'] | train | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/ssos.py#L189-L239 |
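
Most of query_ssos() above is SSOS/Horizons plumbing; the reusable bit is refreshing the reference coordinate only when a reading drifts more than 40 arcsec from it. A hedged astropy-only sketch of that loop, detached from the query machinery and using made-up coordinates:

from astropy import units as u
from astropy.coordinates import SkyCoord

readings = [SkyCoord(10.0000, 5.0, unit='deg'),
            SkyCoord(10.0020, 5.0, unit='deg'),    # ~7 arcsec from the first
            SkyCoord(10.0300, 5.0, unit='deg')]    # ~107 arcsec from the first

ref_sky_coord = None
for sky_coord in readings:
    if ref_sky_coord is None or sky_coord.separation(ref_sky_coord) > 40 * u.arcsec:
        ref_sky_coord = sky_coord          # adopt a fresh reference, as above
    print(sky_coord.ra.deg, '-> reference ra', ref_sky_coord.ra.deg)
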
922 | DataBiosphere/toil | src/toil/serviceManager.py | ServiceManager.getServiceJobsToStart | def getServiceJobsToStart(self, maxWait):
"""
:param float maxWait: Time in seconds to wait to get a job before returning.
:return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing
a service job to start.
:rtype: toil.job.ServiceJobNode
"""
try:
serviceJob = self._serviceJobGraphsToStart.get(timeout=maxWait)
assert self.jobsIssuedToServiceManager >= 0
self.jobsIssuedToServiceManager -= 1
return serviceJob
except Empty:
return None | python | def getServiceJobsToStart(self, maxWait):
"""
:param float maxWait: Time in seconds to wait to get a job before returning.
:return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing
a service job to start.
:rtype: toil.job.ServiceJobNode
"""
try:
serviceJob = self._serviceJobGraphsToStart.get(timeout=maxWait)
assert self.jobsIssuedToServiceManager >= 0
self.jobsIssuedToServiceManager -= 1
return serviceJob
except Empty:
return None | ['def', 'getServiceJobsToStart', '(', 'self', ',', 'maxWait', ')', ':', 'try', ':', 'serviceJob', '=', 'self', '.', '_serviceJobGraphsToStart', '.', 'get', '(', 'timeout', '=', 'maxWait', ')', 'assert', 'self', '.', 'jobsIssuedToServiceManager', '>=', '0', 'self', '.', 'jobsIssuedToServiceManager', '-=', '1', 'return', 'serviceJob', 'except', 'Empty', ':', 'return', 'None'] | :param float maxWait: Time in seconds to wait to get a job before returning.
:return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing
a service job to start.
:rtype: toil.job.ServiceJobNode | [':', 'param', 'float', 'maxWait', ':', 'Time', 'in', 'seconds', 'to', 'wait', 'to', 'get', 'a', 'job', 'before', 'returning', '.', ':', 'return', ':', 'a', 'tuple', 'of', '(', 'serviceJobStoreID', 'memory', 'cores', 'disk', '..', ')', 'representing', 'a', 'service', 'job', 'to', 'start', '.', ':', 'rtype', ':', 'toil', '.', 'job', '.', 'ServiceJobNode'] | train | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/serviceManager.py#L106-L119 |
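
The method above is essentially a timed Queue.get that swallows Empty and signals "nothing yet" with None; a standalone sketch of that pattern with the standard-library queue:

from queue import Queue, Empty

def get_next(job_queue, max_wait):
    """Return the next queued job, or None if nothing arrives within max_wait seconds."""
    try:
        return job_queue.get(timeout=max_wait)
    except Empty:
        return None

jobs = Queue()
jobs.put('serviceJob-1')
print(get_next(jobs, 0.1))   # 'serviceJob-1'
print(get_next(jobs, 0.1))   # None once the timeout expires
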
923 | keon/algorithms | algorithms/strings/first_unique_char.py | first_unique_char | def first_unique_char(s):
"""
:type s: str
:rtype: int
"""
if (len(s) == 1):
return 0
ban = []
for i in range(len(s)):
if all(s[i] != s[k] for k in range(i + 1, len(s))) == True and s[i] not in ban:
return i
else:
ban.append(s[i])
return -1 | python | def first_unique_char(s):
"""
:type s: str
:rtype: int
"""
if (len(s) == 1):
return 0
ban = []
for i in range(len(s)):
if all(s[i] != s[k] for k in range(i + 1, len(s))) == True and s[i] not in ban:
return i
else:
ban.append(s[i])
return -1 | ['def', 'first_unique_char', '(', 's', ')', ':', 'if', '(', 'len', '(', 's', ')', '==', '1', ')', ':', 'return', '0', 'ban', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 's', ')', ')', ':', 'if', 'all', '(', 's', '[', 'i', ']', '!=', 's', '[', 'k', ']', 'for', 'k', 'in', 'range', '(', 'i', '+', '1', ',', 'len', '(', 's', ')', ')', ')', '==', 'True', 'and', 's', '[', 'i', ']', 'not', 'in', 'ban', ':', 'return', 'i', 'else', ':', 'ban', '.', 'append', '(', 's', '[', 'i', ']', ')', 'return', '-', '1'] | :type s: str
:rtype: int | [':', 'type', 's', ':', 'str', ':', 'rtype', ':', 'int'] | train | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/strings/first_unique_char.py#L14-L27 |
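
The scan above is quadratic because every character is compared against the rest of the string. For contrast, a hedged single-pass variant (my own, not from the repository) built on collections.Counter:

from collections import Counter

def first_unique_char_linear(s):
    """Same contract as above, but a single counting pass keeps it O(n)."""
    counts = Counter(s)
    for i, ch in enumerate(s):
        if counts[ch] == 1:
            return i
    return -1

for word in ('leetcode', 'loveleetcode', 'aabb'):
    print(word, first_unique_char_linear(word))   # 0, 2, -1
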
924 | galaxyproject/pulsar | pulsar/client/manager.py | ClientManager.get_client | def get_client(self, destination_params, job_id, **kwargs):
"""Build a client given specific destination parameters and job_id."""
destination_params = _parse_destination_params(destination_params)
destination_params.update(**kwargs)
job_manager_interface_class = self.job_manager_interface_class
job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args)
job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds) | python | def get_client(self, destination_params, job_id, **kwargs):
"""Build a client given specific destination parameters and job_id."""
destination_params = _parse_destination_params(destination_params)
destination_params.update(**kwargs)
job_manager_interface_class = self.job_manager_interface_class
job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args)
job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds) | ['def', 'get_client', '(', 'self', ',', 'destination_params', ',', 'job_id', ',', '*', '*', 'kwargs', ')', ':', 'destination_params', '=', '_parse_destination_params', '(', 'destination_params', ')', 'destination_params', '.', 'update', '(', '*', '*', 'kwargs', ')', 'job_manager_interface_class', '=', 'self', '.', 'job_manager_interface_class', 'job_manager_interface_args', '=', 'dict', '(', 'destination_params', '=', 'destination_params', ',', '*', '*', 'self', '.', 'job_manager_interface_args', ')', 'job_manager_interface', '=', 'job_manager_interface_class', '(', '*', '*', 'job_manager_interface_args', ')', 'return', 'self', '.', 'client_class', '(', 'destination_params', ',', 'job_id', ',', 'job_manager_interface', ',', '*', '*', 'self', '.', 'extra_client_kwds', ')'] | Build a client given specific destination parameters and job_id. | ['Build', 'a', 'client', 'given', 'specific', 'destination', 'parameters', 'and', 'job_id', '.'] | train | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/manager.py#L83-L90 |
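
A rough sketch of the construction flow above: merge destination parameters with per-call overrides, build a manager interface, then hand both to the client. All class names and parameters here are hypothetical stand-ins, not Pulsar's real API:

class FakeManagerInterface(object):
    """Stand-in for the job-manager interface; not Pulsar's real class."""
    def __init__(self, destination_params, **base_args):
        self.params = dict(base_args, **destination_params)

class FakeClient(object):
    def __init__(self, destination_params, job_id, interface):
        self.destination_params = destination_params
        self.job_id = job_id
        self.interface = interface

def build_client(destination_params, job_id, **kwargs):
    params = dict(destination_params)   # stands in for the parse step
    params.update(**kwargs)
    return FakeClient(params, job_id, FakeManagerInterface(params, timeout=30))

client = build_client({'url': 'https://pulsar.example/api'}, job_id='42', token='secret')
print(client.job_id, sorted(client.interface.params))   # 42 ['timeout', 'token', 'url']
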
925 | seperman/s3utils | s3utils/s3utils.py | S3utils.ls | def ls(self, folder="", begin_from_file="", num=-1, get_grants=False, all_grant_data=False):
"""
gets the list of file names (keys) in a s3 folder
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file: string, optional
which file to start from on S3.
            This is useful in case you are iterating over lists of files and you need to page the result by
starting listing from a certain file and fetching certain num (number) of files.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> print(s3utils.ls("test/"))
{u'test/myfolder/', u'test/myfolder/em/', u'test/myfolder/hoho/', u'test/myfolder/hoho/.DS_Store', u'test/myfolder/hoho/haha/', u'test/myfolder/hoho/haha/ff', u'test/myfolder/hoho/haha/photo.JPG'}
"""
# S3 object key can't start with /
folder = re.sub(r"^/", "", folder)
bucket_files = self.bucket.list(prefix=folder, marker=begin_from_file)
# in case listing grants
if get_grants:
list_of_files = OrderedDict()
for (i, v) in enumerate(bucket_files):
file_info = {v.name: self.__get_grants(v.name, all_grant_data)}
list_of_files.update(file_info)
if i == num:
break
else:
list_of_files = set([])
for (i, v) in enumerate(bucket_files):
list_of_files.add(v.name)
if i == num:
break
return list_of_files | python | def ls(self, folder="", begin_from_file="", num=-1, get_grants=False, all_grant_data=False):
"""
gets the list of file names (keys) in a s3 folder
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file: string, optional
which file to start from on S3.
            This is useful in case you are iterating over lists of files and you need to page the result by
starting listing from a certain file and fetching certain num (number) of files.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> print(s3utils.ls("test/"))
{u'test/myfolder/', u'test/myfolder/em/', u'test/myfolder/hoho/', u'test/myfolder/hoho/.DS_Store', u'test/myfolder/hoho/haha/', u'test/myfolder/hoho/haha/ff', u'test/myfolder/hoho/haha/photo.JPG'}
"""
# S3 object key can't start with /
folder = re.sub(r"^/", "", folder)
bucket_files = self.bucket.list(prefix=folder, marker=begin_from_file)
# in case listing grants
if get_grants:
list_of_files = OrderedDict()
for (i, v) in enumerate(bucket_files):
file_info = {v.name: self.__get_grants(v.name, all_grant_data)}
list_of_files.update(file_info)
if i == num:
break
else:
list_of_files = set([])
for (i, v) in enumerate(bucket_files):
list_of_files.add(v.name)
if i == num:
break
return list_of_files | ['def', 'ls', '(', 'self', ',', 'folder', '=', '""', ',', 'begin_from_file', '=', '""', ',', 'num', '=', '-', '1', ',', 'get_grants', '=', 'False', ',', 'all_grant_data', '=', 'False', ')', ':', "# S3 object key can't start with /", 'folder', '=', 're', '.', 'sub', '(', 'r"^/"', ',', '""', ',', 'folder', ')', 'bucket_files', '=', 'self', '.', 'bucket', '.', 'list', '(', 'prefix', '=', 'folder', ',', 'marker', '=', 'begin_from_file', ')', '# in case listing grants', 'if', 'get_grants', ':', 'list_of_files', '=', 'OrderedDict', '(', ')', 'for', '(', 'i', ',', 'v', ')', 'in', 'enumerate', '(', 'bucket_files', ')', ':', 'file_info', '=', '{', 'v', '.', 'name', ':', 'self', '.', '__get_grants', '(', 'v', '.', 'name', ',', 'all_grant_data', ')', '}', 'list_of_files', '.', 'update', '(', 'file_info', ')', 'if', 'i', '==', 'num', ':', 'break', 'else', ':', 'list_of_files', '=', 'set', '(', '[', ']', ')', 'for', '(', 'i', ',', 'v', ')', 'in', 'enumerate', '(', 'bucket_files', ')', ':', 'list_of_files', '.', 'add', '(', 'v', '.', 'name', ')', 'if', 'i', '==', 'num', ':', 'break', 'return', 'list_of_files'] | gets the list of file names (keys) in a s3 folder
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file: string, optional
which file to start from on S3.
            This is useful in case you are iterating over lists of files and you need to page the result by
starting listing from a certain file and fetching certain num (number) of files.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> print(s3utils.ls("test/"))
{u'test/myfolder/', u'test/myfolder/em/', u'test/myfolder/hoho/', u'test/myfolder/hoho/.DS_Store', u'test/myfolder/hoho/haha/', u'test/myfolder/hoho/haha/ff', u'test/myfolder/hoho/haha/photo.JPG'} | ['gets', 'the', 'list', 'of', 'file', 'names', '(', 'keys', ')', 'in', 'a', 's3', 'folder'] | train | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L613-L667 |
926 | kytos/python-openflow | pyof/v0x04/controller2switch/meter_mod.py | MeterBandHeader.unpack | def unpack(self, buff=None, offset=0):
"""Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
band_type = UBInt16(enum_ref=MeterBandType)
band_type.unpack(buff, offset)
self.__class__ = MeterBandType(band_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset) | python | def unpack(self, buff=None, offset=0):
"""Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
band_type = UBInt16(enum_ref=MeterBandType)
band_type.unpack(buff, offset)
self.__class__ = MeterBandType(band_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset) | ['def', 'unpack', '(', 'self', ',', 'buff', '=', 'None', ',', 'offset', '=', '0', ')', ':', 'band_type', '=', 'UBInt16', '(', 'enum_ref', '=', 'MeterBandType', ')', 'band_type', '.', 'unpack', '(', 'buff', ',', 'offset', ')', 'self', '.', '__class__', '=', 'MeterBandType', '(', 'band_type', '.', 'value', ')', '.', 'find_class', '(', ')', 'length', '=', 'UBInt16', '(', ')', 'length', '.', 'unpack', '(', 'buff', ',', 'offset', '=', 'offset', '+', '2', ')', 'super', '(', ')', '.', 'unpack', '(', 'buff', '[', ':', 'offset', '+', 'length', '.', 'value', ']', ',', 'offset', ')'] | Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails. | ['Unpack', '*', 'buff', '*', 'into', 'this', 'object', '.'] | train | https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/v0x04/controller2switch/meter_mod.py#L96-L117 |
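
unpack() above peeks at the leading 16-bit type field, swaps in the matching subclass, then reads only `length` bytes. A struct-only sketch of that peek-and-dispatch idea; the band classes and registry here are made up, not pyof's:

import struct

class Drop(object):
    band_type = 1

class DscpRemark(object):
    band_type = 2

# Hypothetical registry standing in for MeterBandType(...).find_class() above.
BAND_CLASSES = {1: Drop, 2: DscpRemark}

def peek_band(buff, offset=0):
    # Read the 16-bit type and 16-bit length fields first, then pick the class.
    band_type, length = struct.unpack_from('!HH', buff, offset)
    return BAND_CLASSES[band_type](), length

raw = struct.pack('!HH', 2, 16) + b'\x00' * 12     # type=2, length=16
band, length = peek_band(raw)
print(type(band).__name__, length)                 # DscpRemark 16
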
927 | pyopenapi/pyswagger | pyswagger/utils.py | walk | def walk(start, ofn, cyc=None):
""" Non recursive DFS to detect cycles
:param start: start vertex in graph
:param ofn: function to get the list of outgoing edges of a vertex
:param cyc: list of existing cycles, cycles are represented in a list started with minimum vertex.
:return: cycles
:rtype: list of lists
"""
ctx, stk = {}, [start]
cyc = [] if cyc == None else cyc
while len(stk):
top = stk[-1]
if top not in ctx:
ctx.update({top:list(ofn(top))})
if len(ctx[top]):
n = ctx[top][0]
if n in stk:
# cycles found,
# normalize the representation of cycles,
# start from the smallest vertex, ex.
# 4 -> 5 -> 2 -> 7 -> 9 would produce
# (2, 7, 9, 4, 5)
nc = stk[stk.index(n):]
ni = nc.index(min(nc))
nc = nc[ni:] + nc[:ni] + [min(nc)]
if nc not in cyc:
cyc.append(nc)
ctx[top].pop(0)
else:
stk.append(n)
else:
ctx.pop(top)
stk.pop()
if len(stk):
ctx[stk[-1]].remove(top)
return cyc | python | def walk(start, ofn, cyc=None):
""" Non recursive DFS to detect cycles
:param start: start vertex in graph
:param ofn: function to get the list of outgoing edges of a vertex
:param cyc: list of existing cycles, cycles are represented in a list started with minimum vertex.
:return: cycles
:rtype: list of lists
"""
ctx, stk = {}, [start]
cyc = [] if cyc == None else cyc
while len(stk):
top = stk[-1]
if top not in ctx:
ctx.update({top:list(ofn(top))})
if len(ctx[top]):
n = ctx[top][0]
if n in stk:
# cycles found,
# normalize the representation of cycles,
# start from the smallest vertex, ex.
# 4 -> 5 -> 2 -> 7 -> 9 would produce
# (2, 7, 9, 4, 5)
nc = stk[stk.index(n):]
ni = nc.index(min(nc))
nc = nc[ni:] + nc[:ni] + [min(nc)]
if nc not in cyc:
cyc.append(nc)
ctx[top].pop(0)
else:
stk.append(n)
else:
ctx.pop(top)
stk.pop()
if len(stk):
ctx[stk[-1]].remove(top)
return cyc | ['def', 'walk', '(', 'start', ',', 'ofn', ',', 'cyc', '=', 'None', ')', ':', 'ctx', ',', 'stk', '=', '{', '}', ',', '[', 'start', ']', 'cyc', '=', '[', ']', 'if', 'cyc', '==', 'None', 'else', 'cyc', 'while', 'len', '(', 'stk', ')', ':', 'top', '=', 'stk', '[', '-', '1', ']', 'if', 'top', 'not', 'in', 'ctx', ':', 'ctx', '.', 'update', '(', '{', 'top', ':', 'list', '(', 'ofn', '(', 'top', ')', ')', '}', ')', 'if', 'len', '(', 'ctx', '[', 'top', ']', ')', ':', 'n', '=', 'ctx', '[', 'top', ']', '[', '0', ']', 'if', 'n', 'in', 'stk', ':', '# cycles found,', '# normalize the representation of cycles,', '# start from the smallest vertex, ex.', '# 4 -> 5 -> 2 -> 7 -> 9 would produce', '# (2, 7, 9, 4, 5)', 'nc', '=', 'stk', '[', 'stk', '.', 'index', '(', 'n', ')', ':', ']', 'ni', '=', 'nc', '.', 'index', '(', 'min', '(', 'nc', ')', ')', 'nc', '=', 'nc', '[', 'ni', ':', ']', '+', 'nc', '[', ':', 'ni', ']', '+', '[', 'min', '(', 'nc', ')', ']', 'if', 'nc', 'not', 'in', 'cyc', ':', 'cyc', '.', 'append', '(', 'nc', ')', 'ctx', '[', 'top', ']', '.', 'pop', '(', '0', ')', 'else', ':', 'stk', '.', 'append', '(', 'n', ')', 'else', ':', 'ctx', '.', 'pop', '(', 'top', ')', 'stk', '.', 'pop', '(', ')', 'if', 'len', '(', 'stk', ')', ':', 'ctx', '[', 'stk', '[', '-', '1', ']', ']', '.', 'remove', '(', 'top', ')', 'return', 'cyc'] | Non recursive DFS to detect cycles
:param start: start vertex in graph
:param ofn: function to get the list of outgoing edges of a vertex
:param cyc: list of existing cycles, cycles are represented in a list started with minimum vertex.
:return: cycles
:rtype: list of lists | ['Non', 'recursive', 'DFS', 'to', 'detect', 'cycles'] | train | https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/utils.py#L439-L480 |
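
A quick usage check for walk() above (assuming it is in scope): outgoing edges come from a plain adjacency dict, and each reported cycle is rotated to start, and end, at its smallest vertex.

# Directed graph with two cycles: 1 -> 2 -> 3 -> 1 and 4 -> 5 -> 4.
graph = {1: [2], 2: [3], 3: [1, 4], 4: [5], 5: [4]}

cycles = walk(1, lambda v: graph.get(v, []))
print(cycles)   # [[1, 2, 3, 1], [4, 5, 4]]
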
928 | saltstack/salt | salt/modules/postgres.py | default_privileges_revoke | def default_privileges_revoke(name,
object_name,
object_type,
defprivileges=None,
prepend='public',
maintenance_db=None,
user=None,
host=None,
port=None,
password=None,
runas=None):
'''
.. versionadded:: 2019.0.0
Revoke default privileges on a postgres object
CLI Example:
.. code-block:: bash
salt '*' postgres.default_privileges_revoke user_name table_name table \\
SELECT,UPDATE maintenance_db=db_name
name
Name of the role whose default privileges should be revoked
object_name
Name of the object on which the revoke is to be performed
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
privileges
Comma separated list of privileges to revoke, from the list below:
- INSERT
- CREATE
- TRUNCATE
- TRIGGER
- SELECT
- USAGE
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
maintenance_db
The database to connect to
user
database username if different from config or default
password
user password if any password for a specified user
host
Database host if different from config or default
port
Database port if different from config or default
runas
System user all operations should be performed on behalf of
'''
object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges)
_validate_default_privileges(object_type, _defprivs, defprivileges)
if not has_default_privileges(name, object_name, object_type, defprivileges,
prepend=prepend, maintenance_db=maintenance_db, user=user,
host=host, port=port, password=password, runas=runas):
log.info('The object: %s of type: %s does not'
' have default privileges: %s set', object_name, object_type, defprivileges)
return False
_grants = ','.join(_defprivs)
if object_type in ['table', 'sequence']:
on_part = '{0}.{1}'.format(prepend, object_name)
else:
on_part = object_name
if object_type == 'group':
query = 'ALTER DEFAULT PRIVILEGES REVOKE {0} FROM {1}'.format(object_name, name)
else:
query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} REVOKE {0} ON {1}S FROM {3}'.format(
_grants, object_type.upper(), prepend, name)
ret = _psql_prepare_and_run(['-c', query],
user=user,
host=host,
port=port,
maintenance_db=maintenance_db,
password=password,
runas=runas)
return ret['retcode'] == 0 | python | def default_privileges_revoke(name,
object_name,
object_type,
defprivileges=None,
prepend='public',
maintenance_db=None,
user=None,
host=None,
port=None,
password=None,
runas=None):
'''
.. versionadded:: 2019.0.0
Revoke default privileges on a postgres object
CLI Example:
.. code-block:: bash
salt '*' postgres.default_privileges_revoke user_name table_name table \\
SELECT,UPDATE maintenance_db=db_name
name
Name of the role whose default privileges should be revoked
object_name
Name of the object on which the revoke is to be performed
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
privileges
Comma separated list of privileges to revoke, from the list below:
- INSERT
- CREATE
- TRUNCATE
- TRIGGER
- SELECT
- USAGE
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
maintenance_db
The database to connect to
user
database username if different from config or default
password
user password if any password for a specified user
host
Database host if different from config or default
port
Database port if different from config or default
runas
System user all operations should be performed on behalf of
'''
object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges)
_validate_default_privileges(object_type, _defprivs, defprivileges)
if not has_default_privileges(name, object_name, object_type, defprivileges,
prepend=prepend, maintenance_db=maintenance_db, user=user,
host=host, port=port, password=password, runas=runas):
log.info('The object: %s of type: %s does not'
' have default privileges: %s set', object_name, object_type, defprivileges)
return False
_grants = ','.join(_defprivs)
if object_type in ['table', 'sequence']:
on_part = '{0}.{1}'.format(prepend, object_name)
else:
on_part = object_name
if object_type == 'group':
query = 'ALTER DEFAULT PRIVILEGES REVOKE {0} FROM {1}'.format(object_name, name)
else:
query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} REVOKE {0} ON {1}S FROM {3}'.format(
_grants, object_type.upper(), prepend, name)
ret = _psql_prepare_and_run(['-c', query],
user=user,
host=host,
port=port,
maintenance_db=maintenance_db,
password=password,
runas=runas)
return ret['retcode'] == 0 | ['def', 'default_privileges_revoke', '(', 'name', ',', 'object_name', ',', 'object_type', ',', 'defprivileges', '=', 'None', ',', 'prepend', '=', "'public'", ',', 'maintenance_db', '=', 'None', ',', 'user', '=', 'None', ',', 'host', '=', 'None', ',', 'port', '=', 'None', ',', 'password', '=', 'None', ',', 'runas', '=', 'None', ')', ':', 'object_type', ',', 'defprivileges', ',', '_defprivs', '=', '_mod_defpriv_opts', '(', 'object_type', ',', 'defprivileges', ')', '_validate_default_privileges', '(', 'object_type', ',', '_defprivs', ',', 'defprivileges', ')', 'if', 'not', 'has_default_privileges', '(', 'name', ',', 'object_name', ',', 'object_type', ',', 'defprivileges', ',', 'prepend', '=', 'prepend', ',', 'maintenance_db', '=', 'maintenance_db', ',', 'user', '=', 'user', ',', 'host', '=', 'host', ',', 'port', '=', 'port', ',', 'password', '=', 'password', ',', 'runas', '=', 'runas', ')', ':', 'log', '.', 'info', '(', "'The object: %s of type: %s does not'", "' have default privileges: %s set'", ',', 'object_name', ',', 'object_type', ',', 'defprivileges', ')', 'return', 'False', '_grants', '=', "','", '.', 'join', '(', '_defprivs', ')', 'if', 'object_type', 'in', '[', "'table'", ',', "'sequence'", ']', ':', 'on_part', '=', "'{0}.{1}'", '.', 'format', '(', 'prepend', ',', 'object_name', ')', 'else', ':', 'on_part', '=', 'object_name', 'if', 'object_type', '==', "'group'", ':', 'query', '=', "'ALTER DEFAULT PRIVILEGES REVOKE {0} FROM {1}'", '.', 'format', '(', 'object_name', ',', 'name', ')', 'else', ':', 'query', '=', "'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} REVOKE {0} ON {1}S FROM {3}'", '.', 'format', '(', '_grants', ',', 'object_type', '.', 'upper', '(', ')', ',', 'prepend', ',', 'name', ')', 'ret', '=', '_psql_prepare_and_run', '(', '[', "'-c'", ',', 'query', ']', ',', 'user', '=', 'user', ',', 'host', '=', 'host', ',', 'port', '=', 'port', ',', 'maintenance_db', '=', 'maintenance_db', ',', 'password', '=', 'password', ',', 'runas', '=', 'runas', ')', 'return', 'ret', '[', "'retcode'", ']', '==', '0'] | .. versionadded:: 2019.0.0
Revoke default privileges on a postgres object
CLI Example:
.. code-block:: bash
salt '*' postgres.default_privileges_revoke user_name table_name table \\
SELECT,UPDATE maintenance_db=db_name
name
Name of the role whose default privileges should be revoked
object_name
Name of the object on which the revoke is to be performed
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
privileges
Comma separated list of privileges to revoke, from the list below:
- INSERT
- CREATE
- TRUNCATE
- TRIGGER
- SELECT
- USAGE
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
maintenance_db
The database to connect to
user
database username if different from config or default
password
user password if any password for a specified user
host
Database host if different from config or default
port
Database port if different from config or default
runas
System user all operations should be performed on behalf of | ['..', 'versionadded', '::', '2019', '.', '0', '.', '0'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L3331-L3434 |
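
Stripped of the psql call, the function above renders one of two ALTER DEFAULT PRIVILEGES templates. A small helper (mine, not Salt's) that reproduces those templates so the generated SQL can be inspected without a database:

def preview_revoke_query(name, object_name, object_type, privileges, prepend='public'):
    """Reproduces only the query templates above, for inspection; no psql call."""
    grants = ','.join(privileges)
    if object_type == 'group':
        return 'ALTER DEFAULT PRIVILEGES REVOKE {0} FROM {1}'.format(object_name, name)
    return ('ALTER DEFAULT PRIVILEGES IN SCHEMA {2} REVOKE {0} ON {1}S FROM {3}'
            .format(grants, object_type.upper(), prepend, name))

print(preview_revoke_query('report_user', 'accounts', 'table', ['SELECT', 'UPDATE']))
# ALTER DEFAULT PRIVILEGES IN SCHEMA public REVOKE SELECT,UPDATE ON TABLES FROM report_user
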
929 | noahbenson/neuropythy | neuropythy/util/core.py | czdivide | def czdivide(a, b, null=0):
'''
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
    The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
'''
if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
elif sps.issparse(b): b = b.toarray()
else: b = np.asarray(b)
z = np.isclose(b, 0)
q = np.logical_not(z)
zi = q / (b + z)
if sps.issparse(a):
r = a.multiply(zi).tocsr()
else:
r = np.asarray(a) * zi
r[np.ones(a.shape, dtype=np.bool)*z] = null
return r | python | def czdivide(a, b, null=0):
'''
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
    The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
'''
if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
elif sps.issparse(b): b = b.toarray()
else: b = np.asarray(b)
z = np.isclose(b, 0)
q = np.logical_not(z)
zi = q / (b + z)
if sps.issparse(a):
r = a.multiply(zi).tocsr()
else:
r = np.asarray(a) * zi
r[np.ones(a.shape, dtype=np.bool)*z] = null
return r | ['def', 'czdivide', '(', 'a', ',', 'b', ',', 'null', '=', '0', ')', ':', 'if', 'null', '==', '0', ':', 'return', 'a', '.', 'multiply', '(', 'zinv', '(', 'b', ')', ')', 'if', 'sps', '.', 'issparse', '(', 'a', ')', 'else', 'a', '*', 'zinv', '(', 'b', ')', 'elif', 'sps', '.', 'issparse', '(', 'b', ')', ':', 'b', '=', 'b', '.', 'toarray', '(', ')', 'else', ':', 'b', '=', 'np', '.', 'asarray', '(', 'b', ')', 'z', '=', 'np', '.', 'isclose', '(', 'b', ',', '0', ')', 'q', '=', 'np', '.', 'logical_not', '(', 'z', ')', 'zi', '=', 'q', '/', '(', 'b', '+', 'z', ')', 'if', 'sps', '.', 'issparse', '(', 'a', ')', ':', 'r', '=', 'a', '.', 'multiply', '(', 'zi', ')', '.', 'tocsr', '(', ')', 'else', ':', 'r', '=', 'np', '.', 'asarray', '(', 'a', ')', '*', 'zi', 'r', '[', 'np', '.', 'ones', '(', 'a', '.', 'shape', ',', 'dtype', '=', 'np', '.', 'bool', ')', '*', 'z', ']', '=', 'null', 'return', 'r'] | czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
    The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead. | ['czdivide', '(', 'a', 'b', ')', 'returns', 'the', 'quotient', 'a', '/', 'b', 'as', 'a', 'numpy', 'array', 'object', '.', 'Like', 'numpy', 's', 'divide', 'function', 'or', 'a', '/', 'b', 'syntax', 'czdivide', 'will', 'thread', 'over', 'the', 'latest', 'dimension', 'possible', '.', 'Unlike', 'numpy', 's', 'divide', 'czdivide', 'works', 'with', 'sparse', 'matrices', '.', 'Additionally', 'czdivide', 'multiplies', 'a', 'by', 'the', 'zinv', 'of', 'b', 'so', 'divide', '-', 'by', '-', 'zero', 'entries', 'are', 'replaced', 'with', '0', 'in', 'the', 'result', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L716-L741 |
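
A dense-array-only illustration of the zero-safe division trick inside czdivide above: the zero denominator contributes nothing instead of raising or producing inf.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 0.0, 4.0])

# Plain a / b would warn and give inf for the middle entry; here the
# zero-denominator slot is forced to the null value (0 by default).
z = np.isclose(b, 0)
safe = a * (np.logical_not(z) / (b + z))
print(safe)   # [0.5  0.   0.75]
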
930 | evhub/coconut | coconut/command/util.py | splitname | def splitname(path):
"""Split a path into a directory, name, and extensions."""
dirpath, filename = os.path.split(path)
# we don't use os.path.splitext here because we want all extensions,
# not just the last, to be put in exts
name, exts = filename.split(os.extsep, 1)
return dirpath, name, exts | python | def splitname(path):
"""Split a path into a directory, name, and extensions."""
dirpath, filename = os.path.split(path)
# we don't use os.path.splitext here because we want all extensions,
# not just the last, to be put in exts
name, exts = filename.split(os.extsep, 1)
return dirpath, name, exts | ['def', 'splitname', '(', 'path', ')', ':', 'dirpath', ',', 'filename', '=', 'os', '.', 'path', '.', 'split', '(', 'path', ')', "# we don't use os.path.splitext here because we want all extensions,", '# not just the last, to be put in exts', 'name', ',', 'exts', '=', 'filename', '.', 'split', '(', 'os', '.', 'extsep', ',', '1', ')', 'return', 'dirpath', ',', 'name', ',', 'exts'] | Split a path into a directory, name, and extensions. | ['Split', 'a', 'path', 'into', 'a', 'directory', 'name', 'and', 'extensions', '.'] | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/util.py#L219-L225 |
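
A quick contrast with the standard library, assuming splitname() above is in scope:

import os

# Unlike os.path.splitext, every extension after the first os.extsep ends up
# together in the third element returned by splitname().
print(os.path.splitext('src/main.coco.py'))   # ('src/main.coco', '.py')
print(splitname('src/main.coco.py'))          # ('src', 'main', 'coco.py')
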
931 | inasafe/inasafe | safe/gis/raster/reclassify.py | reclassify | def reclassify(layer, exposure_key=None, overwrite_input=False):
"""Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3182
This function is a wrapper for the code from
https://github.com/chiatt/gdal_reclassify
For instance if you want to reclassify like this table :
Original Value | Class
- ∞ < val <= 0 | 1
0 < val <= 0.5 | 2
0.5 < val <= 5 | 3
5 < val < + ∞ | 6
You need a dictionary :
ranges = OrderedDict()
ranges[1] = [None, 0]
ranges[2] = [0.0, 0.5]
ranges[3] = [0.5, 5]
ranges[6] = [5, None]
:param layer: The raster layer.
:type layer: QgsRasterLayer
:param overwrite_input: Option for the output layer. True will overwrite
the input layer. False will create a temporary layer.
:type overwrite_input: bool
:param exposure_key: The exposure key.
:type exposure_key: str
:return: The classified raster layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
"""
output_layer_name = reclassify_raster_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
if exposure_key:
classification_key = active_classification(
layer.keywords, exposure_key)
thresholds = active_thresholds_value_maps(layer.keywords, exposure_key)
layer.keywords['thresholds'] = thresholds
layer.keywords['classification'] = classification_key
else:
classification_key = layer.keywords.get('classification')
thresholds = layer.keywords.get('thresholds')
if not thresholds:
raise InvalidKeywordsForProcessingAlgorithm(
'thresholds are missing from the layer %s'
% layer.keywords['layer_purpose'])
if not classification_key:
raise InvalidKeywordsForProcessingAlgorithm(
'classification is missing from the layer %s'
% layer.keywords['layer_purpose'])
ranges = {}
value_map = {}
hazard_classes = definition(classification_key)['classes']
for hazard_class in hazard_classes:
ranges[hazard_class['value']] = thresholds[hazard_class['key']]
value_map[hazard_class['key']] = [hazard_class['value']]
if overwrite_input:
output_raster = layer.source()
else:
output_raster = unique_filename(suffix='.tiff', dir=temp_dir())
driver = gdal.GetDriverByName('GTiff')
raster_file = gdal.Open(layer.source())
band = raster_file.GetRasterBand(1)
no_data = band.GetNoDataValue()
source = band.ReadAsArray()
destination = source.copy()
for value, interval in list(ranges.items()):
v_min = interval[0]
v_max = interval[1]
if v_min is None:
destination[np.where(source <= v_max)] = value
elif v_max is None:
destination[np.where(source > v_min)] = value
elif v_min < v_max:
destination[np.where((v_min < source) & (source <= v_max))] = value
# Tag no data cells
destination[np.where(source == no_data)] = no_data_value
# Create the new file.
output_file = driver.Create(
output_raster, raster_file.RasterXSize, raster_file.RasterYSize, 1)
output_file.GetRasterBand(1).WriteArray(destination)
output_file.GetRasterBand(1).SetNoDataValue(no_data_value)
# CRS
output_file.SetProjection(raster_file.GetProjection())
output_file.SetGeoTransform(raster_file.GetGeoTransform())
output_file.FlushCache()
del output_file
if not isfile(output_raster):
raise FileNotFoundError
reclassified = QgsRasterLayer(output_raster, output_layer_name)
# We transfer keywords to the output.
reclassified.keywords = layer.keywords.copy()
reclassified.keywords['layer_mode'] = 'classified'
value_map = {}
hazard_classes = definition(classification_key)['classes']
for hazard_class in reversed(hazard_classes):
value_map[hazard_class['key']] = [hazard_class['value']]
reclassified.keywords['value_map'] = value_map
reclassified.keywords['title'] = output_layer_name
check_layer(reclassified)
return reclassified | python | def reclassify(layer, exposure_key=None, overwrite_input=False):
"""Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3182
This function is a wrapper for the code from
https://github.com/chiatt/gdal_reclassify
For instance if you want to reclassify like this table :
Original Value | Class
- ∞ < val <= 0 | 1
0 < val <= 0.5 | 2
0.5 < val <= 5 | 3
5 < val < + ∞ | 6
You need a dictionary :
ranges = OrderedDict()
ranges[1] = [None, 0]
ranges[2] = [0.0, 0.5]
ranges[3] = [0.5, 5]
ranges[6] = [5, None]
:param layer: The raster layer.
:type layer: QgsRasterLayer
:param overwrite_input: Option for the output layer. True will overwrite
the input layer. False will create a temporary layer.
:type overwrite_input: bool
:param exposure_key: The exposure key.
:type exposure_key: str
:return: The classified raster layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
"""
output_layer_name = reclassify_raster_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
if exposure_key:
classification_key = active_classification(
layer.keywords, exposure_key)
thresholds = active_thresholds_value_maps(layer.keywords, exposure_key)
layer.keywords['thresholds'] = thresholds
layer.keywords['classification'] = classification_key
else:
classification_key = layer.keywords.get('classification')
thresholds = layer.keywords.get('thresholds')
if not thresholds:
raise InvalidKeywordsForProcessingAlgorithm(
'thresholds are missing from the layer %s'
% layer.keywords['layer_purpose'])
if not classification_key:
raise InvalidKeywordsForProcessingAlgorithm(
'classification is missing from the layer %s'
% layer.keywords['layer_purpose'])
ranges = {}
value_map = {}
hazard_classes = definition(classification_key)['classes']
for hazard_class in hazard_classes:
ranges[hazard_class['value']] = thresholds[hazard_class['key']]
value_map[hazard_class['key']] = [hazard_class['value']]
if overwrite_input:
output_raster = layer.source()
else:
output_raster = unique_filename(suffix='.tiff', dir=temp_dir())
driver = gdal.GetDriverByName('GTiff')
raster_file = gdal.Open(layer.source())
band = raster_file.GetRasterBand(1)
no_data = band.GetNoDataValue()
source = band.ReadAsArray()
destination = source.copy()
for value, interval in list(ranges.items()):
v_min = interval[0]
v_max = interval[1]
if v_min is None:
destination[np.where(source <= v_max)] = value
elif v_max is None:
destination[np.where(source > v_min)] = value
elif v_min < v_max:
destination[np.where((v_min < source) & (source <= v_max))] = value
# Tag no data cells
destination[np.where(source == no_data)] = no_data_value
# Create the new file.
output_file = driver.Create(
output_raster, raster_file.RasterXSize, raster_file.RasterYSize, 1)
output_file.GetRasterBand(1).WriteArray(destination)
output_file.GetRasterBand(1).SetNoDataValue(no_data_value)
# CRS
output_file.SetProjection(raster_file.GetProjection())
output_file.SetGeoTransform(raster_file.GetGeoTransform())
output_file.FlushCache()
del output_file
if not isfile(output_raster):
raise FileNotFoundError
reclassified = QgsRasterLayer(output_raster, output_layer_name)
# We transfer keywords to the output.
reclassified.keywords = layer.keywords.copy()
reclassified.keywords['layer_mode'] = 'classified'
value_map = {}
hazard_classes = definition(classification_key)['classes']
for hazard_class in reversed(hazard_classes):
value_map[hazard_class['key']] = [hazard_class['value']]
reclassified.keywords['value_map'] = value_map
reclassified.keywords['title'] = output_layer_name
check_layer(reclassified)
return reclassified | ['def', 'reclassify', '(', 'layer', ',', 'exposure_key', '=', 'None', ',', 'overwrite_input', '=', 'False', ')', ':', 'output_layer_name', '=', 'reclassify_raster_steps', '[', "'output_layer_name'", ']', 'output_layer_name', '=', 'output_layer_name', '%', 'layer', '.', 'keywords', '[', "'layer_purpose'", ']', 'if', 'exposure_key', ':', 'classification_key', '=', 'active_classification', '(', 'layer', '.', 'keywords', ',', 'exposure_key', ')', 'thresholds', '=', 'active_thresholds_value_maps', '(', 'layer', '.', 'keywords', ',', 'exposure_key', ')', 'layer', '.', 'keywords', '[', "'thresholds'", ']', '=', 'thresholds', 'layer', '.', 'keywords', '[', "'classification'", ']', '=', 'classification_key', 'else', ':', 'classification_key', '=', 'layer', '.', 'keywords', '.', 'get', '(', "'classification'", ')', 'thresholds', '=', 'layer', '.', 'keywords', '.', 'get', '(', "'thresholds'", ')', 'if', 'not', 'thresholds', ':', 'raise', 'InvalidKeywordsForProcessingAlgorithm', '(', "'thresholds are missing from the layer %s'", '%', 'layer', '.', 'keywords', '[', "'layer_purpose'", ']', ')', 'if', 'not', 'classification_key', ':', 'raise', 'InvalidKeywordsForProcessingAlgorithm', '(', "'classification is missing from the layer %s'", '%', 'layer', '.', 'keywords', '[', "'layer_purpose'", ']', ')', 'ranges', '=', '{', '}', 'value_map', '=', '{', '}', 'hazard_classes', '=', 'definition', '(', 'classification_key', ')', '[', "'classes'", ']', 'for', 'hazard_class', 'in', 'hazard_classes', ':', 'ranges', '[', 'hazard_class', '[', "'value'", ']', ']', '=', 'thresholds', '[', 'hazard_class', '[', "'key'", ']', ']', 'value_map', '[', 'hazard_class', '[', "'key'", ']', ']', '=', '[', 'hazard_class', '[', "'value'", ']', ']', 'if', 'overwrite_input', ':', 'output_raster', '=', 'layer', '.', 'source', '(', ')', 'else', ':', 'output_raster', '=', 'unique_filename', '(', 'suffix', '=', "'.tiff'", ',', 'dir', '=', 'temp_dir', '(', ')', ')', 'driver', '=', 'gdal', '.', 'GetDriverByName', '(', "'GTiff'", ')', 'raster_file', '=', 'gdal', '.', 'Open', '(', 'layer', '.', 'source', '(', ')', ')', 'band', '=', 'raster_file', '.', 'GetRasterBand', '(', '1', ')', 'no_data', '=', 'band', '.', 'GetNoDataValue', '(', ')', 'source', '=', 'band', '.', 'ReadAsArray', '(', ')', 'destination', '=', 'source', '.', 'copy', '(', ')', 'for', 'value', ',', 'interval', 'in', 'list', '(', 'ranges', '.', 'items', '(', ')', ')', ':', 'v_min', '=', 'interval', '[', '0', ']', 'v_max', '=', 'interval', '[', '1', ']', 'if', 'v_min', 'is', 'None', ':', 'destination', '[', 'np', '.', 'where', '(', 'source', '<=', 'v_max', ')', ']', '=', 'value', 'elif', 'v_max', 'is', 'None', ':', 'destination', '[', 'np', '.', 'where', '(', 'source', '>', 'v_min', ')', ']', '=', 'value', 'elif', 'v_min', '<', 'v_max', ':', 'destination', '[', 'np', '.', 'where', '(', '(', 'v_min', '<', 'source', ')', '&', '(', 'source', '<=', 'v_max', ')', ')', ']', '=', 'value', '# Tag no data cells', 'destination', '[', 'np', '.', 'where', '(', 'source', '==', 'no_data', ')', ']', '=', 'no_data_value', '# Create the new file.', 'output_file', '=', 'driver', '.', 'Create', '(', 'output_raster', ',', 'raster_file', '.', 'RasterXSize', ',', 'raster_file', '.', 'RasterYSize', ',', '1', ')', 'output_file', '.', 'GetRasterBand', '(', '1', ')', '.', 'WriteArray', '(', 'destination', ')', 'output_file', '.', 'GetRasterBand', '(', '1', ')', '.', 'SetNoDataValue', '(', 'no_data_value', ')', '# CRS', 'output_file', '.', 'SetProjection', '(', 'raster_file', '.', 
'GetProjection', '(', ')', ')', 'output_file', '.', 'SetGeoTransform', '(', 'raster_file', '.', 'GetGeoTransform', '(', ')', ')', 'output_file', '.', 'FlushCache', '(', ')', 'del', 'output_file', 'if', 'not', 'isfile', '(', 'output_raster', ')', ':', 'raise', 'FileNotFoundError', 'reclassified', '=', 'QgsRasterLayer', '(', 'output_raster', ',', 'output_layer_name', ')', '# We transfer keywords to the output.', 'reclassified', '.', 'keywords', '=', 'layer', '.', 'keywords', '.', 'copy', '(', ')', 'reclassified', '.', 'keywords', '[', "'layer_mode'", ']', '=', "'classified'", 'value_map', '=', '{', '}', 'hazard_classes', '=', 'definition', '(', 'classification_key', ')', '[', "'classes'", ']', 'for', 'hazard_class', 'in', 'reversed', '(', 'hazard_classes', ')', ':', 'value_map', '[', 'hazard_class', '[', "'key'", ']', ']', '=', '[', 'hazard_class', '[', "'value'", ']', ']', 'reclassified', '.', 'keywords', '[', "'value_map'", ']', '=', 'value_map', 'reclassified', '.', 'keywords', '[', "'title'", ']', '=', 'output_layer_name', 'check_layer', '(', 'reclassified', ')', 'return', 'reclassified'] | Reclassify a continuous raster layer.
Issue https://github.com/inasafe/inasafe/issues/3182
This function is a wrapper for the code from
https://github.com/chiatt/gdal_reclassify
For instance if you want to reclassify like this table :
Original Value | Class
- ∞ < val <= 0 | 1
0 < val <= 0.5 | 2
0.5 < val <= 5 | 3
5 < val < + ∞ | 6
You need a dictionary :
ranges = OrderedDict()
ranges[1] = [None, 0]
ranges[2] = [0.0, 0.5]
ranges[3] = [0.5, 5]
ranges[6] = [5, None]
:param layer: The raster layer.
:type layer: QgsRasterLayer
:param overwrite_input: Option for the output layer. True will overwrite
the input layer. False will create a temporary layer.
:type overwrite_input: bool
:param exposure_key: The exposure key.
:type exposure_key: str
:return: The classified raster layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0 | ['Reclassify', 'a', 'continuous', 'raster', 'layer', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/raster/reclassify.py#L29-L155 |
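The ranges-to-classes mapping described in the docstring above drives a simple np.where loop; a minimal, GDAL/QGIS-free sketch of that core step (array values and thresholds are illustrative assumptions, not taken from a real hazard layer):

# Minimal sketch of the np.where-based reclassification loop; inputs are made up.
from collections import OrderedDict
import numpy as np

ranges = OrderedDict()
ranges[1] = [None, 0]     # -inf < val <= 0   -> class 1
ranges[2] = [0.0, 0.5]    #    0 < val <= 0.5 -> class 2
ranges[3] = [0.5, 5]      #  0.5 < val <= 5   -> class 3
ranges[6] = [5, None]     #    5 < val < +inf -> class 6

source = np.array([-1.0, 0.2, 3.0, 7.5])
destination = source.copy()
for value, (v_min, v_max) in ranges.items():
    if v_min is None:
        destination[np.where(source <= v_max)] = value
    elif v_max is None:
        destination[np.where(source > v_min)] = value
    elif v_min < v_max:
        destination[np.where((v_min < source) & (source <= v_max))] = value

print(destination)  # [1. 2. 3. 6.]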
932 | flyte/xbee-helper | xbee_helper/device.py | ZigBee._get_parameter | def _get_parameter(self, parameter, dest_addr_long=None):
"""
Fetches and returns the value of the specified parameter.
"""
frame = self._send_and_wait(
command=parameter, dest_addr_long=dest_addr_long)
return frame["parameter"] | python | def _get_parameter(self, parameter, dest_addr_long=None):
"""
Fetches and returns the value of the specified parameter.
"""
frame = self._send_and_wait(
command=parameter, dest_addr_long=dest_addr_long)
return frame["parameter"] | ['def', '_get_parameter', '(', 'self', ',', 'parameter', ',', 'dest_addr_long', '=', 'None', ')', ':', 'frame', '=', 'self', '.', '_send_and_wait', '(', 'command', '=', 'parameter', ',', 'dest_addr_long', '=', 'dest_addr_long', ')', 'return', 'frame', '[', '"parameter"', ']'] | Fetches and returns the value of the specified parameter. | ['Fetches', 'and', 'returns', 'the', 'value', 'of', 'the', 'specified', 'parameter', '.'] | train | https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L166-L172 |
933 | Koed00/django-q | django_q/tasks.py | Chain.result | def result(self, wait=0):
"""
return the full list of results from the chain when it finishes. blocks until timeout.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results
"""
if self.started:
return result_group(self.group, wait=wait, count=self.length(), cached=self.cached) | python | def result(self, wait=0):
"""
return the full list of results from the chain when it finishes. blocks until timeout.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results
"""
if self.started:
return result_group(self.group, wait=wait, count=self.length(), cached=self.cached) | ['def', 'result', '(', 'self', ',', 'wait', '=', '0', ')', ':', 'if', 'self', '.', 'started', ':', 'return', 'result_group', '(', 'self', '.', 'group', ',', 'wait', '=', 'wait', ',', 'count', '=', 'self', '.', 'length', '(', ')', ',', 'cached', '=', 'self', '.', 'cached', ')'] | return the full list of results from the chain when it finishes. blocks until timeout.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results | ['return', 'the', 'full', 'list', 'of', 'results', 'from', 'the', 'chain', 'when', 'it', 'finishes', '.', 'blocks', 'until', 'timeout', '.', ':', 'param', 'int', 'wait', ':', 'how', 'many', 'milliseconds', 'to', 'wait', 'for', 'a', 'result', ':', 'return', ':', 'an', 'unsorted', 'list', 'of', 'results'] | train | https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L542-L549 |
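A possible usage sketch for the chain above, assuming the append()/run() methods described in the django-q documentation and a Django project with django-q configured (the task paths are illustrative):

# Hedged sketch; requires a configured Django project running django-q workers.
from django_q.tasks import Chain

chain = Chain(cached=True)            # cache results so the group lookup can find them
chain.append('math.copysign', 1, -1)  # illustrative task paths
chain.append('math.floor', 2.5)
chain.run()
results = chain.result(wait=10000)    # block up to 10 s for the unsorted result list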
934 | haaksmash/pyutils | utils/lists.py | flatten | def flatten(iterable):
"""Fully flattens an iterable:
In: flatten([1,2,3,4,[5,6,[7,8]]])
Out: [1,2,3,4,5,6,7,8]
"""
container = iterable.__class__
placeholder = []
for item in iterable:
try:
placeholder.extend(flatten(item))
except TypeError:
placeholder.append(item)
return container(placeholder) | python | def flatten(iterable):
"""Fully flattens an iterable:
In: flatten([1,2,3,4,[5,6,[7,8]]])
Out: [1,2,3,4,5,6,7,8]
"""
container = iterable.__class__
placeholder = []
for item in iterable:
try:
placeholder.extend(flatten(item))
except TypeError:
placeholder.append(item)
return container(placeholder) | ['def', 'flatten', '(', 'iterable', ')', ':', 'container', '=', 'iterable', '.', '__class__', 'placeholder', '=', '[', ']', 'for', 'item', 'in', 'iterable', ':', 'try', ':', 'placeholder', '.', 'extend', '(', 'flatten', '(', 'item', ')', ')', 'except', 'TypeError', ':', 'placeholder', '.', 'append', '(', 'item', ')', 'return', 'container', '(', 'placeholder', ')'] | Fully flattens an iterable:
In: flatten([1,2,3,4,[5,6,[7,8]]])
Out: [1,2,3,4,5,6,7,8] | ['Fully', 'flattens', 'an', 'iterable', ':', 'In', ':', 'flatten', '(', '[', '1', '2', '3', '4', '[', '5', '6', '[', '7', '8', ']]]', ')', 'Out', ':', '[', '1', '2', '3', '4', '5', '6', '7', '8', ']'] | train | https://github.com/haaksmash/pyutils/blob/6ba851d11e53812dfc9017537a4f2de198851708/utils/lists.py#L17-L31 |
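A short usage sketch (assuming the package layout above makes utils.lists importable); besides the docstring example it shows that the input container type is preserved:

# Assumes utils/lists.py from the repository above is importable as utils.lists.
from utils.lists import flatten

print(flatten([1, 2, [3, [4, 5]]]))   # [1, 2, 3, 4, 5]
print(flatten((1, (2, 3), (4,))))     # (1, 2, 3, 4) -- tuples come back as tuples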
935 | bradrf/configstruct | configstruct/config_struct.py | ConfigStruct.save | def save(self, conflict_resolver=choose_mine):
'''Save all options in memory to the `config_file`.
Options are read once more from the file (to allow other writers to save configuration),
keys in conflict are resolved, and the final results are written back to the file.
:param conflict_resolver: a simple lambda or function to choose when an option key is
provided from an outside source (THEIRS, usually a file on disk) but is also already
set on this ConfigStruct (MINE)
'''
config = self._load(conflict_resolver) # in case some other process has added items
with open(self._config_file, 'wb') as cf:
config.write(cf) | python | def save(self, conflict_resolver=choose_mine):
'''Save all options in memory to the `config_file`.
Options are read once more from the file (to allow other writers to save configuration),
keys in conflict are resolved, and the final results are written back to the file.
:param conflict_resolver: a simple lambda or function to choose when an option key is
provided from an outside source (THEIRS, usually a file on disk) but is also already
set on this ConfigStruct (MINE)
'''
config = self._load(conflict_resolver) # in case some other process has added items
with open(self._config_file, 'wb') as cf:
config.write(cf) | ['def', 'save', '(', 'self', ',', 'conflict_resolver', '=', 'choose_mine', ')', ':', 'config', '=', 'self', '.', '_load', '(', 'conflict_resolver', ')', '# in case some other process has added items', 'with', 'open', '(', 'self', '.', '_config_file', ',', "'wb'", ')', 'as', 'cf', ':', 'config', '.', 'write', '(', 'cf', ')'] | Save all options in memory to the `config_file`.
Options are read once more from the file (to allow other writers to save configuration),
keys in conflict are resolved, and the final results are written back to the file.
:param conflict_resolver: a simple lambda or function to choose when an option key is
provided from an outside source (THEIRS, usually a file on disk) but is also already
set on this ConfigStruct (MINE) | ['Save', 'all', 'options', 'in', 'memory', 'to', 'the', 'config_file', '.'] | train | https://github.com/bradrf/configstruct/blob/aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8/configstruct/config_struct.py#L97-L110 |
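A sketch of passing a custom resolver to save(); the (mine, theirs) argument order and the already-constructed instance `conf` are assumptions, not taken from the code shown:

# Sketch only; `conf` is an assumed, already-constructed ConfigStruct instance and
# the (mine, theirs) argument order of the resolver is an assumption.
prefer_theirs = lambda mine, theirs: theirs   # let values already on disk win

conf.save()                                   # default choose_mine keeps in-memory values
conf.save(conflict_resolver=prefer_theirs)    # resolve conflicts in favour of the file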
936 | EmbodiedCognition/py-c3d | c3d.py | Group.add_param | def add_param(self, name, **kwargs):
'''Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor.
'''
self.params[name.upper()] = Param(name.upper(), **kwargs) | python | def add_param(self, name, **kwargs):
'''Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor.
'''
self.params[name.upper()] = Param(name.upper(), **kwargs) | ['def', 'add_param', '(', 'self', ',', 'name', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'params', '[', 'name', '.', 'upper', '(', ')', ']', '=', 'Param', '(', 'name', '.', 'upper', '(', ')', ',', '*', '*', 'kwargs', ')'] | Add a parameter to this group.
Parameters
----------
name : str
Name of the parameter to add to this group. The name will
automatically be case-normalized.
Additional keyword arguments will be passed to the `Param` constructor. | ['Add', 'a', 'parameter', 'to', 'this', 'group', '.'] | train | https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L425-L436 |
937 | tensorflow/tensorboard | tensorboard/compat/tensorflow_stub/io/gfile.py | S3FileSystem.glob | def glob(self, filename):
"""Returns a list of files that match the given pattern(s)."""
# Only support prefix with * at the end and no ? in the string
star_i = filename.find('*')
quest_i = filename.find('?')
if quest_i >= 0:
raise NotImplementedError(
"{} not supported by compat glob".format(filename))
if star_i != len(filename) - 1:
# Just return empty so we can use glob from directory watcher
#
# TODO: Remove and instead handle in GetLogdirSubdirectories.
# However, we would need to handle it for all non-local registered
# filesystems in some way.
return []
filename = filename[:-1]
client = boto3.client("s3")
bucket, path = self.bucket_and_path(filename)
p = client.get_paginator("list_objects")
keys = []
for r in p.paginate(Bucket=bucket, Prefix=path):
for o in r.get("Contents", []):
key = o["Key"][len(path):]
if key: # Skip the base dir, which would add an empty string
keys.append(filename + key)
return keys | python | def glob(self, filename):
"""Returns a list of files that match the given pattern(s)."""
# Only support prefix with * at the end and no ? in the string
star_i = filename.find('*')
quest_i = filename.find('?')
if quest_i >= 0:
raise NotImplementedError(
"{} not supported by compat glob".format(filename))
if star_i != len(filename) - 1:
# Just return empty so we can use glob from directory watcher
#
# TODO: Remove and instead handle in GetLogdirSubdirectories.
# However, we would need to handle it for all non-local registered
# filesystems in some way.
return []
filename = filename[:-1]
client = boto3.client("s3")
bucket, path = self.bucket_and_path(filename)
p = client.get_paginator("list_objects")
keys = []
for r in p.paginate(Bucket=bucket, Prefix=path):
for o in r.get("Contents", []):
key = o["Key"][len(path):]
if key: # Skip the base dir, which would add an empty string
keys.append(filename + key)
return keys | ['def', 'glob', '(', 'self', ',', 'filename', ')', ':', '# Only support prefix with * at the end and no ? in the string', 'star_i', '=', 'filename', '.', 'find', '(', "'*'", ')', 'quest_i', '=', 'filename', '.', 'find', '(', "'?'", ')', 'if', 'quest_i', '>=', '0', ':', 'raise', 'NotImplementedError', '(', '"{} not supported by compat glob"', '.', 'format', '(', 'filename', ')', ')', 'if', 'star_i', '!=', 'len', '(', 'filename', ')', '-', '1', ':', '# Just return empty so we can use glob from directory watcher', '#', '# TODO: Remove and instead handle in GetLogdirSubdirectories.', '# However, we would need to handle it for all non-local registered', '# filesystems in some way.', 'return', '[', ']', 'filename', '=', 'filename', '[', ':', '-', '1', ']', 'client', '=', 'boto3', '.', 'client', '(', '"s3"', ')', 'bucket', ',', 'path', '=', 'self', '.', 'bucket_and_path', '(', 'filename', ')', 'p', '=', 'client', '.', 'get_paginator', '(', '"list_objects"', ')', 'keys', '=', '[', ']', 'for', 'r', 'in', 'p', '.', 'paginate', '(', 'Bucket', '=', 'bucket', ',', 'Prefix', '=', 'path', ')', ':', 'for', 'o', 'in', 'r', '.', 'get', '(', '"Contents"', ',', '[', ']', ')', ':', 'key', '=', 'o', '[', '"Key"', ']', '[', 'len', '(', 'path', ')', ':', ']', 'if', 'key', ':', '# Skip the base dir, which would add an empty string', 'keys', '.', 'append', '(', 'filename', '+', 'key', ')', 'return', 'keys'] | Returns a list of files that match the given pattern(s). | ['Returns', 'a', 'list', 'of', 'files', 'that', 'match', 'the', 'given', 'pattern', '(', 's', ')', '.'] | train | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/compat/tensorflow_stub/io/gfile.py#L231-L256 |
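The compat glob above only expands a single trailing '*' and rejects '?'; a stand-alone mirror of that pattern check (no boto3 or S3 access needed) makes the supported cases explicit:

# Stand-alone mirror of the pattern check above; no S3 access required.
def supported_by_compat_glob(pattern):
    star_i = pattern.find('*')
    quest_i = pattern.find('?')
    if quest_i >= 0:   # '?' is rejected outright
        raise NotImplementedError("{} not supported by compat glob".format(pattern))
    # Only a single '*' as the last character triggers a prefix listing;
    # anything else makes the caller return an empty list.
    return star_i == len(pattern) - 1

print(supported_by_compat_glob('s3://bucket/logs/*'))        # True  -> prefix listing
print(supported_by_compat_glob('s3://bucket/logs/*/events')) # False -> caller yields []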
938 | spacetelescope/stsci.tools | lib/stsci/tools/fileutil.py | parseFilename | def parseFilename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
"""
# Parse out any extension specified in filename
_indx = filename.find('[')
if _indx > 0:
# Read extension name provided
_fname = filename[:_indx]
_extn = filename[_indx + 1:-1]
else:
_fname = filename
_extn = None
return _fname, _extn | python | def parseFilename(filename):
"""
Parse out filename from any specified extensions.
Returns rootname and string version of extension name.
"""
# Parse out any extension specified in filename
_indx = filename.find('[')
if _indx > 0:
# Read extension name provided
_fname = filename[:_indx]
_extn = filename[_indx + 1:-1]
else:
_fname = filename
_extn = None
return _fname, _extn | ['def', 'parseFilename', '(', 'filename', ')', ':', '# Parse out any extension specified in filename', '_indx', '=', 'filename', '.', 'find', '(', "'['", ')', 'if', '_indx', '>', '0', ':', '# Read extension name provided', '_fname', '=', 'filename', '[', ':', '_indx', ']', '_extn', '=', 'filename', '[', '_indx', '+', '1', ':', '-', '1', ']', 'else', ':', '_fname', '=', 'filename', '_extn', '=', 'None', 'return', '_fname', ',', '_extn'] | Parse out filename from any specified extensions.
Returns rootname and string version of extension name. | ['Parse', 'out', 'filename', 'from', 'any', 'specified', 'extensions', '.'] | train | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fileutil.py#L826-L843 |
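A quick usage sketch (assuming stsci.tools is installed); the expected outputs follow directly from the bracket parsing above:

# Assumes stsci.tools is installed; outputs follow from the bracket parsing above.
from stsci.tools.fileutil import parseFilename

print(parseFilename('image.fits[sci,1]'))   # ('image.fits', 'sci,1')
print(parseFilename('image.fits'))          # ('image.fits', None)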
939 | angr/angr | angr/analyses/decompiler/clinic.py | Clinic._make_callsites | def _make_callsites(self, stack_pointer_tracker=None):
"""
Simplify all function call statements.
:return: None
"""
# Computing reaching definitions
rd = self.project.analyses.ReachingDefinitions(func=self.function, func_graph=self.graph, observe_all=True)
for key in self._blocks:
block = self._blocks[key]
csm = self.project.analyses.AILCallSiteMaker(block, reaching_definitions=rd)
if csm.result_block:
ail_block = csm.result_block
simp = self.project.analyses.AILBlockSimplifier(ail_block, stack_pointer_tracker=stack_pointer_tracker)
self._blocks[key] = simp.result_block
self._update_graph() | python | def _make_callsites(self, stack_pointer_tracker=None):
"""
Simplify all function call statements.
:return: None
"""
# Computing reaching definitions
rd = self.project.analyses.ReachingDefinitions(func=self.function, func_graph=self.graph, observe_all=True)
for key in self._blocks:
block = self._blocks[key]
csm = self.project.analyses.AILCallSiteMaker(block, reaching_definitions=rd)
if csm.result_block:
ail_block = csm.result_block
simp = self.project.analyses.AILBlockSimplifier(ail_block, stack_pointer_tracker=stack_pointer_tracker)
self._blocks[key] = simp.result_block
self._update_graph() | ['def', '_make_callsites', '(', 'self', ',', 'stack_pointer_tracker', '=', 'None', ')', ':', '# Computing reaching definitions', 'rd', '=', 'self', '.', 'project', '.', 'analyses', '.', 'ReachingDefinitions', '(', 'func', '=', 'self', '.', 'function', ',', 'func_graph', '=', 'self', '.', 'graph', ',', 'observe_all', '=', 'True', ')', 'for', 'key', 'in', 'self', '.', '_blocks', ':', 'block', '=', 'self', '.', '_blocks', '[', 'key', ']', 'csm', '=', 'self', '.', 'project', '.', 'analyses', '.', 'AILCallSiteMaker', '(', 'block', ',', 'reaching_definitions', '=', 'rd', ')', 'if', 'csm', '.', 'result_block', ':', 'ail_block', '=', 'csm', '.', 'result_block', 'simp', '=', 'self', '.', 'project', '.', 'analyses', '.', 'AILBlockSimplifier', '(', 'ail_block', ',', 'stack_pointer_tracker', '=', 'stack_pointer_tracker', ')', 'self', '.', '_blocks', '[', 'key', ']', '=', 'simp', '.', 'result_block', 'self', '.', '_update_graph', '(', ')'] | Simplify all function call statements.
:return: None | ['Simplify', 'all', 'function', 'call', 'statements', '.'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/decompiler/clinic.py#L213-L231 |
940 | noahbenson/neuropythy | neuropythy/geometry/util.py | vector_angle_cos | def vector_angle_cos(u, v):
'''
vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u
or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the
cosines.
'''
u = np.asarray(u)
v = np.asarray(v)
return (u * v).sum(0) / np.sqrt((u ** 2).sum(0) * (v ** 2).sum(0)) | python | def vector_angle_cos(u, v):
'''
vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u
or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the
cosines.
'''
u = np.asarray(u)
v = np.asarray(v)
return (u * v).sum(0) / np.sqrt((u ** 2).sum(0) * (v ** 2).sum(0)) | ['def', 'vector_angle_cos', '(', 'u', ',', 'v', ')', ':', 'u', '=', 'np', '.', 'asarray', '(', 'u', ')', 'v', '=', 'np', '.', 'asarray', '(', 'v', ')', 'return', '(', 'u', '*', 'v', ')', '.', 'sum', '(', '0', ')', '/', 'np', '.', 'sqrt', '(', '(', 'u', '**', '2', ')', '.', 'sum', '(', '0', ')', '*', '(', 'v', '**', '2', ')', '.', 'sum', '(', '0', ')', ')'] | vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u
or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the
cosines. | ['vector_angle_cos', '(', 'u', 'v', ')', 'yields', 'the', 'cosine', 'of', 'the', 'angle', 'between', 'the', 'two', 'vectors', 'u', 'and', 'v', '.', 'If', 'u', 'or', 'v', '(', 'or', 'both', ')', 'is', 'a', '(', 'd', 'x', 'n', ')', 'matrix', 'of', 'n', 'vectors', 'the', 'result', 'will', 'be', 'a', 'length', 'n', 'vector', 'of', 'the', 'cosines', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/util.py#L23-L31 |
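A numpy-only worked example of the documented formula cos(theta) = u.v / (|u||v|), mirroring the expression above and checked against a known 45-degree angle:

# Mirrors the expression above for two plain vectors; numpy is the only dependency.
import numpy as np

u = np.array([1.0, 0.0])
v = np.array([1.0, 1.0])
cos_theta = (u * v).sum(0) / np.sqrt((u ** 2).sum(0) * (v ** 2).sum(0))
print(cos_theta)                         # 0.7071... == cos(45 degrees)
print(np.degrees(np.arccos(cos_theta)))  # 45.0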
941 | ampl/amplpy | amplpy/errorhandler.py | ErrorHandler.error | def error(self, amplexception):
"""
Receives notification of an error.
"""
msg = '\t'+str(amplexception).replace('\n', '\n\t')
print('Error:\n{:s}'.format(msg))
raise amplexception | python | def error(self, amplexception):
"""
Receives notification of an error.
"""
msg = '\t'+str(amplexception).replace('\n', '\n\t')
print('Error:\n{:s}'.format(msg))
raise amplexception | ['def', 'error', '(', 'self', ',', 'amplexception', ')', ':', 'msg', '=', "'\\t'", '+', 'str', '(', 'amplexception', ')', '.', 'replace', '(', "'\\n'", ',', "'\\n\\t'", ')', 'print', '(', "'Error:\\n{:s}'", '.', 'format', '(', 'msg', ')', ')', 'raise', 'amplexception'] | Receives notification of an error. | ['Receives', 'notification', 'of', 'an', 'error', '.'] | train | https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/errorhandler.py#L18-L24 |
942 | HacKanCuBa/passphrase-py | passphrase/aux.py | Aux.make_chars_uppercase | def make_chars_uppercase(
lst: Union[list, tuple, str, set],
uppercase: int
) -> Union[list, tuple, str, set]:
"""Make uppercase some randomly selected characters.
The characters can be in a (mix of) string, list, tuple or set.
Keyword arguments:
lst -- the object to make all chars uppercase, which can be a (mix of)
list, tuple, string or set.
uppercase -- Number of characters to be set as uppercase.
"""
if not isinstance(lst, (list, tuple, str, set)):
raise TypeError('lst must be a list, a tuple, a set or a string')
if not isinstance(uppercase, int):
raise TypeError('uppercase must be an integer')
if uppercase < 0:
raise ValueError('uppercase must be bigger than zero')
lowercase = Aux.lowercase_count(lst)
if uppercase == 0 or lowercase == 0:
return lst
elif uppercase >= lowercase:
# Make it all uppercase
return Aux.make_all_uppercase(lst)
arr = list(lst)
# Check if at least an element is supported
# This is required to avoid an infinite loop below
supported = False
for element in arr:
if isinstance(element, (list, tuple, str, set)):
supported = True
break
if supported:
# Pick a word at random, then make a character uppercase
count = 0
while count < uppercase:
windex = randbelow(len(arr))
element = arr[windex]
# Skip unsupported types or empty ones
if element:
aux = element
if isinstance(element, str):
aux = Aux._make_one_char_uppercase(element)
elif isinstance(element, (list, tuple, set)):
aux = Aux.make_chars_uppercase(element, 1)
if aux != element:
arr[windex] = aux
count += 1
if isinstance(lst, set):
return set(arr)
elif isinstance(lst, str):
return ''.join(arr)
elif isinstance(lst, tuple):
return tuple(arr)
return arr | python | def make_chars_uppercase(
lst: Union[list, tuple, str, set],
uppercase: int
) -> Union[list, tuple, str, set]:
"""Make uppercase some randomly selected characters.
The characters can be in a (mix of) string, list, tuple or set.
Keyword arguments:
lst -- the object to make all chars uppercase, which can be a (mix of)
list, tuple, string or set.
uppercase -- Number of characters to be set as uppercase.
"""
if not isinstance(lst, (list, tuple, str, set)):
raise TypeError('lst must be a list, a tuple, a set or a string')
if not isinstance(uppercase, int):
raise TypeError('uppercase must be an integer')
if uppercase < 0:
raise ValueError('uppercase must be bigger than zero')
lowercase = Aux.lowercase_count(lst)
if uppercase == 0 or lowercase == 0:
return lst
elif uppercase >= lowercase:
# Make it all uppercase
return Aux.make_all_uppercase(lst)
arr = list(lst)
# Check if at least an element is supported
# This is required to avoid an infinite loop below
supported = False
for element in arr:
if isinstance(element, (list, tuple, str, set)):
supported = True
break
if supported:
# Pick a word at random, then make a character uppercase
count = 0
while count < uppercase:
windex = randbelow(len(arr))
element = arr[windex]
# Skip unsupported types or empty ones
if element:
aux = element
if isinstance(element, str):
aux = Aux._make_one_char_uppercase(element)
elif isinstance(element, (list, tuple, set)):
aux = Aux.make_chars_uppercase(element, 1)
if aux != element:
arr[windex] = aux
count += 1
if isinstance(lst, set):
return set(arr)
elif isinstance(lst, str):
return ''.join(arr)
elif isinstance(lst, tuple):
return tuple(arr)
return arr | ['def', 'make_chars_uppercase', '(', 'lst', ':', 'Union', '[', 'list', ',', 'tuple', ',', 'str', ',', 'set', ']', ',', 'uppercase', ':', 'int', ')', '->', 'Union', '[', 'list', ',', 'tuple', ',', 'str', ',', 'set', ']', ':', 'if', 'not', 'isinstance', '(', 'lst', ',', '(', 'list', ',', 'tuple', ',', 'str', ',', 'set', ')', ')', ':', 'raise', 'TypeError', '(', "'lst must be a list, a tuple, a set or a string'", ')', 'if', 'not', 'isinstance', '(', 'uppercase', ',', 'int', ')', ':', 'raise', 'TypeError', '(', "'uppercase must be an integer'", ')', 'if', 'uppercase', '<', '0', ':', 'raise', 'ValueError', '(', "'uppercase must be bigger than zero'", ')', 'lowercase', '=', 'Aux', '.', 'lowercase_count', '(', 'lst', ')', 'if', 'uppercase', '==', '0', 'or', 'lowercase', '==', '0', ':', 'return', 'lst', 'elif', 'uppercase', '>=', 'lowercase', ':', '# Make it all uppercase', 'return', 'Aux', '.', 'make_all_uppercase', '(', 'lst', ')', 'arr', '=', 'list', '(', 'lst', ')', '# Check if at least an element is supported', '# This is required to avoid an infinite loop below', 'supported', '=', 'False', 'for', 'element', 'in', 'arr', ':', 'if', 'isinstance', '(', 'element', ',', '(', 'list', ',', 'tuple', ',', 'str', ',', 'set', ')', ')', ':', 'supported', '=', 'True', 'break', 'if', 'supported', ':', '# Pick a word at random, then make a character uppercase', 'count', '=', '0', 'while', 'count', '<', 'uppercase', ':', 'windex', '=', 'randbelow', '(', 'len', '(', 'arr', ')', ')', 'element', '=', 'arr', '[', 'windex', ']', '# Skip unsupported types or empty ones', 'if', 'element', ':', 'aux', '=', 'element', 'if', 'isinstance', '(', 'element', ',', 'str', ')', ':', 'aux', '=', 'Aux', '.', '_make_one_char_uppercase', '(', 'element', ')', 'elif', 'isinstance', '(', 'element', ',', '(', 'list', ',', 'tuple', ',', 'set', ')', ')', ':', 'aux', '=', 'Aux', '.', 'make_chars_uppercase', '(', 'element', ',', '1', ')', 'if', 'aux', '!=', 'element', ':', 'arr', '[', 'windex', ']', '=', 'aux', 'count', '+=', '1', 'if', 'isinstance', '(', 'lst', ',', 'set', ')', ':', 'return', 'set', '(', 'arr', ')', 'elif', 'isinstance', '(', 'lst', ',', 'str', ')', ':', 'return', "''", '.', 'join', '(', 'arr', ')', 'elif', 'isinstance', '(', 'lst', ',', 'tuple', ')', ':', 'return', 'tuple', '(', 'arr', ')', 'return', 'arr'] | Make uppercase some randomly selected characters.
The characters can be in a (mix of) string, list, tuple or set.
Keyword arguments:
lst -- the object to make all chars uppercase, which can be a (mix of)
list, tuple, string or set.
uppercase -- Number of characters to be set as uppercase. | ['Make', 'uppercase', 'some', 'randomly', 'selected', 'characters', '.'] | train | https://github.com/HacKanCuBa/passphrase-py/blob/219d6374338ed9a1475b4f09b0d85212376f11e0/passphrase/aux.py#L120-L183 |
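A hedged usage sketch, assuming the passphrase package is installed and that Aux exposes these as static methods (the exact output is random, but the number of converted characters is not):

# Assumes the passphrase package is installed; exact output is random.
from passphrase.aux import Aux

words = ['correct', 'horse', 'battery', 'staple']
result = Aux.make_chars_uppercase(words, 3)
print(result)  # e.g. ['coRrect', 'Horse', 'battery', 'stAple']
# Exactly 3 lowercase characters were converted:
print(Aux.lowercase_count(words) - Aux.lowercase_count(result))  # 3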
943 | VasilyStepanov/pywidl | pywidl/grammar.py | p_SingleType_any | def p_SingleType_any(p):
"""SingleType : any TypeSuffixStartingWithArray"""
p[0] = helper.unwrapTypeSuffix(model.SimpleType(
model.SimpleType.ANY), p[2]) | python | def p_SingleType_any(p):
"""SingleType : any TypeSuffixStartingWithArray"""
p[0] = helper.unwrapTypeSuffix(model.SimpleType(
model.SimpleType.ANY), p[2]) | ['def', 'p_SingleType_any', '(', 'p', ')', ':', 'p', '[', '0', ']', '=', 'helper', '.', 'unwrapTypeSuffix', '(', 'model', '.', 'SimpleType', '(', 'model', '.', 'SimpleType', '.', 'ANY', ')', ',', 'p', '[', '2', ']', ')'] | SingleType : any TypeSuffixStartingWithArray | ['SingleType', ':', 'any', 'TypeSuffixStartingWithArray'] | train | https://github.com/VasilyStepanov/pywidl/blob/8d84b2e53157bfe276bf16301c19e8b6b32e861e/pywidl/grammar.py#L714-L717 |
944 | swisscom/cleanerversion | versions/admin.py | VersionedAdmin.get_object | def get_object(self, request, object_id, from_field=None):
"""
our implementation of get_object allows for cloning when updating an
object, not cloning when the button 'save but not clone' is pushed
and at no other time will clone be called
"""
# from_field breaks in 1.7.8
obj = super(VersionedAdmin, self).get_object(request,
object_id)
# Only clone if update view as get_object() is also called for change,
# delete, and history views
if request.method == 'POST' and \
obj and \
obj.is_latest and \
'will_not_clone' not in request.path and \
'delete' not in request.path and \
'restore' not in request.path:
obj = obj.clone()
return obj | python | def get_object(self, request, object_id, from_field=None):
"""
our implementation of get_object allows for cloning when updating an
object, not cloning when the button 'save but not clone' is pushed
and at no other time will clone be called
"""
# from_field breaks in 1.7.8
obj = super(VersionedAdmin, self).get_object(request,
object_id)
# Only clone if update view as get_object() is also called for change,
# delete, and history views
if request.method == 'POST' and \
obj and \
obj.is_latest and \
'will_not_clone' not in request.path and \
'delete' not in request.path and \
'restore' not in request.path:
obj = obj.clone()
return obj | ['def', 'get_object', '(', 'self', ',', 'request', ',', 'object_id', ',', 'from_field', '=', 'None', ')', ':', '# from_field breaks in 1.7.8', 'obj', '=', 'super', '(', 'VersionedAdmin', ',', 'self', ')', '.', 'get_object', '(', 'request', ',', 'object_id', ')', '# Only clone if update view as get_object() is also called for change,', '# delete, and history views', 'if', 'request', '.', 'method', '==', "'POST'", 'and', 'obj', 'and', 'obj', '.', 'is_latest', 'and', "'will_not_clone'", 'not', 'in', 'request', '.', 'path', 'and', "'delete'", 'not', 'in', 'request', '.', 'path', 'and', "'restore'", 'not', 'in', 'request', '.', 'path', ':', 'obj', '=', 'obj', '.', 'clone', '(', ')', 'return', 'obj'] | our implementation of get_object allows for cloning when updating an
object, not cloning when the button 'save but not clone' is pushed
and at no other time will clone be called | ['our', 'implementation', 'of', 'get_object', 'allows', 'for', 'cloning', 'when', 'updating', 'an', 'object', 'not', 'cloning', 'when', 'the', 'button', 'save', 'but', 'not', 'clone', 'is', 'pushed', 'and', 'at', 'no', 'other', 'time', 'will', 'clone', 'be', 'called'] | train | https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/admin.py#L240-L259 |
945 | rosenbrockc/fortpy | fortpy/scripts/analyze.py | FortpyShell.do_rmfit | def do_rmfit(self, arg):
"""Removes a fit function from a variable. See 'fit'."""
if arg in self.curargs["fits"]:
del self.curargs["fits"][arg]
#We also need to remove the variable entry if it exists.
if "timing" in arg:
fitvar = "{}|fit".format(arg)
else:
fitvar = "{}.fit".format(arg)
if fitvar in self.curargs["dependents"]:
self.curargs["dependents"].remove(fitvar) | python | def do_rmfit(self, arg):
"""Removes a fit function from a variable. See 'fit'."""
if arg in self.curargs["fits"]:
del self.curargs["fits"][arg]
#We also need to remove the variable entry if it exists.
if "timing" in arg:
fitvar = "{}|fit".format(arg)
else:
fitvar = "{}.fit".format(arg)
if fitvar in self.curargs["dependents"]:
self.curargs["dependents"].remove(fitvar) | ['def', 'do_rmfit', '(', 'self', ',', 'arg', ')', ':', 'if', 'arg', 'in', 'self', '.', 'curargs', '[', '"fits"', ']', ':', 'del', 'self', '.', 'curargs', '[', '"fits"', ']', '[', 'arg', ']', '#We also need to remove the variable entry if it exists.', 'if', '"timing"', 'in', 'arg', ':', 'fitvar', '=', '"{}|fit"', '.', 'format', '(', 'arg', ')', 'else', ':', 'fitvar', '=', '"{}.fit"', '.', 'format', '(', 'arg', ')', 'if', 'fitvar', 'in', 'self', '.', 'curargs', '[', '"dependents"', ']', ':', 'self', '.', 'curargs', '[', '"dependents"', ']', '.', 'remove', '(', 'fitvar', ')'] | Removes a fit function from a variable. See 'fit'. | ['Removes', 'a', 'fit', 'function', 'from', 'a', 'variable', '.', 'See', 'fit', '.'] | train | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L822-L832 |
946 | mitsei/dlkit | dlkit/json_/logging_/sessions.py | LogEntryAdminSession.alias_log_entry | def alias_log_entry(self, log_entry_id, alias_id):
"""Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``LogEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another log entry, it is
reassigned to the given log entry ``Id``.
arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=log_entry_id, equivalent_id=alias_id) | python | def alias_log_entry(self, log_entry_id, alias_id):
"""Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``LogEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another log entry, it is
reassigned to the given log entry ``Id``.
arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=log_entry_id, equivalent_id=alias_id) | ['def', 'alias_log_entry', '(', 'self', ',', 'log_entry_id', ',', 'alias_id', ')', ':', '# Implemented from template for', '# osid.resource.ResourceAdminSession.alias_resources_template', 'self', '.', '_alias_id', '(', 'primary_id', '=', 'log_entry_id', ',', 'equivalent_id', '=', 'alias_id', ')'] | Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``LogEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another log entry, it is
reassigned to the given log entry ``Id``.
arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | ['Adds', 'an', 'Id', 'to', 'a', 'LogEntry', 'for', 'the', 'purpose', 'of', 'creating', 'compatibility', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/logging_/sessions.py#L1160-L1181 |
947 | edx/django-config-models | config_models/templatetags.py | submit_row | def submit_row(context):
"""
Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'.
Manipulates the context going into that function by hiding all of the buttons
in the submit row if the key `readonly` is set in the context.
"""
ctx = original_submit_row(context)
if context.get('readonly', False):
ctx.update({
'show_delete_link': False,
'show_save_as_new': False,
'show_save_and_add_another': False,
'show_save_and_continue': False,
'show_save': False,
})
else:
return ctx | python | def submit_row(context):
"""
Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'.
Manipulates the context going into that function by hiding all of the buttons
in the submit row if the key `readonly` is set in the context.
"""
ctx = original_submit_row(context)
if context.get('readonly', False):
ctx.update({
'show_delete_link': False,
'show_save_as_new': False,
'show_save_and_add_another': False,
'show_save_and_continue': False,
'show_save': False,
})
else:
return ctx | ['def', 'submit_row', '(', 'context', ')', ':', 'ctx', '=', 'original_submit_row', '(', 'context', ')', 'if', 'context', '.', 'get', '(', "'readonly'", ',', 'False', ')', ':', 'ctx', '.', 'update', '(', '{', "'show_delete_link'", ':', 'False', ',', "'show_save_as_new'", ':', 'False', ',', "'show_save_and_add_another'", ':', 'False', ',', "'show_save_and_continue'", ':', 'False', ',', "'show_save'", ':', 'False', ',', '}', ')', 'else', ':', 'return', 'ctx'] | Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'.
Manipulates the context going into that function by hiding all of the buttons
in the submit row if the key `readonly` is set in the context. | ['Overrides', 'django', '.', 'contrib', '.', 'admin', '.', 'templatetags', '.', 'admin_modify', '.', 'submit_row', '.'] | train | https://github.com/edx/django-config-models/blob/f22c05fe3ccb182a6be4dbe313e9d6749dffd3e4/config_models/templatetags.py#L12-L30 |
948 | blankenberg/pyBamParser | lib/pyBamParser/read/__init__.py | BAMRead.indel_at | def indel_at( self, position, check_insertions=True, check_deletions=True, one_based=True ):
"""Does the read contain an indel at the given position?
Return True if the read contains an insertion at the given position
(position must be the base before the insertion event) or if the read
contains a deletion where the base at position is deleted. Return False
otherwise."""
(insertions, deletions) = self.get_indels( one_based=one_based )
if check_insertions:
for insertion in insertions:
if insertion[0] == position:
return True
if check_deletions:
for deletion in deletions:
if deletion[0] < position < deletion[0] + deletion[1] + 1:
return True
return False | python | def indel_at( self, position, check_insertions=True, check_deletions=True, one_based=True ):
"""Does the read contain an indel at the given position?
Return True if the read contains an insertion at the given position
(position must be the base before the insertion event) or if the read
contains a deletion where the base at position is deleted. Return False
otherwise."""
(insertions, deletions) = self.get_indels( one_based=one_based )
if check_insertions:
for insertion in insertions:
if insertion[0] == position:
return True
if check_deletions:
for deletion in deletions:
if deletion[0] < position < deletion[0] + deletion[1] + 1:
return True
return False | ['def', 'indel_at', '(', 'self', ',', 'position', ',', 'check_insertions', '=', 'True', ',', 'check_deletions', '=', 'True', ',', 'one_based', '=', 'True', ')', ':', '(', 'insertions', ',', 'deletions', ')', '=', 'self', '.', 'get_indels', '(', 'one_based', '=', 'one_based', ')', 'if', 'check_insertions', ':', 'for', 'insertion', 'in', 'insertions', ':', 'if', 'insertion', '[', '0', ']', '==', 'position', ':', 'return', 'True', 'if', 'check_deletions', ':', 'for', 'deletion', 'in', 'deletions', ':', 'if', 'deletion', '[', '0', ']', '<', 'position', '<', 'deletion', '[', '0', ']', '+', 'deletion', '[', '1', ']', '+', '1', ':', 'return', 'True', 'return', 'False'] | Does the read contain an indel at the given position?
Return True if the read contains an insertion at the given position
(position must be the base before the insertion event) or if the read
contains a deletion where the base at position is deleted. Return False
otherwise. | ['Does', 'the', 'read', 'contain', 'an', 'indel', 'at', 'the', 'given', 'position?', 'Return', 'True', 'if', 'the', 'read', 'contains', 'an', 'insertion', 'at', 'the', 'given', 'position', '(', 'position', 'must', 'be', 'the', 'base', 'before', 'the', 'insertion', 'event', ')', 'or', 'if', 'the', 'read', 'contains', 'a', 'deletion', 'where', 'the', 'base', 'at', 'position', 'is', 'deleted', '.', 'Return', 'False', 'otherwise', '.'] | train | https://github.com/blankenberg/pyBamParser/blob/5a357f23ae51b97f3167e8da18ff0cb6db8ca4a0/lib/pyBamParser/read/__init__.py#L240-L255 |
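The position conventions above (insertions reported at the base before the event, deletions covering deletion[0]+1 .. deletion[0]+deletion[1]) can be shown with a stand-alone mirror of the check, using made-up indel tuples instead of a real BAM read:

# Stand-alone mirror of the checks above; the indel tuples are made up.
def indel_at(position, insertions, deletions):
    if any(ins[0] == position for ins in insertions):
        return True
    return any(d[0] < position < d[0] + d[1] + 1 for d in deletions)

insertions = [(100, 2)]   # insertion event right after base 100
deletions = [(200, 3)]    # three deleted bases: positions 201, 202, 203
print(indel_at(100, insertions, deletions))  # True  (base before the insertion)
print(indel_at(202, insertions, deletions))  # True  (inside the deletion)
print(indel_at(204, insertions, deletions))  # False (first base after the deletion)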
949 | bububa/pyTOP | pyTOP/crawler.py | Crawler.get_cats | def get_cats(self):
'''Get top keywords categories'''
start_url = 'http://top.taobao.com/index.php?from=tbsy'
rs = self.fetch(start_url)
if not rs: return None
soup = BeautifulSoup(rs.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage)
cats = [{'id':'TR_%s'%li['id'].encode('utf-8').upper(), 'title':li.a.text.encode('utf-8').strip()} for li in soup.find('div', id='nav').findAll('li') if li['id']!='index']
threadPool = ThreadPool(len(cats) if len(cats)<=5 else 5)
for cat in cats:
threadPool.run(self.get_cats_thread, callback=None, cat=cat)
cats = threadPool.killAllWorkers(None)
return cats | python | def get_cats(self):
'''Get top keywords categories'''
start_url = 'http://top.taobao.com/index.php?from=tbsy'
rs = self.fetch(start_url)
if not rs: return None
soup = BeautifulSoup(rs.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage)
cats = [{'id':'TR_%s'%li['id'].encode('utf-8').upper(), 'title':li.a.text.encode('utf-8').strip()} for li in soup.find('div', id='nav').findAll('li') if li['id']!='index']
threadPool = ThreadPool(len(cats) if len(cats)<=5 else 5)
for cat in cats:
threadPool.run(self.get_cats_thread, callback=None, cat=cat)
cats = threadPool.killAllWorkers(None)
return cats | ['def', 'get_cats', '(', 'self', ')', ':', 'start_url', '=', "'http://top.taobao.com/index.php?from=tbsy'", 'rs', '=', 'self', '.', 'fetch', '(', 'start_url', ')', 'if', 'not', 'rs', ':', 'return', 'None', 'soup', '=', 'BeautifulSoup', '(', 'rs', '.', 'content', ',', 'convertEntities', '=', 'BeautifulSoup', '.', 'HTML_ENTITIES', ',', 'markupMassage', '=', 'hexentityMassage', ')', 'cats', '=', '[', '{', "'id'", ':', "'TR_%s'", '%', 'li', '[', "'id'", ']', '.', 'encode', '(', "'utf-8'", ')', '.', 'upper', '(', ')', ',', "'title'", ':', 'li', '.', 'a', '.', 'text', '.', 'encode', '(', "'utf-8'", ')', '.', 'strip', '(', ')', '}', 'for', 'li', 'in', 'soup', '.', 'find', '(', "'div'", ',', 'id', '=', "'nav'", ')', '.', 'findAll', '(', "'li'", ')', 'if', 'li', '[', "'id'", ']', '!=', "'index'", ']', 'threadPool', '=', 'ThreadPool', '(', 'len', '(', 'cats', ')', 'if', 'len', '(', 'cats', ')', '<=', '5', 'else', '5', ')', 'for', 'cat', 'in', 'cats', ':', 'threadPool', '.', 'run', '(', 'self', '.', 'get_cats_thread', ',', 'callback', '=', 'None', ',', 'cat', '=', 'cat', ')', 'cats', '=', 'threadPool', '.', 'killAllWorkers', '(', 'None', ')', 'return', 'cats'] | Get top keywords categories | ['Get', 'top', 'keywords', 'categories'] | train | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/crawler.py#L308-L319 |
950 | RudolfCardinal/pythonlib | cardinal_pythonlib/tools/backup_mysql_database.py | cmdargs | def cmdargs(mysqldump: str,
username: str,
password: str,
database: str,
verbose: bool,
with_drop_create_database: bool,
max_allowed_packet: str,
hide_password: bool = False) -> List[str]:
"""
Returns command arguments for a ``mysqldump`` call.
Args:
mysqldump: ``mysqldump`` executable filename
username: user name
password: password
database: database name
verbose: verbose output?
with_drop_create_database: produce commands to ``DROP`` the database
and recreate it?
max_allowed_packet: passed to ``mysqldump``
hide_password: obscure the password (will break the arguments but
provide a safe version to show the user)?
Returns:
list of command-line arguments
"""
ca = [
mysqldump,
"-u", username,
"-p{}".format("*****" if hide_password else password),
"--max_allowed_packet={}".format(max_allowed_packet),
"--hex-blob", # preferable to raw binary in our .sql file
]
if verbose:
ca.append("--verbose")
if with_drop_create_database:
ca.extend([
"--add-drop-database",
"--databases",
database
])
else:
ca.append(database)
pass
return ca | python | def cmdargs(mysqldump: str,
username: str,
password: str,
database: str,
verbose: bool,
with_drop_create_database: bool,
max_allowed_packet: str,
hide_password: bool = False) -> List[str]:
"""
Returns command arguments for a ``mysqldump`` call.
Args:
mysqldump: ``mysqldump`` executable filename
username: user name
password: password
database: database name
verbose: verbose output?
with_drop_create_database: produce commands to ``DROP`` the database
and recreate it?
max_allowed_packet: passed to ``mysqldump``
hide_password: obscure the password (will break the arguments but
provide a safe version to show the user)?
Returns:
list of command-line arguments
"""
ca = [
mysqldump,
"-u", username,
"-p{}".format("*****" if hide_password else password),
"--max_allowed_packet={}".format(max_allowed_packet),
"--hex-blob", # preferable to raw binary in our .sql file
]
if verbose:
ca.append("--verbose")
if with_drop_create_database:
ca.extend([
"--add-drop-database",
"--databases",
database
])
else:
ca.append(database)
pass
return ca | ['def', 'cmdargs', '(', 'mysqldump', ':', 'str', ',', 'username', ':', 'str', ',', 'password', ':', 'str', ',', 'database', ':', 'str', ',', 'verbose', ':', 'bool', ',', 'with_drop_create_database', ':', 'bool', ',', 'max_allowed_packet', ':', 'str', ',', 'hide_password', ':', 'bool', '=', 'False', ')', '->', 'List', '[', 'str', ']', ':', 'ca', '=', '[', 'mysqldump', ',', '"-u"', ',', 'username', ',', '"-p{}"', '.', 'format', '(', '"*****"', 'if', 'hide_password', 'else', 'password', ')', ',', '"--max_allowed_packet={}"', '.', 'format', '(', 'max_allowed_packet', ')', ',', '"--hex-blob"', ',', '# preferable to raw binary in our .sql file', ']', 'if', 'verbose', ':', 'ca', '.', 'append', '(', '"--verbose"', ')', 'if', 'with_drop_create_database', ':', 'ca', '.', 'extend', '(', '[', '"--add-drop-database"', ',', '"--databases"', ',', 'database', ']', ')', 'else', ':', 'ca', '.', 'append', '(', 'database', ')', 'pass', 'return', 'ca'] | Returns command arguments for a ``mysqldump`` call.
Args:
mysqldump: ``mysqldump`` executable filename
username: user name
password: password
database: database name
verbose: verbose output?
with_drop_create_database: produce commands to ``DROP`` the database
and recreate it?
max_allowed_packet: passed to ``mysqldump``
hide_password: obscure the password (will break the arguments but
provide a safe version to show the user)?
Returns:
list of command-line arguments | ['Returns', 'command', 'arguments', 'for', 'a', 'mysqldump', 'call', '.'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/backup_mysql_database.py#L46-L90 |
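A call sketch (assuming cmdargs is importable from the module path shown above); with hide_password=True the expected list follows directly from the argument building above:

# Assumes the module path shown above is importable.
from cardinal_pythonlib.tools.backup_mysql_database import cmdargs

safe_args = cmdargs(
    mysqldump="mysqldump",
    username="backup",
    password="s3cret",
    database="mydb",
    verbose=False,
    with_drop_create_database=False,
    max_allowed_packet="1GB",
    hide_password=True,
)
print(safe_args)
# ['mysqldump', '-u', 'backup', '-p*****', '--max_allowed_packet=1GB', '--hex-blob', 'mydb']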
951 | pypa/setuptools | setuptools/__init__.py | _find_all_simple | def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results) | python | def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results) | ['def', '_find_all_simple', '(', 'path', ')', ':', 'results', '=', '(', 'os', '.', 'path', '.', 'join', '(', 'base', ',', 'file', ')', 'for', 'base', ',', 'dirs', ',', 'files', 'in', 'os', '.', 'walk', '(', 'path', ',', 'followlinks', '=', 'True', ')', 'for', 'file', 'in', 'files', ')', 'return', 'filter', '(', 'os', '.', 'path', '.', 'isfile', ',', 'results', ')'] | Find all files under 'path' | ['Find', 'all', 'files', 'under', 'path'] | train | https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/__init__.py#L203-L212 |
952 | sorgerlab/indra | indra/sources/trips/api.py | process_xml | def process_xml(xml_string):
"""Return a TripsProcessor by processing a TRIPS EKB XML string.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
tp = TripsProcessor(xml_string)
if tp.tree is None:
return None
tp.get_modifications_indirect()
tp.get_activations_causal()
tp.get_activations_stimulate()
tp.get_complexes()
tp.get_modifications()
tp.get_active_forms()
tp.get_active_forms_state()
tp.get_activations()
tp.get_translocation()
tp.get_regulate_amounts()
tp.get_degradations()
tp.get_syntheses()
tp.get_conversions()
tp.get_simple_increase_decrease()
return tp | python | def process_xml(xml_string):
"""Return a TripsProcessor by processing a TRIPS EKB XML string.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements.
"""
tp = TripsProcessor(xml_string)
if tp.tree is None:
return None
tp.get_modifications_indirect()
tp.get_activations_causal()
tp.get_activations_stimulate()
tp.get_complexes()
tp.get_modifications()
tp.get_active_forms()
tp.get_active_forms_state()
tp.get_activations()
tp.get_translocation()
tp.get_regulate_amounts()
tp.get_degradations()
tp.get_syntheses()
tp.get_conversions()
tp.get_simple_increase_decrease()
return tp | ['def', 'process_xml', '(', 'xml_string', ')', ':', 'tp', '=', 'TripsProcessor', '(', 'xml_string', ')', 'if', 'tp', '.', 'tree', 'is', 'None', ':', 'return', 'None', 'tp', '.', 'get_modifications_indirect', '(', ')', 'tp', '.', 'get_activations_causal', '(', ')', 'tp', '.', 'get_activations_stimulate', '(', ')', 'tp', '.', 'get_complexes', '(', ')', 'tp', '.', 'get_modifications', '(', ')', 'tp', '.', 'get_active_forms', '(', ')', 'tp', '.', 'get_active_forms_state', '(', ')', 'tp', '.', 'get_activations', '(', ')', 'tp', '.', 'get_translocation', '(', ')', 'tp', '.', 'get_regulate_amounts', '(', ')', 'tp', '.', 'get_degradations', '(', ')', 'tp', '.', 'get_syntheses', '(', ')', 'tp', '.', 'get_conversions', '(', ')', 'tp', '.', 'get_simple_increase_decrease', '(', ')', 'return', 'tp'] | Return a TripsProcessor by processing a TRIPS EKB XML string.
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) string to be processed.
http://trips.ihmc.us/parser/api.html
Returns
-------
tp : TripsProcessor
A TripsProcessor containing the extracted INDRA Statements
in tp.statements. | ['Return', 'a', 'TripsProcessor', 'by', 'processing', 'a', 'TRIPS', 'EKB', 'XML', 'string', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/api.py#L102-L134 |
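A usage sketch based on the docstring above: read a previously saved TRIPS EKB XML document and iterate over the extracted Statements ('ekb.xml' is an illustrative file name):

# 'ekb.xml' is an illustrative, previously saved TRIPS EKB document.
from indra.sources.trips.api import process_xml

with open('ekb.xml') as fh:
    xml_string = fh.read()

tp = process_xml(xml_string)
if tp is not None:          # process_xml returns None when no parse tree was produced
    for stmt in tp.statements:
        print(stmt)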
953 | Julius2342/pyvlx | pyvlx/string_helper.py | string_to_bytes | def string_to_bytes(string, size):
"""Convert string to bytes add padding."""
if len(string) > size:
raise PyVLXException("string_to_bytes::string_to_large")
encoded = bytes(string, encoding='utf-8')
return encoded + bytes(size-len(encoded)) | python | def string_to_bytes(string, size):
"""Convert string to bytes add padding."""
if len(string) > size:
raise PyVLXException("string_to_bytes::string_to_large")
encoded = bytes(string, encoding='utf-8')
return encoded + bytes(size-len(encoded)) | ['def', 'string_to_bytes', '(', 'string', ',', 'size', ')', ':', 'if', 'len', '(', 'string', ')', '>', 'size', ':', 'raise', 'PyVLXException', '(', '"string_to_bytes::string_to_large"', ')', 'encoded', '=', 'bytes', '(', 'string', ',', 'encoding', '=', "'utf-8'", ')', 'return', 'encoded', '+', 'bytes', '(', 'size', '-', 'len', '(', 'encoded', ')', ')'] | Convert string to bytes add padding. | ['Convert', 'string', 'to', 'bytes', 'add', 'padding', '.'] | train | https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/string_helper.py#L5-L10 |
954 | mlperf/training | image_classification/tensorflow/official/resnet/resnet_run_loop.py | resnet_model_fn | def resnet_model_fn(features, labels, mode, model_class,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, version, loss_scale, loss_filter_fn=None,
dtype=resnet_model.DEFAULT_DTYPE,
label_smoothing=0.0, enable_lars=False):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
version: Integer representing which version of the ResNet network to use.
See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability. A detailed
summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
tf.summary.image('images', features, max_outputs=6)
# Checks that features/images have same data type being used for calculations.
assert features.dtype == dtype
features = tf.cast(features, dtype)
model = model_class(resnet_size, data_format, version=version, dtype=dtype)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
# This acts as a no-op if the logits are already in fp32 (provided logits are
# not a SparseTensor). If dtype is is low precision, logits must be cast to
# fp32 for numerical stability.
logits = tf.cast(logits, tf.float32)
num_examples_metric = tf_mlperf_log.sum_metric(tensor=tf.shape(logits)[0], name=_NUM_EXAMPLES_NAME)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
# Return the predictions and the specification for serving a SavedModel
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss, which includes softmax cross entropy and L2 regularization.
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_LOSS_FN, value=mlperf_log.CCE)
if label_smoothing != 0.0:
one_hot_labels = tf.one_hot(labels, 1001)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=one_hot_labels,
label_smoothing=label_smoothing)
else:
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name):
return 'batch_normalization' not in name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
mlperf_log.resnet_print(key=mlperf_log.MODEL_EXCLUDE_BN_FROM_L2,
value=not loss_filter_fn('batch_normalization'))
# Add weight decay to the loss.
mlperf_log.resnet_print(key=mlperf_log.MODEL_L2_REGULARIZATION,
value=weight_decay)
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
if loss_filter_fn(v.name)])
tf.summary.scalar('l2_loss', l2_loss)
loss = cross_entropy + l2_loss
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
log_id = mlperf_log.resnet_print(key=mlperf_log.OPT_LR, deferred=True)
learning_rate = tf_mlperf_log.log_deferred(op=learning_rate, log_id=log_id,
every_n=100)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
mlperf_log.resnet_print(key=mlperf_log.OPT_NAME,
value=mlperf_log.SGD_WITH_MOMENTUM)
mlperf_log.resnet_print(key=mlperf_log.OPT_MOMENTUM, value=momentum)
if enable_lars:
optimizer = tf.contrib.opt.LARSOptimizer(
learning_rate,
momentum=momentum,
weight_decay=weight_decay,
skip_list=['batch_normalization', 'bias'])
else:
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
if loss_scale != 1:
# When computing fp16 gradients, often intermediate tensor values are
# so small, they underflow to 0. To avoid this, we multiply the loss by
# loss_scale to make these tensor values loss_scale times bigger.
scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
# Once the gradient computation is complete we can scale the gradients
# back to the correct scale before passing them to the optimizer.
unscaled_grad_vars = [(grad / loss_scale, var)
for grad, var in scaled_grad_vars]
minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
else:
minimize_op = optimizer.minimize(loss, global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops, num_examples_metric[1])
else:
train_op = None
accuracy = tf.metrics.accuracy(labels, predictions['classes'])
accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
targets=labels,
k=5,
name='top_5_op'))
metrics = {'accuracy': accuracy,
'accuracy_top_5': accuracy_top_5,
_NUM_EXAMPLES_NAME: num_examples_metric}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
tf.summary.scalar('train_accuracy', accuracy[1])
tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics) | python | def resnet_model_fn(features, labels, mode, model_class,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, version, loss_scale, loss_filter_fn=None,
dtype=resnet_model.DEFAULT_DTYPE,
label_smoothing=0.0, enable_lars=False):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
version: Integer representing which version of the ResNet network to use.
See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability. A detailed
summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
tf.summary.image('images', features, max_outputs=6)
# Checks that features/images have same data type being used for calculations.
assert features.dtype == dtype
features = tf.cast(features, dtype)
model = model_class(resnet_size, data_format, version=version, dtype=dtype)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
# This acts as a no-op if the logits are already in fp32 (provided logits are
# not a SparseTensor). If dtype is low precision, logits must be cast to
# fp32 for numerical stability.
logits = tf.cast(logits, tf.float32)
num_examples_metric = tf_mlperf_log.sum_metric(tensor=tf.shape(logits)[0], name=_NUM_EXAMPLES_NAME)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
# Return the predictions and the specification for serving a SavedModel
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss, which includes softmax cross entropy and L2 regularization.
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_LOSS_FN, value=mlperf_log.CCE)
if label_smoothing != 0.0:
one_hot_labels = tf.one_hot(labels, 1001)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=one_hot_labels,
label_smoothing=label_smoothing)
else:
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name):
return 'batch_normalization' not in name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
mlperf_log.resnet_print(key=mlperf_log.MODEL_EXCLUDE_BN_FROM_L2,
value=not loss_filter_fn('batch_normalization'))
# Add weight decay to the loss.
mlperf_log.resnet_print(key=mlperf_log.MODEL_L2_REGULARIZATION,
value=weight_decay)
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
if loss_filter_fn(v.name)])
tf.summary.scalar('l2_loss', l2_loss)
loss = cross_entropy + l2_loss
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
log_id = mlperf_log.resnet_print(key=mlperf_log.OPT_LR, deferred=True)
learning_rate = tf_mlperf_log.log_deferred(op=learning_rate, log_id=log_id,
every_n=100)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
mlperf_log.resnet_print(key=mlperf_log.OPT_NAME,
value=mlperf_log.SGD_WITH_MOMENTUM)
mlperf_log.resnet_print(key=mlperf_log.OPT_MOMENTUM, value=momentum)
if enable_lars:
optimizer = tf.contrib.opt.LARSOptimizer(
learning_rate,
momentum=momentum,
weight_decay=weight_decay,
skip_list=['batch_normalization', 'bias'])
else:
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
if loss_scale != 1:
# When computing fp16 gradients, often intermediate tensor values are
# so small, they underflow to 0. To avoid this, we multiply the loss by
# loss_scale to make these tensor values loss_scale times bigger.
scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
# Once the gradient computation is complete we can scale the gradients
# back to the correct scale before passing them to the optimizer.
unscaled_grad_vars = [(grad / loss_scale, var)
for grad, var in scaled_grad_vars]
minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
else:
minimize_op = optimizer.minimize(loss, global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops, num_examples_metric[1])
else:
train_op = None
accuracy = tf.metrics.accuracy(labels, predictions['classes'])
accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
targets=labels,
k=5,
name='top_5_op'))
metrics = {'accuracy': accuracy,
'accuracy_top_5': accuracy_top_5,
_NUM_EXAMPLES_NAME: num_examples_metric}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
tf.summary.scalar('train_accuracy', accuracy[1])
tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics) | ['def', 'resnet_model_fn', '(', 'features', ',', 'labels', ',', 'mode', ',', 'model_class', ',', 'resnet_size', ',', 'weight_decay', ',', 'learning_rate_fn', ',', 'momentum', ',', 'data_format', ',', 'version', ',', 'loss_scale', ',', 'loss_filter_fn', '=', 'None', ',', 'dtype', '=', 'resnet_model', '.', 'DEFAULT_DTYPE', ',', 'label_smoothing', '=', '0.0', ',', 'enable_lars', '=', 'False', ')', ':', '# Generate a summary node for the images', 'tf', '.', 'summary', '.', 'image', '(', "'images'", ',', 'features', ',', 'max_outputs', '=', '6', ')', '# Checks that features/images have same data type being used for calculations.', 'assert', 'features', '.', 'dtype', '==', 'dtype', 'features', '=', 'tf', '.', 'cast', '(', 'features', ',', 'dtype', ')', 'model', '=', 'model_class', '(', 'resnet_size', ',', 'data_format', ',', 'version', '=', 'version', ',', 'dtype', '=', 'dtype', ')', 'logits', '=', 'model', '(', 'features', ',', 'mode', '==', 'tf', '.', 'estimator', '.', 'ModeKeys', '.', 'TRAIN', ')', '# This acts as a no-op if the logits are already in fp32 (provided logits are', '# not a SparseTensor). If dtype is is low precision, logits must be cast to', '# fp32 for numerical stability.', 'logits', '=', 'tf', '.', 'cast', '(', 'logits', ',', 'tf', '.', 'float32', ')', 'num_examples_metric', '=', 'tf_mlperf_log', '.', 'sum_metric', '(', 'tensor', '=', 'tf', '.', 'shape', '(', 'logits', ')', '[', '0', ']', ',', 'name', '=', '_NUM_EXAMPLES_NAME', ')', 'predictions', '=', '{', "'classes'", ':', 'tf', '.', 'argmax', '(', 'logits', ',', 'axis', '=', '1', ')', ',', "'probabilities'", ':', 'tf', '.', 'nn', '.', 'softmax', '(', 'logits', ',', 'name', '=', "'softmax_tensor'", ')', '}', 'if', 'mode', '==', 'tf', '.', 'estimator', '.', 'ModeKeys', '.', 'PREDICT', ':', '# Return the predictions and the specification for serving a SavedModel', 'return', 'tf', '.', 'estimator', '.', 'EstimatorSpec', '(', 'mode', '=', 'mode', ',', 'predictions', '=', 'predictions', ',', 'export_outputs', '=', '{', "'predict'", ':', 'tf', '.', 'estimator', '.', 'export', '.', 'PredictOutput', '(', 'predictions', ')', '}', ')', '# Calculate loss, which includes softmax cross entropy and L2 regularization.', 'mlperf_log', '.', 'resnet_print', '(', 'key', '=', 'mlperf_log', '.', 'MODEL_HP_LOSS_FN', ',', 'value', '=', 'mlperf_log', '.', 'CCE', ')', 'if', 'label_smoothing', '!=', '0.0', ':', 'one_hot_labels', '=', 'tf', '.', 'one_hot', '(', 'labels', ',', '1001', ')', 'cross_entropy', '=', 'tf', '.', 'losses', '.', 'softmax_cross_entropy', '(', 'logits', '=', 'logits', ',', 'onehot_labels', '=', 'one_hot_labels', ',', 'label_smoothing', '=', 'label_smoothing', ')', 'else', ':', 'cross_entropy', '=', 'tf', '.', 'losses', '.', 'sparse_softmax_cross_entropy', '(', 'logits', '=', 'logits', ',', 'labels', '=', 'labels', ')', '# Create a tensor named cross_entropy for logging purposes.', 'tf', '.', 'identity', '(', 'cross_entropy', ',', 'name', '=', "'cross_entropy'", ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'cross_entropy'", ',', 'cross_entropy', ')', '# If no loss_filter_fn is passed, assume we want the default behavior,', '# which is that batch_normalization variables are excluded from loss.', 'def', 'exclude_batch_norm', '(', 'name', ')', ':', 'return', "'batch_normalization'", 'not', 'in', 'name', 'loss_filter_fn', '=', 'loss_filter_fn', 'or', 'exclude_batch_norm', 'mlperf_log', '.', 'resnet_print', '(', 'key', '=', 'mlperf_log', '.', 'MODEL_EXCLUDE_BN_FROM_L2', ',', 'value', '=', 'not', 
'loss_filter_fn', '(', "'batch_normalization'", ')', ')', '# Add weight decay to the loss.', 'mlperf_log', '.', 'resnet_print', '(', 'key', '=', 'mlperf_log', '.', 'MODEL_L2_REGULARIZATION', ',', 'value', '=', 'weight_decay', ')', 'l2_loss', '=', 'weight_decay', '*', 'tf', '.', 'add_n', '(', '# loss is computed using fp32 for numerical stability.', '[', 'tf', '.', 'nn', '.', 'l2_loss', '(', 'tf', '.', 'cast', '(', 'v', ',', 'tf', '.', 'float32', ')', ')', 'for', 'v', 'in', 'tf', '.', 'trainable_variables', '(', ')', 'if', 'loss_filter_fn', '(', 'v', '.', 'name', ')', ']', ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'l2_loss'", ',', 'l2_loss', ')', 'loss', '=', 'cross_entropy', '+', 'l2_loss', 'if', 'mode', '==', 'tf', '.', 'estimator', '.', 'ModeKeys', '.', 'TRAIN', ':', 'global_step', '=', 'tf', '.', 'train', '.', 'get_or_create_global_step', '(', ')', 'learning_rate', '=', 'learning_rate_fn', '(', 'global_step', ')', 'log_id', '=', 'mlperf_log', '.', 'resnet_print', '(', 'key', '=', 'mlperf_log', '.', 'OPT_LR', ',', 'deferred', '=', 'True', ')', 'learning_rate', '=', 'tf_mlperf_log', '.', 'log_deferred', '(', 'op', '=', 'learning_rate', ',', 'log_id', '=', 'log_id', ',', 'every_n', '=', '100', ')', '# Create a tensor named learning_rate for logging purposes', 'tf', '.', 'identity', '(', 'learning_rate', ',', 'name', '=', "'learning_rate'", ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'learning_rate'", ',', 'learning_rate', ')', 'mlperf_log', '.', 'resnet_print', '(', 'key', '=', 'mlperf_log', '.', 'OPT_NAME', ',', 'value', '=', 'mlperf_log', '.', 'SGD_WITH_MOMENTUM', ')', 'mlperf_log', '.', 'resnet_print', '(', 'key', '=', 'mlperf_log', '.', 'OPT_MOMENTUM', ',', 'value', '=', 'momentum', ')', 'if', 'enable_lars', ':', 'optimizer', '=', 'tf', '.', 'contrib', '.', 'opt', '.', 'LARSOptimizer', '(', 'learning_rate', ',', 'momentum', '=', 'momentum', ',', 'weight_decay', '=', 'weight_decay', ',', 'skip_list', '=', '[', "'batch_normalization'", ',', "'bias'", ']', ')', 'else', ':', 'optimizer', '=', 'tf', '.', 'train', '.', 'MomentumOptimizer', '(', 'learning_rate', '=', 'learning_rate', ',', 'momentum', '=', 'momentum', ')', 'if', 'loss_scale', '!=', '1', ':', '# When computing fp16 gradients, often intermediate tensor values are', '# so small, they underflow to 0. 
To avoid this, we multiply the loss by', '# loss_scale to make these tensor values loss_scale times bigger.', 'scaled_grad_vars', '=', 'optimizer', '.', 'compute_gradients', '(', 'loss', '*', 'loss_scale', ')', '# Once the gradient computation is complete we can scale the gradients', '# back to the correct scale before passing them to the optimizer.', 'unscaled_grad_vars', '=', '[', '(', 'grad', '/', 'loss_scale', ',', 'var', ')', 'for', 'grad', ',', 'var', 'in', 'scaled_grad_vars', ']', 'minimize_op', '=', 'optimizer', '.', 'apply_gradients', '(', 'unscaled_grad_vars', ',', 'global_step', ')', 'else', ':', 'minimize_op', '=', 'optimizer', '.', 'minimize', '(', 'loss', ',', 'global_step', ')', 'update_ops', '=', 'tf', '.', 'get_collection', '(', 'tf', '.', 'GraphKeys', '.', 'UPDATE_OPS', ')', 'train_op', '=', 'tf', '.', 'group', '(', 'minimize_op', ',', 'update_ops', ',', 'num_examples_metric', '[', '1', ']', ')', 'else', ':', 'train_op', '=', 'None', 'accuracy', '=', 'tf', '.', 'metrics', '.', 'accuracy', '(', 'labels', ',', 'predictions', '[', "'classes'", ']', ')', 'accuracy_top_5', '=', 'tf', '.', 'metrics', '.', 'mean', '(', 'tf', '.', 'nn', '.', 'in_top_k', '(', 'predictions', '=', 'logits', ',', 'targets', '=', 'labels', ',', 'k', '=', '5', ',', 'name', '=', "'top_5_op'", ')', ')', 'metrics', '=', '{', "'accuracy'", ':', 'accuracy', ',', "'accuracy_top_5'", ':', 'accuracy_top_5', ',', '_NUM_EXAMPLES_NAME', ':', 'num_examples_metric', '}', '# Create a tensor named train_accuracy for logging purposes', 'tf', '.', 'identity', '(', 'accuracy', '[', '1', ']', ',', 'name', '=', "'train_accuracy'", ')', 'tf', '.', 'identity', '(', 'accuracy_top_5', '[', '1', ']', ',', 'name', '=', "'train_accuracy_top_5'", ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'train_accuracy'", ',', 'accuracy', '[', '1', ']', ')', 'tf', '.', 'summary', '.', 'scalar', '(', "'train_accuracy_top_5'", ',', 'accuracy_top_5', '[', '1', ']', ')', 'return', 'tf', '.', 'estimator', '.', 'EstimatorSpec', '(', 'mode', '=', 'mode', ',', 'predictions', '=', 'predictions', ',', 'loss', '=', 'loss', ',', 'train_op', '=', 'train_op', ',', 'eval_metric_ops', '=', 'metrics', ')'] | Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
version: Integer representing which version of the ResNet network to use.
See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability. A detailed
summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode. | ['Shared', 'functionality', 'for', 'different', 'resnet', 'model_fns', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/resnet_run_loop.py#L221-L402 |
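In the surrounding run loop, a model_fn like the one above is typically handed to `tf.estimator.Estimator` through a thin wrapper with the standard `(features, labels, mode, params)` signature. A minimal sketch of that wiring, with the model class, ResNet size and hyperparameter values chosen purely as illustrative assumptions (TF 1.x API):

.. code:: python

    import tensorflow as tf  # TF 1.x Estimator API, matching the record above

    def wrapped_model_fn(features, labels, mode, params):
        # Hypothetical wrapper: forward the extra arguments explicitly so the
        # Estimator only sees the standard four-argument signature.
        return resnet_model_fn(
            features, labels, mode,
            model_class=ImagenetModel,        # assumed ResnetModel subclass
            resnet_size=50,
            weight_decay=1e-4,
            learning_rate_fn=lambda global_step: tf.constant(0.1),  # placeholder schedule
            momentum=0.9,
            data_format=None,
            version=2,
            loss_scale=params.get('loss_scale', 1))

    classifier = tf.estimator.Estimator(
        model_fn=wrapped_model_fn, model_dir='/tmp/resnet', params={'loss_scale': 1})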
955 | tcalmant/ipopo | pelix/rsa/__init__.py | RemoteServiceAdminEvent.fromimporterror | def fromimporterror(cls, bundle, importerid, rsid, exception, endpoint):
# type: (Bundle, Tuple[str, str], Tuple[Tuple[str, str], int], Optional[Tuple[Any, Any, Any]], EndpointDescription) -> RemoteServiceAdminEvent
"""
Creates a RemoteServiceAdminEvent object from an import error
"""
return RemoteServiceAdminEvent(
RemoteServiceAdminEvent.IMPORT_ERROR,
bundle,
importerid,
rsid,
None,
None,
exception,
endpoint,
) | python | def fromimporterror(cls, bundle, importerid, rsid, exception, endpoint):
# type: (Bundle, Tuple[str, str], Tuple[Tuple[str, str], int], Optional[Tuple[Any, Any, Any]], EndpointDescription) -> RemoteServiceAdminEvent
"""
Creates a RemoteServiceAdminEvent object from an import error
"""
return RemoteServiceAdminEvent(
RemoteServiceAdminEvent.IMPORT_ERROR,
bundle,
importerid,
rsid,
None,
None,
exception,
endpoint,
) | ['def', 'fromimporterror', '(', 'cls', ',', 'bundle', ',', 'importerid', ',', 'rsid', ',', 'exception', ',', 'endpoint', ')', ':', '# type: (Bundle, Tuple[str, str], Tuple[Tuple[str, str], int], Optional[Tuple[Any, Any, Any]], EndpointDescription) -> RemoteServiceAdminEvent', 'return', 'RemoteServiceAdminEvent', '(', 'RemoteServiceAdminEvent', '.', 'IMPORT_ERROR', ',', 'bundle', ',', 'importerid', ',', 'rsid', ',', 'None', ',', 'None', ',', 'exception', ',', 'endpoint', ',', ')'] | Creates a RemoteServiceAdminEvent object from an import error | ['Creates', 'a', 'RemoteServiceAdminEvent', 'object', 'from', 'an', 'import', 'error'] | train | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/__init__.py#L929-L943 |
956 | cdgriffith/Reusables | reusables/file_operations.py | dup_finder | def dup_finder(file_path, directory=".", enable_scandir=False):
"""
Check a directory for duplicates of the specified file. This is meant
for a single file only, for checking a directory for dups, use
directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generators
"""
size = os.path.getsize(file_path)
if size == 0:
for empty_file in remove_empty_files(directory, dry_run=True):
yield empty_file
else:
with open(file_path, 'rb') as f:
first_twenty = f.read(20)
file_sha256 = file_hash(file_path, "sha256")
for root, directories, files in _walk(directory,
enable_scandir=enable_scandir):
for each_file in files:
test_file = os.path.join(root, each_file)
if os.path.getsize(test_file) == size:
try:
with open(test_file, 'rb') as f:
test_first_twenty = f.read(20)
except OSError:
logger.warning("Could not open file to compare - "
"{0}".format(test_file))
else:
if first_twenty == test_first_twenty:
if file_hash(test_file, "sha256") == file_sha256:
yield os.path.abspath(test_file) | python | def dup_finder(file_path, directory=".", enable_scandir=False):
"""
Check a directory for duplicates of the specified file. This is meant
for a single file only, for checking a directory for dups, use
directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generators
"""
size = os.path.getsize(file_path)
if size == 0:
for empty_file in remove_empty_files(directory, dry_run=True):
yield empty_file
else:
with open(file_path, 'rb') as f:
first_twenty = f.read(20)
file_sha256 = file_hash(file_path, "sha256")
for root, directories, files in _walk(directory,
enable_scandir=enable_scandir):
for each_file in files:
test_file = os.path.join(root, each_file)
if os.path.getsize(test_file) == size:
try:
with open(test_file, 'rb') as f:
test_first_twenty = f.read(20)
except OSError:
logger.warning("Could not open file to compare - "
"{0}".format(test_file))
else:
if first_twenty == test_first_twenty:
if file_hash(test_file, "sha256") == file_sha256:
yield os.path.abspath(test_file) | ['def', 'dup_finder', '(', 'file_path', ',', 'directory', '=', '"."', ',', 'enable_scandir', '=', 'False', ')', ':', 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'file_path', ')', 'if', 'size', '==', '0', ':', 'for', 'empty_file', 'in', 'remove_empty_files', '(', 'directory', ',', 'dry_run', '=', 'True', ')', ':', 'yield', 'empty_file', 'else', ':', 'with', 'open', '(', 'file_path', ',', "'rb'", ')', 'as', 'f', ':', 'first_twenty', '=', 'f', '.', 'read', '(', '20', ')', 'file_sha256', '=', 'file_hash', '(', 'file_path', ',', '"sha256"', ')', 'for', 'root', ',', 'directories', ',', 'files', 'in', '_walk', '(', 'directory', ',', 'enable_scandir', '=', 'enable_scandir', ')', ':', 'for', 'each_file', 'in', 'files', ':', 'test_file', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'each_file', ')', 'if', 'os', '.', 'path', '.', 'getsize', '(', 'test_file', ')', '==', 'size', ':', 'try', ':', 'with', 'open', '(', 'test_file', ',', "'rb'", ')', 'as', 'f', ':', 'test_first_twenty', '=', 'f', '.', 'read', '(', '20', ')', 'except', 'OSError', ':', 'logger', '.', 'warning', '(', '"Could not open file to compare - "', '"{0}"', '.', 'format', '(', 'test_file', ')', ')', 'else', ':', 'if', 'first_twenty', '==', 'test_first_twenty', ':', 'if', 'file_hash', '(', 'test_file', ',', '"sha256"', ')', '==', 'file_sha256', ':', 'yield', 'os', '.', 'path', '.', 'abspath', '(', 'test_file', ')'] | Check a directory for duplicates of the specified file. This is meant
for a single file only, for checking a directory for dups, use
directory_duplicates.
This is designed to be as fast as possible by doing lighter checks
before progressing to
more extensive ones, in order they are:
1. File size
2. First twenty bytes
3. Full SHA256 compare
.. code:: python
list(reusables.dup_finder(
"test_structure\\files_2\\empty_file"))
# ['C:\\Reusables\\test\\data\\fake_dir',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1',
# 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2',
# 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file']
:param file_path: Path to file to check for duplicates of
:param directory: Directory to dig recursively into to look for duplicates
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generators | ['Check', 'a', 'directory', 'for', 'duplicates', 'of', 'the', 'specified', 'file', '.', 'This', 'is', 'meant', 'for', 'a', 'single', 'file', 'only', 'for', 'checking', 'a', 'directory', 'for', 'dups', 'use', 'directory_duplicates', '.'] | train | https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L630-L681 |
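As a usage illustration for the record above, the generator is normally consumed straight from the package namespace; a small sketch with hypothetical paths:

.. code:: python

    import reusables

    # Lazily walk "archive" and report every file identical to report_2023.pdf.
    for duplicate in reusables.dup_finder("report_2023.pdf", directory="archive"):
        print("duplicate:", duplicate)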
957 | ozak/georasters | georasters/georasters.py | aggregate | def aggregate(raster, ndv, block_size):
'''
Aggregate raster to smaller resolution, by adding cells.
Usage:
aggregate(raster, ndv, block_size)
where:
raster is a Numpy array created by importing the raster (e.g. geotiff)
ndv is the NoData Value for the raster (can be read using the get_geo_info function)
block_size is a duple of factors by which the raster will be shrunk
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
costs = load_tiff(raster)
costs2=aggregate(costs, ndv, (10,10))
'''
raster2 = block_reduce(raster, block_size, func=np.ma.sum)
return raster2 | python | def aggregate(raster, ndv, block_size):
'''
Aggregate raster to smaller resolution, by adding cells.
Usage:
aggregate(raster, ndv, block_size)
where:
raster is a Numpy array created by importing the raster (e.g. geotiff)
ndv is the NoData Value for the raster (can be read using the get_geo_info function)
block_size is a duple of factors by which the raster will be shrunk
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
costs = load_tiff(raster)
costs2=aggregate(costs, ndv, (10,10))
'''
raster2 = block_reduce(raster, block_size, func=np.ma.sum)
return raster2 | ['def', 'aggregate', '(', 'raster', ',', 'ndv', ',', 'block_size', ')', ':', 'raster2', '=', 'block_reduce', '(', 'raster', ',', 'block_size', ',', 'func', '=', 'np', '.', 'ma', '.', 'sum', ')', 'return', 'raster2'] | Aggregate raster to smaller resolution, by adding cells.
Usage:
aggregate(raster, ndv, block_size)
where:
raster is a Numpy array created by importing the raster (e.g. geotiff)
ndv is the NoData Value for the raster (can be read using the get_geo_info function)
block_size is a duple of factors by which the raster will be shrunk
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
costs = load_tiff(raster)
costs2=aggregate(costs, ndv, (10,10)) | ['Aggregate', 'raster', 'to', 'smaller', 'resolution', 'by', 'adding', 'cells', '.', 'Usage', ':', 'aggregate', '(', 'raster', 'ndv', 'block_size', ')'] | train | https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L111-L134 |
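The heavy lifting in `aggregate` is done by `block_reduce`, which georasters appears to take from scikit-image; a tiny standalone sketch of the same call on a toy array:

.. code:: python

    import numpy as np
    from skimage.measure import block_reduce  # assumed source of block_reduce

    fine = np.arange(36, dtype=float).reshape(6, 6)
    coarse = block_reduce(fine, (2, 2), func=np.ma.sum)  # each output cell sums a 2x2 block
    print(coarse.shape)  # (3, 3)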
958 | bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/bindepend.py | check_extract_from_egg | def check_extract_from_egg(pth, todir=None):
r"""
Check if path points to a file inside a python egg file, extract the
file from the egg to a cache directory (following pkg_resources
convention) and return [(extracted path, egg file path, relative path
inside egg file)].
Otherwise, just return [(original path, None, None)].
If path points to an egg file directly, return a list with all files
from the egg formatted like above.
Example:
>>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd')
[(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd',
r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')]
"""
rv = []
if os.path.altsep:
pth = pth.replace(os.path.altsep, os.path.sep)
components = pth.split(os.path.sep)
for i, name in enumerate(components):
if name.lower().endswith(".egg"):
eggpth = os.path.sep.join(components[:i + 1])
if os.path.isfile(eggpth):
# eggs can also be directories!
try:
egg = zipfile.ZipFile(eggpth)
except zipfile.BadZipfile, e:
raise SystemExit("Error: %s %s" % (eggpth, e))
if todir is None:
# Use the same directory as setuptools/pkg_resources. So,
# if the specific egg was accessed before (not necessarily
# by pyinstaller), the extracted contents already exist
# (pkg_resources puts them there) and can be used.
todir = os.path.join(pkg_resouces_get_default_cache(),
name + "-tmp")
if components[i + 1:]:
members = ["/".join(components[i + 1:])]
else:
members = egg.namelist()
for member in members:
pth = os.path.join(todir, member)
if not os.path.isfile(pth):
dirname = os.path.dirname(pth)
if not os.path.isdir(dirname):
os.makedirs(dirname)
f = open(pth, "wb")
f.write(egg.read(member))
f.close()
rv.append((pth, eggpth, member))
return rv
return [(pth, None, None)] | python | def check_extract_from_egg(pth, todir=None):
r"""
Check if path points to a file inside a python egg file, extract the
file from the egg to a cache directory (following pkg_resources
convention) and return [(extracted path, egg file path, relative path
inside egg file)].
Otherwise, just return [(original path, None, None)].
If path points to an egg file directly, return a list with all files
from the egg formatted like above.
Example:
>>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd')
[(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd',
r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')]
"""
rv = []
if os.path.altsep:
pth = pth.replace(os.path.altsep, os.path.sep)
components = pth.split(os.path.sep)
for i, name in enumerate(components):
if name.lower().endswith(".egg"):
eggpth = os.path.sep.join(components[:i + 1])
if os.path.isfile(eggpth):
# eggs can also be directories!
try:
egg = zipfile.ZipFile(eggpth)
except zipfile.BadZipfile, e:
raise SystemExit("Error: %s %s" % (eggpth, e))
if todir is None:
# Use the same directory as setuptools/pkg_resources. So,
# if the specific egg was accessed before (not necessarily
# by pyinstaller), the extracted contents already exist
# (pkg_resources puts them there) and can be used.
todir = os.path.join(pkg_resouces_get_default_cache(),
name + "-tmp")
if components[i + 1:]:
members = ["/".join(components[i + 1:])]
else:
members = egg.namelist()
for member in members:
pth = os.path.join(todir, member)
if not os.path.isfile(pth):
dirname = os.path.dirname(pth)
if not os.path.isdir(dirname):
os.makedirs(dirname)
f = open(pth, "wb")
f.write(egg.read(member))
f.close()
rv.append((pth, eggpth, member))
return rv
return [(pth, None, None)] | ['def', 'check_extract_from_egg', '(', 'pth', ',', 'todir', '=', 'None', ')', ':', 'rv', '=', '[', ']', 'if', 'os', '.', 'path', '.', 'altsep', ':', 'pth', '=', 'pth', '.', 'replace', '(', 'os', '.', 'path', '.', 'altsep', ',', 'os', '.', 'path', '.', 'sep', ')', 'components', '=', 'pth', '.', 'split', '(', 'os', '.', 'path', '.', 'sep', ')', 'for', 'i', ',', 'name', 'in', 'enumerate', '(', 'components', ')', ':', 'if', 'name', '.', 'lower', '(', ')', '.', 'endswith', '(', '".egg"', ')', ':', 'eggpth', '=', 'os', '.', 'path', '.', 'sep', '.', 'join', '(', 'components', '[', ':', 'i', '+', '1', ']', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'eggpth', ')', ':', '# eggs can also be directories!', 'try', ':', 'egg', '=', 'zipfile', '.', 'ZipFile', '(', 'eggpth', ')', 'except', 'zipfile', '.', 'BadZipfile', ',', 'e', ':', 'raise', 'SystemExit', '(', '"Error: %s %s"', '%', '(', 'eggpth', ',', 'e', ')', ')', 'if', 'todir', 'is', 'None', ':', '# Use the same directory as setuptools/pkg_resources. So,', '# if the specific egg was accessed before (not necessarily', '# by pyinstaller), the extracted contents already exist', '# (pkg_resources puts them there) and can be used.', 'todir', '=', 'os', '.', 'path', '.', 'join', '(', 'pkg_resouces_get_default_cache', '(', ')', ',', 'name', '+', '"-tmp"', ')', 'if', 'components', '[', 'i', '+', '1', ':', ']', ':', 'members', '=', '[', '"/"', '.', 'join', '(', 'components', '[', 'i', '+', '1', ':', ']', ')', ']', 'else', ':', 'members', '=', 'egg', '.', 'namelist', '(', ')', 'for', 'member', 'in', 'members', ':', 'pth', '=', 'os', '.', 'path', '.', 'join', '(', 'todir', ',', 'member', ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'pth', ')', ':', 'dirname', '=', 'os', '.', 'path', '.', 'dirname', '(', 'pth', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'dirname', ')', ':', 'os', '.', 'makedirs', '(', 'dirname', ')', 'f', '=', 'open', '(', 'pth', ',', '"wb"', ')', 'f', '.', 'write', '(', 'egg', '.', 'read', '(', 'member', ')', ')', 'f', '.', 'close', '(', ')', 'rv', '.', 'append', '(', '(', 'pth', ',', 'eggpth', ',', 'member', ')', ')', 'return', 'rv', 'return', '[', '(', 'pth', ',', 'None', ',', 'None', ')', ']'] | r"""
Check if path points to a file inside a python egg file, extract the
file from the egg to a cache directory (following pkg_resources
convention) and return [(extracted path, egg file path, relative path
inside egg file)].
Otherwise, just return [(original path, None, None)].
If path points to an egg file directly, return a list with all files
from the egg formatted like above.
Example:
>>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd')
[(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd',
r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')] | ['r', 'Check', 'if', 'path', 'points', 'to', 'a', 'file', 'inside', 'a', 'python', 'egg', 'file', 'extract', 'the', 'file', 'from', 'the', 'egg', 'to', 'a', 'cache', 'directory', '(', 'following', 'pkg_resources', 'convention', ')', 'and', 'return', '[', '(', 'extracted', 'path', 'egg', 'file', 'path', 'relative', 'path', 'inside', 'egg', 'file', ')', ']', '.', 'Otherwise', 'just', 'return', '[', '(', 'original', 'path', 'None', 'None', ')', ']', '.', 'If', 'path', 'points', 'to', 'an', 'egg', 'file', 'directly', 'return', 'a', 'list', 'with', 'all', 'files', 'from', 'the', 'egg', 'formatted', 'like', 'above', '.'] | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/bindepend.py#L178-L228 |
959 | SiLab-Bonn/pyBAR_fei4_interpreter | pybar_fei4_interpreter/analysis_utils.py | hist_3d_index | def hist_3d_index(x, y, z, shape):
"""
Fast 3d histogram of 3D indices with C++ inner loop optimization.
Is more than 2 orders faster than np.histogramdd().
The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
y : array like
z : array like
shape : tuple
tuple with x,y,z dimensions: (x, y, z)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 3:
raise InvalidInputError('The shape has to describe a 3-d histogram')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
y = np.ascontiguousarray(y.astype(np.int32))
z = np.ascontiguousarray(z.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32).ravel() # ravel hist in c-style, 3D --> 1D
analysis_functions.hist_3d(x, y, z, shape[0], shape[1], shape[2], result)
return np.reshape(result, shape) | python | def hist_3d_index(x, y, z, shape):
"""
Fast 3d histogram of 3D indices with C++ inner loop optimization.
Is more than 2 orders faster than np.histogramdd().
The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
y : array like
z : array like
shape : tuple
tuple with x,y,z dimensions: (x, y, z)
Returns
-------
np.ndarray with given shape
"""
if len(shape) != 3:
raise InvalidInputError('The shape has to describe a 3-d histogram')
# change memory alignment for c++ library
x = np.ascontiguousarray(x.astype(np.int32))
y = np.ascontiguousarray(y.astype(np.int32))
z = np.ascontiguousarray(z.astype(np.int32))
result = np.zeros(shape=shape, dtype=np.uint32).ravel() # ravel hist in c-style, 3D --> 1D
analysis_functions.hist_3d(x, y, z, shape[0], shape[1], shape[2], result)
return np.reshape(result, shape) | ['def', 'hist_3d_index', '(', 'x', ',', 'y', ',', 'z', ',', 'shape', ')', ':', 'if', 'len', '(', 'shape', ')', '!=', '3', ':', 'raise', 'InvalidInputError', '(', "'The shape has to describe a 3-d histogram'", ')', '# change memory alignment for c++ library', 'x', '=', 'np', '.', 'ascontiguousarray', '(', 'x', '.', 'astype', '(', 'np', '.', 'int32', ')', ')', 'y', '=', 'np', '.', 'ascontiguousarray', '(', 'y', '.', 'astype', '(', 'np', '.', 'int32', ')', ')', 'z', '=', 'np', '.', 'ascontiguousarray', '(', 'z', '.', 'astype', '(', 'np', '.', 'int32', ')', ')', 'result', '=', 'np', '.', 'zeros', '(', 'shape', '=', 'shape', ',', 'dtype', '=', 'np', '.', 'uint32', ')', '.', 'ravel', '(', ')', '# ravel hist in c-style, 3D --> 1D', 'analysis_functions', '.', 'hist_3d', '(', 'x', ',', 'y', ',', 'z', ',', 'shape', '[', '0', ']', ',', 'shape', '[', '1', ']', ',', 'shape', '[', '2', ']', ',', 'result', ')', 'return', 'np', '.', 'reshape', '(', 'result', ',', 'shape', ')'] | Fast 3d histogram of 3D indices with C++ inner loop optimization.
Is more than 2 orders faster than np.histogramdd().
The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape.
Parameters
----------
x : array like
y : array like
z : array like
shape : tuple
tuple with x,y,z dimensions: (x, y, z)
Returns
-------
np.ndarray with given shape | ['Fast', '3d', 'histogram', 'of', '3D', 'indices', 'with', 'C', '++', 'inner', 'loop', 'optimization', '.', 'Is', 'more', 'than', '2', 'orders', 'faster', 'than', 'np', '.', 'histogramdd', '()', '.', 'The', 'indices', 'are', 'given', 'in', 'x', 'y', 'z', 'coordinates', 'and', 'have', 'to', 'fit', 'into', 'a', 'histogram', 'of', 'the', 'dimensions', 'shape', '.', 'Parameters', '----------', 'x', ':', 'array', 'like', 'y', ':', 'array', 'like', 'z', ':', 'array', 'like', 'shape', ':', 'tuple', 'tuple', 'with', 'x', 'y', 'z', 'dimensions', ':', '(', 'x', 'y', 'z', ')'] | train | https://github.com/SiLab-Bonn/pyBAR_fei4_interpreter/blob/0f8df18557598d6db0c64baa708e587c84bb787b/pybar_fei4_interpreter/analysis_utils.py#L114-L140 |
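The speed claim above rests on the compiled `analysis_functions.hist_3d`; for comparison, a plain-NumPy stand-in (a sketch, not part of pyBAR) builds the same histogram with `ravel_multi_index` and `bincount`:

.. code:: python

    import numpy as np

    def hist_3d_numpy(x, y, z, shape):
        # Flatten the (x, y, z) index triples, count occurrences, reshape back.
        flat = np.ravel_multi_index(
            (np.asarray(x, np.intp), np.asarray(y, np.intp), np.asarray(z, np.intp)),
            shape)
        return np.bincount(flat, minlength=int(np.prod(shape))).reshape(shape).astype(np.uint32)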
960 | wummel/dosage | dosagelib/scraper.py | Scraper.setComplete | def setComplete(self, basepath):
"""Set complete flag for this comic, ie. all comics are downloaded."""
if self.endOfLife:
filename = self.getCompleteFile(basepath)
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write('All comics should be downloaded here.') | python | def setComplete(self, basepath):
"""Set complete flag for this comic, ie. all comics are downloaded."""
if self.endOfLife:
filename = self.getCompleteFile(basepath)
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write('All comics should be downloaded here.') | ['def', 'setComplete', '(', 'self', ',', 'basepath', ')', ':', 'if', 'self', '.', 'endOfLife', ':', 'filename', '=', 'self', '.', 'getCompleteFile', '(', 'basepath', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'with', 'open', '(', 'filename', ',', "'w'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', "'All comics should be downloaded here.'", ')'] | Set complete flag for this comic, ie. all comics are downloaded. | ['Set', 'complete', 'flag', 'for', 'this', 'comic', 'ie', '.', 'all', 'comics', 'are', 'downloaded', '.'] | train | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/scraper.py#L272-L278 |
961 | jwodder/doapi | doapi/image.py | Image.fetch | def fetch(self):
"""
Fetch & return a new `Image` object representing the image's current
state
:rtype: Image
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the image no longer exists)
"""
api = self.doapi_manager
return api._image(api.request(self.url)["image"]) | python | def fetch(self):
"""
Fetch & return a new `Image` object representing the image's current
state
:rtype: Image
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the image no longer exists)
"""
api = self.doapi_manager
return api._image(api.request(self.url)["image"]) | ['def', 'fetch', '(', 'self', ')', ':', 'api', '=', 'self', '.', 'doapi_manager', 'return', 'api', '.', '_image', '(', 'api', '.', 'request', '(', 'self', '.', 'url', ')', '[', '"image"', ']', ')'] | Fetch & return a new `Image` object representing the image's current
state
:rtype: Image
:raises DOAPIError: if the API endpoint replies with an error (e.g., if
the image no longer exists) | ['Fetch', '&', 'return', 'a', 'new', 'Image', 'object', 'representing', 'the', 'image', 's', 'current', 'state'] | train | https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/image.py#L69-L79 |
962 | phaethon/kamene | kamene/modules/p0f.py | p0f | def p0f(pkt):
"""Passive OS fingerprinting: which OS emitted this TCP packet ?
p0f(packet) -> accuracy, [list of guesses]
"""
db, sig = packet2p0f(pkt)
if db:
pb = db.get_base()
else:
pb = []
if not pb:
warning("p0f base empty.")
return []
#s = len(pb[0][0])
r = []
max = len(sig[4].split(",")) + 5
for b in pb:
d = p0f_correl(sig,b)
if d == max:
r.append((b[6], b[7], b[1] - pkt[IP].ttl))
return r | python | def p0f(pkt):
"""Passive OS fingerprinting: which OS emitted this TCP packet ?
p0f(packet) -> accuracy, [list of guesses]
"""
db, sig = packet2p0f(pkt)
if db:
pb = db.get_base()
else:
pb = []
if not pb:
warning("p0f base empty.")
return []
#s = len(pb[0][0])
r = []
max = len(sig[4].split(",")) + 5
for b in pb:
d = p0f_correl(sig,b)
if d == max:
r.append((b[6], b[7], b[1] - pkt[IP].ttl))
return r | ['def', 'p0f', '(', 'pkt', ')', ':', 'db', ',', 'sig', '=', 'packet2p0f', '(', 'pkt', ')', 'if', 'db', ':', 'pb', '=', 'db', '.', 'get_base', '(', ')', 'else', ':', 'pb', '=', '[', ']', 'if', 'not', 'pb', ':', 'warning', '(', '"p0f base empty."', ')', 'return', '[', ']', '#s = len(pb[0][0])', 'r', '=', '[', ']', 'max', '=', 'len', '(', 'sig', '[', '4', ']', '.', 'split', '(', '","', ')', ')', '+', '5', 'for', 'b', 'in', 'pb', ':', 'd', '=', 'p0f_correl', '(', 'sig', ',', 'b', ')', 'if', 'd', '==', 'max', ':', 'r', '.', 'append', '(', '(', 'b', '[', '6', ']', ',', 'b', '[', '7', ']', ',', 'b', '[', '1', ']', '-', 'pkt', '[', 'IP', ']', '.', 'ttl', ')', ')', 'return', 'r'] | Passive OS fingerprinting: which OS emitted this TCP packet ?
p0f(packet) -> accuracy, [list of guesses] | ['Passive', 'OS', 'fingerprinting', ':', 'which', 'OS', 'emitted', 'this', 'TCP', 'packet', '?', 'p0f', '(', 'packet', ')', '-', '>', 'accuracy', '[', 'list', 'of', 'guesses', ']'] | train | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/modules/p0f.py#L264-L283 |
963 | manns/pyspread | pyspread/src/lib/vlc.py | debug_callback | def debug_callback(event, *args, **kwds):
'''Example callback, useful for debugging.
'''
l = ['event %s' % (event.type,)]
if args:
l.extend(map(str, args))
if kwds:
l.extend(sorted('%s=%s' % t for t in kwds.items()))
print('Debug callback (%s)' % ', '.join(l)) | python | def debug_callback(event, *args, **kwds):
'''Example callback, useful for debugging.
'''
l = ['event %s' % (event.type,)]
if args:
l.extend(map(str, args))
if kwds:
l.extend(sorted('%s=%s' % t for t in kwds.items()))
print('Debug callback (%s)' % ', '.join(l)) | ['def', 'debug_callback', '(', 'event', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'l', '=', '[', "'event %s'", '%', '(', 'event', '.', 'type', ',', ')', ']', 'if', 'args', ':', 'l', '.', 'extend', '(', 'map', '(', 'str', ',', 'args', ')', ')', 'if', 'kwds', ':', 'l', '.', 'extend', '(', 'sorted', '(', "'%s=%s'", '%', 't', 'for', 't', 'in', 'kwds', '.', 'items', '(', ')', ')', ')', 'print', '(', "'Debug callback (%s)'", '%', "', '", '.', 'join', '(', 'l', ')', ')'] | Example callback, useful for debugging. | ['Example', 'callback', 'useful', 'for', 'debugging', '.'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L6882-L6890 |
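A hedged usage sketch for the callback above, assuming libVLC and the `vlc` Python bindings are importable; the media path is hypothetical:

.. code:: python

    import vlc

    player = vlc.MediaPlayer("movie.mp4")
    events = player.event_manager()
    # event_attach forwards extra positional/keyword arguments to the callback.
    events.event_attach(vlc.EventType.MediaPlayerEndReached, debug_callback, "main-player")
    player.play()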
964 | squeaky-pl/japronto | misc/cpu.py | save | def save():
"""
save function
"""
results = {}
cpu_number = 0
while True:
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_governor'.format(cpu_number))
except:
break
governor = _file.read().strip()
results.setdefault(cpu_number, {})['governor'] = governor
_file.close()
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_cur_freq'.format(cpu_number))
except:
break
results[cpu_number]['freq'] = _file.read().strip()
_file.close()
cpu_number += 1
return results | python | def save():
"""
save function
"""
results = {}
cpu_number = 0
while True:
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_governor'.format(cpu_number))
except:
break
governor = _file.read().strip()
results.setdefault(cpu_number, {})['governor'] = governor
_file.close()
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_cur_freq'.format(cpu_number))
except:
break
results[cpu_number]['freq'] = _file.read().strip()
_file.close()
cpu_number += 1
return results | ['def', 'save', '(', ')', ':', 'results', '=', '{', '}', 'cpu_number', '=', '0', 'while', 'True', ':', 'try', ':', '_file', '=', 'open', '(', 'CPU_PREFIX', '+', "'cpu{}/cpufreq/scaling_governor'", '.', 'format', '(', 'cpu_number', ')', ')', 'except', ':', 'break', 'governor', '=', '_file', '.', 'read', '(', ')', '.', 'strip', '(', ')', 'results', '.', 'setdefault', '(', 'cpu_number', ',', '{', '}', ')', '[', "'governor'", ']', '=', 'governor', '_file', '.', 'close', '(', ')', 'try', ':', '_file', '=', 'open', '(', 'CPU_PREFIX', '+', "'cpu{}/cpufreq/scaling_cur_freq'", '.', 'format', '(', 'cpu_number', ')', ')', 'except', ':', 'break', 'results', '[', 'cpu_number', ']', '[', "'freq'", ']', '=', '_file', '.', 'read', '(', ')', '.', 'strip', '(', ')', '_file', '.', 'close', '(', ')', 'cpu_number', '+=', '1', 'return', 'results'] | save function | ['save', 'function'] | train | https://github.com/squeaky-pl/japronto/blob/a526277a2f59100388c9f39d4ca22bfb4909955b/misc/cpu.py#L14-L45 |
965 | theonion/django-bulbs | bulbs/special_coverage/models.py | SpecialCoverage._save_percolator | def _save_percolator(self):
"""
Saves the query field as an elasticsearch percolator
"""
index = Content.search_objects.mapping.index
query_filter = self.get_content(published=False).to_dict()
q = {}
if "query" in query_filter:
q = {"query": query_filter.get("query", {})}
else:
# We don't know how to save this
return
# We'll need this data, to decide which special coverage section to use
q["sponsored"] = bool(self.tunic_campaign_id)
# Elasticsearch v1.4 percolator "field_value_factor" does not
# support missing fields, so always need to include
q["start_date"] = self.start_date
# NOTE: set end_date to datetime.max if special coverage has no end date
# (i.e. is a neverending special coverage)
q["end_date"] = self.end_date if self.end_date else datetime.max.replace(tzinfo=pytz.UTC)
# Elasticsearch v1.4 percolator range query does not support DateTime range queries
# (PercolateContext.nowInMillisImpl is not implemented).
if q["start_date"]:
q['start_date_epoch'] = datetime_to_epoch_seconds(q["start_date"])
if q["end_date"]:
q['end_date_epoch'] = datetime_to_epoch_seconds(q["end_date"])
# Store manually included IDs for percolator retrieval scoring (boost
# manually included content).
if self.query:
q['included_ids'] = self.query.get('included_ids', [])
es.index(
index=index,
doc_type=".percolator",
body=q,
id=self.es_id
) | python | def _save_percolator(self):
"""
Saves the query field as an elasticsearch percolator
"""
index = Content.search_objects.mapping.index
query_filter = self.get_content(published=False).to_dict()
q = {}
if "query" in query_filter:
q = {"query": query_filter.get("query", {})}
else:
# We don't know how to save this
return
# We'll need this data, to decide which special coverage section to use
q["sponsored"] = bool(self.tunic_campaign_id)
# Elasticsearch v1.4 percolator "field_value_factor" does not
# support missing fields, so always need to include
q["start_date"] = self.start_date
# NOTE: set end_date to datetime.max if special coverage has no end date
# (i.e. is a neverending special coverage)
q["end_date"] = self.end_date if self.end_date else datetime.max.replace(tzinfo=pytz.UTC)
# Elasticsearch v1.4 percolator range query does not support DateTime range queries
# (PercolateContext.nowInMillisImpl is not implemented).
if q["start_date"]:
q['start_date_epoch'] = datetime_to_epoch_seconds(q["start_date"])
if q["end_date"]:
q['end_date_epoch'] = datetime_to_epoch_seconds(q["end_date"])
# Store manually included IDs for percolator retrieval scoring (boost
# manually included content).
if self.query:
q['included_ids'] = self.query.get('included_ids', [])
es.index(
index=index,
doc_type=".percolator",
body=q,
id=self.es_id
) | ['def', '_save_percolator', '(', 'self', ')', ':', 'index', '=', 'Content', '.', 'search_objects', '.', 'mapping', '.', 'index', 'query_filter', '=', 'self', '.', 'get_content', '(', 'published', '=', 'False', ')', '.', 'to_dict', '(', ')', 'q', '=', '{', '}', 'if', '"query"', 'in', 'query_filter', ':', 'q', '=', '{', '"query"', ':', 'query_filter', '.', 'get', '(', '"query"', ',', '{', '}', ')', '}', 'else', ':', "# We don't know how to save this", 'return', "# We'll need this data, to decide which special coverage section to use", 'q', '[', '"sponsored"', ']', '=', 'bool', '(', 'self', '.', 'tunic_campaign_id', ')', '# Elasticsearch v1.4 percolator "field_value_factor" does not', '# support missing fields, so always need to include', 'q', '[', '"start_date"', ']', '=', 'self', '.', 'start_date', '# NOTE: set end_date to datetime.max if special coverage has no end date', '# (i.e. is a neverending special coverage)', 'q', '[', '"end_date"', ']', '=', 'self', '.', 'end_date', 'if', 'self', '.', 'end_date', 'else', 'datetime', '.', 'max', '.', 'replace', '(', 'tzinfo', '=', 'pytz', '.', 'UTC', ')', '# Elasticsearch v1.4 percolator range query does not support DateTime range queries', '# (PercolateContext.nowInMillisImpl is not implemented).', 'if', 'q', '[', '"start_date"', ']', ':', 'q', '[', "'start_date_epoch'", ']', '=', 'datetime_to_epoch_seconds', '(', 'q', '[', '"start_date"', ']', ')', 'if', 'q', '[', '"end_date"', ']', ':', 'q', '[', "'end_date_epoch'", ']', '=', 'datetime_to_epoch_seconds', '(', 'q', '[', '"end_date"', ']', ')', '# Store manually included IDs for percolator retrieval scoring (boost', '# manually included content).', 'if', 'self', '.', 'query', ':', 'q', '[', "'included_ids'", ']', '=', 'self', '.', 'query', '.', 'get', '(', "'included_ids'", ',', '[', ']', ')', 'es', '.', 'index', '(', 'index', '=', 'index', ',', 'doc_type', '=', '".percolator"', ',', 'body', '=', 'q', ',', 'id', '=', 'self', '.', 'es_id', ')'] | Saves the query field as an elasticsearch percolator | ['Saves', 'the', 'query', 'field', 'as', 'an', 'elasticsearch', 'percolator'] | train | https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/special_coverage/models.py#L102-L144 |
966 | hubo1016/vlcp | vlcp/event/lock.py | Semaphore.create | def create(self):
"""
Create the subqueue to change the default behavior of Lock to semaphore.
"""
self.queue = self.scheduler.queue.addSubQueue(self.priority, LockEvent.createMatcher(self.context, self.key),
maxdefault = self.size, defaultQueueClass = CBQueue.AutoClassQueue.initHelper('locker', subqueuelimit = 1)) | python | def create(self):
"""
Create the subqueue to change the default behavior of Lock to semaphore.
"""
self.queue = self.scheduler.queue.addSubQueue(self.priority, LockEvent.createMatcher(self.context, self.key),
maxdefault = self.size, defaultQueueClass = CBQueue.AutoClassQueue.initHelper('locker', subqueuelimit = 1)) | ['def', 'create', '(', 'self', ')', ':', 'self', '.', 'queue', '=', 'self', '.', 'scheduler', '.', 'queue', '.', 'addSubQueue', '(', 'self', '.', 'priority', ',', 'LockEvent', '.', 'createMatcher', '(', 'self', '.', 'context', ',', 'self', '.', 'key', ')', ',', 'maxdefault', '=', 'self', '.', 'size', ',', 'defaultQueueClass', '=', 'CBQueue', '.', 'AutoClassQueue', '.', 'initHelper', '(', "'locker'", ',', 'subqueuelimit', '=', '1', ')', ')'] | Create the subqueue to change the default behavior of Lock to semaphore. | ['Create', 'the', 'subqueue', 'to', 'change', 'the', 'default', 'behavior', 'of', 'Lock', 'to', 'semaphore', '.'] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L141-L146 |
967 | btel/svg_utils | src/svgutils/compose.py | Figure.tostr | def tostr(self):
"""Export SVG as a string"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr | python | def tostr(self):
"""Export SVG as a string"""
element = _transform.SVGFigure(self.width, self.height)
element.append(self)
svgstr = element.to_str()
return svgstr | ['def', 'tostr', '(', 'self', ')', ':', 'element', '=', '_transform', '.', 'SVGFigure', '(', 'self', '.', 'width', ',', 'self', '.', 'height', ')', 'element', '.', 'append', '(', 'self', ')', 'svgstr', '=', 'element', '.', 'to_str', '(', ')', 'return', 'svgstr'] | Export SVG as a string | ['Export', 'SVG', 'as', 'a', 'string'] | train | https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/compose.py#L304-L309 |
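A short usage sketch for `tostr`; sizes and text are illustrative, and the `Figure`/`Text` element classes are assumed from the same `svgutils.compose` module:

.. code:: python

    from svgutils.compose import Figure, Text

    fig = Figure("8cm", "4cm", Text("Hello SVG", 25, 20, size=12))
    svg_markup = fig.tostr()  # serialized <svg> document returned by to_str()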
968 | TkTech/Jawa | jawa/util/bytecode.py | load_bytecode_definitions | def load_bytecode_definitions(*, path=None) -> dict:
"""Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions.
"""
if path is not None:
with open(path, 'rb') as file_in:
j = json.load(file_in)
else:
try:
j = json.loads(pkgutil.get_data('jawa.util', 'bytecode.json'))
except json.JSONDecodeError:
# Unfortunately our best way to handle missing/malformed/empty
# bytecode.json files since it may not actually be backed by a
# "real" file.
return {}
for definition in j.values():
# If the entry has any operands take the text labels and convert
# them into pre-cached struct objects and operand types.
operands = definition['operands']
if operands:
definition['operands'] = [
[getattr(OperandFmts, oo[0]), OperandTypes[oo[1]]]
for oo in operands
]
# Return one dict that contains both mnemonic keys and opcode keys.
return {**j, **{v['op']: v for v in j.values()}} | python | def load_bytecode_definitions(*, path=None) -> dict:
"""Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions.
"""
if path is not None:
with open(path, 'rb') as file_in:
j = json.load(file_in)
else:
try:
j = json.loads(pkgutil.get_data('jawa.util', 'bytecode.json'))
except json.JSONDecodeError:
# Unfortunately our best way to handle missing/malformed/empty
# bytecode.json files since it may not actually be backed by a
# "real" file.
return {}
for definition in j.values():
# If the entry has any operands take the text labels and convert
# them into pre-cached struct objects and operand types.
operands = definition['operands']
if operands:
definition['operands'] = [
[getattr(OperandFmts, oo[0]), OperandTypes[oo[1]]]
for oo in operands
]
# Return one dict that contains both mnemonic keys and opcode keys.
return {**j, **{v['op']: v for v in j.values()}} | ['def', 'load_bytecode_definitions', '(', '*', ',', 'path', '=', 'None', ')', '->', 'dict', ':', 'if', 'path', 'is', 'not', 'None', ':', 'with', 'open', '(', 'path', ',', "'rb'", ')', 'as', 'file_in', ':', 'j', '=', 'json', '.', 'load', '(', 'file_in', ')', 'else', ':', 'try', ':', 'j', '=', 'json', '.', 'loads', '(', 'pkgutil', '.', 'get_data', '(', "'jawa.util'", ',', "'bytecode.json'", ')', ')', 'except', 'json', '.', 'JSONDecodeError', ':', '# Unfortunately our best way to handle missing/malformed/empty', '# bytecode.json files since it may not actually be backed by a', '# "real" file.', 'return', '{', '}', 'for', 'definition', 'in', 'j', '.', 'values', '(', ')', ':', '# If the entry has any operands take the text labels and convert', '# them into pre-cached struct objects and operand types.', 'operands', '=', 'definition', '[', "'operands'", ']', 'if', 'operands', ':', 'definition', '[', "'operands'", ']', '=', '[', '[', 'getattr', '(', 'OperandFmts', ',', 'oo', '[', '0', ']', ')', ',', 'OperandTypes', '[', 'oo', '[', '1', ']', ']', ']', 'for', 'oo', 'in', 'operands', ']', '# Return one dict that contains both mnemonic keys and opcode keys.', 'return', '{', '*', '*', 'j', ',', '*', '*', '{', 'v', '[', "'op'", ']', ':', 'v', 'for', 'v', 'in', 'j', '.', 'values', '(', ')', '}', '}'] | Load bytecode definitions from JSON file.
If no path is provided the default bytecode.json will be loaded.
:param path: Either None or a path to a JSON file to load containing
bytecode definitions. | ['Load', 'bytecode', 'definitions', 'from', 'JSON', 'file', '.'] | train | https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/util/bytecode.py#L262-L293 |
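A minimal usage sketch for the `load_bytecode_definitions` entry above: the returned dict is keyed by both mnemonic and opcode, so the same definition can be reached either way. The 'iadd' mnemonic is only assumed to be one of the keys in the bundled bytecode.json.

    from jawa.util.bytecode import load_bytecode_definitions

    definitions = load_bytecode_definitions()   # no path given, so the packaged bytecode.json is used
    iadd = definitions.get('iadd')              # lookup by mnemonic (assumed key)
    if iadd is not None:
        # the merged dict maps the opcode back to the very same definition object
        assert definitions[iadd['op']] is iadd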
969 | saltstack/salt | salt/modules/status.py | version | def version():
'''
Return the system version for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
.. versionchanged:: 2018.3.0
Added support for OpenBSD
CLI Example:
.. code-block:: bash
salt '*' status.version
'''
def linux_version():
'''
linux specific implementation of version
'''
try:
with salt.utils.files.fopen('/proc/version', 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError:
return {}
def bsd_version():
'''
bsd specific implementation of version
'''
return __salt__['cmd.run']('sysctl -n kern.version')
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_version,
'FreeBSD': bsd_version,
'OpenBSD': bsd_version,
'AIX': lambda: __salt__['cmd.run']('oslevel -s'),
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)() | python | def version():
'''
Return the system version for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
.. versionchanged:: 2018.3.0
Added support for OpenBSD
CLI Example:
.. code-block:: bash
salt '*' status.version
'''
def linux_version():
'''
linux specific implementation of version
'''
try:
with salt.utils.files.fopen('/proc/version', 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError:
return {}
def bsd_version():
'''
bsd specific implementation of version
'''
return __salt__['cmd.run']('sysctl -n kern.version')
# dict that returns a function that does the right thing per platform
get_version = {
'Linux': linux_version,
'FreeBSD': bsd_version,
'OpenBSD': bsd_version,
'AIX': lambda: __salt__['cmd.run']('oslevel -s'),
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)() | ['def', 'version', '(', ')', ':', 'def', 'linux_version', '(', ')', ':', "'''\n linux specific implementation of version\n '''", 'try', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', "'/proc/version'", ',', "'r'", ')', 'as', 'fp_', ':', 'return', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'fp_', '.', 'read', '(', ')', ')', '.', 'strip', '(', ')', 'except', 'IOError', ':', 'return', '{', '}', 'def', 'bsd_version', '(', ')', ':', "'''\n bsd specific implementation of version\n '''", 'return', '__salt__', '[', "'cmd.run'", ']', '(', "'sysctl -n kern.version'", ')', '# dict that returns a function that does the right thing per platform', 'get_version', '=', '{', "'Linux'", ':', 'linux_version', ',', "'FreeBSD'", ':', 'bsd_version', ',', "'OpenBSD'", ':', 'bsd_version', ',', "'AIX'", ':', 'lambda', ':', '__salt__', '[', "'cmd.run'", ']', '(', "'oslevel -s'", ')', ',', '}', 'errmsg', '=', "'This method is unsupported on the current operating system!'", 'return', 'get_version', '.', 'get', '(', '__grains__', '[', "'kernel'", ']', ',', 'lambda', ':', 'errmsg', ')', '(', ')'] | Return the system version for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
.. versionchanged:: 2018.3.0
Added support for OpenBSD
CLI Example:
.. code-block:: bash
salt '*' status.version | ['Return', 'the', 'system', 'version', 'for', 'this', 'minion'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L1551-L1592 |
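`status.version` above is normally reached through the Salt CLI shown in its docstring; the sketch below only reproduces the dispatch-dict pattern it uses, with `platform.system()` standing in for the `__grains__['kernel']` lookup so it can run outside Salt.

    import platform
    import subprocess

    def _linux_version():
        with open('/proc/version') as fp_:
            return fp_.read().strip()

    def _bsd_version():
        return subprocess.check_output(['sysctl', '-n', 'kern.version']).decode().strip()

    get_version = {'Linux': _linux_version, 'FreeBSD': _bsd_version, 'OpenBSD': _bsd_version}
    errmsg = 'This method is unsupported on the current operating system!'
    print(get_version.get(platform.system(), lambda: errmsg)())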
970 | soasme/dogeon | dson/__init__.py | loads | def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
r"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a DSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. DSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every DSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for DSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every DSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for DSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid DSON numbers
are encountered.
To use a custom ``DSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``DSONDecoder`` is used.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = DSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s) | python | def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
r"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a DSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. DSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every DSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for DSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every DSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for DSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid DSON numbers
are encountered.
To use a custom ``DSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``DSONDecoder`` is used.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = DSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s) | ['def', 'loads', '(', 's', ',', 'encoding', '=', 'None', ',', 'cls', '=', 'None', ',', 'object_hook', '=', 'None', ',', 'parse_float', '=', 'None', ',', 'parse_int', '=', 'None', ',', 'parse_constant', '=', 'None', ',', 'object_pairs_hook', '=', 'None', ',', '*', '*', 'kw', ')', ':', 'if', '(', 'cls', 'is', 'None', 'and', 'encoding', 'is', 'None', 'and', 'object_hook', 'is', 'None', 'and', 'parse_int', 'is', 'None', 'and', 'parse_float', 'is', 'None', 'and', 'parse_constant', 'is', 'None', 'and', 'object_pairs_hook', 'is', 'None', 'and', 'not', 'kw', ')', ':', 'return', '_default_decoder', '.', 'decode', '(', 's', ')', 'if', 'cls', 'is', 'None', ':', 'cls', '=', 'DSONDecoder', 'if', 'object_hook', 'is', 'not', 'None', ':', 'kw', '[', "'object_hook'", ']', '=', 'object_hook', 'if', 'object_pairs_hook', 'is', 'not', 'None', ':', 'kw', '[', "'object_pairs_hook'", ']', '=', 'object_pairs_hook', 'if', 'parse_float', 'is', 'not', 'None', ':', 'kw', '[', "'parse_float'", ']', '=', 'parse_float', 'if', 'parse_int', 'is', 'not', 'None', ':', 'kw', '[', "'parse_int'", ']', '=', 'parse_int', 'if', 'parse_constant', 'is', 'not', 'None', ':', 'kw', '[', "'parse_constant'", ']', '=', 'parse_constant', 'return', 'cls', '(', 'encoding', '=', 'encoding', ',', '*', '*', 'kw', ')', '.', 'decode', '(', 's', ')'] | r"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a DSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. DSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every DSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for DSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every DSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for DSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid DSON numbers
are encountered.
To use a custom ``DSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``DSONDecoder`` is used. | ['r', 'Deserialize', 's', '(', 'a', 'str', 'or', 'unicode', 'instance', 'containing', 'a', 'DSON', 'document', ')', 'to', 'a', 'Python', 'object', '.'] | train | https://github.com/soasme/dogeon/blob/496b9a5b099946d14434ed0cd7a94a270f607207/dson/__init__.py#L296-L354 |
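A hedged usage sketch for `dson.loads` above: the hooks mirror the stdlib `json` API and are passed the same way. The literal DSON text is only a guess at valid syntax and may not parse as written.

    import collections
    import decimal
    import dson

    doc = 'such "answer" is 42 wow'   # hypothetical DSON for {"answer": 42}; syntax unverified
    obj = dson.loads(doc,
                     object_pairs_hook=collections.OrderedDict,  # preserve key order
                     parse_float=decimal.Decimal)                # exact decimals instead of float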
971 | moderngl/moderngl | moderngl/context.py | Context.core_profile_check | def core_profile_check(self) -> None:
'''
Core profile check.
FOR DEBUG PURPOSES ONLY
'''
profile_mask = self.info['GL_CONTEXT_PROFILE_MASK']
if profile_mask != 1:
warnings.warn('The window should request a CORE OpenGL profile')
version_code = self.version_code
if not version_code:
major, minor = map(int, self.info['GL_VERSION'].split('.', 2)[:2])
version_code = major * 100 + minor * 10
if version_code < 330:
warnings.warn('The window should support OpenGL 3.3+ (version_code=%d)' % version_code) | python | def core_profile_check(self) -> None:
'''
Core profile check.
FOR DEBUG PURPOSES ONLY
'''
profile_mask = self.info['GL_CONTEXT_PROFILE_MASK']
if profile_mask != 1:
warnings.warn('The window should request a CORE OpenGL profile')
version_code = self.version_code
if not version_code:
major, minor = map(int, self.info['GL_VERSION'].split('.', 2)[:2])
version_code = major * 100 + minor * 10
if version_code < 330:
warnings.warn('The window should support OpenGL 3.3+ (version_code=%d)' % version_code) | ['def', 'core_profile_check', '(', 'self', ')', '->', 'None', ':', 'profile_mask', '=', 'self', '.', 'info', '[', "'GL_CONTEXT_PROFILE_MASK'", ']', 'if', 'profile_mask', '!=', '1', ':', 'warnings', '.', 'warn', '(', "'The window should request a CORE OpenGL profile'", ')', 'version_code', '=', 'self', '.', 'version_code', 'if', 'not', 'version_code', ':', 'major', ',', 'minor', '=', 'map', '(', 'int', ',', 'self', '.', 'info', '[', "'GL_VERSION'", ']', '.', 'split', '(', "'.'", ',', '2', ')', '[', ':', '2', ']', ')', 'version_code', '=', 'major', '*', '100', '+', 'minor', '*', '10', 'if', 'version_code', '<', '330', ':', 'warnings', '.', 'warn', '(', "'The window should support OpenGL 3.3+ (version_code=%d)'", '%', 'version_code', ')'] | Core profile check.
FOR DEBUG PURPOSES ONLY | ['Core', 'profile', 'check', '.'] | train | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L1072-L1089 |
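A short sketch of exercising the debug helper above; it assumes `moderngl.create_standalone_context()` can reach a working OpenGL driver on the machine running it.

    import moderngl

    ctx = moderngl.create_standalone_context()   # requires an OpenGL-capable driver
    ctx.core_profile_check()                     # warns unless a 3.3+ core profile was obtained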
972 | vtkiorg/vtki | vtki/grid.py | UniformGrid._from_specs | def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)):
"""
Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis
"""
xn, yn, zn = dims[0], dims[1], dims[2]
xs, ys, zs = spacing[0], spacing[1], spacing[2]
xo, yo, zo = origin[0], origin[1], origin[2]
self.SetDimensions(xn, yn, zn)
self.SetOrigin(xo, yo, zo)
self.SetSpacing(xs, ys, zs) | python | def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)):
"""
Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis
"""
xn, yn, zn = dims[0], dims[1], dims[2]
xs, ys, zs = spacing[0], spacing[1], spacing[2]
xo, yo, zo = origin[0], origin[1], origin[2]
self.SetDimensions(xn, yn, zn)
self.SetOrigin(xo, yo, zo)
self.SetSpacing(xs, ys, zs) | ['def', '_from_specs', '(', 'self', ',', 'dims', ',', 'spacing', '=', '(', '1.0', ',', '1.0', ',', '1.0', ')', ',', 'origin', '=', '(', '0.0', ',', '0.0', ',', '0.0', ')', ')', ':', 'xn', ',', 'yn', ',', 'zn', '=', 'dims', '[', '0', ']', ',', 'dims', '[', '1', ']', ',', 'dims', '[', '2', ']', 'xs', ',', 'ys', ',', 'zs', '=', 'spacing', '[', '0', ']', ',', 'spacing', '[', '1', ']', ',', 'spacing', '[', '2', ']', 'xo', ',', 'yo', ',', 'zo', '=', 'origin', '[', '0', ']', ',', 'origin', '[', '1', ']', ',', 'origin', '[', '2', ']', 'self', '.', 'SetDimensions', '(', 'xn', ',', 'yn', ',', 'zn', ')', 'self', '.', 'SetOrigin', '(', 'xo', ',', 'yo', ',', 'zo', ')', 'self', '.', 'SetSpacing', '(', 'xs', ',', 'ys', ',', 'zs', ')'] | Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis | ['Create', 'VTK', 'image', 'data', 'directly', 'from', 'numpy', 'arrays', '.', 'A', 'uniform', 'grid', 'is', 'defined', 'by', 'the', 'node', 'spacings', 'for', 'each', 'axis', '(', 'uniform', 'along', 'each', 'individual', 'axis', ')', 'and', 'the', 'number', 'of', 'nodes', 'on', 'each', 'axis', '.', 'These', 'are', 'relative', 'to', 'a', 'specified', 'origin', '(', 'default', 'is', '(', '0', '.', '0', '0', '.', '0', '0', '.', '0', ')', ')', '.'] | train | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/grid.py#L369-L392 |
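`_from_specs` is a private helper; the sketch below assumes the public `UniformGrid` constructor forwards a (dims, spacing, origin) triple to it, which is inferred from the signature above rather than documented here.

    import vtki

    # 10 x 10 x 5 nodes, 1.0 spacing in x/y and 2.0 in z, anchored at the origin (assumed constructor form)
    grid = vtki.UniformGrid((10, 10, 5), (1.0, 1.0, 2.0), (0.0, 0.0, 0.0))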
973 | datacats/datacats | datacats/environment.py | Environment.data_complete | def data_complete(self):
"""
Return True if all the expected datadir files are present
"""
return task.data_complete(self.datadir, self.sitedir,
self._get_container_name) | python | def data_complete(self):
"""
Return True if all the expected datadir files are present
"""
return task.data_complete(self.datadir, self.sitedir,
self._get_container_name) | ['def', 'data_complete', '(', 'self', ')', ':', 'return', 'task', '.', 'data_complete', '(', 'self', '.', 'datadir', ',', 'self', '.', 'sitedir', ',', 'self', '.', '_get_container_name', ')'] | Return True if all the expected datadir files are present | ['Return', 'True', 'if', 'all', 'the', 'expected', 'datadir', 'files', 'are', 'present'] | train | https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/environment.py#L182-L187 |
974 | foremast/foremast | src/foremast/consts.py | extract_formats | def extract_formats(config_handle):
"""Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``.
"""
configurations = dict(config_handle)
formats = dict(configurations.get('formats', {}))
return formats | python | def extract_formats(config_handle):
"""Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``.
"""
configurations = dict(config_handle)
formats = dict(configurations.get('formats', {}))
return formats | ['def', 'extract_formats', '(', 'config_handle', ')', ':', 'configurations', '=', 'dict', '(', 'config_handle', ')', 'formats', '=', 'dict', '(', 'configurations', '.', 'get', '(', "'formats'", ',', '{', '}', ')', ')', 'return', 'formats'] | Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``. | ['Get', 'application', 'formats', '.'] | train | https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/consts.py#L77-L91 |
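A minimal sketch of feeding `extract_formats` a ConfigParser: the `[formats]` section name comes from the code above, while the individual option names are hypothetical placeholders for whatever `gogoutils.Formats` accepts.

    import configparser
    from foremast.consts import extract_formats

    config = configparser.ConfigParser()
    config.read_string('[formats]\ndomain = example.com\napp = {project}{repo}\n')
    formats = extract_formats(config)
    # -> {'domain': 'example.com', 'app': '{project}{repo}'}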
975 | Telefonica/toolium | toolium/utils.py | Utils.get_center | def get_center(self, element):
"""Get center coordinates of an element
:param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
:returns: dict with center coordinates
"""
web_element = self.get_web_element(element)
location = web_element.location
size = web_element.size
return {'x': location['x'] + (size['width'] / 2), 'y': location['y'] + (size['height'] / 2)} | python | def get_center(self, element):
"""Get center coordinates of an element
:param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
:returns: dict with center coordinates
"""
web_element = self.get_web_element(element)
location = web_element.location
size = web_element.size
return {'x': location['x'] + (size['width'] / 2), 'y': location['y'] + (size['height'] / 2)} | ['def', 'get_center', '(', 'self', ',', 'element', ')', ':', 'web_element', '=', 'self', '.', 'get_web_element', '(', 'element', ')', 'location', '=', 'web_element', '.', 'location', 'size', '=', 'web_element', '.', 'size', 'return', '{', "'x'", ':', 'location', '[', "'x'", ']', '+', '(', 'size', '[', "'width'", ']', '/', '2', ')', ',', "'y'", ':', 'location', '[', "'y'", ']', '+', '(', 'size', '[', "'height'", ']', '/', '2', ')', '}'] | Get center coordinates of an element
:param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
:returns: dict with center coordinates | ['Get', 'center', 'coordinates', 'of', 'an', 'element'] | train | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L561-L570 |
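The computation above is simply the element's top-left location plus half its size on each axis; a standalone sketch of that arithmetic with made-up numbers:

    location = {'x': 100, 'y': 40}          # top-left corner reported by the driver
    size = {'width': 80, 'height': 20}
    center = {'x': location['x'] + size['width'] / 2,
              'y': location['y'] + size['height'] / 2}
    assert center == {'x': 140.0, 'y': 50.0}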
976 | casacore/python-casacore | casacore/tables/msutil.py | addImagingColumns | def addImagingColumns(msname, ack=True):
""" Add the columns to an MS needed for the casa imager.
It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
It also sets the CHANNEL_SELECTION keyword needed for the older casa
imagers.
A column is not added if already existing.
"""
# numpy is needed
import numpy as np
# Open the MS
t = table(msname, readonly=False, ack=False)
cnames = t.colnames()
# Get the description of the DATA column.
try:
cdesc = t.getcoldesc('DATA')
except:
raise ValueError('Column DATA does not exist')
# Determine if the DATA storage specification is tiled.
hasTiled = False
try:
dminfo = t.getdminfo("DATA")
if dminfo['TYPE'][:5] == 'Tiled':
hasTiled = True
except:
hasTiled = False
# Use TiledShapeStMan if needed.
if not hasTiled:
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [4, 32, 128]}}
# Add the columns(if not existing). Use the description of the DATA column.
if 'MODEL_DATA' in cnames:
six.print_("Column MODEL_DATA not added; it already exists")
else:
dminfo['NAME'] = 'modeldata'
cdesc['comment'] = 'The model data column'
t.addcols(maketabdesc(makecoldesc('MODEL_DATA', cdesc)), dminfo)
if ack:
six.print_("added column MODEL_DATA")
if 'CORRECTED_DATA' in cnames:
six.print_("Column CORRECTED_DATA not added; it already exists")
else:
dminfo['NAME'] = 'correcteddata'
cdesc['comment'] = 'The corrected data column'
t.addcols(maketabdesc(makecoldesc('CORRECTED_DATA', cdesc)), dminfo)
if ack:
six.print_("'added column CORRECTED_DATA")
if 'IMAGING_WEIGHT' in cnames:
six.print_("Column IMAGING_WEIGHT not added; it already exists")
else:
# Add IMAGING_WEIGHT which is 1-dim and has type float.
# It needs a shape, otherwise the CASA imager complains.
shp = []
if 'shape' in cdesc:
shp = cdesc['shape']
if len(shp) > 0:
shp = [shp[0]] # use nchan from shape
else:
shp = [t.getcell('DATA', 0).shape[0]] # use nchan from actual data
cd = makearrcoldesc('IMAGING_WEIGHT', 0, ndim=1, shape=shp,
valuetype='float')
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [32, 128]}}
dminfo['NAME'] = 'imagingweight'
t.addcols(maketabdesc(cd), dminfo)
if ack:
six.print_("added column IMAGING_WEIGHT")
# Add or overwrite keyword CHANNEL_SELECTION.
if 'CHANNEL_SELECTION' in t.colkeywordnames('MODEL_DATA'):
t.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
# Define the CHANNEL_SELECTION keyword containing the channels of
# all spectral windows.
tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
nchans = tspw.getcol('NUM_CHAN')
chans = [[0, nch] for nch in nchans]
t.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION', np.int32(chans))
if ack:
six.print_("defined keyword CHANNEL_SELECTION in column MODEL_DATA")
# Flush the table to make sure it is written.
t.flush() | python | def addImagingColumns(msname, ack=True):
""" Add the columns to an MS needed for the casa imager.
It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
It also sets the CHANNEL_SELECTION keyword needed for the older casa
imagers.
A column is not added if already existing.
"""
# numpy is needed
import numpy as np
# Open the MS
t = table(msname, readonly=False, ack=False)
cnames = t.colnames()
# Get the description of the DATA column.
try:
cdesc = t.getcoldesc('DATA')
except:
raise ValueError('Column DATA does not exist')
# Determine if the DATA storage specification is tiled.
hasTiled = False
try:
dminfo = t.getdminfo("DATA")
if dminfo['TYPE'][:5] == 'Tiled':
hasTiled = True
except:
hasTiled = False
# Use TiledShapeStMan if needed.
if not hasTiled:
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [4, 32, 128]}}
# Add the columns(if not existing). Use the description of the DATA column.
if 'MODEL_DATA' in cnames:
six.print_("Column MODEL_DATA not added; it already exists")
else:
dminfo['NAME'] = 'modeldata'
cdesc['comment'] = 'The model data column'
t.addcols(maketabdesc(makecoldesc('MODEL_DATA', cdesc)), dminfo)
if ack:
six.print_("added column MODEL_DATA")
if 'CORRECTED_DATA' in cnames:
six.print_("Column CORRECTED_DATA not added; it already exists")
else:
dminfo['NAME'] = 'correcteddata'
cdesc['comment'] = 'The corrected data column'
t.addcols(maketabdesc(makecoldesc('CORRECTED_DATA', cdesc)), dminfo)
if ack:
six.print_("'added column CORRECTED_DATA")
if 'IMAGING_WEIGHT' in cnames:
six.print_("Column IMAGING_WEIGHT not added; it already exists")
else:
# Add IMAGING_WEIGHT which is 1-dim and has type float.
# It needs a shape, otherwise the CASA imager complains.
shp = []
if 'shape' in cdesc:
shp = cdesc['shape']
if len(shp) > 0:
shp = [shp[0]] # use nchan from shape
else:
shp = [t.getcell('DATA', 0).shape[0]] # use nchan from actual data
cd = makearrcoldesc('IMAGING_WEIGHT', 0, ndim=1, shape=shp,
valuetype='float')
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [32, 128]}}
dminfo['NAME'] = 'imagingweight'
t.addcols(maketabdesc(cd), dminfo)
if ack:
six.print_("added column IMAGING_WEIGHT")
# Add or overwrite keyword CHANNEL_SELECTION.
if 'CHANNEL_SELECTION' in t.colkeywordnames('MODEL_DATA'):
t.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
# Define the CHANNEL_SELECTION keyword containing the channels of
# all spectral windows.
tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
nchans = tspw.getcol('NUM_CHAN')
chans = [[0, nch] for nch in nchans]
t.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION', np.int32(chans))
if ack:
six.print_("defined keyword CHANNEL_SELECTION in column MODEL_DATA")
# Flush the table to make sure it is written.
t.flush() | ['def', 'addImagingColumns', '(', 'msname', ',', 'ack', '=', 'True', ')', ':', '# numpy is needed', 'import', 'numpy', 'as', 'np', '# Open the MS', 't', '=', 'table', '(', 'msname', ',', 'readonly', '=', 'False', ',', 'ack', '=', 'False', ')', 'cnames', '=', 't', '.', 'colnames', '(', ')', '# Get the description of the DATA column.', 'try', ':', 'cdesc', '=', 't', '.', 'getcoldesc', '(', "'DATA'", ')', 'except', ':', 'raise', 'ValueError', '(', "'Column DATA does not exist'", ')', '# Determine if the DATA storage specification is tiled.', 'hasTiled', '=', 'False', 'try', ':', 'dminfo', '=', 't', '.', 'getdminfo', '(', '"DATA"', ')', 'if', 'dminfo', '[', "'TYPE'", ']', '[', ':', '5', ']', '==', "'Tiled'", ':', 'hasTiled', '=', 'True', 'except', ':', 'hasTiled', '=', 'False', '# Use TiledShapeStMan if needed.', 'if', 'not', 'hasTiled', ':', 'dminfo', '=', '{', "'TYPE'", ':', "'TiledShapeStMan'", ',', "'SPEC'", ':', '{', "'DEFAULTTILESHAPE'", ':', '[', '4', ',', '32', ',', '128', ']', '}', '}', '# Add the columns(if not existing). Use the description of the DATA column.', 'if', "'MODEL_DATA'", 'in', 'cnames', ':', 'six', '.', 'print_', '(', '"Column MODEL_DATA not added; it already exists"', ')', 'else', ':', 'dminfo', '[', "'NAME'", ']', '=', "'modeldata'", 'cdesc', '[', "'comment'", ']', '=', "'The model data column'", 't', '.', 'addcols', '(', 'maketabdesc', '(', 'makecoldesc', '(', "'MODEL_DATA'", ',', 'cdesc', ')', ')', ',', 'dminfo', ')', 'if', 'ack', ':', 'six', '.', 'print_', '(', '"added column MODEL_DATA"', ')', 'if', "'CORRECTED_DATA'", 'in', 'cnames', ':', 'six', '.', 'print_', '(', '"Column CORRECTED_DATA not added; it already exists"', ')', 'else', ':', 'dminfo', '[', "'NAME'", ']', '=', "'correcteddata'", 'cdesc', '[', "'comment'", ']', '=', "'The corrected data column'", 't', '.', 'addcols', '(', 'maketabdesc', '(', 'makecoldesc', '(', "'CORRECTED_DATA'", ',', 'cdesc', ')', ')', ',', 'dminfo', ')', 'if', 'ack', ':', 'six', '.', 'print_', '(', '"\'added column CORRECTED_DATA"', ')', 'if', "'IMAGING_WEIGHT'", 'in', 'cnames', ':', 'six', '.', 'print_', '(', '"Column IMAGING_WEIGHT not added; it already exists"', ')', 'else', ':', '# Add IMAGING_WEIGHT which is 1-dim and has type float.', '# It needs a shape, otherwise the CASA imager complains.', 'shp', '=', '[', ']', 'if', "'shape'", 'in', 'cdesc', ':', 'shp', '=', 'cdesc', '[', "'shape'", ']', 'if', 'len', '(', 'shp', ')', '>', '0', ':', 'shp', '=', '[', 'shp', '[', '0', ']', ']', '# use nchan from shape', 'else', ':', 'shp', '=', '[', 't', '.', 'getcell', '(', "'DATA'", ',', '0', ')', '.', 'shape', '[', '0', ']', ']', '# use nchan from actual data', 'cd', '=', 'makearrcoldesc', '(', "'IMAGING_WEIGHT'", ',', '0', ',', 'ndim', '=', '1', ',', 'shape', '=', 'shp', ',', 'valuetype', '=', "'float'", ')', 'dminfo', '=', '{', "'TYPE'", ':', "'TiledShapeStMan'", ',', "'SPEC'", ':', '{', "'DEFAULTTILESHAPE'", ':', '[', '32', ',', '128', ']', '}', '}', 'dminfo', '[', "'NAME'", ']', '=', "'imagingweight'", 't', '.', 'addcols', '(', 'maketabdesc', '(', 'cd', ')', ',', 'dminfo', ')', 'if', 'ack', ':', 'six', '.', 'print_', '(', '"added column IMAGING_WEIGHT"', ')', '# Add or overwrite keyword CHANNEL_SELECTION.', 'if', "'CHANNEL_SELECTION'", 'in', 't', '.', 'colkeywordnames', '(', "'MODEL_DATA'", ')', ':', 't', '.', 'removecolkeyword', '(', "'MODEL_DATA'", ',', "'CHANNEL_SELECTION'", ')', '# Define the CHANNEL_SELECTION keyword containing the channels of', '# all spectral windows.', 'tspw', '=', 'table', '(', 't', '.', 
'getkeyword', '(', "'SPECTRAL_WINDOW'", ')', ',', 'ack', '=', 'False', ')', 'nchans', '=', 'tspw', '.', 'getcol', '(', "'NUM_CHAN'", ')', 'chans', '=', '[', '[', '0', ',', 'nch', ']', 'for', 'nch', 'in', 'nchans', ']', 't', '.', 'putcolkeyword', '(', "'MODEL_DATA'", ',', "'CHANNEL_SELECTION'", ',', 'np', '.', 'int32', '(', 'chans', ')', ')', 'if', 'ack', ':', 'six', '.', 'print_', '(', '"defined keyword CHANNEL_SELECTION in column MODEL_DATA"', ')', '# Flush the table to make sure it is written.', 't', '.', 'flush', '(', ')'] | Add the columns to an MS needed for the casa imager.
It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
It also sets the CHANNEL_SELECTION keyword needed for the older casa
imagers.
A column is not added if already existing. | ['Add', 'the', 'columns', 'to', 'an', 'MS', 'needed', 'for', 'the', 'casa', 'imager', '.'] | train | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/msutil.py#L48-L128 |
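A usage sketch for `addImagingColumns` above; 'my.ms' is a placeholder path to an existing MeasurementSet that already has a DATA column.

    from casacore.tables import table
    from casacore.tables.msutil import addImagingColumns

    addImagingColumns('my.ms', ack=False)      # columns are added only if missing
    t = table('my.ms', ack=False)
    assert {'MODEL_DATA', 'CORRECTED_DATA', 'IMAGING_WEIGHT'} <= set(t.colnames())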
977 | rstoneback/pysatMagVect | pysatMagVect/satellite.py | add_mag_drifts | def add_mag_drifts(inst):
"""Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place
"""
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return | python | def add_mag_drifts(inst):
"""Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place
"""
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return | ['def', 'add_mag_drifts', '(', 'inst', ')', ':', 'inst', '[', "'iv_zon'", ']', '=', '{', "'data'", ':', 'inst', '[', "'unit_zon_x'", ']', '*', 'inst', '[', "'iv_x'", ']', '+', 'inst', '[', "'unit_zon_y'", ']', '*', 'inst', '[', "'iv_y'", ']', '+', 'inst', '[', "'unit_zon_z'", ']', '*', 'inst', '[', "'iv_z'", ']', ',', "'units'", ':', "'m/s'", ',', "'long_name'", ':', "'Zonal ion velocity'", ',', "'notes'", ':', '(', "'Ion velocity relative to co-rotation along zonal '", "'direction, normal to meridional plane. Positive east. '", "'Velocity obtained using ion velocities relative '", "'to co-rotation in the instrument frame along '", "'with the corresponding unit vectors expressed in '", "'the instrument frame. '", ')', ',', "'label'", ':', "'Zonal Ion Velocity'", ',', "'axis'", ':', "'Zonal Ion Velocity'", ',', "'desc'", ':', "'Zonal ion velocity'", ',', "'scale'", ':', "'Linear'", ',', "'value_min'", ':', '-', '500.', ',', "'value_max'", ':', '500.', '}', 'inst', '[', "'iv_fa'", ']', '=', '{', "'data'", ':', 'inst', '[', "'unit_fa_x'", ']', '*', 'inst', '[', "'iv_x'", ']', '+', 'inst', '[', "'unit_fa_y'", ']', '*', 'inst', '[', "'iv_y'", ']', '+', 'inst', '[', "'unit_fa_z'", ']', '*', 'inst', '[', "'iv_z'", ']', ',', "'units'", ':', "'m/s'", ',', "'long_name'", ':', "'Field-Aligned ion velocity'", ',', "'notes'", ':', '(', "'Ion velocity relative to co-rotation along magnetic field line. Positive along the field. '", ',', "'Velocity obtained using ion velocities relative '", "'to co-rotation in the instrument frame along '", "'with the corresponding unit vectors expressed in '", "'the instrument frame. '", ')', ',', "'label'", ':', "'Field-Aligned Ion Velocity'", ',', "'axis'", ':', "'Field-Aligned Ion Velocity'", ',', "'desc'", ':', "'Field-Aligned Ion Velocity'", ',', "'scale'", ':', "'Linear'", ',', "'value_min'", ':', '-', '500.', ',', "'value_max'", ':', '500.', '}', 'inst', '[', "'iv_mer'", ']', '=', '{', "'data'", ':', 'inst', '[', "'unit_mer_x'", ']', '*', 'inst', '[', "'iv_x'", ']', '+', 'inst', '[', "'unit_mer_y'", ']', '*', 'inst', '[', "'iv_y'", ']', '+', 'inst', '[', "'unit_mer_z'", ']', '*', 'inst', '[', "'iv_z'", ']', ',', "'units'", ':', "'m/s'", ',', "'long_name'", ':', "'Meridional ion velocity'", ',', "'notes'", ':', '(', "'Velocity along meridional direction, perpendicular '", "'to field and within meridional plane. Positive is up at magnetic equator. '", ',', "'Velocity obtained using ion velocities relative '", "'to co-rotation in the instrument frame along '", "'with the corresponding unit vectors expressed in '", "'the instrument frame. '", ')', ',', "'label'", ':', "'Meridional Ion Velocity'", ',', "'axis'", ':', "'Meridional Ion Velocity'", ',', "'desc'", ':', "'Meridional Ion Velocity'", ',', "'scale'", ':', "'Linear'", ',', "'value_min'", ':', '-', '500.', ',', "'value_max'", ':', '500.', '}', 'return'] | Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place | ['Adds', 'ion', 'drifts', 'in', 'magnetic', 'coordinates', 'using', 'ion', 'drifts', 'in', 'S', '/', 'C', 'coordinates', 'along', 'with', 'pre', '-', 'calculated', 'unit', 'vectors', 'for', 'magnetic', 'coordinates', '.', 'Note', '----', 'Requires', 'ion', 'drifts', 'under', 'labels', 'iv_', '*', 'where', '*', '=', '(', 'x', 'y', 'z', ')', 'along', 'with', 'unit', 'vectors', 'labels', 'unit_zonal_', '*', 'unit_fa_', '*', 'and', 'unit_mer_', '*', 'where', 'the', 'unit', 'vectors', 'are', 'expressed', 'in', 'S', '/', 'C', 'coordinates', '.', 'These', 'vectors', 'are', 'calculated', 'by', 'add_mag_drift_unit_vectors', '.', 'Parameters', '----------', 'inst', ':', 'pysat', '.', 'Instrument', 'Instrument', 'object', 'will', 'be', 'modified', 'to', 'include', 'new', 'ion', 'drift', 'magnitudes', 'Returns', '-------', 'None', 'Instrument', 'object', 'modified', 'in', 'place'] | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/satellite.py#L345-L415 |
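The core operation above is a per-sample dot product between the instrument-frame ion velocity and each magnetic unit vector; a numpy-only sketch of the zonal component with made-up values:

    import numpy as np

    iv = np.array([120.0, -35.0, 10.0])    # iv_x, iv_y, iv_z in m/s (made up)
    unit_zon = np.array([0.0, 1.0, 0.0])   # unit_zon_x/y/z, assumed already normalized
    iv_zon = unit_zon @ iv                 # equals unit_zon_x*iv_x + unit_zon_y*iv_y + unit_zon_z*iv_z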
978 | lxyu/pinyin | pinyin/cedict.py | _words_at_the_beginning | def _words_at_the_beginning(word, tree, prefix=""):
'''
We return all portions of the tree corresponding to the beginning
of `word`. This is used recursively, so we pass the prefix so we
can return meaningful words+translations.
'''
l = []
if "" in tree:
l.append([prefix, tree[""]])
if len(word) > 0 and word[0] in tree:
l.extend(_words_at_the_beginning(
word[1:],
tree[word[0]],
prefix=prefix+word[0]
))
return l | python | def _words_at_the_beginning(word, tree, prefix=""):
'''
We return all portions of the tree corresponding to the beginning
of `word`. This is used recursively, so we pass the prefix so we
can return meaningful words+translations.
'''
l = []
if "" in tree:
l.append([prefix, tree[""]])
if len(word) > 0 and word[0] in tree:
l.extend(_words_at_the_beginning(
word[1:],
tree[word[0]],
prefix=prefix+word[0]
))
return l | ['def', '_words_at_the_beginning', '(', 'word', ',', 'tree', ',', 'prefix', '=', '""', ')', ':', 'l', '=', '[', ']', 'if', '""', 'in', 'tree', ':', 'l', '.', 'append', '(', '[', 'prefix', ',', 'tree', '[', '""', ']', ']', ')', 'if', 'len', '(', 'word', ')', '>', '0', 'and', 'word', '[', '0', ']', 'in', 'tree', ':', 'l', '.', 'extend', '(', '_words_at_the_beginning', '(', 'word', '[', '1', ':', ']', ',', 'tree', '[', 'word', '[', '0', ']', ']', ',', 'prefix', '=', 'prefix', '+', 'word', '[', '0', ']', ')', ')', 'return', 'l'] | We return all portions of the tree corresponding to the beginning
of `word`. This is used recursively, so we pass the prefix so we
can return meaningful words+translations. | ['We', 'return', 'all', 'portions', 'of', 'the', 'tree', 'corresponding', 'to', 'the', 'beginning', 'of', 'word', '.', 'This', 'is', 'used', 'recursively', 'so', 'we', 'pass', 'the', 'prefix', 'so', 'we', 'can', 'return', 'meaningful', 'words', '+', 'translations', '.'] | train | https://github.com/lxyu/pinyin/blob/f9cac5902b0cfaf91d93af633dfc75a51d2bf0cd/pinyin/cedict.py#L104-L119 |
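A tiny sketch of the helper above with a hand-built tree in the shape the code implies (nested dicts keyed by single characters, an empty-string key marking a complete dictionary entry); it assumes the function defined above is in scope.

    tree = {'': 'first translation', 'b': {'': 'second translation'}}
    # the caller has already consumed the prefix 'a'; 'bc' is the remainder of the word
    print(_words_at_the_beginning('bc', tree, prefix='a'))
    # -> [['a', 'first translation'], ['ab', 'second translation']]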
979 | fprimex/zdesk | zdesk/zdesk_api.py | ZendeskAPI.help_center_categories_list | def help_center_categories_list(self, locale=None, sort_by=None, sort_order=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/categories#list-categories"
api_path = "/api/v2/help_center/categories.json"
if locale:
api_opt_path = "/api/v2/help_center/{locale}/categories.json"
api_path = api_opt_path.format(locale=locale)
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if sort_by:
api_query.update({
"sort_by": sort_by,
})
if sort_order:
api_query.update({
"sort_order": sort_order,
})
return self.call(api_path, query=api_query, **kwargs) | python | def help_center_categories_list(self, locale=None, sort_by=None, sort_order=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/categories#list-categories"
api_path = "/api/v2/help_center/categories.json"
if locale:
api_opt_path = "/api/v2/help_center/{locale}/categories.json"
api_path = api_opt_path.format(locale=locale)
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if sort_by:
api_query.update({
"sort_by": sort_by,
})
if sort_order:
api_query.update({
"sort_order": sort_order,
})
return self.call(api_path, query=api_query, **kwargs) | ['def', 'help_center_categories_list', '(', 'self', ',', 'locale', '=', 'None', ',', 'sort_by', '=', 'None', ',', 'sort_order', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'api_path', '=', '"/api/v2/help_center/categories.json"', 'if', 'locale', ':', 'api_opt_path', '=', '"/api/v2/help_center/{locale}/categories.json"', 'api_path', '=', 'api_opt_path', '.', 'format', '(', 'locale', '=', 'locale', ')', 'api_query', '=', '{', '}', 'if', '"query"', 'in', 'kwargs', '.', 'keys', '(', ')', ':', 'api_query', '.', 'update', '(', 'kwargs', '[', '"query"', ']', ')', 'del', 'kwargs', '[', '"query"', ']', 'if', 'sort_by', ':', 'api_query', '.', 'update', '(', '{', '"sort_by"', ':', 'sort_by', ',', '}', ')', 'if', 'sort_order', ':', 'api_query', '.', 'update', '(', '{', '"sort_order"', ':', 'sort_order', ',', '}', ')', 'return', 'self', '.', 'call', '(', 'api_path', ',', 'query', '=', 'api_query', ',', '*', '*', 'kwargs', ')'] | https://developer.zendesk.com/rest_api/docs/help_center/categories#list-categories | ['https', ':', '//', 'developer', '.', 'zendesk', '.', 'com', '/', 'rest_api', '/', 'docs', '/', 'help_center', '/', 'categories#list', '-', 'categories'] | train | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1686-L1704 |
980 | PierreRust/apigpio | apigpio/apigpio.py | Pi.set_noise_filter | def set_noise_filter(self, user_gpio, steady, active):
"""
Sets a noise filter on a GPIO.
Level changes on the GPIO are ignored until a level which has
been stable for [*steady*] microseconds is detected. Level
changes on the GPIO are then reported for [*active*]
microseconds after which the process repeats.
user_gpio:= 0-31
steady:= 0-300000
active:= 0-1000000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Level changes before and after the active period may
be reported. Your software must be designed to cope with
such reports.
...
pi.set_noise_filter(23, 1000, 5000)
...
"""
# pigpio message format
# I p1 user_gpio
# I p2 steady
# I p3 4
## extension ##
# I active
extents = [struct.pack("I", active)]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_FN, user_gpio,
steady, 4, extents)
return _u2i(res) | python | def set_noise_filter(self, user_gpio, steady, active):
"""
Sets a noise filter on a GPIO.
Level changes on the GPIO are ignored until a level which has
been stable for [*steady*] microseconds is detected. Level
changes on the GPIO are then reported for [*active*]
microseconds after which the process repeats.
user_gpio:= 0-31
steady:= 0-300000
active:= 0-1000000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Level changes before and after the active period may
be reported. Your software must be designed to cope with
such reports.
...
pi.set_noise_filter(23, 1000, 5000)
...
"""
# pigpio message format
# I p1 user_gpio
# I p2 steady
# I p3 4
## extension ##
# I active
extents = [struct.pack("I", active)]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_FN, user_gpio,
steady, 4, extents)
return _u2i(res) | ['def', 'set_noise_filter', '(', 'self', ',', 'user_gpio', ',', 'steady', ',', 'active', ')', ':', '# pigpio message format', '# I p1 user_gpio', '# I p2 steady', '# I p3 4', '## extension ##', '# I active', 'extents', '=', '[', 'struct', '.', 'pack', '(', '"I"', ',', 'active', ')', ']', 'res', '=', 'yield', 'from', 'self', '.', '_pigpio_aio_command_ext', '(', '_PI_CMD_FN', ',', 'user_gpio', ',', 'steady', ',', '4', ',', 'extents', ')', 'return', '_u2i', '(', 'res', ')'] | Sets a noise filter on a GPIO.
Level changes on the GPIO are ignored until a level which has
been stable for [*steady*] microseconds is detected. Level
changes on the GPIO are then reported for [*active*]
microseconds after which the process repeats.
user_gpio:= 0-31
steady:= 0-300000
active:= 0-1000000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Level changes before and after the active period may
be reported. Your software must be designed to cope with
such reports.
...
pi.set_noise_filter(23, 1000, 5000)
... | ['Sets', 'a', 'noise', 'filter', 'on', 'a', 'GPIO', '.', 'Level', 'changes', 'on', 'the', 'GPIO', 'are', 'ignored', 'until', 'a', 'level', 'which', 'has', 'been', 'stable', 'for', '[', '*', 'steady', '*', ']', 'microseconds', 'is', 'detected', '.', 'Level', 'changes', 'on', 'the', 'GPIO', 'are', 'then', 'reported', 'for', '[', '*', 'active', '*', ']', 'microseconds', 'after', 'which', 'the', 'process', 'repeats', '.', 'user_gpio', ':', '=', '0', '-', '31', 'steady', ':', '=', '0', '-', '300000', 'active', ':', '=', '0', '-', '1000000', 'Returns', '0', 'if', 'OK', 'otherwise', 'PI_BAD_USER_GPIO', 'or', 'PI_BAD_FILTER', '.', 'This', 'filter', 'affects', 'the', 'GPIO', 'samples', 'returned', 'to', 'callbacks', 'set', 'up', 'with', '[', '*', 'callback', '*', ']', 'and', '[', '*', 'wait_for_edge', '*', ']', '.', 'It', 'does', 'not', 'affect', 'levels', 'read', 'by', '[', '*', 'read', '*', ']', '[', '*', 'read_bank_1', '*', ']', 'or', '[', '*', 'read_bank_2', '*', ']', '.', 'Level', 'changes', 'before', 'and', 'after', 'the', 'active', 'period', 'may', 'be', 'reported', '.', 'Your', 'software', 'must', 'be', 'designed', 'to', 'cope', 'with', 'such', 'reports', '.', '...', 'pi', '.', 'set_noise_filter', '(', '23', '1000', '5000', ')', '...'] | train | https://github.com/PierreRust/apigpio/blob/2b969f40e06219b43a43498d8baf87f5935ceab2/apigpio/apigpio.py#L937-L969 |
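apigpio is asyncio based, so `set_noise_filter` above is awaited from a coroutine after connecting to a pigpio daemon; the daemon address, the GPIO number, and the `Pi(loop)` constructor argument are assumptions in this sketch.

    import asyncio
    import apigpio

    async def main(pi):
        await pi.connect(('192.168.1.3', 8888))    # address of a running pigpiod (placeholder)
        await pi.set_noise_filter(23, 1000, 5000)  # ignore changes not stable for 1 ms, report for 5 ms

    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(apigpio.Pi(loop)))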
981 | angr/angr | angr/state_plugins/solver.py | SimSolver.simplify | def simplify(self, e=None):
"""
Simplifies `e`. If `e` is None, simplifies the constraints of this
state.
"""
if e is None:
return self._solver.simplify()
elif isinstance(e, (int, float, bool)):
return e
elif isinstance(e, claripy.ast.Base) and e.op in claripy.operations.leaf_operations_concrete:
return e
elif isinstance(e, SimActionObject) and e.op in claripy.operations.leaf_operations_concrete:
return e.ast
elif not isinstance(e, (SimActionObject, claripy.ast.Base)):
return e
else:
return self._claripy_simplify(e) | python | def simplify(self, e=None):
"""
Simplifies `e`. If `e` is None, simplifies the constraints of this
state.
"""
if e is None:
return self._solver.simplify()
elif isinstance(e, (int, float, bool)):
return e
elif isinstance(e, claripy.ast.Base) and e.op in claripy.operations.leaf_operations_concrete:
return e
elif isinstance(e, SimActionObject) and e.op in claripy.operations.leaf_operations_concrete:
return e.ast
elif not isinstance(e, (SimActionObject, claripy.ast.Base)):
return e
else:
return self._claripy_simplify(e) | ['def', 'simplify', '(', 'self', ',', 'e', '=', 'None', ')', ':', 'if', 'e', 'is', 'None', ':', 'return', 'self', '.', '_solver', '.', 'simplify', '(', ')', 'elif', 'isinstance', '(', 'e', ',', '(', 'int', ',', 'float', ',', 'bool', ')', ')', ':', 'return', 'e', 'elif', 'isinstance', '(', 'e', ',', 'claripy', '.', 'ast', '.', 'Base', ')', 'and', 'e', '.', 'op', 'in', 'claripy', '.', 'operations', '.', 'leaf_operations_concrete', ':', 'return', 'e', 'elif', 'isinstance', '(', 'e', ',', 'SimActionObject', ')', 'and', 'e', '.', 'op', 'in', 'claripy', '.', 'operations', '.', 'leaf_operations_concrete', ':', 'return', 'e', '.', 'ast', 'elif', 'not', 'isinstance', '(', 'e', ',', '(', 'SimActionObject', ',', 'claripy', '.', 'ast', '.', 'Base', ')', ')', ':', 'return', 'e', 'else', ':', 'return', 'self', '.', '_claripy_simplify', '(', 'e', ')'] | Simplifies `e`. If `e` is None, simplifies the constraints of this
state. | ['Simplifies', 'e', '.', 'If', 'e', 'is', 'None', 'simplifies', 'the', 'constraints', 'of', 'this', 'state', '.'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/solver.py#L850-L866 |
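A sketch of calling the solver plugin above from a state; `/bin/true` is just a convenient binary to load, and the exact simplified form claripy returns is not guaranteed.

    import angr
    import claripy

    proj = angr.Project('/bin/true', auto_load_libs=False)
    state = proj.factory.blank_state()
    x = claripy.BVS('x', 32)
    print(state.solver.simplify((x + 0) | 0))   # typically folds back to x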
982 | mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.time_to_first_byte | def time_to_first_byte(self):
"""
The aggregate time to first byte for all pages.
"""
ttfb = []
for page in self.pages:
if page.time_to_first_byte is not None:
ttfb.append(page.time_to_first_byte)
return round(mean(ttfb), self.decimal_precision) | python | def time_to_first_byte(self):
"""
The aggregate time to first byte for all pages.
"""
ttfb = []
for page in self.pages:
if page.time_to_first_byte is not None:
ttfb.append(page.time_to_first_byte)
return round(mean(ttfb), self.decimal_precision) | ['def', 'time_to_first_byte', '(', 'self', ')', ':', 'ttfb', '=', '[', ']', 'for', 'page', 'in', 'self', '.', 'pages', ':', 'if', 'page', '.', 'time_to_first_byte', 'is', 'not', 'None', ':', 'ttfb', '.', 'append', '(', 'page', '.', 'time_to_first_byte', ')', 'return', 'round', '(', 'mean', '(', 'ttfb', ')', ',', 'self', '.', 'decimal_precision', ')'] | The aggregate time to first byte for all pages. | ['The', 'aggregate', 'time', 'to', 'first', 'byte', 'for', 'all', 'pages', '.'] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L105-L113 |
983 | latchset/jwcrypto | jwcrypto/jwk.py | JWK.from_pem | def from_pem(cls, data, password=None):
"""Creates a key from PKCS#8 formatted data loaded from a PEM file.
See the function `import_from_pem` for details.
:param data(bytes): The data contained in a PEM file.
:param password(bytes): An optional password to unwrap the key.
"""
obj = cls()
obj.import_from_pem(data, password)
return obj | python | def from_pem(cls, data, password=None):
"""Creates a key from PKCS#8 formatted data loaded from a PEM file.
See the function `import_from_pem` for details.
:param data(bytes): The data contained in a PEM file.
:param password(bytes): An optional password to unwrap the key.
"""
obj = cls()
obj.import_from_pem(data, password)
return obj | ['def', 'from_pem', '(', 'cls', ',', 'data', ',', 'password', '=', 'None', ')', ':', 'obj', '=', 'cls', '(', ')', 'obj', '.', 'import_from_pem', '(', 'data', ',', 'password', ')', 'return', 'obj'] | Creates a key from PKCS#8 formatted data loaded from a PEM file.
See the function `import_from_pem` for details.
:param data(bytes): The data contained in a PEM file.
:param password(bytes): An optional password to unwrap the key. | ['Creates', 'a', 'key', 'from', 'PKCS#8', 'formatted', 'data', 'loaded', 'from', 'a', 'PEM', 'file', '.', 'See', 'the', 'function', 'import_from_pem', 'for', 'details', '.'] | train | https://github.com/latchset/jwcrypto/blob/961df898dc08f63fe3d900f2002618740bc66b4a/jwcrypto/jwk.py#L837-L846 |
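A round-trip sketch for `from_pem` above; the `generate` and `export_to_pem` calls follow the rest of the JWK API, but treat their exact signatures as assumptions here.

    from jwcrypto.jwk import JWK

    key = JWK.generate(kty='EC', crv='P-256')
    pem = key.export_to_pem(private_key=True, password=b'secret')   # PKCS#8 PEM, encrypted
    restored = JWK.from_pem(pem, password=b'secret')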
984 | bunq/sdk_python | bunq/sdk/security.py | _add_header_client_encryption_hmac | def _add_header_client_encryption_hmac(request_bytes, key, iv, custom_headers):
"""
:type request_bytes: bytes
:type key: bytes
:type iv: bytes
:type custom_headers: dict[str, str]
:rtype: None
"""
hashed = hmac.new(key, iv + request_bytes, sha1)
hashed_base64 = base64.b64encode(hashed.digest()).decode()
custom_headers[_HEADER_CLIENT_ENCRYPTION_HMAC] = hashed_base64 | python | def _add_header_client_encryption_hmac(request_bytes, key, iv, custom_headers):
"""
:type request_bytes: bytes
:type key: bytes
:type iv: bytes
:type custom_headers: dict[str, str]
:rtype: None
"""
hashed = hmac.new(key, iv + request_bytes, sha1)
hashed_base64 = base64.b64encode(hashed.digest()).decode()
custom_headers[_HEADER_CLIENT_ENCRYPTION_HMAC] = hashed_base64 | ['def', '_add_header_client_encryption_hmac', '(', 'request_bytes', ',', 'key', ',', 'iv', ',', 'custom_headers', ')', ':', 'hashed', '=', 'hmac', '.', 'new', '(', 'key', ',', 'iv', '+', 'request_bytes', ',', 'sha1', ')', 'hashed_base64', '=', 'base64', '.', 'b64encode', '(', 'hashed', '.', 'digest', '(', ')', ')', '.', 'decode', '(', ')', 'custom_headers', '[', '_HEADER_CLIENT_ENCRYPTION_HMAC', ']', '=', 'hashed_base64'] | :type request_bytes: bytes
:type key: bytes
:type iv: bytes
:type custom_headers: dict[str, str]
:rtype: None | [':', 'type', 'request_bytes', ':', 'bytes', ':', 'type', 'key', ':', 'bytes', ':', 'type', 'iv', ':', 'bytes', ':', 'type', 'custom_headers', ':', 'dict', '[', 'str', 'str', ']'] | train | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/security.py#L221-L233 |
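The helper above is just HMAC-SHA1 over iv + request body, base64-encoded into a header; a self-contained sketch with made-up key/IV bytes, where the header name is an assumed value for the SDK's _HEADER_CLIENT_ENCRYPTION_HMAC constant:

import base64
import hmac
from hashlib import sha1

HEADER_CLIENT_ENCRYPTION_HMAC = "X-Bunq-Client-Encryption-Hmac"  # assumed constant value

key = b"0" * 32                        # illustrative session key bytes
iv = b"1" * 16                         # illustrative initialization vector
request_bytes = b'{"amount": "10.00"}'
custom_headers = {}

hashed = hmac.new(key, iv + request_bytes, sha1)
custom_headers[HEADER_CLIENT_ENCRYPTION_HMAC] = base64.b64encode(hashed.digest()).decode()
print(custom_headers)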
985 | saltstack/salt | salt/wheel/pillar_roots.py | list_env | def list_env(saltenv='base'):
'''
Return all of the file paths found in an environment
'''
ret = {}
if saltenv not in __opts__['pillar_roots']:
return ret
for f_root in __opts__['pillar_roots'][saltenv]:
ret[f_root] = {}
for root, dirs, files in salt.utils.path.os_walk(f_root):
sub = ret[f_root]
if root != f_root:
# grab subroot ref
sroot = root
above = []
# Populate the above dict
while not os.path.samefile(sroot, f_root):
base = os.path.basename(sroot)
if base:
above.insert(0, base)
sroot = os.path.dirname(sroot)
for aroot in above:
sub = sub[aroot]
for dir_ in dirs:
sub[dir_] = {}
for fn_ in files:
sub[fn_] = 'f'
return ret | python | def list_env(saltenv='base'):
'''
Return all of the file paths found in an environment
'''
ret = {}
if saltenv not in __opts__['pillar_roots']:
return ret
for f_root in __opts__['pillar_roots'][saltenv]:
ret[f_root] = {}
for root, dirs, files in salt.utils.path.os_walk(f_root):
sub = ret[f_root]
if root != f_root:
# grab subroot ref
sroot = root
above = []
# Populate the above dict
while not os.path.samefile(sroot, f_root):
base = os.path.basename(sroot)
if base:
above.insert(0, base)
sroot = os.path.dirname(sroot)
for aroot in above:
sub = sub[aroot]
for dir_ in dirs:
sub[dir_] = {}
for fn_ in files:
sub[fn_] = 'f'
return ret | ['def', 'list_env', '(', 'saltenv', '=', "'base'", ')', ':', 'ret', '=', '{', '}', 'if', 'saltenv', 'not', 'in', '__opts__', '[', "'pillar_roots'", ']', ':', 'return', 'ret', 'for', 'f_root', 'in', '__opts__', '[', "'pillar_roots'", ']', '[', 'saltenv', ']', ':', 'ret', '[', 'f_root', ']', '=', '{', '}', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'salt', '.', 'utils', '.', 'path', '.', 'os_walk', '(', 'f_root', ')', ':', 'sub', '=', 'ret', '[', 'f_root', ']', 'if', 'root', '!=', 'f_root', ':', '# grab subroot ref', 'sroot', '=', 'root', 'above', '=', '[', ']', '# Populate the above dict', 'while', 'not', 'os', '.', 'path', '.', 'samefile', '(', 'sroot', ',', 'f_root', ')', ':', 'base', '=', 'os', '.', 'path', '.', 'basename', '(', 'sroot', ')', 'if', 'base', ':', 'above', '.', 'insert', '(', '0', ',', 'base', ')', 'sroot', '=', 'os', '.', 'path', '.', 'dirname', '(', 'sroot', ')', 'for', 'aroot', 'in', 'above', ':', 'sub', '=', 'sub', '[', 'aroot', ']', 'for', 'dir_', 'in', 'dirs', ':', 'sub', '[', 'dir_', ']', '=', '{', '}', 'for', 'fn_', 'in', 'files', ':', 'sub', '[', 'fn_', ']', '=', "'f'", 'return', 'ret'] | Return all of the file paths found in an environment | ['Return', 'all', 'of', 'the', 'file', 'paths', 'found', 'in', 'an', 'environment'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/wheel/pillar_roots.py#L39-L66 |
986 | sorgerlab/indra | indra/statements/resources.py | get_valid_location | def get_valid_location(location):
"""Check if the given location represents a valid cellular component."""
# If we're given None, return None
if location is not None and cellular_components.get(location) is None:
loc = cellular_components_reverse.get(location)
if loc is None:
raise InvalidLocationError(location)
else:
return loc
return location | python | def get_valid_location(location):
"""Check if the given location represents a valid cellular component."""
# If we're given None, return None
if location is not None and cellular_components.get(location) is None:
loc = cellular_components_reverse.get(location)
if loc is None:
raise InvalidLocationError(location)
else:
return loc
return location | ['def', 'get_valid_location', '(', 'location', ')', ':', "# If we're given None, return None", 'if', 'location', 'is', 'not', 'None', 'and', 'cellular_components', '.', 'get', '(', 'location', ')', 'is', 'None', ':', 'loc', '=', 'cellular_components_reverse', '.', 'get', '(', 'location', ')', 'if', 'loc', 'is', 'None', ':', 'raise', 'InvalidLocationError', '(', 'location', ')', 'else', ':', 'return', 'loc', 'return', 'location'] | Check if the given location represents a valid cellular component. | ['Check', 'if', 'the', 'given', 'location', 'represents', 'a', 'valid', 'cellular', 'component', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/statements/resources.py#L26-L35 |
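A standalone sketch of the same lookup logic with toy mappings; INDRA loads the real cellular_components tables from its resource files and raises InvalidLocationError rather than ValueError:

# toy stand-ins for INDRA's resource-backed mappings
cellular_components = {"GO:0005634": "nucleus"}          # identifier -> name
cellular_components_reverse = {"nucleus": "GO:0005634"}  # name -> identifier

def get_valid_location(location):
    if location is not None and cellular_components.get(location) is None:
        loc = cellular_components_reverse.get(location)
        if loc is None:
            raise ValueError("invalid location: %s" % location)  # InvalidLocationError in the real module
        return loc
    return location

print(get_valid_location("GO:0005634"))  # already a known identifier, returned unchanged
print(get_valid_location("nucleus"))     # name resolved to 'GO:0005634'
print(get_valid_location(None))          # None passes through as None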
987 | DLR-RM/RAFCON | source/rafcon/core/start.py | parse_state_machine_path | def parse_state_machine_path(path):
"""Parser for argparse checking pfor a proper state machine path
:param str path: Input path from the user
:return: The path
:raises argparse.ArgumentTypeError: if the path does not contain a statemachine.json file
"""
sm_root_file = join(path, storage.STATEMACHINE_FILE)
if exists(sm_root_file):
return path
else:
sm_root_file = join(path, storage.STATEMACHINE_FILE_OLD)
if exists(sm_root_file):
return path
raise argparse.ArgumentTypeError("Failed to open {0}: {1} not found in path".format(path,
storage.STATEMACHINE_FILE)) | python | def parse_state_machine_path(path):
"""Parser for argparse checking pfor a proper state machine path
:param str path: Input path from the user
:return: The path
:raises argparse.ArgumentTypeError: if the path does not contain a statemachine.json file
"""
sm_root_file = join(path, storage.STATEMACHINE_FILE)
if exists(sm_root_file):
return path
else:
sm_root_file = join(path, storage.STATEMACHINE_FILE_OLD)
if exists(sm_root_file):
return path
raise argparse.ArgumentTypeError("Failed to open {0}: {1} not found in path".format(path,
storage.STATEMACHINE_FILE)) | ['def', 'parse_state_machine_path', '(', 'path', ')', ':', 'sm_root_file', '=', 'join', '(', 'path', ',', 'storage', '.', 'STATEMACHINE_FILE', ')', 'if', 'exists', '(', 'sm_root_file', ')', ':', 'return', 'path', 'else', ':', 'sm_root_file', '=', 'join', '(', 'path', ',', 'storage', '.', 'STATEMACHINE_FILE_OLD', ')', 'if', 'exists', '(', 'sm_root_file', ')', ':', 'return', 'path', 'raise', 'argparse', '.', 'ArgumentTypeError', '(', '"Failed to open {0}: {1} not found in path"', '.', 'format', '(', 'path', ',', 'storage', '.', 'STATEMACHINE_FILE', ')', ')'] | Parser for argparse checking pfor a proper state machine path
:param str path: Input path from the user
:return: The path
:raises argparse.ArgumentTypeError: if the path does not contain a statemachine.json file | ['Parser', 'for', 'argparse', 'checking', 'pfor', 'a', 'proper', 'state', 'machine', 'path'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/start.py#L98-L113 |
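The function above is designed to be used as an argparse type=; a hedged sketch of that wiring, assuming the marker file is named statemachine.json as the docstring suggests:

import argparse
from os.path import exists, join

STATEMACHINE_FILE = "statemachine.json"  # assumed value of storage.STATEMACHINE_FILE

def parse_state_machine_path(path):
    if exists(join(path, STATEMACHINE_FILE)):
        return path
    raise argparse.ArgumentTypeError(
        "Failed to open {0}: {1} not found in path".format(path, STATEMACHINE_FILE))

parser = argparse.ArgumentParser()
parser.add_argument("paths", nargs="*", type=parse_state_machine_path)
# parser.parse_args(["/tmp/my_state_machine"])  # errors unless that folder contains statemachine.json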
988 | spyder-ide/spyder | spyder/widgets/comboboxes.py | is_module_or_package | def is_module_or_package(path):
"""Return True if path is a Python module/package"""
is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw')
is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))
return is_module or is_package | python | def is_module_or_package(path):
"""Return True if path is a Python module/package"""
is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw')
is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))
return is_module or is_package | ['def', 'is_module_or_package', '(', 'path', ')', ':', 'is_module', '=', 'osp', '.', 'isfile', '(', 'path', ')', 'and', 'osp', '.', 'splitext', '(', 'path', ')', '[', '1', ']', 'in', '(', "'.py'", ',', "'.pyw'", ')', 'is_package', '=', 'osp', '.', 'isdir', '(', 'path', ')', 'and', 'osp', '.', 'isfile', '(', 'osp', '.', 'join', '(', 'path', ',', "'__init__.py'", ')', ')', 'return', 'is_module', 'or', 'is_package'] | Return True if path is a Python module/package | ['Return', 'True', 'if', 'path', 'is', 'a', 'Python', 'module', '/', 'package'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/comboboxes.py#L336-L340 |
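A quick check of is_module_or_package against a throwaway package created on the fly; nothing Spyder-specific is required:

import os
import os.path as osp
import tempfile

def is_module_or_package(path):
    is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw')
    is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))
    return is_module or is_package

tmp = tempfile.mkdtemp()
pkg = osp.join(tmp, "mypkg")
os.makedirs(pkg)
open(osp.join(pkg, "__init__.py"), "w").close()

print(is_module_or_package(pkg))                           # True: directory with __init__.py
print(is_module_or_package(osp.join(pkg, "__init__.py")))  # True: .py file
print(is_module_or_package(tmp))                           # False: plain directory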
989 | log2timeline/plaso | plaso/storage/sqlite/sqlite_file.py | SQLiteStorageFile.GetSortedEvents | def GetSortedEvents(self, time_range=None):
"""Retrieves the events in increasing chronological order.
Args:
time_range (Optional[TimeRange]): time range used to filter events
that fall in a specific period.
Yield:
EventObject: event.
"""
filter_expression = None
if time_range:
filter_expression = []
if time_range.start_timestamp:
filter_expression.append(
'_timestamp >= {0:d}'.format(time_range.start_timestamp))
if time_range.end_timestamp:
filter_expression.append(
'_timestamp <= {0:d}'.format(time_range.end_timestamp))
filter_expression = ' AND '.join(filter_expression)
event_generator = self._GetAttributeContainers(
self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression,
order_by='_timestamp')
for event in event_generator:
if hasattr(event, 'event_data_row_identifier'):
event_data_identifier = identifiers.SQLTableIdentifier(
'event_data', event.event_data_row_identifier)
event.SetEventDataIdentifier(event_data_identifier)
del event.event_data_row_identifier
yield event | python | def GetSortedEvents(self, time_range=None):
"""Retrieves the events in increasing chronological order.
Args:
time_range (Optional[TimeRange]): time range used to filter events
that fall in a specific period.
Yield:
EventObject: event.
"""
filter_expression = None
if time_range:
filter_expression = []
if time_range.start_timestamp:
filter_expression.append(
'_timestamp >= {0:d}'.format(time_range.start_timestamp))
if time_range.end_timestamp:
filter_expression.append(
'_timestamp <= {0:d}'.format(time_range.end_timestamp))
filter_expression = ' AND '.join(filter_expression)
event_generator = self._GetAttributeContainers(
self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression,
order_by='_timestamp')
for event in event_generator:
if hasattr(event, 'event_data_row_identifier'):
event_data_identifier = identifiers.SQLTableIdentifier(
'event_data', event.event_data_row_identifier)
event.SetEventDataIdentifier(event_data_identifier)
del event.event_data_row_identifier
yield event | ['def', 'GetSortedEvents', '(', 'self', ',', 'time_range', '=', 'None', ')', ':', 'filter_expression', '=', 'None', 'if', 'time_range', ':', 'filter_expression', '=', '[', ']', 'if', 'time_range', '.', 'start_timestamp', ':', 'filter_expression', '.', 'append', '(', "'_timestamp >= {0:d}'", '.', 'format', '(', 'time_range', '.', 'start_timestamp', ')', ')', 'if', 'time_range', '.', 'end_timestamp', ':', 'filter_expression', '.', 'append', '(', "'_timestamp <= {0:d}'", '.', 'format', '(', 'time_range', '.', 'end_timestamp', ')', ')', 'filter_expression', '=', "' AND '", '.', 'join', '(', 'filter_expression', ')', 'event_generator', '=', 'self', '.', '_GetAttributeContainers', '(', 'self', '.', '_CONTAINER_TYPE_EVENT', ',', 'filter_expression', '=', 'filter_expression', ',', 'order_by', '=', "'_timestamp'", ')', 'for', 'event', 'in', 'event_generator', ':', 'if', 'hasattr', '(', 'event', ',', "'event_data_row_identifier'", ')', ':', 'event_data_identifier', '=', 'identifiers', '.', 'SQLTableIdentifier', '(', "'event_data'", ',', 'event', '.', 'event_data_row_identifier', ')', 'event', '.', 'SetEventDataIdentifier', '(', 'event_data_identifier', ')', 'del', 'event', '.', 'event_data_row_identifier', 'yield', 'event'] | Retrieves the events in increasing chronological order.
Args:
time_range (Optional[TimeRange]): time range used to filter events
that fall in a specific period.
Yield:
EventObject: event. | ['Retrieves', 'the', 'events', 'in', 'increasing', 'chronological', 'order', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/sqlite_file.py#L920-L956 |
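The filter-string construction in GetSortedEvents can be shown without any storage backend; a standalone sketch with a made-up time range container:

import collections

# stand-in for plaso's TimeRange container
TimeRange = collections.namedtuple("TimeRange", ["start_timestamp", "end_timestamp"])
time_range = TimeRange(start_timestamp=1500000000000000, end_timestamp=1600000000000000)

filter_expression = []
if time_range.start_timestamp:
    filter_expression.append('_timestamp >= {0:d}'.format(time_range.start_timestamp))
if time_range.end_timestamp:
    filter_expression.append('_timestamp <= {0:d}'.format(time_range.end_timestamp))
filter_expression = ' AND '.join(filter_expression)

print(filter_expression)
# _timestamp >= 1500000000000000 AND _timestamp <= 1600000000000000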
990 | mattimck/python-exist | exist/auth.py | ExistAuth.browser_authorize | def browser_authorize(self):
"""
Open a browser to the authorization url and spool up a CherryPy
server to accept the response
"""
url = self.authorize_url()
# Open the web browser in a new thread for command-line browser support
threading.Timer(1, webbrowser.open, args=(url,)).start()
server_config = {
'server.socket_host': '0.0.0.0',
'server.socket_port': 443,
'server.ssl_module': 'pyopenssl',
'server.ssl_certificate': 'tests/files/certificate.cert',
'server.ssl_private_key': 'tests/files/key.key',
}
cherrypy.config.update(server_config)
cherrypy.quickstart(self) | python | def browser_authorize(self):
"""
Open a browser to the authorization url and spool up a CherryPy
server to accept the response
"""
url = self.authorize_url()
# Open the web browser in a new thread for command-line browser support
threading.Timer(1, webbrowser.open, args=(url,)).start()
server_config = {
'server.socket_host': '0.0.0.0',
'server.socket_port': 443,
'server.ssl_module': 'pyopenssl',
'server.ssl_certificate': 'tests/files/certificate.cert',
'server.ssl_private_key': 'tests/files/key.key',
}
cherrypy.config.update(server_config)
cherrypy.quickstart(self) | ['def', 'browser_authorize', '(', 'self', ')', ':', 'url', '=', 'self', '.', 'authorize_url', '(', ')', '# Open the web browser in a new thread for command-line browser support', 'threading', '.', 'Timer', '(', '1', ',', 'webbrowser', '.', 'open', ',', 'args', '=', '(', 'url', ',', ')', ')', '.', 'start', '(', ')', 'server_config', '=', '{', "'server.socket_host'", ':', "'0.0.0.0'", ',', "'server.socket_port'", ':', '443', ',', "'server.ssl_module'", ':', "'pyopenssl'", ',', "'server.ssl_certificate'", ':', "'tests/files/certificate.cert'", ',', "'server.ssl_private_key'", ':', "'tests/files/key.key'", ',', '}', 'cherrypy', '.', 'config', '.', 'update', '(', 'server_config', ')', 'cherrypy', '.', 'quickstart', '(', 'self', ')'] | Open a browser to the authorization url and spool up a CherryPy
server to accept the response | ['Open', 'a', 'browser', 'to', 'the', 'authorization', 'url', 'and', 'spool', 'up', 'a', 'CherryPy', 'server', 'to', 'accept', 'the', 'response'] | train | https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L116-L135 |
991 | bcbio/bcbio-nextgen | bcbio/hla/optitype.py | run | def run(data):
"""HLA typing with OptiType, parsing output from called genotype files.
"""
hlas = []
for hla_fq in tz.get_in(["hla", "fastq"], data, []):
hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype")
if hla_type in SUPPORTED_HLAS:
if utils.file_exists(hla_fq):
hlas.append((hla_type, hla_fq))
if len(hlas) > 0:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla",
"OptiType-HLA-A_B_C"))
# When running UMIs and hla typing we want to pick the original fastqs
if len(hlas) > len(SUPPORTED_HLAS):
hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1]
if len(hlas) == len(SUPPORTED_HLAS):
hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data)
if utils.file_exists(hla_fq):
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
if len(out_file) > 0:
out_file = out_file[0]
else:
out_file = _call_hla(hla_fq, out_dir, data)
out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data)
data["hla"].update({"call_file": out_file,
"hlacaller": "optitype"})
return data | python | def run(data):
"""HLA typing with OptiType, parsing output from called genotype files.
"""
hlas = []
for hla_fq in tz.get_in(["hla", "fastq"], data, []):
hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype")
if hla_type in SUPPORTED_HLAS:
if utils.file_exists(hla_fq):
hlas.append((hla_type, hla_fq))
if len(hlas) > 0:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data), "hla",
"OptiType-HLA-A_B_C"))
# When running UMIs and hla typing we want to pick the original fastqs
if len(hlas) > len(SUPPORTED_HLAS):
hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1]
if len(hlas) == len(SUPPORTED_HLAS):
hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data)
if utils.file_exists(hla_fq):
out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv"))
if len(out_file) > 0:
out_file = out_file[0]
else:
out_file = _call_hla(hla_fq, out_dir, data)
out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data)
data["hla"].update({"call_file": out_file,
"hlacaller": "optitype"})
return data | ['def', 'run', '(', 'data', ')', ':', 'hlas', '=', '[', ']', 'for', 'hla_fq', 'in', 'tz', '.', 'get_in', '(', '[', '"hla"', ',', '"fastq"', ']', ',', 'data', ',', '[', ']', ')', ':', 'hla_type', '=', 're', '.', 'search', '(', '"[.-](?P<hlatype>HLA-[\\w-]+).fq"', ',', 'hla_fq', ')', '.', 'group', '(', '"hlatype"', ')', 'if', 'hla_type', 'in', 'SUPPORTED_HLAS', ':', 'if', 'utils', '.', 'file_exists', '(', 'hla_fq', ')', ':', 'hlas', '.', 'append', '(', '(', 'hla_type', ',', 'hla_fq', ')', ')', 'if', 'len', '(', 'hlas', ')', '>', '0', ':', 'out_dir', '=', 'utils', '.', 'safe_makedir', '(', 'os', '.', 'path', '.', 'join', '(', 'dd', '.', 'get_work_dir', '(', 'data', ')', ',', '"align"', ',', 'dd', '.', 'get_sample_name', '(', 'data', ')', ',', '"hla"', ',', '"OptiType-HLA-A_B_C"', ')', ')', '# When running UMIs and hla typing we want to pick the original fastqs', 'if', 'len', '(', 'hlas', ')', '>', 'len', '(', 'SUPPORTED_HLAS', ')', ':', 'hlas', '=', '[', 'x', 'for', 'x', 'in', 'hlas', 'if', 'os', '.', 'path', '.', 'basename', '(', 'x', '[', '1', ']', ')', '.', 'find', '(', '"-cumi"', ')', '==', '-', '1', ']', 'if', 'len', '(', 'hlas', ')', '==', 'len', '(', 'SUPPORTED_HLAS', ')', ':', 'hla_fq', '=', 'combine_hla_fqs', '(', 'hlas', ',', 'out_dir', '+', '"-input.fq"', ',', 'data', ')', 'if', 'utils', '.', 'file_exists', '(', 'hla_fq', ')', ':', 'out_file', '=', 'glob', '.', 'glob', '(', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', '"*"', ',', '"*_result.tsv"', ')', ')', 'if', 'len', '(', 'out_file', ')', '>', '0', ':', 'out_file', '=', 'out_file', '[', '0', ']', 'else', ':', 'out_file', '=', '_call_hla', '(', 'hla_fq', ',', 'out_dir', ',', 'data', ')', 'out_file', '=', '_prepare_calls', '(', 'out_file', ',', 'os', '.', 'path', '.', 'dirname', '(', 'out_dir', ')', ',', 'data', ')', 'data', '[', '"hla"', ']', '.', 'update', '(', '{', '"call_file"', ':', 'out_file', ',', '"hlacaller"', ':', '"optitype"', '}', ')', 'return', 'data'] | HLA typing with OptiType, parsing output from called genotype files. | ['HLA', 'typing', 'with', 'OptiType', 'parsing', 'output', 'from', 'called', 'genotype', 'files', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/hla/optitype.py#L23-L50 |
992 | ambitioninc/django-entity | entity/models.py | EntityGroup.bulk_overwrite | def bulk_overwrite(self, entities_and_kinds):
"""
Update the group to the given entities and sub-entity groups.
After this operation, the only members of this EntityGroup
will be the given entities, and sub-entity groups.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to set to the EntityGroup. In the pairs the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind.
"""
EntityGroupMembership.objects.filter(entity_group=self).delete()
return self.bulk_add_entities(entities_and_kinds) | python | def bulk_overwrite(self, entities_and_kinds):
"""
Update the group to the given entities and sub-entity groups.
After this operation, the only members of this EntityGroup
will be the given entities, and sub-entity groups.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to set to the EntityGroup. In the pairs the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind.
"""
EntityGroupMembership.objects.filter(entity_group=self).delete()
return self.bulk_add_entities(entities_and_kinds) | ['def', 'bulk_overwrite', '(', 'self', ',', 'entities_and_kinds', ')', ':', 'EntityGroupMembership', '.', 'objects', '.', 'filter', '(', 'entity_group', '=', 'self', ')', '.', 'delete', '(', ')', 'return', 'self', '.', 'bulk_add_entities', '(', 'entities_and_kinds', ')'] | Update the group to the given entities and sub-entity groups.
After this operation, the only members of this EntityGroup
will be the given entities, and sub-entity groups.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to set to the EntityGroup. In the pairs the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind. | ['Update', 'the', 'group', 'to', 'the', 'given', 'entities', 'and', 'sub', '-', 'entity', 'groups', '.'] | train | https://github.com/ambitioninc/django-entity/blob/ebc61f34313c52f4ef5819eb1da25b2ad837e80c/entity/models.py#L493-L507 |
993 | pantsbuild/pants | src/python/pants/base/exception_sink.py | SignalHandler.signal_handler_mapping | def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self.handle_sigint,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
} | python | def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self.handle_sigint,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
} | ['def', 'signal_handler_mapping', '(', 'self', ')', ':', '# Could use an enum here, but we never end up doing any matching on the specific signal value,', '# instead just iterating over the registered signals to set handlers, so a dict is probably', '# better.', 'return', '{', 'signal', '.', 'SIGINT', ':', 'self', '.', 'handle_sigint', ',', 'signal', '.', 'SIGQUIT', ':', 'self', '.', 'handle_sigquit', ',', 'signal', '.', 'SIGTERM', ':', 'self', '.', 'handle_sigterm', ',', '}'] | A dict mapping (signal number) -> (a method handling the signal). | ['A', 'dict', 'mapping', '(', 'signal', 'number', ')', '-', '>', '(', 'a', 'method', 'handling', 'the', 'signal', ')', '.'] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/exception_sink.py#L40-L49 |
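A sketch of how a mapping like this is typically installed with the standard signal module; the handler bodies are placeholders and SIGQUIT is left out for Windows portability:

import signal

class DemoSignalHandler(object):
    def handle_sigint(self, signum, frame):
        print("got SIGINT")

    def handle_sigterm(self, signum, frame):
        print("got SIGTERM")

    @property
    def signal_handler_mapping(self):
        # same shape as above: signal number -> bound handler method
        return {
            signal.SIGINT: self.handle_sigint,
            signal.SIGTERM: self.handle_sigterm,
        }

handler = DemoSignalHandler()
for signum, method in handler.signal_handler_mapping.items():
    signal.signal(signum, method)  # register each handler on the main thread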
994 | geomet/geomet | geomet/wkb.py | _get_geom_type | def _get_geom_type(type_bytes):
"""Get the GeoJSON geometry type label from a WKB type byte string.
:param type_bytes:
4 byte string in big endian byte order containing a WKB type number.
It may also contain a "has SRID" flag in the high byte (the first type,
since this is big endian byte order), indicated as 0x20. If the SRID
flag is not set, the high byte will always be null (0x00).
:returns:
3-tuple ofGeoJSON geometry type label, the bytes resprenting the
geometry type, and a separate "has SRID" flag. If the input
`type_bytes` contains an SRID flag, it will be removed.
>>> # Z Point, with SRID flag
>>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
... 'Point', b'\\x00\\x00\\x03\\xe9', True)
True
>>> # 2D MultiLineString, without SRID flag
>>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
True
"""
# slice off the high byte, which may contain the SRID flag
high_byte = type_bytes[0]
if six.PY3:
high_byte = bytes([high_byte])
has_srid = high_byte == b'\x20'
if has_srid:
# replace the high byte with a null byte
type_bytes = as_bin_str(b'\x00' + type_bytes[1:])
else:
type_bytes = as_bin_str(type_bytes)
# look up the geometry type
geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes)
return geom_type, type_bytes, has_srid | python | def _get_geom_type(type_bytes):
"""Get the GeoJSON geometry type label from a WKB type byte string.
:param type_bytes:
4 byte string in big endian byte order containing a WKB type number.
It may also contain a "has SRID" flag in the high byte (the first type,
since this is big endian byte order), indicated as 0x20. If the SRID
flag is not set, the high byte will always be null (0x00).
:returns:
3-tuple ofGeoJSON geometry type label, the bytes resprenting the
geometry type, and a separate "has SRID" flag. If the input
`type_bytes` contains an SRID flag, it will be removed.
>>> # Z Point, with SRID flag
>>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
... 'Point', b'\\x00\\x00\\x03\\xe9', True)
True
>>> # 2D MultiLineString, without SRID flag
>>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
True
"""
# slice off the high byte, which may contain the SRID flag
high_byte = type_bytes[0]
if six.PY3:
high_byte = bytes([high_byte])
has_srid = high_byte == b'\x20'
if has_srid:
# replace the high byte with a null byte
type_bytes = as_bin_str(b'\x00' + type_bytes[1:])
else:
type_bytes = as_bin_str(type_bytes)
# look up the geometry type
geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes)
return geom_type, type_bytes, has_srid | ['def', '_get_geom_type', '(', 'type_bytes', ')', ':', '# slice off the high byte, which may contain the SRID flag', 'high_byte', '=', 'type_bytes', '[', '0', ']', 'if', 'six', '.', 'PY3', ':', 'high_byte', '=', 'bytes', '(', '[', 'high_byte', ']', ')', 'has_srid', '=', 'high_byte', '==', "b'\\x20'", 'if', 'has_srid', ':', '# replace the high byte with a null byte', 'type_bytes', '=', 'as_bin_str', '(', "b'\\x00'", '+', 'type_bytes', '[', '1', ':', ']', ')', 'else', ':', 'type_bytes', '=', 'as_bin_str', '(', 'type_bytes', ')', '# look up the geometry type', 'geom_type', '=', '_BINARY_TO_GEOM_TYPE', '.', 'get', '(', 'type_bytes', ')', 'return', 'geom_type', ',', 'type_bytes', ',', 'has_srid'] | Get the GeoJSON geometry type label from a WKB type byte string.
:param type_bytes:
4 byte string in big endian byte order containing a WKB type number.
It may also contain a "has SRID" flag in the high byte (the first type,
since this is big endian byte order), indicated as 0x20. If the SRID
flag is not set, the high byte will always be null (0x00).
:returns:
3-tuple ofGeoJSON geometry type label, the bytes resprenting the
geometry type, and a separate "has SRID" flag. If the input
`type_bytes` contains an SRID flag, it will be removed.
>>> # Z Point, with SRID flag
>>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
... 'Point', b'\\x00\\x00\\x03\\xe9', True)
True
>>> # 2D MultiLineString, without SRID flag
>>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
True | ['Get', 'the', 'GeoJSON', 'geometry', 'type', 'label', 'from', 'a', 'WKB', 'type', 'byte', 'string', '.'] | train | https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkb.py#L110-L147 |
995 | docker/docker-py | docker/types/daemon.py | CancellableStream.close | def close(self):
"""
Closes the event streaming.
"""
if not self._response.raw.closed:
# find the underlying socket object
# based on api.client._get_raw_response_socket
sock_fp = self._response.raw._fp.fp
if hasattr(sock_fp, 'raw'):
sock_raw = sock_fp.raw
if hasattr(sock_raw, 'sock'):
sock = sock_raw.sock
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'
)
else:
sock = sock_fp._sock
if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close() | python | def close(self):
"""
Closes the event streaming.
"""
if not self._response.raw.closed:
# find the underlying socket object
# based on api.client._get_raw_response_socket
sock_fp = self._response.raw._fp.fp
if hasattr(sock_fp, 'raw'):
sock_raw = sock_fp.raw
if hasattr(sock_raw, 'sock'):
sock = sock_raw.sock
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'
)
else:
sock = sock_fp._sock
if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close() | ['def', 'close', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_response', '.', 'raw', '.', 'closed', ':', '# find the underlying socket object', '# based on api.client._get_raw_response_socket', 'sock_fp', '=', 'self', '.', '_response', '.', 'raw', '.', '_fp', '.', 'fp', 'if', 'hasattr', '(', 'sock_fp', ',', "'raw'", ')', ':', 'sock_raw', '=', 'sock_fp', '.', 'raw', 'if', 'hasattr', '(', 'sock_raw', ',', "'sock'", ')', ':', 'sock', '=', 'sock_raw', '.', 'sock', 'elif', 'hasattr', '(', 'sock_raw', ',', "'_sock'", ')', ':', 'sock', '=', 'sock_raw', '.', '_sock', 'elif', 'hasattr', '(', 'sock_fp', ',', "'channel'", ')', ':', "# We're working with a paramiko (SSH) channel, which doesn't", '# support cancelable streams with the current implementation', 'raise', 'DockerException', '(', "'Cancellable streams not supported for the SSH protocol'", ')', 'else', ':', 'sock', '=', 'sock_fp', '.', '_sock', 'if', 'hasattr', '(', 'urllib3', '.', 'contrib', ',', "'pyopenssl'", ')', 'and', 'isinstance', '(', 'sock', ',', 'urllib3', '.', 'contrib', '.', 'pyopenssl', '.', 'WrappedSocket', ')', ':', 'sock', '=', 'sock', '.', 'socket', 'sock', '.', 'shutdown', '(', 'socket', '.', 'SHUT_RDWR', ')', 'sock', '.', 'close', '(', ')'] | Closes the event streaming. | ['Closes', 'the', 'event', 'streaming', '.'] | train | https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/types/daemon.py#L40-L74 |
996 | log2timeline/plaso | plaso/containers/artifacts.py | OperatingSystemArtifact.version_tuple | def version_tuple(self):
"""tuple[int]: version tuple or None if version is not set or invalid."""
try:
return tuple([int(digit, 10) for digit in self.version.split('.')])
except (AttributeError, TypeError, ValueError):
return None | python | def version_tuple(self):
"""tuple[int]: version tuple or None if version is not set or invalid."""
try:
return tuple([int(digit, 10) for digit in self.version.split('.')])
except (AttributeError, TypeError, ValueError):
return None | ['def', 'version_tuple', '(', 'self', ')', ':', 'try', ':', 'return', 'tuple', '(', '[', 'int', '(', 'digit', ',', '10', ')', 'for', 'digit', 'in', 'self', '.', 'version', '.', 'split', '(', "'.'", ')', ']', ')', 'except', '(', 'AttributeError', ',', 'TypeError', ',', 'ValueError', ')', ':', 'return', 'None'] | tuple[int]: version tuple or None if version is not set or invalid. | ['tuple', '[', 'int', ']', ':', 'version', 'tuple', 'or', 'None', 'if', 'version', 'is', 'not', 'set', 'or', 'invalid', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/containers/artifacts.py#L135-L140 |
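The same parse, pulled out of the container class and run against a couple of illustrative version strings:

def version_tuple(version):
    try:
        return tuple([int(digit, 10) for digit in version.split('.')])
    except (AttributeError, TypeError, ValueError):
        return None

print(version_tuple("10.0.14393"))  # (10, 0, 14393)
print(version_tuple(None))          # None: AttributeError is swallowed
print(version_tuple("10.x"))        # None: ValueError on the non-numeric part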
997 | KelSolaar/Foundations | foundations/nodes.py | AbstractCompositeNode.remove_child | def remove_child(self, index):
"""
Removes child at given index from the Node children.
Usage::
>>> node_a = AbstractCompositeNode("MyNodeA")
>>> node_b = AbstractCompositeNode("MyNodeB", node_a)
>>> node_c = AbstractCompositeNode("MyNodeC", node_a)
>>> node_a.remove_child(1)
True
>>> [child.name for child in node_a.children]
[u'MyNodeB']
:param index: Node index.
:type index: int
:return: Removed child.
:rtype: AbstractNode or AbstractCompositeNode or Object
"""
if index < 0 or index > len(self.__children):
return
child = self.__children.pop(index)
child.parent = None
return child | python | def remove_child(self, index):
"""
Removes child at given index from the Node children.
Usage::
>>> node_a = AbstractCompositeNode("MyNodeA")
>>> node_b = AbstractCompositeNode("MyNodeB", node_a)
>>> node_c = AbstractCompositeNode("MyNodeC", node_a)
>>> node_a.remove_child(1)
True
>>> [child.name for child in node_a.children]
[u'MyNodeB']
:param index: Node index.
:type index: int
:return: Removed child.
:rtype: AbstractNode or AbstractCompositeNode or Object
"""
if index < 0 or index > len(self.__children):
return
child = self.__children.pop(index)
child.parent = None
return child | ['def', 'remove_child', '(', 'self', ',', 'index', ')', ':', 'if', 'index', '<', '0', 'or', 'index', '>', 'len', '(', 'self', '.', '__children', ')', ':', 'return', 'child', '=', 'self', '.', '__children', '.', 'pop', '(', 'index', ')', 'child', '.', 'parent', '=', 'None', 'return', 'child'] | Removes child at given index from the Node children.
Usage::
>>> node_a = AbstractCompositeNode("MyNodeA")
>>> node_b = AbstractCompositeNode("MyNodeB", node_a)
>>> node_c = AbstractCompositeNode("MyNodeC", node_a)
>>> node_a.remove_child(1)
True
>>> [child.name for child in node_a.children]
[u'MyNodeB']
:param index: Node index.
:type index: int
:return: Removed child.
:rtype: AbstractNode or AbstractCompositeNode or Object | ['Removes', 'child', 'at', 'given', 'index', 'from', 'the', 'Node', 'children', '.'] | train | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/nodes.py#L775-L800 |
998 | rflamary/POT | ot/da.py | BaseTransport.fit | def fit(self, Xs=None, ys=None, Xt=None, yt=None):
"""Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt)
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
self : object
Returns self.
"""
# check the necessary inputs parameters are here
if check_params(Xs=Xs, Xt=Xt):
# pairwise distance
self.cost_ = dist(Xs, Xt, metric=self.metric)
self.cost_ = cost_normalization(self.cost_, self.norm)
if (ys is not None) and (yt is not None):
if self.limit_max != np.infty:
self.limit_max = self.limit_max * np.max(self.cost_)
# assumes labeled source samples occupy the first rows
# and labeled target samples occupy the first columns
classes = [c for c in np.unique(ys) if c != -1]
for c in classes:
idx_s = np.where((ys != c) & (ys != -1))
idx_t = np.where(yt == c)
# all the coefficients corresponding to a source sample
# and a target sample :
# with different labels get a infinite
for j in idx_t[0]:
self.cost_[idx_s[0], j] = self.limit_max
# distribution estimation
self.mu_s = self.distribution_estimation(Xs)
self.mu_t = self.distribution_estimation(Xt)
# store arrays of samples
self.xs_ = Xs
self.xt_ = Xt
return self | python | def fit(self, Xs=None, ys=None, Xt=None, yt=None):
"""Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt)
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
self : object
Returns self.
"""
# check the necessary inputs parameters are here
if check_params(Xs=Xs, Xt=Xt):
# pairwise distance
self.cost_ = dist(Xs, Xt, metric=self.metric)
self.cost_ = cost_normalization(self.cost_, self.norm)
if (ys is not None) and (yt is not None):
if self.limit_max != np.infty:
self.limit_max = self.limit_max * np.max(self.cost_)
# assumes labeled source samples occupy the first rows
# and labeled target samples occupy the first columns
classes = [c for c in np.unique(ys) if c != -1]
for c in classes:
idx_s = np.where((ys != c) & (ys != -1))
idx_t = np.where(yt == c)
# all the coefficients corresponding to a source sample
# and a target sample :
# with different labels get a infinite
for j in idx_t[0]:
self.cost_[idx_s[0], j] = self.limit_max
# distribution estimation
self.mu_s = self.distribution_estimation(Xs)
self.mu_t = self.distribution_estimation(Xt)
# store arrays of samples
self.xs_ = Xs
self.xt_ = Xt
return self | ['def', 'fit', '(', 'self', ',', 'Xs', '=', 'None', ',', 'ys', '=', 'None', ',', 'Xt', '=', 'None', ',', 'yt', '=', 'None', ')', ':', '# check the necessary inputs parameters are here', 'if', 'check_params', '(', 'Xs', '=', 'Xs', ',', 'Xt', '=', 'Xt', ')', ':', '# pairwise distance', 'self', '.', 'cost_', '=', 'dist', '(', 'Xs', ',', 'Xt', ',', 'metric', '=', 'self', '.', 'metric', ')', 'self', '.', 'cost_', '=', 'cost_normalization', '(', 'self', '.', 'cost_', ',', 'self', '.', 'norm', ')', 'if', '(', 'ys', 'is', 'not', 'None', ')', 'and', '(', 'yt', 'is', 'not', 'None', ')', ':', 'if', 'self', '.', 'limit_max', '!=', 'np', '.', 'infty', ':', 'self', '.', 'limit_max', '=', 'self', '.', 'limit_max', '*', 'np', '.', 'max', '(', 'self', '.', 'cost_', ')', '# assumes labeled source samples occupy the first rows', '# and labeled target samples occupy the first columns', 'classes', '=', '[', 'c', 'for', 'c', 'in', 'np', '.', 'unique', '(', 'ys', ')', 'if', 'c', '!=', '-', '1', ']', 'for', 'c', 'in', 'classes', ':', 'idx_s', '=', 'np', '.', 'where', '(', '(', 'ys', '!=', 'c', ')', '&', '(', 'ys', '!=', '-', '1', ')', ')', 'idx_t', '=', 'np', '.', 'where', '(', 'yt', '==', 'c', ')', '# all the coefficients corresponding to a source sample', '# and a target sample :', '# with different labels get a infinite', 'for', 'j', 'in', 'idx_t', '[', '0', ']', ':', 'self', '.', 'cost_', '[', 'idx_s', '[', '0', ']', ',', 'j', ']', '=', 'self', '.', 'limit_max', '# distribution estimation', 'self', '.', 'mu_s', '=', 'self', '.', 'distribution_estimation', '(', 'Xs', ')', 'self', '.', 'mu_t', '=', 'self', '.', 'distribution_estimation', '(', 'Xt', ')', '# store arrays of samples', 'self', '.', 'xs_', '=', 'Xs', 'self', '.', 'xt_', '=', 'Xt', 'return', 'self'] | Build a coupling matrix from source and target sets of samples
(Xs, ys) and (Xt, yt)
Parameters
----------
Xs : array-like, shape (n_source_samples, n_features)
The training input samples.
ys : array-like, shape (n_source_samples,)
The class labels
Xt : array-like, shape (n_target_samples, n_features)
The training input samples.
yt : array-like, shape (n_target_samples,)
The class labels. If some target samples are unlabeled, fill the
yt's elements with -1.
Warning: Note that, due to this convention -1 cannot be used as a
class label
Returns
-------
self : object
Returns self. | ['Build', 'a', 'coupling', 'matrix', 'from', 'source', 'and', 'target', 'sets', 'of', 'samples', '(', 'Xs', 'ys', ')', 'and', '(', 'Xt', 'yt', ')'] | train | https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/da.py#L783-L841 |
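The label-aware step in fit() simply inflates cost entries between source and target samples with conflicting classes; a numpy-only sketch of that masking on tiny made-up data:

import numpy as np

cost = np.ones((3, 2))      # illustrative 3 source x 2 target cost matrix
ys = np.array([0, 1, -1])   # -1 marks an unlabeled source sample
yt = np.array([1, -1])      # -1 marks an unlabeled target sample
limit_max = 1e6             # stand-in for the "infinite" cost

classes = [c for c in np.unique(ys) if c != -1]
for c in classes:
    idx_s = np.where((ys != c) & (ys != -1))
    idx_t = np.where(yt == c)
    for j in idx_t[0]:
        cost[idx_s[0], j] = limit_max  # forbid transport across mismatched labels

print(cost)
# only cost[0, 0] is inflated: source 0 (class 0) vs target 0 (class 1); unlabeled samples are untouched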
999 | dls-controls/pymalcolm | malcolm/modules/web/controllers/websocketclientcomms.py | WebsocketClientComms.sync_proxy | def sync_proxy(self, mri, block):
"""Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync
"""
# Send a root Subscribe to the server
subscribe = Subscribe(path=[mri], delta=True)
done_queue = Queue()
def handle_response(response):
# Called from tornado
if not isinstance(response, Delta):
# Return or Error is the end of our subscription, log and ignore
self.log.debug("Proxy got response %r", response)
done_queue.put(None)
else:
cothread.Callback(
self._handle_response, response, block, done_queue)
subscribe.set_callback(handle_response)
IOLoopHelper.call(self._send_request, subscribe)
done_queue.get(timeout=DEFAULT_TIMEOUT) | python | def sync_proxy(self, mri, block):
"""Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync
"""
# Send a root Subscribe to the server
subscribe = Subscribe(path=[mri], delta=True)
done_queue = Queue()
def handle_response(response):
# Called from tornado
if not isinstance(response, Delta):
# Return or Error is the end of our subscription, log and ignore
self.log.debug("Proxy got response %r", response)
done_queue.put(None)
else:
cothread.Callback(
self._handle_response, response, block, done_queue)
subscribe.set_callback(handle_response)
IOLoopHelper.call(self._send_request, subscribe)
done_queue.get(timeout=DEFAULT_TIMEOUT) | ['def', 'sync_proxy', '(', 'self', ',', 'mri', ',', 'block', ')', ':', '# Send a root Subscribe to the server', 'subscribe', '=', 'Subscribe', '(', 'path', '=', '[', 'mri', ']', ',', 'delta', '=', 'True', ')', 'done_queue', '=', 'Queue', '(', ')', 'def', 'handle_response', '(', 'response', ')', ':', '# Called from tornado', 'if', 'not', 'isinstance', '(', 'response', ',', 'Delta', ')', ':', '# Return or Error is the end of our subscription, log and ignore', 'self', '.', 'log', '.', 'debug', '(', '"Proxy got response %r"', ',', 'response', ')', 'done_queue', '.', 'put', '(', 'None', ')', 'else', ':', 'cothread', '.', 'Callback', '(', 'self', '.', '_handle_response', ',', 'response', ',', 'block', ',', 'done_queue', ')', 'subscribe', '.', 'set_callback', '(', 'handle_response', ')', 'IOLoopHelper', '.', 'call', '(', 'self', '.', '_send_request', ',', 'subscribe', ')', 'done_queue', '.', 'get', '(', 'timeout', '=', 'DEFAULT_TIMEOUT', ')'] | Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync | ['Abstract', 'method', 'telling', 'the', 'ClientComms', 'to', 'sync', 'this', 'proxy', 'Block', 'with', 'its', 'remote', 'counterpart', '.', 'Should', 'wait', 'until', 'it', 'is', 'connected'] | train | https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/web/controllers/websocketclientcomms.py#L139-L163 |