Unnamed: 0 (int64, 0-10k) | repository_name (stringlengths 7-54) | func_path_in_repository (stringlengths 5-223) | func_name (stringlengths 1-134) | whole_func_string (stringlengths 100-30.3k) | language (stringclasses 1 value) | func_code_string (stringlengths 100-30.3k) | func_code_tokens (stringlengths 138-33.2k) | func_documentation_string (stringlengths 1-15k) | func_documentation_tokens (stringlengths 5-5.14k) | split_name (stringclasses 1 value) | func_code_url (stringlengths 91-315) |
---|---|---|---|---|---|---|---|---|---|---|---|
2,700 | gwastro/pycbc | pycbc/sensitivity.py | compute_search_volume_in_bins | def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function):
"""
Calculate search sensitive volume by integrating efficiency in distance bins
No cosmological corrections are applied: flat space is assumed.
The first dimension of ndbins must be bins over injected distance.
sim_to_bins_function must map an object to a tuple indexing the ndbins.
"""
eff, err = compute_search_efficiency_in_bins(
found, total, ndbins, sim_to_bins_function)
dx = ndbins[0].upper() - ndbins[0].lower()
r = ndbins[0].centres()
# volume and errors have one fewer dimension than the input NDBins
vol = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
errors = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
# integrate efficiency to obtain volume
vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)
# propagate errors in eff to errors in V
errors.array = numpy.sqrt(
((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(axis=-1)
)
return vol, errors | python | def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function):
"""
Calculate search sensitive volume by integrating efficiency in distance bins
No cosmological corrections are applied: flat space is assumed.
The first dimension of ndbins must be bins over injected distance.
sim_to_bins_function must map an object to a tuple indexing the ndbins.
"""
eff, err = compute_search_efficiency_in_bins(
found, total, ndbins, sim_to_bins_function)
dx = ndbins[0].upper() - ndbins[0].lower()
r = ndbins[0].centres()
# volume and errors have one fewer dimension than the input NDBins
vol = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
errors = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
# integrate efficiency to obtain volume
vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)
# propagate errors in eff to errors in V
errors.array = numpy.sqrt(
((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(axis=-1)
)
return vol, errors | ['def', 'compute_search_volume_in_bins', '(', 'found', ',', 'total', ',', 'ndbins', ',', 'sim_to_bins_function', ')', ':', 'eff', ',', 'err', '=', 'compute_search_efficiency_in_bins', '(', 'found', ',', 'total', ',', 'ndbins', ',', 'sim_to_bins_function', ')', 'dx', '=', 'ndbins', '[', '0', ']', '.', 'upper', '(', ')', '-', 'ndbins', '[', '0', ']', '.', 'lower', '(', ')', 'r', '=', 'ndbins', '[', '0', ']', '.', 'centres', '(', ')', '# volume and errors have one fewer dimension than the input NDBins', 'vol', '=', 'bin_utils', '.', 'BinnedArray', '(', 'bin_utils', '.', 'NDBins', '(', 'ndbins', '[', '1', ':', ']', ')', ')', 'errors', '=', 'bin_utils', '.', 'BinnedArray', '(', 'bin_utils', '.', 'NDBins', '(', 'ndbins', '[', '1', ':', ']', ')', ')', '# integrate efficiency to obtain volume', 'vol', '.', 'array', '=', 'numpy', '.', 'trapz', '(', 'eff', '.', 'array', '.', 'T', '*', '4.', '*', 'numpy', '.', 'pi', '*', 'r', '**', '2', ',', 'r', ',', 'dx', ')', '# propagate errors in eff to errors in V', 'errors', '.', 'array', '=', 'numpy', '.', 'sqrt', '(', '(', '(', '4', '*', 'numpy', '.', 'pi', '*', 'r', '**', '2', '*', 'err', '.', 'array', '.', 'T', '*', 'dx', ')', '**', '2', ')', '.', 'sum', '(', 'axis', '=', '-', '1', ')', ')', 'return', 'vol', ',', 'errors'] | Calculate search sensitive volume by integrating efficiency in distance bins
No cosmological corrections are applied: flat space is assumed.
The first dimension of ndbins must be bins over injected distance.
sim_to_bins_function must map an object to a tuple indexing the ndbins. | ['Calculate', 'search', 'sensitive', 'volume', 'by', 'integrating', 'efficiency', 'in', 'distance', 'bins'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/sensitivity.py#L35-L60 |
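
The function above integrates detection efficiency against the spherical volume element 4·pi·r² across distance bins. A minimal sketch of that integration and error propagation with plain numpy, using invented bin edges and efficiencies in place of pycbc's `bin_utils` arrays:

```python
import numpy

# Invented distance bins (Mpc) and a toy efficiency curve with errors
edges = numpy.linspace(0.0, 500.0, 11)      # 10 distance bins
r = 0.5 * (edges[:-1] + edges[1:])          # bin centres
dx = edges[1:] - edges[:-1]                 # bin widths
eff = numpy.exp(-r / 200.0)                 # stand-in detection efficiency per bin
err = 0.05 * eff                            # stand-in efficiency uncertainty

# Integrate efficiency times the shell volume element 4*pi*r^2 over distance
vol = numpy.trapz(eff * 4.0 * numpy.pi * r**2, r)

# Propagate per-bin efficiency errors to a volume error, as in the function above
vol_err = numpy.sqrt(((4.0 * numpy.pi * r**2 * err * dx) ** 2).sum())

print(vol, vol_err)
```
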
2,701 | secnot/rectpack | rectpack/maxrects.py | MaxRects._split | def _split(self, rect):
"""
Split all max_rects intersecting the rectangle rect into up to
4 new max_rects.
Arguments:
rect (Rectangle): Rectangle
Returns:
split (Rectangle list): List of rectangles resulting from the split
"""
max_rects = collections.deque()
for r in self._max_rects:
if r.intersects(rect):
max_rects.extend(self._generate_splits(r, rect))
else:
max_rects.append(r)
# Add newly generated max_rects
self._max_rects = list(max_rects) | python | def _split(self, rect):
"""
Split all max_rects intersecting the rectangle rect into up to
4 new max_rects.
Arguments:
rect (Rectangle): Rectangle
Returns:
split (Rectangle list): List of rectangles resulting from the split
"""
max_rects = collections.deque()
for r in self._max_rects:
if r.intersects(rect):
max_rects.extend(self._generate_splits(r, rect))
else:
max_rects.append(r)
# Add newly generated max_rects
self._max_rects = list(max_rects) | ['def', '_split', '(', 'self', ',', 'rect', ')', ':', 'max_rects', '=', 'collections', '.', 'deque', '(', ')', 'for', 'r', 'in', 'self', '.', '_max_rects', ':', 'if', 'r', '.', 'intersects', '(', 'rect', ')', ':', 'max_rects', '.', 'extend', '(', 'self', '.', '_generate_splits', '(', 'r', ',', 'rect', ')', ')', 'else', ':', 'max_rects', '.', 'append', '(', 'r', ')', '# Add newly generated max_rects', 'self', '.', '_max_rects', '=', 'list', '(', 'max_rects', ')'] | Split all max_rects intersecting the rectangle rect into up to
4 new max_rects.
Arguments:
rect (Rectangle): Rectangle
Returns:
split (Rectangle list): List of rectangles resulting from the split | ['Split', 'all', 'max_rects', 'intersecting', 'the', 'rectangle', 'rect', 'into', 'up', 'to', '4', 'new', 'max_rects', '.', 'Arguments', ':', 'rect', '(', 'Rectangle', ')', ':', 'Rectangle'] | train | https://github.com/secnot/rectpack/blob/21d46be48fd453500ea49de699bc9eabc427bdf7/rectpack/maxrects.py#L96-L116 |
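
`_split` is an internal step of the MaxRects algorithm; in practice it is exercised through rectpack's public packer interface. A hedged usage sketch, assuming the `newPacker`/`add_rect`/`add_bin` API from the rectpack README (which, as I understand it, defaults to a MaxRects-based packing algorithm):

```python
from rectpack import newPacker

packer = newPacker()  # default packing algorithm is a MaxRects variant

# Rectangles (width, height) to place and a single 100x100 bin
for width, height in [(30, 40), (50, 30), (10, 60)]:
    packer.add_rect(width, height)
packer.add_bin(100, 100)

packer.pack()

# Each placement is (bin index, x, y, width, height, rect id)
for placement in packer.rect_list():
    print(placement)
```
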
2,702 | juju/charm-helpers | charmhelpers/contrib/openstack/amulet/utils.py | OpenStackAmuletUtils.authenticate_keystone | def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
user_domain_name=None, domain_name=None,
project_domain_name=None, project_name=None):
"""Authenticate with Keystone"""
self.log.debug('Authenticating with keystone...')
if not api_version:
api_version = 2
sess, auth = self.get_keystone_session(
keystone_ip=keystone_ip,
username=username,
password=password,
api_version=api_version,
admin_port=admin_port,
user_domain_name=user_domain_name,
domain_name=domain_name,
project_domain_name=project_domain_name,
project_name=project_name
)
if api_version == 2:
client = keystone_client.Client(session=sess)
else:
client = keystone_client_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client | python | def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
user_domain_name=None, domain_name=None,
project_domain_name=None, project_name=None):
"""Authenticate with Keystone"""
self.log.debug('Authenticating with keystone...')
if not api_version:
api_version = 2
sess, auth = self.get_keystone_session(
keystone_ip=keystone_ip,
username=username,
password=password,
api_version=api_version,
admin_port=admin_port,
user_domain_name=user_domain_name,
domain_name=domain_name,
project_domain_name=project_domain_name,
project_name=project_name
)
if api_version == 2:
client = keystone_client.Client(session=sess)
else:
client = keystone_client_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client | ['def', 'authenticate_keystone', '(', 'self', ',', 'keystone_ip', ',', 'username', ',', 'password', ',', 'api_version', '=', 'False', ',', 'admin_port', '=', 'False', ',', 'user_domain_name', '=', 'None', ',', 'domain_name', '=', 'None', ',', 'project_domain_name', '=', 'None', ',', 'project_name', '=', 'None', ')', ':', 'self', '.', 'log', '.', 'debug', '(', "'Authenticating with keystone...'", ')', 'if', 'not', 'api_version', ':', 'api_version', '=', '2', 'sess', ',', 'auth', '=', 'self', '.', 'get_keystone_session', '(', 'keystone_ip', '=', 'keystone_ip', ',', 'username', '=', 'username', ',', 'password', '=', 'password', ',', 'api_version', '=', 'api_version', ',', 'admin_port', '=', 'admin_port', ',', 'user_domain_name', '=', 'user_domain_name', ',', 'domain_name', '=', 'domain_name', ',', 'project_domain_name', '=', 'project_domain_name', ',', 'project_name', '=', 'project_name', ')', 'if', 'api_version', '==', '2', ':', 'client', '=', 'keystone_client', '.', 'Client', '(', 'session', '=', 'sess', ')', 'else', ':', 'client', '=', 'keystone_client_v3', '.', 'Client', '(', 'session', '=', 'sess', ')', '# This populates the client.service_catalog', 'client', '.', 'auth_ref', '=', 'auth', '.', 'get_access', '(', 'sess', ')', 'return', 'client'] | Authenticate with Keystone | ['Authenticate', 'with', 'Keystone'] | train | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L478-L503 |
2,703 | robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/annotations.py | AnnotationExtras._add_genetic_models | def _add_genetic_models(self, variant_obj, info_dict):
"""Add the genetic models found
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): An info dictionary
"""
genetic_models_entry = info_dict.get('GeneticModels')
if genetic_models_entry:
genetic_models = []
for family_annotation in genetic_models_entry.split(','):
for genetic_model in family_annotation.split(':')[-1].split('|'):
genetic_models.append(genetic_model)
logger.debug("Updating genetic models to: {0}".format(
', '.join(genetic_models)))
variant_obj.genetic_models = genetic_models | python | def _add_genetic_models(self, variant_obj, info_dict):
"""Add the genetic models found
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): An info dictionary
"""
genetic_models_entry = info_dict.get('GeneticModels')
if genetic_models_entry:
genetic_models = []
for family_annotation in genetic_models_entry.split(','):
for genetic_model in family_annotation.split(':')[-1].split('|'):
genetic_models.append(genetic_model)
logger.debug("Updating genetic models to: {0}".format(
', '.join(genetic_models)))
variant_obj.genetic_models = genetic_models | ['def', '_add_genetic_models', '(', 'self', ',', 'variant_obj', ',', 'info_dict', ')', ':', 'genetic_models_entry', '=', 'info_dict', '.', 'get', '(', "'GeneticModels'", ')', 'if', 'genetic_models_entry', ':', 'genetic_models', '=', '[', ']', 'for', 'family_annotation', 'in', 'genetic_models_entry', '.', 'split', '(', "','", ')', ':', 'for', 'genetic_model', 'in', 'family_annotation', '.', 'split', '(', "':'", ')', '[', '-', '1', ']', '.', 'split', '(', "'|'", ')', ':', 'genetic_models', '.', 'append', '(', 'genetic_model', ')', 'logger', '.', 'debug', '(', '"Updating genetic models to: {0}"', '.', 'format', '(', "', '", '.', 'join', '(', 'genetic_models', ')', ')', ')', 'variant_obj', '.', 'genetic_models', '=', 'genetic_models'] | Add the genetic models found
Args:
variant_obj (puzzle.models.Variant)
info_dict (dict): An info dictionary | ['Add', 'the', 'genetic', 'models', 'found', 'Args', ':', 'variant_obj', '(', 'puzzle', '.', 'models', '.', 'Variant', ')', 'info_dict', '(', 'dict', ')', ':', 'An', 'info', 'dictionary'] | train | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/annotations.py#L56-L73 |
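
The `GeneticModels` INFO entry handled above is a comma-separated list of per-family annotations of the form `family_id:model1|model2`. A self-contained sketch of the same parsing, on an invented value:

```python
# Invented INFO value: two families, each annotated with its genetic models
genetic_models_entry = "1:AD|AD_dn,2:AR_hom"

genetic_models = []
for family_annotation in genetic_models_entry.split(','):
    # Everything after the last ':' is the '|'-separated list of models
    for genetic_model in family_annotation.split(':')[-1].split('|'):
        genetic_models.append(genetic_model)

print(genetic_models)  # ['AD', 'AD_dn', 'AR_hom']
```
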
2,704 | ClimateImpactLab/DataFS | datafs/core/data_archive.py | DataArchive.isfile | def isfile(self, version=None, *args, **kwargs):
'''
Check whether the path exists and is a file
'''
version = _process_version(self, version)
path = self.get_version_path(version)
self.authority.fs.isfile(path, *args, **kwargs) | python | def isfile(self, version=None, *args, **kwargs):
'''
Check whether the path exists and is a file
'''
version = _process_version(self, version)
path = self.get_version_path(version)
self.authority.fs.isfile(path, *args, **kwargs) | ['def', 'isfile', '(', 'self', ',', 'version', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'version', '=', '_process_version', '(', 'self', ',', 'version', ')', 'path', '=', 'self', '.', 'get_version_path', '(', 'version', ')', 'self', '.', 'authority', '.', 'fs', '.', 'isfile', '(', 'path', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Check whether the path exists and is a file | ['Check', 'whether', 'the', 'path', 'exists', 'and', 'is', 'a', 'file'] | train | https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_archive.py#L678-L685 |
2,705 | ocslegna/auto_py_torrent | auto_py_torrent/auto_py_torrent.py | AutoPy.get_magnet | def get_magnet(self, url):
"""Get magnet from torrent page. Url already got domain."""
content_most_rated = requests.get(url)
rated_soup = BeautifulSoup(content_most_rated.content, 'lxml')
if self.page == 'torrent_project':
self.magnet = rated_soup.find(
'a', href=True, text=re.compile('Download'))['href']
elif self.page == 'the_pirate_bay':
self.magnet = rated_soup.find(
'a', href=True, text=re.compile('Get this torrent'))['href']
elif self.page == '1337x':
div1337 = rated_soup.find(
'div', {'class': 'torrent-category-detail'})
self.magnet = div1337.find('a', href=re.compile('magnet'))['href']
elif self.page == 'isohunt':
self.magnet = rated_soup.find(
'a', href=re.compile('magnet'))['href']
else:
print('Wrong page to get magnet!')
sys.exit(1) | python | def get_magnet(self, url):
"""Get magnet from torrent page. Url already got domain."""
content_most_rated = requests.get(url)
rated_soup = BeautifulSoup(content_most_rated.content, 'lxml')
if self.page == 'torrent_project':
self.magnet = rated_soup.find(
'a', href=True, text=re.compile('Download'))['href']
elif self.page == 'the_pirate_bay':
self.magnet = rated_soup.find(
'a', href=True, text=re.compile('Get this torrent'))['href']
elif self.page == '1337x':
div1337 = rated_soup.find(
'div', {'class': 'torrent-category-detail'})
self.magnet = div1337.find('a', href=re.compile('magnet'))['href']
elif self.page == 'isohunt':
self.magnet = rated_soup.find(
'a', href=re.compile('magnet'))['href']
else:
print('Wrong page to get magnet!')
sys.exit(1) | ['def', 'get_magnet', '(', 'self', ',', 'url', ')', ':', 'content_most_rated', '=', 'requests', '.', 'get', '(', 'url', ')', 'rated_soup', '=', 'BeautifulSoup', '(', 'content_most_rated', '.', 'content', ',', "'lxml'", ')', 'if', 'self', '.', 'page', '==', "'torrent_project'", ':', 'self', '.', 'magnet', '=', 'rated_soup', '.', 'find', '(', "'a'", ',', 'href', '=', 'True', ',', 'text', '=', 're', '.', 'compile', '(', "'Download'", ')', ')', '[', "'href'", ']', 'elif', 'self', '.', 'page', '==', "'the_pirate_bay'", ':', 'self', '.', 'magnet', '=', 'rated_soup', '.', 'find', '(', "'a'", ',', 'href', '=', 'True', ',', 'text', '=', 're', '.', 'compile', '(', "'Get this torrent'", ')', ')', '[', "'href'", ']', 'elif', 'self', '.', 'page', '==', "'1337x'", ':', 'div1337', '=', 'rated_soup', '.', 'find', '(', "'div'", ',', '{', "'class'", ':', "'torrent-category-detail'", '}', ')', 'self', '.', 'magnet', '=', 'div1337', '.', 'find', '(', "'a'", ',', 'href', '=', 're', '.', 'compile', '(', "'magnet'", ')', ')', '[', "'href'", ']', 'elif', 'self', '.', 'page', '==', "'isohunt'", ':', 'self', '.', 'magnet', '=', 'rated_soup', '.', 'find', '(', "'a'", ',', 'href', '=', 're', '.', 'compile', '(', "'magnet'", ')', ')', '[', "'href'", ']', 'else', ':', 'print', '(', "'Wrong page to get magnet!'", ')', 'sys', '.', 'exit', '(', '1', ')'] | Get magnet from torrent page. Url already got domain. | ['Get', 'magnet', 'from', 'torrent', 'page', '.', 'Url', 'already', 'got', 'domain', '.'] | train | https://github.com/ocslegna/auto_py_torrent/blob/32761fe18b3112e6e3754da863488b50929fcc41/auto_py_torrent/auto_py_torrent.py#L227-L251 |
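
The scraping above reduces to: fetch the page, parse it, and take the first anchor whose href looks like a magnet link. A self-contained sketch of that core step on an invented HTML snippet (no network access; uses BeautifulSoup's bundled `html.parser` instead of lxml):

```python
import re
from bs4 import BeautifulSoup

# Invented markup standing in for a torrent detail page
html = """
<div class="torrent-category-detail">
  <a href="/fake/download">Download</a>
  <a href="magnet:?xt=urn:btih:0123456789abcdef">Magnet link</a>
</div>
"""

soup = BeautifulSoup(html, "html.parser")

# Same idea as the '1337x'/'isohunt' branches: first <a> whose href contains 'magnet'
magnet = soup.find("a", href=re.compile("magnet"))["href"]
print(magnet)
```
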
2,706 | deepmind/sonnet | sonnet/python/modules/basic_rnn.py | BidirectionalRNN._build | def _build(self, input_sequence, state):
"""Connects the BidirectionalRNN module into the graph.
Args:
input_sequence: tensor (time, batch, [feature_1, ..]). It must be
time_major.
state: tuple of states for the forward and backward cores.
Returns:
A dict with forward/backward states and output sequences:
"outputs":{
"forward": ...,
"backward": ...},
"state": {
"forward": ...,
"backward": ...}
Raises:
ValueError: in case time dimension is not statically known.
"""
input_shape = input_sequence.get_shape()
if input_shape[0] is None:
raise ValueError("Time dimension of input (dim 0) must be statically"
"known.")
seq_length = int(input_shape[0])
forward_state, backward_state = state
# Lists for the forward backward output and state.
output_sequence_f = []
output_sequence_b = []
# Forward pass over the sequence.
with tf.name_scope("forward_rnn"):
core_state = forward_state
for i in six.moves.range(seq_length):
core_output, core_state = self._forward_core(
input_sequence[i, :,], core_state)
output_sequence_f.append((core_output, core_state))
output_sequence_f = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_f)
# Backward pass over the sequence.
with tf.name_scope("backward_rnn"):
core_state = backward_state
for i in six.moves.range(seq_length - 1, -1, -1):
core_output, core_state = self._backward_core(
input_sequence[i, :,], core_state)
output_sequence_b.append((core_output, core_state))
output_sequence_b = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_b)
# Compose the full output and state sequeneces.
return {
"outputs": {
"forward": output_sequence_f[0],
"backward": output_sequence_b[0]
},
"state": {
"forward": output_sequence_f[1],
"backward": output_sequence_b[1]
}
} | python | def _build(self, input_sequence, state):
"""Connects the BidirectionalRNN module into the graph.
Args:
input_sequence: tensor (time, batch, [feature_1, ..]). It must be
time_major.
state: tuple of states for the forward and backward cores.
Returns:
A dict with forward/backward states and output sequences:
"outputs":{
"forward": ...,
"backward": ...},
"state": {
"forward": ...,
"backward": ...}
Raises:
ValueError: in case time dimension is not statically known.
"""
input_shape = input_sequence.get_shape()
if input_shape[0] is None:
raise ValueError("Time dimension of input (dim 0) must be statically"
"known.")
seq_length = int(input_shape[0])
forward_state, backward_state = state
# Lists for the forward backward output and state.
output_sequence_f = []
output_sequence_b = []
# Forward pass over the sequence.
with tf.name_scope("forward_rnn"):
core_state = forward_state
for i in six.moves.range(seq_length):
core_output, core_state = self._forward_core(
input_sequence[i, :,], core_state)
output_sequence_f.append((core_output, core_state))
output_sequence_f = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_f)
# Backward pass over the sequence.
with tf.name_scope("backward_rnn"):
core_state = backward_state
for i in six.moves.range(seq_length - 1, -1, -1):
core_output, core_state = self._backward_core(
input_sequence[i, :,], core_state)
output_sequence_b.append((core_output, core_state))
output_sequence_b = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_b)
# Compose the full output and state sequeneces.
return {
"outputs": {
"forward": output_sequence_f[0],
"backward": output_sequence_b[0]
},
"state": {
"forward": output_sequence_f[1],
"backward": output_sequence_b[1]
}
} | ['def', '_build', '(', 'self', ',', 'input_sequence', ',', 'state', ')', ':', 'input_shape', '=', 'input_sequence', '.', 'get_shape', '(', ')', 'if', 'input_shape', '[', '0', ']', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Time dimension of input (dim 0) must be statically"', '"known."', ')', 'seq_length', '=', 'int', '(', 'input_shape', '[', '0', ']', ')', 'forward_state', ',', 'backward_state', '=', 'state', '# Lists for the forward backward output and state.', 'output_sequence_f', '=', '[', ']', 'output_sequence_b', '=', '[', ']', '# Forward pass over the sequence.', 'with', 'tf', '.', 'name_scope', '(', '"forward_rnn"', ')', ':', 'core_state', '=', 'forward_state', 'for', 'i', 'in', 'six', '.', 'moves', '.', 'range', '(', 'seq_length', ')', ':', 'core_output', ',', 'core_state', '=', 'self', '.', '_forward_core', '(', 'input_sequence', '[', 'i', ',', ':', ',', ']', ',', 'core_state', ')', 'output_sequence_f', '.', 'append', '(', '(', 'core_output', ',', 'core_state', ')', ')', 'output_sequence_f', '=', 'nest', '.', 'map_structure', '(', 'lambda', '*', 'vals', ':', 'tf', '.', 'stack', '(', 'vals', ')', ',', '*', 'output_sequence_f', ')', '# Backward pass over the sequence.', 'with', 'tf', '.', 'name_scope', '(', '"backward_rnn"', ')', ':', 'core_state', '=', 'backward_state', 'for', 'i', 'in', 'six', '.', 'moves', '.', 'range', '(', 'seq_length', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'core_output', ',', 'core_state', '=', 'self', '.', '_backward_core', '(', 'input_sequence', '[', 'i', ',', ':', ',', ']', ',', 'core_state', ')', 'output_sequence_b', '.', 'append', '(', '(', 'core_output', ',', 'core_state', ')', ')', 'output_sequence_b', '=', 'nest', '.', 'map_structure', '(', 'lambda', '*', 'vals', ':', 'tf', '.', 'stack', '(', 'vals', ')', ',', '*', 'output_sequence_b', ')', '# Compose the full output and state sequeneces.', 'return', '{', '"outputs"', ':', '{', '"forward"', ':', 'output_sequence_f', '[', '0', ']', ',', '"backward"', ':', 'output_sequence_b', '[', '0', ']', '}', ',', '"state"', ':', '{', '"forward"', ':', 'output_sequence_f', '[', '1', ']', ',', '"backward"', ':', 'output_sequence_b', '[', '1', ']', '}', '}'] | Connects the BidirectionalRNN module into the graph.
Args:
input_sequence: tensor (time, batch, [feature_1, ..]). It must be
time_major.
state: tuple of states for the forward and backward cores.
Returns:
A dict with forward/backward states and output sequences:
"outputs":{
"forward": ...,
"backward": ...},
"state": {
"forward": ...,
"backward": ...}
Raises:
ValueError: in case time dimension is not statically known. | ['Connects', 'the', 'BidirectionalRNN', 'module', 'into', 'the', 'graph', '.'] | train | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/basic_rnn.py#L583-L649 |
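
The module unrolls one core forward in time and a second core backward, then stacks the per-step outputs. A framework-free sketch of that control flow with plain numpy and a trivial stand-in core (not Sonnet's API):

```python
import numpy as np

def toy_core(x, state):
    """Stand-in recurrent core: output and new state are both tanh(x + state)."""
    new_state = np.tanh(x + state)
    return new_state, new_state

seq = np.random.randn(5, 3, 4)     # (time, batch, features)
fwd_state = np.zeros((3, 4))
bwd_state = np.zeros((3, 4))

# Forward pass over the sequence
fwd_outputs = []
for t in range(seq.shape[0]):
    out, fwd_state = toy_core(seq[t], fwd_state)
    fwd_outputs.append(out)

# Backward pass over the sequence
bwd_outputs = []
for t in range(seq.shape[0] - 1, -1, -1):
    out, bwd_state = toy_core(seq[t], bwd_state)
    bwd_outputs.append(out)

outputs = {"forward": np.stack(fwd_outputs), "backward": np.stack(bwd_outputs)}
print(outputs["forward"].shape, outputs["backward"].shape)  # (5, 3, 4) (5, 3, 4)
```
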
2,707 | RRZE-HPC/pycachesim | cachesim/cache.py | MainMemory.store_from | def store_from(self, last_level_store):
"""Set level where to store to."""
assert isinstance(last_level_store, Cache), \
"last_level needs to be a Cache object."
assert last_level_store.store_to is None, \
"last_level_store must be a last level cache (.store_to is None)."
self.last_level_store = last_level_store | python | def store_from(self, last_level_store):
"""Set level where to store to."""
assert isinstance(last_level_store, Cache), \
"last_level needs to be a Cache object."
assert last_level_store.store_to is None, \
"last_level_store must be a last level cache (.store_to is None)."
self.last_level_store = last_level_store | ['def', 'store_from', '(', 'self', ',', 'last_level_store', ')', ':', 'assert', 'isinstance', '(', 'last_level_store', ',', 'Cache', ')', ',', '"last_level needs to be a Cache object."', 'assert', 'last_level_store', '.', 'store_to', 'is', 'None', ',', '"last_level_store must be a last level cache (.store_to is None)."', 'self', '.', 'last_level_store', '=', 'last_level_store'] | Set level where to store to. | ['Set', 'level', 'where', 'to', 'store', 'to', '.'] | train | https://github.com/RRZE-HPC/pycachesim/blob/6dd084d29cf91ec19b016e0db9ccdfc8d1f63c5b/cachesim/cache.py#L449-L455 |
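
`store_from` is the write-path counterpart of `load_to` when wiring main memory to the last-level cache. A hedged setup sketch following pycachesim's README-style usage (the cache geometry values are arbitrary, and the exact API may differ between versions):

```python
from cachesim import CacheSimulator, Cache, MainMemory

mem = MainMemory()

# Arbitrary two-level hierarchy: name, sets, ways, cache-line size, policy
l2 = Cache("L2", 512, 8, 64, "LRU")
l1 = Cache("L1", 64, 8, 64, "LRU", store_to=l2, load_from=l2)

# Main memory loads to and stores from the last-level cache
mem.load_to(l2)
mem.store_from(l2)

cs = CacheSimulator(l1, mem)
cs.load(2342)             # single-byte load
cs.store(512, length=8)   # 8-byte store
```
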
2,708 | belbio/bel | bel/edge/computed.py | compute_edges | def compute_edges(ast: BELAst, spec: BELSpec) -> Edges:
"""Compute edges"""
edges = []
if ast.bel_object.__class__.__name__ == "BELAst":
edges.append(ast.bel_object)
process_ast(edges, ast, spec)
return edges | python | def compute_edges(ast: BELAst, spec: BELSpec) -> Edges:
"""Compute edges"""
edges = []
if ast.bel_object.__class__.__name__ == "BELAst":
edges.append(ast.bel_object)
process_ast(edges, ast, spec)
return edges | ['def', 'compute_edges', '(', 'ast', ':', 'BELAst', ',', 'spec', ':', 'BELSpec', ')', '->', 'Edges', ':', 'edges', '=', '[', ']', 'if', 'ast', '.', 'bel_object', '.', '__class__', '.', '__name__', '==', '"BELAst"', ':', 'edges', '.', 'append', '(', 'ast', '.', 'bel_object', ')', 'process_ast', '(', 'edges', ',', 'ast', ',', 'spec', ')', 'return', 'edges'] | Compute edges | ['Compute', 'edges'] | train | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/computed.py#L23-L31 |
2,709 | snbuback/django_services | django_services/api/api.py | getattr_in_cls_list | def getattr_in_cls_list(cls_list, attr, default):
""" Search for an attribute (attr) in class list (cls_list). Returns
attribute value if exists or None if not. """
for cls in cls_list:
if hasattr(cls, attr):
return getattr(cls, attr)
return default | python | def getattr_in_cls_list(cls_list, attr, default):
""" Search for an attribute (attr) in class list (cls_list). Returns
attribute value if exists or None if not. """
for cls in cls_list:
if hasattr(cls, attr):
return getattr(cls, attr)
return default | ['def', 'getattr_in_cls_list', '(', 'cls_list', ',', 'attr', ',', 'default', ')', ':', 'for', 'cls', 'in', 'cls_list', ':', 'if', 'hasattr', '(', 'cls', ',', 'attr', ')', ':', 'return', 'getattr', '(', 'cls', ',', 'attr', ')', 'return', 'default'] | Search for an attribute (attr) in class list (cls_list). Returns
attribute value if exists or None if not. | ['Search', 'for', 'an', 'attribute', '(', 'attr', ')', 'in', 'class', 'list', '(', 'cls_list', ')', '.', 'Returns', 'attribute', 'value', 'if', 'exists', 'or', 'None', 'if', 'not', '.'] | train | https://github.com/snbuback/django_services/blob/58cbdea878bb11197add0ed1008a9206e4d92671/django_services/api/api.py#L39-L45 |
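
Note that the fallback is the supplied `default` argument, not None as the docstring suggests. A quick usage example, assuming `getattr_in_cls_list` from the row above is in scope and using made-up classes:

```python
class ApiV2:
    pass

class ApiV1:
    page_size = 20

# Takes the attribute from the first class in the list that defines it
print(getattr_in_cls_list([ApiV2, ApiV1], 'page_size', 50))  # -> 20

# Falls back to the supplied default when no class defines it
print(getattr_in_cls_list([ApiV2], 'page_size', 50))         # -> 50
```
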
2,710 | hughsie/python-appstream | appstream/store.py | Store.get_components | def get_components(self):
""" Returns all the applications from the store """
components = []
for app_id in self.components:
components.append(self.components[app_id])
return components | python | def get_components(self):
""" Returns all the applications from the store """
components = []
for app_id in self.components:
components.append(self.components[app_id])
return components | ['def', 'get_components', '(', 'self', ')', ':', 'components', '=', '[', ']', 'for', 'app_id', 'in', 'self', '.', 'components', ':', 'components', '.', 'append', '(', 'self', '.', 'components', '[', 'app_id', ']', ')', 'return', 'components'] | Returns all the applications from the store | ['Returns', 'all', 'the', 'applications', 'from', 'the', 'store'] | train | https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/store.py#L74-L79 |
2,711 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/git/git_client_base.py | GitClientBase.update_repository | def update_repository(self, new_repository_info, repository_id, project=None):
"""UpdateRepository.
[Preview API] Updates the Git repository with either a new repo name or a new default branch.
:param :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>` new_repository_info: Specify a new repo name or a new default branch of the repository
:param str repository_id: The name or ID of the repository.
:param str project: Project ID or project name
:rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
content = self._serialize.body(new_repository_info, 'GitRepository')
response = self._send(http_method='PATCH',
location_id='225f7195-f9c7-4d14-ab28-a83f7ff77e1f',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitRepository', response) | python | def update_repository(self, new_repository_info, repository_id, project=None):
"""UpdateRepository.
[Preview API] Updates the Git repository with either a new repo name or a new default branch.
:param :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>` new_repository_info: Specify a new repo name or a new default branch of the repository
:param str repository_id: The name or ID of the repository.
:param str project: Project ID or project name
:rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
content = self._serialize.body(new_repository_info, 'GitRepository')
response = self._send(http_method='PATCH',
location_id='225f7195-f9c7-4d14-ab28-a83f7ff77e1f',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('GitRepository', response) | ['def', 'update_repository', '(', 'self', ',', 'new_repository_info', ',', 'repository_id', ',', 'project', '=', 'None', ')', ':', 'route_values', '=', '{', '}', 'if', 'project', 'is', 'not', 'None', ':', 'route_values', '[', "'project'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'project'", ',', 'project', ',', "'str'", ')', 'if', 'repository_id', 'is', 'not', 'None', ':', 'route_values', '[', "'repositoryId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'repository_id'", ',', 'repository_id', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'new_repository_info', ',', "'GitRepository'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'PATCH'", ',', 'location_id', '=', "'225f7195-f9c7-4d14-ab28-a83f7ff77e1f'", ',', 'version', '=', "'5.1-preview.1'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'GitRepository'", ',', 'response', ')'] | UpdateRepository.
[Preview API] Updates the Git repository with either a new repo name or a new default branch.
:param :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>` new_repository_info: Specify a new repo name or a new default branch of the repository
:param str repository_id: The name or ID of the repository.
:param str project: Project ID or project name
:rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>` | ['UpdateRepository', '.', '[', 'Preview', 'API', ']', 'Updates', 'the', 'Git', 'repository', 'with', 'either', 'a', 'new', 'repo', 'name', 'or', 'a', 'new', 'default', 'branch', '.', ':', 'param', ':', 'class', ':', '<GitRepository', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'git', '.', 'models', '.', 'GitRepository', '>', 'new_repository_info', ':', 'Specify', 'a', 'new', 'repo', 'name', 'or', 'a', 'new', 'default', 'branch', 'of', 'the', 'repository', ':', 'param', 'str', 'repository_id', ':', 'The', 'name', 'or', 'ID', 'of', 'the', 'repository', '.', ':', 'param', 'str', 'project', ':', 'Project', 'ID', 'or', 'project', 'name', ':', 'rtype', ':', ':', 'class', ':', '<GitRepository', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'git', '.', 'models', '.', 'GitRepository', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L2999-L3018 |
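
A hedged sketch of driving this method through the azure-devops Python client; the organization URL, personal access token, project and repository names are placeholders, and the way the versioned client is obtained can differ between package releases:

```python
from azure.devops.connection import Connection
from azure.devops.v5_1.git.models import GitRepository
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', 'personal-access-token')  # placeholder PAT
connection = Connection(base_url='https://dev.azure.com/my-org', creds=credentials)
git_client = connection.clients.get_git_client()

# Rename a repository: only the fields being changed need to be populated
updated = git_client.update_repository(
    new_repository_info=GitRepository(name='renamed-repo'),
    repository_id='old-repo-name',
    project='MyProject',
)
print(updated.name)
```
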
2,712 | csparpa/pyowm | pyowm/weatherapi25/owm25.py | OWM25.weather_at_places | def weather_at_places(self, pattern, searchtype, limit=None):
"""
Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type: searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved
"""
assert isinstance(pattern, str), "'pattern' must be a str"
assert isinstance(searchtype, str), "'searchtype' must be a str"
if searchtype != "accurate" and searchtype != "like":
raise ValueError("'searchtype' value must be 'accurate' or 'like'")
if limit is not None:
assert isinstance(limit, int), "'limit' must be an int or None"
if limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'q': pattern, 'type': searchtype, 'lang': self._language}
if limit is not None:
# fix for OWM 2.5 API bug!
params['cnt'] = limit - 1
uri = http_client.HttpClient.to_url(FIND_OBSERVATIONS_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['observation_list'].parse_JSON(json_data) | python | def weather_at_places(self, pattern, searchtype, limit=None):
"""
Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type: searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved
"""
assert isinstance(pattern, str), "'pattern' must be a str"
assert isinstance(searchtype, str), "'searchtype' must be a str"
if searchtype != "accurate" and searchtype != "like":
raise ValueError("'searchtype' value must be 'accurate' or 'like'")
if limit is not None:
assert isinstance(limit, int), "'limit' must be an int or None"
if limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'q': pattern, 'type': searchtype, 'lang': self._language}
if limit is not None:
# fix for OWM 2.5 API bug!
params['cnt'] = limit - 1
uri = http_client.HttpClient.to_url(FIND_OBSERVATIONS_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['observation_list'].parse_JSON(json_data) | ['def', 'weather_at_places', '(', 'self', ',', 'pattern', ',', 'searchtype', ',', 'limit', '=', 'None', ')', ':', 'assert', 'isinstance', '(', 'pattern', ',', 'str', ')', ',', '"\'pattern\' must be a str"', 'assert', 'isinstance', '(', 'searchtype', ',', 'str', ')', ',', '"\'searchtype\' must be a str"', 'if', 'searchtype', '!=', '"accurate"', 'and', 'searchtype', '!=', '"like"', ':', 'raise', 'ValueError', '(', '"\'searchtype\' value must be \'accurate\' or \'like\'"', ')', 'if', 'limit', 'is', 'not', 'None', ':', 'assert', 'isinstance', '(', 'limit', ',', 'int', ')', ',', '"\'limit\' must be an int or None"', 'if', 'limit', '<', '1', ':', 'raise', 'ValueError', '(', '"\'limit\' must be None or greater than zero"', ')', 'params', '=', '{', "'q'", ':', 'pattern', ',', "'type'", ':', 'searchtype', ',', "'lang'", ':', 'self', '.', '_language', '}', 'if', 'limit', 'is', 'not', 'None', ':', '# fix for OWM 2.5 API bug!', 'params', '[', "'cnt'", ']', '=', 'limit', '-', '1', 'uri', '=', 'http_client', '.', 'HttpClient', '.', 'to_url', '(', 'FIND_OBSERVATIONS_URL', ',', 'self', '.', '_API_key', ',', 'self', '.', '_subscription_type', ',', 'self', '.', '_use_ssl', ')', '_', ',', 'json_data', '=', 'self', '.', '_wapi', '.', 'cacheable_get_json', '(', 'uri', ',', 'params', '=', 'params', ')', 'return', 'self', '.', '_parsers', '[', "'observation_list'", ']', '.', 'parse_JSON', '(', 'json_data', ')'] | Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type: searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved | ['Queries', 'the', 'OWM', 'Weather', 'API', 'for', 'the', 'currently', 'observed', 'weather', 'in', 'all', 'the', 'locations', 'whose', 'name', 'is', 'matching', 'the', 'specified', 'text', 'search', 'parameters', '.', 'A', 'twofold', 'search', 'can', 'be', 'issued', ':', '*', 'accurate', '*', '(', 'exact', 'matching', ')', 'and', '*', 'like', '*', '(', 'matches', 'names', 'that', 'are', 'similar', 'to', 'the', 'supplied', 'pattern', ')', '.'] | train | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/owm25.py#L338-L378 |
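
A hedged usage sketch against the OWM 2.5-era pyowm interface that this class belongs to (the API key is a placeholder; later pyowm releases changed the entry points):

```python
import pyowm

owm = pyowm.OWM('your-api-key')  # placeholder API key

# Currently observed weather for up to 5 places whose names resemble 'London'
observations = owm.weather_at_places('London', 'like', limit=5)

for obs in observations:
    location = obs.get_location()
    weather = obs.get_weather()
    print(location.get_name(), weather.get_status())
```
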
2,713 | Alignak-monitoring/alignak | alignak/daemons/schedulerdaemon.py | Alignak.clean_previous_run | def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear() | python | def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear() | ['def', 'clean_previous_run', '(', 'self', ')', ':', '# Execute the base class treatment...', 'super', '(', 'Alignak', ',', 'self', ')', '.', 'clean_previous_run', '(', ')', '# Clean all lists', 'self', '.', 'pollers', '.', 'clear', '(', ')', 'self', '.', 'reactionners', '.', 'clear', '(', ')', 'self', '.', 'brokers', '.', 'clear', '(', ')'] | Clean variables from previous configuration
:return: None | ['Clean', 'variables', 'from', 'previous', 'configuration'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L503-L514 |
2,714 | Opentrons/opentrons | api/src/opentrons/hardware_control/__init__.py | API.set_lights | def set_lights(self, button: bool = None, rails: bool = None):
""" Control the robot lights.
:param button: If specified, turn the button light on (`True`) or
off (`False`). If not specified, do not change the
button light.
:param rails: If specified, turn the rail lights on (`True`) or
off (`False`). If not specified, do not change the
rail lights.
"""
self._backend.set_lights(button, rails) | python | def set_lights(self, button: bool = None, rails: bool = None):
""" Control the robot lights.
:param button: If specified, turn the button light on (`True`) or
off (`False`). If not specified, do not change the
button light.
:param rails: If specified, turn the rail lights on (`True`) or
off (`False`). If not specified, do not change the
rail lights.
"""
self._backend.set_lights(button, rails) | ['def', 'set_lights', '(', 'self', ',', 'button', ':', 'bool', '=', 'None', ',', 'rails', ':', 'bool', '=', 'None', ')', ':', 'self', '.', '_backend', '.', 'set_lights', '(', 'button', ',', 'rails', ')'] | Control the robot lights.
:param button: If specified, turn the button light on (`True`) or
off (`False`). If not specified, do not change the
button light.
:param rails: If specified, turn the rail lights on (`True`) or
off (`False`). If not specified, do not change the
rail lights. | ['Control', 'the', 'robot', 'lights', '.'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/hardware_control/__init__.py#L205-L215 |
2,715 | softlayer/softlayer-python | SoftLayer/managers/network.py | NetworkManager.get_subnet | def get_subnet(self, subnet_id, **kwargs):
"""Returns information about a single subnet.
:param string id: Either the ID for the subnet or its network
identifier
:returns: A dictionary of information about the subnet
"""
if 'mask' not in kwargs:
kwargs['mask'] = DEFAULT_SUBNET_MASK
return self.subnet.getObject(id=subnet_id, **kwargs) | python | def get_subnet(self, subnet_id, **kwargs):
"""Returns information about a single subnet.
:param string id: Either the ID for the subnet or its network
identifier
:returns: A dictionary of information about the subnet
"""
if 'mask' not in kwargs:
kwargs['mask'] = DEFAULT_SUBNET_MASK
return self.subnet.getObject(id=subnet_id, **kwargs) | ['def', 'get_subnet', '(', 'self', ',', 'subnet_id', ',', '*', '*', 'kwargs', ')', ':', 'if', "'mask'", 'not', 'in', 'kwargs', ':', 'kwargs', '[', "'mask'", ']', '=', 'DEFAULT_SUBNET_MASK', 'return', 'self', '.', 'subnet', '.', 'getObject', '(', 'id', '=', 'subnet_id', ',', '*', '*', 'kwargs', ')'] | Returns information about a single subnet.
:param string id: Either the ID for the subnet or its network
identifier
:returns: A dictionary of information about the subnet | ['Returns', 'information', 'about', 'a', 'single', 'subnet', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/network.py#L391-L401 |
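
A hedged usage sketch via the SoftLayer client (credentials are read from the environment or ~/.softlayer; the subnet id is a placeholder):

```python
import SoftLayer

client = SoftLayer.create_client_from_env()   # picks up credentials from env/config
network_mgr = SoftLayer.NetworkManager(client)

subnet = network_mgr.get_subnet(1234)         # placeholder subnet id
print(subnet['id'], subnet.get('networkIdentifier'))
```
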
2,716 | MolSSI-BSE/basis_set_exchange | basis_set_exchange/sort.py | sort_single_reference | def sort_single_reference(ref_entry):
"""Sorts a dictionary containing data for a single reference into a standard order
"""
# yapf: disable
_keyorder = [
# Schema stuff
# This function gets called on the schema 'entry', too
'schema_type', 'schema_version',
# Type of the entry
'type',
# Actual publication info
'authors', 'title', 'booktitle', 'series', 'editors', 'journal',
'institution', 'volume', 'number', 'page', 'year', 'note', 'publisher',
'address', 'isbn', 'doi'
]
# yapf: enable
sorted_entry = sorted(ref_entry.items(), key=lambda x: _keyorder.index(x[0]))
if _use_odict:
return OrderedDict(sorted_entry)
else:
return dict(sorted_entry) | python | def sort_single_reference(ref_entry):
"""Sorts a dictionary containing data for a single reference into a standard order
"""
# yapf: disable
_keyorder = [
# Schema stuff
# This function gets called on the schema 'entry', too
'schema_type', 'schema_version',
# Type of the entry
'type',
# Actual publication info
'authors', 'title', 'booktitle', 'series', 'editors', 'journal',
'institution', 'volume', 'number', 'page', 'year', 'note', 'publisher',
'address', 'isbn', 'doi'
]
# yapf: enable
sorted_entry = sorted(ref_entry.items(), key=lambda x: _keyorder.index(x[0]))
if _use_odict:
return OrderedDict(sorted_entry)
else:
return dict(sorted_entry) | ['def', 'sort_single_reference', '(', 'ref_entry', ')', ':', '# yapf: disable', '_keyorder', '=', '[', '# Schema stuff', "# This function gets called on the schema 'entry', too", "'schema_type'", ',', "'schema_version'", ',', '# Type of the entry', "'type'", ',', '# Actual publication info', "'authors'", ',', "'title'", ',', "'booktitle'", ',', "'series'", ',', "'editors'", ',', "'journal'", ',', "'institution'", ',', "'volume'", ',', "'number'", ',', "'page'", ',', "'year'", ',', "'note'", ',', "'publisher'", ',', "'address'", ',', "'isbn'", ',', "'doi'", ']', '# yapf: enable', 'sorted_entry', '=', 'sorted', '(', 'ref_entry', '.', 'items', '(', ')', ',', 'key', '=', 'lambda', 'x', ':', '_keyorder', '.', 'index', '(', 'x', '[', '0', ']', ')', ')', 'if', '_use_odict', ':', 'return', 'OrderedDict', '(', 'sorted_entry', ')', 'else', ':', 'return', 'dict', '(', 'sorted_entry', ')'] | Sorts a dictionary containing data for a single reference into a standard order | ['Sorts', 'a', 'dictionary', 'containing', 'data', 'for', 'a', 'single', 'reference', 'into', 'a', 'standard', 'order'] | train | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/sort.py#L190-L215 |
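
A quick example of the ordering this produces, assuming `sort_single_reference` from the row above is importable (the sample entry is the Dunning 1989 correlation-consistent basis set paper):

```python
ref = {
    'year': '1989',
    'doi': '10.1063/1.456153',
    'journal': 'J. Chem. Phys.',
    'authors': ['T. H. Dunning'],
    'title': 'Gaussian basis sets for use in correlated molecular calculations',
    'type': 'article',
}

sorted_ref = sort_single_reference(ref)
print(list(sorted_ref.keys()))
# -> ['type', 'authors', 'title', 'journal', 'year', 'doi']
```
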
2,717 | lago-project/lago | lago/prefix.py | Prefix.cleanup | def cleanup(self):
"""
Stops any running entities in the prefix and uninitializes it, usually
you want to do this if you are going to remove the prefix afterwards
Returns:
None
"""
with LogTask('Stop prefix'):
self.stop()
with LogTask("Tag prefix as uninitialized"):
os.unlink(self.paths.prefix_lagofile()) | python | def cleanup(self):
"""
Stops any running entities in the prefix and uninitializes it, usually
you want to do this if you are going to remove the prefix afterwards
Returns:
None
"""
with LogTask('Stop prefix'):
self.stop()
with LogTask("Tag prefix as uninitialized"):
os.unlink(self.paths.prefix_lagofile()) | ['def', 'cleanup', '(', 'self', ')', ':', 'with', 'LogTask', '(', "'Stop prefix'", ')', ':', 'self', '.', 'stop', '(', ')', 'with', 'LogTask', '(', '"Tag prefix as uninitialized"', ')', ':', 'os', '.', 'unlink', '(', 'self', '.', 'paths', '.', 'prefix_lagofile', '(', ')', ')'] | Stops any running entities in the prefix and uninitializes it, usually
you want to do this if you are going to remove the prefix afterwards
Returns:
None | ['Stops', 'any', 'running', 'entities', 'in', 'the', 'prefix', 'and', 'uninitializes', 'it', 'usually', 'you', 'want', 'to', 'do', 'this', 'if', 'you', 'are', 'going', 'to', 'remove', 'the', 'prefix', 'afterwards'] | train | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/prefix.py#L228-L239 |
2,718 | necrolyte2/bootstrap_vi | bootstrap_vi.py | bootstrap_vi | def bootstrap_vi(version=None, venvargs=None):
'''
Bootstrap virtualenv into current directory
:param str version: Virtualenv version like 13.1.0 or None for latest version
:param list venvargs: argv list for virtualenv.py or None for default
'''
if not version:
version = get_latest_virtualenv_version()
tarball = download_virtualenv(version)
p = subprocess.Popen('tar xzvf {0}'.format(tarball), shell=True)
p.wait()
p = 'virtualenv-{0}'.format(version)
create_virtualenv(p, venvargs) | python | def bootstrap_vi(version=None, venvargs=None):
'''
Bootstrap virtualenv into current directory
:param str version: Virtualenv version like 13.1.0 or None for latest version
:param list venvargs: argv list for virtualenv.py or None for default
'''
if not version:
version = get_latest_virtualenv_version()
tarball = download_virtualenv(version)
p = subprocess.Popen('tar xzvf {0}'.format(tarball), shell=True)
p.wait()
p = 'virtualenv-{0}'.format(version)
create_virtualenv(p, venvargs) | ['def', 'bootstrap_vi', '(', 'version', '=', 'None', ',', 'venvargs', '=', 'None', ')', ':', 'if', 'not', 'version', ':', 'version', '=', 'get_latest_virtualenv_version', '(', ')', 'tarball', '=', 'download_virtualenv', '(', 'version', ')', 'p', '=', 'subprocess', '.', 'Popen', '(', "'tar xzvf {0}'", '.', 'format', '(', 'tarball', ')', ',', 'shell', '=', 'True', ')', 'p', '.', 'wait', '(', ')', 'p', '=', "'virtualenv-{0}'", '.', 'format', '(', 'version', ')', 'create_virtualenv', '(', 'p', ',', 'venvargs', ')'] | Bootstrap virtualenv into current directory
:param str version: Virtualenv version like 13.1.0 or None for latest version
:param list venvargs: argv list for virtualenv.py or None for default | ['Bootstrap', 'virtualenv', 'into', 'current', 'directory'] | train | https://github.com/necrolyte2/bootstrap_vi/blob/cde96df76ecea1850cd26c2234ac13b3420d64dd/bootstrap_vi.py#L112-L125 |
2,719 | spacetelescope/drizzlepac | drizzlepac/processInput.py | addIVMInputs | def addIVMInputs(imageObjectList,ivmlist):
""" Add IVM filenames provided by user to outputNames dictionary for each input imageObject.
"""
if ivmlist is None:
return
for img,ivmname in zip(imageObjectList,ivmlist):
img.updateIVMName(ivmname) | python | def addIVMInputs(imageObjectList,ivmlist):
""" Add IVM filenames provided by user to outputNames dictionary for each input imageObject.
"""
if ivmlist is None:
return
for img,ivmname in zip(imageObjectList,ivmlist):
img.updateIVMName(ivmname) | ['def', 'addIVMInputs', '(', 'imageObjectList', ',', 'ivmlist', ')', ':', 'if', 'ivmlist', 'is', 'None', ':', 'return', 'for', 'img', ',', 'ivmname', 'in', 'zip', '(', 'imageObjectList', ',', 'ivmlist', ')', ':', 'img', '.', 'updateIVMName', '(', 'ivmname', ')'] | Add IVM filenames provided by user to outputNames dictionary for each input imageObject. | ['Add', 'IVM', 'filenames', 'provided', 'by', 'user', 'to', 'outputNames', 'dictionary', 'for', 'each', 'input', 'imageObject', '.'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/processInput.py#L313-L320 |
2,720 | LIVVkit/LIVVkit | livvkit/components/verification.py | _print_summary | def _print_summary(case, summary):
""" Show some statistics from the run """
for dof, data in summary.items():
b4b = data["Bit for Bit"]
conf = data["Configurations"]
stdout = data["Std. Out Files"]
print(" " + case + " " + str(dof))
print(" --------------------")
print(" Bit for bit matches : " + str(b4b[0]) + " of " + str(b4b[1]))
print(" Configuration matches : " + str(conf[0]) + " of " + str(conf[1]))
print(" Std. Out files parsed : " + str(stdout))
print("") | python | def _print_summary(case, summary):
""" Show some statistics from the run """
for dof, data in summary.items():
b4b = data["Bit for Bit"]
conf = data["Configurations"]
stdout = data["Std. Out Files"]
print(" " + case + " " + str(dof))
print(" --------------------")
print(" Bit for bit matches : " + str(b4b[0]) + " of " + str(b4b[1]))
print(" Configuration matches : " + str(conf[0]) + " of " + str(conf[1]))
print(" Std. Out files parsed : " + str(stdout))
print("") | ['def', '_print_summary', '(', 'case', ',', 'summary', ')', ':', 'for', 'dof', ',', 'data', 'in', 'summary', '.', 'items', '(', ')', ':', 'b4b', '=', 'data', '[', '"Bit for Bit"', ']', 'conf', '=', 'data', '[', '"Configurations"', ']', 'stdout', '=', 'data', '[', '"Std. Out Files"', ']', 'print', '(', '" "', '+', 'case', '+', '" "', '+', 'str', '(', 'dof', ')', ')', 'print', '(', '" --------------------"', ')', 'print', '(', '" Bit for bit matches : "', '+', 'str', '(', 'b4b', '[', '0', ']', ')', '+', '" of "', '+', 'str', '(', 'b4b', '[', '1', ']', ')', ')', 'print', '(', '" Configuration matches : "', '+', 'str', '(', 'conf', '[', '0', ']', ')', '+', '" of "', '+', 'str', '(', 'conf', '[', '1', ']', ')', ')', 'print', '(', '" Std. Out files parsed : "', '+', 'str', '(', 'stdout', ')', ')', 'print', '(', '""', ')'] | Show some statistics from the run | ['Show', 'some', 'statistics', 'from', 'the', 'run'] | train | https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L100-L111 |
2,721 | GNS3/gns3-server | gns3server/compute/dynamips/nios/nio.py | NIO.bind_filter | def bind_filter(self, direction, filter_name):
"""
Adds a packet filter to this NIO.
Filter "freq_drop" drops packets.
Filter "capture" captures packets.
:param direction: "in", "out" or "both"
:param filter_name: name of the filter to apply
"""
if direction not in self._dynamips_direction:
raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name))
dynamips_direction = self._dynamips_direction[direction]
yield from self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name,
direction=dynamips_direction,
filter=filter_name))
if direction == "in":
self._input_filter = filter_name
elif direction == "out":
self._output_filter = filter_name
elif direction == "both":
self._input_filter = filter_name
self._output_filter = filter_name | python | def bind_filter(self, direction, filter_name):
"""
Adds a packet filter to this NIO.
Filter "freq_drop" drops packets.
Filter "capture" captures packets.
:param direction: "in", "out" or "both"
:param filter_name: name of the filter to apply
"""
if direction not in self._dynamips_direction:
raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name))
dynamips_direction = self._dynamips_direction[direction]
yield from self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name,
direction=dynamips_direction,
filter=filter_name))
if direction == "in":
self._input_filter = filter_name
elif direction == "out":
self._output_filter = filter_name
elif direction == "both":
self._input_filter = filter_name
self._output_filter = filter_name | ['def', 'bind_filter', '(', 'self', ',', 'direction', ',', 'filter_name', ')', ':', 'if', 'direction', 'not', 'in', 'self', '.', '_dynamips_direction', ':', 'raise', 'DynamipsError', '(', '"Unknown direction {} to bind filter {}:"', '.', 'format', '(', 'direction', ',', 'filter_name', ')', ')', 'dynamips_direction', '=', 'self', '.', '_dynamips_direction', '[', 'direction', ']', 'yield', 'from', 'self', '.', '_hypervisor', '.', 'send', '(', '"nio bind_filter {name} {direction} {filter}"', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'direction', '=', 'dynamips_direction', ',', 'filter', '=', 'filter_name', ')', ')', 'if', 'direction', '==', '"in"', ':', 'self', '.', '_input_filter', '=', 'filter_name', 'elif', 'direction', '==', '"out"', ':', 'self', '.', '_output_filter', '=', 'filter_name', 'elif', 'direction', '==', '"both"', ':', 'self', '.', '_input_filter', '=', 'filter_name', 'self', '.', '_output_filter', '=', 'filter_name'] | Adds a packet filter to this NIO.
Filter "freq_drop" drops packets.
Filter "capture" captures packets.
:param direction: "in", "out" or "both"
:param filter_name: name of the filter to apply | ['Adds', 'a', 'packet', 'filter', 'to', 'this', 'NIO', '.', 'Filter', 'freq_drop', 'drops', 'packets', '.', 'Filter', 'capture', 'captures', 'packets', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nios/nio.py#L95-L119 |
2,722 | adamchainz/django-mysql | django_mysql/models/handler.py | Handler._extract_where | def _extract_where(cls, queryset):
"""
Was this a queryset with filters/excludes/expressions set? If so,
extract the WHERE clause from the ORM output so we can use it in the
handler queries.
"""
if not cls._is_simple_query(queryset.query):
raise ValueError("This QuerySet's WHERE clause is too complex to "
"be used in a HANDLER")
sql, params = queryset.query.sql_with_params()
where_pos = sql.find('WHERE ')
if where_pos != -1:
# Cut the query to extract just its WHERE clause
where_clause = sql[where_pos:]
# Replace absolute table.column references with relative ones
# since that is all HANDLER can work with
# This is a bit flakey - if you inserted extra SQL with extra() or
# an expression or something it might break.
where_clause, _ = cls.absolute_col_re.subn(r"\1", where_clause)
return (where_clause, params)
else:
return ("", ()) | python | def _extract_where(cls, queryset):
"""
Was this a queryset with filters/excludes/expressions set? If so,
extract the WHERE clause from the ORM output so we can use it in the
handler queries.
"""
if not cls._is_simple_query(queryset.query):
raise ValueError("This QuerySet's WHERE clause is too complex to "
"be used in a HANDLER")
sql, params = queryset.query.sql_with_params()
where_pos = sql.find('WHERE ')
if where_pos != -1:
# Cut the query to extract just its WHERE clause
where_clause = sql[where_pos:]
# Replace absolute table.column references with relative ones
# since that is all HANDLER can work with
# This is a bit flakey - if you inserted extra SQL with extra() or
# an expression or something it might break.
where_clause, _ = cls.absolute_col_re.subn(r"\1", where_clause)
return (where_clause, params)
else:
return ("", ()) | ['def', '_extract_where', '(', 'cls', ',', 'queryset', ')', ':', 'if', 'not', 'cls', '.', '_is_simple_query', '(', 'queryset', '.', 'query', ')', ':', 'raise', 'ValueError', '(', '"This QuerySet\'s WHERE clause is too complex to "', '"be used in a HANDLER"', ')', 'sql', ',', 'params', '=', 'queryset', '.', 'query', '.', 'sql_with_params', '(', ')', 'where_pos', '=', 'sql', '.', 'find', '(', "'WHERE '", ')', 'if', 'where_pos', '!=', '-', '1', ':', '# Cut the query to extract just its WHERE clause', 'where_clause', '=', 'sql', '[', 'where_pos', ':', ']', '# Replace absolute table.column references with relative ones', '# since that is all HANDLER can work with', '# This is a bit flakey - if you inserted extra SQL with extra() or', '# an expression or something it might break.', 'where_clause', ',', '_', '=', 'cls', '.', 'absolute_col_re', '.', 'subn', '(', 'r"\\1"', ',', 'where_clause', ')', 'return', '(', 'where_clause', ',', 'params', ')', 'else', ':', 'return', '(', '""', ',', '(', ')', ')'] | Was this a queryset with filters/excludes/expressions set? If so,
extract the WHERE clause from the ORM output so we can use it in the
handler queries. | ['Was', 'this', 'a', 'queryset', 'with', 'filters', '/', 'excludes', '/', 'expressions', 'set?', 'If', 'so', 'extract', 'the', 'WHERE', 'clause', 'from', 'the', 'ORM', 'output', 'so', 'we', 'can', 'use', 'it', 'in', 'the', 'handler', 'queries', '.'] | train | https://github.com/adamchainz/django-mysql/blob/967daa4245cf55c9bc5dc018e560f417c528916a/django_mysql/models/handler.py#L189-L211 |
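A standalone sketch of the WHERE-clause extraction idea above; the absolute_col_re pattern here is an assumed stand-in rather than the actual regex defined on Handler.

import re

# Assumed stand-in for Handler.absolute_col_re; the real pattern may differ.
absolute_col_re = re.compile(r"`[^`]+`\.(`[^`]+`)")

def extract_where(sql):
    # Keep only the WHERE clause and strip table-qualified column names.
    pos = sql.find('WHERE ')
    if pos == -1:
        return ""
    where_clause = sql[pos:]
    where_clause, _ = absolute_col_re.subn(r"\1", where_clause)
    return where_clause

print(extract_where("SELECT * FROM `t` WHERE `t`.`name` = %s"))
# -> WHERE `name` = %s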
2,723 | opereto/pyopereto | pyopereto/client.py | OperetoClient.search_agents | def search_agents(self, start=0, limit=100, filter={}, **kwargs):
'''
search_agents(self, start=0, limit=100, filter={}, **kwargs)
Search agents
:Parameters:
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in agent data and properties)
:return: List of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my Agent'}
search_result = opereto_client.search_agents(filter=filter)
'''
request_data = {'start': start, 'limit': limit, 'filter': filter}
request_data.update(kwargs)
return self._call_rest_api('post', '/search/agents', data=request_data, error='Failed to search agents') | python | def search_agents(self, start=0, limit=100, filter={}, **kwargs):
'''
search_agents(self, start=0, limit=100, filter={}, **kwargs)
Search agents
:Parameters:
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in agent data and properties)
:return: List of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my Agent'}
search_result = opereto_client.search_agents(filter=filter)
'''
request_data = {'start': start, 'limit': limit, 'filter': filter}
request_data.update(kwargs)
return self._call_rest_api('post', '/search/agents', data=request_data, error='Failed to search agents') | ['def', 'search_agents', '(', 'self', ',', 'start', '=', '0', ',', 'limit', '=', '100', ',', 'filter', '=', '{', '}', ',', '*', '*', 'kwargs', ')', ':', 'request_data', '=', '{', "'start'", ':', 'start', ',', "'limit'", ':', 'limit', ',', "'filter'", ':', 'filter', '}', 'request_data', '.', 'update', '(', 'kwargs', ')', 'return', 'self', '.', '_call_rest_api', '(', "'post'", ',', "'/search/agents'", ',', 'data', '=', 'request_data', ',', 'error', '=', "'Failed to search agents'", ')'] | search_agents(self, start=0, limit=100, filter={}, **kwargs)
Search agents
:Parameters:
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in agent data and properties)
:return: List of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my Agent'}
search_result = opereto_client.search_agents(filter=filter) | ['search_agents', '(', 'self', 'start', '=', '0', 'limit', '=', '100', 'filter', '=', '{}', '**', 'kwargs', ')'] | train | https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L701-L723 |
2,724 | chibisov/drf-extensions | docs/backdoc.py | _slugify | def _slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('utf-8')
if word:
result.append(word)
slugified = delim.join([i.decode('utf-8') for i in result])
return re.sub('[^a-zA-Z0-9\\s\\-]{1}', replace_char, slugified).lower() | python | def _slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('utf-8')
if word:
result.append(word)
slugified = delim.join([i.decode('utf-8') for i in result])
return re.sub('[^a-zA-Z0-9\\s\\-]{1}', replace_char, slugified).lower() | ['def', '_slugify', '(', 'text', ',', 'delim', '=', "u'-'", ')', ':', 'result', '=', '[', ']', 'for', 'word', 'in', '_punct_re', '.', 'split', '(', 'text', '.', 'lower', '(', ')', ')', ':', 'word', '=', 'word', '.', 'encode', '(', "'utf-8'", ')', 'if', 'word', ':', 'result', '.', 'append', '(', 'word', ')', 'slugified', '=', 'delim', '.', 'join', '(', '[', 'i', '.', 'decode', '(', "'utf-8'", ')', 'for', 'i', 'in', 'result', ']', ')', 'return', 're', '.', 'sub', '(', "'[^a-zA-Z0-9\\\\s\\\\-]{1}'", ',', 'replace_char', ',', 'slugified', ')', '.', 'lower', '(', ')'] | Generates an ASCII-only slug. | ['Generates', 'an', 'ASCII', '-', 'only', 'slug', '.'] | train | https://github.com/chibisov/drf-extensions/blob/1d28a4b28890eab5cd19e93e042f8590c8c2fb8b/docs/backdoc.py#L1961-L1969 |
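A simplified, self-contained variant of the slug logic above, since _punct_re and replace_char are defined elsewhere in backdoc.py; the punctuation pattern below is an assumption and the replace_char substitution step is omitted.

import re

# Assumed, simplified stand-in for backdoc's _punct_re helper.
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')

def slugify(text, delim=u'-'):
    words = [w for w in _punct_re.split(text.lower()) if w]
    return delim.join(words)

print(slugify("Nested routers & ViewSets!"))  # -> nested-routers-viewsets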
2,725 | gmr/tinman | tinman/application.py | Application._prepare_version | def _prepare_version(self):
"""Setup the application version"""
if config.VERSION not in self._config:
self._config[config.VERSION] = __version__ | python | def _prepare_version(self):
"""Setup the application version"""
if config.VERSION not in self._config:
self._config[config.VERSION] = __version__ | ['def', '_prepare_version', '(', 'self', ')', ':', 'if', 'config', '.', 'VERSION', 'not', 'in', 'self', '.', '_config', ':', 'self', '.', '_config', '[', 'config', '.', 'VERSION', ']', '=', '__version__'] | Setup the application version | ['Setup', 'the', 'application', 'version'] | train | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/application.py#L242-L245 |
2,726 | pkgw/pwkit | pwkit/lmmin.py | _calc_covariance | def _calc_covariance(r, pmut, tol=1e-14):
"""Calculate the covariance matrix of the fitted parameters
Parameters:
r - n-by-n matrix, the full upper triangle of R
pmut - n-vector, defines the permutation of R
tol - scalar, relative column scale for determining rank
deficiency. Default 1e-14.
Returns:
cov - n-by-n matrix, the covariance matrix C
Given an n-by-n matrix A, the corresponding covariance matrix
is
C = inverse(A^T A)
This routine is given information relating to the pivoted transposed
QR factorization of A, which is defined by matrices such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is a
lower triangular matrix with diagonal elements of nonincreasing
magnitude. In particular we take the full lower triangle of R ('r')
and a vector describing P ('pmut'). The covariance matrix is then
C = P inverse(R^T R) P^T
If A is nearly rank-deficient, it may be desirable to compute the
covariance matrix corresponding to the linearly-independent columns of
A. We use a tolerance, 'tol', to define the numerical rank of A. If j
is the largest integer such that |R[j,j]| > tol*|R[0,0]|, then we
compute the covariance matrix for the first j columns of R. For k > j,
the corresponding covariance entries (pmut[k]) are set to zero.
"""
# This routine could save an allocation by operating on r in-place,
# which might be worthwhile for large n, and is what the original
# Fortran does.
n = r.shape[1]
assert r.shape[0] >= n
r = r.copy()
# Form the inverse of R in the full lower triangle of R.
jrank = -1
abstol = tol * abs(r[0,0])
for i in range(n):
if abs(r[i,i]) <= abstol:
break
r[i,i] **= -1
for j in range(i):
temp = r[i,i] * r[i,j]
r[i,j] = 0.
r[i,:j+1] -= temp * r[j,:j+1]
jrank = i
# Form the full lower triangle of the inverse(R^T R) in the full
# lower triangle of R.
for i in range(jrank + 1):
for j in range(i):
r[j,:j+1] += r[i,j] * r[i,:j+1]
r[i,:i+1] *= r[i,i]
# Form the full upper triangle of the covariance matrix in the
# strict upper triangle of R and in wa.
wa = np.empty(n)
wa.fill(r[0,0])
for i in range(n):
pi = pmut[i]
sing = i > jrank
for j in range(i + 1):
if sing:
r[i,j] = 0.
pj = pmut[j]
if pj > pi:
r[pi,pj] = r[i,j]
elif pj < pi:
r[pj,pi] = r[i,j]
wa[pi] = r[i,i]
# Symmetrize.
for i in range(n):
r[i,:i+1] = r[:i+1,i]
r[i,i] = wa[i]
return r | python | def _calc_covariance(r, pmut, tol=1e-14):
"""Calculate the covariance matrix of the fitted parameters
Parameters:
r - n-by-n matrix, the full upper triangle of R
pmut - n-vector, defines the permutation of R
tol - scalar, relative column scale for determining rank
deficiency. Default 1e-14.
Returns:
cov - n-by-n matrix, the covariance matrix C
Given an n-by-n matrix A, the corresponding covariance matrix
is
C = inverse(A^T A)
This routine is given information relating to the pivoted transposed
QR factorization of A, which is defined by matrices such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is a
lower triangular matrix with diagonal elements of nonincreasing
magnitude. In particular we take the full lower triangle of R ('r')
and a vector describing P ('pmut'). The covariance matrix is then
C = P inverse(R^T R) P^T
If A is nearly rank-deficient, it may be desirable to compute the
covariance matrix corresponding to the linearly-independent columns of
A. We use a tolerance, 'tol', to define the numerical rank of A. If j
is the largest integer such that |R[j,j]| > tol*|R[0,0]|, then we
compute the covariance matrix for the first j columns of R. For k > j,
the corresponding covariance entries (pmut[k]) are set to zero.
"""
# This routine could save an allocation by operating on r in-place,
# which might be worthwhile for large n, and is what the original
# Fortran does.
n = r.shape[1]
assert r.shape[0] >= n
r = r.copy()
# Form the inverse of R in the full lower triangle of R.
jrank = -1
abstol = tol * abs(r[0,0])
for i in range(n):
if abs(r[i,i]) <= abstol:
break
r[i,i] **= -1
for j in range(i):
temp = r[i,i] * r[i,j]
r[i,j] = 0.
r[i,:j+1] -= temp * r[j,:j+1]
jrank = i
# Form the full lower triangle of the inverse(R^T R) in the full
# lower triangle of R.
for i in range(jrank + 1):
for j in range(i):
r[j,:j+1] += r[i,j] * r[i,:j+1]
r[i,:i+1] *= r[i,i]
# Form the full upper triangle of the covariance matrix in the
# strict upper triangle of R and in wa.
wa = np.empty(n)
wa.fill(r[0,0])
for i in range(n):
pi = pmut[i]
sing = i > jrank
for j in range(i + 1):
if sing:
r[i,j] = 0.
pj = pmut[j]
if pj > pi:
r[pi,pj] = r[i,j]
elif pj < pi:
r[pj,pi] = r[i,j]
wa[pi] = r[i,i]
# Symmetrize.
for i in range(n):
r[i,:i+1] = r[:i+1,i]
r[i,i] = wa[i]
return r | ['def', '_calc_covariance', '(', 'r', ',', 'pmut', ',', 'tol', '=', '1e-14', ')', ':', '# This routine could save an allocation by operating on r in-place,', '# which might be worthwhile for large n, and is what the original', '# Fortran does.', 'n', '=', 'r', '.', 'shape', '[', '1', ']', 'assert', 'r', '.', 'shape', '[', '0', ']', '>=', 'n', 'r', '=', 'r', '.', 'copy', '(', ')', '# Form the inverse of R in the full lower triangle of R.', 'jrank', '=', '-', '1', 'abstol', '=', 'tol', '*', 'abs', '(', 'r', '[', '0', ',', '0', ']', ')', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'if', 'abs', '(', 'r', '[', 'i', ',', 'i', ']', ')', '<=', 'abstol', ':', 'break', 'r', '[', 'i', ',', 'i', ']', '**=', '-', '1', 'for', 'j', 'in', 'range', '(', 'i', ')', ':', 'temp', '=', 'r', '[', 'i', ',', 'i', ']', '*', 'r', '[', 'i', ',', 'j', ']', 'r', '[', 'i', ',', 'j', ']', '=', '0.', 'r', '[', 'i', ',', ':', 'j', '+', '1', ']', '-=', 'temp', '*', 'r', '[', 'j', ',', ':', 'j', '+', '1', ']', 'jrank', '=', 'i', '# Form the full lower triangle of the inverse(R^T R) in the full', '# lower triangle of R.', 'for', 'i', 'in', 'range', '(', 'jrank', '+', '1', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', ')', ':', 'r', '[', 'j', ',', ':', 'j', '+', '1', ']', '+=', 'r', '[', 'i', ',', 'j', ']', '*', 'r', '[', 'i', ',', ':', 'j', '+', '1', ']', 'r', '[', 'i', ',', ':', 'i', '+', '1', ']', '*=', 'r', '[', 'i', ',', 'i', ']', '# Form the full upper triangle of the covariance matrix in the', '# strict upper triangle of R and in wa.', 'wa', '=', 'np', '.', 'empty', '(', 'n', ')', 'wa', '.', 'fill', '(', 'r', '[', '0', ',', '0', ']', ')', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'pi', '=', 'pmut', '[', 'i', ']', 'sing', '=', 'i', '>', 'jrank', 'for', 'j', 'in', 'range', '(', 'i', '+', '1', ')', ':', 'if', 'sing', ':', 'r', '[', 'i', ',', 'j', ']', '=', '0.', 'pj', '=', 'pmut', '[', 'j', ']', 'if', 'pj', '>', 'pi', ':', 'r', '[', 'pi', ',', 'pj', ']', '=', 'r', '[', 'i', ',', 'j', ']', 'elif', 'pj', '<', 'pi', ':', 'r', '[', 'pj', ',', 'pi', ']', '=', 'r', '[', 'i', ',', 'j', ']', 'wa', '[', 'pi', ']', '=', 'r', '[', 'i', ',', 'i', ']', '# Symmetrize.', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'r', '[', 'i', ',', ':', 'i', '+', '1', ']', '=', 'r', '[', ':', 'i', '+', '1', ',', 'i', ']', 'r', '[', 'i', ',', 'i', ']', '=', 'wa', '[', 'i', ']', 'return', 'r'] | Calculate the covariance matrix of the fitted parameters
Parameters:
r - n-by-n matrix, the full upper triangle of R
pmut - n-vector, defines the permutation of R
tol - scalar, relative column scale for determining rank
deficiency. Default 1e-14.
Returns:
cov - n-by-n matrix, the covariance matrix C
Given an n-by-n matrix A, the corresponding covariance matrix
is
C = inverse(A^T A)
This routine is given information relating to the pivoted transposed
QR factorization of A, which is defined by matrices such that
A P = R Q
where P is a permutation matrix, Q has orthogonal rows, and R is a
lower triangular matrix with diagonal elements of nonincreasing
magnitude. In particular we take the full lower triangle of R ('r')
and a vector describing P ('pmut'). The covariance matrix is then
C = P inverse(R^T R) P^T
If A is nearly rank-deficient, it may be desirable to compute the
covariance matrix corresponding to the linearly-independent columns of
A. We use a tolerance, 'tol', to define the numerical rank of A. If j
is the largest integer such that |R[j,j]| > tol*|R[0,0]|, then we
compute the covariance matrix for the first j columns of R. For k > j,
the corresponding covariance entries (pmut[k]) are set to zero. | ['Calculate', 'the', 'covariance', 'matrix', 'of', 'the', 'fitted', 'parameters'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L1064-L1162 |
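The identity C = P inverse(R^T R) P^T described above can be sanity-checked numerically with SciPy's column-pivoted QR (the standard, non-transposed convention); this is an independent illustration of the algebra rather than a call into the routine, and it ignores the usual sigma^2 scale factor on the covariance.

import numpy as np
from scipy.linalg import qr

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))

# Column-pivoted QR: A[:, piv] = Q @ R
Q, R, piv = qr(A, mode='economic', pivoting=True)

# Covariance of least-squares parameters, up to the sigma^2 factor.
C_direct = np.linalg.inv(A.T @ A)

# Same matrix reassembled from R and the permutation, as in the docstring.
C_from_qr = np.empty((5, 5))
C_from_qr[np.ix_(piv, piv)] = np.linalg.inv(R.T @ R)

print(np.allclose(C_direct, C_from_qr))  # True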
2,727 | spyder-ide/spyder-notebook | spyder_notebook/notebookplugin.py | NotebookPlugin.add_to_recent | def add_to_recent(self, notebook):
"""
Add an entry to recent notebooks.
We only maintain the list of the 20 most recent notebooks.
"""
if notebook not in self.recent_notebooks:
self.recent_notebooks.insert(0, notebook)
self.recent_notebooks = self.recent_notebooks[:20] | python | def add_to_recent(self, notebook):
"""
Add an entry to recent notebooks.
We only maintain the list of the 20 most recent notebooks.
"""
if notebook not in self.recent_notebooks:
self.recent_notebooks.insert(0, notebook)
self.recent_notebooks = self.recent_notebooks[:20] | ['def', 'add_to_recent', '(', 'self', ',', 'notebook', ')', ':', 'if', 'notebook', 'not', 'in', 'self', '.', 'recent_notebooks', ':', 'self', '.', 'recent_notebooks', '.', 'insert', '(', '0', ',', 'notebook', ')', 'self', '.', 'recent_notebooks', '=', 'self', '.', 'recent_notebooks', '[', ':', '20', ']'] | Add an entry to recent notebooks.
We only maintain the list of the 20 most recent notebooks. | ['Add', 'an', 'entry', 'to', 'recent', 'notebooks', '.', 'We', 'only', 'maintain', 'the', 'list', 'of', 'the', '20', 'most', 'recent', 'notebooks', '.'] | train | https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/notebookplugin.py#L259-L267 |
2,728 | tensorflow/tensorboard | tensorboard/plugins/interactive_inference/utils/inference_utils.py | get_label_vocab | def get_label_vocab(vocab_path):
"""Returns a list of label strings loaded from the provided path."""
if vocab_path:
try:
with tf.io.gfile.GFile(vocab_path, 'r') as f:
return [line.rstrip('\n') for line in f]
except tf.errors.NotFoundError as err:
tf.logging.error('error reading vocab file: %s', err)
return [] | python | def get_label_vocab(vocab_path):
"""Returns a list of label strings loaded from the provided path."""
if vocab_path:
try:
with tf.io.gfile.GFile(vocab_path, 'r') as f:
return [line.rstrip('\n') for line in f]
except tf.errors.NotFoundError as err:
tf.logging.error('error reading vocab file: %s', err)
return [] | ['def', 'get_label_vocab', '(', 'vocab_path', ')', ':', 'if', 'vocab_path', ':', 'try', ':', 'with', 'tf', '.', 'io', '.', 'gfile', '.', 'GFile', '(', 'vocab_path', ',', "'r'", ')', 'as', 'f', ':', 'return', '[', 'line', '.', 'rstrip', '(', "'\\n'", ')', 'for', 'line', 'in', 'f', ']', 'except', 'tf', '.', 'errors', '.', 'NotFoundError', 'as', 'err', ':', 'tf', '.', 'logging', '.', 'error', '(', "'error reading vocab file: %s'", ',', 'err', ')', 'return', '[', ']'] | Returns a list of label strings loaded from the provided path. | ['Returns', 'a', 'list', 'of', 'label', 'strings', 'loaded', 'from', 'the', 'provided', 'path', '.'] | train | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/utils/inference_utils.py#L649-L657 |
2,729 | theiviaxx/Frog | frog/models.py | Video.generateThumbnail | def generateThumbnail(self):
"""Generates a square thumbnail"""
source = ROOT / self.source.name
thumbnail = source.parent / '_{}.jpg'.format(source.namebase)
# -- Save thumbnail and put into queue
poster = source.parent / '__{}.jpg'.format(source.namebase)
cmd = [FROG_FFMPEG, '-i', str(source), '-ss', '1', '-vframes', '1', str(thumbnail), '-y']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc.communicate()
image = pilImage.open(thumbnail)
image.save(poster)
self.poster = poster.replace(ROOT, '')
box, width, height = cropBox(self.width, self.height)
# Resize
image.thumbnail((width, height), pilImage.ANTIALIAS)
# Crop from center
box = cropBox(*image.size)[0]
image = image.crop(box)
# save
self.thumbnail = thumbnail.replace(ROOT, '')
image.save(thumbnail) | python | def generateThumbnail(self):
"""Generates a square thumbnail"""
source = ROOT / self.source.name
thumbnail = source.parent / '_{}.jpg'.format(source.namebase)
# -- Save thumbnail and put into queue
poster = source.parent / '__{}.jpg'.format(source.namebase)
cmd = [FROG_FFMPEG, '-i', str(source), '-ss', '1', '-vframes', '1', str(thumbnail), '-y']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc.communicate()
image = pilImage.open(thumbnail)
image.save(poster)
self.poster = poster.replace(ROOT, '')
box, width, height = cropBox(self.width, self.height)
# Resize
image.thumbnail((width, height), pilImage.ANTIALIAS)
# Crop from center
box = cropBox(*image.size)[0]
image = image.crop(box)
# save
self.thumbnail = thumbnail.replace(ROOT, '')
image.save(thumbnail) | ['def', 'generateThumbnail', '(', 'self', ')', ':', 'source', '=', 'ROOT', '/', 'self', '.', 'source', '.', 'name', 'thumbnail', '=', 'source', '.', 'parent', '/', "'_{}.jpg'", '.', 'format', '(', 'source', '.', 'namebase', ')', '# -- Save thumbnail and put into queue', 'poster', '=', 'source', '.', 'parent', '/', "'__{}.jpg'", '.', 'format', '(', 'source', '.', 'namebase', ')', 'cmd', '=', '[', 'FROG_FFMPEG', ',', "'-i'", ',', 'str', '(', 'source', ')', ',', "'-ss'", ',', "'1'", ',', "'-vframes'", ',', "'1'", ',', 'str', '(', 'thumbnail', ')', ',', "'-y'", ']', 'proc', '=', 'subprocess', '.', 'Popen', '(', 'cmd', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'STDOUT', ')', 'proc', '.', 'communicate', '(', ')', 'image', '=', 'pilImage', '.', 'open', '(', 'thumbnail', ')', 'image', '.', 'save', '(', 'poster', ')', 'self', '.', 'poster', '=', 'poster', '.', 'replace', '(', 'ROOT', ',', "''", ')', 'box', ',', 'width', ',', 'height', '=', 'cropBox', '(', 'self', '.', 'width', ',', 'self', '.', 'height', ')', '# Resize', 'image', '.', 'thumbnail', '(', '(', 'width', ',', 'height', ')', ',', 'pilImage', '.', 'ANTIALIAS', ')', '# Crop from center', 'box', '=', 'cropBox', '(', '*', 'image', '.', 'size', ')', '[', '0', ']', 'image', '=', 'image', '.', 'crop', '(', 'box', ')', '# save', 'self', '.', 'thumbnail', '=', 'thumbnail', '.', 'replace', '(', 'ROOT', ',', "''", ')', 'image', '.', 'save', '(', 'thumbnail', ')'] | Generates a square thumbnail | ['Generates', 'a', 'square', 'thumbnail'] | train | https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/models.py#L441-L464 |
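cropBox is used above but not shown in this record; a plausible center-square crop helper, written with assumed semantics for illustration, might look like this.

def crop_box(width, height):
    # Return (box, new_width, new_height) for a centered square crop;
    # these semantics are an assumption about how cropBox is used above.
    side = min(width, height)
    left = (width - side) // 2
    top = (height - side) // 2
    box = (left, top, left + side, top + side)
    return box, side, side

print(crop_box(1920, 1080))  # ((420, 0, 1500, 1080), 1080, 1080)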
2,730 | saltstack/salt | salt/daemons/masterapi.py | AutoKey.check_autosign | def check_autosign(self, keyid, autosign_grains=None):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.check_autosign_dir(keyid):
return True
if self.check_autosign_grains(autosign_grains):
return True
return False | python | def check_autosign(self, keyid, autosign_grains=None):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.check_autosign_dir(keyid):
return True
if self.check_autosign_grains(autosign_grains):
return True
return False | ['def', 'check_autosign', '(', 'self', ',', 'keyid', ',', 'autosign_grains', '=', 'None', ')', ':', 'if', 'self', '.', 'opts', '[', "'auto_accept'", ']', ':', 'return', 'True', 'if', 'self', '.', 'check_signing_file', '(', 'keyid', ',', 'self', '.', 'opts', '.', 'get', '(', "'autosign_file'", ',', 'None', ')', ')', ':', 'return', 'True', 'if', 'self', '.', 'check_autosign_dir', '(', 'keyid', ')', ':', 'return', 'True', 'if', 'self', '.', 'check_autosign_grains', '(', 'autosign_grains', ')', ':', 'return', 'True', 'return', 'False'] | Checks if the specified keyid should automatically be signed. | ['Checks', 'if', 'the', 'specified', 'keyid', 'should', 'automatically', 'be', 'signed', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L427-L439 |
2,731 | wandb/client | wandb/vendor/prompt_toolkit/contrib/telnet/server.py | TelnetServer._process_callbacks | def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c() | python | def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c() | ['def', '_process_callbacks', '(', 'self', ')', ':', '# Flush all the pipe content.', 'os', '.', 'read', '(', 'self', '.', '_schedule_pipe', '[', '0', ']', ',', '1024', ')', '# Process calls from executor.', 'calls_from_executor', ',', 'self', '.', '_calls_from_executor', '=', 'self', '.', '_calls_from_executor', ',', '[', ']', 'for', 'c', 'in', 'calls_from_executor', ':', 'c', '(', ')'] | Process callbacks from `call_from_executor` in eventloop. | ['Process', 'callbacks', 'from', 'call_from_executor', 'in', 'eventloop', '.'] | train | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/contrib/telnet/server.py#L338-L348 |
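The method above drains a self-pipe and runs callbacks queued by call_from_executor; a standalone sketch of that self-pipe wake-up pattern, independent of prompt_toolkit, is shown below.

import os
import threading

read_fd, write_fd = os.pipe()
calls_from_executor = []
lock = threading.Lock()

def call_from_executor(callback):
    # Queue a callback, then wake the reader by writing one byte to the pipe.
    with lock:
        calls_from_executor.append(callback)
    os.write(write_fd, b'\0')

def process_callbacks():
    # Block until woken, flush the pipe, then run everything queued so far.
    global calls_from_executor
    os.read(read_fd, 1024)
    with lock:
        pending, calls_from_executor = calls_from_executor, []
    for callback in pending:
        callback()

threading.Thread(target=call_from_executor, args=(lambda: print("woken up"),)).start()
process_callbacks()  # prints "woken up" once the byte arrives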
2,732 | epfl-lts2/pygsp | pygsp/filters/filter.py | Filter.synthesize | def synthesize(self, s, method='chebyshev', order=30):
r"""Convenience wrapper around :meth:`filter`.
Will be an alias to `adjoint().filter()` in the future.
"""
if s.shape[-1] != self.Nf:
raise ValueError('Last dimension (#features) should be the number '
'of filters Nf = {}, got {}.'.format(self.Nf,
s.shape))
return self.filter(s, method, order) | python | def synthesize(self, s, method='chebyshev', order=30):
r"""Convenience wrapper around :meth:`filter`.
Will be an alias to `adjoint().filter()` in the future.
"""
if s.shape[-1] != self.Nf:
raise ValueError('Last dimension (#features) should be the number '
'of filters Nf = {}, got {}.'.format(self.Nf,
s.shape))
return self.filter(s, method, order) | ['def', 'synthesize', '(', 'self', ',', 's', ',', 'method', '=', "'chebyshev'", ',', 'order', '=', '30', ')', ':', 'if', 's', '.', 'shape', '[', '-', '1', ']', '!=', 'self', '.', 'Nf', ':', 'raise', 'ValueError', '(', "'Last dimension (#features) should be the number '", "'of filters Nf = {}, got {}.'", '.', 'format', '(', 'self', '.', 'Nf', ',', 's', '.', 'shape', ')', ')', 'return', 'self', '.', 'filter', '(', 's', ',', 'method', ',', 'order', ')'] | r"""Convenience wrapper around :meth:`filter`.
Will be an alias to `adjoint().filter()` in the future. | ['r', 'Convenience', 'wrapper', 'around', ':', 'meth', ':', 'filter', '.'] | train | https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/filters/filter.py#L341-L350 |
2,733 | nvbn/thefuck | thefuck/types.py | Rule.from_path | def from_path(cls, path):
"""Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
"""
name = path.name[:-3]
with logs.debug_time(u'Importing rule: {};'.format(name)):
rule_module = load_source(name, str(path))
priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
return cls(name, rule_module.match,
rule_module.get_new_command,
getattr(rule_module, 'enabled_by_default', True),
getattr(rule_module, 'side_effect', None),
settings.priority.get(name, priority),
getattr(rule_module, 'requires_output', True)) | python | def from_path(cls, path):
"""Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
"""
name = path.name[:-3]
with logs.debug_time(u'Importing rule: {};'.format(name)):
rule_module = load_source(name, str(path))
priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
return cls(name, rule_module.match,
rule_module.get_new_command,
getattr(rule_module, 'enabled_by_default', True),
getattr(rule_module, 'side_effect', None),
settings.priority.get(name, priority),
getattr(rule_module, 'requires_output', True)) | ['def', 'from_path', '(', 'cls', ',', 'path', ')', ':', 'name', '=', 'path', '.', 'name', '[', ':', '-', '3', ']', 'with', 'logs', '.', 'debug_time', '(', "u'Importing rule: {};'", '.', 'format', '(', 'name', ')', ')', ':', 'rule_module', '=', 'load_source', '(', 'name', ',', 'str', '(', 'path', ')', ')', 'priority', '=', 'getattr', '(', 'rule_module', ',', "'priority'", ',', 'DEFAULT_PRIORITY', ')', 'return', 'cls', '(', 'name', ',', 'rule_module', '.', 'match', ',', 'rule_module', '.', 'get_new_command', ',', 'getattr', '(', 'rule_module', ',', "'enabled_by_default'", ',', 'True', ')', ',', 'getattr', '(', 'rule_module', ',', "'side_effect'", ',', 'None', ')', ',', 'settings', '.', 'priority', '.', 'get', '(', 'name', ',', 'priority', ')', ',', 'getattr', '(', 'rule_module', ',', "'requires_output'", ',', 'True', ')', ')'] | Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule | ['Creates', 'rule', 'instance', 'from', 'path', '.'] | train | https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/types.py#L131-L147 |
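load_source above comes from a compatibility helper; a rough standalone equivalent that loads a rule module by path with importlib (an assumption for illustration, not thefuck's actual helper) is shown below.

import importlib.util
import pathlib
import tempfile

# Write a tiny "rule" module to disk, then load it by path.
rule_src = (
    "priority = 900\n"
    "def match(command): return 'git' in command\n"
    "def get_new_command(command): return command.replace('gti', 'git')\n"
)
path = pathlib.Path(tempfile.mkdtemp()) / "git_typo.py"
path.write_text(rule_src)

spec = importlib.util.spec_from_file_location(path.stem, str(path))
rule_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(rule_module)

print(rule_module.priority, rule_module.get_new_command("gti status"))
# -> 900 git status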
2,734 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | MAVLink.mission_write_partial_list_encode | def mission_write_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
This message is sent to the MAV to write a partial list. If start
index == end index, only one item will be transmitted
/ updated. If the start index is NOT 0 and above the
current list size, this request should be REJECTED!
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t)
end_index : End index, equal or greater than start index. (int16_t)
'''
return MAVLink_mission_write_partial_list_message(target_system, target_component, start_index, end_index) | python | def mission_write_partial_list_encode(self, target_system, target_component, start_index, end_index):
'''
This message is sent to the MAV to write a partial list. If start
index == end index, only one item will be transmitted
/ updated. If the start index is NOT 0 and above the
current list size, this request should be REJECTED!
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t)
end_index : End index, equal or greater than start index. (int16_t)
'''
return MAVLink_mission_write_partial_list_message(target_system, target_component, start_index, end_index) | ['def', 'mission_write_partial_list_encode', '(', 'self', ',', 'target_system', ',', 'target_component', ',', 'start_index', ',', 'end_index', ')', ':', 'return', 'MAVLink_mission_write_partial_list_message', '(', 'target_system', ',', 'target_component', ',', 'start_index', ',', 'end_index', ')'] | This message is sent to the MAV to write a partial list. If start
index == end index, only one item will be transmitted
/ updated. If the start index is NOT 0 and above the
current list size, this request should be REJECTED!
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t)
end_index : End index, equal or greater than start index. (int16_t) | ['This', 'message', 'is', 'sent', 'to', 'the', 'MAV', 'to', 'write', 'a', 'partial', 'list', '.', 'If', 'start', 'index', '==', 'end', 'index', 'only', 'one', 'item', 'will', 'be', 'transmitted', '/', 'updated', '.', 'If', 'the', 'start', 'index', 'is', 'NOT', '0', 'and', 'above', 'the', 'current', 'list', 'size', 'this', 'request', 'should', 'be', 'REJECTED!'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9437-L9450 |
2,735 | ltalirz/aiida-phtools | aiida_phtools/calculations/distance_matrix.py | DistanceMatrixCalculation._validate_inputs | def _validate_inputs(self, inputdict):
""" Validate input links.
"""
# Check code
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this "
"calculation")
# Check input files
try:
surface_sample = inputdict.pop(self.get_linkname('surface_sample'))
if not isinstance(surface_sample, SinglefileData):
raise InputValidationError(
"surface_sample not of type SinglefileData")
except KeyError:
raise InputValidationError(
"No input structure specified for calculation")
try:
cell = inputdict.pop(self.get_linkname('cell'))
if not isinstance(cell, SinglefileData):
raise InputValidationError("cell not of type SinglefileData")
except KeyError:
raise InputValidationError(
"No input structure specified for calculation")
# Check that nothing is left unparsed
if inputdict:
raise ValidationError("Unrecognized inputs: {}".format(inputdict))
return code, surface_sample, cell | python | def _validate_inputs(self, inputdict):
""" Validate input links.
"""
# Check code
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this "
"calculation")
# Check input files
try:
surface_sample = inputdict.pop(self.get_linkname('surface_sample'))
if not isinstance(surface_sample, SinglefileData):
raise InputValidationError(
"surface_sample not of type SinglefileData")
except KeyError:
raise InputValidationError(
"No input structure specified for calculation")
try:
cell = inputdict.pop(self.get_linkname('cell'))
if not isinstance(cell, SinglefileData):
raise InputValidationError("cell not of type SinglefileData")
except KeyError:
raise InputValidationError(
"No input structure specified for calculation")
# Check that nothing is left unparsed
if inputdict:
raise ValidationError("Unrecognized inputs: {}".format(inputdict))
return code, surface_sample, cell | ['def', '_validate_inputs', '(', 'self', ',', 'inputdict', ')', ':', '# Check code', 'try', ':', 'code', '=', 'inputdict', '.', 'pop', '(', 'self', '.', 'get_linkname', '(', "'code'", ')', ')', 'except', 'KeyError', ':', 'raise', 'InputValidationError', '(', '"No code specified for this "', '"calculation"', ')', '# Check input files', 'try', ':', 'surface_sample', '=', 'inputdict', '.', 'pop', '(', 'self', '.', 'get_linkname', '(', "'surface_sample'", ')', ')', 'if', 'not', 'isinstance', '(', 'surface_sample', ',', 'SinglefileData', ')', ':', 'raise', 'InputValidationError', '(', '"surface_sample not of type SinglefileData"', ')', 'except', 'KeyError', ':', 'raise', 'InputValidationError', '(', '"No input structure specified for calculation"', ')', 'try', ':', 'cell', '=', 'inputdict', '.', 'pop', '(', 'self', '.', 'get_linkname', '(', "'cell'", ')', ')', 'if', 'not', 'isinstance', '(', 'cell', ',', 'SinglefileData', ')', ':', 'raise', 'InputValidationError', '(', '"cell not of type SinglefileData"', ')', 'except', 'KeyError', ':', 'raise', 'InputValidationError', '(', '"No input structure specified for calculation"', ')', '# Check that nothing is left unparsed', 'if', 'inputdict', ':', 'raise', 'ValidationError', '(', '"Unrecognized inputs: {}"', '.', 'format', '(', 'inputdict', ')', ')', 'return', 'code', ',', 'surface_sample', ',', 'cell'] | Validate input links. | ['Validate', 'input', 'links', '.'] | train | https://github.com/ltalirz/aiida-phtools/blob/acec3339425fe92d3f55e725a199123de9a1febc/aiida_phtools/calculations/distance_matrix.py#L60-L92 |
2,736 | 255BITS/hyperchamber | examples/shared/variational_autoencoder.py | VariationalAutoencoder.transform | def transform(self, X):
"""Transform data by mapping it into the latent space."""
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
return self.sess.run(self.z_mean, feed_dict={self.x: X}) | python | def transform(self, X):
"""Transform data by mapping it into the latent space."""
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
return self.sess.run(self.z_mean, feed_dict={self.x: X}) | ['def', 'transform', '(', 'self', ',', 'X', ')', ':', '# Note: This maps to mean of distribution, we could alternatively', '# sample from Gaussian distribution', 'return', 'self', '.', 'sess', '.', 'run', '(', 'self', '.', 'z_mean', ',', 'feed_dict', '=', '{', 'self', '.', 'x', ':', 'X', '}', ')'] | Transform data by mapping it into the latent space. | ['Transform', 'data', 'by', 'mapping', 'it', 'into', 'the', 'latent', 'space', '.'] | train | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/variational_autoencoder.py#L164-L168 |
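The comment above points out that transform returns the mean of the latent distribution rather than a sample; a small NumPy sketch of the two options, detached from the TensorFlow graph in this class, with made-up values:

import numpy as np

rng = np.random.default_rng(1)
z_mean = np.array([[0.5, -1.0], [2.0, 0.1]])
z_log_sigma_sq = np.array([[-2.0, -2.0], [-1.0, -3.0]])

# Deterministic encoding: just the mean (what transform() returns).
z_deterministic = z_mean

# Stochastic alternative: sample z = mu + sigma * eps (reparameterization).
eps = rng.standard_normal(z_mean.shape)
z_sampled = z_mean + np.sqrt(np.exp(z_log_sigma_sq)) * eps

print(z_deterministic)
print(z_sampled)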
2,737 | T-002/pycast | bin/examples/LivingRoomEnergy/server.py | holtWinters | def holtWinters(request):
"""
Performs Holt Winters Smoothing on the given post data.
Expects the following values set in the post of the request:
smoothingFactor - float
trendSmoothingFactor - float
seasonSmoothingFactor - float
seasonLength - integer
valuesToForecast - integer
data - two dimensional array of [timestamp, value]
"""
#Parse arguments
smoothingFactor = float(request.POST.get('smoothingFactor', 0.2))
trendSmoothingFactor = float(request.POST.get('trendSmoothingFactor', 0.3))
seasonSmoothingFactor = float(request.POST.get('seasonSmoothingFactor', 0.4))
seasonLength = int(request.POST.get('seasonLength', 6))
valuesToForecast = int(request.POST.get('valuesToForecast', 0))
data = json.loads(request.POST.get('data', []))
#perform smoothing
hwm = HoltWintersMethod(smoothingFactor = smoothingFactor,
trendSmoothingFactor = trendSmoothingFactor,
seasonSmoothingFactor = seasonSmoothingFactor,
seasonLength = seasonLength,
valuesToForecast = valuesToForecast)
original = TimeSeries.from_twodim_list(data)
original.set_timeformat("%d.%m")
smoothed = hwm.execute(original)
smoothed.set_timeformat("%d.%m")
error = SMAPE()
error.initialize(original, smoothed)
#process the result
result = { 'original': original,
'smoothed': smoothed,
'error': round(error.get_error(), 3)
}
return itty.Response(json.dumps(result, cls=PycastEncoder), content_type='application/json') | python | def holtWinters(request):
"""
Performs Holt Winters Smoothing on the given post data.
Expects the following values set in the post of the request:
smoothingFactor - float
trendSmoothingFactor - float
seasonSmoothingFactor - float
seasonLength - integer
valuesToForecast - integer
data - two dimensional array of [timestamp, value]
"""
#Parse arguments
smoothingFactor = float(request.POST.get('smoothingFactor', 0.2))
trendSmoothingFactor = float(request.POST.get('trendSmoothingFactor', 0.3))
seasonSmoothingFactor = float(request.POST.get('seasonSmoothingFactor', 0.4))
seasonLength = int(request.POST.get('seasonLength', 6))
valuesToForecast = int(request.POST.get('valuesToForecast', 0))
data = json.loads(request.POST.get('data', []))
#perform smoothing
hwm = HoltWintersMethod(smoothingFactor = smoothingFactor,
trendSmoothingFactor = trendSmoothingFactor,
seasonSmoothingFactor = seasonSmoothingFactor,
seasonLength = seasonLength,
valuesToForecast = valuesToForecast)
original = TimeSeries.from_twodim_list(data)
original.set_timeformat("%d.%m")
smoothed = hwm.execute(original)
smoothed.set_timeformat("%d.%m")
error = SMAPE()
error.initialize(original, smoothed)
#process the result
result = { 'original': original,
'smoothed': smoothed,
'error': round(error.get_error(), 3)
}
return itty.Response(json.dumps(result, cls=PycastEncoder), content_type='application/json') | ['def', 'holtWinters', '(', 'request', ')', ':', '#Parse arguments', 'smoothingFactor', '=', 'float', '(', 'request', '.', 'POST', '.', 'get', '(', "'smoothingFactor'", ',', '0.2', ')', ')', 'trendSmoothingFactor', '=', 'float', '(', 'request', '.', 'POST', '.', 'get', '(', "'trendSmoothingFactor'", ',', '0.3', ')', ')', 'seasonSmoothingFactor', '=', 'float', '(', 'request', '.', 'POST', '.', 'get', '(', "'seasonSmoothingFactor'", ',', '0.4', ')', ')', 'seasonLength', '=', 'int', '(', 'request', '.', 'POST', '.', 'get', '(', "'seasonLength'", ',', '6', ')', ')', 'valuesToForecast', '=', 'int', '(', 'request', '.', 'POST', '.', 'get', '(', "'valuesToForecast'", ',', '0', ')', ')', 'data', '=', 'json', '.', 'loads', '(', 'request', '.', 'POST', '.', 'get', '(', "'data'", ',', '[', ']', ')', ')', '#perform smoothing', 'hwm', '=', 'HoltWintersMethod', '(', 'smoothingFactor', '=', 'smoothingFactor', ',', 'trendSmoothingFactor', '=', 'trendSmoothingFactor', ',', 'seasonSmoothingFactor', '=', 'seasonSmoothingFactor', ',', 'seasonLength', '=', 'seasonLength', ',', 'valuesToForecast', '=', 'valuesToForecast', ')', 'original', '=', 'TimeSeries', '.', 'from_twodim_list', '(', 'data', ')', 'original', '.', 'set_timeformat', '(', '"%d.%m"', ')', 'smoothed', '=', 'hwm', '.', 'execute', '(', 'original', ')', 'smoothed', '.', 'set_timeformat', '(', '"%d.%m"', ')', 'error', '=', 'SMAPE', '(', ')', 'error', '.', 'initialize', '(', 'original', ',', 'smoothed', ')', '#process the result', 'result', '=', '{', "'original'", ':', 'original', ',', "'smoothed'", ':', 'smoothed', ',', "'error'", ':', 'round', '(', 'error', '.', 'get_error', '(', ')', ',', '3', ')', '}', 'return', 'itty', '.', 'Response', '(', 'json', '.', 'dumps', '(', 'result', ',', 'cls', '=', 'PycastEncoder', ')', ',', 'content_type', '=', "'application/json'", ')'] | Performs Holt Winters Smoothing on the given post data.
Expects the following values set in the post of the request:
smoothingFactor - float
trendSmoothingFactor - float
seasonSmoothingFactor - float
seasonLength - integer
valuesToForecast - integer
data - two dimensional array of [timestamp, value] | ['Performs', 'Holt', 'Winters', 'Smoothing', 'on', 'the', 'given', 'post', 'data', '.', 'Expects', 'the', 'following', 'values', 'set', 'in', 'the', 'post', 'of', 'the', 'request', ':', 'smoothingFactor', '-', 'float', 'trendSmoothingFactor', '-', 'float', 'seasonSmoothingFactor', '-', 'float', 'seasonLength', '-', 'integer', 'valuesToForecast', '-', 'integer', 'data', '-', 'two', 'dimensional', 'array', 'of', '[', 'timestamp', 'value', ']'] | train | https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/bin/examples/LivingRoomEnergy/server.py#L87-L125 |
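A sketch of the form payload this handler expects; the route and port are assumptions, since they depend on how the itty server is mounted.

import json

payload = {
    'smoothingFactor': 0.2,
    'trendSmoothingFactor': 0.3,
    'seasonSmoothingFactor': 0.4,
    'seasonLength': 6,
    'valuesToForecast': 3,
    # two-dimensional [timestamp, value] pairs, JSON-encoded as the handler expects
    'data': json.dumps([[1356994800, 12.4], [1357081200, 13.1], [1357167600, 11.8]]),
}

# e.g. requests.post('http://localhost:8080/holtWinters', data=payload)  # assumed URL
print(payload['data'])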
2,738 | onnx/onnxmltools | onnxmltools/convert/sparkml/_parse.py | _parse_sparkml | def _parse_sparkml(spark, scope, model, global_inputs, output_dict):
'''
    This is a delegate function. It does nothing but invoke the correct parsing function according to the input
model's type.
:param scope: Scope object
:param model: A spark-ml object (e.g., OneHotEncoder and LogisticRegression)
:param inputs: A list of variables
:return: The output variables produced by the input model
'''
if isinstance(model, PipelineModel):
return _parse_sparkml_pipeline(spark, scope, model, global_inputs, output_dict)
else:
return _parse_sparkml_simple_model(spark, scope, model, global_inputs, output_dict) | python | def _parse_sparkml(spark, scope, model, global_inputs, output_dict):
'''
    This is a delegate function. It does nothing but invoke the correct parsing function according to the input
model's type.
:param scope: Scope object
:param model: A spark-ml object (e.g., OneHotEncoder and LogisticRegression)
:param inputs: A list of variables
:return: The output variables produced by the input model
'''
if isinstance(model, PipelineModel):
return _parse_sparkml_pipeline(spark, scope, model, global_inputs, output_dict)
else:
        return _parse_sparkml_simple_model(spark, scope, model, global_inputs, output_dict) | ['def', '_parse_sparkml', '(', 'spark', ',', 'scope', ',', 'model', ',', 'global_inputs', ',', 'output_dict', ')', ':', 'if', 'isinstance', '(', 'model', ',', 'PipelineModel', ')', ':', 'return', '_parse_sparkml_pipeline', '(', 'spark', ',', 'scope', ',', 'model', ',', 'global_inputs', ',', 'output_dict', ')', 'else', ':', 'return', '_parse_sparkml_simple_model', '(', 'spark', ',', 'scope', ',', 'model', ',', 'global_inputs', ',', 'output_dict', ')'] | This is a delegate function. It does nothing but invoke the correct parsing function according to the input
model's type.
:param scope: Scope object
:param model: A spark-ml object (e.g., OneHotEncoder and LogisticRegression)
:param inputs: A list of variables
:return: The output variables produced by the input model | ['This', 'is', 'a', 'delegate', 'function', '.', 'It', 'doesn', 't', 'nothing', 'but', 'invoke', 'the', 'correct', 'parsing', 'function', 'according', 'to', 'the', 'input', 'model', 's', 'type', '.', ':', 'param', 'scope', ':', 'Scope', 'object', ':', 'param', 'model', ':', 'A', 'spark', '-', 'ml', 'object', '(', 'e', '.', 'g', '.', 'OneHotEncoder', 'and', 'LogisticRegression', ')', ':', 'param', 'inputs', ':', 'A', 'list', 'of', 'variables', ':', 'return', ':', 'The', 'output', 'variables', 'produced', 'by', 'the', 'input', 'model'] | train | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/sparkml/_parse.py#L77-L89 |
2,739 | flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | CMADataLogger.plot_all | def plot_all(self, fig=None, iabscissa=1, iteridx=None,
foffset=1e-19, x_opt=None, fontsize=9):
"""
plot data from a `CMADataLogger` (using the files written by the logger).
Arguments
---------
`fig`
figure number, by default 425
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot_all()
cma.savefig('fig425.png') # save current figure
logger.closefig()
        Dependencies: matplotlib/pyplot.
"""
try:
            # pyplot: procedural interface for matplotlib
from matplotlib.pyplot import figure, subplot, gcf
except ImportError:
            raise ImportError('could not find matplotlib.pyplot module, function plot() is not available')
return
if fig is None:
fig = 426
if iabscissa not in (0, 1):
iabscissa = 1
self.load()
dat = self
# index out some data
if iteridx is not None:
self.select_data(iteridx)
if len(dat.f) == 0:
print('nothing to plot')
return
# not in use anymore, see formatter above
# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
# dfit(dfit<1e-98) = NaN;
# TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
figure(fig)
self._enter_plotting(fontsize)
self.fighandle = gcf() # fighandle.number
if 1 < 3:
subplot(2, 3, 1)
self.plot_divers(iabscissa, foffset)
pyplot.xlabel('')
# standard deviations
subplot(2, 3, 4)
self.plot_stds(iabscissa)
# Scaling
subplot(2, 3, 2)
self.plot_axes_scaling(iabscissa)
pyplot.xlabel('')
# spectrum of correlation matrix
subplot(2, 3, 5)
self.plot_correlations(iabscissa)
# x-vectors
subplot(2, 3, 3)
self.plot_xrecent(iabscissa, x_opt)
pyplot.xlabel('')
subplot(2, 3, 6)
self.plot_mean(iabscissa, x_opt)
self._finalize_plotting()
return self | python | def plot_all(self, fig=None, iabscissa=1, iteridx=None,
foffset=1e-19, x_opt=None, fontsize=9):
"""
plot data from a `CMADataLogger` (using the files written by the logger).
Arguments
---------
`fig`
figure number, by default 425
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot_all()
cma.savefig('fig425.png') # save current figure
logger.closefig()
        Dependencies: matplotlib/pyplot.
"""
try:
            # pyplot: procedural interface for matplotlib
from matplotlib.pyplot import figure, subplot, gcf
except ImportError:
            raise ImportError('could not find matplotlib.pyplot module, function plot() is not available')
return
if fig is None:
fig = 426
if iabscissa not in (0, 1):
iabscissa = 1
self.load()
dat = self
# index out some data
if iteridx is not None:
self.select_data(iteridx)
if len(dat.f) == 0:
print('nothing to plot')
return
# not in use anymore, see formatter above
# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
# dfit(dfit<1e-98) = NaN;
# TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
figure(fig)
self._enter_plotting(fontsize)
self.fighandle = gcf() # fighandle.number
if 1 < 3:
subplot(2, 3, 1)
self.plot_divers(iabscissa, foffset)
pyplot.xlabel('')
# standard deviations
subplot(2, 3, 4)
self.plot_stds(iabscissa)
# Scaling
subplot(2, 3, 2)
self.plot_axes_scaling(iabscissa)
pyplot.xlabel('')
# spectrum of correlation matrix
subplot(2, 3, 5)
self.plot_correlations(iabscissa)
# x-vectors
subplot(2, 3, 3)
self.plot_xrecent(iabscissa, x_opt)
pyplot.xlabel('')
subplot(2, 3, 6)
self.plot_mean(iabscissa, x_opt)
self._finalize_plotting()
return self | ['def', 'plot_all', '(', 'self', ',', 'fig', '=', 'None', ',', 'iabscissa', '=', '1', ',', 'iteridx', '=', 'None', ',', 'foffset', '=', '1e-19', ',', 'x_opt', '=', 'None', ',', 'fontsize', '=', '9', ')', ':', 'try', ':', '# pyplot: prodedural interface for matplotlib', 'from', 'matplotlib', '.', 'pyplot', 'import', 'figure', ',', 'subplot', ',', 'gcf', 'except', 'ImportError', ':', 'ImportError', '(', "'could not find matplotlib.pyplot module, function plot() is not available'", ')', 'return', 'if', 'fig', 'is', 'None', ':', 'fig', '=', '426', 'if', 'iabscissa', 'not', 'in', '(', '0', ',', '1', ')', ':', 'iabscissa', '=', '1', 'self', '.', 'load', '(', ')', 'dat', '=', 'self', '# index out some data', 'if', 'iteridx', 'is', 'not', 'None', ':', 'self', '.', 'select_data', '(', 'iteridx', ')', 'if', 'len', '(', 'dat', '.', 'f', ')', '==', '0', ':', 'print', '(', "'nothing to plot'", ')', 'return', '# not in use anymore, see formatter above', '# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))', '# dfit(dfit<1e-98) = NaN;', '# TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous', 'figure', '(', 'fig', ')', 'self', '.', '_enter_plotting', '(', 'fontsize', ')', 'self', '.', 'fighandle', '=', 'gcf', '(', ')', '# fighandle.number', 'if', '1', '<', '3', ':', 'subplot', '(', '2', ',', '3', ',', '1', ')', 'self', '.', 'plot_divers', '(', 'iabscissa', ',', 'foffset', ')', 'pyplot', '.', 'xlabel', '(', "''", ')', '# standard deviations', 'subplot', '(', '2', ',', '3', ',', '4', ')', 'self', '.', 'plot_stds', '(', 'iabscissa', ')', '# Scaling', 'subplot', '(', '2', ',', '3', ',', '2', ')', 'self', '.', 'plot_axes_scaling', '(', 'iabscissa', ')', 'pyplot', '.', 'xlabel', '(', "''", ')', '# spectrum of correlation matrix', 'subplot', '(', '2', ',', '3', ',', '5', ')', 'self', '.', 'plot_correlations', '(', 'iabscissa', ')', '# x-vectors', 'subplot', '(', '2', ',', '3', ',', '3', ')', 'self', '.', 'plot_xrecent', '(', 'iabscissa', ',', 'x_opt', ')', 'pyplot', '.', 'xlabel', '(', "''", ')', 'subplot', '(', '2', ',', '3', ',', '6', ')', 'self', '.', 'plot_mean', '(', 'iabscissa', ',', 'x_opt', ')', 'self', '.', '_finalize_plotting', '(', ')', 'return', 'self'] | plot data from a `CMADataLogger` (using the files written by the logger).
Arguments
---------
`fig`
figure number, by default 425
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot_all()
cma.savefig('fig425.png') # save current figure
logger.closefig()
Dependencies: matlabplotlib/pyplot. | ['plot', 'data', 'from', 'a', 'CMADataLogger', '(', 'using', 'the', 'files', 'written', 'by', 'the', 'logger', ')', '.'] | train | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L6173-L6267 |
2,740 | fhcrc/taxtastic | taxtastic/ncbi.py | read_names | def read_names(rows, source_id=1):
"""Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
    for each tax_id with a value of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
"""
ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
extra_keys = ['source_id', 'is_primary', 'is_classified']
# is_classified applies to species only; we will set this value
# later
is_classified = None
tax_id = ncbi_keys.index('tax_id')
tax_name = ncbi_keys.index('tax_name')
unique_name = ncbi_keys.index('unique_name')
name_class = ncbi_keys.index('name_class')
yield ncbi_keys + extra_keys
for tid, grp in itertools.groupby(rows, itemgetter(tax_id)):
# confirm that each tax_id has exactly one scientific name
num_primary = 0
for r in grp:
is_primary = r[name_class] == 'scientific name'
# fix primary key uniqueness violation
if r[unique_name]:
r[tax_name] = r[unique_name]
num_primary += is_primary
yield (r + [source_id, is_primary, is_classified])
assert num_primary == 1 | python | def read_names(rows, source_id=1):
"""Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
    for each tax_id with a value of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
"""
ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
extra_keys = ['source_id', 'is_primary', 'is_classified']
# is_classified applies to species only; we will set this value
# later
is_classified = None
tax_id = ncbi_keys.index('tax_id')
tax_name = ncbi_keys.index('tax_name')
unique_name = ncbi_keys.index('unique_name')
name_class = ncbi_keys.index('name_class')
yield ncbi_keys + extra_keys
for tid, grp in itertools.groupby(rows, itemgetter(tax_id)):
# confirm that each tax_id has exactly one scientific name
num_primary = 0
for r in grp:
is_primary = r[name_class] == 'scientific name'
# fix primary key uniqueness violation
if r[unique_name]:
r[tax_name] = r[unique_name]
num_primary += is_primary
yield (r + [source_id, is_primary, is_classified])
assert num_primary == 1 | ['def', 'read_names', '(', 'rows', ',', 'source_id', '=', '1', ')', ':', 'ncbi_keys', '=', '[', "'tax_id'", ',', "'tax_name'", ',', "'unique_name'", ',', "'name_class'", ']', 'extra_keys', '=', '[', "'source_id'", ',', "'is_primary'", ',', "'is_classified'", ']', '# is_classified applies to species only; we will set this value', '# later', 'is_classified', '=', 'None', 'tax_id', '=', 'ncbi_keys', '.', 'index', '(', "'tax_id'", ')', 'tax_name', '=', 'ncbi_keys', '.', 'index', '(', "'tax_name'", ')', 'unique_name', '=', 'ncbi_keys', '.', 'index', '(', "'unique_name'", ')', 'name_class', '=', 'ncbi_keys', '.', 'index', '(', "'name_class'", ')', 'yield', 'ncbi_keys', '+', 'extra_keys', 'for', 'tid', ',', 'grp', 'in', 'itertools', '.', 'groupby', '(', 'rows', ',', 'itemgetter', '(', 'tax_id', ')', ')', ':', '# confirm that each tax_id has exactly one scientific name', 'num_primary', '=', '0', 'for', 'r', 'in', 'grp', ':', 'is_primary', '=', 'r', '[', 'name_class', ']', '==', "'scientific name'", '# fix primary key uniqueness violation', 'if', 'r', '[', 'unique_name', ']', ':', 'r', '[', 'tax_name', ']', '=', 'r', '[', 'unique_name', ']', 'num_primary', '+=', 'is_primary', 'yield', '(', 'r', '+', '[', 'source_id', ',', 'is_primary', ',', 'is_classified', ']', ')', 'assert', 'num_primary', '==', '1'] | Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
    for each tax_id with a value of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...) | ['Return', 'an', 'iterator', 'of', 'rows', 'ready', 'to', 'insert', 'into', 'table', 'names', '.', 'Adds', 'columns', 'is_primary', '(', 'identifying', 'the', 'primary', 'name', 'for', 'each', 'tax_id', 'with', 'a', 'vaule', 'of', '1', ')', 'and', 'is_classified', '(', 'always', 'None', ')', '.'] | train | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/ncbi.py#L283-L326 |
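A minimal usage sketch for read_names, assuming it has been imported from taxtastic.ncbi; the sample rows are invented, follow names.dmp column order (tax_id, tax_name, unique_name, name_class), and are already grouped by tax_id as the function requires:
```python
rows = [
    ['9606', 'Homo sapiens', '', 'scientific name'],
    ['9606', 'human', '', 'common name'],
]

output = list(read_names(iter(rows), source_id=1))
print(output[0])  # header row: ncbi_keys + ['source_id', 'is_primary', 'is_classified']
print(output[1])  # ['9606', 'Homo sapiens', '', 'scientific name', 1, True, None]
```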
2,741 | estnltk/estnltk | estnltk/text.py | Text.starts | def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START])
return starts | python | def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START])
return starts | ['def', 'starts', '(', 'self', ',', 'layer', ')', ':', 'starts', '=', '[', ']', 'for', 'data', 'in', 'self', '[', 'layer', ']', ':', 'starts', '.', 'append', '(', 'data', '[', 'START', ']', ')', 'return', 'starts'] | Retrieve start positions of elements if given layer. | ['Retrieve', 'start', 'positions', 'of', 'elements', 'if', 'given', 'layer', '.'] | train | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L251-L256 |
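A rough usage sketch against the estnltk 1.x API shown above; the tokenize_words() call and the printed offsets are assumptions, not taken from this record:
```python
from estnltk import Text  # estnltk 1.x, matching the snippet above

text = Text('Tere maailm!')
text.tokenize_words()          # assumed 1.x helper that populates the 'words' layer
print(text.starts('words'))    # e.g. [0, 5, 11] -- character offsets where each element begins
```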
2,742 | pgmpy/pgmpy | pgmpy/sampling/Sampling.py | GibbsSampling._get_kernel_from_bayesian_model | def _get_kernel_from_bayesian_model(self, model):
"""
Computes the Gibbs transition models from a Bayesian Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: BayesianModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
self.cardinalities = {var: model.get_cpds(var).variable_card for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
prod_cpd = factor_product(*cpds)
kernel = {}
scope = set(prod_cpd.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(v, s) for v, s in zip(other_vars, tup) if v in scope]
prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
self.transition_models[var] = kernel | python | def _get_kernel_from_bayesian_model(self, model):
"""
Computes the Gibbs transition models from a Bayesian Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: BayesianModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
self.cardinalities = {var: model.get_cpds(var).variable_card for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
prod_cpd = factor_product(*cpds)
kernel = {}
scope = set(prod_cpd.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(v, s) for v, s in zip(other_vars, tup) if v in scope]
prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
self.transition_models[var] = kernel | ['def', '_get_kernel_from_bayesian_model', '(', 'self', ',', 'model', ')', ':', 'self', '.', 'variables', '=', 'np', '.', 'array', '(', 'model', '.', 'nodes', '(', ')', ')', 'self', '.', 'cardinalities', '=', '{', 'var', ':', 'model', '.', 'get_cpds', '(', 'var', ')', '.', 'variable_card', 'for', 'var', 'in', 'self', '.', 'variables', '}', 'for', 'var', 'in', 'self', '.', 'variables', ':', 'other_vars', '=', '[', 'v', 'for', 'v', 'in', 'self', '.', 'variables', 'if', 'var', '!=', 'v', ']', 'other_cards', '=', '[', 'self', '.', 'cardinalities', '[', 'v', ']', 'for', 'v', 'in', 'other_vars', ']', 'cpds', '=', '[', 'cpd', 'for', 'cpd', 'in', 'model', '.', 'cpds', 'if', 'var', 'in', 'cpd', '.', 'scope', '(', ')', ']', 'prod_cpd', '=', 'factor_product', '(', '*', 'cpds', ')', 'kernel', '=', '{', '}', 'scope', '=', 'set', '(', 'prod_cpd', '.', 'scope', '(', ')', ')', 'for', 'tup', 'in', 'itertools', '.', 'product', '(', '*', '[', 'range', '(', 'card', ')', 'for', 'card', 'in', 'other_cards', ']', ')', ':', 'states', '=', '[', 'State', '(', 'v', ',', 's', ')', 'for', 'v', ',', 's', 'in', 'zip', '(', 'other_vars', ',', 'tup', ')', 'if', 'v', 'in', 'scope', ']', 'prod_cpd_reduced', '=', 'prod_cpd', '.', 'reduce', '(', 'states', ',', 'inplace', '=', 'False', ')', 'kernel', '[', 'tup', ']', '=', 'prod_cpd_reduced', '.', 'values', '/', 'sum', '(', 'prod_cpd_reduced', '.', 'values', ')', 'self', '.', 'transition_models', '[', 'var', ']', '=', 'kernel'] | Computes the Gibbs transition models from a Bayesian Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: BayesianModel
The model from which probabilities will be computed. | ['Computes', 'the', 'Gibbs', 'transition', 'models', 'from', 'a', 'Bayesian', 'Network', '.', 'Probabilistic', 'Graphical', 'Model', 'Principles', 'and', 'Techniques', 'Koller', 'and', 'Friedman', 'Section', '12', '.', '3', '.', '3', 'pp', '512', '-', '513', '.'] | train | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/sampling/Sampling.py#L278-L303 |
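This helper is private and runs when a GibbsSampling sampler is built from a Bayesian network; a small end-to-end sketch follows (the model structure and CPD values are made up, and exact TabularCPD value shapes can vary between pgmpy versions):
```python
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD
from pgmpy.sampling import GibbsSampling

model = BayesianModel([('A', 'B')])
cpd_a = TabularCPD('A', 2, [[0.6], [0.4]])
cpd_b = TabularCPD('B', 2, [[0.7, 0.2], [0.3, 0.8]],
                   evidence=['A'], evidence_card=[2])
model.add_cpds(cpd_a, cpd_b)

gibbs = GibbsSampling(model)               # calls _get_kernel_from_bayesian_model internally
print(gibbs.transition_models['A'][(0,)])  # distribution of A given the other variables' states
print(gibbs.sample(size=3))                # a few Gibbs samples
```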
2,743 | michaelaye/pyciss | pyciss/opusapi.py | OPUS.create_request_with_query | def create_request_with_query(self, kind, query, size="thumb", fmt="json"):
"""api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]
kind = ['data', 'images', 'files']
"""
if kind == "data" or kind == "files":
url = "{}/{}.{}".format(base_url, kind, fmt)
elif kind == "images":
url = "{}/images/{}.{}".format(base_url, size, fmt)
self.url = url
self.r = requests.get(url, params=unquote(urlencode(query))) | python | def create_request_with_query(self, kind, query, size="thumb", fmt="json"):
"""api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]
kind = ['data', 'images', 'files']
"""
if kind == "data" or kind == "files":
url = "{}/{}.{}".format(base_url, kind, fmt)
elif kind == "images":
url = "{}/images/{}.{}".format(base_url, size, fmt)
self.url = url
self.r = requests.get(url, params=unquote(urlencode(query))) | ['def', 'create_request_with_query', '(', 'self', ',', 'kind', ',', 'query', ',', 'size', '=', '"thumb"', ',', 'fmt', '=', '"json"', ')', ':', 'if', 'kind', '==', '"data"', 'or', 'kind', '==', '"files"', ':', 'url', '=', '"{}/{}.{}"', '.', 'format', '(', 'base_url', ',', 'kind', ',', 'fmt', ')', 'elif', 'kind', '==', '"images"', ':', 'url', '=', '"{}/images/{}.{}"', '.', 'format', '(', 'base_url', ',', 'size', ',', 'fmt', ')', 'self', '.', 'url', '=', 'url', 'self', '.', 'r', '=', 'requests', '.', 'get', '(', 'url', ',', 'params', '=', 'unquote', '(', 'urlencode', '(', 'query', ')', ')', ')'] | api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]
kind = ['data', 'images', 'files'] | ['api', '/', 'data', '.', '[', 'fmt', ']', 'api', '/', 'images', '/', '[', 'size', ']', '.', '[', 'fmt', ']', 'api', '/', 'files', '.', '[', 'fmt', ']'] | train | https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/opusapi.py#L184-L196 |
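A hedged usage sketch; the query keys below are illustrative OPUS search parameters rather than a definitive list:
```python
from pyciss.opusapi import OPUS  # assumes pyciss is installed

opus = OPUS()
opus.create_request_with_query('data', {'target': 'SATURN', 'limit': 10})
print(opus.url)             # endpoint chosen from `kind` and `fmt`
print(opus.r.status_code)   # the requests.Response stored on the instance
```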
2,744 | gdoermann/voicebase | voicebase/api/media.py | clean_dict | def clean_dict(d, test=lambda v: v):
"""
Return only keys that meet the test
:param d: Dictionary
:param test: the test to run on the value (example override is: "lambda v: v is not None")
:return: Cleaned dictionary
"""
return {k: v for k, v in d.items() if test(v)} | python | def clean_dict(d, test=lambda v: v):
"""
Return only keys that meet the test
:param d: Dictionary
:param test: the test to run on the value (example override is: "lambda v: v is not None")
:return: Cleaned dictionary
"""
return {k: v for k, v in d.items() if test(v)} | ['def', 'clean_dict', '(', 'd', ',', 'test', '=', 'lambda', 'v', ':', 'v', ')', ':', 'return', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'd', '.', 'items', '(', ')', 'if', 'test', '(', 'v', ')', '}'] | Return only keys that meet the test
:param d: Dictionary
:param test: the test to run on the value (example override is: "lambda v: v is not None")
:return: Cleaned dictionary | ['Return', 'only', 'keys', 'that', 'meet', 'the', 'test', ':', 'param', 'd', ':', 'Dictionary', ':', 'param', 'test', ':', 'the', 'test', 'to', 'run', 'on', 'the', 'value', '(', 'example', 'override', 'is', ':', 'lambda', 'v', ':', 'v', 'is', 'not', 'None', ')', ':', 'return', ':', 'Cleaned', 'dictionary'] | train | https://github.com/gdoermann/voicebase/blob/53cb4735327898a7a284dea3a60ace0b3956a8ec/voicebase/api/media.py#L14-L21 |
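A short example showing the default test (drop falsy values) versus an explicit None check:
```python
raw = {'title': 'call.mp3', 'notes': None, 'tags': []}

print(clean_dict(raw))
# {'title': 'call.mp3'}             -- default test drops every falsy value
print(clean_dict(raw, test=lambda v: v is not None))
# {'title': 'call.mp3', 'tags': []} -- only None is dropped
```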
2,745 | bcbio/bcbio-nextgen | bcbio/rnaseq/express.py | _get_column | def _get_column(in_file, out_file, column, data=None):
"""Subset one column from a file
"""
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split("\t")
if line.find("eff_count") > 0:
continue
number = cols[column]
if column == 7:
number = int(round(float(number), 0))
out_handle.write("%s\t%s\n" % (cols[1], number))
return out_file | python | def _get_column(in_file, out_file, column, data=None):
"""Subset one column from a file
"""
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split("\t")
if line.find("eff_count") > 0:
continue
number = cols[column]
if column == 7:
number = int(round(float(number), 0))
out_handle.write("%s\t%s\n" % (cols[1], number))
return out_file | ['def', '_get_column', '(', 'in_file', ',', 'out_file', ',', 'column', ',', 'data', '=', 'None', ')', ':', 'with', 'file_transaction', '(', 'data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'with', 'open', '(', 'in_file', ')', 'as', 'in_handle', ':', 'with', 'open', '(', 'tx_out_file', ',', "'w'", ')', 'as', 'out_handle', ':', 'for', 'line', 'in', 'in_handle', ':', 'cols', '=', 'line', '.', 'strip', '(', ')', '.', 'split', '(', '"\\t"', ')', 'if', 'line', '.', 'find', '(', '"eff_count"', ')', '>', '0', ':', 'continue', 'number', '=', 'cols', '[', 'column', ']', 'if', 'column', '==', '7', ':', 'number', '=', 'int', '(', 'round', '(', 'float', '(', 'number', ')', ',', '0', ')', ')', 'out_handle', '.', 'write', '(', '"%s\\t%s\\n"', '%', '(', 'cols', '[', '1', ']', ',', 'number', ')', ')', 'return', 'out_file'] | Subset one column from a file | ['Subset', 'one', 'column', 'from', 'a', 'file'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/express.py#L41-L55 |
2,746 | mlavin/argyle | argyle/system.py | create_user | def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name}) | python | def create_user(name, groups=None, key_file=None):
"""Create a user. Adds a key file to authorized_keys if given."""
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u'-G %s' % u','.join(groups) or ''
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {'name': name}) | ['def', 'create_user', '(', 'name', ',', 'groups', '=', 'None', ',', 'key_file', '=', 'None', ')', ':', 'groups', '=', 'groups', 'or', '[', ']', 'if', 'not', 'user_exists', '(', 'name', ')', ':', 'for', 'group', 'in', 'groups', ':', 'if', 'not', 'group_exists', '(', 'group', ')', ':', 'sudo', '(', 'u"addgroup %s"', '%', 'group', ')', 'groups', '=', 'groups', 'and', "u'-G %s'", '%', "u','", '.', 'join', '(', 'groups', ')', 'or', "''", 'sudo', '(', 'u"useradd -m %s -s /bin/bash %s"', '%', '(', 'groups', ',', 'name', ')', ')', 'sudo', '(', 'u"passwd -d %s"', '%', 'name', ')', 'if', 'key_file', ':', 'sudo', '(', 'u"mkdir -p /home/%s/.ssh"', '%', 'name', ')', 'put', '(', 'key_file', ',', 'u"/home/%s/.ssh/authorized_keys"', '%', 'name', ',', 'use_sudo', '=', 'True', ')', 'sudo', '(', 'u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh"', '%', '{', "'name'", ':', 'name', '}', ')'] | Create a user. Adds a key file to authorized_keys if given. | ['Create', 'a', 'user', '.', 'Adds', 'a', 'key', 'file', 'to', 'authorized_keys', 'if', 'given', '.'] | train | https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L111-L125 |
2,747 | Kane610/deconz | pydeconz/__init__.py | DeconzSession.async_event_handler | def async_event_handler(self, event: dict) -> None:
"""Receive event from websocket and identifies where the event belong.
{
"t": "event",
"e": "changed",
"r": "sensors",
"id": "12",
"state": { "buttonevent": 2002 }
}
"""
if event['e'] == 'added':
if event['r'] == 'lights' and event['id'] not in self.lights:
device_type = 'light'
device = self.lights[event['id']] = DeconzLight(
event['id'], event['light'], self.async_put_state)
elif event['r'] == 'sensors' and event['id'] not in self.sensors:
if supported_sensor(event['sensor']):
device_type = 'sensor'
device = self.sensors[event['id']] = create_sensor(
event['id'], event['sensor'], self.async_put_state)
else:
_LOGGER.warning('Unsupported sensor %s', event)
return
else:
_LOGGER.debug('Unsupported event %s', event)
return
if self.async_add_device_callback:
self.async_add_device_callback(device_type, device)
elif event['e'] == 'changed':
if event['r'] == 'groups' and event['id'] in self.groups:
self.groups[event['id']].async_update(event)
elif event['r'] == 'lights' and event['id'] in self.lights:
self.lights[event['id']].async_update(event)
self.update_group_color([event['id']])
elif event['r'] == 'sensors' and event['id'] in self.sensors:
self.sensors[event['id']].async_update(event)
else:
_LOGGER.debug('Unsupported event %s', event)
elif event['e'] == 'deleted':
_LOGGER.debug('Removed event %s', event)
else:
_LOGGER.debug('Unsupported event %s', event) | python | def async_event_handler(self, event: dict) -> None:
"""Receive event from websocket and identifies where the event belong.
{
"t": "event",
"e": "changed",
"r": "sensors",
"id": "12",
"state": { "buttonevent": 2002 }
}
"""
if event['e'] == 'added':
if event['r'] == 'lights' and event['id'] not in self.lights:
device_type = 'light'
device = self.lights[event['id']] = DeconzLight(
event['id'], event['light'], self.async_put_state)
elif event['r'] == 'sensors' and event['id'] not in self.sensors:
if supported_sensor(event['sensor']):
device_type = 'sensor'
device = self.sensors[event['id']] = create_sensor(
event['id'], event['sensor'], self.async_put_state)
else:
_LOGGER.warning('Unsupported sensor %s', event)
return
else:
_LOGGER.debug('Unsupported event %s', event)
return
if self.async_add_device_callback:
self.async_add_device_callback(device_type, device)
elif event['e'] == 'changed':
if event['r'] == 'groups' and event['id'] in self.groups:
self.groups[event['id']].async_update(event)
elif event['r'] == 'lights' and event['id'] in self.lights:
self.lights[event['id']].async_update(event)
self.update_group_color([event['id']])
elif event['r'] == 'sensors' and event['id'] in self.sensors:
self.sensors[event['id']].async_update(event)
else:
_LOGGER.debug('Unsupported event %s', event)
elif event['e'] == 'deleted':
_LOGGER.debug('Removed event %s', event)
else:
_LOGGER.debug('Unsupported event %s', event) | ['def', 'async_event_handler', '(', 'self', ',', 'event', ':', 'dict', ')', '->', 'None', ':', 'if', 'event', '[', "'e'", ']', '==', "'added'", ':', 'if', 'event', '[', "'r'", ']', '==', "'lights'", 'and', 'event', '[', "'id'", ']', 'not', 'in', 'self', '.', 'lights', ':', 'device_type', '=', "'light'", 'device', '=', 'self', '.', 'lights', '[', 'event', '[', "'id'", ']', ']', '=', 'DeconzLight', '(', 'event', '[', "'id'", ']', ',', 'event', '[', "'light'", ']', ',', 'self', '.', 'async_put_state', ')', 'elif', 'event', '[', "'r'", ']', '==', "'sensors'", 'and', 'event', '[', "'id'", ']', 'not', 'in', 'self', '.', 'sensors', ':', 'if', 'supported_sensor', '(', 'event', '[', "'sensor'", ']', ')', ':', 'device_type', '=', "'sensor'", 'device', '=', 'self', '.', 'sensors', '[', 'event', '[', "'id'", ']', ']', '=', 'create_sensor', '(', 'event', '[', "'id'", ']', ',', 'event', '[', "'sensor'", ']', ',', 'self', '.', 'async_put_state', ')', 'else', ':', '_LOGGER', '.', 'warning', '(', "'Unsupported sensor %s'", ',', 'event', ')', 'return', 'else', ':', '_LOGGER', '.', 'debug', '(', "'Unsupported event %s'", ',', 'event', ')', 'return', 'if', 'self', '.', 'async_add_device_callback', ':', 'self', '.', 'async_add_device_callback', '(', 'device_type', ',', 'device', ')', 'elif', 'event', '[', "'e'", ']', '==', "'changed'", ':', 'if', 'event', '[', "'r'", ']', '==', "'groups'", 'and', 'event', '[', "'id'", ']', 'in', 'self', '.', 'groups', ':', 'self', '.', 'groups', '[', 'event', '[', "'id'", ']', ']', '.', 'async_update', '(', 'event', ')', 'elif', 'event', '[', "'r'", ']', '==', "'lights'", 'and', 'event', '[', "'id'", ']', 'in', 'self', '.', 'lights', ':', 'self', '.', 'lights', '[', 'event', '[', "'id'", ']', ']', '.', 'async_update', '(', 'event', ')', 'self', '.', 'update_group_color', '(', '[', 'event', '[', "'id'", ']', ']', ')', 'elif', 'event', '[', "'r'", ']', '==', "'sensors'", 'and', 'event', '[', "'id'", ']', 'in', 'self', '.', 'sensors', ':', 'self', '.', 'sensors', '[', 'event', '[', "'id'", ']', ']', '.', 'async_update', '(', 'event', ')', 'else', ':', '_LOGGER', '.', 'debug', '(', "'Unsupported event %s'", ',', 'event', ')', 'elif', 'event', '[', "'e'", ']', '==', "'deleted'", ':', '_LOGGER', '.', 'debug', '(', "'Removed event %s'", ',', 'event', ')', 'else', ':', '_LOGGER', '.', 'debug', '(', "'Unsupported event %s'", ',', 'event', ')'] | Receive event from websocket and identifies where the event belong.
{
"t": "event",
"e": "changed",
"r": "sensors",
"id": "12",
"state": { "buttonevent": 2002 }
} | ['Receive', 'event', 'from', 'websocket', 'and', 'identifies', 'where', 'the', 'event', 'belong', '.'] | train | https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/__init__.py#L145-L198 |
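A sketch of feeding a hand-crafted websocket-style event into the handler; `session` is assumed to be an already-initialised DeconzSession whose sensors include id "12":
```python
event = {
    "t": "event",
    "e": "changed",
    "r": "sensors",
    "id": "12",
    "state": {"buttonevent": 2002},
}
session.async_event_handler(event)   # routes the update to session.sensors["12"]
```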
2,748 | relwell/corenlp-xml-lib | corenlp_xml/document.py | Token.pos | def pos(self):
"""
Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str
"""
if self._pos is None:
poses = self._element.xpath('POS/text()')
if len(poses) > 0:
self._pos = poses[0]
return self._pos | python | def pos(self):
"""
Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str
"""
if self._pos is None:
poses = self._element.xpath('POS/text()')
if len(poses) > 0:
self._pos = poses[0]
return self._pos | ['def', 'pos', '(', 'self', ')', ':', 'if', 'self', '.', '_pos', 'is', 'None', ':', 'poses', '=', 'self', '.', '_element', '.', 'xpath', '(', "'POS/text()'", ')', 'if', 'len', '(', 'poses', ')', '>', '0', ':', 'self', '.', '_pos', '=', 'poses', '[', '0', ']', 'return', 'self', '.', '_pos'] | Lazy-loads the part of speech tag for this word
:getter: Returns the plain string value of the POS tag for the word
:type: str | ['Lazy', '-', 'loads', 'the', 'part', 'of', 'speech', 'tag', 'for', 'this', 'word'] | train | https://github.com/relwell/corenlp-xml-lib/blob/9b0f8c912ba3ecedd34473f74a9f2d033a75baf9/corenlp_xml/document.py#L416-L428 |
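A brief usage sketch; the Document/sentences/tokens attribute names are recalled from the same library and the input file is a placeholder:
```python
from corenlp_xml.document import Document  # assumes corenlp_xml is installed

with open('corenlp_output.xml') as handle:  # XML produced by Stanford CoreNLP
    doc = Document(handle.read())

for token in doc.sentences[0].tokens:
    print(token.word, token.pos)  # .pos is read lazily from the token's <POS> element
```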
2,749 | mozilla/DeepSpeech | bin/benchmark_nc.py | extract_native_client_tarball | def extract_native_client_tarball(dir):
r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
'''
assert_valid_dir(dir)
target_tarball = os.path.join(dir, 'native_client.tar.xz')
if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
return
subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
os.unlink(os.path.join(dir, 'native_client.tar'))
open(target_tarball, 'w').close() | python | def extract_native_client_tarball(dir):
r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
'''
assert_valid_dir(dir)
target_tarball = os.path.join(dir, 'native_client.tar.xz')
if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
return
subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
os.unlink(os.path.join(dir, 'native_client.tar'))
open(target_tarball, 'w').close() | ['def', 'extract_native_client_tarball', '(', 'dir', ')', ':', 'assert_valid_dir', '(', 'dir', ')', 'target_tarball', '=', 'os', '.', 'path', '.', 'join', '(', 'dir', ',', "'native_client.tar.xz'", ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'target_tarball', ')', 'and', 'os', '.', 'stat', '(', 'target_tarball', ')', '.', 'st_size', '==', '0', ':', 'return', 'subprocess', '.', 'check_call', '(', '[', "'pixz'", ',', "'-d'", ',', "'native_client.tar.xz'", ']', ',', 'cwd', '=', 'dir', ')', 'subprocess', '.', 'check_call', '(', '[', "'tar'", ',', "'xf'", ',', "'native_client.tar'", ']', ',', 'cwd', '=', 'dir', ')', 'os', '.', 'unlink', '(', 'os', '.', 'path', '.', 'join', '(', 'dir', ',', "'native_client.tar'", ')', ')', 'open', '(', 'target_tarball', ',', "'w'", ')', '.', 'close', '(', ')'] | r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir. | ['r', 'Download', 'a', 'native_client', '.', 'tar', '.', 'xz', 'file', 'from', 'TaskCluster', 'and', 'extract', 'it', 'to', 'dir', '.'] | train | https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L97-L110 |
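A minimal sketch of invoking the helper on a directory that already contains the tarball; it relies on the external `pixz` and `tar` binaries being on PATH:
```python
# The zero-length marker file left behind makes repeated calls a no-op.
extract_native_client_tarball('./native_client')
```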
2,750 | hazelcast/hazelcast-python-client | hazelcast/protocol/codec/atomic_reference_set_and_get_codec.py | calculate_size | def calculate_size(name, new_value):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += BOOLEAN_SIZE_IN_BYTES
if new_value is not None:
data_size += calculate_size_data(new_value)
return data_size | python | def calculate_size(name, new_value):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += BOOLEAN_SIZE_IN_BYTES
if new_value is not None:
data_size += calculate_size_data(new_value)
return data_size | ['def', 'calculate_size', '(', 'name', ',', 'new_value', ')', ':', 'data_size', '=', '0', 'data_size', '+=', 'calculate_size_str', '(', 'name', ')', 'data_size', '+=', 'BOOLEAN_SIZE_IN_BYTES', 'if', 'new_value', 'is', 'not', 'None', ':', 'data_size', '+=', 'calculate_size_data', '(', 'new_value', ')', 'return', 'data_size'] | Calculates the request payload size | ['Calculates', 'the', 'request', 'payload', 'size'] | train | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/atomic_reference_set_and_get_codec.py#L10-L17 |
2,751 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/service_hooks/service_hooks_client.py | ServiceHooksClient.get_notification | def get_notification(self, subscription_id, notification_id):
"""GetNotification.
Get a specific notification for a subscription.
:param str subscription_id: ID for a subscription.
:param int notification_id:
:rtype: :class:`<Notification> <azure.devops.v5_0.service_hooks.models.Notification>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
if notification_id is not None:
route_values['notificationId'] = self._serialize.url('notification_id', notification_id, 'int')
response = self._send(http_method='GET',
location_id='0c62d343-21b0-4732-997b-017fde84dc28',
version='5.0',
route_values=route_values)
return self._deserialize('Notification', response) | python | def get_notification(self, subscription_id, notification_id):
"""GetNotification.
Get a specific notification for a subscription.
:param str subscription_id: ID for a subscription.
:param int notification_id:
:rtype: :class:`<Notification> <azure.devops.v5_0.service_hooks.models.Notification>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
if notification_id is not None:
route_values['notificationId'] = self._serialize.url('notification_id', notification_id, 'int')
response = self._send(http_method='GET',
location_id='0c62d343-21b0-4732-997b-017fde84dc28',
version='5.0',
route_values=route_values)
return self._deserialize('Notification', response) | ['def', 'get_notification', '(', 'self', ',', 'subscription_id', ',', 'notification_id', ')', ':', 'route_values', '=', '{', '}', 'if', 'subscription_id', 'is', 'not', 'None', ':', 'route_values', '[', "'subscriptionId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'subscription_id'", ',', 'subscription_id', ',', "'str'", ')', 'if', 'notification_id', 'is', 'not', 'None', ':', 'route_values', '[', "'notificationId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'notification_id'", ',', 'notification_id', ',', "'int'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'GET'", ',', 'location_id', '=', "'0c62d343-21b0-4732-997b-017fde84dc28'", ',', 'version', '=', "'5.0'", ',', 'route_values', '=', 'route_values', ')', 'return', 'self', '.', '_deserialize', '(', "'Notification'", ',', 'response', ')'] | GetNotification.
Get a specific notification for a subscription.
:param str subscription_id: ID for a subscription.
:param int notification_id:
:rtype: :class:`<Notification> <azure.devops.v5_0.service_hooks.models.Notification>` | ['GetNotification', '.', 'Get', 'a', 'specific', 'notification', 'for', 'a', 'subscription', '.', ':', 'param', 'str', 'subscription_id', ':', 'ID', 'for', 'a', 'subscription', '.', ':', 'param', 'int', 'notification_id', ':', ':', 'rtype', ':', ':', 'class', ':', '<Notification', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'service_hooks', '.', 'models', '.', 'Notification', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/service_hooks/service_hooks_client.py#L172-L188 |
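A hedged usage sketch; the organisation URL, token, subscription id, factory method name, and printed attribute are assumptions for illustration:
```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', 'my-personal-access-token'))
client = connection.clients.get_service_hooks_client()  # factory method name assumed
notification = client.get_notification('00000000-0000-0000-0000-000000000000', 1)
print(notification.status)
```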
2,752 | koreyou/word_embedding_loader | word_embedding_loader/cli.py | list | def list():
"""
List available format.
"""
choice_len = max(map(len, _input_choices.keys()))
tmpl = " {:<%d}: {}\n" % choice_len
text = ''.join(map(
lambda k_v: tmpl.format(k_v[0], k_v[1][0]), six.iteritems(_input_choices)))
click.echo(text) | python | def list():
"""
List available format.
"""
choice_len = max(map(len, _input_choices.keys()))
tmpl = " {:<%d}: {}\n" % choice_len
text = ''.join(map(
lambda k_v: tmpl.format(k_v[0], k_v[1][0]), six.iteritems(_input_choices)))
click.echo(text) | ['def', 'list', '(', ')', ':', 'choice_len', '=', 'max', '(', 'map', '(', 'len', ',', '_input_choices', '.', 'keys', '(', ')', ')', ')', 'tmpl', '=', '" {:<%d}: {}\\n"', '%', 'choice_len', 'text', '=', "''", '.', 'join', '(', 'map', '(', 'lambda', 'k_v', ':', 'tmpl', '.', 'format', '(', 'k_v', '[', '0', ']', ',', 'k_v', '[', '1', ']', '[', '0', ']', ')', ',', 'six', '.', 'iteritems', '(', '_input_choices', ')', ')', ')', 'click', '.', 'echo', '(', 'text', ')'] | List available format. | ['List', 'available', 'format', '.'] | train | https://github.com/koreyou/word_embedding_loader/blob/1bc123f1a8bea12646576dcd768dae3ecea39c06/word_embedding_loader/cli.py#L72-L80 |
2,753 | bmcfee/pumpp | pumpp/sampler.py | SequentialSampler.indices | def indices(self, data):
'''Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch
'''
duration = self.data_duration(data)
for start in range(0, duration - self.duration, self.stride):
yield start | python | def indices(self, data):
'''Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch
'''
duration = self.data_duration(data)
for start in range(0, duration - self.duration, self.stride):
yield start | ['def', 'indices', '(', 'self', ',', 'data', ')', ':', 'duration', '=', 'self', '.', 'data_duration', '(', 'data', ')', 'for', 'start', 'in', 'range', '(', '0', ',', 'duration', '-', 'self', '.', 'duration', ',', 'self', '.', 'stride', ')', ':', 'yield', 'start'] | Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch | ['Generate', 'patch', 'start', 'indices'] | train | https://github.com/bmcfee/pumpp/blob/06a17b888271dd1f6cd41bddb22b0eb04d494056/pumpp/sampler.py#L210-L226 |
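A minimal sketch, assuming `sampler` is a pumpp.SequentialSampler and `data` is the dict produced by pump.transform(...):
```python
starts = list(sampler.indices(data))
# e.g. [0, stride, 2 * stride, ...], stopping before data_duration - duration
```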
2,754 | saltstack/salt | salt/modules/pdbedit.py | modify | def modify(
login, password=None, password_hashed=False,
domain=None, profile=None, script=None,
drive=None, homedir=None, fullname=None,
account_desc=None, account_control=None,
machine_sid=None, user_sid=None,
reset_login_hours=False, reset_bad_password_count=False,
):
'''
Modify user account
login : string
login name
password : string
password
password_hashed : boolean
set if password is a nt hash instead of plain text
domain : string
users domain
profile : string
profile path
script : string
logon script
drive : string
home drive
homedir : string
home directory
fullname : string
full name
account_desc : string
account description
machine_sid : string
specify the machines new primary group SID or rid
user_sid : string
specify the users new primary group SID or rid
account_control : string
specify user account control properties
.. note::
Only the following can be set:
- N: No password required
- D: Account disabled
- H: Home directory required
- L: Automatic Locking
- X: Password does not expire
reset_login_hours : boolean
reset the users allowed logon hours
reset_bad_password_count : boolean
reset the stored bad login counter
.. note::
if user is absent and password is provided, the user will be created
CLI Example:
.. code-block:: bash
salt '*' pdbedit.modify inara fullname='Inara Serra'
salt '*' pdbedit.modify simon password=r1v3r
salt '*' pdbedit.modify jane drive='V:' homedir='\\\\serenity\\jane\\profile'
salt '*' pdbedit.modify mal account_control=NX
'''
ret = 'unchanged'
# flag mapping
flags = {
'domain': '--domain=',
'full name': '--fullname=',
'account desc': '--account-desc=',
'home directory': '--homedir=',
'homedir drive': '--drive=',
'profile path': '--profile=',
'logon script': '--script=',
'account flags': '--account-control=',
'user sid': '-U ',
'machine sid': '-M ',
}
# field mapping
provided = {
'domain': domain,
'full name': fullname,
'account desc': account_desc,
'home directory': homedir,
'homedir drive': drive,
'profile path': profile,
'logon script': script,
'account flags': account_control,
'user sid': user_sid,
'machine sid': machine_sid,
}
# update password
if password:
ret = create(login, password, password_hashed)[login]
if ret not in ['updated', 'created', 'unchanged']:
return {login: ret}
elif login not in list_users(False):
return {login: 'absent'}
# check for changes
current = get_user(login, hashes=True)
changes = {}
for key, val in provided.items():
if key in ['user sid', 'machine sid']:
if val is not None and key in current and not current[key].endswith(six.text_type(val)):
changes[key] = six.text_type(val)
elif key in ['account flags']:
if val is not None:
if val.startswith('['):
val = val[1:-1]
new = []
for f in val.upper():
if f not in ['N', 'D', 'H', 'L', 'X']:
logmsg = 'pdbedit.modify - unknown {} flag for account_control, ignored'.format(f)
log.warning(logmsg)
else:
new.append(f)
changes[key] = "[{flags}]".format(flags="".join(new))
else:
if val is not None and key in current and current[key] != val:
changes[key] = val
# apply changes
if changes or reset_login_hours or reset_bad_password_count:
cmds = []
for change in changes:
cmds.append('{flag}{value}'.format(
flag=flags[change],
value=_quote_args(changes[change]),
))
if reset_login_hours:
cmds.append('--logon-hours-reset')
if reset_bad_password_count:
cmds.append('--bad-password-count-reset')
res = __salt__['cmd.run_all'](
'pdbedit --modify --user {login} {changes}'.format(
login=_quote_args(login),
changes=" ".join(cmds),
),
)
if res['retcode'] > 0:
return {login: res['stderr'] if 'stderr' in res else res['stdout']}
if ret != 'created':
ret = 'updated'
return {login: ret} | python | def modify(
login, password=None, password_hashed=False,
domain=None, profile=None, script=None,
drive=None, homedir=None, fullname=None,
account_desc=None, account_control=None,
machine_sid=None, user_sid=None,
reset_login_hours=False, reset_bad_password_count=False,
):
'''
Modify user account
login : string
login name
password : string
password
password_hashed : boolean
set if password is a nt hash instead of plain text
domain : string
users domain
profile : string
profile path
script : string
logon script
drive : string
home drive
homedir : string
home directory
fullname : string
full name
account_desc : string
account description
machine_sid : string
specify the machines new primary group SID or rid
user_sid : string
specify the users new primary group SID or rid
account_control : string
specify user account control properties
.. note::
Only the following can be set:
- N: No password required
- D: Account disabled
- H: Home directory required
- L: Automatic Locking
- X: Password does not expire
reset_login_hours : boolean
reset the users allowed logon hours
reset_bad_password_count : boolean
reset the stored bad login counter
.. note::
if user is absent and password is provided, the user will be created
CLI Example:
.. code-block:: bash
salt '*' pdbedit.modify inara fullname='Inara Serra'
salt '*' pdbedit.modify simon password=r1v3r
salt '*' pdbedit.modify jane drive='V:' homedir='\\\\serenity\\jane\\profile'
salt '*' pdbedit.modify mal account_control=NX
'''
ret = 'unchanged'
# flag mapping
flags = {
'domain': '--domain=',
'full name': '--fullname=',
'account desc': '--account-desc=',
'home directory': '--homedir=',
'homedir drive': '--drive=',
'profile path': '--profile=',
'logon script': '--script=',
'account flags': '--account-control=',
'user sid': '-U ',
'machine sid': '-M ',
}
# field mapping
provided = {
'domain': domain,
'full name': fullname,
'account desc': account_desc,
'home directory': homedir,
'homedir drive': drive,
'profile path': profile,
'logon script': script,
'account flags': account_control,
'user sid': user_sid,
'machine sid': machine_sid,
}
# update password
if password:
ret = create(login, password, password_hashed)[login]
if ret not in ['updated', 'created', 'unchanged']:
return {login: ret}
elif login not in list_users(False):
return {login: 'absent'}
# check for changes
current = get_user(login, hashes=True)
changes = {}
for key, val in provided.items():
if key in ['user sid', 'machine sid']:
if val is not None and key in current and not current[key].endswith(six.text_type(val)):
changes[key] = six.text_type(val)
elif key in ['account flags']:
if val is not None:
if val.startswith('['):
val = val[1:-1]
new = []
for f in val.upper():
if f not in ['N', 'D', 'H', 'L', 'X']:
logmsg = 'pdbedit.modify - unknown {} flag for account_control, ignored'.format(f)
log.warning(logmsg)
else:
new.append(f)
changes[key] = "[{flags}]".format(flags="".join(new))
else:
if val is not None and key in current and current[key] != val:
changes[key] = val
# apply changes
if changes or reset_login_hours or reset_bad_password_count:
cmds = []
for change in changes:
cmds.append('{flag}{value}'.format(
flag=flags[change],
value=_quote_args(changes[change]),
))
if reset_login_hours:
cmds.append('--logon-hours-reset')
if reset_bad_password_count:
cmds.append('--bad-password-count-reset')
res = __salt__['cmd.run_all'](
'pdbedit --modify --user {login} {changes}'.format(
login=_quote_args(login),
changes=" ".join(cmds),
),
)
if res['retcode'] > 0:
return {login: res['stderr'] if 'stderr' in res else res['stdout']}
if ret != 'created':
ret = 'updated'
return {login: ret} | ['def', 'modify', '(', 'login', ',', 'password', '=', 'None', ',', 'password_hashed', '=', 'False', ',', 'domain', '=', 'None', ',', 'profile', '=', 'None', ',', 'script', '=', 'None', ',', 'drive', '=', 'None', ',', 'homedir', '=', 'None', ',', 'fullname', '=', 'None', ',', 'account_desc', '=', 'None', ',', 'account_control', '=', 'None', ',', 'machine_sid', '=', 'None', ',', 'user_sid', '=', 'None', ',', 'reset_login_hours', '=', 'False', ',', 'reset_bad_password_count', '=', 'False', ',', ')', ':', 'ret', '=', "'unchanged'", '# flag mapping', 'flags', '=', '{', "'domain'", ':', "'--domain='", ',', "'full name'", ':', "'--fullname='", ',', "'account desc'", ':', "'--account-desc='", ',', "'home directory'", ':', "'--homedir='", ',', "'homedir drive'", ':', "'--drive='", ',', "'profile path'", ':', "'--profile='", ',', "'logon script'", ':', "'--script='", ',', "'account flags'", ':', "'--account-control='", ',', "'user sid'", ':', "'-U '", ',', "'machine sid'", ':', "'-M '", ',', '}', '# field mapping', 'provided', '=', '{', "'domain'", ':', 'domain', ',', "'full name'", ':', 'fullname', ',', "'account desc'", ':', 'account_desc', ',', "'home directory'", ':', 'homedir', ',', "'homedir drive'", ':', 'drive', ',', "'profile path'", ':', 'profile', ',', "'logon script'", ':', 'script', ',', "'account flags'", ':', 'account_control', ',', "'user sid'", ':', 'user_sid', ',', "'machine sid'", ':', 'machine_sid', ',', '}', '# update password', 'if', 'password', ':', 'ret', '=', 'create', '(', 'login', ',', 'password', ',', 'password_hashed', ')', '[', 'login', ']', 'if', 'ret', 'not', 'in', '[', "'updated'", ',', "'created'", ',', "'unchanged'", ']', ':', 'return', '{', 'login', ':', 'ret', '}', 'elif', 'login', 'not', 'in', 'list_users', '(', 'False', ')', ':', 'return', '{', 'login', ':', "'absent'", '}', '# check for changes', 'current', '=', 'get_user', '(', 'login', ',', 'hashes', '=', 'True', ')', 'changes', '=', '{', '}', 'for', 'key', ',', 'val', 'in', 'provided', '.', 'items', '(', ')', ':', 'if', 'key', 'in', '[', "'user sid'", ',', "'machine sid'", ']', ':', 'if', 'val', 'is', 'not', 'None', 'and', 'key', 'in', 'current', 'and', 'not', 'current', '[', 'key', ']', '.', 'endswith', '(', 'six', '.', 'text_type', '(', 'val', ')', ')', ':', 'changes', '[', 'key', ']', '=', 'six', '.', 'text_type', '(', 'val', ')', 'elif', 'key', 'in', '[', "'account flags'", ']', ':', 'if', 'val', 'is', 'not', 'None', ':', 'if', 'val', '.', 'startswith', '(', "'['", ')', ':', 'val', '=', 'val', '[', '1', ':', '-', '1', ']', 'new', '=', '[', ']', 'for', 'f', 'in', 'val', '.', 'upper', '(', ')', ':', 'if', 'f', 'not', 'in', '[', "'N'", ',', "'D'", ',', "'H'", ',', "'L'", ',', "'X'", ']', ':', 'logmsg', '=', "'pdbedit.modify - unknown {} flag for account_control, ignored'", '.', 'format', '(', 'f', ')', 'log', '.', 'warning', '(', 'logmsg', ')', 'else', ':', 'new', '.', 'append', '(', 'f', ')', 'changes', '[', 'key', ']', '=', '"[{flags}]"', '.', 'format', '(', 'flags', '=', '""', '.', 'join', '(', 'new', ')', ')', 'else', ':', 'if', 'val', 'is', 'not', 'None', 'and', 'key', 'in', 'current', 'and', 'current', '[', 'key', ']', '!=', 'val', ':', 'changes', '[', 'key', ']', '=', 'val', '# apply changes', 'if', 'changes', 'or', 'reset_login_hours', 'or', 'reset_bad_password_count', ':', 'cmds', '=', '[', ']', 'for', 'change', 'in', 'changes', ':', 'cmds', '.', 'append', '(', "'{flag}{value}'", '.', 'format', '(', 'flag', '=', 'flags', '[', 'change', ']', ',', 'value', '=', '_quote_args', 
'(', 'changes', '[', 'change', ']', ')', ',', ')', ')', 'if', 'reset_login_hours', ':', 'cmds', '.', 'append', '(', "'--logon-hours-reset'", ')', 'if', 'reset_bad_password_count', ':', 'cmds', '.', 'append', '(', "'--bad-password-count-reset'", ')', 'res', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', "'pdbedit --modify --user {login} {changes}'", '.', 'format', '(', 'login', '=', '_quote_args', '(', 'login', ')', ',', 'changes', '=', '" "', '.', 'join', '(', 'cmds', ')', ',', ')', ',', ')', 'if', 'res', '[', "'retcode'", ']', '>', '0', ':', 'return', '{', 'login', ':', 'res', '[', "'stderr'", ']', 'if', "'stderr'", 'in', 'res', 'else', 'res', '[', "'stdout'", ']', '}', 'if', 'ret', '!=', "'created'", ':', 'ret', '=', "'updated'", 'return', '{', 'login', ':', 'ret', '}'] | Modify user account
login : string
login name
password : string
password
password_hashed : boolean
set if password is a nt hash instead of plain text
domain : string
users domain
profile : string
profile path
script : string
logon script
drive : string
home drive
homedir : string
home directory
fullname : string
full name
account_desc : string
account description
machine_sid : string
specify the machines new primary group SID or rid
user_sid : string
specify the users new primary group SID or rid
account_control : string
specify user account control properties
.. note::
Only the following can be set:
- N: No password required
- D: Account disabled
- H: Home directory required
- L: Automatic Locking
- X: Password does not expire
reset_login_hours : boolean
reset the users allowed logon hours
reset_bad_password_count : boolean
reset the stored bad login counter
.. note::
if user is absent and password is provided, the user will be created
CLI Example:
.. code-block:: bash
salt '*' pdbedit.modify inara fullname='Inara Serra'
salt '*' pdbedit.modify simon password=r1v3r
salt '*' pdbedit.modify jane drive='V:' homedir='\\\\serenity\\jane\\profile'
salt '*' pdbedit.modify mal account_control=NX | ['Modify', 'user', 'account'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pdbedit.py#L236-L385 |
2,755 | dbcli/athenacli | athenacli/packages/parseutils.py | queries_start_with | def queries_start_with(queries, prefixes):
"""Check if any queries start with any item from *prefixes*."""
for query in sqlparse.split(queries):
if query and query_starts_with(query, prefixes) is True:
return True
return False | python | def queries_start_with(queries, prefixes):
"""Check if any queries start with any item from *prefixes*."""
for query in sqlparse.split(queries):
if query and query_starts_with(query, prefixes) is True:
return True
return False | ['def', 'queries_start_with', '(', 'queries', ',', 'prefixes', ')', ':', 'for', 'query', 'in', 'sqlparse', '.', 'split', '(', 'queries', ')', ':', 'if', 'query', 'and', 'query_starts_with', '(', 'query', ',', 'prefixes', ')', 'is', 'True', ':', 'return', 'True', 'return', 'False'] | Check if any queries start with any item from *prefixes*. | ['Check', 'if', 'any', 'queries', 'start', 'with', 'any', 'item', 'from', '*', 'prefixes', '*', '.'] | train | https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/parseutils.py#L194-L199 |
2,756 | geopy/geopy | geopy/geocoders/geolake.py | Geolake.geocode | def geocode(
self,
query,
country_codes=None,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `country`, `state`, `city`, `zipcode`, `street`, `address`,
`houseNumber` or `subNumber`.
:param country_codes: Provides the geocoder with a list
of country codes that the query may reside in. This value will
limit the geocoder to the supplied countries. The country code
is a 2 character code as defined by the ISO-3166-1 alpha-2
standard (e.g. ``FR``). Multiple countries can be specified with
a Python list.
.. versionchanged:: 1.19.0
Previously only a Python list of countries could be specified.
Now a single country as a string can be specified as well.
:type country_codes: str or list
:param bool exactly_one: Return one result or a list of one result.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, dict):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['api_key'] = self.api_key
else:
params = {
'api_key': self.api_key,
'q': self.format_string % query,
}
if not country_codes:
country_codes = []
if isinstance(country_codes, string_compare):
country_codes = [country_codes]
if country_codes:
params['countryCodes'] = ",".join(country_codes)
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
) | python | def geocode(
self,
query,
country_codes=None,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `country`, `state`, `city`, `zipcode`, `street`, `address`,
`houseNumber` or `subNumber`.
:param country_codes: Provides the geocoder with a list
of country codes that the query may reside in. This value will
limit the geocoder to the supplied countries. The country code
is a 2 character code as defined by the ISO-3166-1 alpha-2
standard (e.g. ``FR``). Multiple countries can be specified with
a Python list.
.. versionchanged:: 1.19.0
Previously only a Python list of countries could be specified.
Now a single country as a string can be specified as well.
:type country_codes: str or list
:param bool exactly_one: Return one result or a list of one result.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, dict):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['api_key'] = self.api_key
else:
params = {
'api_key': self.api_key,
'q': self.format_string % query,
}
if not country_codes:
country_codes = []
if isinstance(country_codes, string_compare):
country_codes = [country_codes]
if country_codes:
params['countryCodes'] = ",".join(country_codes)
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
) | ['def', 'geocode', '(', 'self', ',', 'query', ',', 'country_codes', '=', 'None', ',', 'exactly_one', '=', 'True', ',', 'timeout', '=', 'DEFAULT_SENTINEL', ',', ')', ':', 'if', 'isinstance', '(', 'query', ',', 'dict', ')', ':', 'params', '=', '{', 'key', ':', 'val', 'for', 'key', ',', 'val', 'in', 'query', '.', 'items', '(', ')', 'if', 'key', 'in', 'self', '.', 'structured_query_params', '}', 'params', '[', "'api_key'", ']', '=', 'self', '.', 'api_key', 'else', ':', 'params', '=', '{', "'api_key'", ':', 'self', '.', 'api_key', ',', "'q'", ':', 'self', '.', 'format_string', '%', 'query', ',', '}', 'if', 'not', 'country_codes', ':', 'country_codes', '=', '[', ']', 'if', 'isinstance', '(', 'country_codes', ',', 'string_compare', ')', ':', 'country_codes', '=', '[', 'country_codes', ']', 'if', 'country_codes', ':', 'params', '[', "'countryCodes'", ']', '=', '","', '.', 'join', '(', 'country_codes', ')', 'url', '=', '"?"', '.', 'join', '(', '(', 'self', '.', 'api', ',', 'urlencode', '(', 'params', ')', ')', ')', 'logger', '.', 'debug', '(', '"%s.geocode: %s"', ',', 'self', '.', '__class__', '.', '__name__', ',', 'url', ')', 'return', 'self', '.', '_parse_json', '(', 'self', '.', '_call_geocoder', '(', 'url', ',', 'timeout', '=', 'timeout', ')', ',', 'exactly_one', ')'] | Return a location point by address.
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `country`, `state`, `city`, `zipcode`, `street`, `address`,
`houseNumber` or `subNumber`.
:param country_codes: Provides the geocoder with a list
of country codes that the query may reside in. This value will
limit the geocoder to the supplied countries. The country code
is a 2 character code as defined by the ISO-3166-1 alpha-2
standard (e.g. ``FR``). Multiple countries can be specified with
a Python list.
.. versionchanged:: 1.19.0
Previously only a Python list of countries could be specified.
Now a single country as a string can be specified as well.
:type country_codes: str or list
:param bool exactly_one: Return one result or a list of one result.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``. | ['Return', 'a', 'location', 'point', 'by', 'address', '.'] | train | https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/geocoders/geolake.py#L87-L154 |
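A hedged usage sketch with a placeholder API key, using the structured-query form of the call:
```python
from geopy.geocoders import Geolake

geolocator = Geolake(api_key='your-geolake-api-key')
location = geolocator.geocode({'city': 'Paris', 'street': 'Rue de Rivoli'},
                              country_codes='FR')
if location is not None:
    print(location.latitude, location.longitude)
```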
2,757 | DLR-RM/RAFCON | source/rafcon/gui/controllers/utils/tree_view_controller.py | TreeViewController.select_entry | def select_entry(self, core_element_id, by_cursor=True):
"""Selects the row entry belonging to the given core_element_id by cursor or tree selection"""
path = self.get_path_for_core_element(core_element_id)
if path:
if by_cursor:
self.tree_view.set_cursor(path)
else:
self.tree_view.get_selection().select_path(path)
else:
self._logger.warning("Path not valid: {0} (by_cursor {1})".format(str(core_element_id), str(by_cursor))) | python | def select_entry(self, core_element_id, by_cursor=True):
"""Selects the row entry belonging to the given core_element_id by cursor or tree selection"""
path = self.get_path_for_core_element(core_element_id)
if path:
if by_cursor:
self.tree_view.set_cursor(path)
else:
self.tree_view.get_selection().select_path(path)
else:
self._logger.warning("Path not valid: {0} (by_cursor {1})".format(str(core_element_id), str(by_cursor))) | ['def', 'select_entry', '(', 'self', ',', 'core_element_id', ',', 'by_cursor', '=', 'True', ')', ':', 'path', '=', 'self', '.', 'get_path_for_core_element', '(', 'core_element_id', ')', 'if', 'path', ':', 'if', 'by_cursor', ':', 'self', '.', 'tree_view', '.', 'set_cursor', '(', 'path', ')', 'else', ':', 'self', '.', 'tree_view', '.', 'get_selection', '(', ')', '.', 'select_path', '(', 'path', ')', 'else', ':', 'self', '.', '_logger', '.', 'warning', '(', '"Path not valid: {0} (by_cursor {1})"', '.', 'format', '(', 'str', '(', 'core_element_id', ')', ',', 'str', '(', 'by_cursor', ')', ')', ')'] | Selects the row entry belonging to the given core_element_id by cursor or tree selection | ['Selects', 'the', 'row', 'entry', 'belonging', 'to', 'the', 'given', 'core_element_id', 'by', 'cursor', 'or', 'tree', 'selection'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L852-L861 |
2,758 | koszullab/metaTOR | metator/scripts/fasta_utils.py | rename_proteins | def rename_proteins(prot_in, prot_out=None, chunk_size=DEFAULT_CHUNK_SIZE):
"""Rename prodigal output files
Rename output files from prodigal according to the following naming
scheme: >contigX_chunkY__geneZ
Chunk numbering starts at 0 and gene identification is taken from prodigal.
Parameters
----------
prot_in : file, str or pathlib.Path
The input protein file in FASTA format to be renamed.
prot_out : file, str or pathlib.Path
The output protein file to be renamed into.
chunk_size : int, optional
The size of the chunks (in bp) used in the pipeline. Default is 1000.
"""
if prot_out is None:
prot_out = "{}_renamed.fa".format(prot_in.split(".")[0])
with open(prot_out, "w") as prot_out_handle:
for record in SeqIO.parse(prot_in, "fasta"):
header = record.description
name, pos_start, _, _, _ = header.split("#")
chunk_start = int(pos_start) // chunk_size
name_split = name.split("_")
contig_name = "_".join(name_split[:-1])
gene_id = name_split[-1]
new_record_id = "{}_{}__gene{}".format(
contig_name, chunk_start, gene_id
)
prot_out_handle.write(">{}\n".format(new_record_id))
prot_out_handle.write("{}\n".format(str(record.seq))) | python | def rename_proteins(prot_in, prot_out=None, chunk_size=DEFAULT_CHUNK_SIZE):
"""Rename prodigal output files
Rename output files from prodigal according to the following naming
scheme: >contigX_chunkY__geneZ
Chunk numbering starts at 0 and gene identification is taken from prodigal.
Parameters
----------
prot_in : file, str or pathlib.Path
The input protein file in FASTA format to be renamed.
prot_out : file, str or pathlib.Path
The output protein file to be renamed into.
chunk_size : int, optional
The size of the chunks (in bp) used in the pipeline. Default is 1000.
"""
if prot_out is None:
prot_out = "{}_renamed.fa".format(prot_in.split(".")[0])
with open(prot_out, "w") as prot_out_handle:
for record in SeqIO.parse(prot_in, "fasta"):
header = record.description
name, pos_start, _, _, _ = header.split("#")
chunk_start = int(pos_start) // chunk_size
name_split = name.split("_")
contig_name = "_".join(name_split[:-1])
gene_id = name_split[-1]
new_record_id = "{}_{}__gene{}".format(
contig_name, chunk_start, gene_id
)
prot_out_handle.write(">{}\n".format(new_record_id))
prot_out_handle.write("{}\n".format(str(record.seq))) | ['def', 'rename_proteins', '(', 'prot_in', ',', 'prot_out', '=', 'None', ',', 'chunk_size', '=', 'DEFAULT_CHUNK_SIZE', ')', ':', 'if', 'prot_out', 'is', 'None', ':', 'prot_out', '=', '"{}_renamed.fa"', '.', 'format', '(', 'prot_in', '.', 'split', '(', '"."', ')', '[', '0', ']', ')', 'with', 'open', '(', 'prot_out', ',', '"w"', ')', 'as', 'prot_out_handle', ':', 'for', 'record', 'in', 'SeqIO', '.', 'parse', '(', 'prot_in', ',', '"fasta"', ')', ':', 'header', '=', 'record', '.', 'description', 'name', ',', 'pos_start', ',', '_', ',', '_', ',', '_', '=', 'header', '.', 'split', '(', '"#"', ')', 'chunk_start', '=', 'int', '(', 'pos_start', ')', '//', 'chunk_size', 'name_split', '=', 'name', '.', 'split', '(', '"_"', ')', 'contig_name', '=', '"_"', '.', 'join', '(', 'name_split', '[', ':', '-', '1', ']', ')', 'gene_id', '=', 'name_split', '[', '-', '1', ']', 'new_record_id', '=', '"{}_{}__gene{}"', '.', 'format', '(', 'contig_name', ',', 'chunk_start', ',', 'gene_id', ')', 'prot_out_handle', '.', 'write', '(', '">{}\\n"', '.', 'format', '(', 'new_record_id', ')', ')', 'prot_out_handle', '.', 'write', '(', '"{}\\n"', '.', 'format', '(', 'str', '(', 'record', '.', 'seq', ')', ')', ')'] | Rename prodigal output files
Rename output files from prodigal according to the following naming
scheme: >contigX_chunkY__geneZ
Chunk numbering starts at 0 and gene identification is taken from prodigal.
Parameters
----------
prot_in : file, str or pathlib.Path
The input protein file in FASTA format to be renamed.
prot_out : file, str or pathlib.Path
The output protein file to be renamed into.
chunk_size : int, optional
The size of the chunks (in bp) used in the pipeline. Default is 1000. | ['Rename', 'prodigal', 'output', 'files'] | train | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L106-L144 |
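A brief sketch with placeholder file names, rewriting prodigal headers as contigX_chunkY__geneZ using 1 kb chunks:
```python
rename_proteins('assembly_proteins.fa', 'assembly_proteins_renamed.fa', chunk_size=1000)
```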
2,759 | astropy/photutils | photutils/psf/photometry.py | IterativelySubtractedPSFPhotometry._do_photometry | def _do_photometry(self, param_tab, n_start=1):
"""
Helper function which performs the iterations of the photometry
process.
Parameters
----------
param_names : list
Names of the columns which represent the initial guesses.
For example, ['x_0', 'y_0', 'flux_0'], for initial guesses on
the center positions and the flux.
n_start : int
Integer representing the start index of the iteration. It
is 1 if init_guesses are None, and 2 otherwise.
Returns
-------
output_table : `~astropy.table.Table` or None
Table with the photometry results, i.e., centroids and
fluxes estimations and the initial estimates used to start
the fitting process.
"""
output_table = Table()
self._define_fit_param_names()
for (init_parname, fit_parname) in zip(self._pars_to_set.keys(),
self._pars_to_output.keys()):
output_table.add_column(Column(name=init_parname))
output_table.add_column(Column(name=fit_parname))
sources = self.finder(self._residual_image)
n = n_start
while(sources is not None and
(self.niters is None or n <= self.niters)):
apertures = CircularAperture((sources['xcentroid'],
sources['ycentroid']),
r=self.aperture_radius)
sources['aperture_flux'] = aperture_photometry(
self._residual_image, apertures)['aperture_sum']
init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
data=[sources['id'], sources['xcentroid'],
sources['ycentroid'],
sources['aperture_flux']])
for param_tab_name, param_name in self._pars_to_set.items():
if param_tab_name not in (['x_0', 'y_0', 'flux_0']):
init_guess_tab.add_column(
Column(name=param_tab_name,
data=(getattr(self.psf_model,
param_name) *
np.ones(len(sources)))))
star_groups = self.group_maker(init_guess_tab)
table, self._residual_image = super().nstar(
self._residual_image, star_groups)
star_groups = star_groups.group_by('group_id')
table = hstack([star_groups, table])
table['iter_detected'] = n*np.ones(table['x_fit'].shape,
dtype=np.int32)
output_table = vstack([output_table, table])
# do not warn if no sources are found beyond the first iteration
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoDetectionsWarning)
sources = self.finder(self._residual_image)
n += 1
return output_table | python | def _do_photometry(self, param_tab, n_start=1):
"""
Helper function which performs the iterations of the photometry
process.
Parameters
----------
param_names : list
Names of the columns which represent the initial guesses.
For example, ['x_0', 'y_0', 'flux_0'], for initial guesses on
the center positions and the flux.
n_start : int
Integer representing the start index of the iteration. It
is 1 if init_guesses are None, and 2 otherwise.
Returns
-------
output_table : `~astropy.table.Table` or None
Table with the photometry results, i.e., centroids and
fluxes estimations and the initial estimates used to start
the fitting process.
"""
output_table = Table()
self._define_fit_param_names()
for (init_parname, fit_parname) in zip(self._pars_to_set.keys(),
self._pars_to_output.keys()):
output_table.add_column(Column(name=init_parname))
output_table.add_column(Column(name=fit_parname))
sources = self.finder(self._residual_image)
n = n_start
while(sources is not None and
(self.niters is None or n <= self.niters)):
apertures = CircularAperture((sources['xcentroid'],
sources['ycentroid']),
r=self.aperture_radius)
sources['aperture_flux'] = aperture_photometry(
self._residual_image, apertures)['aperture_sum']
init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
data=[sources['id'], sources['xcentroid'],
sources['ycentroid'],
sources['aperture_flux']])
for param_tab_name, param_name in self._pars_to_set.items():
if param_tab_name not in (['x_0', 'y_0', 'flux_0']):
init_guess_tab.add_column(
Column(name=param_tab_name,
data=(getattr(self.psf_model,
param_name) *
np.ones(len(sources)))))
star_groups = self.group_maker(init_guess_tab)
table, self._residual_image = super().nstar(
self._residual_image, star_groups)
star_groups = star_groups.group_by('group_id')
table = hstack([star_groups, table])
table['iter_detected'] = n*np.ones(table['x_fit'].shape,
dtype=np.int32)
output_table = vstack([output_table, table])
# do not warn if no sources are found beyond the first iteration
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoDetectionsWarning)
sources = self.finder(self._residual_image)
n += 1
return output_table | ['def', '_do_photometry', '(', 'self', ',', 'param_tab', ',', 'n_start', '=', '1', ')', ':', 'output_table', '=', 'Table', '(', ')', 'self', '.', '_define_fit_param_names', '(', ')', 'for', '(', 'init_parname', ',', 'fit_parname', ')', 'in', 'zip', '(', 'self', '.', '_pars_to_set', '.', 'keys', '(', ')', ',', 'self', '.', '_pars_to_output', '.', 'keys', '(', ')', ')', ':', 'output_table', '.', 'add_column', '(', 'Column', '(', 'name', '=', 'init_parname', ')', ')', 'output_table', '.', 'add_column', '(', 'Column', '(', 'name', '=', 'fit_parname', ')', ')', 'sources', '=', 'self', '.', 'finder', '(', 'self', '.', '_residual_image', ')', 'n', '=', 'n_start', 'while', '(', 'sources', 'is', 'not', 'None', 'and', '(', 'self', '.', 'niters', 'is', 'None', 'or', 'n', '<=', 'self', '.', 'niters', ')', ')', ':', 'apertures', '=', 'CircularAperture', '(', '(', 'sources', '[', "'xcentroid'", ']', ',', 'sources', '[', "'ycentroid'", ']', ')', ',', 'r', '=', 'self', '.', 'aperture_radius', ')', 'sources', '[', "'aperture_flux'", ']', '=', 'aperture_photometry', '(', 'self', '.', '_residual_image', ',', 'apertures', ')', '[', "'aperture_sum'", ']', 'init_guess_tab', '=', 'Table', '(', 'names', '=', '[', "'id'", ',', "'x_0'", ',', "'y_0'", ',', "'flux_0'", ']', ',', 'data', '=', '[', 'sources', '[', "'id'", ']', ',', 'sources', '[', "'xcentroid'", ']', ',', 'sources', '[', "'ycentroid'", ']', ',', 'sources', '[', "'aperture_flux'", ']', ']', ')', 'for', 'param_tab_name', ',', 'param_name', 'in', 'self', '.', '_pars_to_set', '.', 'items', '(', ')', ':', 'if', 'param_tab_name', 'not', 'in', '(', '[', "'x_0'", ',', "'y_0'", ',', "'flux_0'", ']', ')', ':', 'init_guess_tab', '.', 'add_column', '(', 'Column', '(', 'name', '=', 'param_tab_name', ',', 'data', '=', '(', 'getattr', '(', 'self', '.', 'psf_model', ',', 'param_name', ')', '*', 'np', '.', 'ones', '(', 'len', '(', 'sources', ')', ')', ')', ')', ')', 'star_groups', '=', 'self', '.', 'group_maker', '(', 'init_guess_tab', ')', 'table', ',', 'self', '.', '_residual_image', '=', 'super', '(', ')', '.', 'nstar', '(', 'self', '.', '_residual_image', ',', 'star_groups', ')', 'star_groups', '=', 'star_groups', '.', 'group_by', '(', "'group_id'", ')', 'table', '=', 'hstack', '(', '[', 'star_groups', ',', 'table', ']', ')', 'table', '[', "'iter_detected'", ']', '=', 'n', '*', 'np', '.', 'ones', '(', 'table', '[', "'x_fit'", ']', '.', 'shape', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'output_table', '=', 'vstack', '(', '[', 'output_table', ',', 'table', ']', ')', '# do not warn if no sources are found beyond the first iteration', 'with', 'warnings', '.', 'catch_warnings', '(', ')', ':', 'warnings', '.', 'simplefilter', '(', "'ignore'", ',', 'NoDetectionsWarning', ')', 'sources', '=', 'self', '.', 'finder', '(', 'self', '.', '_residual_image', ')', 'n', '+=', '1', 'return', 'output_table'] | Helper function which performs the iterations of the photometry
process.
Parameters
----------
param_names : list
Names of the columns which represent the initial guesses.
For example, ['x_0', 'y_0', 'flux_0'], for intial guesses on
the center positions and the flux.
n_start : int
Integer representing the start index of the iteration. It
is 1 if init_guesses are None, and 2 otherwise.
Returns
-------
output_table : `~astropy.table.Table` or None
Table with the photometry results, i.e., centroids and
fluxes estimations and the initial estimates used to start
the fitting process. | ['Helper', 'function', 'which', 'performs', 'the', 'iterations', 'of', 'the', 'photometry', 'process', '.'] | train | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/photometry.py#L666-L740 |
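A hedged, library-free sketch of the detect -> fit -> subtract loop that the helper above iterates; finder and fit_and_subtract are stand-in callables supplied by the caller, not photutils API.

def iterative_photometry(residual, finder, fit_and_subtract, niters=None, n_start=1):
    # Detect sources on the residual image, fit and subtract them, and tag each
    # fitted row with the iteration in which it was detected.
    results = []
    n = n_start
    sources = finder(residual)
    while sources is not None and (niters is None or n <= niters):
        rows, residual = fit_and_subtract(residual, sources)
        for row in rows:
            row["iter_detected"] = n
        results.extend(rows)
        sources = finder(residual)
        n += 1
    return results, residual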
2,760 | secdev/scapy | scapy/layers/l2.py | is_promisc | def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip.""" # noqa: E501
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs) # noqa: E501
return responses is not None | python | def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip.""" # noqa: E501
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs) # noqa: E501
return responses is not None | ['def', 'is_promisc', '(', 'ip', ',', 'fake_bcast', '=', '"ff:ff:00:00:00:00"', ',', '*', '*', 'kargs', ')', ':', '# noqa: E501', 'responses', '=', 'srp1', '(', 'Ether', '(', 'dst', '=', 'fake_bcast', ')', '/', 'ARP', '(', 'op', '=', '"who-has"', ',', 'pdst', '=', 'ip', ')', ',', 'type', '=', 'ETH_P_ARP', ',', 'iface_hint', '=', 'ip', ',', 'timeout', '=', '1', ',', 'verbose', '=', '0', ',', '*', '*', 'kargs', ')', '# noqa: E501', 'return', 'responses', 'is', 'not', 'None'] | Try to guess if target is in Promisc mode. The target is provided by its ip. | ['Try', 'to', 'guess', 'if', 'target', 'is', 'in', 'Promisc', 'mode', '.', 'The', 'target', 'is', 'provided', 'by', 'its', 'ip', '.'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/l2.py#L640-L645 |
2,761 | Chilipp/psyplot | psyplot/data.py | CFDecoder._check_triangular_bounds | def _check_triangular_bounds(self, var, coords=None, axis='x', nans=None):
"""
Checks whether the bounds in the variable attribute are triangular
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
bool or None
True, if unstructured, None if it could not be determined
xarray.Coordinate or None
the bounds coordinate (if existent)"""
# !!! WILL BE REMOVED IN THE NEAR FUTURE! !!!
bounds = self.get_cell_node_coord(var, coords, axis=axis,
nans=nans)
if bounds is not None:
return bounds.shape[-1] == 3, bounds
else:
return None, None | python | def _check_triangular_bounds(self, var, coords=None, axis='x', nans=None):
"""
Checks whether the bounds in the variable attribute are triangular
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
bool or None
True, if unstructured, None if it could not be determined
xarray.Coordinate or None
the bounds coordinate (if existent)"""
# !!! WILL BE REMOVED IN THE NEAR FUTURE! !!!
bounds = self.get_cell_node_coord(var, coords, axis=axis,
nans=nans)
if bounds is not None:
return bounds.shape[-1] == 3, bounds
else:
return None, None | ['def', '_check_triangular_bounds', '(', 'self', ',', 'var', ',', 'coords', '=', 'None', ',', 'axis', '=', "'x'", ',', 'nans', '=', 'None', ')', ':', '# !!! WILL BE REMOVED IN THE NEAR FUTURE! !!!', 'bounds', '=', 'self', '.', 'get_cell_node_coord', '(', 'var', ',', 'coords', ',', 'axis', '=', 'axis', ',', 'nans', '=', 'nans', ')', 'if', 'bounds', 'is', 'not', 'None', ':', 'return', 'bounds', '.', 'shape', '[', '-', '1', ']', '==', '3', ',', 'bounds', 'else', ':', 'return', 'None', ',', 'None'] | Checks whether the bounds in the variable attribute are triangular
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
bool or None
True, if unstructured, None if it could not be determined
xarray.Coordinate or None
the bounds coordinate (if existent) | ['Checks', 'whether', 'the', 'bounds', 'in', 'the', 'variable', 'attribute', 'are', 'triangular'] | train | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L795-L815 |
2,762 | ssato/python-anyconfig | src/anyconfig/backend/xml.py | _process_elem_attrs | def _process_elem_attrs(elem, dic, subdic, container=dict, attrs="@attrs",
**options):
"""
:param elem: ET Element object or None
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating dic and subdic as side effects
"""
adic = _parse_attrs(elem, container=container, **options)
if not elem.text and not len(elem) and options.get("merge_attrs"):
dic[elem.tag] = adic
else:
subdic[attrs] = adic | python | def _process_elem_attrs(elem, dic, subdic, container=dict, attrs="@attrs",
**options):
"""
:param elem: ET Element object or None
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating dic and subdic as side effects
"""
adic = _parse_attrs(elem, container=container, **options)
if not elem.text and not len(elem) and options.get("merge_attrs"):
dic[elem.tag] = adic
else:
subdic[attrs] = adic | ['def', '_process_elem_attrs', '(', 'elem', ',', 'dic', ',', 'subdic', ',', 'container', '=', 'dict', ',', 'attrs', '=', '"@attrs"', ',', '*', '*', 'options', ')', ':', 'adic', '=', '_parse_attrs', '(', 'elem', ',', 'container', '=', 'container', ',', '*', '*', 'options', ')', 'if', 'not', 'elem', '.', 'text', 'and', 'not', 'len', '(', 'elem', ')', 'and', 'options', '.', 'get', '(', '"merge_attrs"', ')', ':', 'dic', '[', 'elem', '.', 'tag', ']', '=', 'adic', 'else', ':', 'subdic', '[', 'attrs', ']', '=', 'adic'] | :param elem: ET Element object or None
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating dic and subdic as side effects | [':', 'param', 'elem', ':', 'ET', 'Element', 'object', 'or', 'None', ':', 'param', 'dic', ':', '<container', '>', '(', 'dict', '[', '-', 'like', ']', ')', 'object', 'converted', 'from', 'elem', ':', 'param', 'subdic', ':', 'Sub', '<container', '>', 'object', 'converted', 'from', 'elem', ':', 'param', 'options', ':', 'Keyword', 'options', 'see', 'the', 'description', 'of', ':', 'func', ':', 'elem_to_container', 'for', 'more', 'details', '.'] | train | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L221-L237 |
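A standalone illustration of the two attribute-handling modes described above, using only the standard library; prefixing attribute names with '@' mirrors what _parse_attrs is assumed to do and is not anyconfig's public API.

import xml.etree.ElementTree as ET

elem = ET.fromstring('<port number="8080" proto="tcp"/>')
adic = {"@" + key: value for key, value in elem.attrib.items()}

merged = {elem.tag: adic}              # merge_attrs=True and the element has no text/children
nested = {elem.tag: {"@attrs": adic}}  # default: attributes kept under "@attrs"
print(merged)
print(nested)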
2,763 | inveniosoftware/invenio-files-rest | invenio_files_rest/serializer.py | PartSchema.wrap | def wrap(self, data, many):
"""Wrap response in envelope."""
if not many:
return data
else:
data = {'parts': data}
multipart = self.context.get('multipart')
if multipart:
data.update(MultipartObjectSchema(context={
'bucket': multipart.bucket}).dump(multipart).data)
return data | python | def wrap(self, data, many):
"""Wrap response in envelope."""
if not many:
return data
else:
data = {'parts': data}
multipart = self.context.get('multipart')
if multipart:
data.update(MultipartObjectSchema(context={
'bucket': multipart.bucket}).dump(multipart).data)
return data | ['def', 'wrap', '(', 'self', ',', 'data', ',', 'many', ')', ':', 'if', 'not', 'many', ':', 'return', 'data', 'else', ':', 'data', '=', '{', "'parts'", ':', 'data', '}', 'multipart', '=', 'self', '.', 'context', '.', 'get', '(', "'multipart'", ')', 'if', 'multipart', ':', 'data', '.', 'update', '(', 'MultipartObjectSchema', '(', 'context', '=', '{', "'bucket'", ':', 'multipart', '.', 'bucket', '}', ')', '.', 'dump', '(', 'multipart', ')', '.', 'data', ')', 'return', 'data'] | Wrap response in envelope. | ['Wrap', 'response', 'in', 'envelope', '.'] | train | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/serializer.py#L180-L190 |
2,764 | dpkp/kafka-python | kafka/client_async.py | KafkaClient._maybe_connect | def _maybe_connect(self, node_id):
"""Idempotent non-blocking connection attempt to the given node id."""
with self._lock:
conn = self._conns.get(node_id)
if conn is None:
broker = self.cluster.broker_metadata(node_id)
assert broker, 'Broker id %s not in current metadata' % (node_id,)
log.debug("Initiating connection to node %s at %s:%s",
node_id, broker.host, broker.port)
host, port, afi = get_ip_port_afi(broker.host)
cb = WeakMethod(self._conn_state_change)
conn = BrokerConnection(host, broker.port, afi,
state_change_callback=cb,
node_id=node_id,
**self.config)
self._conns[node_id] = conn
# Check if existing connection should be recreated because host/port changed
elif self._should_recycle_connection(conn):
self._conns.pop(node_id)
return False
elif conn.connected():
return True
conn.connect()
return conn.connected() | python | def _maybe_connect(self, node_id):
"""Idempotent non-blocking connection attempt to the given node id."""
with self._lock:
conn = self._conns.get(node_id)
if conn is None:
broker = self.cluster.broker_metadata(node_id)
assert broker, 'Broker id %s not in current metadata' % (node_id,)
log.debug("Initiating connection to node %s at %s:%s",
node_id, broker.host, broker.port)
host, port, afi = get_ip_port_afi(broker.host)
cb = WeakMethod(self._conn_state_change)
conn = BrokerConnection(host, broker.port, afi,
state_change_callback=cb,
node_id=node_id,
**self.config)
self._conns[node_id] = conn
# Check if existing connection should be recreated because host/port changed
elif self._should_recycle_connection(conn):
self._conns.pop(node_id)
return False
elif conn.connected():
return True
conn.connect()
return conn.connected() | ['def', '_maybe_connect', '(', 'self', ',', 'node_id', ')', ':', 'with', 'self', '.', '_lock', ':', 'conn', '=', 'self', '.', '_conns', '.', 'get', '(', 'node_id', ')', 'if', 'conn', 'is', 'None', ':', 'broker', '=', 'self', '.', 'cluster', '.', 'broker_metadata', '(', 'node_id', ')', 'assert', 'broker', ',', "'Broker id %s not in current metadata'", '%', '(', 'node_id', ',', ')', 'log', '.', 'debug', '(', '"Initiating connection to node %s at %s:%s"', ',', 'node_id', ',', 'broker', '.', 'host', ',', 'broker', '.', 'port', ')', 'host', ',', 'port', ',', 'afi', '=', 'get_ip_port_afi', '(', 'broker', '.', 'host', ')', 'cb', '=', 'WeakMethod', '(', 'self', '.', '_conn_state_change', ')', 'conn', '=', 'BrokerConnection', '(', 'host', ',', 'broker', '.', 'port', ',', 'afi', ',', 'state_change_callback', '=', 'cb', ',', 'node_id', '=', 'node_id', ',', '*', '*', 'self', '.', 'config', ')', 'self', '.', '_conns', '[', 'node_id', ']', '=', 'conn', '# Check if existing connection should be recreated because host/port changed', 'elif', 'self', '.', '_should_recycle_connection', '(', 'conn', ')', ':', 'self', '.', '_conns', '.', 'pop', '(', 'node_id', ')', 'return', 'False', 'elif', 'conn', '.', 'connected', '(', ')', ':', 'return', 'True', 'conn', '.', 'connect', '(', ')', 'return', 'conn', '.', 'connected', '(', ')'] | Idempotent non-blocking connection attempt to the given node id. | ['Idempotent', 'non', '-', 'blocking', 'connection', 'attempt', 'to', 'the', 'given', 'node', 'id', '.'] | train | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L360-L388 |
2,765 | nuagenetworks/bambou | bambou/nurest_object.py | NUMetaRESTObject.rest_name | def rest_name(cls):
""" Represents a singular REST name
"""
if cls.__name__ == "NURESTRootObject" or cls.__name__ == "NURESTObject":
return "Not Implemented"
if cls.__rest_name__ is None:
raise NotImplementedError('%s has no defined name. Implement rest_name property first.' % cls)
return cls.__rest_name__ | python | def rest_name(cls):
""" Represents a singular REST name
"""
if cls.__name__ == "NURESTRootObject" or cls.__name__ == "NURESTObject":
return "Not Implemented"
if cls.__rest_name__ is None:
raise NotImplementedError('%s has no defined name. Implement rest_name property first.' % cls)
return cls.__rest_name__ | ['def', 'rest_name', '(', 'cls', ')', ':', 'if', 'cls', '.', '__name__', '==', '"NURESTRootObject"', 'or', 'cls', '.', '__name__', '==', '"NURESTObject"', ':', 'return', '"Not Implemented"', 'if', 'cls', '.', '__rest_name__', 'is', 'None', ':', 'raise', 'NotImplementedError', '(', "'%s has no defined name. Implement rest_name property first.'", '%', 'cls', ')', 'return', 'cls', '.', '__rest_name__'] | Represents a singular REST name | ['Represents', 'a', 'singular', 'REST', 'name'] | train | https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_object.py#L51-L60 |
2,766 | saltstack/salt | salt/version.py | msi_conformant_version | def msi_conformant_version():
'''
An msi installer uninstalls/replaces a lower "internal version" of itself.
"internal version" is ivMAJOR.ivMINOR.ivBUILD with max values 255.255.65535.
Using the build nr allows continuous integration of the installer.
"Display version" is indipendent and free format: Year.Month.Bugfix as in Salt 2016.11.3.
Calculation of the internal version fields:
ivMAJOR = 'short year' (2 digits).
ivMINOR = 20*(month-1) + Bugfix
Combine Month and Bugfix to free ivBUILD for the build number
This limits Bugfix < 20.
The msi automatically replaces only 19 bugfixes of a month, one must uninstall manually.
ivBUILD = git commit count (noc)
noc for tags is 0, representing the final word, translates to the highest build number (65535).
Examples:
git checkout Display version Internal version Remark
develop 2016.11.0-742 16.200.742 The develop branch has bugfix 0
2016.11 2016.11.2-78 16.202.78
2016.11 2016.11.9-88 16.209.88
2018.8 2018.3.2-1306 18.42.1306
v2016.11.0 2016.11.0 16.200.65535 Tags have noc 0
v2016.11.2 2016.11.2 16.202.65535
'''
short_year = int(six.text_type(__saltstack_version__.major)[2:])
month = __saltstack_version__.minor
bugfix = __saltstack_version__.bugfix
if bugfix > 19:
bugfix = 19
noc = __saltstack_version__.noc
if noc == 0:
noc = 65535
return '{}.{}.{}'.format(short_year, 20*(month-1)+bugfix, noc) | python | def msi_conformant_version():
'''
An msi installer uninstalls/replaces a lower "internal version" of itself.
"internal version" is ivMAJOR.ivMINOR.ivBUILD with max values 255.255.65535.
Using the build nr allows continuous integration of the installer.
"Display version" is indipendent and free format: Year.Month.Bugfix as in Salt 2016.11.3.
Calculation of the internal version fields:
ivMAJOR = 'short year' (2 digits).
ivMINOR = 20*(month-1) + Bugfix
Combine Month and Bugfix to free ivBUILD for the build number
This limits Bugfix < 20.
The msi automatically replaces only 19 bugfixes of a month, one must uninstall manually.
ivBUILD = git commit count (noc)
noc for tags is 0, representing the final word, translates to the highest build number (65535).
Examples:
git checkout Display version Internal version Remark
develop 2016.11.0-742 16.200.742 The develop branch has bugfix 0
2016.11 2016.11.2-78 16.202.78
2016.11 2016.11.9-88 16.209.88
2018.8 2018.3.2-1306 18.42.1306
v2016.11.0 2016.11.0 16.200.65535 Tags have noc 0
v2016.11.2 2016.11.2 16.202.65535
'''
short_year = int(six.text_type(__saltstack_version__.major)[2:])
month = __saltstack_version__.minor
bugfix = __saltstack_version__.bugfix
if bugfix > 19:
bugfix = 19
noc = __saltstack_version__.noc
if noc == 0:
noc = 65535
return '{}.{}.{}'.format(short_year, 20*(month-1)+bugfix, noc) | ['def', 'msi_conformant_version', '(', ')', ':', 'short_year', '=', 'int', '(', 'six', '.', 'text_type', '(', '__saltstack_version__', '.', 'major', ')', '[', '2', ':', ']', ')', 'month', '=', '__saltstack_version__', '.', 'minor', 'bugfix', '=', '__saltstack_version__', '.', 'bugfix', 'if', 'bugfix', '>', '19', ':', 'bugfix', '=', '19', 'noc', '=', '__saltstack_version__', '.', 'noc', 'if', 'noc', '==', '0', ':', 'noc', '=', '65535', 'return', "'{}.{}.{}'", '.', 'format', '(', 'short_year', ',', '20', '*', '(', 'month', '-', '1', ')', '+', 'bugfix', ',', 'noc', ')'] | An msi installer uninstalls/replaces a lower "internal version" of itself.
"internal version" is ivMAJOR.ivMINOR.ivBUILD with max values 255.255.65535.
Using the build nr allows continuous integration of the installer.
"Display version" is indipendent and free format: Year.Month.Bugfix as in Salt 2016.11.3.
Calculation of the internal version fields:
ivMAJOR = 'short year' (2 digits).
ivMINOR = 20*(month-1) + Bugfix
Combine Month and Bugfix to free ivBUILD for the build number
This limits Bugfix < 20.
The msi automatically replaces only 19 bugfixes of a month, one must uninstall manually.
ivBUILD = git commit count (noc)
noc for tags is 0, representing the final word, translates to the highest build number (65535).
Examples:
git checkout Display version Internal version Remark
develop 2016.11.0-742 16.200.742 The develop branch has bugfix 0
2016.11 2016.11.2-78 16.202.78
2016.11 2016.11.9-88 16.209.88
2018.8 2018.3.2-1306 18.42.1306
v2016.11.0 2016.11.0 16.200.65535 Tags have noc 0
v2016.11.2 2016.11.2 16.202.65535 | ['An', 'msi', 'installer', 'uninstalls', '/', 'replaces', 'a', 'lower', 'internal', 'version', 'of', 'itself', '.', 'internal', 'version', 'is', 'ivMAJOR', '.', 'ivMINOR', '.', 'ivBUILD', 'with', 'max', 'values', '255', '.', '255', '.', '65535', '.', 'Using', 'the', 'build', 'nr', 'allows', 'continuous', 'integration', 'of', 'the', 'installer', '.', 'Display', 'version', 'is', 'indipendent', 'and', 'free', 'format', ':', 'Year', '.', 'Month', '.', 'Bugfix', 'as', 'in', 'Salt', '2016', '.', '11', '.', '3', '.', 'Calculation', 'of', 'the', 'internal', 'version', 'fields', ':', 'ivMAJOR', '=', 'short', 'year', '(', '2', 'digits', ')', '.', 'ivMINOR', '=', '20', '*', '(', 'month', '-', '1', ')', '+', 'Bugfix', 'Combine', 'Month', 'and', 'Bugfix', 'to', 'free', 'ivBUILD', 'for', 'the', 'build', 'number', 'This', 'limits', 'Bugfix', '<', '20', '.', 'The', 'msi', 'automatically', 'replaces', 'only', '19', 'bugfixes', 'of', 'a', 'month', 'one', 'must', 'uninstall', 'manually', '.', 'ivBUILD', '=', 'git', 'commit', 'count', '(', 'noc', ')', 'noc', 'for', 'tags', 'is', '0', 'representing', 'the', 'final', 'word', 'translates', 'to', 'the', 'highest', 'build', 'number', '(', '65535', ')', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/version.py#L746-L779 |
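A standalone check of the version arithmetic documented above, reproducing rows from the table.

def internal_version(year, month, bugfix, noc):
    short_year = year % 100
    bugfix = min(bugfix, 19)
    noc = 65535 if noc == 0 else noc   # tags (noc == 0) get the highest build number
    return "{}.{}.{}".format(short_year, 20 * (month - 1) + bugfix, noc)

assert internal_version(2016, 11, 2, 78) == "16.202.78"
assert internal_version(2018, 3, 2, 1306) == "18.42.1306"
assert internal_version(2016, 11, 0, 0) == "16.200.65535"   # tag v2016.11.0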
2,767 | wonambi-python/wonambi | wonambi/ioeeg/abf.py | _read_header | def _read_header(fid):
"""Based on neo/rawio/axonrawio.py, but I only kept of data with no-gaps
and in one segment.
"""
fid.seek(0, SEEK_SET)
fFileSignature = fid.read(4)
assert fFileSignature == b'ABF2', 'only format ABF2 is currently supported'
header = {}
for key, offset, fmt in headerDescriptionV2:
fid.seek(0 + offset, SEEK_SET)
val = unpack(fmt, fid.read(calcsize(fmt)))
if len(val) == 1:
header[key] = val[0]
else:
header[key] = val
# sections
sections = {}
for s, sectionName in enumerate(sectionNames):
fid.seek(76 + s * 16)
uBlockIndex, uBytes, llNumEntries = unpack('IIl', fid.read(calcsize('IIl')))
sections[sectionName] = {}
sections[sectionName]['uBlockIndex'] = uBlockIndex
sections[sectionName]['uBytes'] = uBytes
sections[sectionName]['llNumEntries'] = llNumEntries
header['sections'] = sections
# strings sections
# hack for reading channels names and units
fid.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
big_string = fid.read(sections['StringsSection']['uBytes'])
goodstart = -1
for key in [b'AXENGN', b'clampex', b'Clampex', b'CLAMPEX', b'axoscope', b'Clampfit']:
goodstart = big_string.find(key)
if goodstart != -1:
break
assert goodstart != -1, 'This file does not contain clampex, axoscope or clampfit in the header'
big_string = big_string[goodstart:]
strings = big_string.split(b'\x00')
# ADC sections
header['listADCInfo'] = []
for i in range(sections['ADCSection']['llNumEntries']):
# read ADCInfo
fid.seek(sections['ADCSection']['uBlockIndex'] *
BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
ADCInfo = _read_info_as_dict(fid, ADCInfoDescription)
ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex'] - 1]
ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
header['listADCInfo'].append(ADCInfo)
# protocol sections
fid.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
header['protocol'] = _read_info_as_dict(fid, protocolInfoDescription)
header['sProtocolPath'] = strings[header['uProtocolPathIndex'] - 1]
# DAC sections
header['listDACInfo'] = []
for i in range(sections['DACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['DACSection']['uBlockIndex'] *
BLOCKSIZE + sections['DACSection']['uBytes'] * i)
DACInfo = _read_info_as_dict(fid, DACInfoDescription)
DACInfo['DACChNames'] = strings[DACInfo['lDACChannelNameIndex'] - 1]
DACInfo['DACChUnits'] = strings[
DACInfo['lDACChannelUnitsIndex'] - 1]
header['listDACInfo'].append(DACInfo)
""" Not present in test file. No tests, no code.
# tags
listTag = []
for i in range(sections['TagSection']['llNumEntries']):
fid.seek(sections['TagSection']['uBlockIndex'] *
BLOCKSIZE + sections['TagSection']['uBytes'] * i)
tag = _read_info_as_dict(fid, TagInfoDescription)
listTag.append(tag)
header['listTag'] = listTag
# EpochPerDAC sections
# header['dictEpochInfoPerDAC'] is dict of dicts:
# - the first index is the DAC number
# - the second index is the epoch number
# It has to be done like that because data may not exist
# and may not be in sorted order
header['dictEpochInfoPerDAC'] = {}
for i in range(sections['EpochPerDACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['EpochPerDACSection']['uBlockIndex'] *
BLOCKSIZE +
sections['EpochPerDACSection']['uBytes'] * i)
EpochInfoPerDAC = _read_info_as_dict(fid, EpochInfoPerDACDescription)
DACNum = EpochInfoPerDAC['nDACNum']
EpochNum = EpochInfoPerDAC['nEpochNum']
# Checking if the key exists, if not, the value is empty
# so we have to create empty dict to populate
if DACNum not in header['dictEpochInfoPerDAC']:
header['dictEpochInfoPerDAC'][DACNum] = {}
header['dictEpochInfoPerDAC'][DACNum][EpochNum] =\
EpochInfoPerDAC
"""
return header | python | def _read_header(fid):
"""Based on neo/rawio/axonrawio.py, but I only kept of data with no-gaps
and in one segment.
"""
fid.seek(0, SEEK_SET)
fFileSignature = fid.read(4)
assert fFileSignature == b'ABF2', 'only format ABF2 is currently supported'
header = {}
for key, offset, fmt in headerDescriptionV2:
fid.seek(0 + offset, SEEK_SET)
val = unpack(fmt, fid.read(calcsize(fmt)))
if len(val) == 1:
header[key] = val[0]
else:
header[key] = val
# sections
sections = {}
for s, sectionName in enumerate(sectionNames):
fid.seek(76 + s * 16)
uBlockIndex, uBytes, llNumEntries = unpack('IIl', fid.read(calcsize('IIl')))
sections[sectionName] = {}
sections[sectionName]['uBlockIndex'] = uBlockIndex
sections[sectionName]['uBytes'] = uBytes
sections[sectionName]['llNumEntries'] = llNumEntries
header['sections'] = sections
# strings sections
# hack for reading channels names and units
fid.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
big_string = fid.read(sections['StringsSection']['uBytes'])
goodstart = -1
for key in [b'AXENGN', b'clampex', b'Clampex', b'CLAMPEX', b'axoscope', b'Clampfit']:
goodstart = big_string.find(key)
if goodstart != -1:
break
assert goodstart != -1, 'This file does not contain clampex, axoscope or clampfit in the header'
big_string = big_string[goodstart:]
strings = big_string.split(b'\x00')
# ADC sections
header['listADCInfo'] = []
for i in range(sections['ADCSection']['llNumEntries']):
# read ADCInfo
fid.seek(sections['ADCSection']['uBlockIndex'] *
BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
ADCInfo = _read_info_as_dict(fid, ADCInfoDescription)
ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex'] - 1]
ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
header['listADCInfo'].append(ADCInfo)
# protocol sections
fid.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
header['protocol'] = _read_info_as_dict(fid, protocolInfoDescription)
header['sProtocolPath'] = strings[header['uProtocolPathIndex'] - 1]
# DAC sections
header['listDACInfo'] = []
for i in range(sections['DACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['DACSection']['uBlockIndex'] *
BLOCKSIZE + sections['DACSection']['uBytes'] * i)
DACInfo = _read_info_as_dict(fid, DACInfoDescription)
DACInfo['DACChNames'] = strings[DACInfo['lDACChannelNameIndex'] - 1]
DACInfo['DACChUnits'] = strings[
DACInfo['lDACChannelUnitsIndex'] - 1]
header['listDACInfo'].append(DACInfo)
""" Not present in test file. No tests, no code.
# tags
listTag = []
for i in range(sections['TagSection']['llNumEntries']):
fid.seek(sections['TagSection']['uBlockIndex'] *
BLOCKSIZE + sections['TagSection']['uBytes'] * i)
tag = _read_info_as_dict(fid, TagInfoDescription)
listTag.append(tag)
header['listTag'] = listTag
# EpochPerDAC sections
# header['dictEpochInfoPerDAC'] is dict of dicts:
# - the first index is the DAC number
# - the second index is the epoch number
# It has to be done like that because data may not exist
# and may not be in sorted order
header['dictEpochInfoPerDAC'] = {}
for i in range(sections['EpochPerDACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['EpochPerDACSection']['uBlockIndex'] *
BLOCKSIZE +
sections['EpochPerDACSection']['uBytes'] * i)
EpochInfoPerDAC = _read_info_as_dict(fid, EpochInfoPerDACDescription)
DACNum = EpochInfoPerDAC['nDACNum']
EpochNum = EpochInfoPerDAC['nEpochNum']
# Checking if the key exists, if not, the value is empty
# so we have to create empty dict to populate
if DACNum not in header['dictEpochInfoPerDAC']:
header['dictEpochInfoPerDAC'][DACNum] = {}
header['dictEpochInfoPerDAC'][DACNum][EpochNum] =\
EpochInfoPerDAC
"""
return header | ['def', '_read_header', '(', 'fid', ')', ':', 'fid', '.', 'seek', '(', '0', ',', 'SEEK_SET', ')', 'fFileSignature', '=', 'fid', '.', 'read', '(', '4', ')', 'assert', 'fFileSignature', '==', "b'ABF2'", ',', "'only format ABF2 is currently supported'", 'header', '=', '{', '}', 'for', 'key', ',', 'offset', ',', 'fmt', 'in', 'headerDescriptionV2', ':', 'fid', '.', 'seek', '(', '0', '+', 'offset', ',', 'SEEK_SET', ')', 'val', '=', 'unpack', '(', 'fmt', ',', 'fid', '.', 'read', '(', 'calcsize', '(', 'fmt', ')', ')', ')', 'if', 'len', '(', 'val', ')', '==', '1', ':', 'header', '[', 'key', ']', '=', 'val', '[', '0', ']', 'else', ':', 'header', '[', 'key', ']', '=', 'val', '# sections', 'sections', '=', '{', '}', 'for', 's', ',', 'sectionName', 'in', 'enumerate', '(', 'sectionNames', ')', ':', 'fid', '.', 'seek', '(', '76', '+', 's', '*', '16', ')', 'uBlockIndex', ',', 'uBytes', ',', 'llNumEntries', '=', 'unpack', '(', "'IIl'", ',', 'fid', '.', 'read', '(', 'calcsize', '(', "'IIl'", ')', ')', ')', 'sections', '[', 'sectionName', ']', '=', '{', '}', 'sections', '[', 'sectionName', ']', '[', "'uBlockIndex'", ']', '=', 'uBlockIndex', 'sections', '[', 'sectionName', ']', '[', "'uBytes'", ']', '=', 'uBytes', 'sections', '[', 'sectionName', ']', '[', "'llNumEntries'", ']', '=', 'llNumEntries', 'header', '[', "'sections'", ']', '=', 'sections', '# strings sections', '# hack for reading channels names and units', 'fid', '.', 'seek', '(', 'sections', '[', "'StringsSection'", ']', '[', "'uBlockIndex'", ']', '*', 'BLOCKSIZE', ')', 'big_string', '=', 'fid', '.', 'read', '(', 'sections', '[', "'StringsSection'", ']', '[', "'uBytes'", ']', ')', 'goodstart', '=', '-', '1', 'for', 'key', 'in', '[', "b'AXENGN'", ',', "b'clampex'", ',', "b'Clampex'", ',', "b'CLAMPEX'", ',', "b'axoscope'", ',', "b'Clampfit'", ']', ':', 'goodstart', '=', 'big_string', '.', 'find', '(', 'key', ')', 'if', 'goodstart', '!=', '-', '1', ':', 'break', 'assert', 'goodstart', '!=', '-', '1', ',', "'This file does not contain clampex, axoscope or clampfit in the header'", 'big_string', '=', 'big_string', '[', 'goodstart', ':', ']', 'strings', '=', 'big_string', '.', 'split', '(', "b'\\x00'", ')', '# ADC sections', 'header', '[', "'listADCInfo'", ']', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'sections', '[', "'ADCSection'", ']', '[', "'llNumEntries'", ']', ')', ':', '# read ADCInfo', 'fid', '.', 'seek', '(', 'sections', '[', "'ADCSection'", ']', '[', "'uBlockIndex'", ']', '*', 'BLOCKSIZE', '+', 'sections', '[', "'ADCSection'", ']', '[', "'uBytes'", ']', '*', 'i', ')', 'ADCInfo', '=', '_read_info_as_dict', '(', 'fid', ',', 'ADCInfoDescription', ')', 'ADCInfo', '[', "'ADCChNames'", ']', '=', 'strings', '[', 'ADCInfo', '[', "'lADCChannelNameIndex'", ']', '-', '1', ']', 'ADCInfo', '[', "'ADCChUnits'", ']', '=', 'strings', '[', 'ADCInfo', '[', "'lADCUnitsIndex'", ']', '-', '1', ']', 'header', '[', "'listADCInfo'", ']', '.', 'append', '(', 'ADCInfo', ')', '# protocol sections', 'fid', '.', 'seek', '(', 'sections', '[', "'ProtocolSection'", ']', '[', "'uBlockIndex'", ']', '*', 'BLOCKSIZE', ')', 'header', '[', "'protocol'", ']', '=', '_read_info_as_dict', '(', 'fid', ',', 'protocolInfoDescription', ')', 'header', '[', "'sProtocolPath'", ']', '=', 'strings', '[', 'header', '[', "'uProtocolPathIndex'", ']', '-', '1', ']', '# DAC sections', 'header', '[', "'listDACInfo'", ']', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'sections', '[', "'DACSection'", ']', '[', "'llNumEntries'", ']', ')', ':', '# read DACInfo', 'fid', '.', 
'seek', '(', 'sections', '[', "'DACSection'", ']', '[', "'uBlockIndex'", ']', '*', 'BLOCKSIZE', '+', 'sections', '[', "'DACSection'", ']', '[', "'uBytes'", ']', '*', 'i', ')', 'DACInfo', '=', '_read_info_as_dict', '(', 'fid', ',', 'DACInfoDescription', ')', 'DACInfo', '[', "'DACChNames'", ']', '=', 'strings', '[', 'DACInfo', '[', "'lDACChannelNameIndex'", ']', '-', '1', ']', 'DACInfo', '[', "'DACChUnits'", ']', '=', 'strings', '[', 'DACInfo', '[', "'lDACChannelUnitsIndex'", ']', '-', '1', ']', 'header', '[', "'listDACInfo'", ']', '.', 'append', '(', 'DACInfo', ')', '""" Not present in test file. No tests, no code.\n # tags\n listTag = []\n for i in range(sections[\'TagSection\'][\'llNumEntries\']):\n fid.seek(sections[\'TagSection\'][\'uBlockIndex\'] *\n BLOCKSIZE + sections[\'TagSection\'][\'uBytes\'] * i)\n tag = _read_info_as_dict(fid, TagInfoDescription)\n listTag.append(tag)\n\n header[\'listTag\'] = listTag\n\n # EpochPerDAC sections\n # header[\'dictEpochInfoPerDAC\'] is dict of dicts:\n # - the first index is the DAC number\n # - the second index is the epoch number\n # It has to be done like that because data may not exist\n # and may not be in sorted order\n header[\'dictEpochInfoPerDAC\'] = {}\n for i in range(sections[\'EpochPerDACSection\'][\'llNumEntries\']):\n # read DACInfo\n fid.seek(sections[\'EpochPerDACSection\'][\'uBlockIndex\'] *\n BLOCKSIZE +\n sections[\'EpochPerDACSection\'][\'uBytes\'] * i)\n EpochInfoPerDAC = _read_info_as_dict(fid, EpochInfoPerDACDescription)\n DACNum = EpochInfoPerDAC[\'nDACNum\']\n EpochNum = EpochInfoPerDAC[\'nEpochNum\']\n # Checking if the key exists, if not, the value is empty\n # so we have to create empty dict to populate\n if DACNum not in header[\'dictEpochInfoPerDAC\']:\n header[\'dictEpochInfoPerDAC\'][DACNum] = {}\n\n header[\'dictEpochInfoPerDAC\'][DACNum][EpochNum] =\\\n EpochInfoPerDAC\n """', 'return', 'header'] | Based on neo/rawio/axonrawio.py, but I only kept of data with no-gaps
and in one segment. | ['Based', 'on', 'neo', '/', 'rawio', '/', 'axonrawio', '.', 'py', 'but', 'I', 'only', 'kept', 'of', 'data', 'with', 'no', '-', 'gaps', 'and', 'in', 'one', 'segment', '.'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/abf.py#L143-L248 |
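A minimal sketch of reading one section descriptor exactly as described above (16-byte entries starting at byte offset 76); the file handle must be opened in binary mode.

from struct import calcsize, unpack

def read_section(fid, index):
    # Each descriptor packs uBlockIndex, uBytes and llNumEntries as 'IIl'.
    fid.seek(76 + index * 16)
    uBlockIndex, uBytes, llNumEntries = unpack('IIl', fid.read(calcsize('IIl')))
    return {'uBlockIndex': uBlockIndex, 'uBytes': uBytes, 'llNumEntries': llNumEntries}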
2,768 | mthornhill/django-postal | src/postal/resource.py | Emitter.get | def get(cls, format):
"""
Gets an emitter, returns the class and a content-type.
"""
if cls.EMITTERS.has_key(format):
return cls.EMITTERS.get(format)
raise ValueError("No emitters found for type %s" % format) | python | def get(cls, format):
"""
Gets an emitter, returns the class and a content-type.
"""
if cls.EMITTERS.has_key(format):
return cls.EMITTERS.get(format)
raise ValueError("No emitters found for type %s" % format) | ['def', 'get', '(', 'cls', ',', 'format', ')', ':', 'if', 'cls', '.', 'EMITTERS', '.', 'has_key', '(', 'format', ')', ':', 'return', 'cls', '.', 'EMITTERS', '.', 'get', '(', 'format', ')', 'raise', 'ValueError', '(', '"No emitters found for type %s"', '%', 'format', ')'] | Gets an emitter, returns the class and a content-type. | ['Gets', 'an', 'emitter', 'returns', 'the', 'class', 'and', 'a', 'content', '-', 'type', '.'] | train | https://github.com/mthornhill/django-postal/blob/21d65e09b45f0515cde6166345f46c3f506dd08f/src/postal/resource.py#L303-L310 |
2,769 | roycehaynes/scrapy-rabbitmq | scrapy_rabbitmq/queue.py | SpiderQueue.pop | def pop(self):
"""Pop a request"""
method_frame, header, body = self.server.basic_get(queue=self.key)
if body:
return self._decode_request(body) | python | def pop(self):
"""Pop a request"""
method_frame, header, body = self.server.basic_get(queue=self.key)
if body:
return self._decode_request(body) | ['def', 'pop', '(', 'self', ')', ':', 'method_frame', ',', 'header', ',', 'body', '=', 'self', '.', 'server', '.', 'basic_get', '(', 'queue', '=', 'self', '.', 'key', ')', 'if', 'body', ':', 'return', 'self', '.', '_decode_request', '(', 'body', ')'] | Pop a request | ['Pop', 'a', 'request'] | train | https://github.com/roycehaynes/scrapy-rabbitmq/blob/5053b500aff1d6679cc0e3d3e338c2bf74fadc22/scrapy_rabbitmq/queue.py#L65-L71 |
2,770 | LudovicRousseau/pyscard | smartcard/ExclusiveTransmitCardConnection.py | ExclusiveTransmitCardConnection.transmit | def transmit(self, bytes, protocol=None):
'''Gain exclusive access to card during APDU transmission if this
decorator decorates a PCSCCardConnection.'''
data, sw1, sw2 = CardConnectionDecorator.transmit(
self, bytes, protocol)
return data, sw1, sw2 | python | def transmit(self, bytes, protocol=None):
'''Gain exclusive access to card during APDU transmission if this
decorator decorates a PCSCCardConnection.'''
data, sw1, sw2 = CardConnectionDecorator.transmit(
self, bytes, protocol)
return data, sw1, sw2 | ['def', 'transmit', '(', 'self', ',', 'bytes', ',', 'protocol', '=', 'None', ')', ':', 'data', ',', 'sw1', ',', 'sw2', '=', 'CardConnectionDecorator', '.', 'transmit', '(', 'self', ',', 'bytes', ',', 'protocol', ')', 'return', 'data', ',', 'sw1', ',', 'sw2'] | Gain exclusive access to card during APDU transmission if this
decorator decorates a PCSCCardConnection. | ['Gain', 'exclusive', 'access', 'to', 'card', 'during', 'APDU', 'transmission', 'for', 'if', 'this', 'decorator', 'decorates', 'a', 'PCSCCardConnection', '.'] | train | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/ExclusiveTransmitCardConnection.py#L85-L90 |
2,771 | dnanexus/dx-toolkit | src/python/dxpy/api.py | system_find_global_workflows | def system_find_global_workflows(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findGlobalWorkflows API method.
"""
return DXHTTPRequest('/system/findGlobalWorkflows', input_params, always_retry=always_retry, **kwargs) | python | def system_find_global_workflows(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findGlobalWorkflows API method.
"""
return DXHTTPRequest('/system/findGlobalWorkflows', input_params, always_retry=always_retry, **kwargs) | ['def', 'system_find_global_workflows', '(', 'input_params', '=', '{', '}', ',', 'always_retry', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'return', 'DXHTTPRequest', '(', "'/system/findGlobalWorkflows'", ',', 'input_params', ',', 'always_retry', '=', 'always_retry', ',', '*', '*', 'kwargs', ')'] | Invokes the /system/findGlobalWorkflows API method. | ['Invokes', 'the', '/', 'system', '/', 'findGlobalWorkflows', 'API', 'method', '.'] | train | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1235-L1239 |
2,772 | JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/reftrack/asset.py | AssetReftypeInterface.delete | def delete(self, refobj):
"""Delete the content of the given refobj
:param refobj: the refobj that represents the content that should be deleted
:type refobj: refobj
:returns: None
:rtype: None
:raises: None
"""
refobjinter = self.get_refobjinter()
reference = refobjinter.get_reference(refobj)
if reference:
fullns = cmds.referenceQuery(reference, namespace=True)
cmds.file(removeReference=True, referenceNode=reference)
else:
parentns = common.get_namespace(refobj)
ns = cmds.getAttr("%s.namespace" % refobj)
fullns = ":".join((parentns.rstrip(":"), ns.lstrip(":")))
cmds.namespace(removeNamespace=fullns, deleteNamespaceContent=True) | python | def delete(self, refobj):
"""Delete the content of the given refobj
:param refobj: the refobj that represents the content that should be deleted
:type refobj: refobj
:returns: None
:rtype: None
:raises: None
"""
refobjinter = self.get_refobjinter()
reference = refobjinter.get_reference(refobj)
if reference:
fullns = cmds.referenceQuery(reference, namespace=True)
cmds.file(removeReference=True, referenceNode=reference)
else:
parentns = common.get_namespace(refobj)
ns = cmds.getAttr("%s.namespace" % refobj)
fullns = ":".join((parentns.rstrip(":"), ns.lstrip(":")))
cmds.namespace(removeNamespace=fullns, deleteNamespaceContent=True) | ['def', 'delete', '(', 'self', ',', 'refobj', ')', ':', 'refobjinter', '=', 'self', '.', 'get_refobjinter', '(', ')', 'reference', '=', 'refobjinter', '.', 'get_reference', '(', 'refobj', ')', 'if', 'reference', ':', 'fullns', '=', 'cmds', '.', 'referenceQuery', '(', 'reference', ',', 'namespace', '=', 'True', ')', 'cmds', '.', 'file', '(', 'removeReference', '=', 'True', ',', 'referenceNode', '=', 'reference', ')', 'else', ':', 'parentns', '=', 'common', '.', 'get_namespace', '(', 'refobj', ')', 'ns', '=', 'cmds', '.', 'getAttr', '(', '"%s.namespace"', '%', 'refobj', ')', 'fullns', '=', '":"', '.', 'join', '(', '(', 'parentns', '.', 'rstrip', '(', '":"', ')', ',', 'ns', '.', 'lstrip', '(', '":"', ')', ')', ')', 'cmds', '.', 'namespace', '(', 'removeNamespace', '=', 'fullns', ',', 'deleteNamespaceContent', '=', 'True', ')'] | Delete the content of the given refobj
:param refobj: the refobj that represents the content that should be deleted
:type refobj: refobj
:returns: None
:rtype: None
:raises: None | ['Delete', 'the', 'content', 'of', 'the', 'given', 'refobj'] | train | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/reftrack/asset.py#L195-L213 |
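A standalone check of the namespace join used in the non-reference branch above; the values are made up.

parentns, ns = "asset1:", ":model"
fullns = ":".join((parentns.rstrip(":"), ns.lstrip(":")))
assert fullns == "asset1:model"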
2,773 | pywbem/pywbem | pywbem/_recorder.py | LogOperationRecorder.stage_wbem_connection | def stage_wbem_connection(self, wbem_connection):
"""
Log connection information. This includes the connection id (conn_id)
that is output with the log entry. This entry is logged if either
http or api loggers are enabled. It honors both the logger and
detail level of either api logger if defined or http logger if defined.
If the api logger does not exist, the output shows this as an http
logger output since we do not want to create an api logger for this
specific output
"""
self._conn_id = wbem_connection.conn_id
if self.enabled:
if self.api_detail_level is not None:
logger = self.apilogger
detail_level = self.api_detail_level
max_len = self.api_maxlen
elif self.http_detail_level is not None:
logger = self.httplogger
detail_level = self.http_detail_level
max_len = self.http_maxlen
else:
return
if logger.isEnabledFor(logging.DEBUG):
conn_data = str(wbem_connection) if detail_level == 'summary' \
else repr(wbem_connection)
if max_len and (len(conn_data) > max_len):
conn_data = conn_data[:max_len] + '...'
logger.debug('Connection:%s %s', self._conn_id, conn_data) | python | def stage_wbem_connection(self, wbem_connection):
"""
Log connection information. This includes the connection id (conn_id)
that is output with the log entry. This entry is logged if either
http or api loggers are enabled. It honors both the logger and
detail level of either api logger if defined or http logger if defined.
If the api logger does not exist, the output shows this as an http
logger output since we do not want to create an api logger for this
specific output
"""
self._conn_id = wbem_connection.conn_id
if self.enabled:
if self.api_detail_level is not None:
logger = self.apilogger
detail_level = self.api_detail_level
max_len = self.api_maxlen
elif self.http_detail_level is not None:
logger = self.httplogger
detail_level = self.http_detail_level
max_len = self.http_maxlen
else:
return
if logger.isEnabledFor(logging.DEBUG):
conn_data = str(wbem_connection) if detail_level == 'summary' \
else repr(wbem_connection)
if max_len and (len(conn_data) > max_len):
conn_data = conn_data[:max_len] + '...'
logger.debug('Connection:%s %s', self._conn_id, conn_data) | ['def', 'stage_wbem_connection', '(', 'self', ',', 'wbem_connection', ')', ':', 'self', '.', '_conn_id', '=', 'wbem_connection', '.', 'conn_id', 'if', 'self', '.', 'enabled', ':', 'if', 'self', '.', 'api_detail_level', 'is', 'not', 'None', ':', 'logger', '=', 'self', '.', 'apilogger', 'detail_level', '=', 'self', '.', 'api_detail_level', 'max_len', '=', 'self', '.', 'api_maxlen', 'elif', 'self', '.', 'http_detail_level', 'is', 'not', 'None', ':', 'logger', '=', 'self', '.', 'httplogger', 'detail_level', '=', 'self', '.', 'http_detail_level', 'max_len', '=', 'self', '.', 'http_maxlen', 'else', ':', 'return', 'if', 'logger', '.', 'isEnabledFor', '(', 'logging', '.', 'DEBUG', ')', ':', 'conn_data', '=', 'str', '(', 'wbem_connection', ')', 'if', 'detail_level', '==', "'summary'", 'else', 'repr', '(', 'wbem_connection', ')', 'if', 'max_len', 'and', '(', 'len', '(', 'conn_data', ')', '>', 'max_len', ')', ':', 'conn_data', '=', 'conn_data', '[', ':', 'max_len', ']', '+', "'...'", 'logger', '.', 'debug', '(', "'Connection:%s %s'", ',', 'self', '.', '_conn_id', ',', 'conn_data', ')'] | Log connection information. This includes the connection id (conn_id)
that is output with the log entry. This entry is logged if either
http or api loggers are enabled. It honors both the logger and
detail level of either api logger if defined or http logger if defined.
If the api logger does not exist, the output shows this as an http
logger output since we do not want to create an api logger for this
specific output | ['Log', 'connection', 'information', '.', 'This', 'includes', 'the', 'connection', 'id', '(', 'conn_id', ')', 'that', 'is', 'output', 'with', 'the', 'log', 'entry', '.', 'This', 'entry', 'is', 'logged', 'if', 'either', 'http', 'or', 'api', 'loggers', 'are', 'enable', '.', 'It', 'honors', 'both', 'the', 'logger', 'and', 'detail', 'level', 'of', 'either', 'api', 'logger', 'if', 'defined', 'or', 'http', 'logger', 'if', 'defined', '.', 'If', 'the', 'api', 'logger', 'does', 'not', 'exist', 'the', 'output', 'shows', 'this', 'as', 'an', 'http', 'loggger', 'output', 'since', 'we', 'do', 'not', 'want', 'to', 'create', 'an', 'api', 'logger', 'for', 'this', 'specific', 'output'] | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_recorder.py#L613-L643 |
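A minimal illustration of the summary-vs-repr selection and max_len truncation applied above; the connection strings are placeholders, not real pywbem output.

def connection_log_text(conn_summary, conn_repr, detail_level, max_len):
    conn_data = conn_summary if detail_level == 'summary' else conn_repr
    if max_len and len(conn_data) > max_len:
        conn_data = conn_data[:max_len] + '...'
    return conn_data

print(connection_log_text("WBEMConnection(https://srv:5989)",
                          "WBEMConnection(url='https://srv:5989', no_verification=True)",
                          'all', 40))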
2,774 | ihmeuw/vivarium | src/vivarium/framework/randomness.py | RandomnessManager.get_randomness_stream | def get_randomness_stream(self, decision_point: str, for_initialization: bool=False) -> RandomnessStream:
"""Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
"""
if decision_point in self._decision_points:
raise RandomnessError(f"Two separate places are attempting to create "
f"the same randomness stream for {decision_point}")
stream = RandomnessStream(key=decision_point, clock=self._clock, seed=self._seed,
index_map=self._key_mapping, manager=self, for_initialization=for_initialization)
self._decision_points[decision_point] = stream
return stream | python | def get_randomness_stream(self, decision_point: str, for_initialization: bool=False) -> RandomnessStream:
"""Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
"""
if decision_point in self._decision_points:
raise RandomnessError(f"Two separate places are attempting to create "
f"the same randomness stream for {decision_point}")
stream = RandomnessStream(key=decision_point, clock=self._clock, seed=self._seed,
index_map=self._key_mapping, manager=self, for_initialization=for_initialization)
self._decision_points[decision_point] = stream
return stream | ['def', 'get_randomness_stream', '(', 'self', ',', 'decision_point', ':', 'str', ',', 'for_initialization', ':', 'bool', '=', 'False', ')', '->', 'RandomnessStream', ':', 'if', 'decision_point', 'in', 'self', '.', '_decision_points', ':', 'raise', 'RandomnessError', '(', 'f"Two separate places are attempting to create "', 'f"the same randomness stream for {decision_point}"', ')', 'stream', '=', 'RandomnessStream', '(', 'key', '=', 'decision_point', ',', 'clock', '=', 'self', '.', '_clock', ',', 'seed', '=', 'self', '.', '_seed', ',', 'index_map', '=', 'self', '.', '_key_mapping', ',', 'manager', '=', 'self', ',', 'for_initialization', '=', 'for_initialization', ')', 'self', '.', '_decision_points', '[', 'decision_point', ']', '=', 'stream', 'return', 'stream'] | Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier. | ['Provides', 'a', 'new', 'source', 'of', 'random', 'numbers', 'for', 'the', 'given', 'decision', 'point', '.'] | train | https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/randomness.py#L630-L657 |
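A toy sketch of the one-stream-per-decision-point contract enforced above; the registry class is a stand-in, not vivarium's API.

class StreamRegistry:
    def __init__(self):
        self._decision_points = {}

    def get_randomness_stream(self, decision_point):
        if decision_point in self._decision_points:
            raise ValueError("stream {!r} already exists".format(decision_point))
        stream = object()   # stand-in for a RandomnessStream
        self._decision_points[decision_point] = stream
        return stream

registry = StreamRegistry()
registry.get_randomness_stream("gets_disease")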
2,775 | DarkEnergySurvey/ugali | ugali/utils/projector.py | dms2dec | def dms2dec(dms):
"""
Convert latitude from degrees,minutes,seconds in string or 3-array
format to decimal degrees.
"""
DEGREE = 360.
HOUR = 24.
MINUTE = 60.
SECOND = 3600.
# Be careful here, degree needs to be a float so that negative zero
# can have its signbit set:
# http://docs.scipy.org/doc/numpy-1.7.0/reference/c-api.coremath.html#NPY_NZERO
if isstring(dms):
degree,minute,second = np.array(re.split('[dms]',dms))[:3].astype(float)
else:
degree,minute,second = dms.T
sign = np.copysign(1.0,degree)
decimal = np.abs(degree) + minute * 1./MINUTE + second * 1./SECOND
decimal *= sign
return decimal | python | def dms2dec(dms):
"""
Convert latitude from degrees,minutes,seconds in string or 3-array
format to decimal degrees.
"""
DEGREE = 360.
HOUR = 24.
MINUTE = 60.
SECOND = 3600.
# Be careful here, degree needs to be a float so that negative zero
# can have its signbit set:
# http://docs.scipy.org/doc/numpy-1.7.0/reference/c-api.coremath.html#NPY_NZERO
if isstring(dms):
degree,minute,second = np.array(re.split('[dms]',dms))[:3].astype(float)
else:
degree,minute,second = dms.T
sign = np.copysign(1.0,degree)
decimal = np.abs(degree) + minute * 1./MINUTE + second * 1./SECOND
decimal *= sign
return decimal | ['def', 'dms2dec', '(', 'dms', ')', ':', 'DEGREE', '=', '360.', 'HOUR', '=', '24.', 'MINUTE', '=', '60.', 'SECOND', '=', '3600.', '# Be careful here, degree needs to be a float so that negative zero', '# can have its signbit set:', '# http://docs.scipy.org/doc/numpy-1.7.0/reference/c-api.coremath.html#NPY_NZERO', 'if', 'isstring', '(', 'dms', ')', ':', 'degree', ',', 'minute', ',', 'second', '=', 'np', '.', 'array', '(', 're', '.', 'split', '(', "'[dms]'", ',', 'hms', ')', ')', '[', ':', '3', ']', '.', 'astype', '(', 'float', ')', 'else', ':', 'degree', ',', 'minute', ',', 'second', '=', 'dms', '.', 'T', 'sign', '=', 'np', '.', 'copysign', '(', '1.0', ',', 'degree', ')', 'decimal', '=', 'np', '.', 'abs', '(', 'degree', ')', '+', 'minute', '*', '1.', '/', 'MINUTE', '+', 'second', '*', '1.', '/', 'SECOND', 'decimal', '*=', 'sign', 'return', 'decimal'] | Convert latitude from degrees,minutes,seconds in string or 3-array
format to decimal degrees. | ['Convert', 'latitude', 'from', 'degrees', 'minutes', 'seconds', 'in', 'string', 'or', '3', '-', 'array', 'format', 'to', 'decimal', 'degrees', '.'] | train | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/projector.py#L442-L464 |
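A minimal standalone sketch of the dms2dec conversion recorded above, mirroring only the 3-array branch (the string branch splits `hms`, which looks like a typo for `dms`, so it is not exercised here). NumPy is the only dependency and the helper name is made up for this illustration:

import numpy as np

def dms_to_decimal(degree, minute, second):
    # Keep the sign carried by the degree field (including negative zero),
    # accumulate |deg| + min/60 + sec/3600, then restore the sign.
    sign = np.copysign(1.0, degree)
    return sign * (np.abs(degree) + minute / 60.0 + second / 3600.0)

print(dms_to_decimal(-12.0, 30.0, 30.0))           # -12.508333...
print(dms_to_decimal(np.array([10.0, -0.0]),
                     np.array([15.0, 30.0]),
                     np.array([0.0, 0.0])))         # [10.25 -0.5]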
2,776 | ronaldguillen/wave | wave/serializers.py | ModelSerializer.get_default_field_names | def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[model_info.pk.name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
) | python | def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[model_info.pk.name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
) | ['def', 'get_default_field_names', '(', 'self', ',', 'declared_fields', ',', 'model_info', ')', ':', 'return', '(', '[', 'model_info', '.', 'pk', '.', 'name', ']', '+', 'list', '(', 'declared_fields', '.', 'keys', '(', ')', ')', '+', 'list', '(', 'model_info', '.', 'fields', '.', 'keys', '(', ')', ')', '+', 'list', '(', 'model_info', '.', 'forward_relations', '.', 'keys', '(', ')', ')', ')'] | Return the default list of field names that will be used if the
`Meta.fields` option is not specified. | ['Return', 'the', 'default', 'list', 'of', 'field', 'names', 'that', 'will', 'be', 'used', 'if', 'the', 'Meta', '.', 'fields', 'option', 'is', 'not', 'specified', '.'] | train | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1041-L1051 |
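The default ordering assembled above (primary key, then serializer-declared fields, then model fields, then forward relations) can be shown with plain stand-ins; every name below is hypothetical:

pk_name = "id"
declared_fields = {"full_name": None}                    # declared on the serializer
model_fields = {"first_name": None, "last_name": None}   # concrete model fields
forward_relations = {"group": None}                      # FK/M2M style relations

default_field_names = (
    [pk_name]
    + list(declared_fields)
    + list(model_fields)
    + list(forward_relations)
)
print(default_field_names)   # ['id', 'full_name', 'first_name', 'last_name', 'group']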
2,777 | spacetelescope/stsci.tools | lib/stsci/tools/fitsdiff.py | list_parse | def list_parse(name_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
try:
return [v.strip() for v in open(value, 'r').readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')] | python | def list_parse(name_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
try:
return [v.strip() for v in open(value, 'r').readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')] | ['def', 'list_parse', '(', 'name_list', ')', ':', 'if', 'name_list', 'and', 'name_list', '[', '0', ']', '==', "'@'", ':', 'value', '=', 'name_list', '[', '1', ':', ']', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'value', ')', ':', 'log', '.', 'warning', '(', "'The file %s does not exist'", '%', 'value', ')', 'return', 'try', ':', 'return', '[', 'v', '.', 'strip', '(', ')', 'for', 'v', 'in', 'open', '(', 'value', ',', "'r'", ')', '.', 'readlines', '(', ')', ']', 'except', 'IOError', 'as', 'e', ':', 'log', '.', 'warning', '(', "'reading %s failed: %s; ignoring this file'", '%', '(', 'value', ',', 'e', ')', ')', 'else', ':', 'return', '[', 'v', '.', 'strip', '(', ')', 'for', 'v', 'in', 'name_list', '.', 'split', '(', "','", ')', ']'] | Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line. | ['Parse', 'a', 'comma', '-', 'separated', 'list', 'of', 'values', 'or', 'a', 'filename', '(', 'starting', 'with'] | train | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fitsdiff.py#L45-L61 |
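A usage sketch for list_parse above, assuming the package is installed and that the import path is stsci.tools.fitsdiff (inferred from the recorded file path); both accepted input forms are shown:

import tempfile
from stsci.tools.fitsdiff import list_parse

# Comma-separated string: split on ',' and strip whitespace.
print(list_parse("SIMPLE, BITPIX ,NAXIS"))    # ['SIMPLE', 'BITPIX', 'NAXIS']

# '@filename' form: one value per line in the named file.
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
    fh.write("SIMPLE\nBITPIX\n")
    path = fh.name
print(list_parse("@" + path))                 # ['SIMPLE', 'BITPIX']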
2,778 | pypa/pipenv | pipenv/vendor/distlib/util.py | convert_path | def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths) | python | def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths) | ['def', 'convert_path', '(', 'pathname', ')', ':', 'if', 'os', '.', 'sep', '==', "'/'", ':', 'return', 'pathname', 'if', 'not', 'pathname', ':', 'return', 'pathname', 'if', 'pathname', '[', '0', ']', '==', "'/'", ':', 'raise', 'ValueError', '(', '"path \'%s\' cannot be absolute"', '%', 'pathname', ')', 'if', 'pathname', '[', '-', '1', ']', '==', "'/'", ':', 'raise', 'ValueError', '(', '"path \'%s\' cannot end with \'/\'"', '%', 'pathname', ')', 'paths', '=', 'pathname', '.', 'split', '(', "'/'", ')', 'while', 'os', '.', 'curdir', 'in', 'paths', ':', 'paths', '.', 'remove', '(', 'os', '.', 'curdir', ')', 'if', 'not', 'paths', ':', 'return', 'os', '.', 'curdir', 'return', 'os', '.', 'path', '.', 'join', '(', '*', 'paths', ')'] | Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash. | ['Return', 'pathname', 'as', 'a', 'name', 'that', 'will', 'work', 'on', 'the', 'native', 'filesystem', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L451-L475 |
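A usage sketch for convert_path above; upstream this function ships as distlib.util.convert_path (the row records pipenv's vendored copy, importable as pipenv.vendor.distlib.util), and the result depends on the platform's os.sep:

import os
from distlib.util import convert_path   # or: from pipenv.vendor.distlib.util import convert_path

# Setup-style paths are always written with '/'; convert_path maps them onto
# the native separator. On POSIX (os.sep == '/') the input is returned as-is,
# on Windows it becomes pkg\data\defaults.cfg.
print(os.sep, convert_path("pkg/data/defaults.cfg"))

# On non-POSIX platforms, leading './' segments are dropped, and absolute or
# trailing-slash inputs raise ValueError, as the docstring above states.
print(convert_path("./pkg/module.py"))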
2,779 | DLR-RM/RAFCON | source/rafcon/core/states/container_state.py | ContainerState._check_data_flow_ports | def _check_data_flow_ports(self, data_flow):
"""Checks the validity of the ports of a data flow
Checks whether the ports of a data flow are existing and whether it is allowed to connect these ports.
:param rafcon.core.data_flow.DataFlow data_flow: The data flow to be checked
:return bool validity, str message: validity is True, when the data flow is valid, False else. message gives
more information especially if the data flow is not valid
"""
from_state_id = data_flow.from_state
to_state_id = data_flow.to_state
from_data_port_id = data_flow.from_key
to_data_port_id = data_flow.to_key
# Check whether to and from port are existing
from_data_port = self.get_data_port(from_state_id, from_data_port_id)
if not from_data_port:
return False, "Data flow origin not existing -> {0}".format(data_flow)
to_data_port = self.get_data_port(to_state_id, to_data_port_id)
if not to_data_port:
return False, "Data flow target not existing -> {0}".format(data_flow)
# Data_ports without parents are not allowed to be connected twice
if not from_data_port.parent:
return False, "Source data port does not have a parent -> {0}".format(data_flow)
if not to_data_port.parent:
return False, "Target data port does not have a parent -> {0}".format(data_flow)
# Check if data ports are identical
if from_data_port is to_data_port:
return False, "Source and target data ports of data flow must not be identical -> {}".format(data_flow)
# Check, whether the origin of the data flow is valid
if from_state_id == self.state_id: # data_flow originates in container state
if from_data_port_id not in self.input_data_ports and from_data_port_id not in self.scoped_variables:
return False, "Data flow origin port must be an input port or scoped variable, when the data flow " \
"starts in the parent state -> {0}".format(data_flow)
else: # data flow originates in child state
if from_data_port_id not in from_data_port.parent.output_data_ports:
return False, "Data flow origin port must be an output port, when the data flow " \
"starts in the child state -> {0}".format(data_flow)
# Check, whether the target of a data flow is valid
if to_state_id == self.state_id: # data_flow ends in container state
if to_data_port_id not in self.output_data_ports and to_data_port_id not in self.scoped_variables:
return False, "Data flow target port must be an output port or scoped variable, when the data flow " \
"goes to the parent state -> {0}".format(data_flow)
else: # data_flow ends in child state
if to_data_port_id not in to_data_port.parent.input_data_ports:
return False, "Data flow target port must be an input port, when the data flow goes to a child state" \
" -> {0}".format(data_flow)
# Check if data flow connects two scoped variables
if isinstance(from_data_port, ScopedVariable) and isinstance(to_data_port, ScopedVariable):
return False, "Data flows must not connect two scoped variables -> {}".format(data_flow)
# Check, whether the target port is already connected
for existing_data_flow in self.data_flows.values():
to_data_port_existing = self.get_data_port(existing_data_flow.to_state, existing_data_flow.to_key)
from_data_port_existing = self.get_data_port(existing_data_flow.from_state, existing_data_flow.from_key)
if to_data_port is to_data_port_existing and data_flow is not existing_data_flow:
if from_data_port is from_data_port_existing:
return False, "Exactly the same data flow is already existing -> {0}".format(data_flow)
return True, "valid" | python | def _check_data_flow_ports(self, data_flow):
"""Checks the validity of the ports of a data flow
Checks whether the ports of a data flow are existing and whether it is allowed to connect these ports.
:param rafcon.core.data_flow.DataFlow data_flow: The data flow to be checked
:return bool validity, str message: validity is True, when the data flow is valid, False else. message gives
more information especially if the data flow is not valid
"""
from_state_id = data_flow.from_state
to_state_id = data_flow.to_state
from_data_port_id = data_flow.from_key
to_data_port_id = data_flow.to_key
# Check whether to and from port are existing
from_data_port = self.get_data_port(from_state_id, from_data_port_id)
if not from_data_port:
return False, "Data flow origin not existing -> {0}".format(data_flow)
to_data_port = self.get_data_port(to_state_id, to_data_port_id)
if not to_data_port:
return False, "Data flow target not existing -> {0}".format(data_flow)
# Data_ports without parents are not allowed to be connected twice
if not from_data_port.parent:
return False, "Source data port does not have a parent -> {0}".format(data_flow)
if not to_data_port.parent:
return False, "Target data port does not have a parent -> {0}".format(data_flow)
# Check if data ports are identical
if from_data_port is to_data_port:
return False, "Source and target data ports of data flow must not be identical -> {}".format(data_flow)
# Check, whether the origin of the data flow is valid
if from_state_id == self.state_id: # data_flow originates in container state
if from_data_port_id not in self.input_data_ports and from_data_port_id not in self.scoped_variables:
return False, "Data flow origin port must be an input port or scoped variable, when the data flow " \
"starts in the parent state -> {0}".format(data_flow)
else: # data flow originates in child state
if from_data_port_id not in from_data_port.parent.output_data_ports:
return False, "Data flow origin port must be an output port, when the data flow " \
"starts in the child state -> {0}".format(data_flow)
# Check, whether the target of a data flow is valid
if to_state_id == self.state_id: # data_flow ends in container state
if to_data_port_id not in self.output_data_ports and to_data_port_id not in self.scoped_variables:
return False, "Data flow target port must be an output port or scoped variable, when the data flow " \
"goes to the parent state -> {0}".format(data_flow)
else: # data_flow ends in child state
if to_data_port_id not in to_data_port.parent.input_data_ports:
return False, "Data flow target port must be an input port, when the data flow goes to a child state" \
" -> {0}".format(data_flow)
# Check if data flow connects two scoped variables
if isinstance(from_data_port, ScopedVariable) and isinstance(to_data_port, ScopedVariable):
return False, "Data flows must not connect two scoped variables -> {}".format(data_flow)
# Check, whether the target port is already connected
for existing_data_flow in self.data_flows.values():
to_data_port_existing = self.get_data_port(existing_data_flow.to_state, existing_data_flow.to_key)
from_data_port_existing = self.get_data_port(existing_data_flow.from_state, existing_data_flow.from_key)
if to_data_port is to_data_port_existing and data_flow is not existing_data_flow:
if from_data_port is from_data_port_existing:
return False, "Exactly the same data flow is already existing -> {0}".format(data_flow)
return True, "valid" | ['def', '_check_data_flow_ports', '(', 'self', ',', 'data_flow', ')', ':', 'from_state_id', '=', 'data_flow', '.', 'from_state', 'to_state_id', '=', 'data_flow', '.', 'to_state', 'from_data_port_id', '=', 'data_flow', '.', 'from_key', 'to_data_port_id', '=', 'data_flow', '.', 'to_key', '# Check whether to and from port are existing', 'from_data_port', '=', 'self', '.', 'get_data_port', '(', 'from_state_id', ',', 'from_data_port_id', ')', 'if', 'not', 'from_data_port', ':', 'return', 'False', ',', '"Data flow origin not existing -> {0}"', '.', 'format', '(', 'data_flow', ')', 'to_data_port', '=', 'self', '.', 'get_data_port', '(', 'to_state_id', ',', 'to_data_port_id', ')', 'if', 'not', 'to_data_port', ':', 'return', 'False', ',', '"Data flow target not existing -> {0}"', '.', 'format', '(', 'data_flow', ')', '# Data_ports without parents are not allowed to be connected twice', 'if', 'not', 'from_data_port', '.', 'parent', ':', 'return', 'False', ',', '"Source data port does not have a parent -> {0}"', '.', 'format', '(', 'data_flow', ')', 'if', 'not', 'to_data_port', '.', 'parent', ':', 'return', 'False', ',', '"Target data port does not have a parent -> {0}"', '.', 'format', '(', 'data_flow', ')', '# Check if data ports are identical', 'if', 'from_data_port', 'is', 'to_data_port', ':', 'return', 'False', ',', '"Source and target data ports of data flow must not be identical -> {}"', '.', 'format', '(', 'data_flow', ')', '# Check, whether the origin of the data flow is valid', 'if', 'from_state_id', '==', 'self', '.', 'state_id', ':', '# data_flow originates in container state', 'if', 'from_data_port_id', 'not', 'in', 'self', '.', 'input_data_ports', 'and', 'from_data_port_id', 'not', 'in', 'self', '.', 'scoped_variables', ':', 'return', 'False', ',', '"Data flow origin port must be an input port or scoped variable, when the data flow "', '"starts in the parent state -> {0}"', '.', 'format', '(', 'data_flow', ')', 'else', ':', '# data flow originates in child state', 'if', 'from_data_port_id', 'not', 'in', 'from_data_port', '.', 'parent', '.', 'output_data_ports', ':', 'return', 'False', ',', '"Data flow origin port must be an output port, when the data flow "', '"starts in the child state -> {0}"', '.', 'format', '(', 'data_flow', ')', '# Check, whether the target of a data flow is valid', 'if', 'to_state_id', '==', 'self', '.', 'state_id', ':', '# data_flow ends in container state', 'if', 'to_data_port_id', 'not', 'in', 'self', '.', 'output_data_ports', 'and', 'to_data_port_id', 'not', 'in', 'self', '.', 'scoped_variables', ':', 'return', 'False', ',', '"Data flow target port must be an output port or scoped variable, when the data flow "', '"goes to the parent state -> {0}"', '.', 'format', '(', 'data_flow', ')', 'else', ':', '# data_flow ends in child state', 'if', 'to_data_port_id', 'not', 'in', 'to_data_port', '.', 'parent', '.', 'input_data_ports', ':', 'return', 'False', ',', '"Data flow target port must be an input port, when the data flow goes to a child state"', '" -> {0}"', '.', 'format', '(', 'data_flow', ')', '# Check if data flow connects two scoped variables', 'if', 'isinstance', '(', 'from_data_port', ',', 'ScopedVariable', ')', 'and', 'isinstance', '(', 'to_data_port', ',', 'ScopedVariable', ')', ':', 'return', 'False', ',', '"Data flows must not connect two scoped variables -> {}"', '.', 'format', '(', 'data_flow', ')', '# Check, whether the target port is already connected', 'for', 'existing_data_flow', 'in', 'self', '.', 'data_flows', '.', 
'values', '(', ')', ':', 'to_data_port_existing', '=', 'self', '.', 'get_data_port', '(', 'existing_data_flow', '.', 'to_state', ',', 'existing_data_flow', '.', 'to_key', ')', 'from_data_port_existing', '=', 'self', '.', 'get_data_port', '(', 'existing_data_flow', '.', 'from_state', ',', 'existing_data_flow', '.', 'from_key', ')', 'if', 'to_data_port', 'is', 'to_data_port_existing', 'and', 'data_flow', 'is', 'not', 'existing_data_flow', ':', 'if', 'from_data_port', 'is', 'from_data_port_existing', ':', 'return', 'False', ',', '"Exactly the same data flow is already existing -> {0}"', '.', 'format', '(', 'data_flow', ')', 'return', 'True', ',', '"valid"'] | Checks the validity of the ports of a data flow
Checks whether the ports of a data flow are existing and whether it is allowed to connect these ports.
:param rafcon.core.data_flow.DataFlow data_flow: The data flow to be checked
:return bool validity, str message: validity is True, when the data flow is valid, False else. message gives
more information especially if the data flow is not valid | ['Checks', 'the', 'validity', 'of', 'the', 'ports', 'of', 'a', 'data', 'flow'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1824-L1888 |
2,780 | tensorflow/tensor2tensor | tensor2tensor/models/image_transformer.py | imagetransformer_base_8l_8h_big_cond_dr03_dan | def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams | python | def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams | ['def', 'imagetransformer_base_8l_8h_big_cond_dr03_dan', '(', ')', ':', 'hparams', '=', 'imagetransformer_sep_channels_8l', '(', ')', 'hparams', '.', 'block_width', '=', '256', 'hparams', '.', 'block_length', '=', '256', 'hparams', '.', 'hidden_size', '=', '512', 'hparams', '.', 'num_heads', '=', '8', 'hparams', '.', 'filter_size', '=', '2048', 'hparams', '.', 'batch_size', '=', '4', 'hparams', '.', 'max_length', '=', '3075', 'hparams', '.', 'layer_preprocess_sequence', '=', '"none"', 'hparams', '.', 'layer_postprocess_sequence', '=', '"dan"', 'hparams', '.', 'num_decoder_layers', '=', '8', 'hparams', '.', 'layer_prepostprocess_dropout', '=', '0.3', 'return', 'hparams'] | big 1d model for conditional image generation.2.99 on cifar10. | ['big', '1d', 'model', 'for', 'conditional', 'image', 'generation', '.', '2', '.', '99', 'on', 'cifar10', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L386-L400 |
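A brief usage sketch, assuming tensor2tensor is installed: the registered hparams function above simply returns a populated HParams object whose overridden values can be inspected directly.

from tensor2tensor.models import image_transformer

hp = image_transformer.imagetransformer_base_8l_8h_big_cond_dr03_dan()
print(hp.hidden_size, hp.num_heads, hp.num_decoder_layers)            # 512 8 8
print(hp.layer_preprocess_sequence, hp.layer_postprocess_sequence)    # none dan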
2,781 | deepmind/sonnet | sonnet/python/custom_getters/override_args.py | override_args | def override_args(**kwargs):
"""Creates a custom getter that applies specified named arguments.
Args:
**kwargs: Overriding arguments for the custom getter to use in preference
the named arguments it's called with.
Returns:
Custom getter.
"""
override_kwargs = kwargs
def custom_getter(getter, *args, **kwargs):
"""Custom getter with certain named arguments overridden.
Args:
getter: Underlying variable getter to invoke.
*args: Arguments, compatible with those of tf.get_variable.
**kwargs: Keyword arguments, compatible with those of tf.get_variable.
Returns:
The result of invoking `getter(*args, **kwargs)` except that certain
kwargs entries may have been overridden.
"""
kwargs.update(override_kwargs)
return getter(*args, **kwargs)
return custom_getter | python | def override_args(**kwargs):
"""Creates a custom getter that applies specified named arguments.
Args:
**kwargs: Overriding arguments for the custom getter to use in preference
the named arguments it's called with.
Returns:
Custom getter.
"""
override_kwargs = kwargs
def custom_getter(getter, *args, **kwargs):
"""Custom getter with certain named arguments overridden.
Args:
getter: Underlying variable getter to invoke.
*args: Arguments, compatible with those of tf.get_variable.
**kwargs: Keyword arguments, compatible with those of tf.get_variable.
Returns:
The result of invoking `getter(*args, **kwargs)` except that certain
kwargs entries may have been overridden.
"""
kwargs.update(override_kwargs)
return getter(*args, **kwargs)
return custom_getter | ['def', 'override_args', '(', '*', '*', 'kwargs', ')', ':', 'override_kwargs', '=', 'kwargs', 'def', 'custom_getter', '(', 'getter', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '"""Custom getter with certain named arguments overridden.\n\n Args:\n getter: Underlying variable getter to invoke.\n *args: Arguments, compatible with those of tf.get_variable.\n **kwargs: Keyword arguments, compatible with those of tf.get_variable.\n\n Returns:\n The result of invoking `getter(*args, **kwargs)` except that certain\n kwargs entries may have been overridden.\n """', 'kwargs', '.', 'update', '(', 'override_kwargs', ')', 'return', 'getter', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'custom_getter'] | Creates a custom getter that applies specified named arguments.
Args:
**kwargs: Overriding arguments for the custom getter to use in preference
the named arguments it's called with.
Returns:
Custom getter. | ['Creates', 'a', 'custom', 'getter', 'that', 'applies', 'specified', 'named', 'arguments', '.'] | train | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/custom_getters/override_args.py#L24-L52 |
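A self-contained sketch of the same closure pattern, with a plain function standing in for the TensorFlow variable getter, to show that the overriding keyword arguments take precedence:

def override_args_sketch(**override_kwargs):
    # Same shape as the snippet above, minus the TensorFlow context.
    def custom_getter(getter, *args, **kwargs):
        kwargs.update(override_kwargs)
        return getter(*args, **kwargs)
    return custom_getter

def fake_getter(name, trainable=True, dtype="float32"):
    return name, trainable, dtype

getter = override_args_sketch(trainable=False)
print(getter(fake_getter, "w", trainable=True))   # ('w', False, 'float32')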
2,782 | Diaoul/subliminal | subliminal/refiners/omdb.py | refine | def refine(video, **kwargs):
"""Refine a video by searching `OMDb API <http://omdbapi.com/>`_.
Several :class:`~subliminal.video.Episode` attributes can be found:
* :attr:`~subliminal.video.Episode.series`
* :attr:`~subliminal.video.Episode.year`
* :attr:`~subliminal.video.Episode.series_imdb_id`
Similarly, for a :class:`~subliminal.video.Movie`:
* :attr:`~subliminal.video.Movie.title`
* :attr:`~subliminal.video.Movie.year`
* :attr:`~subliminal.video.Video.imdb_id`
"""
if isinstance(video, Episode):
# exit if the information is complete
if video.series_imdb_id:
logger.debug('No need to search')
return
# search the series
results = search(video.series, 'series', video.year)
if not results:
logger.warning('No results for series')
return
logger.debug('Found %d results', len(results))
# filter the results
results = [r for r in results if sanitize(r['Title']) == sanitize(video.series)]
if not results:
logger.warning('No matching series found')
return
# process the results
found = False
for result in sorted(results, key=operator.itemgetter('Year')):
if video.original_series and video.year is None:
logger.debug('Found result for original series without year')
found = True
break
if video.year == int(result['Year'].split(u'\u2013')[0]):
logger.debug('Found result with matching year')
found = True
break
if not found:
logger.warning('No matching series found')
return
# add series information
logger.debug('Found series %r', result)
video.series = result['Title']
video.year = int(result['Year'].split(u'\u2013')[0])
video.series_imdb_id = result['imdbID']
elif isinstance(video, Movie):
# exit if the information is complete
if video.imdb_id:
return
# search the movie
results = search(video.title, 'movie', video.year)
if not results:
logger.warning('No results')
return
logger.debug('Found %d results', len(results))
# filter the results
results = [r for r in results if sanitize(r['Title']) == sanitize(video.title)]
if not results:
logger.warning('No matching movie found')
return
# process the results
found = False
for result in results:
if video.year is None:
logger.debug('Found result for movie without year')
found = True
break
if video.year == int(result['Year']):
logger.debug('Found result with matching year')
found = True
break
if not found:
logger.warning('No matching movie found')
return
# add movie information
logger.debug('Found movie %r', result)
video.title = result['Title']
video.year = int(result['Year'].split(u'\u2013')[0])
video.imdb_id = result['imdbID'] | python | def refine(video, **kwargs):
"""Refine a video by searching `OMDb API <http://omdbapi.com/>`_.
Several :class:`~subliminal.video.Episode` attributes can be found:
* :attr:`~subliminal.video.Episode.series`
* :attr:`~subliminal.video.Episode.year`
* :attr:`~subliminal.video.Episode.series_imdb_id`
Similarly, for a :class:`~subliminal.video.Movie`:
* :attr:`~subliminal.video.Movie.title`
* :attr:`~subliminal.video.Movie.year`
* :attr:`~subliminal.video.Video.imdb_id`
"""
if isinstance(video, Episode):
# exit if the information is complete
if video.series_imdb_id:
logger.debug('No need to search')
return
# search the series
results = search(video.series, 'series', video.year)
if not results:
logger.warning('No results for series')
return
logger.debug('Found %d results', len(results))
# filter the results
results = [r for r in results if sanitize(r['Title']) == sanitize(video.series)]
if not results:
logger.warning('No matching series found')
return
# process the results
found = False
for result in sorted(results, key=operator.itemgetter('Year')):
if video.original_series and video.year is None:
logger.debug('Found result for original series without year')
found = True
break
if video.year == int(result['Year'].split(u'\u2013')[0]):
logger.debug('Found result with matching year')
found = True
break
if not found:
logger.warning('No matching series found')
return
# add series information
logger.debug('Found series %r', result)
video.series = result['Title']
video.year = int(result['Year'].split(u'\u2013')[0])
video.series_imdb_id = result['imdbID']
elif isinstance(video, Movie):
# exit if the information is complete
if video.imdb_id:
return
# search the movie
results = search(video.title, 'movie', video.year)
if not results:
logger.warning('No results')
return
logger.debug('Found %d results', len(results))
# filter the results
results = [r for r in results if sanitize(r['Title']) == sanitize(video.title)]
if not results:
logger.warning('No matching movie found')
return
# process the results
found = False
for result in results:
if video.year is None:
logger.debug('Found result for movie without year')
found = True
break
if video.year == int(result['Year']):
logger.debug('Found result with matching year')
found = True
break
if not found:
logger.warning('No matching movie found')
return
# add movie information
logger.debug('Found movie %r', result)
video.title = result['Title']
video.year = int(result['Year'].split(u'\u2013')[0])
video.imdb_id = result['imdbID'] | ['def', 'refine', '(', 'video', ',', '*', '*', 'kwargs', ')', ':', 'if', 'isinstance', '(', 'video', ',', 'Episode', ')', ':', '# exit if the information is complete', 'if', 'video', '.', 'series_imdb_id', ':', 'logger', '.', 'debug', '(', "'No need to search'", ')', 'return', '# search the series', 'results', '=', 'search', '(', 'video', '.', 'series', ',', "'series'", ',', 'video', '.', 'year', ')', 'if', 'not', 'results', ':', 'logger', '.', 'warning', '(', "'No results for series'", ')', 'return', 'logger', '.', 'debug', '(', "'Found %d results'", ',', 'len', '(', 'results', ')', ')', '# filter the results', 'results', '=', '[', 'r', 'for', 'r', 'in', 'results', 'if', 'sanitize', '(', 'r', '[', "'Title'", ']', ')', '==', 'sanitize', '(', 'video', '.', 'series', ')', ']', 'if', 'not', 'results', ':', 'logger', '.', 'warning', '(', "'No matching series found'", ')', 'return', '# process the results', 'found', '=', 'False', 'for', 'result', 'in', 'sorted', '(', 'results', ',', 'key', '=', 'operator', '.', 'itemgetter', '(', "'Year'", ')', ')', ':', 'if', 'video', '.', 'original_series', 'and', 'video', '.', 'year', 'is', 'None', ':', 'logger', '.', 'debug', '(', "'Found result for original series without year'", ')', 'found', '=', 'True', 'break', 'if', 'video', '.', 'year', '==', 'int', '(', 'result', '[', "'Year'", ']', '.', 'split', '(', "u'\\u2013'", ')', '[', '0', ']', ')', ':', 'logger', '.', 'debug', '(', "'Found result with matching year'", ')', 'found', '=', 'True', 'break', 'if', 'not', 'found', ':', 'logger', '.', 'warning', '(', "'No matching series found'", ')', 'return', '# add series information', 'logger', '.', 'debug', '(', "'Found series %r'", ',', 'result', ')', 'video', '.', 'series', '=', 'result', '[', "'Title'", ']', 'video', '.', 'year', '=', 'int', '(', 'result', '[', "'Year'", ']', '.', 'split', '(', "u'\\u2013'", ')', '[', '0', ']', ')', 'video', '.', 'series_imdb_id', '=', 'result', '[', "'imdbID'", ']', 'elif', 'isinstance', '(', 'video', ',', 'Movie', ')', ':', '# exit if the information is complete', 'if', 'video', '.', 'imdb_id', ':', 'return', '# search the movie', 'results', '=', 'search', '(', 'video', '.', 'title', ',', "'movie'", ',', 'video', '.', 'year', ')', 'if', 'not', 'results', ':', 'logger', '.', 'warning', '(', "'No results'", ')', 'return', 'logger', '.', 'debug', '(', "'Found %d results'", ',', 'len', '(', 'results', ')', ')', '# filter the results', 'results', '=', '[', 'r', 'for', 'r', 'in', 'results', 'if', 'sanitize', '(', 'r', '[', "'Title'", ']', ')', '==', 'sanitize', '(', 'video', '.', 'title', ')', ']', 'if', 'not', 'results', ':', 'logger', '.', 'warning', '(', "'No matching movie found'", ')', 'return', '# process the results', 'found', '=', 'False', 'for', 'result', 'in', 'results', ':', 'if', 'video', '.', 'year', 'is', 'None', ':', 'logger', '.', 'debug', '(', "'Found result for movie without year'", ')', 'found', '=', 'True', 'break', 'if', 'video', '.', 'year', '==', 'int', '(', 'result', '[', "'Year'", ']', ')', ':', 'logger', '.', 'debug', '(', "'Found result with matching year'", ')', 'found', '=', 'True', 'break', 'if', 'not', 'found', ':', 'logger', '.', 'warning', '(', "'No matching movie found'", ')', 'return', '# add movie information', 'logger', '.', 'debug', '(', "'Found movie %r'", ',', 'result', ')', 'video', '.', 'title', '=', 'result', '[', "'Title'", ']', 'video', '.', 'year', '=', 'int', '(', 'result', '[', "'Year'", ']', '.', 'split', '(', "u'\\u2013'", ')', '[', '0', ']', 
')', 'video', '.', 'imdb_id', '=', 'result', '[', "'imdbID'", ']'] | Refine a video by searching `OMDb API <http://omdbapi.com/>`_.
Several :class:`~subliminal.video.Episode` attributes can be found:
* :attr:`~subliminal.video.Episode.series`
* :attr:`~subliminal.video.Episode.year`
* :attr:`~subliminal.video.Episode.series_imdb_id`
Similarly, for a :class:`~subliminal.video.Movie`:
* :attr:`~subliminal.video.Movie.title`
* :attr:`~subliminal.video.Movie.year`
* :attr:`~subliminal.video.Video.imdb_id` | ['Refine', 'a', 'video', 'by', 'searching', 'OMDb', 'API', '<http', ':', '//', 'omdbapi', '.', 'com', '/', '>', '_', '.'] | train | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/refiners/omdb.py#L92-L187 |
2,783 | GNS3/gns3-server | gns3server/compute/builtin/nodes/cloud.py | Cloud._add_linux_ethernet | def _add_linux_ethernet(self, port_info, bridge_name):
"""
Use raw sockets on Linux.
If interface is a bridge we connect a tap to it
"""
interface = port_info["interface"]
if gns3server.utils.interfaces.is_interface_bridge(interface):
network_interfaces = [interface["name"] for interface in self._interfaces()]
i = 0
while True:
tap = "gns3tap{}-{}".format(i, port_info["port_number"])
if tap not in network_interfaces:
break
i += 1
yield from self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
yield from self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface))
else:
yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface)) | python | def _add_linux_ethernet(self, port_info, bridge_name):
"""
Use raw sockets on Linux.
If interface is a bridge we connect a tap to it
"""
interface = port_info["interface"]
if gns3server.utils.interfaces.is_interface_bridge(interface):
network_interfaces = [interface["name"] for interface in self._interfaces()]
i = 0
while True:
tap = "gns3tap{}-{}".format(i, port_info["port_number"])
if tap not in network_interfaces:
break
i += 1
yield from self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
yield from self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface))
else:
yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface)) | ['def', '_add_linux_ethernet', '(', 'self', ',', 'port_info', ',', 'bridge_name', ')', ':', 'interface', '=', 'port_info', '[', '"interface"', ']', 'if', 'gns3server', '.', 'utils', '.', 'interfaces', '.', 'is_interface_bridge', '(', 'interface', ')', ':', 'network_interfaces', '=', '[', 'interface', '[', '"name"', ']', 'for', 'interface', 'in', 'self', '.', '_interfaces', '(', ')', ']', 'i', '=', '0', 'while', 'True', ':', 'tap', '=', '"gns3tap{}-{}"', '.', 'format', '(', 'i', ',', 'port_info', '[', '"port_number"', ']', ')', 'if', 'tap', 'not', 'in', 'network_interfaces', ':', 'break', 'i', '+=', '1', 'yield', 'from', 'self', '.', '_ubridge_send', '(', '\'bridge add_nio_tap "{name}" "{interface}"\'', '.', 'format', '(', 'name', '=', 'bridge_name', ',', 'interface', '=', 'tap', ')', ')', 'yield', 'from', 'self', '.', '_ubridge_send', '(', '\'brctl addif "{interface}" "{tap}"\'', '.', 'format', '(', 'tap', '=', 'tap', ',', 'interface', '=', 'interface', ')', ')', 'else', ':', 'yield', 'from', 'self', '.', '_ubridge_send', '(', '\'bridge add_nio_linux_raw {name} "{interface}"\'', '.', 'format', '(', 'name', '=', 'bridge_name', ',', 'interface', '=', 'interface', ')', ')'] | Use raw sockets on Linux.
If interface is a bridge we connect a tap to it | ['Use', 'raw', 'sockets', 'on', 'Linux', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/builtin/nodes/cloud.py#L245-L265 |
2,784 | pydata/xarray | xarray/core/variable.py | Variable._getitem_with_mask | def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if isinstance(self._data, dask_array_type):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
chunks_hint = getattr(data, 'chunks', None)
mask = indexing.create_mask(indexer, self.shape, chunks_hint)
data = duck_array_ops.where(mask, fill_value, data)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, 'shape', ()))
if new_order:
data = np.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data) | python | def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if isinstance(self._data, dask_array_type):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
chunks_hint = getattr(data, 'chunks', None)
mask = indexing.create_mask(indexer, self.shape, chunks_hint)
data = duck_array_ops.where(mask, fill_value, data)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, 'shape', ()))
if new_order:
data = np.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data) | ['def', '_getitem_with_mask', '(', 'self', ',', 'key', ',', 'fill_value', '=', 'dtypes', '.', 'NA', ')', ':', '# TODO(shoyer): expose this method in public API somewhere (isel?) and', '# use it for reindex.', '# TODO(shoyer): add a sanity check that all other integers are', '# non-negative', '# TODO(shoyer): add an optimization, remapping -1 to an adjacent value', '# that is actually indexed rather than mapping it to the last value', '# along each axis.', 'if', 'fill_value', 'is', 'dtypes', '.', 'NA', ':', 'fill_value', '=', 'dtypes', '.', 'get_fill_value', '(', 'self', '.', 'dtype', ')', 'dims', ',', 'indexer', ',', 'new_order', '=', 'self', '.', '_broadcast_indexes', '(', 'key', ')', 'if', 'self', '.', 'size', ':', 'if', 'isinstance', '(', 'self', '.', '_data', ',', 'dask_array_type', ')', ':', "# dask's indexing is faster this way; also vindex does not", '# support negative indices yet:', '# https://github.com/dask/dask/pull/2967', 'actual_indexer', '=', 'indexing', '.', 'posify_mask_indexer', '(', 'indexer', ')', 'else', ':', 'actual_indexer', '=', 'indexer', 'data', '=', 'as_indexable', '(', 'self', '.', '_data', ')', '[', 'actual_indexer', ']', 'chunks_hint', '=', 'getattr', '(', 'data', ',', "'chunks'", ',', 'None', ')', 'mask', '=', 'indexing', '.', 'create_mask', '(', 'indexer', ',', 'self', '.', 'shape', ',', 'chunks_hint', ')', 'data', '=', 'duck_array_ops', '.', 'where', '(', 'mask', ',', 'fill_value', ',', 'data', ')', 'else', ':', '# array cannot be indexed along dimensions of size 0, so just', '# build the mask directly instead.', 'mask', '=', 'indexing', '.', 'create_mask', '(', 'indexer', ',', 'self', '.', 'shape', ')', 'data', '=', 'np', '.', 'broadcast_to', '(', 'fill_value', ',', 'getattr', '(', 'mask', ',', "'shape'", ',', '(', ')', ')', ')', 'if', 'new_order', ':', 'data', '=', 'np', '.', 'moveaxis', '(', 'data', ',', 'range', '(', 'len', '(', 'new_order', ')', ')', ',', 'new_order', ')', 'return', 'self', '.', '_finalize_indexing_result', '(', 'dims', ',', 'data', ')'] | Index this Variable with -1 remapped to fill_value. | ['Index', 'this', 'Variable', 'with', '-', '1', 'remapped', 'to', 'fill_value', '.'] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L647-L683 |
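The core trick above (index as usual, then overwrite the positions where the indexer was -1 with a fill value) can be shown with plain NumPy; this only illustrates the idea and does not use xarray's indexing machinery:

import numpy as np

data = np.array([10.0, 20.0, 30.0])
indexer = np.array([0, 2, -1, 1])     # here -1 means "missing", not "last element"
fill_value = np.nan

taken = data[indexer]                 # plain NumPy still treats -1 as the last element
masked = np.where(indexer == -1, fill_value, taken)
print(masked)                         # [10. 30. nan 20.]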
2,785 | KelSolaar/Umbra | umbra/managers/notifications_manager.py | NotificationsManager.register_notification | def register_notification(self, notification):
"""
Registers given notification.
:param notification: Notification to register.
:type notification: Notification
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Registering notification: '{0}'.".format(notification))
self.__notifications.append(notification)
self.notification_registered.emit(notification)
return True | python | def register_notification(self, notification):
"""
Registers given notification.
:param notification: Notification to register.
:type notification: Notification
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Registering notification: '{0}'.".format(notification))
self.__notifications.append(notification)
self.notification_registered.emit(notification)
return True | ['def', 'register_notification', '(', 'self', ',', 'notification', ')', ':', 'LOGGER', '.', 'debug', '(', '"> Registering notification: \'{0}\'."', '.', 'format', '(', 'notification', ')', ')', 'self', '.', '__notifications', '.', 'append', '(', 'notification', ')', 'self', '.', 'notification_registered', '.', 'emit', '(', 'notification', ')', 'return', 'True'] | Registers given notification.
:param notification: Notification to register.
:type notification: Notification
:return: Method success.
:rtype: bool | ['Registers', 'given', 'notification', '.'] | train | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/managers/notifications_manager.py#L332-L346 |
2,786 | google/grr | grr/core/grr_response_core/lib/utils.py | StreamingZipGenerator.WriteFileHeader | def WriteFileHeader(self, arcname=None, compress_type=None, st=None):
"""Writes a file header."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo = self._GenerateZipInfo(
arcname=arcname, compress_type=compress_type, st=st)
self.cur_file_size = 0
self.cur_compress_size = 0
if self.cur_zinfo.compress_type == zipfile.ZIP_DEFLATED:
self.cur_cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
self.cur_cmpr = None
self.cur_crc = 0
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo.header_offset = self._stream.tell()
# Call _writeCheck(self.cur_zinfo) to do sanity checking on zinfo structure
# that we've constructed.
self._zip_fd._writecheck(self.cur_zinfo) # pylint: disable=protected-access
# Mark ZipFile as dirty. We have to keep self._zip_fd's internal state
# coherent so that it behaves correctly when close() is called.
self._zip_fd._didModify = True # pylint: disable=protected-access
# Write FileHeader now. It's incomplete, but CRC and uncompressed/compressed
# sized will be written later in data descriptor.
self._stream.write(self.cur_zinfo.FileHeader())
return self._stream.GetValueAndReset() | python | def WriteFileHeader(self, arcname=None, compress_type=None, st=None):
"""Writes a file header."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo = self._GenerateZipInfo(
arcname=arcname, compress_type=compress_type, st=st)
self.cur_file_size = 0
self.cur_compress_size = 0
if self.cur_zinfo.compress_type == zipfile.ZIP_DEFLATED:
self.cur_cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
self.cur_cmpr = None
self.cur_crc = 0
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo.header_offset = self._stream.tell()
# Call _writeCheck(self.cur_zinfo) to do sanity checking on zinfo structure
# that we've constructed.
self._zip_fd._writecheck(self.cur_zinfo) # pylint: disable=protected-access
# Mark ZipFile as dirty. We have to keep self._zip_fd's internal state
# coherent so that it behaves correctly when close() is called.
self._zip_fd._didModify = True # pylint: disable=protected-access
# Write FileHeader now. It's incomplete, but CRC and uncompressed/compressed
# sized will be written later in data descriptor.
self._stream.write(self.cur_zinfo.FileHeader())
return self._stream.GetValueAndReset() | ['def', 'WriteFileHeader', '(', 'self', ',', 'arcname', '=', 'None', ',', 'compress_type', '=', 'None', ',', 'st', '=', 'None', ')', ':', 'if', 'not', 'self', '.', '_stream', ':', 'raise', 'ArchiveAlreadyClosedError', '(', '"Attempting to write to a ZIP archive that was already closed."', ')', 'self', '.', 'cur_zinfo', '=', 'self', '.', '_GenerateZipInfo', '(', 'arcname', '=', 'arcname', ',', 'compress_type', '=', 'compress_type', ',', 'st', '=', 'st', ')', 'self', '.', 'cur_file_size', '=', '0', 'self', '.', 'cur_compress_size', '=', '0', 'if', 'self', '.', 'cur_zinfo', '.', 'compress_type', '==', 'zipfile', '.', 'ZIP_DEFLATED', ':', 'self', '.', 'cur_cmpr', '=', 'zlib', '.', 'compressobj', '(', 'zlib', '.', 'Z_DEFAULT_COMPRESSION', ',', 'zlib', '.', 'DEFLATED', ',', '-', '15', ')', 'else', ':', 'self', '.', 'cur_cmpr', '=', 'None', 'self', '.', 'cur_crc', '=', '0', 'if', 'not', 'self', '.', '_stream', ':', 'raise', 'ArchiveAlreadyClosedError', '(', '"Attempting to write to a ZIP archive that was already closed."', ')', 'self', '.', 'cur_zinfo', '.', 'header_offset', '=', 'self', '.', '_stream', '.', 'tell', '(', ')', '# Call _writeCheck(self.cur_zinfo) to do sanity checking on zinfo structure', "# that we've constructed.", 'self', '.', '_zip_fd', '.', '_writecheck', '(', 'self', '.', 'cur_zinfo', ')', '# pylint: disable=protected-access', "# Mark ZipFile as dirty. We have to keep self._zip_fd's internal state", '# coherent so that it behaves correctly when close() is called.', 'self', '.', '_zip_fd', '.', '_didModify', '=', 'True', '# pylint: disable=protected-access', "# Write FileHeader now. It's incomplete, but CRC and uncompressed/compressed", '# sized will be written later in data descriptor.', 'self', '.', '_stream', '.', 'write', '(', 'self', '.', 'cur_zinfo', '.', 'FileHeader', '(', ')', ')', 'return', 'self', '.', '_stream', '.', 'GetValueAndReset', '(', ')'] | Writes a file header. | ['Writes', 'a', 'file', 'header', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/utils.py#L914-L950 |
2,787 | gpoulter/python-ngram | ngram.py | NGram.searchitem | def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold) | python | def searchitem(self, item, threshold=None):
"""Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
"""
return self.search(self.key(item), threshold) | ['def', 'searchitem', '(', 'self', ',', 'item', ',', 'threshold', '=', 'None', ')', ':', 'return', 'self', '.', 'search', '(', 'self', '.', 'key', '(', 'item', ')', ',', 'threshold', ')'] | Search the index for items whose key exceeds the threshold
similarity to the key of the given item.
:return: list of pairs of (item, similarity) by decreasing similarity.
>>> from ngram import NGram
>>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"),
... (3, "SPANN")], key=lambda x:x[1])
>>> sorted(n.searchitem((2, "SPA"), 0.35))
[((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)] | ['Search', 'the', 'index', 'for', 'items', 'whose', 'key', 'exceeds', 'the', 'threshold', 'similarity', 'to', 'the', 'key', 'of', 'the', 'given', 'item', '.'] | train | https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/ngram.py#L296-L308 |
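Because searchitem just applies the index's key function and delegates to search, the doctest above is equivalent to querying search with the extracted key; a quick check, assuming the ngram package recorded here is installed:

from ngram import NGram

n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG"), (3, "SPANN")], key=lambda x: x[1])
by_item = sorted(n.searchitem((2, "SPA"), 0.35))
by_key = sorted(n.search("SPA", 0.35))
print(by_item == by_key)   # True: both give [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]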
2,788 | log2timeline/dfvfs | dfvfs/lib/tsk_partition.py | GetTSKVsPartByPathSpec | def GetTSKVsPartByPathSpec(tsk_volume, path_spec):
"""Retrieves the TSK volume system part object from the TSK volume object.
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
path_spec (PathSpec): path specification.
Returns:
tuple: contains:
pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
None on error.
int: partition index or None if not available.
"""
location = getattr(path_spec, 'location', None)
part_index = getattr(path_spec, 'part_index', None)
start_offset = getattr(path_spec, 'start_offset', None)
partition_index = None
if part_index is None:
if location is not None:
if location.startswith('/p'):
try:
partition_index = int(location[2:], 10) - 1
except ValueError:
pass
if partition_index is None or partition_index < 0:
location = None
if location is None and start_offset is None:
return None, None
bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)
current_part_index = 0
current_partition_index = 0
tsk_vs_part = None
# pytsk3 does not handle the Volume_Info iterator correctly therefore
# the explicit cast to list is needed to prevent the iterator terminating
# too soon or looping forever.
tsk_vs_part_list = list(tsk_volume)
number_of_tsk_vs_parts = len(tsk_vs_part_list)
if number_of_tsk_vs_parts > 0:
if (part_index is not None and
(part_index < 0 or part_index >= number_of_tsk_vs_parts)):
return None, None
for tsk_vs_part in tsk_vs_part_list:
if TSKVsPartIsAllocated(tsk_vs_part):
if partition_index is not None:
if partition_index == current_partition_index:
break
current_partition_index += 1
if part_index is not None and part_index == current_part_index:
break
if start_offset is not None:
start_sector = TSKVsPartGetStartSector(tsk_vs_part)
if start_sector is not None:
start_sector *= bytes_per_sector
if start_sector == start_offset:
break
current_part_index += 1
# Note that here we cannot solely rely on testing if tsk_vs_part is set
# since the for loop will exit with tsk_vs_part set.
if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts:
return None, None
if not TSKVsPartIsAllocated(tsk_vs_part):
current_partition_index = None
return tsk_vs_part, current_partition_index | python | def GetTSKVsPartByPathSpec(tsk_volume, path_spec):
"""Retrieves the TSK volume system part object from the TSK volume object.
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
path_spec (PathSpec): path specification.
Returns:
tuple: contains:
pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
None on error.
int: partition index or None if not available.
"""
location = getattr(path_spec, 'location', None)
part_index = getattr(path_spec, 'part_index', None)
start_offset = getattr(path_spec, 'start_offset', None)
partition_index = None
if part_index is None:
if location is not None:
if location.startswith('/p'):
try:
partition_index = int(location[2:], 10) - 1
except ValueError:
pass
if partition_index is None or partition_index < 0:
location = None
if location is None and start_offset is None:
return None, None
bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)
current_part_index = 0
current_partition_index = 0
tsk_vs_part = None
# pytsk3 does not handle the Volume_Info iterator correctly therefore
# the explicit cast to list is needed to prevent the iterator terminating
# too soon or looping forever.
tsk_vs_part_list = list(tsk_volume)
number_of_tsk_vs_parts = len(tsk_vs_part_list)
if number_of_tsk_vs_parts > 0:
if (part_index is not None and
(part_index < 0 or part_index >= number_of_tsk_vs_parts)):
return None, None
for tsk_vs_part in tsk_vs_part_list:
if TSKVsPartIsAllocated(tsk_vs_part):
if partition_index is not None:
if partition_index == current_partition_index:
break
current_partition_index += 1
if part_index is not None and part_index == current_part_index:
break
if start_offset is not None:
start_sector = TSKVsPartGetStartSector(tsk_vs_part)
if start_sector is not None:
start_sector *= bytes_per_sector
if start_sector == start_offset:
break
current_part_index += 1
# Note that here we cannot solely rely on testing if tsk_vs_part is set
# since the for loop will exit with tsk_vs_part set.
if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts:
return None, None
if not TSKVsPartIsAllocated(tsk_vs_part):
current_partition_index = None
return tsk_vs_part, current_partition_index | ['def', 'GetTSKVsPartByPathSpec', '(', 'tsk_volume', ',', 'path_spec', ')', ':', 'location', '=', 'getattr', '(', 'path_spec', ',', "'location'", ',', 'None', ')', 'part_index', '=', 'getattr', '(', 'path_spec', ',', "'part_index'", ',', 'None', ')', 'start_offset', '=', 'getattr', '(', 'path_spec', ',', "'start_offset'", ',', 'None', ')', 'partition_index', '=', 'None', 'if', 'part_index', 'is', 'None', ':', 'if', 'location', 'is', 'not', 'None', ':', 'if', 'location', '.', 'startswith', '(', "'/p'", ')', ':', 'try', ':', 'partition_index', '=', 'int', '(', 'location', '[', '2', ':', ']', ',', '10', ')', '-', '1', 'except', 'ValueError', ':', 'pass', 'if', 'partition_index', 'is', 'None', 'or', 'partition_index', '<', '0', ':', 'location', '=', 'None', 'if', 'location', 'is', 'None', 'and', 'start_offset', 'is', 'None', ':', 'return', 'None', ',', 'None', 'bytes_per_sector', '=', 'TSKVolumeGetBytesPerSector', '(', 'tsk_volume', ')', 'current_part_index', '=', '0', 'current_partition_index', '=', '0', 'tsk_vs_part', '=', 'None', '# pytsk3 does not handle the Volume_Info iterator correctly therefore', '# the explicit cast to list is needed to prevent the iterator terminating', '# too soon or looping forever.', 'tsk_vs_part_list', '=', 'list', '(', 'tsk_volume', ')', 'number_of_tsk_vs_parts', '=', 'len', '(', 'tsk_vs_part_list', ')', 'if', 'number_of_tsk_vs_parts', '>', '0', ':', 'if', '(', 'part_index', 'is', 'not', 'None', 'and', '(', 'part_index', '<', '0', 'or', 'part_index', '>=', 'number_of_tsk_vs_parts', ')', ')', ':', 'return', 'None', ',', 'None', 'for', 'tsk_vs_part', 'in', 'tsk_vs_part_list', ':', 'if', 'TSKVsPartIsAllocated', '(', 'tsk_vs_part', ')', ':', 'if', 'partition_index', 'is', 'not', 'None', ':', 'if', 'partition_index', '==', 'current_partition_index', ':', 'break', 'current_partition_index', '+=', '1', 'if', 'part_index', 'is', 'not', 'None', 'and', 'part_index', '==', 'current_part_index', ':', 'break', 'if', 'start_offset', 'is', 'not', 'None', ':', 'start_sector', '=', 'TSKVsPartGetStartSector', '(', 'tsk_vs_part', ')', 'if', 'start_sector', 'is', 'not', 'None', ':', 'start_sector', '*=', 'bytes_per_sector', 'if', 'start_sector', '==', 'start_offset', ':', 'break', 'current_part_index', '+=', '1', '# Note that here we cannot solely rely on testing if tsk_vs_part is set', '# since the for loop will exit with tsk_vs_part set.', 'if', 'tsk_vs_part', 'is', 'None', 'or', 'current_part_index', '>=', 'number_of_tsk_vs_parts', ':', 'return', 'None', ',', 'None', 'if', 'not', 'TSKVsPartIsAllocated', '(', 'tsk_vs_part', ')', ':', 'current_partition_index', '=', 'None', 'return', 'tsk_vs_part', ',', 'current_partition_index'] | Retrieves the TSK volume system part object from the TSK volume object.
Args:
tsk_volume (pytsk3.Volume_Info): TSK volume information.
path_spec (PathSpec): path specification.
Returns:
tuple: contains:
pytsk3.TSK_VS_PART_INFO: TSK volume system part information or
None on error.
int: partition index or None if not available. | ['Retrieves', 'the', 'TSK', 'volume', 'system', 'part', 'object', 'from', 'the', 'TSK', 'volume', 'object', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/tsk_partition.py#L9-L85 |
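A minimal call sketch for GetTSKVsPartByPathSpec, assuming a raw disk image opened with pytsk3 and a simple stand-in object for the dfvfs path specification (the helper only reads its location, part_index and start_offset attributes); the image file name and location value are hypothetical.

import types

import pytsk3
from dfvfs.lib import tsk_partition

# Open a hypothetical raw image and its volume system.
img_info = pytsk3.Img_Info('disk.raw')
tsk_volume = pytsk3.Volume_Info(img_info)

# Stand-in path spec selecting the first allocated partition ('/p1').
path_spec = types.SimpleNamespace(location='/p1', part_index=None, start_offset=None)

tsk_vs_part, partition_index = tsk_partition.GetTSKVsPartByPathSpec(tsk_volume, path_spec)
if tsk_vs_part is not None:
    print('partition index:', partition_index)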
2,789 | radujica/baloo | baloo/weld/weld_ops.py | weld_unique | def weld_unique(array, weld_type):
"""Return the unique elements of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation.
"""
obj_id, weld_obj = create_weld_object(array)
weld_template = """map(
tovec(
result(
for(
map(
{array},
|e|
{{e, 0si}}
),
dictmerger[{type}, i16, +],
|b: dictmerger[{type}, i16, +], i: i64, e: {{{type}, i16}}|
merge(b, e)
)
)
),
|e|
e.$0
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
type=weld_type)
return weld_obj | python | def weld_unique(array, weld_type):
"""Return the unique elements of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation.
"""
obj_id, weld_obj = create_weld_object(array)
weld_template = """map(
tovec(
result(
for(
map(
{array},
|e|
{{e, 0si}}
),
dictmerger[{type}, i16, +],
|b: dictmerger[{type}, i16, +], i: i64, e: {{{type}, i16}}|
merge(b, e)
)
)
),
|e|
e.$0
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
type=weld_type)
return weld_obj | ['def', 'weld_unique', '(', 'array', ',', 'weld_type', ')', ':', 'obj_id', ',', 'weld_obj', '=', 'create_weld_object', '(', 'array', ')', 'weld_template', '=', '"""map(\n tovec(\n result(\n for(\n map(\n {array},\n |e| \n {{e, 0si}}\n ),\n dictmerger[{type}, i16, +],\n |b: dictmerger[{type}, i16, +], i: i64, e: {{{type}, i16}}| \n merge(b, e)\n )\n )\n ),\n |e| \n e.$0\n)"""', 'weld_obj', '.', 'weld_code', '=', 'weld_template', '.', 'format', '(', 'array', '=', 'obj_id', ',', 'type', '=', 'weld_type', ')', 'return', 'weld_obj'] | Return the unique elements of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation. | ['Return', 'the', 'unique', 'elements', 'of', 'the', 'array', '.'] | train | https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/weld/weld_ops.py#L529-L569 |
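A hedged sketch of how this builder might be invoked; the WeldLong type object and its import path are assumptions about the Weld bindings bundled with baloo, and the returned WeldObject is lazy, so nothing is computed until the library evaluates it.

import numpy as np
from baloo.weld import WeldLong  # import path is an assumption
from baloo.weld.weld_ops import weld_unique

data = np.array([1, 2, 2, 3, 3, 3], dtype=np.int64)

# Builds the dictmerger-based deduplication lazily; inspect the generated code.
unique_obj = weld_unique(data, WeldLong())
print(unique_obj.weld_code)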
2,790 | shtalinberg/django-el-pagination | el_pagination/decorators.py | page_templates | def page_templates(mapping):
"""Like the *page_template* decorator but manage multiple paginations.
You can map multiple templates to *querystring_keys* using the *mapping*
dict, e.g.::
@page_templates({
'page_contents1.html': None,
'page_contents2.html': 'go_to_page',
})
def myview(request):
...
When the value of the dict is None then the default *querystring_key*
(defined in settings) is used. You can use this decorator instead of
chaining multiple *page_template* calls.
"""
def decorator(view):
@wraps(view)
def decorated(request, *args, **kwargs):
# Trust the developer: he wrote ``context.update(extra_context)``
# in his view.
extra_context = kwargs.setdefault('extra_context', {})
querystring_key = request.GET.get(QS_KEY,
request.POST.get(QS_KEY, PAGE_LABEL))
template = _get_template(querystring_key, mapping)
extra_context['page_template'] = template
# Switch the template when the request is Ajax.
if request.is_ajax() and template:
kwargs[TEMPLATE_VARNAME] = template
return view(request, *args, **kwargs)
return decorated
return decorator | python | def page_templates(mapping):
"""Like the *page_template* decorator but manage multiple paginations.
You can map multiple templates to *querystring_keys* using the *mapping*
dict, e.g.::
@page_templates({
'page_contents1.html': None,
'page_contents2.html': 'go_to_page',
})
def myview(request):
...
When the value of the dict is None then the default *querystring_key*
(defined in settings) is used. You can use this decorator instead of
chaining multiple *page_template* calls.
"""
def decorator(view):
@wraps(view)
def decorated(request, *args, **kwargs):
# Trust the developer: he wrote ``context.update(extra_context)``
# in his view.
extra_context = kwargs.setdefault('extra_context', {})
querystring_key = request.GET.get(QS_KEY,
request.POST.get(QS_KEY, PAGE_LABEL))
template = _get_template(querystring_key, mapping)
extra_context['page_template'] = template
# Switch the template when the request is Ajax.
if request.is_ajax() and template:
kwargs[TEMPLATE_VARNAME] = template
return view(request, *args, **kwargs)
return decorated
return decorator | ['def', 'page_templates', '(', 'mapping', ')', ':', 'def', 'decorator', '(', 'view', ')', ':', '@', 'wraps', '(', 'view', ')', 'def', 'decorated', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Trust the developer: he wrote ``context.update(extra_context)``', '# in his view.', 'extra_context', '=', 'kwargs', '.', 'setdefault', '(', "'extra_context'", ',', '{', '}', ')', 'querystring_key', '=', 'request', '.', 'GET', '.', 'get', '(', 'QS_KEY', ',', 'request', '.', 'POST', '.', 'get', '(', 'QS_KEY', ',', 'PAGE_LABEL', ')', ')', 'template', '=', '_get_template', '(', 'querystring_key', ',', 'mapping', ')', 'extra_context', '[', "'page_template'", ']', '=', 'template', '# Switch the template when the request is Ajax.', 'if', 'request', '.', 'is_ajax', '(', ')', 'and', 'template', ':', 'kwargs', '[', 'TEMPLATE_VARNAME', ']', '=', 'template', 'return', 'view', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'decorated', 'return', 'decorator'] | Like the *page_template* decorator but manage multiple paginations.
You can map multiple templates to *querystring_keys* using the *mapping*
dict, e.g.::
@page_templates({
'page_contents1.html': None,
'page_contents2.html': 'go_to_page',
})
def myview(request):
...
When the value of the dict is None then the default *querystring_key*
(defined in settings) is used. You can use this decorator instead of
chaining multiple *page_template* calls. | ['Like', 'the', '*', 'page_template', '*', 'decorator', 'but', 'manage', 'multiple', 'paginations', '.'] | train | https://github.com/shtalinberg/django-el-pagination/blob/889ba62b46cb58292d554753a0bfda0b0a6d57da/el_pagination/decorators.py#L58-L91 |
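A hedged example view using the decorator exactly as the docstring describes; the template names and the Entry/Comment models are hypothetical.

from django.shortcuts import render
from el_pagination.decorators import page_templates

@page_templates({
    'myapp/entries_page.html': None,         # paginated with the default querystring key
    'myapp/comments_page.html': 'comments',  # second pagination on the same page
})
def entry_index(request, template='myapp/entry_index.html', extra_context=None):
    # Entry and Comment are hypothetical models.
    context = {'entries': Entry.objects.all(), 'comments': Comment.objects.all()}
    if extra_context is not None:
        context.update(extra_context)  # the decorator injects page_template here
    return render(request, template, context)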
2,791 | DataMedSci/mcpartools | setup.py | get_version | def get_version():
"""
Get project version (using versioneer)
:return: string containing version
"""
setup_versioneer()
clean_cache()
import versioneer
version = versioneer.get_version()
parsed_version = parse_version(version)
if '*@' in str(parsed_version):
import time
version += str(int(time.time()))
return version | python | def get_version():
"""
Get project version (using versioneer)
:return: string containing version
"""
setup_versioneer()
clean_cache()
import versioneer
version = versioneer.get_version()
parsed_version = parse_version(version)
if '*@' in str(parsed_version):
import time
version += str(int(time.time()))
return version | ['def', 'get_version', '(', ')', ':', 'setup_versioneer', '(', ')', 'clean_cache', '(', ')', 'import', 'versioneer', 'version', '=', 'versioneer', '.', 'get_version', '(', ')', 'parsed_version', '=', 'parse_version', '(', 'version', ')', 'if', "'*@'", 'in', 'str', '(', 'parsed_version', ')', ':', 'import', 'time', 'version', '+=', 'str', '(', 'int', '(', 'time', '.', 'time', '(', ')', ')', ')', 'return', 'version'] | Get project version (using versioneer)
:return: string containing version | ['Get', 'project', 'version', '(', 'using', 'versioneer', ')', ':', 'return', ':', 'string', 'containing', 'version'] | train | https://github.com/DataMedSci/mcpartools/blob/84f869094d05bf70f09e8aaeca671ddaa1c56ec4/setup.py#L84-L97 |
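A sketch of how such a helper is typically consumed later in the same setup.py; the package list below is illustrative only.

from setuptools import setup

setup(
    name='mcpartools',
    version=get_version(),  # resolved through versioneer at build time
    packages=['mcpartools'],
)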
2,792 | cmbruns/pyopenvr | src/openvr/__init__.py | IVRApplications.getDefaultApplicationForMimeType | def getDefaultApplicationForMimeType(self, pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen):
"""return the app key that will open this mime type"""
fn = self.function_table.getDefaultApplicationForMimeType
result = fn(pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen)
return result | python | def getDefaultApplicationForMimeType(self, pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen):
"""return the app key that will open this mime type"""
fn = self.function_table.getDefaultApplicationForMimeType
result = fn(pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen)
return result | ['def', 'getDefaultApplicationForMimeType', '(', 'self', ',', 'pchMimeType', ',', 'pchAppKeyBuffer', ',', 'unAppKeyBufferLen', ')', ':', 'fn', '=', 'self', '.', 'function_table', '.', 'getDefaultApplicationForMimeType', 'result', '=', 'fn', '(', 'pchMimeType', ',', 'pchAppKeyBuffer', ',', 'unAppKeyBufferLen', ')', 'return', 'result'] | return the app key that will open this mime type | ['return', 'the', 'app', 'key', 'that', 'will', 'open', 'this', 'mime', 'type'] | train | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3572-L3577 |
2,793 | piotr-rusin/spam-lists | spam_lists/structures.py | Hostname.is_subdomain | def is_subdomain(self, other):
"""Test if the object is a subdomain of the other.
:param other: the object to which we compare this instance
:returns: True if this instance is a subdomain of the other
"""
compared = other.value if hasattr(other, 'value') else other
try:
return self.value.is_subdomain(compared)
except AttributeError:
return False | python | def is_subdomain(self, other):
"""Test if the object is a subdomain of the other.
:param other: the object to which we compare this instance
:returns: True if this instance is a subdomain of the other
"""
compared = other.value if hasattr(other, 'value') else other
try:
return self.value.is_subdomain(compared)
except AttributeError:
return False | ['def', 'is_subdomain', '(', 'self', ',', 'other', ')', ':', 'compared', '=', 'other', '.', 'value', 'if', 'hasattr', '(', 'other', ',', "'value'", ')', 'else', 'other', 'try', ':', 'return', 'self', '.', 'value', '.', 'is_subdomain', '(', 'compared', ')', 'except', 'AttributeError', ':', 'return', 'False'] | Test if the object is a subdomain of the other.
:param other: the object to which we compare this instance
:returns: True if this instance is a subdomain of the other | ['Test', 'if', 'the', 'object', 'is', 'a', 'subdomain', 'of', 'the', 'other', '.'] | train | https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/structures.py#L84-L94 |
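A hedged usage sketch; it assumes Hostname can be constructed directly from a hostname string, which the module's validation normally allows.

from spam_lists.structures import Hostname

child = Hostname('mail.example.com')
parent = Hostname('example.com')

print(child.is_subdomain(parent))  # True: compares the wrapped dns names
print(parent.is_subdomain(child))  # False: the parent is not below the child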
2,794 | pyviz/holoviews | holoviews/util/transform.py | dim.norm | def norm(self, limits=None):
"""Unity-based normalization to scale data into 0-1 range.
(values - min) / (max - min)
Args:
limits: tuple of (min, max) defining the normalization range
"""
kwargs = {}
if limits is not None:
kwargs = {'min': limits[0], 'max': limits[1]}
return dim(self, norm, **kwargs) | python | def norm(self, limits=None):
"""Unity-based normalization to scale data into 0-1 range.
(values - min) / (max - min)
Args:
limits: tuple of (min, max) defining the normalization range
"""
kwargs = {}
if limits is not None:
kwargs = {'min': limits[0], 'max': limits[1]}
return dim(self, norm, **kwargs) | ['def', 'norm', '(', 'self', ',', 'limits', '=', 'None', ')', ':', 'kwargs', '=', '{', '}', 'if', 'limits', 'is', 'not', 'None', ':', 'kwargs', '=', '{', "'min'", ':', 'limits', '[', '0', ']', ',', "'max'", ':', 'limits', '[', '1', ']', '}', 'return', 'dim', '(', 'self', ',', 'norm', ',', '*', '*', 'kwargs', ')'] | Unity-based normalization to scale data into 0-1 range.
(values - min) / (max - min)
Args:
limits: tuple of (min, max) defining the normalization range | ['Unity', '-', 'based', 'normalization', 'to', 'scale', 'data', 'into', '0', '-', '1', 'range', '.'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/transform.py#L254-L265 |
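A short hedged example of the expression API described above; the dataset and value-dimension name are illustrative.

import holoviews as hv
from holoviews import dim

points = hv.Points([(0, 1, 10), (1, 3, 50), (2, 2, 90)], vdims=['weight'])

# Scale 'weight' into 0-1, from its own min/max or from fixed limits.
auto_scaled = points.opts(size=dim('weight').norm() * 20)
fixed_scaled = points.opts(size=dim('weight').norm(limits=(0, 100)) * 20)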
2,795 | mikedh/trimesh | trimesh/base.py | Trimesh.convert_units | def convert_units(self, desired, guess=False):
"""
Convert the units of the mesh into a specified unit.
Parameters
----------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert?
"""
units._convert_units(self, desired, guess)
return self | python | def convert_units(self, desired, guess=False):
"""
Convert the units of the mesh into a specified unit.
Parameters
----------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert?
"""
units._convert_units(self, desired, guess)
return self | ['def', 'convert_units', '(', 'self', ',', 'desired', ',', 'guess', '=', 'False', ')', ':', 'units', '.', '_convert_units', '(', 'self', ',', 'desired', ',', 'guess', ')', 'return', 'self'] | Convert the units of the mesh into a specified unit.
Parameters
----------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert? | ['Convert', 'the', 'units', 'of', 'the', 'mesh', 'into', 'a', 'specified', 'unit', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L998-L1011 |
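A hedged example of unit conversion on a loaded mesh; the file name is a placeholder.

import trimesh

mesh = trimesh.load('bracket.stl')

# Either declare the current units explicitly before converting...
mesh.units = 'inches'
mesh.convert_units('meters')

# ...or let trimesh guess the current units from the model's scale.
mesh2 = trimesh.load('bracket.stl')
mesh2.convert_units('meters', guess=True)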
2,796 | GoogleCloudPlatform/datastore-ndb-python | ndb/query.py | QueryIterator.next | def next(self):
"""Iterator protocol: get next item or raise StopIteration."""
if self._fut is None:
self._fut = self._iter.getq()
try:
try:
# The future result is set by this class's _extended_callback
# method.
# pylint: disable=unpacking-non-sequence
(ent,
self._cursor_before,
self._cursor_after,
self._more_results) = self._fut.get_result()
return ent
except EOFError:
self._exhausted = True
raise StopIteration
finally:
self._fut = None | python | def next(self):
"""Iterator protocol: get next item or raise StopIteration."""
if self._fut is None:
self._fut = self._iter.getq()
try:
try:
# The future result is set by this class's _extended_callback
# method.
# pylint: disable=unpacking-non-sequence
(ent,
self._cursor_before,
self._cursor_after,
self._more_results) = self._fut.get_result()
return ent
except EOFError:
self._exhausted = True
raise StopIteration
finally:
self._fut = None | ['def', 'next', '(', 'self', ')', ':', 'if', 'self', '.', '_fut', 'is', 'None', ':', 'self', '.', '_fut', '=', 'self', '.', '_iter', '.', 'getq', '(', ')', 'try', ':', 'try', ':', "# The future result is set by this class's _extended_callback", '# method.', '# pylint: disable=unpacking-non-sequence', '(', 'ent', ',', 'self', '.', '_cursor_before', ',', 'self', '.', '_cursor_after', ',', 'self', '.', '_more_results', ')', '=', 'self', '.', '_fut', '.', 'get_result', '(', ')', 'return', 'ent', 'except', 'EOFError', ':', 'self', '.', '_exhausted', '=', 'True', 'raise', 'StopIteration', 'finally', ':', 'self', '.', '_fut', '=', 'None'] | Iterator protocol: get next item or raise StopIteration. | ['Iterator', 'protocol', ':', 'get', 'next', 'item', 'or', 'raise', 'StopIteration', '.'] | train | https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1798-L1816 |
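A hedged sketch of the iteration pattern this method supports; Account is a hypothetical model and the import path assumes the App Engine runtime.

from google.appengine.ext import ndb

class Account(ndb.Model):
    name = ndb.StringProperty()

qit = Account.query().iter(produce_cursors=True)
while qit.has_next():
    entity = qit.next()          # safe here because has_next() was checked
    cursor = qit.cursor_after()  # available because produce_cursors=True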
2,797 | SheffieldML/GPy | GPy/kern/src/ODE_UYC.py | ODE_UYC.Kdiag | def Kdiag(self, X):
"""Compute the diagonal of the covariance matrix associated to X."""
Kdiag = np.zeros(X.shape[0])
ly=1/self.lengthscale_Y
lu=np.sqrt(3)/self.lengthscale_U
Vu = self.variance_U
Vy=self.variance_Y
k1 = (2*lu+ly)/(lu+ly)**2
k2 = (ly-2*lu + 2*lu-ly ) / (ly-lu)**2
k3 = 1/(lu+ly) + (lu)/(lu+ly)**2
slices = index_to_slices(X[:,-1])
for i, ss1 in enumerate(slices):
for s1 in ss1:
if i==0:
Kdiag[s1]+= self.variance_U + self.ubias
elif i==1:
Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
raise ValueError("invalid input/output index")
#Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
#Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
return Kdiag | python | def Kdiag(self, X):
"""Compute the diagonal of the covariance matrix associated to X."""
Kdiag = np.zeros(X.shape[0])
ly=1/self.lengthscale_Y
lu=np.sqrt(3)/self.lengthscale_U
Vu = self.variance_U
Vy=self.variance_Y
k1 = (2*lu+ly)/(lu+ly)**2
k2 = (ly-2*lu + 2*lu-ly ) / (ly-lu)**2
k3 = 1/(lu+ly) + (lu)/(lu+ly)**2
slices = index_to_slices(X[:,-1])
for i, ss1 in enumerate(slices):
for s1 in ss1:
if i==0:
Kdiag[s1]+= self.variance_U + self.ubias
elif i==1:
Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
raise ValueError("invalid input/output index")
#Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
#Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
return Kdiag | ['def', 'Kdiag', '(', 'self', ',', 'X', ')', ':', 'Kdiag', '=', 'np', '.', 'zeros', '(', 'X', '.', 'shape', '[', '0', ']', ')', 'ly', '=', '1', '/', 'self', '.', 'lengthscale_Y', 'lu', '=', 'np', '.', 'sqrt', '(', '3', ')', '/', 'self', '.', 'lengthscale_U', 'Vu', '=', 'self', '.', 'variance_U', 'Vy', '=', 'self', '.', 'variance_Y', 'k1', '=', '(', '2', '*', 'lu', '+', 'ly', ')', '/', '(', 'lu', '+', 'ly', ')', '**', '2', 'k2', '=', '(', 'ly', '-', '2', '*', 'lu', '+', '2', '*', 'lu', '-', 'ly', ')', '/', '(', 'ly', '-', 'lu', ')', '**', '2', 'k3', '=', '1', '/', '(', 'lu', '+', 'ly', ')', '+', '(', 'lu', ')', '/', '(', 'lu', '+', 'ly', ')', '**', '2', 'slices', '=', 'index_to_slices', '(', 'X', '[', ':', ',', '-', '1', ']', ')', 'for', 'i', ',', 'ss1', 'in', 'enumerate', '(', 'slices', ')', ':', 'for', 's1', 'in', 'ss1', ':', 'if', 'i', '==', '0', ':', 'Kdiag', '[', 's1', ']', '+=', 'self', '.', 'variance_U', '+', 'self', '.', 'ubias', 'elif', 'i', '==', '1', ':', 'Kdiag', '[', 's1', ']', '+=', 'Vu', '*', 'Vy', '*', '(', 'k1', '+', 'k2', '+', 'k3', ')', 'else', ':', 'raise', 'ValueError', '(', '"invalid input/output index"', ')', '#Kdiag[slices[0][0]]+= self.variance_U #matern32 diag', '#Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag', 'return', 'Kdiag'] | Compute the diagonal of the covariance matrix associated to X. | ['Compute', 'the', 'diagonal', 'of', 'the', 'covariance', 'matrix', 'associated', 'to', 'X', '.'] | train | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/ODE_UYC.py#L96-L121 |
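A heavily hedged sketch: the constructor call and default hyperparameters are assumptions; the grounded detail is that the last column of X must hold the output index (0 for the latent force U, 1 for the output Y).

import numpy as np
import GPy

t = np.linspace(0, 5, 10)[:, None]
X = np.vstack([np.hstack([t, np.zeros_like(t)]),   # rows belonging to output 0
               np.hstack([t, np.ones_like(t)])])   # rows belonging to output 1

kern = GPy.kern.ODE_UYC(input_dim=2)  # constructor signature is an assumption
print(kern.Kdiag(X).shape)            # one diagonal entry per row of X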
2,798 | bihealth/vcfpy | vcfpy/reader.py | Reader.close | def close(self):
"""Close underlying stream"""
if self.tabix_file and not self.tabix_file.closed:
self.tabix_file.close()
if self.stream:
self.stream.close() | python | def close(self):
"""Close underlying stream"""
if self.tabix_file and not self.tabix_file.closed:
self.tabix_file.close()
if self.stream:
self.stream.close() | ['def', 'close', '(', 'self', ')', ':', 'if', 'self', '.', 'tabix_file', 'and', 'not', 'self', '.', 'tabix_file', '.', 'closed', ':', 'self', '.', 'tabix_file', '.', 'close', '(', ')', 'if', 'self', '.', 'stream', ':', 'self', '.', 'stream', '.', 'close', '(', ')'] | Close underlying stream | ['Close', 'underlying', 'stream'] | train | https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/reader.py#L148-L153 |
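A hedged usage sketch; the input path is a placeholder, and contextlib.closing guarantees close() runs even if iteration fails.

import contextlib
import vcfpy

with contextlib.closing(vcfpy.Reader.from_path('example.vcf.gz')) as reader:
    for record in reader:
        print(record.CHROM, record.POS)
# reader.close() has run here, closing the stream and any tabix handle.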
2,799 | inveniosoftware/invenio-accounts | invenio_accounts/alembic/e12419831262_add_new_columns_on_sessionactivity.py | upgrade | def upgrade():
"""Upgrade database."""
with op.batch_alter_table('accounts_user_session_activity') as batch_op:
batch_op.add_column(sa.Column('browser', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('browser_version', sa.String(30), nullable=True))
batch_op.add_column(
sa.Column('country', sa.String(3), nullable=True))
batch_op.add_column(
sa.Column('device', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('ip', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('os', sa.String(80), nullable=True)) | python | def upgrade():
"""Upgrade database."""
with op.batch_alter_table('accounts_user_session_activity') as batch_op:
batch_op.add_column(sa.Column('browser', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('browser_version', sa.String(30), nullable=True))
batch_op.add_column(
sa.Column('country', sa.String(3), nullable=True))
batch_op.add_column(
sa.Column('device', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('ip', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('os', sa.String(80), nullable=True)) | ['def', 'upgrade', '(', ')', ':', 'with', 'op', '.', 'batch_alter_table', '(', "'accounts_user_session_activity'", ')', 'as', 'batch_op', ':', 'batch_op', '.', 'add_column', '(', 'sa', '.', 'Column', '(', "'browser'", ',', 'sa', '.', 'String', '(', '80', ')', ',', 'nullable', '=', 'True', ')', ')', 'batch_op', '.', 'add_column', '(', 'sa', '.', 'Column', '(', "'browser_version'", ',', 'sa', '.', 'String', '(', '30', ')', ',', 'nullable', '=', 'True', ')', ')', 'batch_op', '.', 'add_column', '(', 'sa', '.', 'Column', '(', "'country'", ',', 'sa', '.', 'String', '(', '3', ')', ',', 'nullable', '=', 'True', ')', ')', 'batch_op', '.', 'add_column', '(', 'sa', '.', 'Column', '(', "'device'", ',', 'sa', '.', 'String', '(', '80', ')', ',', 'nullable', '=', 'True', ')', ')', 'batch_op', '.', 'add_column', '(', 'sa', '.', 'Column', '(', "'ip'", ',', 'sa', '.', 'String', '(', '80', ')', ',', 'nullable', '=', 'True', ')', ')', 'batch_op', '.', 'add_column', '(', 'sa', '.', 'Column', '(', "'os'", ',', 'sa', '.', 'String', '(', '80', ')', ',', 'nullable', '=', 'True', ')', ')'] | Upgrade database. | ['Upgrade', 'database', '.'] | train | https://github.com/inveniosoftware/invenio-accounts/blob/b0d2f0739b00dbefea22ca15d7d374a1b4a63aec/invenio_accounts/alembic/e12419831262_add_new_columns_on_sessionactivity.py#L21-L34 |
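For symmetry, a plausible downgrade counterpart would drop the same columns; this is an assumption about the rest of the revision file, not content taken from this row.

def downgrade():
    """Downgrade database."""
    with op.batch_alter_table('accounts_user_session_activity') as batch_op:
        for name in ('browser', 'browser_version', 'country', 'device', 'ip', 'os'):
            batch_op.drop_column(name)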