Dataset columns (type and value/length range):

    Unnamed: 0                   int64          0 to 10k
    repository_name              stringlengths  7 to 54
    func_path_in_repository      stringlengths  5 to 223
    func_name                    stringlengths  1 to 134
    whole_func_string            stringlengths  100 to 30.3k
    language                     stringclasses  1 value
    func_code_string             stringlengths  100 to 30.3k
    func_code_tokens             stringlengths  138 to 33.2k
    func_documentation_string    stringlengths  1 to 15k
    func_documentation_tokens    stringlengths  5 to 5.14k
    split_name                   stringclasses  1 value
    func_code_url                stringlengths  91 to 315
2,500
QInfer/python-qinfer
src/qinfer/smc.py
SMCUpdater.bayes_risk
def bayes_risk(self, expparams):
    r"""
    Calculates the Bayes risk for hypothetical experiments, assuming the
    quadratic loss function defined by the current model's scale matrix
    (see :attr:`qinfer.abstract_model.Simulatable.Q`).

    :param expparams: The experiments at which to compute the risk.
    :type expparams: :class:`~numpy.ndarray` of dtype given by the current
        model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype`
        property, and of shape ``(1,)``

    :return np.ndarray: The Bayes risk for the current posterior distribution
        at each hypothetical experiment in ``expparams``, therefore has
        shape ``(expparams.size,)``
    """

    # for models whose outcome number changes with experiment, we
    # take the easy way out and for-loop over experiments
    n_eps = expparams.size
    if n_eps > 1 and not self.model.is_n_outcomes_constant:
        risk = np.empty(n_eps)
        for idx in range(n_eps):
            risk[idx] = self.bayes_risk(expparams[idx, np.newaxis])
        return risk

    # outcomes for the first experiment
    os = self.model.domain(expparams[0,np.newaxis])[0].values

    # compute the hypothetical weights, likelihoods and normalizations for
    # every possible outcome and expparam
    # the likelihood over outcomes should sum to 1, so don't compute for last outcome
    w_hyp, L, N = self.hypothetical_update(
        os[:-1], expparams,
        return_normalization=True,
        return_likelihood=True
    )
    w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :]
    N = np.concatenate([N[:,:,0], np.sum(w_hyp_last_outcome[np.newaxis,:,:], axis=2)], axis=0)
    w_hyp_last_outcome = w_hyp_last_outcome / N[-1,:,np.newaxis]
    w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis,:,:]], axis=0)
    # w_hyp.shape == (n_out, n_eps, n_particles)
    # N.shape == (n_out, n_eps)

    # compute the hypothetical means and variances given outcomes and exparams
    # mu_hyp.shape == (n_out, n_eps, n_models)
    # var_hyp.shape == (n_out, n_eps)
    mu_hyp = np.dot(w_hyp, self.particle_locations)
    var_hyp = np.sum(
        w_hyp * np.sum(self.model.Q * (
            self.particle_locations[np.newaxis,np.newaxis,:,:] -
            mu_hyp[:,:,np.newaxis,:]
        ) ** 2, axis=3),
        axis=2
    )

    # the risk of a given expparam can be calculated as the mean posterior
    # variance weighted over all possible outcomes
    return np.sum(N * var_hyp, axis=0)
python
['def', 'bayes_risk', '(', 'self', ',', 'expparams', ')', ':', '# for models whose outcome number changes with experiment, we ', '# take the easy way out and for-loop over experiments', 'n_eps', '=', 'expparams', '.', 'size', 'if', 'n_eps', '>', '1', 'and', 'not', 'self', '.', 'model', '.', 'is_n_outcomes_constant', ':', 'risk', '=', 'np', '.', 'empty', '(', 'n_eps', ')', 'for', 'idx', 'in', 'range', '(', 'n_eps', ')', ':', 'risk', '[', 'idx', ']', '=', 'self', '.', 'bayes_risk', '(', 'expparams', '[', 'idx', ',', 'np', '.', 'newaxis', ']', ')', 'return', 'risk', '# outcomes for the first experiment', 'os', '=', 'self', '.', 'model', '.', 'domain', '(', 'expparams', '[', '0', ',', 'np', '.', 'newaxis', ']', ')', '[', '0', ']', '.', 'values', '# compute the hypothetical weights, likelihoods and normalizations for', '# every possible outcome and expparam', "# the likelihood over outcomes should sum to 1, so don't compute for last outcome", 'w_hyp', ',', 'L', ',', 'N', '=', 'self', '.', 'hypothetical_update', '(', 'os', '[', ':', '-', '1', ']', ',', 'expparams', ',', 'return_normalization', '=', 'True', ',', 'return_likelihood', '=', 'True', ')', 'w_hyp_last_outcome', '=', '(', '1', '-', 'L', '.', 'sum', '(', 'axis', '=', '0', ')', ')', '*', 'self', '.', 'particle_weights', '[', 'np', '.', 'newaxis', ',', ':', ']', 'N', '=', 'np', '.', 'concatenate', '(', '[', 'N', '[', ':', ',', ':', ',', '0', ']', ',', 'np', '.', 'sum', '(', 'w_hyp_last_outcome', '[', 'np', '.', 'newaxis', ',', ':', ',', ':', ']', ',', 'axis', '=', '2', ')', ']', ',', 'axis', '=', '0', ')', 'w_hyp_last_outcome', '=', 'w_hyp_last_outcome', '/', 'N', '[', '-', '1', ',', ':', ',', 'np', '.', 'newaxis', ']', 'w_hyp', '=', 'np', '.', 'concatenate', '(', '[', 'w_hyp', ',', 'w_hyp_last_outcome', '[', 'np', '.', 'newaxis', ',', ':', ',', ':', ']', ']', ',', 'axis', '=', '0', ')', '# w_hyp.shape == (n_out, n_eps, n_particles)', '# N.shape == (n_out, n_eps)', '# compute the hypothetical means and variances given outcomes and exparams', '# mu_hyp.shape == (n_out, n_eps, n_models)', '# var_hyp.shape == (n_out, n_eps)', 'mu_hyp', '=', 'np', '.', 'dot', '(', 'w_hyp', ',', 'self', '.', 'particle_locations', ')', 'var_hyp', '=', 'np', '.', 'sum', '(', 'w_hyp', '*', 'np', '.', 'sum', '(', 'self', '.', 'model', '.', 'Q', '*', '(', 'self', '.', 'particle_locations', '[', 'np', '.', 'newaxis', ',', 'np', '.', 'newaxis', ',', ':', ',', ':', ']', '-', 'mu_hyp', '[', ':', ',', ':', ',', 'np', '.', 'newaxis', ',', ':', ']', ')', '**', '2', ',', 'axis', '=', '3', ')', ',', 'axis', '=', '2', ')', '# the risk of a given expparam can be calculated as the mean posterior', '# variance weighted over all possible outcomes', 'return', 'np', '.', 'sum', '(', 'N', '*', 'var_hyp', ',', 'axis', '=', '0', ')']
r""" Calculates the Bayes risk for hypothetical experiments, assuming the quadratic loss function defined by the current model's scale matrix (see :attr:`qinfer.abstract_model.Simulatable.Q`). :param expparams: The experiments at which to compute the risk. :type expparams: :class:`~numpy.ndarray` of dtype given by the current model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property, and of shape ``(1,)`` :return np.ndarray: The Bayes risk for the current posterior distribution at each hypothetical experiment in ``expparams``, therefore has shape ``(expparams.size,)``
['r', 'Calculates', 'the', 'Bayes', 'risk', 'for', 'hypothetical', 'experiments', 'assuming', 'the', 'quadratic', 'loss', 'function', 'defined', 'by', 'the', 'current', 'model', 's', 'scale', 'matrix', '(', 'see', ':', 'attr', ':', 'qinfer', '.', 'abstract_model', '.', 'Simulatable', '.', 'Q', ')', '.']
train
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/smc.py#L553-L612
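A minimal usage sketch for the bayes_risk method above, in the spirit of qinfer's adaptive-experiment workflow. The model, prior, particle count, and the assumption that expparams can be a plain float array (as for SimplePrecessionModel) are illustrative guesses, not taken from the row itself.

import numpy as np
from qinfer import SimplePrecessionModel, UniformDistribution, SMCUpdater

# Hypothetical setup: a one-parameter precession model with a uniform prior.
model = SimplePrecessionModel()
prior = UniformDistribution([0, 1])
updater = SMCUpdater(model, 1000, prior)

# Candidate measurement times; assumed to be castable to model.expparams_dtype.
candidate_times = np.linspace(0.1, 10, 50).astype(model.expparams_dtype)

# Choose the experiment whose expected posterior variance (Bayes risk) is smallest.
risks = updater.bayes_risk(candidate_times)
best_time = candidate_times[np.argmin(risks)]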
2,501
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
SofortMerchantTransaction.is_all_field_none
def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._monetary_account_id is not None:
        return False
    if self._alias is not None:
        return False
    if self._counterparty_alias is not None:
        return False
    if self._amount_guaranteed is not None:
        return False
    if self._amount_requested is not None:
        return False
    if self._issuer is not None:
        return False
    if self._issuer_authentication_url is not None:
        return False
    if self._status is not None:
        return False
    if self._error_message is not None:
        return False
    if self._transaction_identifier is not None:
        return False
    return True
python
['def', 'is_all_field_none', '(', 'self', ')', ':', 'if', 'self', '.', '_monetary_account_id', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_alias', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_counterparty_alias', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_amount_guaranteed', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_amount_requested', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_issuer', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_issuer_authentication_url', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_status', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_error_message', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_transaction_identifier', 'is', 'not', 'None', ':', 'return', 'False', 'return', 'True']
:rtype: bool
[':', 'rtype', ':', 'bool']
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L15194-L15229
2,502
buildbot/buildbot
master/buildbot/www/hooks/gitlab.py
GitLabHandler.getChanges
def getChanges(self, request):
    """
    Reponds only to POST events and starts the build process

    :arguments:
        request
            the http request object
    """
    expected_secret = isinstance(self.options, dict) and self.options.get('secret')
    if expected_secret:
        received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
        received_secret = bytes2unicode(received_secret)

        p = Properties()
        p.master = self.master
        expected_secret_value = yield p.render(expected_secret)

        if received_secret != expected_secret_value:
            raise ValueError("Invalid secret")
    try:
        content = request.content.read()
        payload = json.loads(bytes2unicode(content))
    except Exception as e:
        raise ValueError("Error loading JSON: " + str(e))
    event_type = request.getHeader(_HEADER_EVENT)
    event_type = bytes2unicode(event_type)
    # newer version of gitlab have a object_kind parameter,
    # which allows not to use the http header
    event_type = payload.get('object_kind', event_type)
    codebase = request.args.get(b'codebase', [None])[0]
    codebase = bytes2unicode(codebase)
    if event_type in ("push", "tag_push", "Push Hook"):
        user = payload['user_name']
        repo = payload['repository']['name']
        repo_url = payload['repository']['url']
        changes = self._process_change(
            payload, user, repo, repo_url, event_type, codebase=codebase)
    elif event_type == 'merge_request':
        changes = self._process_merge_request_change(
            payload, event_type, codebase=codebase)
    else:
        changes = []
    if changes:
        log.msg("Received {} changes from {} gitlab event".format(
            len(changes), event_type))
    return (changes, 'git')
python
['def', 'getChanges', '(', 'self', ',', 'request', ')', ':', 'expected_secret', '=', 'isinstance', '(', 'self', '.', 'options', ',', 'dict', ')', 'and', 'self', '.', 'options', '.', 'get', '(', "'secret'", ')', 'if', 'expected_secret', ':', 'received_secret', '=', 'request', '.', 'getHeader', '(', '_HEADER_GITLAB_TOKEN', ')', 'received_secret', '=', 'bytes2unicode', '(', 'received_secret', ')', 'p', '=', 'Properties', '(', ')', 'p', '.', 'master', '=', 'self', '.', 'master', 'expected_secret_value', '=', 'yield', 'p', '.', 'render', '(', 'expected_secret', ')', 'if', 'received_secret', '!=', 'expected_secret_value', ':', 'raise', 'ValueError', '(', '"Invalid secret"', ')', 'try', ':', 'content', '=', 'request', '.', 'content', '.', 'read', '(', ')', 'payload', '=', 'json', '.', 'loads', '(', 'bytes2unicode', '(', 'content', ')', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ValueError', '(', '"Error loading JSON: "', '+', 'str', '(', 'e', ')', ')', 'event_type', '=', 'request', '.', 'getHeader', '(', '_HEADER_EVENT', ')', 'event_type', '=', 'bytes2unicode', '(', 'event_type', ')', '# newer version of gitlab have a object_kind parameter,', '# which allows not to use the http header', 'event_type', '=', 'payload', '.', 'get', '(', "'object_kind'", ',', 'event_type', ')', 'codebase', '=', 'request', '.', 'args', '.', 'get', '(', "b'codebase'", ',', '[', 'None', ']', ')', '[', '0', ']', 'codebase', '=', 'bytes2unicode', '(', 'codebase', ')', 'if', 'event_type', 'in', '(', '"push"', ',', '"tag_push"', ',', '"Push Hook"', ')', ':', 'user', '=', 'payload', '[', "'user_name'", ']', 'repo', '=', 'payload', '[', "'repository'", ']', '[', "'name'", ']', 'repo_url', '=', 'payload', '[', "'repository'", ']', '[', "'url'", ']', 'changes', '=', 'self', '.', '_process_change', '(', 'payload', ',', 'user', ',', 'repo', ',', 'repo_url', ',', 'event_type', ',', 'codebase', '=', 'codebase', ')', 'elif', 'event_type', '==', "'merge_request'", ':', 'changes', '=', 'self', '.', '_process_merge_request_change', '(', 'payload', ',', 'event_type', ',', 'codebase', '=', 'codebase', ')', 'else', ':', 'changes', '=', '[', ']', 'if', 'changes', ':', 'log', '.', 'msg', '(', '"Received {} changes from {} gitlab event"', '.', 'format', '(', 'len', '(', 'changes', ')', ',', 'event_type', ')', ')', 'return', '(', 'changes', ',', "'git'", ')']
Reponds only to POST events and starts the build process :arguments: request the http request object
['Reponds', 'only', 'to', 'POST', 'events', 'and', 'starts', 'the', 'build', 'process']
train
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/www/hooks/gitlab.py#L157-L202
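For context, a hedged master.cfg fragment showing how this GitLab hook (and the shared-secret check in getChanges) is typically wired up; the option names follow buildbot's change-hook documentation as best recalled here, and the secret value is hypothetical.

# master.cfg fragment (hypothetical values)
c['www'] = dict(
    port=8010,
    change_hook_dialects={
        'gitlab': {
            'secret': 'my-webhook-secret',  # compared against the X-Gitlab-Token header
        },
    },
)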
2,503
pypa/setuptools
setuptools/msvc.py
SystemInfo._guess_vc
def _guess_vc(self):
    """
    Locate Visual C for 2017
    """
    if self.vc_ver <= 14.0:
        return

    default = r'VC\Tools\MSVC'
    guess_vc = os.path.join(self.VSInstallDir, default)

    # Subdir with VC exact version as name
    try:
        vc_exact_ver = os.listdir(guess_vc)[-1]
        return os.path.join(guess_vc, vc_exact_ver)
    except (OSError, IOError, IndexError):
        pass
python
['def', '_guess_vc', '(', 'self', ')', ':', 'if', 'self', '.', 'vc_ver', '<=', '14.0', ':', 'return', 'default', '=', "r'VC\\Tools\\MSVC'", 'guess_vc', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'VSInstallDir', ',', 'default', ')', '# Subdir with VC exact version as name', 'try', ':', 'vc_exact_ver', '=', 'os', '.', 'listdir', '(', 'guess_vc', ')', '[', '-', '1', ']', 'return', 'os', '.', 'path', '.', 'join', '(', 'guess_vc', ',', 'vc_exact_ver', ')', 'except', '(', 'OSError', ',', 'IOError', ',', 'IndexError', ')', ':', 'pass']
Locate Visual C for 2017
['Locate', 'Visual', 'C', 'for', '2017']
train
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/msvc.py#L559-L573
2,504
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/workflow/workflow_cumulative_statistics.py
WorkflowCumulativeStatisticsList.get
def get(self):
    """
    Constructs a WorkflowCumulativeStatisticsContext

    :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
    """
    return WorkflowCumulativeStatisticsContext(
        self._version,
        workspace_sid=self._solution['workspace_sid'],
        workflow_sid=self._solution['workflow_sid'],
    )
python
['def', 'get', '(', 'self', ')', ':', 'return', 'WorkflowCumulativeStatisticsContext', '(', 'self', '.', '_version', ',', 'workspace_sid', '=', 'self', '.', '_solution', '[', "'workspace_sid'", ']', ',', 'workflow_sid', '=', 'self', '.', '_solution', '[', "'workflow_sid'", ']', ',', ')']
Constructs a WorkflowCumulativeStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_cumulative_statistics.WorkflowCumulativeStatisticsContext
['Constructs', 'a', 'WorkflowCumulativeStatisticsContext']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/workflow/workflow_cumulative_statistics.py#L37-L48
2,505
saltstack/salt
salt/cloud/libcloudfuncs.py
get_node
def get_node(conn, name):
    '''
    Return a libcloud node for the named VM
    '''
    nodes = conn.list_nodes()
    for node in nodes:
        if node.name == name:
            __utils__['cloud.cache_node'](salt.utils.data.simple_types_filter(node.__dict__),
                                          __active_provider_name__,
                                          __opts__)
            return node
python
['def', 'get_node', '(', 'conn', ',', 'name', ')', ':', 'nodes', '=', 'conn', '.', 'list_nodes', '(', ')', 'for', 'node', 'in', 'nodes', ':', 'if', 'node', '.', 'name', '==', 'name', ':', '__utils__', '[', "'cloud.cache_node'", ']', '(', 'salt', '.', 'utils', '.', 'data', '.', 'simple_types_filter', '(', 'node', '.', '__dict__', ')', ',', '__active_provider_name__', ',', '__opts__', ')', 'return', 'node']
Return a libcloud node for the named VM
['Return', 'a', 'libcloud', 'node', 'for', 'the', 'named', 'VM']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L118-L126
2,506
skylander86/ycsettings
ycsettings/settings.py
Settings._load_settings_from_source
def _load_settings_from_source(self, source):
    """
    Loads the relevant settings from the specified ``source``.

    :returns: a standard :func:`dict` containing the settings from the source
    :rtype: dict
    """

    if not source:
        pass

    elif source == 'env_settings_uri':
        for env_settings_uri_key in self.env_settings_uri_keys:
            env_settings_uri = self._search_environ(env_settings_uri_key)
            if env_settings_uri:
                logger.debug('Found {} in the environment.'.format(env_settings_uri_key))
                yield env_settings_uri, self._load_settings_from_uri(env_settings_uri)
            #end if
        #end for

    elif source == 'env':
        logger.debug('Loaded {} settings from the environment.'.format(len(os.environ)))
        yield source, dict(os.environ.items())

    elif isinstance(source, ParseResult):
        settings = self._load_settings_from_uri(source)
        yield source, settings

    elif isinstance(source, str):
        try:
            spec = importlib.util.find_spec(source)
        except (AttributeError, ImportError):
            spec = None

        settings = self._load_settings_from_spec(spec, name=source)
        if settings is None:
            _, ext = os.path.splitext(source)
            with uri_open(source, 'rb') as f:
                yield source, self._load_settings_from_file(f, ext=ext)
        else:
            yield source, settings
        #end if

    elif hasattr(source, 'read'):
        yield source.name, self._load_settings_from_file(source)

    elif hasattr(source, 'items'):
        source_type = type(source).__name__
        for dict_settings_uri_key in self.dict_settings_uri_keys:
            if dict_settings_uri_key and dict_settings_uri_key in source and source[dict_settings_uri_key]:
                logger.debug('Found {} in the dict-like object <{}>.'.format(dict_settings_uri_key, source_type))
                yield from self._load_settings_from_source(source[dict_settings_uri_key])
            #end if
        #end for

        logger.debug('Loaded {} settings from dict-like object <{}>.'.format(len(source), source_type))
        yield self._get_unique_name(source_type), source

    else:
        source_type = type(source).__name__
        for object_settings_uri_key in self.object_settings_uri_keys:
            if object_settings_uri_key and hasattr(source, object_settings_uri_key) and getattr(source, object_settings_uri_key):
                logger.debug('Found {} in the object <{}>.'.format(object_settings_uri_key, source_type))
                yield from self._load_settings_from_source(getattr(source, object_settings_uri_key))
            #end if
        #end for

        settings = dict((k, v) for k, v in source.__dict__.items() if not k.startswith('__'))
        logger.debug('Loaded {} settings from object <{}>.'.format(len(settings), source_type))
        yield self._get_unique_name(source_type), settings
python
['def', '_load_settings_from_source', '(', 'self', ',', 'source', ')', ':', 'if', 'not', 'source', ':', 'pass', 'elif', 'source', '==', "'env_settings_uri'", ':', 'for', 'env_settings_uri_key', 'in', 'self', '.', 'env_settings_uri_keys', ':', 'env_settings_uri', '=', 'self', '.', '_search_environ', '(', 'env_settings_uri_key', ')', 'if', 'env_settings_uri', ':', 'logger', '.', 'debug', '(', "'Found {} in the environment.'", '.', 'format', '(', 'env_settings_uri_key', ')', ')', 'yield', 'env_settings_uri', ',', 'self', '.', '_load_settings_from_uri', '(', 'env_settings_uri', ')', '#end if', '#end for', 'elif', 'source', '==', "'env'", ':', 'logger', '.', 'debug', '(', "'Loaded {} settings from the environment.'", '.', 'format', '(', 'len', '(', 'os', '.', 'environ', ')', ')', ')', 'yield', 'source', ',', 'dict', '(', 'os', '.', 'environ', '.', 'items', '(', ')', ')', 'elif', 'isinstance', '(', 'source', ',', 'ParseResult', ')', ':', 'settings', '=', 'self', '.', '_load_settings_from_uri', '(', 'source', ')', 'yield', 'source', ',', 'settings', 'elif', 'isinstance', '(', 'source', ',', 'str', ')', ':', 'try', ':', 'spec', '=', 'importlib', '.', 'util', '.', 'find_spec', '(', 'source', ')', 'except', '(', 'AttributeError', ',', 'ImportError', ')', ':', 'spec', '=', 'None', 'settings', '=', 'self', '.', '_load_settings_from_spec', '(', 'spec', ',', 'name', '=', 'source', ')', 'if', 'settings', 'is', 'None', ':', '_', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'source', ')', 'with', 'uri_open', '(', 'source', ',', "'rb'", ')', 'as', 'f', ':', 'yield', 'source', ',', 'self', '.', '_load_settings_from_file', '(', 'f', ',', 'ext', '=', 'ext', ')', 'else', ':', 'yield', 'source', ',', 'settings', '#end if', 'elif', 'hasattr', '(', 'source', ',', "'read'", ')', ':', 'yield', 'source', '.', 'name', ',', 'self', '.', '_load_settings_from_file', '(', 'source', ')', 'elif', 'hasattr', '(', 'source', ',', "'items'", ')', ':', 'source_type', '=', 'type', '(', 'source', ')', '.', '__name__', 'for', 'dict_settings_uri_key', 'in', 'self', '.', 'dict_settings_uri_keys', ':', 'if', 'dict_settings_uri_key', 'and', 'dict_settings_uri_key', 'in', 'source', 'and', 'source', '[', 'dict_settings_uri_key', ']', ':', 'logger', '.', 'debug', '(', "'Found {} in the dict-like object <{}>.'", '.', 'format', '(', 'dict_settings_uri_key', ',', 'source_type', ')', ')', 'yield', 'from', 'self', '.', '_load_settings_from_source', '(', 'source', '[', 'dict_settings_uri_key', ']', ')', '#end if', '#end for', 'logger', '.', 'debug', '(', "'Loaded {} settings from dict-like object <{}>.'", '.', 'format', '(', 'len', '(', 'source', ')', ',', 'source_type', ')', ')', 'yield', 'self', '.', '_get_unique_name', '(', 'source_type', ')', ',', 'source', 'else', ':', 'source_type', '=', 'type', '(', 'source', ')', '.', '__name__', 'for', 'object_settings_uri_key', 'in', 'self', '.', 'object_settings_uri_keys', ':', 'if', 'object_settings_uri_key', 'and', 'hasattr', '(', 'source', ',', 'object_settings_uri_key', ')', 'and', 'getattr', '(', 'source', ',', 'object_settings_uri_key', ')', ':', 'logger', '.', 'debug', '(', "'Found {} in the object <{}>.'", '.', 'format', '(', 'object_settings_uri_key', ',', 'source_type', ')', ')', 'yield', 'from', 'self', '.', '_load_settings_from_source', '(', 'getattr', '(', 'source', ',', 'object_settings_uri_key', ')', ')', '#end if', '#end for', 'settings', '=', 'dict', '(', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'source', '.', '__dict__', '.', 'items', '(', ')', 'if', 'not', 
'k', '.', 'startswith', '(', "'__'", ')', ')', 'logger', '.', 'debug', '(', "'Loaded {} settings from object <{}>.'", '.', 'format', '(', 'len', '(', 'settings', ')', ',', 'source_type', ')', ')', 'yield', 'self', '.', '_get_unique_name', '(', 'source_type', ')', ',', 'settings']
Loads the relevant settings from the specified ``source``. :returns: a standard :func:`dict` containing the settings from the source :rtype: dict
['Loads', 'the', 'relevant', 'settings', 'from', 'the', 'specified', 'source', '.']
train
https://github.com/skylander86/ycsettings/blob/3f363673a6cb1823ebb18c4d640d87aa49202344/ycsettings/settings.py#L93-L158
2,507
pyvisa/pyvisa
pyvisa/resources/resource.py
Resource.enable_event
def enable_event(self, event_type, mechanism, context=None):
    """Enable event occurrences for specified event types and mechanisms
    in this resource.

    :param event_type: Logical event identifier.
    :param mechanism: Specifies event handling mechanisms to be enabled.
                      (Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR)
    :param context: Not currently used, leave as None.
    """
    self.visalib.enable_event(self.session, event_type, mechanism, context)
python
['def', 'enable_event', '(', 'self', ',', 'event_type', ',', 'mechanism', ',', 'context', '=', 'None', ')', ':', 'self', '.', 'visalib', '.', 'enable_event', '(', 'self', '.', 'session', ',', 'event_type', ',', 'mechanism', ',', 'context', ')']
Enable event occurrences for specified event types and mechanisms in this resource. :param event_type: Logical event identifier. :param mechanism: Specifies event handling mechanisms to be enabled. (Constants.VI_QUEUE, .VI_HNDLR, .VI_SUSPEND_HNDLR) :param context: Not currently used, leave as None.
['Enable', 'event', 'occurrences', 'for', 'specified', 'event', 'types', 'and', 'mechanisms', 'in', 'this', 'resource', '.']
train
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/resources/resource.py#L321-L329
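A hedged usage sketch for enable_event. The instrument address is hypothetical, and VI_EVENT_SERVICE_REQ and VI_QUEUE are the standard VISA values that pyvisa exposes under pyvisa.constants, matching the Constants.VI_QUEUE hint in the docstring above.

import pyvisa
from pyvisa import constants

rm = pyvisa.ResourceManager()
inst = rm.open_resource('GPIB0::12::INSTR')  # hypothetical instrument address

# Queue service-request events on this resource; context stays None as documented.
inst.enable_event(constants.VI_EVENT_SERVICE_REQ, constants.VI_QUEUE)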
2,508
manns/pyspread
pyspread/src/lib/_grid_cairo_renderer.py
CellBorders.get_r
def get_r(self):
    """Returns the right border of the cell"""

    start_point, end_point = self._get_right_line_coordinates()
    width = self._get_right_line_width()
    color = self._get_right_line_color()

    return CellBorder(start_point, end_point, width, color)
python
['def', 'get_r', '(', 'self', ')', ':', 'start_point', ',', 'end_point', '=', 'self', '.', '_get_right_line_coordinates', '(', ')', 'width', '=', 'self', '.', '_get_right_line_width', '(', ')', 'color', '=', 'self', '.', '_get_right_line_color', '(', ')', 'return', 'CellBorder', '(', 'start_point', ',', 'end_point', ',', 'width', ',', 'color', ')']
Returns the right border of the cell
['Returns', 'the', 'right', 'border', 'of', 'the', 'cell']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L1202-L1209
2,509
humilis/humilis-lambdautils
lambdautils/kinesis.py
send_to_kinesis_stream
def send_to_kinesis_stream(events, stream_name, partition_key=None,
                           packer=None, serializer=json.dumps):
    """Sends events to a Kinesis stream."""
    if not events:
        logger.info("No events provided: nothing delivered to Firehose")
        return

    records = []
    for event in events:
        if not partition_key:
            partition_key_value = str(uuid.uuid4())
        elif hasattr(partition_key, "__call__"):
            partition_key_value = partition_key(event)
        else:
            partition_key_value = partition_key

        if not isinstance(event, str):
            event = serializer(event)

        if packer:
            event = packer(event)

        record = {"Data": event,
                  "PartitionKey": partition_key_value}
        records.append(record)

    kinesis = boto3.client("kinesis")
    resp = kinesis.put_records(StreamName=stream_name, Records=records)
    return resp
python
['def', 'send_to_kinesis_stream', '(', 'events', ',', 'stream_name', ',', 'partition_key', '=', 'None', ',', 'packer', '=', 'None', ',', 'serializer', '=', 'json', '.', 'dumps', ')', ':', 'if', 'not', 'events', ':', 'logger', '.', 'info', '(', '"No events provided: nothing delivered to Firehose"', ')', 'return', 'records', '=', '[', ']', 'for', 'event', 'in', 'events', ':', 'if', 'not', 'partition_key', ':', 'partition_key_value', '=', 'str', '(', 'uuid', '.', 'uuid4', '(', ')', ')', 'elif', 'hasattr', '(', 'partition_key', ',', '"__call__"', ')', ':', 'partition_key_value', '=', 'partition_key', '(', 'event', ')', 'else', ':', 'partition_key_value', '=', 'partition_key', 'if', 'not', 'isinstance', '(', 'event', ',', 'str', ')', ':', 'event', '=', 'serializer', '(', 'event', ')', 'if', 'packer', ':', 'event', '=', 'packer', '(', 'event', ')', 'record', '=', '{', '"Data"', ':', 'event', ',', '"PartitionKey"', ':', 'partition_key_value', '}', 'records', '.', 'append', '(', 'record', ')', 'kinesis', '=', 'boto3', '.', 'client', '(', '"kinesis"', ')', 'resp', '=', 'kinesis', '.', 'put_records', '(', 'StreamName', '=', 'stream_name', ',', 'Records', '=', 'records', ')', 'return', 'resp']
Sends events to a Kinesis stream.
['Sends', 'events', 'to', 'a', 'Kinesis', 'stream', '.']
train
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/kinesis.py#L108-L136
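A short usage sketch for send_to_kinesis_stream as defined above. The stream name and events are hypothetical; the callable partition_key relies on the hasattr(partition_key, "__call__") branch shown in the function.

# Hypothetical events and stream name.
events = [
    {"user_id": "u-1", "action": "login"},
    {"user_id": "u-2", "action": "logout"},
]

resp = send_to_kinesis_stream(
    events,
    "my-event-stream",                             # hypothetical stream name
    partition_key=lambda event: event["user_id"],  # route each user's events to one shard
)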
2,510
saltstack/salt
salt/modules/pagerduty_util.py
get_escalation_policies
def get_escalation_policies(profile='pagerduty', subdomain=None, api_key=None):
    '''
    List escalation_policies belonging to this account

    CLI Example:

        salt myminion pagerduty.get_escalation_policies
    '''
    return _list_items(
        'escalation_policies',
        'id',
        profile=profile,
        subdomain=subdomain,
        api_key=api_key,
    )
python
['def', 'get_escalation_policies', '(', 'profile', '=', "'pagerduty'", ',', 'subdomain', '=', 'None', ',', 'api_key', '=', 'None', ')', ':', 'return', '_list_items', '(', "'escalation_policies'", ',', "'id'", ',', 'profile', '=', 'profile', ',', 'subdomain', '=', 'subdomain', ',', 'api_key', '=', 'api_key', ',', ')']
List escalation_policies belonging to this account CLI Example: salt myminion pagerduty.get_escalation_policies
['List', 'escalation_policies', 'belonging', 'to', 'this', 'account']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L88-L103
2,511
twisted/epsilon
epsilon/expose.py
Exposer.get
def get(self, obj, key):
    """
    Retrieve 'key' from an instance of a class which previously exposed it.

    @param key: a hashable object, previously passed to L{Exposer.expose}.

    @return: the object which was exposed with the given name on obj's key.

    @raise MethodNotExposed: when the key in question was not exposed with
    this exposer.
    """
    if key not in self._exposed:
        raise MethodNotExposed()
    rightFuncs = self._exposed[key]
    T = obj.__class__
    seen = {}
    for subT in inspect.getmro(T):
        for name, value in subT.__dict__.items():
            for rightFunc in rightFuncs:
                if value is rightFunc:
                    if name in seen:
                        raise MethodNotExposed()
                    return value.__get__(obj, T)
            seen[name] = True
    raise MethodNotExposed()
python
['def', 'get', '(', 'self', ',', 'obj', ',', 'key', ')', ':', 'if', 'key', 'not', 'in', 'self', '.', '_exposed', ':', 'raise', 'MethodNotExposed', '(', ')', 'rightFuncs', '=', 'self', '.', '_exposed', '[', 'key', ']', 'T', '=', 'obj', '.', '__class__', 'seen', '=', '{', '}', 'for', 'subT', 'in', 'inspect', '.', 'getmro', '(', 'T', ')', ':', 'for', 'name', ',', 'value', 'in', 'subT', '.', '__dict__', '.', 'items', '(', ')', ':', 'for', 'rightFunc', 'in', 'rightFuncs', ':', 'if', 'value', 'is', 'rightFunc', ':', 'if', 'name', 'in', 'seen', ':', 'raise', 'MethodNotExposed', '(', ')', 'return', 'value', '.', '__get__', '(', 'obj', ',', 'T', ')', 'seen', '[', 'name', ']', '=', 'True', 'raise', 'MethodNotExposed', '(', ')']
Retrieve 'key' from an instance of a class which previously exposed it. @param key: a hashable object, previously passed to L{Exposer.expose}. @return: the object which was exposed with the given name on obj's key. @raise MethodNotExposed: when the key in question was not exposed with this exposer.
['Retrieve', 'key', 'from', 'an', 'instance', 'of', 'a', 'class', 'which', 'previously', 'exposed', 'it', '.']
train
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/expose.py#L117-L141
2,512
mayfield/shellish
shellish/data.py
ttl_cache
def ttl_cache(maxage, maxsize=128):
    """ A time-to-live caching decorator that follows after the style of
    lru_cache.  The `maxage` argument is time-to-live in seconds for each
    cache result.  Any cache entries over the maxage are lazily replaced. """

    def decorator(inner_func):
        wrapper = make_ttl_cache_wrapper(inner_func, maxage, maxsize)
        return functools.update_wrapper(wrapper, inner_func)
    return decorator
python
['def', 'ttl_cache', '(', 'maxage', ',', 'maxsize', '=', '128', ')', ':', 'def', 'decorator', '(', 'inner_func', ')', ':', 'wrapper', '=', 'make_ttl_cache_wrapper', '(', 'inner_func', ',', 'maxage', ',', 'maxsize', ')', 'return', 'functools', '.', 'update_wrapper', '(', 'wrapper', ',', 'inner_func', ')', 'return', 'decorator']
A time-to-live caching decorator that follows after the style of lru_cache. The `maxage` argument is time-to-live in seconds for each cache result. Any cache entries over the maxage are lazily replaced.
['A', 'time', '-', 'to', '-', 'live', 'caching', 'decorator', 'that', 'follows', 'after', 'the', 'style', 'of', 'lru_cache', '.', 'The', 'maxage', 'argument', 'is', 'time', '-', 'to', '-', 'live', 'in', 'seconds', 'for', 'each', 'cache', 'result', '.', 'Any', 'cache', 'entries', 'over', 'the', 'maxage', 'are', 'lazily', 'replaced', '.']
train
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/data.py#L207-L215
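A brief usage sketch for the ttl_cache decorator above; fetch_exchange_rate and expensive_api_call are hypothetical names used only to show the caching behaviour.

@ttl_cache(maxage=30, maxsize=256)
def fetch_exchange_rate(currency):
    # Hypothetical slow lookup; its result is reused for up to 30 seconds.
    return expensive_api_call(currency)

rate = fetch_exchange_rate("EUR")  # computed and cached
rate = fetch_exchange_rate("EUR")  # served from the cache until the entry expires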
2,513
christophertbrown/bioscripts
ctbBio/compare_aligned.py
to_dictionary
def to_dictionary(pw, print_list):
    """
    - convert list of comparisons to dictionary
    - print list of pidents (if requested) to stderr
    """
    pairs = {}
    for p in pw:
        a, b, pident = p
        if a not in pairs:
            pairs[a] = {a: '-'}
        if b not in pairs:
            pairs[b] = {b: '-'}
        pairs[a][b] = pident
        pairs[b][a] = pident
        if print_list is True:
            A, B = a.split('>')[1], b.split('>')[1]
            print('\t'.join([str(i) for i in [A, B, pident]]), file=sys.stderr)
            print('\t'.join([str(i) for i in [B, A, pident]]), file=sys.stderr)
    return pairs
python
['def', 'to_dictionary', '(', 'pw', ',', 'print_list', ')', ':', 'pairs', '=', '{', '}', 'for', 'p', 'in', 'pw', ':', 'a', ',', 'b', ',', 'pident', '=', 'p', 'if', 'a', 'not', 'in', 'pairs', ':', 'pairs', '[', 'a', ']', '=', '{', 'a', ':', "'-'", '}', 'if', 'b', 'not', 'in', 'pairs', ':', 'pairs', '[', 'b', ']', '=', '{', 'b', ':', "'-'", '}', 'pairs', '[', 'a', ']', '[', 'b', ']', '=', 'pident', 'pairs', '[', 'b', ']', '[', 'a', ']', '=', 'pident', 'if', 'print_list', 'is', 'True', ':', 'A', ',', 'B', '=', 'a', '.', 'split', '(', "'>'", ')', '[', '1', ']', ',', 'b', '.', 'split', '(', "'>'", ')', '[', '1', ']', 'print', '(', "'\\t'", '.', 'join', '(', '[', 'str', '(', 'i', ')', 'for', 'i', 'in', '[', 'A', ',', 'B', ',', 'pident', ']', ']', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'print', '(', "'\\t'", '.', 'join', '(', '[', 'str', '(', 'i', ')', 'for', 'i', 'in', '[', 'B', ',', 'A', ',', 'pident', ']', ']', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'pairs']
- convert list of comparisons to dictionary - print list of pidents (if requested) to stderr
['-', 'convert', 'list', 'of', 'comparisons', 'to', 'dictionary', '-', 'print', 'list', 'of', 'pidents', '(', 'if', 'requested', ')', 'to', 'stderr']
train
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L112-L130
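A small usage sketch for to_dictionary. The identifiers follow the '>name' convention implied by the a.split('>')[1] calls in the function; the identity values are made up.

# Hypothetical pairwise identities: (id_a, id_b, percent_identity).
pw = [
    ('>seqA', '>seqB', 97.5),
    ('>seqA', '>seqC', 88.1),
]

pairs = to_dictionary(pw, print_list=False)
# pairs['>seqA']['>seqB'] == 97.5, and each sequence maps to '-' against itself.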
2,514
Cito/DBUtils
DBUtils/SteadyDB.py
SteadyDBCursor.close
def close(self):
    """Close the tough cursor.

    It will not complain if you close it more than once.
    """
    if not self._closed:
        try:
            self._cursor.close()
        except Exception:
            pass
        self._closed = True
python
['def', 'close', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_closed', ':', 'try', ':', 'self', '.', '_cursor', '.', 'close', '(', ')', 'except', 'Exception', ':', 'pass', 'self', '.', '_closed', '=', 'True']
Close the tough cursor. It will not complain if you close it more than once.
['Close', 'the', 'tough', 'cursor', '.']
train
https://github.com/Cito/DBUtils/blob/90e8825e038f08c82044b8e50831480175fa026a/DBUtils/SteadyDB.py#L576-L587
2,515
manns/pyspread
pyspread/src/lib/vlc.py
libvlc_media_library_media_list
def libvlc_media_library_media_list(p_mlib):
    '''Get media library subitems.
    @param p_mlib: media library object.
    @return: media list subitems.
    '''
    f = _Cfunctions.get('libvlc_media_library_media_list', None) or \
        _Cfunction('libvlc_media_library_media_list', ((1,),), class_result(MediaList),
                    ctypes.c_void_p, MediaLibrary)
    return f(p_mlib)
python
['def', 'libvlc_media_library_media_list', '(', 'p_mlib', ')', ':', 'f', '=', '_Cfunctions', '.', 'get', '(', "'libvlc_media_library_media_list'", ',', 'None', ')', 'or', '_Cfunction', '(', "'libvlc_media_library_media_list'", ',', '(', '(', '1', ',', ')', ',', ')', ',', 'class_result', '(', 'MediaList', ')', ',', 'ctypes', '.', 'c_void_p', ',', 'MediaLibrary', ')', 'return', 'f', '(', 'p_mlib', ')']
Get media library subitems. @param p_mlib: media library object. @return: media list subitems.
['Get', 'media', 'library', 'subitems', '.']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L4592-L4600
2,516
aiscenblue/flask-blueprint
flask_blueprint/package_extractor.py
PackageExtractor.__extract_modules
def __extract_modules(self, loader, name, is_pkg):

    """ if module found load module and save all attributes in the module found """
    mod = loader.find_module(name).load_module(name)

    """ find the attribute method on each module """
    if hasattr(mod, '__method__'):

        """ register to the blueprint if method attribute found """
        module_router = ModuleRouter(mod,
                                     ignore_names=self.__serialize_module_paths()
                                     ).register_route(app=self.application, name=name)

        self.__routers.extend(module_router.routers)
        self.__modules.append(mod)

    else:
        """ prompt not found notification """
        # print('{} has no module attribute method'.format(mod))
        pass
python
['def', '__extract_modules', '(', 'self', ',', 'loader', ',', 'name', ',', 'is_pkg', ')', ':', 'mod', '=', 'loader', '.', 'find_module', '(', 'name', ')', '.', 'load_module', '(', 'name', ')', '""" find the attribute method on each module """', 'if', 'hasattr', '(', 'mod', ',', "'__method__'", ')', ':', '""" register to the blueprint if method attribute found """', 'module_router', '=', 'ModuleRouter', '(', 'mod', ',', 'ignore_names', '=', 'self', '.', '__serialize_module_paths', '(', ')', ')', '.', 'register_route', '(', 'app', '=', 'self', '.', 'application', ',', 'name', '=', 'name', ')', 'self', '.', '__routers', '.', 'extend', '(', 'module_router', '.', 'routers', ')', 'self', '.', '__modules', '.', 'append', '(', 'mod', ')', 'else', ':', '""" prompt not found notification """', "# print('{} has no module attribute method'.format(mod))", 'pass']
if module found load module and save all attributes in the module found
['if', 'module', 'found', 'load', 'module', 'and', 'save', 'all', 'attributes', 'in', 'the', 'module', 'found']
train
https://github.com/aiscenblue/flask-blueprint/blob/c558d9d5d9630bab53c297ce2c33f4ceb3874724/flask_blueprint/package_extractor.py#L59-L78
2,517
kpdyer/regex2dfa
third_party/re2/lib/codereview/codereview.py
mail
def mail(ui, repo, *pats, **opts):
    """mail a change for review

    Uploads a patch to the code review server and then sends mail
    to the reviewer and CC list asking for a review.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)

    cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
    if err != "":
        raise hg_util.Abort(err)
    cl.Upload(ui, repo, gofmt_just_warn=True)
    if not cl.reviewer:
        # If no reviewer is listed, assign the review to defaultcc.
        # This makes sure that it appears in the
        # codereview.appspot.com/user/defaultcc
        # page, so that it doesn't get dropped on the floor.
        if not defaultcc:
            raise hg_util.Abort("no reviewers listed in CL")
        cl.cc = Sub(cl.cc, defaultcc)
        cl.reviewer = defaultcc
        cl.Flush(ui, repo)

    if cl.files == []:
        raise hg_util.Abort("no changed files, not sending mail")

    cl.Mail(ui, repo)
python
['def', 'mail', '(', 'ui', ',', 'repo', ',', '*', 'pats', ',', '*', '*', 'opts', ')', ':', 'if', 'codereview_disabled', ':', 'raise', 'hg_util', '.', 'Abort', '(', 'codereview_disabled', ')', 'cl', ',', 'err', '=', 'CommandLineCL', '(', 'ui', ',', 'repo', ',', 'pats', ',', 'opts', ',', 'op', '=', '"mail"', ',', 'defaultcc', '=', 'defaultcc', ')', 'if', 'err', '!=', '""', ':', 'raise', 'hg_util', '.', 'Abort', '(', 'err', ')', 'cl', '.', 'Upload', '(', 'ui', ',', 'repo', ',', 'gofmt_just_warn', '=', 'True', ')', 'if', 'not', 'cl', '.', 'reviewer', ':', '# If no reviewer is listed, assign the review to defaultcc.', '# This makes sure that it appears in the ', '# codereview.appspot.com/user/defaultcc', "# page, so that it doesn't get dropped on the floor.", 'if', 'not', 'defaultcc', ':', 'raise', 'hg_util', '.', 'Abort', '(', '"no reviewers listed in CL"', ')', 'cl', '.', 'cc', '=', 'Sub', '(', 'cl', '.', 'cc', ',', 'defaultcc', ')', 'cl', '.', 'reviewer', '=', 'defaultcc', 'cl', '.', 'Flush', '(', 'ui', ',', 'repo', ')', 'if', 'cl', '.', 'files', '==', '[', ']', ':', 'raise', 'hg_util', '.', 'Abort', '(', '"no changed files, not sending mail"', ')', 'cl', '.', 'Mail', '(', 'ui', ',', 'repo', ')']
mail a change for review Uploads a patch to the code review server and then sends mail to the reviewer and CC list asking for a review.
['mail', 'a', 'change', 'for', 'review']
train
https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L1811-L1838
2,518
kevinconway/daemons
daemons/daemonize/simple.py
SimpleDaemonizeManager.daemonize
def daemonize(self):
    """Double fork and set the pid."""
    self._double_fork()

    # Write pidfile.
    self.pid = os.getpid()

    LOG.info(
        "Succesfully daemonized process {0}.".format(self.pid)
    )
python
['def', 'daemonize', '(', 'self', ')', ':', 'self', '.', '_double_fork', '(', ')', '# Write pidfile.', 'self', '.', 'pid', '=', 'os', '.', 'getpid', '(', ')', 'LOG', '.', 'info', '(', '"Succesfully daemonized process {0}."', '.', 'format', '(', 'self', '.', 'pid', ')', ')']
Double fork and set the pid.
['Double', 'fork', 'and', 'set', 'the', 'pid', '.']
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/daemonize/simple.py#L22-L31
2,519
pydata/xarray
xarray/core/indexing.py
get_indexer_nd
def get_indexer_nd(index, labels, method=None, tolerance=None):
    """ Call pd.Index.get_indexer(labels). """
    kwargs = _index_method_kwargs(method, tolerance)

    flat_labels = np.ravel(labels)
    flat_indexer = index.get_indexer(flat_labels, **kwargs)
    indexer = flat_indexer.reshape(labels.shape)
    return indexer
python
['def', 'get_indexer_nd', '(', 'index', ',', 'labels', ',', 'method', '=', 'None', ',', 'tolerance', '=', 'None', ')', ':', 'kwargs', '=', '_index_method_kwargs', '(', 'method', ',', 'tolerance', ')', 'flat_labels', '=', 'np', '.', 'ravel', '(', 'labels', ')', 'flat_indexer', '=', 'index', '.', 'get_indexer', '(', 'flat_labels', ',', '*', '*', 'kwargs', ')', 'indexer', '=', 'flat_indexer', '.', 'reshape', '(', 'labels', '.', 'shape', ')', 'return', 'indexer']
Call pd.Index.get_indexer(labels).
['Call', 'pd', '.', 'Index', '.', 'get_indexer', '(', 'labels', ')', '.']
train
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/indexing.py#L111-L118
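The same ravel / get_indexer / reshape pattern, written directly against pandas since the _index_method_kwargs helper is not shown in this row; the index and labels are hypothetical.

import numpy as np
import pandas as pd

index = pd.Index(['a', 'b', 'c'])
labels = np.array([['a', 'c'],
                   ['b', 'z']])  # 'z' is absent from the index, so it maps to -1

flat_indexer = index.get_indexer(np.ravel(labels))
indexer = flat_indexer.reshape(labels.shape)
# indexer == [[0, 2], [1, -1]]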
2,520
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py
set_attribute_label
def set_attribute_label(series, resource_labels, attribute_key,
                        canonical_key=None, label_value_prefix=''):
    """Set a label to timeseries that can be used for monitoring
    :param series: TimeSeries object based on view data
    :param resource_labels: collection of labels
    :param attribute_key: actual label key
    :param canonical_key: exporter specific label key, Optional
    :param label_value_prefix: exporter specific label value prefix, Optional
    """
    if attribute_key in resource_labels:
        if canonical_key is None:
            canonical_key = attribute_key

        series.resource.labels[canonical_key] = \
            label_value_prefix + resource_labels[attribute_key]
python
['def', 'set_attribute_label', '(', 'series', ',', 'resource_labels', ',', 'attribute_key', ',', 'canonical_key', '=', 'None', ',', 'label_value_prefix', '=', "''", ')', ':', 'if', 'attribute_key', 'in', 'resource_labels', ':', 'if', 'canonical_key', 'is', 'None', ':', 'canonical_key', '=', 'attribute_key', 'series', '.', 'resource', '.', 'labels', '[', 'canonical_key', ']', '=', 'label_value_prefix', '+', 'resource_labels', '[', 'attribute_key', ']']
Set a label to timeseries that can be used for monitoring :param series: TimeSeries object based on view data :param resource_labels: collection of labels :param attribute_key: actual label key :param canonical_key: exporter specific label key, Optional :param label_value_prefix: exporter specific label value prefix, Optional
['Set', 'a', 'label', 'to', 'timeseries', 'that', 'can', 'be', 'used', 'for', 'monitoring', ':', 'param', 'series', ':', 'TimeSeries', 'object', 'based', 'on', 'view', 'data', ':', 'param', 'resource_labels', ':', 'collection', 'of', 'labels', ':', 'param', 'attribute_key', ':', 'actual', 'label', 'key', ':', 'param', 'canonical_key', ':', 'exporter', 'specific', 'label', 'key', 'Optional', ':', 'param', 'label_value_prefix', ':', 'exporter', 'specific', 'label', 'value', 'prefix', 'Optional']
train
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L344-L358
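A minimal stand-in for the label-setting logic above, using a plain dict in place of the Stackdriver TimeSeries resource labels (the protobuf types are not reproduced here):

```python
def set_label(labels, resource_labels, attribute_key,
              canonical_key=None, label_value_prefix=""):
    # `labels` is a plain dict standing in for series.resource.labels
    if attribute_key in resource_labels:
        if canonical_key is None:
            canonical_key = attribute_key
        labels[canonical_key] = label_value_prefix + resource_labels[attribute_key]

labels = {}
set_label(labels, {"project_id": "demo"}, "project_id",
          canonical_key="project", label_value_prefix="gcp:")
print(labels)   # {'project': 'gcp:demo'}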
2,521
splunk/splunk-sdk-python
examples/analytics/bottle.py
BaseTemplate.global_config
def global_config(cls, key, *args): ''' This reads or sets the global settings stored in class.settings. ''' if args: cls.settings[key] = args[0] else: return cls.settings[key]
python
def global_config(cls, key, *args): ''' This reads or sets the global settings stored in class.settings. ''' if args: cls.settings[key] = args[0] else: return cls.settings[key]
['def', 'global_config', '(', 'cls', ',', 'key', ',', '*', 'args', ')', ':', 'if', 'args', ':', 'cls', '.', 'settings', '[', 'key', ']', '=', 'args', '[', '0', ']', 'else', ':', 'return', 'cls', '.', 'settings', '[', 'key', ']']
This reads or sets the global settings stored in class.settings.
['This', 'reads', 'or', 'sets', 'the', 'global', 'settings', 'stored', 'in', 'class', '.', 'settings', '.']
train
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L2137-L2142
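The read-or-set pattern in global_config can be reduced to a tiny self-contained class; the Settings name below is illustrative only:

```python
class Settings(object):
    settings = {}

    @classmethod
    def global_config(cls, key, *args):
        """Set the value when an extra argument is given, otherwise read it."""
        if args:
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]

Settings.global_config("autoescape", True)     # set
print(Settings.global_config("autoescape"))    # True
```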
2,522
phareous/insteonlocal
insteonlocal/Hub.py
Hub.id_request
def id_request(self, device_id): """Get the device for the ID. ID request can return device type (cat/subcat), firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']""" self.logger.info("\nid_request for device %s", device_id) device_id = device_id.upper() self.direct_command(device_id, '10', '00') sleep(2) status = self.get_buffer_status(device_id) if not status: sleep(1) status = self.get_buffer_status(device_id) return status
python
def id_request(self, device_id): """Get the device for the ID. ID request can return device type (cat/subcat), firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']""" self.logger.info("\nid_request for device %s", device_id) device_id = device_id.upper() self.direct_command(device_id, '10', '00') sleep(2) status = self.get_buffer_status(device_id) if not status: sleep(1) status = self.get_buffer_status(device_id) return status
['def', 'id_request', '(', 'self', ',', 'device_id', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"\\nid_request for device %s"', ',', 'device_id', ')', 'device_id', '=', 'device_id', '.', 'upper', '(', ')', 'self', '.', 'direct_command', '(', 'device_id', ',', "'10'", ',', "'00'", ')', 'sleep', '(', '2', ')', 'status', '=', 'self', '.', 'get_buffer_status', '(', 'device_id', ')', 'if', 'not', 'status', ':', 'sleep', '(', '1', ')', 'status', '=', 'self', '.', 'get_buffer_status', '(', 'device_id', ')', 'return', 'status']
Get the device for the ID. ID request can return device type (cat/subcat), firmware ver, etc. Cat is status['is_high'], sub cat is status['id_mid']
['Get', 'the', 'device', 'for', 'the', 'ID', '.', 'ID', 'request', 'can', 'return', 'device', 'type', '(', 'cat', '/', 'subcat', ')', 'firmware', 'ver', 'etc', '.', 'Cat', 'is', 'status', '[', 'is_high', ']', 'sub', 'cat', 'is', 'status', '[', 'id_mid', ']']
train
https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Hub.py#L301-L316
2,523
foremast/foremast
src/foremast/s3/create_archaius.py
init_properties
def init_properties(env='dev', app='unnecessary', **_): """Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created. """ aws_env = boto3.session.Session(profile_name=env) s3client = aws_env.resource('s3') generated = get_details(app=app, env=env) archaius = generated.archaius() archaius_file = ('{path}/application.properties').format(path=archaius['path']) try: s3client.Object(archaius['bucket'], archaius_file).get() LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file}) return True except boto3.exceptions.botocore.client.ClientError: s3client.Object(archaius['bucket'], archaius_file).put() LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file}) return False
python
def init_properties(env='dev', app='unnecessary', **_): """Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created. """ aws_env = boto3.session.Session(profile_name=env) s3client = aws_env.resource('s3') generated = get_details(app=app, env=env) archaius = generated.archaius() archaius_file = ('{path}/application.properties').format(path=archaius['path']) try: s3client.Object(archaius['bucket'], archaius_file).get() LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file}) return True except boto3.exceptions.botocore.client.ClientError: s3client.Object(archaius['bucket'], archaius_file).put() LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file}) return False
['def', 'init_properties', '(', 'env', '=', "'dev'", ',', 'app', '=', "'unnecessary'", ',', '*', '*', '_', ')', ':', 'aws_env', '=', 'boto3', '.', 'session', '.', 'Session', '(', 'profile_name', '=', 'env', ')', 's3client', '=', 'aws_env', '.', 'resource', '(', "'s3'", ')', 'generated', '=', 'get_details', '(', 'app', '=', 'app', ',', 'env', '=', 'env', ')', 'archaius', '=', 'generated', '.', 'archaius', '(', ')', 'archaius_file', '=', '(', "'{path}/application.properties'", ')', '.', 'format', '(', 'path', '=', 'archaius', '[', "'path'", ']', ')', 'try', ':', 's3client', '.', 'Object', '(', 'archaius', '[', "'bucket'", ']', ',', 'archaius_file', ')', '.', 'get', '(', ')', 'LOG', '.', 'info', '(', "'Found: %(bucket)s/%(file)s'", ',', '{', "'bucket'", ':', 'archaius', '[', "'bucket'", ']', ',', "'file'", ':', 'archaius_file', '}', ')', 'return', 'True', 'except', 'boto3', '.', 'exceptions', '.', 'botocore', '.', 'client', '.', 'ClientError', ':', 's3client', '.', 'Object', '(', 'archaius', '[', "'bucket'", ']', ',', 'archaius_file', ')', '.', 'put', '(', ')', 'LOG', '.', 'info', '(', "'Created: %(bucket)s/%(file)s'", ',', '{', "'bucket'", ':', 'archaius', '[', "'bucket'", ']', ',', "'file'", ':', 'archaius_file', '}', ')', 'return', 'False']
Make sure _application.properties_ file exists in S3. For Applications with Archaius support, there needs to be a file where the cloud environment variable points to. Args: env (str): Deployment environment/account, i.e. dev, stage, prod. app (str): GitLab Project name. Returns: True when application.properties was found. False when application.properties needed to be created.
['Make', 'sure', '_application', '.', 'properties_', 'file', 'exists', 'in', 'S3', '.']
train
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/s3/create_archaius.py#L26-L55
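A hedged sketch of the same check-or-create S3 pattern; the bucket, key and profile names are placeholders, the call is commented out because it needs AWS credentials and an existing bucket, and ClientError is imported from botocore.exceptions rather than through the boto3.exceptions path used above:

```python
import boto3
from botocore.exceptions import ClientError

def ensure_key(bucket, key, profile="dev"):
    """Return True if s3://bucket/key exists, otherwise create an empty object."""
    s3 = boto3.session.Session(profile_name=profile).resource("s3")
    obj = s3.Object(bucket, key)
    try:
        obj.get()
        return True
    except ClientError:
        obj.put()
        return False

# ensure_key("archaius-example-bucket", "myapp/application.properties")  # hypothetical names
```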
2,524
materialsproject/pymatgen
pymatgen/command_line/critic2_caller.py
Critic2Output._add_edge
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec): """ Add information about an edge linking two critical points. This actually describes two edges: from_idx ------ idx ------ to_idx However, in practice, from_idx and to_idx will typically be atom nuclei, with the center node (idx) referring to a bond critical point. Thus, it will be more convenient to model this as a single edge linking nuclei with the properties of the bond critical point stored as an edge attribute. :param idx: index of node :param from_idx: from index of node :param from_lvec: vector of lattice image the from node is in as tuple of ints :param to_idx: to index of node :param to_lvec: vector of lattice image the to node is in as tuple of ints :return: """ self.edges[idx] = {'from_idx': from_idx, 'from_lvec': from_lvec, 'to_idx': to_idx, 'to_lvec': to_lvec}
python
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec): """ Add information about an edge linking two critical points. This actually describes two edges: from_idx ------ idx ------ to_idx However, in practice, from_idx and to_idx will typically be atom nuclei, with the center node (idx) referring to a bond critical point. Thus, it will be more convenient to model this as a single edge linking nuclei with the properties of the bond critical point stored as an edge attribute. :param idx: index of node :param from_idx: from index of node :param from_lvec: vector of lattice image the from node is in as tuple of ints :param to_idx: to index of node :param to_lvec: vector of lattice image the to node is in as tuple of ints :return: """ self.edges[idx] = {'from_idx': from_idx, 'from_lvec': from_lvec, 'to_idx': to_idx, 'to_lvec': to_lvec}
['def', '_add_edge', '(', 'self', ',', 'idx', ',', 'from_idx', ',', 'from_lvec', ',', 'to_idx', ',', 'to_lvec', ')', ':', 'self', '.', 'edges', '[', 'idx', ']', '=', '{', "'from_idx'", ':', 'from_idx', ',', "'from_lvec'", ':', 'from_lvec', ',', "'to_idx'", ':', 'to_idx', ',', "'to_lvec'", ':', 'to_lvec', '}']
Add information about an edge linking two critical points. This actually describes two edges: from_idx ------ idx ------ to_idx However, in practice, from_idx and to_idx will typically be atom nuclei, with the center node (idx) referring to a bond critical point. Thus, it will be more convenient to model this as a single edge linking nuclei with the properties of the bond critical point stored as an edge attribute. :param idx: index of node :param from_idx: from index of node :param from_lvec: vector of lattice image the from node is in as tuple of ints :param to_idx: to index of node :param to_lvec: vector of lattice image the to node is in as tuple of ints :return:
['Add', 'information', 'about', 'an', 'edge', 'linking', 'two', 'critical', 'points', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/command_line/critic2_caller.py#L541-L565
2,525
lobocv/pyperform
pyperform/customlogger.py
new_log_level
def new_log_level(level, name, logger_name=None): """ Quick way to create a custom log level that behaves like the default levels in the logging module. :param level: level number :param name: level name :param logger_name: optional logger name """ @CustomLogLevel(level, name, logger_name) def _default_template(logger, msg, *args, **kwargs): return msg, args, kwargs
python
def new_log_level(level, name, logger_name=None): """ Quick way to create a custom log level that behaves like the default levels in the logging module. :param level: level number :param name: level name :param logger_name: optional logger name """ @CustomLogLevel(level, name, logger_name) def _default_template(logger, msg, *args, **kwargs): return msg, args, kwargs
['def', 'new_log_level', '(', 'level', ',', 'name', ',', 'logger_name', '=', 'None', ')', ':', '@', 'CustomLogLevel', '(', 'level', ',', 'name', ',', 'logger_name', ')', 'def', '_default_template', '(', 'logger', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'msg', ',', 'args', ',', 'kwargs']
Quick way to create a custom log level that behaves like the default levels in the logging module. :param level: level number :param name: level name :param logger_name: optional logger name
['Quick', 'way', 'to', 'create', 'a', 'custom', 'log', 'level', 'that', 'behaves', 'like', 'the', 'default', 'levels', 'in', 'the', 'logging', 'module', '.', ':', 'param', 'level', ':', 'level', 'number', ':', 'param', 'name', ':', 'level', 'name', ':', 'param', 'logger_name', ':', 'optional', 'logger', 'name']
train
https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/customlogger.py#L40-L49
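CustomLogLevel is specific to pyperform; a comparable custom level can be registered with the standard logging module alone, roughly like this:

```python
import logging

NOTICE = 25                               # sits between INFO (20) and WARNING (30)
logging.addLevelName(NOTICE, "NOTICE")

def notice(self, msg, *args, **kwargs):
    if self.isEnabledFor(NOTICE):
        self._log(NOTICE, msg, args, **kwargs)

logging.Logger.notice = notice            # attach the helper to every logger

logging.basicConfig(level=NOTICE)
logging.getLogger("demo").notice("custom level %s works", "NOTICE")
```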
2,526
RedHatInsights/insights-core
insights/client/__init__.py
InsightsClient.delete_cached_branch_info
def delete_cached_branch_info(self): ''' Deletes cached branch_info file ''' if os.path.isfile(constants.cached_branch_info): logger.debug('Deleting cached branch_info file...') os.remove(constants.cached_branch_info) else: logger.debug('Cached branch_info file does not exist.')
python
def delete_cached_branch_info(self): ''' Deletes cached branch_info file ''' if os.path.isfile(constants.cached_branch_info): logger.debug('Deleting cached branch_info file...') os.remove(constants.cached_branch_info) else: logger.debug('Cached branch_info file does not exist.')
['def', 'delete_cached_branch_info', '(', 'self', ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'constants', '.', 'cached_branch_info', ')', ':', 'logger', '.', 'debug', '(', "'Deleting cached branch_info file...'", ')', 'os', '.', 'remove', '(', 'constants', '.', 'cached_branch_info', ')', 'else', ':', 'logger', '.', 'debug', '(', "'Cached branch_info file does not exist.'", ')']
Deletes cached branch_info file
['Deletes', 'cached', 'branch_info', 'file']
train
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L465-L473
2,527
AnimusPEXUS/wayround_i2p_carafe
wayround_i2p/carafe/carafe.py
Router.add2
def add2(self, target, path_settings, method): """ add() with reordered parameters """ return self.add(method, path_settings, target)
python
def add2(self, target, path_settings, method): """ add() with reordered parameters """ return self.add(method, path_settings, target)
['def', 'add2', '(', 'self', ',', 'target', ',', 'path_settings', ',', 'method', ')', ':', 'return', 'self', '.', 'add', '(', 'method', ',', 'path_settings', ',', 'target', ')']
add() with reordered parameters
['add', '()', 'with', 'reordered', 'parameters']
train
https://github.com/AnimusPEXUS/wayround_i2p_carafe/blob/c92a72e1f7b559ac0bd6dc0ce2716ce1e61a9c5e/wayround_i2p/carafe/carafe.py#L198-L202
2,528
tonybaloney/wily
wily/state.py
Index.save
def save(self): """Save the index data back to the wily cache.""" data = [i.asdict() for i in self._revisions.values()] logger.debug("Saving data") cache.store_archiver_index(self.config, self.archiver, data)
python
def save(self): """Save the index data back to the wily cache.""" data = [i.asdict() for i in self._revisions.values()] logger.debug("Saving data") cache.store_archiver_index(self.config, self.archiver, data)
['def', 'save', '(', 'self', ')', ':', 'data', '=', '[', 'i', '.', 'asdict', '(', ')', 'for', 'i', 'in', 'self', '.', '_revisions', '.', 'values', '(', ')', ']', 'logger', '.', 'debug', '(', '"Saving data"', ')', 'cache', '.', 'store_archiver_index', '(', 'self', '.', 'config', ',', 'self', '.', 'archiver', ',', 'data', ')']
Save the index data back to the wily cache.
['Save', 'the', 'index', 'data', 'back', 'to', 'the', 'wily', 'cache', '.']
train
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/state.py#L168-L172
2,529
deontologician/restnavigator
restnavigator/halnav.py
APICore.get_cached
def get_cached(self, link, default=None): '''Retrieves a cached navigator from the id_map. Either a Link object or a bare uri string may be passed in.''' if hasattr(link, 'uri'): return self.id_map.get(link.uri, default) else: return self.id_map.get(link, default)
python
def get_cached(self, link, default=None): '''Retrieves a cached navigator from the id_map. Either a Link object or a bare uri string may be passed in.''' if hasattr(link, 'uri'): return self.id_map.get(link.uri, default) else: return self.id_map.get(link, default)
['def', 'get_cached', '(', 'self', ',', 'link', ',', 'default', '=', 'None', ')', ':', 'if', 'hasattr', '(', 'link', ',', "'uri'", ')', ':', 'return', 'self', '.', 'id_map', '.', 'get', '(', 'link', '.', 'uri', ',', 'default', ')', 'else', ':', 'return', 'self', '.', 'id_map', '.', 'get', '(', 'link', ',', 'default', ')']
Retrieves a cached navigator from the id_map. Either a Link object or a bare uri string may be passed in.
['Retrieves', 'a', 'cached', 'navigator', 'from', 'the', 'id_map', '.']
train
https://github.com/deontologician/restnavigator/blob/453b9de4e70e602009d3e3ffafcf77d23c8b07c5/restnavigator/halnav.py#L73-L80
2,530
coins13/twins
twins/misc.py
get_nendo
def get_nendo (): """今は何年度?""" y, m = map(int, time.strftime("%Y %m").split()) return y if m >= 4 else y - 1
python
def get_nendo (): """今は何年度?""" y, m = map(int, time.strftime("%Y %m").split()) return y if m >= 4 else y - 1
['def', 'get_nendo', '(', ')', ':', 'y', ',', 'm', '=', 'map', '(', 'int', ',', 'time', '.', 'strftime', '(', '"%Y %m"', ')', '.', 'split', '(', ')', ')', 'return', 'y', 'if', 'm', '>=', '4', 'else', 'y', '-', '1']
What academic year (nendo) is it now?
['What', 'academic', 'year', '(', 'nendo', ')', 'is', 'it', 'now', '?']
train
https://github.com/coins13/twins/blob/d66cc850007a25f01812a9d8c7e3efe64a631ca2/twins/misc.py#L5-L8
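The same nendo (Japanese academic/fiscal year) rule, sketched with datetime so it can be tested against fixed dates:

```python
import datetime

def get_nendo(today=None):
    """Japanese academic/fiscal year: the year rolls over on April 1st."""
    today = today or datetime.date.today()
    return today.year if today.month >= 4 else today.year - 1

print(get_nendo(datetime.date(2024, 3, 31)))   # 2023
print(get_nendo(datetime.date(2024, 4, 1)))    # 2024
```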
2,531
mozilla/build-mar
src/mardor/cli.py
do_add_signature
def do_add_signature(input_file, output_file, signature_file): """Add a signature to the MAR file.""" signature = open(signature_file, 'rb').read() if len(signature) == 256: hash_algo = 'sha1' elif len(signature) == 512: hash_algo = 'sha384' else: raise ValueError() with open(output_file, 'w+b') as dst: with open(input_file, 'rb') as src: add_signature_block(src, dst, hash_algo, signature)
python
def do_add_signature(input_file, output_file, signature_file): """Add a signature to the MAR file.""" signature = open(signature_file, 'rb').read() if len(signature) == 256: hash_algo = 'sha1' elif len(signature) == 512: hash_algo = 'sha384' else: raise ValueError() with open(output_file, 'w+b') as dst: with open(input_file, 'rb') as src: add_signature_block(src, dst, hash_algo, signature)
['def', 'do_add_signature', '(', 'input_file', ',', 'output_file', ',', 'signature_file', ')', ':', 'signature', '=', 'open', '(', 'signature_file', ',', "'rb'", ')', '.', 'read', '(', ')', 'if', 'len', '(', 'signature', ')', '==', '256', ':', 'hash_algo', '=', "'sha1'", 'elif', 'len', '(', 'signature', ')', '==', '512', ':', 'hash_algo', '=', "'sha384'", 'else', ':', 'raise', 'ValueError', '(', ')', 'with', 'open', '(', 'output_file', ',', "'w+b'", ')', 'as', 'dst', ':', 'with', 'open', '(', 'input_file', ',', "'rb'", ')', 'as', 'src', ':', 'add_signature_block', '(', 'src', ',', 'dst', ',', 'hash_algo', ',', 'signature', ')']
Add a signature to the MAR file.
['Add', 'a', 'signature', 'to', 'the', 'MAR', 'file', '.']
train
https://github.com/mozilla/build-mar/blob/d8c3b3469e55654d31f430cb343fd89392196c4e/src/mardor/cli.py#L217-L229
2,532
ska-sa/katcp-python
katcp/server.py
DeviceLogger.warn
def warn(self, msg, *args, **kwargs): """Log a warning message.""" self.log(self.WARN, msg, *args, **kwargs)
python
def warn(self, msg, *args, **kwargs): """Log a warning message.""" self.log(self.WARN, msg, *args, **kwargs)
['def', 'warn', '(', 'self', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'log', '(', 'self', '.', 'WARN', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Log a warning message.
['Log', 'a', 'warning', 'message', '.']
train
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/server.py#L2550-L2552
2,533
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/isoline.py
IsolineVisual._compute_iso_color
def _compute_iso_color(self): """ compute LineVisual color from level index and corresponding level color """ level_color = [] colors = self._lc for i, index in enumerate(self._li): level_color.append(np.zeros((index, 4)) + colors[i]) self._cl = np.vstack(level_color)
python
def _compute_iso_color(self): """ compute LineVisual color from level index and corresponding level color """ level_color = [] colors = self._lc for i, index in enumerate(self._li): level_color.append(np.zeros((index, 4)) + colors[i]) self._cl = np.vstack(level_color)
['def', '_compute_iso_color', '(', 'self', ')', ':', 'level_color', '=', '[', ']', 'colors', '=', 'self', '.', '_lc', 'for', 'i', ',', 'index', 'in', 'enumerate', '(', 'self', '.', '_li', ')', ':', 'level_color', '.', 'append', '(', 'np', '.', 'zeros', '(', '(', 'index', ',', '4', ')', ')', '+', 'colors', '[', 'i', ']', ')', 'self', '.', '_cl', '=', 'np', '.', 'vstack', '(', 'level_color', ')']
compute LineVisual color from level index and corresponding level color
['compute', 'LineVisual', 'color', 'from', 'level', 'index', 'and', 'corresponding', 'level', 'color']
train
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/isoline.py#L214-L222
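The per-level color expansion above boils down to broadcasting one RGBA row per vertex and stacking; a small stand-alone numpy sketch with invented counts and colors:

```python
import numpy as np

level_counts = [3, 2]                             # vertices per isoline level
level_colors = np.array([[1.0, 0.0, 0.0, 1.0],    # red
                         [0.0, 0.0, 1.0, 1.0]])   # blue

per_vertex = np.vstack([np.zeros((n, 4)) + c
                        for n, c in zip(level_counts, level_colors)])
print(per_vertex.shape)   # (5, 4): one RGBA row per vertex
```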
2,534
weblyzard/inscriptis
src/inscriptis/css.py
CssParse.get_style_attribute
def get_style_attribute(style_attribute, html_element): ''' ::param: style_directive \ The attribute value of the given style sheet. Example: display: none ::param: html_element: \ The HtmlElement to which the given style is applied ::returns: A HtmlElement that merges the given element with the style attributes specified. ''' custome_html_element = html_element.clone() for style_directive in style_attribute.lower().split(';'): if ':' not in style_directive: continue key, value = (s.strip() for s in style_directive.split(':', 1)) try: apply_style = getattr(CssParse, "_attr_" + key.replace('-webkit-', '') .replace("-", "_")) apply_style(value, custome_html_element) except AttributeError: pass return custome_html_element
python
def get_style_attribute(style_attribute, html_element): ''' ::param: style_directive \ The attribute value of the given style sheet. Example: display: none ::param: html_element: \ The HtmlElement to which the given style is applied ::returns: A HtmlElement that merges the given element with the style attributes specified. ''' custome_html_element = html_element.clone() for style_directive in style_attribute.lower().split(';'): if ':' not in style_directive: continue key, value = (s.strip() for s in style_directive.split(':', 1)) try: apply_style = getattr(CssParse, "_attr_" + key.replace('-webkit-', '') .replace("-", "_")) apply_style(value, custome_html_element) except AttributeError: pass return custome_html_element
['def', 'get_style_attribute', '(', 'style_attribute', ',', 'html_element', ')', ':', 'custome_html_element', '=', 'html_element', '.', 'clone', '(', ')', 'for', 'style_directive', 'in', 'style_attribute', '.', 'lower', '(', ')', '.', 'split', '(', "';'", ')', ':', 'if', "':'", 'not', 'in', 'style_directive', ':', 'continue', 'key', ',', 'value', '=', '(', 's', '.', 'strip', '(', ')', 'for', 's', 'in', 'style_directive', '.', 'split', '(', "':'", ',', '1', ')', ')', 'try', ':', 'apply_style', '=', 'getattr', '(', 'CssParse', ',', '"_attr_"', '+', 'key', '.', 'replace', '(', "'-webkit-'", ',', "''", ')', '.', 'replace', '(', '"-"', ',', '"_"', ')', ')', 'apply_style', '(', 'value', ',', 'custome_html_element', ')', 'except', 'AttributeError', ':', 'pass', 'return', 'custome_html_element']
::param: style_directive \ The attribute value of the given style sheet. Example: display: none ::param: html_element: \ The HtmlElement to which the given style is applied ::returns: A HtmlElement that merges the given element with the style attributes specified.
['::', 'param', ':', 'style_directive', '\\', 'The', 'attribute', 'value', 'of', 'the', 'given', 'style', 'sheet', '.', 'Example', ':', 'display', ':', 'none']
train
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/css.py#L62-L89
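The _attr_* dispatch is inscriptis-specific, but the split-and-normalize parsing of a style attribute can be sketched on its own, collecting into a plain dict:

```python
def parse_style_attribute(style_attribute):
    """Split 'display: none; margin-top: 1em' into normalized key/value pairs."""
    styles = {}
    for directive in style_attribute.lower().split(";"):
        if ":" not in directive:
            continue
        key, value = (s.strip() for s in directive.split(":", 1))
        styles[key.replace("-webkit-", "")] = value
    return styles

print(parse_style_attribute("Display: None; -webkit-margin-top: 1em"))
# {'display': 'none', 'margin-top': '1em'}
```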
2,535
dswah/pyGAM
pygam/terms.py
TermList.build_penalties
def build_penalties(self): """ builds the GAM block-diagonal penalty matrix in quadratic form out of penalty matrices specified for each feature. each feature penalty matrix is multiplied by a lambda for that feature. so for m features: P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ... , lamm * Pm] Parameters ---------- None Returns ------- P : sparse CSC matrix containing the model penalties in quadratic form """ P = [] for term in self._terms: P.append(term.build_penalties()) return sp.sparse.block_diag(P)
python
def build_penalties(self): """ builds the GAM block-diagonal penalty matrix in quadratic form out of penalty matrices specified for each feature. each feature penalty matrix is multiplied by a lambda for that feature. so for m features: P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ... , lamm * Pm] Parameters ---------- None Returns ------- P : sparse CSC matrix containing the model penalties in quadratic form """ P = [] for term in self._terms: P.append(term.build_penalties()) return sp.sparse.block_diag(P)
['def', 'build_penalties', '(', 'self', ')', ':', 'P', '=', '[', ']', 'for', 'term', 'in', 'self', '.', '_terms', ':', 'P', '.', 'append', '(', 'term', '.', 'build_penalties', '(', ')', ')', 'return', 'sp', '.', 'sparse', '.', 'block_diag', '(', 'P', ')']
builds the GAM block-diagonal penalty matrix in quadratic form out of penalty matrices specified for each feature. each feature penalty matrix is multiplied by a lambda for that feature. so for m features: P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ... , lamm * Pm] Parameters ---------- None Returns ------- P : sparse CSC matrix containing the model penalties in quadratic form
['builds', 'the', 'GAM', 'block', '-', 'diagonal', 'penalty', 'matrix', 'in', 'quadratic', 'form', 'out', 'of', 'penalty', 'matrices', 'specified', 'for', 'each', 'feature', '.']
train
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1722-L1744
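Assembling the block-diagonal penalty can be reproduced with scipy.sparse.block_diag and two tiny stand-in penalty matrices (lambda values chosen arbitrarily):

```python
import numpy as np
import scipy.sparse as sp

lam0, lam1 = 0.1, 10.0
P0 = lam0 * np.eye(2)                  # penalty for term 0
P1 = lam1 * np.array([[ 2., -1.],
                      [-1.,  2.]])     # penalty for term 1

P = sp.block_diag([P0, P1], format="csc")
print(P.toarray())
```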
2,536
saltstack/salt
salt/modules/smbios.py
records
def records(rec_type=None, fields=None, clean=True): ''' Return DMI records from SMBIOS type Return only records of type(s) The SMBIOS specification defines the following DMI types: ==== ====================================== Type Information ==== ====================================== 0 BIOS 1 System 2 Baseboard 3 Chassis 4 Processor 5 Memory Controller 6 Memory Module 7 Cache 8 Port Connector 9 System Slots 10 On Board Devices 11 OEM Strings 12 System Configuration Options 13 BIOS Language 14 Group Associations 15 System Event Log 16 Physical Memory Array 17 Memory Device 18 32-bit Memory Error 19 Memory Array Mapped Address 20 Memory Device Mapped Address 21 Built-in Pointing Device 22 Portable Battery 23 System Reset 24 Hardware Security 25 System Power Controls 26 Voltage Probe 27 Cooling Device 28 Temperature Probe 29 Electrical Current Probe 30 Out-of-band Remote Access 31 Boot Integrity Services 32 System Boot 33 64-bit Memory Error 34 Management Device 35 Management Device Component 36 Management Device Threshold Data 37 Memory Channel 38 IPMI Device 39 Power Supply 40 Additional Information 41 Onboard Devices Extended Information 42 Management Controller Host Interface ==== ====================================== clean | Don't return well-known false information | (invalid UUID's, serial 000000000's, etcetera) | Defaults to ``True`` CLI Example: .. code-block:: bash salt '*' smbios.records clean=False salt '*' smbios.records 14 salt '*' smbios.records 4 core_count,thread_count,current_speed ''' if rec_type is None: smbios = _dmi_parse(_dmidecoder(), clean, fields) else: smbios = _dmi_parse(_dmidecoder('-t {0}'.format(rec_type)), clean, fields) return smbios
python
def records(rec_type=None, fields=None, clean=True): ''' Return DMI records from SMBIOS type Return only records of type(s) The SMBIOS specification defines the following DMI types: ==== ====================================== Type Information ==== ====================================== 0 BIOS 1 System 2 Baseboard 3 Chassis 4 Processor 5 Memory Controller 6 Memory Module 7 Cache 8 Port Connector 9 System Slots 10 On Board Devices 11 OEM Strings 12 System Configuration Options 13 BIOS Language 14 Group Associations 15 System Event Log 16 Physical Memory Array 17 Memory Device 18 32-bit Memory Error 19 Memory Array Mapped Address 20 Memory Device Mapped Address 21 Built-in Pointing Device 22 Portable Battery 23 System Reset 24 Hardware Security 25 System Power Controls 26 Voltage Probe 27 Cooling Device 28 Temperature Probe 29 Electrical Current Probe 30 Out-of-band Remote Access 31 Boot Integrity Services 32 System Boot 33 64-bit Memory Error 34 Management Device 35 Management Device Component 36 Management Device Threshold Data 37 Memory Channel 38 IPMI Device 39 Power Supply 40 Additional Information 41 Onboard Devices Extended Information 42 Management Controller Host Interface ==== ====================================== clean | Don't return well-known false information | (invalid UUID's, serial 000000000's, etcetera) | Defaults to ``True`` CLI Example: .. code-block:: bash salt '*' smbios.records clean=False salt '*' smbios.records 14 salt '*' smbios.records 4 core_count,thread_count,current_speed ''' if rec_type is None: smbios = _dmi_parse(_dmidecoder(), clean, fields) else: smbios = _dmi_parse(_dmidecoder('-t {0}'.format(rec_type)), clean, fields) return smbios
['def', 'records', '(', 'rec_type', '=', 'None', ',', 'fields', '=', 'None', ',', 'clean', '=', 'True', ')', ':', 'if', 'rec_type', 'is', 'None', ':', 'smbios', '=', '_dmi_parse', '(', '_dmidecoder', '(', ')', ',', 'clean', ',', 'fields', ')', 'else', ':', 'smbios', '=', '_dmi_parse', '(', '_dmidecoder', '(', "'-t {0}'", '.', 'format', '(', 'rec_type', ')', ')', ',', 'clean', ',', 'fields', ')', 'return', 'smbios']
Return DMI records from SMBIOS type Return only records of type(s) The SMBIOS specification defines the following DMI types: ==== ====================================== Type Information ==== ====================================== 0 BIOS 1 System 2 Baseboard 3 Chassis 4 Processor 5 Memory Controller 6 Memory Module 7 Cache 8 Port Connector 9 System Slots 10 On Board Devices 11 OEM Strings 12 System Configuration Options 13 BIOS Language 14 Group Associations 15 System Event Log 16 Physical Memory Array 17 Memory Device 18 32-bit Memory Error 19 Memory Array Mapped Address 20 Memory Device Mapped Address 21 Built-in Pointing Device 22 Portable Battery 23 System Reset 24 Hardware Security 25 System Power Controls 26 Voltage Probe 27 Cooling Device 28 Temperature Probe 29 Electrical Current Probe 30 Out-of-band Remote Access 31 Boot Integrity Services 32 System Boot 33 64-bit Memory Error 34 Management Device 35 Management Device Component 36 Management Device Threshold Data 37 Memory Channel 38 IPMI Device 39 Power Supply 40 Additional Information 41 Onboard Devices Extended Information 42 Management Controller Host Interface ==== ====================================== clean | Don't return well-known false information | (invalid UUID's, serial 000000000's, etcetera) | Defaults to ``True`` CLI Example: .. code-block:: bash salt '*' smbios.records clean=False salt '*' smbios.records 14 salt '*' smbios.records 4 core_count,thread_count,current_speed
['Return', 'DMI', 'records', 'from', 'SMBIOS']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smbios.py#L92-L167
2,537
KelSolaar/Foundations
foundations/parsers.py
PlistFileParser.filter_values
def filter_values(self, pattern, flags=0): """ | Filters the :meth:`PlistFileParser.elements` class property elements using given pattern. | Will return a list of matching elements values, if you want to get only one element value, use the :meth:`PlistFileParser.get_value` method instead. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.filter_values(r"String A") [u'My Value A'] >>> plist_file_parser.filter_values(r"String.*") [u'My Value C', u'My Value B', u'My Value A'] :param pattern: Regex filtering pattern. :type pattern: unicode :param flags: Regex flags. :type flags: int :return: Values. :rtype: list """ values = [] if not self.__elements: return values for item in foundations.walkers.dictionaries_walker(self.__elements): path, element, value = item if re.search(pattern, element, flags): values.append(value) return values
python
def filter_values(self, pattern, flags=0): """ | Filters the :meth:`PlistFileParser.elements` class property elements using given pattern. | Will return a list of matching elements values, if you want to get only one element value, use the :meth:`PlistFileParser.get_value` method instead. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.filter_values(r"String A") [u'My Value A'] >>> plist_file_parser.filter_values(r"String.*") [u'My Value C', u'My Value B', u'My Value A'] :param pattern: Regex filtering pattern. :type pattern: unicode :param flags: Regex flags. :type flags: int :return: Values. :rtype: list """ values = [] if not self.__elements: return values for item in foundations.walkers.dictionaries_walker(self.__elements): path, element, value = item if re.search(pattern, element, flags): values.append(value) return values
['def', 'filter_values', '(', 'self', ',', 'pattern', ',', 'flags', '=', '0', ')', ':', 'values', '=', '[', ']', 'if', 'not', 'self', '.', '__elements', ':', 'return', 'values', 'for', 'item', 'in', 'foundations', '.', 'walkers', '.', 'dictionaries_walker', '(', 'self', '.', '__elements', ')', ':', 'path', ',', 'element', ',', 'value', '=', 'item', 'if', 're', '.', 'search', '(', 'pattern', ',', 'element', ',', 'flags', ')', ':', 'values', '.', 'append', '(', 'value', ')', 'return', 'values']
| Filters the :meth:`PlistFileParser.elements` class property elements using given pattern. | Will return a list of matching elements values, if you want to get only one element value, use the :meth:`PlistFileParser.get_value` method instead. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.filter_values(r"String A") [u'My Value A'] >>> plist_file_parser.filter_values(r"String.*") [u'My Value C', u'My Value B', u'My Value A'] :param pattern: Regex filtering pattern. :type pattern: unicode :param flags: Regex flags. :type flags: int :return: Values. :rtype: list
['|', 'Filters', 'the', ':', 'meth', ':', 'PlistFileParser', '.', 'elements', 'class', 'property', 'elements', 'using', 'given', 'pattern', '.', '|', 'Will', 'return', 'a', 'list', 'of', 'matching', 'elements', 'values', 'if', 'you', 'want', 'to', 'get', 'only', 'one', 'element', 'value', 'use', 'the', ':', 'meth', ':', 'PlistFileParser', '.', 'get_value', 'method', 'instead', '.']
train
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/parsers.py#L1317-L1349
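foundations.walkers.dictionaries_walker is library-specific; the same regex filtering idea over an already-flat mapping is just:

```python
import re

elements = {"String A": "My Value A",
            "String B": "My Value B",
            "Number C": 42}

def filter_values(elements, pattern, flags=0):
    return [value for key, value in elements.items()
            if re.search(pattern, key, flags)]

print(filter_values(elements, r"String A"))   # ['My Value A']
print(filter_values(elements, r"String.*"))   # ['My Value A', 'My Value B']
```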
2,538
twilio/twilio-python
twilio/rest/wireless/v1/sim/__init__.py
SimContext.data_sessions
def data_sessions(self): """ Access the data_sessions :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList """ if self._data_sessions is None: self._data_sessions = DataSessionList(self._version, sim_sid=self._solution['sid'], ) return self._data_sessions
python
def data_sessions(self): """ Access the data_sessions :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList """ if self._data_sessions is None: self._data_sessions = DataSessionList(self._version, sim_sid=self._solution['sid'], ) return self._data_sessions
['def', 'data_sessions', '(', 'self', ')', ':', 'if', 'self', '.', '_data_sessions', 'is', 'None', ':', 'self', '.', '_data_sessions', '=', 'DataSessionList', '(', 'self', '.', '_version', ',', 'sim_sid', '=', 'self', '.', '_solution', '[', "'sid'", ']', ',', ')', 'return', 'self', '.', '_data_sessions']
Access the data_sessions :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList
['Access', 'the', 'data_sessions']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/wireless/v1/sim/__init__.py#L357-L366
2,539
jashort/SmartFileSorter
smartfilesorter/smartfilesorter.py
SmartFileSorter.load_rules
def load_rules(self, filename): """ Load rules from YAML configuration in the given stream object :param filename: Filename of rule YAML file :return: rules object """ self.logger.debug('Reading rules from %s', filename) try: in_file = open(filename) except IOError: self.logger.error('Error opening {0}'.format(filename)) raise y = None try: y = yaml.load(in_file) except yaml.YAMLError as exc: if hasattr(exc, 'problem_mark'): self.logger.error('Error parsing rules{0}'.format(exc.problem_mark)) else: self.logger.error('Error parsing rules in {0}'.format(in_file.name)) raise return y
python
def load_rules(self, filename): """ Load rules from YAML configuration in the given stream object :param filename: Filename of rule YAML file :return: rules object """ self.logger.debug('Reading rules from %s', filename) try: in_file = open(filename) except IOError: self.logger.error('Error opening {0}'.format(filename)) raise y = None try: y = yaml.load(in_file) except yaml.YAMLError as exc: if hasattr(exc, 'problem_mark'): self.logger.error('Error parsing rules{0}'.format(exc.problem_mark)) else: self.logger.error('Error parsing rules in {0}'.format(in_file.name)) raise return y
['def', 'load_rules', '(', 'self', ',', 'filename', ')', ':', 'self', '.', 'logger', '.', 'debug', '(', "'Reading rules from %s'", ',', 'filename', ')', 'try', ':', 'in_file', '=', 'open', '(', 'filename', ')', 'except', 'IOError', ':', 'self', '.', 'logger', '.', 'error', '(', "'Error opening {0}'", '.', 'format', '(', 'filename', ')', ')', 'raise', 'y', '=', 'None', 'try', ':', 'y', '=', 'yaml', '.', 'load', '(', 'in_file', ')', 'except', 'yaml', '.', 'YAMLError', 'as', 'exc', ':', 'if', 'hasattr', '(', 'exc', ',', "'problem_mark'", ')', ':', 'self', '.', 'logger', '.', 'error', '(', "'Error parsing rules{0}'", '.', 'format', '(', 'exc', '.', 'problem_mark', ')', ')', 'else', ':', 'self', '.', 'logger', '.', 'error', '(', "'Error parsing rules in {0}'", '.', 'format', '(', 'in_file', '.', 'name', ')', ')', 'raise', 'return', 'y']
Load rules from YAML configuration in the given stream object :param filename: Filename of rule YAML file :return: rules object
['Load', 'rules', 'from', 'YAML', 'configuration', 'in', 'the', 'given', 'stream', 'object', ':', 'param', 'filename', ':', 'Filename', 'of', 'rule', 'YAML', 'file', ':', 'return', ':', 'rules', 'object']
train
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L107-L130
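A sketch of the same load-and-report pattern; yaml.safe_load is used here because newer PyYAML releases expect an explicit Loader for bare yaml.load, and the filename in the commented call is hypothetical:

```python
import yaml

def load_rules(filename):
    """Parse a YAML rule file, surfacing the parse location on failure."""
    with open(filename) as in_file:
        try:
            return yaml.safe_load(in_file)
        except yaml.YAMLError as exc:
            mark = getattr(exc, "problem_mark", None)
            if mark is not None:
                print("Error parsing rules at line {0}".format(mark.line + 1))
            raise

# rules = load_rules("rules.yaml")
```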
2,540
nyaruka/smartmin
smartmin/views.py
SmartListView.derive_link_fields
def derive_link_fields(self, context): """ Used to derive which fields should be linked. This should return a set() containing the names of those fields which should be linkable. """ if self.link_fields is not None: return self.link_fields else: link_fields = set() if self.fields: for field in self.fields: if field != 'is_active': link_fields.add(field) break return link_fields
python
def derive_link_fields(self, context): """ Used to derive which fields should be linked. This should return a set() containing the names of those fields which should be linkable. """ if self.link_fields is not None: return self.link_fields else: link_fields = set() if self.fields: for field in self.fields: if field != 'is_active': link_fields.add(field) break return link_fields
['def', 'derive_link_fields', '(', 'self', ',', 'context', ')', ':', 'if', 'self', '.', 'link_fields', 'is', 'not', 'None', ':', 'return', 'self', '.', 'link_fields', 'else', ':', 'link_fields', '=', 'set', '(', ')', 'if', 'self', '.', 'fields', ':', 'for', 'field', 'in', 'self', '.', 'fields', ':', 'if', 'field', '!=', "'is_active'", ':', 'link_fields', '.', 'add', '(', 'field', ')', 'break', 'return', 'link_fields']
Used to derive which fields should be linked. This should return a set() containing the names of those fields which should be linkable.
['Used', 'to', 'derive', 'which', 'fields', 'should', 'be', 'linked', '.', 'This', 'should', 'return', 'a', 'set', '()', 'containing', 'the', 'names', 'of', 'those', 'fields', 'which', 'should', 'be', 'linkable', '.']
train
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L542-L558
2,541
booktype/python-ooxml
ooxml/serialize.py
serialize_table
def serialize_table(ctx, document, table, root): """Serializes table element. """ # What we should check really is why do we pass None as root element # There is a good chance some content is missing after the import if root is None: return root if ctx.ilvl != None: root = close_list(ctx, root) ctx.ilvl, ctx.numid = None, None _table = etree.SubElement(root, 'table') _table.set('border', '1') _table.set('width', '100%') style = get_style(document, table) if style: _table.set('class', get_css_classes(document, style)) for rows in table.rows: _tr = etree.SubElement(_table, 'tr') for cell in rows: _td = etree.SubElement(_tr, 'td') if cell.grid_span != 1: _td.set('colspan', str(cell.grid_span)) if cell.row_span != 1: _td.set('rowspan', str(cell.row_span)) for elem in cell.elements: if isinstance(elem, doc.Paragraph): _ser = ctx.get_serializer(elem) _td = _ser(ctx, document, elem, _td, embed=False) if ctx.ilvl != None: # root = close_list(ctx, root) _td = close_list(ctx, _td) ctx.ilvl, ctx.numid = None, None fire_hooks(ctx, document, table, _td, ctx.get_hook('td')) fire_hooks(ctx, document, table, _td, ctx.get_hook('tr')) fire_hooks(ctx, document, table, _table, ctx.get_hook('table')) return root
python
def serialize_table(ctx, document, table, root): """Serializes table element. """ # What we should check really is why do we pass None as root element # There is a good chance some content is missing after the import if root is None: return root if ctx.ilvl != None: root = close_list(ctx, root) ctx.ilvl, ctx.numid = None, None _table = etree.SubElement(root, 'table') _table.set('border', '1') _table.set('width', '100%') style = get_style(document, table) if style: _table.set('class', get_css_classes(document, style)) for rows in table.rows: _tr = etree.SubElement(_table, 'tr') for cell in rows: _td = etree.SubElement(_tr, 'td') if cell.grid_span != 1: _td.set('colspan', str(cell.grid_span)) if cell.row_span != 1: _td.set('rowspan', str(cell.row_span)) for elem in cell.elements: if isinstance(elem, doc.Paragraph): _ser = ctx.get_serializer(elem) _td = _ser(ctx, document, elem, _td, embed=False) if ctx.ilvl != None: # root = close_list(ctx, root) _td = close_list(ctx, _td) ctx.ilvl, ctx.numid = None, None fire_hooks(ctx, document, table, _td, ctx.get_hook('td')) fire_hooks(ctx, document, table, _td, ctx.get_hook('tr')) fire_hooks(ctx, document, table, _table, ctx.get_hook('table')) return root
['def', 'serialize_table', '(', 'ctx', ',', 'document', ',', 'table', ',', 'root', ')', ':', '# What we should check really is why do we pass None as root element', '# There is a good chance some content is missing after the import', 'if', 'root', 'is', 'None', ':', 'return', 'root', 'if', 'ctx', '.', 'ilvl', '!=', 'None', ':', 'root', '=', 'close_list', '(', 'ctx', ',', 'root', ')', 'ctx', '.', 'ilvl', ',', 'ctx', '.', 'numid', '=', 'None', ',', 'None', '_table', '=', 'etree', '.', 'SubElement', '(', 'root', ',', "'table'", ')', '_table', '.', 'set', '(', "'border'", ',', "'1'", ')', '_table', '.', 'set', '(', "'width'", ',', "'100%'", ')', 'style', '=', 'get_style', '(', 'document', ',', 'table', ')', 'if', 'style', ':', '_table', '.', 'set', '(', "'class'", ',', 'get_css_classes', '(', 'document', ',', 'style', ')', ')', 'for', 'rows', 'in', 'table', '.', 'rows', ':', '_tr', '=', 'etree', '.', 'SubElement', '(', '_table', ',', "'tr'", ')', 'for', 'cell', 'in', 'rows', ':', '_td', '=', 'etree', '.', 'SubElement', '(', '_tr', ',', "'td'", ')', 'if', 'cell', '.', 'grid_span', '!=', '1', ':', '_td', '.', 'set', '(', "'colspan'", ',', 'str', '(', 'cell', '.', 'grid_span', ')', ')', 'if', 'cell', '.', 'row_span', '!=', '1', ':', '_td', '.', 'set', '(', "'rowspan'", ',', 'str', '(', 'cell', '.', 'row_span', ')', ')', 'for', 'elem', 'in', 'cell', '.', 'elements', ':', 'if', 'isinstance', '(', 'elem', ',', 'doc', '.', 'Paragraph', ')', ':', '_ser', '=', 'ctx', '.', 'get_serializer', '(', 'elem', ')', '_td', '=', '_ser', '(', 'ctx', ',', 'document', ',', 'elem', ',', '_td', ',', 'embed', '=', 'False', ')', 'if', 'ctx', '.', 'ilvl', '!=', 'None', ':', '# root = close_list(ctx, root)', '_td', '=', 'close_list', '(', 'ctx', ',', '_td', ')', 'ctx', '.', 'ilvl', ',', 'ctx', '.', 'numid', '=', 'None', ',', 'None', 'fire_hooks', '(', 'ctx', ',', 'document', ',', 'table', ',', '_td', ',', 'ctx', '.', 'get_hook', '(', "'td'", ')', ')', 'fire_hooks', '(', 'ctx', ',', 'document', ',', 'table', ',', '_td', ',', 'ctx', '.', 'get_hook', '(', "'tr'", ')', ')', 'fire_hooks', '(', 'ctx', ',', 'document', ',', 'table', ',', '_table', ',', 'ctx', '.', 'get_hook', '(', "'table'", ')', ')', 'return', 'root']
Serializes table element.
['Serializes', 'table', 'element', '.']
train
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L828-L879
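The core SubElement pattern behind serialize_table, shown with the standard-library ElementTree and without the style/hook machinery:

```python
import xml.etree.ElementTree as etree

rows = [["r1c1", "r1c2"], ["r2c1", "r2c2"]]

root = etree.Element("div")
table = etree.SubElement(root, "table")
table.set("border", "1")
table.set("width", "100%")

for row in rows:
    tr = etree.SubElement(table, "tr")
    for cell in row:
        td = etree.SubElement(tr, "td")
        td.text = cell

print(etree.tostring(root, encoding="unicode"))
```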
2,542
pip-services3-python/pip-services3-commons-python
pip_services3_commons/data/AnyValueMap.py
AnyValueMap.append
def append(self, map): """ Appends new elements to this map. :param map: a map with elements to be added. """ if isinstance(map, dict): for (k, v) in map.items(): key = StringConverter.to_string(k) value = v self.put(key, value)
python
def append(self, map): """ Appends new elements to this map. :param map: a map with elements to be added. """ if isinstance(map, dict): for (k, v) in map.items(): key = StringConverter.to_string(k) value = v self.put(key, value)
['def', 'append', '(', 'self', ',', 'map', ')', ':', 'if', 'isinstance', '(', 'map', ',', 'dict', ')', ':', 'for', '(', 'k', ',', 'v', ')', 'in', 'map', '.', 'items', '(', ')', ':', 'key', '=', 'StringConverter', '.', 'to_string', '(', 'k', ')', 'value', '=', 'v', 'self', '.', 'put', '(', 'key', ',', 'value', ')']
Appends new elements to this map. :param map: a map with elements to be added.
['Appends', 'new', 'elements', 'to', 'this', 'map', '.']
train
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueMap.py#L86-L96
2,543
quodlibet/mutagen
mutagen/oggvorbis.py
OggVCommentDict._inject
def _inject(self, fileobj, padding_func): """Write tag data into the Vorbis comment packet/page.""" # Find the old pages in the file; we'll need to remove them, # plus grab any stray setup packet data out of them. fileobj.seek(0) page = OggPage(fileobj) while not page.packets[0].startswith(b"\x03vorbis"): page = OggPage(fileobj) old_pages = [page] while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): page = OggPage(fileobj) if page.serial == old_pages[0].serial: old_pages.append(page) packets = OggPage.to_packets(old_pages, strict=False) content_size = get_size(fileobj) - len(packets[0]) # approx vcomment_data = b"\x03vorbis" + self.write() padding_left = len(packets[0]) - len(vcomment_data) info = PaddingInfo(padding_left, content_size) new_padding = info._get_padding(padding_func) # Set the new comment packet. packets[0] = vcomment_data + b"\x00" * new_padding new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages)
python
def _inject(self, fileobj, padding_func): """Write tag data into the Vorbis comment packet/page.""" # Find the old pages in the file; we'll need to remove them, # plus grab any stray setup packet data out of them. fileobj.seek(0) page = OggPage(fileobj) while not page.packets[0].startswith(b"\x03vorbis"): page = OggPage(fileobj) old_pages = [page] while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): page = OggPage(fileobj) if page.serial == old_pages[0].serial: old_pages.append(page) packets = OggPage.to_packets(old_pages, strict=False) content_size = get_size(fileobj) - len(packets[0]) # approx vcomment_data = b"\x03vorbis" + self.write() padding_left = len(packets[0]) - len(vcomment_data) info = PaddingInfo(padding_left, content_size) new_padding = info._get_padding(padding_func) # Set the new comment packet. packets[0] = vcomment_data + b"\x00" * new_padding new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages)
['def', '_inject', '(', 'self', ',', 'fileobj', ',', 'padding_func', ')', ':', "# Find the old pages in the file; we'll need to remove them,", '# plus grab any stray setup packet data out of them.', 'fileobj', '.', 'seek', '(', '0', ')', 'page', '=', 'OggPage', '(', 'fileobj', ')', 'while', 'not', 'page', '.', 'packets', '[', '0', ']', '.', 'startswith', '(', 'b"\\x03vorbis"', ')', ':', 'page', '=', 'OggPage', '(', 'fileobj', ')', 'old_pages', '=', '[', 'page', ']', 'while', 'not', '(', 'old_pages', '[', '-', '1', ']', '.', 'complete', 'or', 'len', '(', 'old_pages', '[', '-', '1', ']', '.', 'packets', ')', '>', '1', ')', ':', 'page', '=', 'OggPage', '(', 'fileobj', ')', 'if', 'page', '.', 'serial', '==', 'old_pages', '[', '0', ']', '.', 'serial', ':', 'old_pages', '.', 'append', '(', 'page', ')', 'packets', '=', 'OggPage', '.', 'to_packets', '(', 'old_pages', ',', 'strict', '=', 'False', ')', 'content_size', '=', 'get_size', '(', 'fileobj', ')', '-', 'len', '(', 'packets', '[', '0', ']', ')', '# approx', 'vcomment_data', '=', 'b"\\x03vorbis"', '+', 'self', '.', 'write', '(', ')', 'padding_left', '=', 'len', '(', 'packets', '[', '0', ']', ')', '-', 'len', '(', 'vcomment_data', ')', 'info', '=', 'PaddingInfo', '(', 'padding_left', ',', 'content_size', ')', 'new_padding', '=', 'info', '.', '_get_padding', '(', 'padding_func', ')', '# Set the new comment packet.', 'packets', '[', '0', ']', '=', 'vcomment_data', '+', 'b"\\x00"', '*', 'new_padding', 'new_pages', '=', 'OggPage', '.', '_from_packets_try_preserve', '(', 'packets', ',', 'old_pages', ')', 'OggPage', '.', 'replace', '(', 'fileobj', ',', 'old_pages', ',', 'new_pages', ')']
Write tag data into the Vorbis comment packet/page.
['Write', 'tag', 'data', 'into', 'the', 'Vorbis', 'comment', 'packet', '/', 'page', '.']
train
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/oggvorbis.py#L111-L140
2,544
facelessuser/backrefs
backrefs/uniprops/__init__.py
get_nfkd_quick_check_property
def get_nfkd_quick_check_property(value, is_bytes=False): """Get `NFKD QUICK CHECK` property.""" obj = unidata.ascii_nfkd_quick_check if is_bytes else unidata.unicode_nfkd_quick_check if value.startswith('^'): negated = value[1:] value = '^' + unidata.unicode_alias['nfkdquickcheck'].get(negated, negated) else: value = unidata.unicode_alias['nfkdquickcheck'].get(value, value) return obj[value]
python
def get_nfkd_quick_check_property(value, is_bytes=False): """Get `NFKD QUICK CHECK` property.""" obj = unidata.ascii_nfkd_quick_check if is_bytes else unidata.unicode_nfkd_quick_check if value.startswith('^'): negated = value[1:] value = '^' + unidata.unicode_alias['nfkdquickcheck'].get(negated, negated) else: value = unidata.unicode_alias['nfkdquickcheck'].get(value, value) return obj[value]
['def', 'get_nfkd_quick_check_property', '(', 'value', ',', 'is_bytes', '=', 'False', ')', ':', 'obj', '=', 'unidata', '.', 'ascii_nfkd_quick_check', 'if', 'is_bytes', 'else', 'unidata', '.', 'unicode_nfkd_quick_check', 'if', 'value', '.', 'startswith', '(', "'^'", ')', ':', 'negated', '=', 'value', '[', '1', ':', ']', 'value', '=', "'^'", '+', 'unidata', '.', 'unicode_alias', '[', "'nfkdquickcheck'", ']', '.', 'get', '(', 'negated', ',', 'negated', ')', 'else', ':', 'value', '=', 'unidata', '.', 'unicode_alias', '[', "'nfkdquickcheck'", ']', '.', 'get', '(', 'value', ',', 'value', ')', 'return', 'obj', '[', 'value', ']']
Get `NFKD QUICK CHECK` property.
['Get', 'NFKD', 'QUICK', 'CHECK', 'property', '.']
train
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/uniprops/__init__.py#L257-L268
2,545
intuition-io/intuition
intuition/api/portfolio.py
PortfolioFactory.update
def update(self, portfolio, date, perfs=None): ''' Actualizes the portfolio universe with the algo state ''' # Make the manager aware of current simulation self.portfolio = portfolio self.perfs = perfs self.date = date
python
def update(self, portfolio, date, perfs=None): ''' Actualizes the portfolio universe with the algo state ''' # Make the manager aware of current simulation self.portfolio = portfolio self.perfs = perfs self.date = date
['def', 'update', '(', 'self', ',', 'portfolio', ',', 'date', ',', 'perfs', '=', 'None', ')', ':', '# Make the manager aware of current simulation', 'self', '.', 'portfolio', '=', 'portfolio', 'self', '.', 'perfs', '=', 'perfs', 'self', '.', 'date', '=', 'date']
Actualizes the portfolio universe with the algo state
['Actualizes', 'the', 'portfolio', 'universe', 'with', 'the', 'algo', 'state']
train
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/api/portfolio.py#L77-L84
2,546
sanger-pathogens/ariba
ariba/assembly_compare.py
AssemblyCompare._ref_covered_by_at_least_one_full_length_contig
def _ref_covered_by_at_least_one_full_length_contig(nucmer_hits, percent_threshold, max_nt_extend): '''Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self._parse_nucmer_coords_file.''' for l in nucmer_hits.values(): for hit in l: if ( (2 * max_nt_extend) + len(hit.ref_coords()) ) / hit.ref_length >= percent_threshold: return True return False
python
def _ref_covered_by_at_least_one_full_length_contig(nucmer_hits, percent_threshold, max_nt_extend): '''Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self._parse_nucmer_coords_file.''' for l in nucmer_hits.values(): for hit in l: if ( (2 * max_nt_extend) + len(hit.ref_coords()) ) / hit.ref_length >= percent_threshold: return True return False
['def', '_ref_covered_by_at_least_one_full_length_contig', '(', 'nucmer_hits', ',', 'percent_threshold', ',', 'max_nt_extend', ')', ':', 'for', 'l', 'in', 'nucmer_hits', '.', 'values', '(', ')', ':', 'for', 'hit', 'in', 'l', ':', 'if', '(', '(', '2', '*', 'max_nt_extend', ')', '+', 'len', '(', 'hit', '.', 'ref_coords', '(', ')', ')', ')', '/', 'hit', '.', 'ref_length', '>=', 'percent_threshold', ':', 'return', 'True', 'return', 'False']
Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self._parse_nucmer_coords_file.
['Returns', 'true', 'iff', 'there', 'exists', 'a', 'contig', 'that', 'completely', 'covers', 'the', 'reference', 'sequence', 'nucmer_hits', '=', 'hits', 'made', 'by', 'self', '.', '_parse_nucmer_coords_file', '.']
train
https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L352-L360
2,547
sdispater/cachy
cachy/tagged_cache.py
TaggedCache.decrement
def decrement(self, key, value=1): """ Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool """ return self._store.decrement(self.tagged_item_key(key), value)
python
def decrement(self, key, value=1): """ Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool """ return self._store.decrement(self.tagged_item_key(key), value)
['def', 'decrement', '(', 'self', ',', 'key', ',', 'value', '=', '1', ')', ':', 'self', '.', '_store', '.', 'decrement', '(', 'self', '.', 'tagged_item_key', '(', 'key', ')', ',', 'value', ')']
Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool
['Decrement', 'the', 'value', 'of', 'an', 'item', 'in', 'the', 'cache', '.']
train
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/tagged_cache.py#L110-L122
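A minimal sketch of the tag-namespacing idea behind TaggedCache.decrement, using a plain dict as a stand-in for cachy's real store backends. The DictStore class, the TaggedCacheSketch name and the "<tag>:<key>" prefix scheme are assumptions made up for the demo; note also that the record's method does not return the decremented value even though its docstring advertises an int/bool return, so the sketch adds the return explicitly.

    # Illustrative only: a dict-backed store standing in for a real cache backend.
    class DictStore:
        def __init__(self):
            self._data = {}

        def decrement(self, key, value=1):
            # Missing keys start at 0, mirroring typical cache semantics.
            self._data[key] = self._data.get(key, 0) - value
            return self._data[key]

    class TaggedCacheSketch:
        def __init__(self, store, tag):
            self._store = store
            self._tag = tag

        def tagged_item_key(self, key):
            # Hypothetical namespacing scheme: "<tag>:<key>".
            return '{}:{}'.format(self._tag, key)

        def decrement(self, key, value=1):
            # Unlike the record above, return the new value to match the docstring.
            return self._store.decrement(self.tagged_item_key(key), value)

    cache = TaggedCacheSketch(DictStore(), tag='users')
    print(cache.decrement('hits', 3))   # -3
    print(cache.decrement('hits'))      # -4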
2,548
openstack/horizon
openstack_dashboard/dashboards/project/vg_snapshots/tables.py
GroupSnapshotsFilterAction.filter
def filter(self, table, vg_snapshots, filter_string): """Naive case-insensitive search.""" query = filter_string.lower() return [vg_snapshot for vg_snapshot in vg_snapshots if query in vg_snapshot.name.lower()]
python
def filter(self, table, vg_snapshots, filter_string): """Naive case-insensitive search.""" query = filter_string.lower() return [vg_snapshot for vg_snapshot in vg_snapshots if query in vg_snapshot.name.lower()]
['def', 'filter', '(', 'self', ',', 'table', ',', 'vg_snapshots', ',', 'filter_string', ')', ':', 'query', '=', 'filter_string', '.', 'lower', '(', ')', 'return', '[', 'vg_snapshot', 'for', 'vg_snapshot', 'in', 'vg_snapshots', 'if', 'query', 'in', 'vg_snapshot', '.', 'name', '.', 'lower', '(', ')', ']']
Naive case-insensitive search.
['Naive', 'case', '-', 'insensitive', 'search', '.']
train
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/vg_snapshots/tables.py#L85-L89
2,549
twisted/mantissa
xmantissa/website.py
PrefixURLMixin.produceResource
def produceResource(self, request, segments, webViewer): """ Return a C{(resource, subsegments)} tuple or None, depending on whether I wish to return an L{IResource} provider for the given set of segments or not. """ def thunk(): cr = getattr(self, 'createResource', None) if cr is not None: return cr() else: return self.createResourceWith(webViewer) return self._produceIt(segments, thunk)
python
def produceResource(self, request, segments, webViewer): """ Return a C{(resource, subsegments)} tuple or None, depending on whether I wish to return an L{IResource} provider for the given set of segments or not. """ def thunk(): cr = getattr(self, 'createResource', None) if cr is not None: return cr() else: return self.createResourceWith(webViewer) return self._produceIt(segments, thunk)
['def', 'produceResource', '(', 'self', ',', 'request', ',', 'segments', ',', 'webViewer', ')', ':', 'def', 'thunk', '(', ')', ':', 'cr', '=', 'getattr', '(', 'self', ',', "'createResource'", ',', 'None', ')', 'if', 'cr', 'is', 'not', 'None', ':', 'return', 'cr', '(', ')', 'else', ':', 'return', 'self', '.', 'createResourceWith', '(', 'webViewer', ')', 'return', 'self', '.', '_produceIt', '(', 'segments', ',', 'thunk', ')']
Return a C{(resource, subsegments)} tuple or None, depending on whether I wish to return an L{IResource} provider for the given set of segments or not.
['Return', 'a', 'C', '{', '(', 'resource', 'subsegments', ')', '}', 'tuple', 'or', 'None', 'depending', 'on', 'whether', 'I', 'wish', 'to', 'return', 'an', 'L', '{', 'IResource', '}', 'provider', 'for', 'the', 'given', 'set', 'of', 'segments', 'or', 'not', '.']
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/website.py#L169-L181
2,550
bspaans/python-mingus
mingus/core/chords.py
triad
def triad(note, key): """Return the triad on note in key as a list. Examples: >>> triad('E', 'C') ['E', 'G', 'B'] >>> triad('E', 'B') ['E', 'G#', 'B'] """ return [note, intervals.third(note, key), intervals.fifth(note, key)]
python
def triad(note, key): """Return the triad on note in key as a list. Examples: >>> triad('E', 'C') ['E', 'G', 'B'] >>> triad('E', 'B') ['E', 'G#', 'B'] """ return [note, intervals.third(note, key), intervals.fifth(note, key)]
['def', 'triad', '(', 'note', ',', 'key', ')', ':', 'return', '[', 'note', ',', 'intervals', '.', 'third', '(', 'note', ',', 'key', ')', ',', 'intervals', '.', 'fifth', '(', 'note', ',', 'key', ')', ']']
Return the triad on note in key as a list. Examples: >>> triad('E', 'C') ['E', 'G', 'B'] >>> triad('E', 'B') ['E', 'G#', 'B']
['Return', 'the', 'triad', 'on', 'note', 'in', 'key', 'as', 'a', 'list', '.']
train
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/chords.py#L165-L174
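The triad function stacks a diatonic third and fifth on top of the given note. The following standalone sketch shows the same idea without mingus' intervals module; the hard-coded C major scale and the triad_in_scale helper are assumptions for illustration only.

    # Build a diatonic triad by picking scale degrees 1, 3 and 5 above the root.
    C_MAJOR = ['C', 'D', 'E', 'F', 'G', 'A', 'B']

    def triad_in_scale(note, scale):
        i = scale.index(note)
        # Wrap around the scale to stack a third and a fifth above the root.
        return [scale[i], scale[(i + 2) % len(scale)], scale[(i + 4) % len(scale)]]

    print(triad_in_scale('E', C_MAJOR))  # ['E', 'G', 'B'], matching the docstring example
    print(triad_in_scale('C', C_MAJOR))  # ['C', 'E', 'G']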
2,551
rehandalal/therapist
therapist/utils/filesystem.py
list_files
def list_files(path): """Recursively collects a list of files at a path.""" files = [] if os.path.isdir(path): for stats in os.walk(path): for f in stats[2]: files.append(os.path.join(stats[0], f)) elif os.path.isfile(path): files = [path] return files
python
def list_files(path): """Recursively collects a list of files at a path.""" files = [] if os.path.isdir(path): for stats in os.walk(path): for f in stats[2]: files.append(os.path.join(stats[0], f)) elif os.path.isfile(path): files = [path] return files
['def', 'list_files', '(', 'path', ')', ':', 'files', '=', '[', ']', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'path', ')', ':', 'for', 'stats', 'in', 'os', '.', 'walk', '(', 'path', ')', ':', 'for', 'f', 'in', 'stats', '[', '2', ']', ':', 'files', '.', 'append', '(', 'os', '.', 'path', '.', 'join', '(', 'stats', '[', '0', ']', ',', 'f', ')', ')', 'elif', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'files', '=', '[', 'path', ']', 'return', 'files']
Recursively collects a list of files at a path.
['Recursively', 'collects', 'a', 'list', 'of', 'files', 'at', 'a', 'path', '.']
train
https://github.com/rehandalal/therapist/blob/1995a7e396eea2ec8685bb32a779a4110b459b1f/therapist/utils/filesystem.py#L14-L23
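A quick, self-contained check of the os.walk pattern used in list_files: build a throwaway directory tree and collect the file paths. The tree layout is invented for the demo, and the function is re-declared locally so the snippet runs without the therapist package.

    import os
    import tempfile

    def list_files(path):
        """Recursively collect files under path; a single file returns itself."""
        files = []
        if os.path.isdir(path):
            for dirpath, _dirnames, filenames in os.walk(path):
                for name in filenames:
                    files.append(os.path.join(dirpath, name))
        elif os.path.isfile(path):
            files = [path]
        return files

    with tempfile.TemporaryDirectory() as root:
        os.makedirs(os.path.join(root, 'sub'))
        for rel in ('a.txt', os.path.join('sub', 'b.txt')):
            with open(os.path.join(root, rel), 'w') as fh:
                fh.write('x')
        print(sorted(list_files(root)))  # two paths: .../a.txt and .../sub/b.txt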
2,552
espressif/esptool
ecdsa/numbertheory.py
order_mod
def order_mod( x, m ): """Return the order of x in the multiplicative group mod m. """ # Warning: this implementation is not very clever, and will # take a long time if m is very large. if m <= 1: return 0 assert gcd( x, m ) == 1 z = x result = 1 while z != 1: z = ( z * x ) % m result = result + 1 return result
python
def order_mod( x, m ): """Return the order of x in the multiplicative group mod m. """ # Warning: this implementation is not very clever, and will # take a long time if m is very large. if m <= 1: return 0 assert gcd( x, m ) == 1 z = x result = 1 while z != 1: z = ( z * x ) % m result = result + 1 return result
['def', 'order_mod', '(', 'x', ',', 'm', ')', ':', '# Warning: this implementation is not very clever, and will', '# take a long time if m is very large.', 'if', 'm', '<=', '1', ':', 'return', '0', 'assert', 'gcd', '(', 'x', ',', 'm', ')', '==', '1', 'z', '=', 'x', 'result', '=', '1', 'while', 'z', '!=', '1', ':', 'z', '=', '(', 'z', '*', 'x', ')', '%', 'm', 'result', '=', 'result', '+', '1', 'return', 'result']
Return the order of x in the multiplicative group mod m.
['Return', 'the', 'order', 'of', 'x', 'in', 'the', 'multiplicative', 'group', 'mod', 'm', '.']
train
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/ecdsa/numbertheory.py#L346-L362
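Worked example for order_mod: the powers of 3 modulo 7 cycle through 3, 2, 6, 4, 5, 1, so the multiplicative order of 3 mod 7 is 6. A standalone version of the same brute-force loop using math.gcd (re-declared here so the snippet needs no ecdsa import):

    from math import gcd

    def order_mod(x, m):
        """Multiplicative order of x modulo m (brute force, fine for small m)."""
        if m <= 1:
            return 0
        assert gcd(x, m) == 1, "x must be invertible mod m"
        z, result = x % m, 1
        while z != 1:
            z = (z * x) % m
            result += 1
        return result

    print(order_mod(3, 7))   # 6
    print(order_mod(2, 7))   # 3, since 2**3 = 8 == 1 (mod 7)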
2,553
pyvec/pyvodb
pyvodb/tables.py
Event.start
def start(self): """The event's start time, as a timezone-aware datetime object""" if self.start_time is None: time = datetime.time(hour=19, tzinfo=CET) else: time = self.start_time.replace(tzinfo=CET) return datetime.datetime.combine(self.date, time)
python
def start(self): """The event's start time, as a timezone-aware datetime object""" if self.start_time is None: time = datetime.time(hour=19, tzinfo=CET) else: time = self.start_time.replace(tzinfo=CET) return datetime.datetime.combine(self.date, time)
['def', 'start', '(', 'self', ')', ':', 'if', 'self', '.', 'start_time', 'is', 'None', ':', 'time', '=', 'datetime', '.', 'time', '(', 'hour', '=', '19', ',', 'tzinfo', '=', 'CET', ')', 'else', ':', 'time', '=', 'self', '.', 'start_time', '.', 'replace', '(', 'tzinfo', '=', 'CET', ')', 'return', 'datetime', '.', 'datetime', '.', 'combine', '(', 'self', '.', 'date', ',', 'time', ')']
The event's start time, as a timezone-aware datetime object
['The', 'event', 's', 'start', 'time', 'as', 'a', 'timezone', '-', 'aware', 'datetime', 'object']
train
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/tables.py#L103-L109
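The point of Event.start is that datetime.combine carries the tzinfo of the time component, so the result is timezone-aware. A small demonstration with a fixed UTC+1 offset standing in for the CET constant used in the record (the date and name are made up; this ignores daylight saving):

    import datetime

    # Stand-in for the record's CET constant.
    CET = datetime.timezone(datetime.timedelta(hours=1), 'CET')

    date = datetime.date(2024, 3, 14)
    default_time = datetime.time(hour=19, tzinfo=CET)

    start = datetime.datetime.combine(date, default_time)
    print(start)           # 2024-03-14 19:00:00+01:00
    print(start.tzinfo)    # CET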
2,554
mcs07/ChemDataExtractor
chemdataextractor/cli/pos.py
evaluate_all
def evaluate_all(ctx, model): """Evaluate POS taggers on WSJ and GENIA.""" click.echo('chemdataextractor.pos.evaluate_all') click.echo('Model: %s' % model) ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True) ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True) ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
python
def evaluate_all(ctx, model): """Evaluate POS taggers on WSJ and GENIA.""" click.echo('chemdataextractor.pos.evaluate_all') click.echo('Model: %s' % model) ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True) ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True) ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
['def', 'evaluate_all', '(', 'ctx', ',', 'model', ')', ':', 'click', '.', 'echo', '(', "'chemdataextractor.pos.evaluate_all'", ')', 'click', '.', 'echo', '(', "'Model: %s'", '%', 'model', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj_nocluster.pickle'", '%', 'model', ',', 'corpus', '=', "'wsj'", ',', 'clusters', '=', 'False', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj_nocluster.pickle'", '%', 'model', ',', 'corpus', '=', "'genia'", ',', 'clusters', '=', 'False', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj.pickle'", '%', 'model', ',', 'corpus', '=', "'wsj'", ',', 'clusters', '=', 'True', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj.pickle'", '%', 'model', ',', 'corpus', '=', "'genia'", ',', 'clusters', '=', 'True', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_genia_nocluster.pickle'", '%', 'model', ',', 'corpus', '=', "'wsj'", ',', 'clusters', '=', 'False', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_genia_nocluster.pickle'", '%', 'model', ',', 'corpus', '=', "'genia'", ',', 'clusters', '=', 'False', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_genia.pickle'", '%', 'model', ',', 'corpus', '=', "'wsj'", ',', 'clusters', '=', 'True', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_genia.pickle'", '%', 'model', ',', 'corpus', '=', "'genia'", ',', 'clusters', '=', 'True', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj_genia_nocluster.pickle'", '%', 'model', ',', 'corpus', '=', "'wsj'", ',', 'clusters', '=', 'False', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj_genia_nocluster.pickle'", '%', 'model', ',', 'corpus', '=', "'genia'", ',', 'clusters', '=', 'False', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj_genia.pickle'", '%', 'model', ',', 'corpus', '=', "'wsj'", ',', 'clusters', '=', 'True', ')', 'ctx', '.', 'invoke', '(', 'evaluate', ',', 'model', '=', "'%s_wsj_genia.pickle'", '%', 'model', ',', 'corpus', '=', "'genia'", ',', 'clusters', '=', 'True', ')']
Evaluate POS taggers on WSJ and GENIA.
['Evaluate', 'POS', 'taggers', 'on', 'WSJ', 'and', 'GENIA', '.']
train
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/pos.py#L50-L65
2,555
sdispater/cachy
cachy/stores/redis_store.py
RedisStore.get
def get(self, key): """ Retrieve an item from the cache by key. :param key: The cache key :type key: str :return: The cache value """ value = self._redis.get(self._prefix + key) if value is not None: return self.unserialize(value)
python
def get(self, key): """ Retrieve an item from the cache by key. :param key: The cache key :type key: str :return: The cache value """ value = self._redis.get(self._prefix + key) if value is not None: return self.unserialize(value)
['def', 'get', '(', 'self', ',', 'key', ')', ':', 'value', '=', 'self', '.', '_redis', '.', 'get', '(', 'self', '.', '_prefix', '+', 'key', ')', 'if', 'value', 'is', 'not', 'None', ':', 'return', 'self', '.', 'unserialize', '(', 'value', ')']
Retrieve an item from the cache by key. :param key: The cache key :type key: str :return: The cache value
['Retrieve', 'an', 'item', 'from', 'the', 'cache', 'by', 'key', '.']
train
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L27-L39
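The RedisStore.get pattern is: namespace the key with a prefix, fetch, and only unserialize when something came back (a miss falls through and returns None implicitly). A sketch with a plain dict and json standing in for the Redis client and cachy's serializer; FakeRedis and PrefixedStore are inventions for the demo.

    import json

    class FakeRedis:
        """Minimal stand-in for a Redis client: get() returns None on a miss."""
        def __init__(self):
            self._data = {}
        def set(self, key, value):
            self._data[key] = value
        def get(self, key):
            return self._data.get(key)

    class PrefixedStore:
        def __init__(self, redis, prefix):
            self._redis = redis
            self._prefix = prefix
        def unserialize(self, value):
            return json.loads(value)
        def get(self, key):
            value = self._redis.get(self._prefix + key)
            if value is not None:
                return self.unserialize(value)

    r = FakeRedis()
    r.set('app:user:1', json.dumps({'name': 'ada'}))
    store = PrefixedStore(r, prefix='app:')
    print(store.get('user:1'))   # {'name': 'ada'}
    print(store.get('missing'))  # None (nothing to unserialize)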
2,556
anchore/anchore
anchore/util/resources.py
ResourceCache._flush
def _flush(self): """ Flush metadata to the backing file :return: """ with open(self.metadata_file, 'w') as f: json.dump(self.metadata, f)
python
def _flush(self): """ Flush metadata to the backing file :return: """ with open(self.metadata_file, 'w') as f: json.dump(self.metadata, f)
['def', '_flush', '(', 'self', ')', ':', 'with', 'open', '(', 'self', '.', 'metadata_file', ',', "'w'", ')', 'as', 'f', ':', 'json', '.', 'dump', '(', 'self', '.', 'metadata', ',', 'f', ')']
Flush metadata to the backing file :return:
['Flush', 'metadata', 'to', 'the', 'backing', 'file', ':', 'return', ':']
train
https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/util/resources.py#L317-L323
2,557
jam31118/vis
vis/layout.py
get_text_position
def get_text_position(fig, ax, ha='left', va='top', pad_scale=1.0): """Return text position inside of the given axis""" ## Check and preprocess input arguments try: pad_scale = float(pad_scale) except: raise TypeError("'pad_scale should be of type 'float'") for arg in [va, ha]: assert type(arg) is str arg = arg.lower() # Make it lowercase to prevent case problem. ## Get axis size in inches ax_height, ax_width = get_ax_size_in_inch(fig, ax) ## Construct inversion factor from inch to plot coordinate length_x = ax.get_xlim()[1] - ax.get_xlim()[0] length_y = ax.get_ylim()[1] - ax.get_ylim()[0] inch2coord_x = length_x / ax_width inch2coord_y = length_y / ax_height ## Set padding size relative to the text size #pad_inch = text_bbox_inch.height * pad_scale #pad_inch = fontsize_points * point2inch * pad_scale ax_length_geom_average = (ax_height * ax_width) ** 0.5 pad_inch = ax_length_geom_average * 0.03 * pad_scale pad_inch_x, pad_inch_y = pad_inch, pad_inch pad_coord_x = pad_inch_x * inch2coord_x pad_coord_y = pad_inch_y * inch2coord_y if ha == 'left': pos_x = ax.get_xlim()[0] + pad_coord_x elif ha == 'right': pos_x = ax.get_xlim()[1] - pad_coord_x else: raise Exception("Unsupported value for 'ha'") if va in ['top','up','upper']: pos_y = ax.get_ylim()[1] - pad_coord_y elif va in ['bottom','down','lower']: pos_y = ax.get_ylim()[0] + pad_coord_y else: raise Exception("Unsupported value for 'va'") return pos_x, pos_y
python
def get_text_position(fig, ax, ha='left', va='top', pad_scale=1.0): """Return text position inside of the given axis""" ## Check and preprocess input arguments try: pad_scale = float(pad_scale) except: raise TypeError("'pad_scale should be of type 'float'") for arg in [va, ha]: assert type(arg) is str arg = arg.lower() # Make it lowercase to prevent case problem. ## Get axis size in inches ax_height, ax_width = get_ax_size_in_inch(fig, ax) ## Construct inversion factor from inch to plot coordinate length_x = ax.get_xlim()[1] - ax.get_xlim()[0] length_y = ax.get_ylim()[1] - ax.get_ylim()[0] inch2coord_x = length_x / ax_width inch2coord_y = length_y / ax_height ## Set padding size relative to the text size #pad_inch = text_bbox_inch.height * pad_scale #pad_inch = fontsize_points * point2inch * pad_scale ax_length_geom_average = (ax_height * ax_width) ** 0.5 pad_inch = ax_length_geom_average * 0.03 * pad_scale pad_inch_x, pad_inch_y = pad_inch, pad_inch pad_coord_x = pad_inch_x * inch2coord_x pad_coord_y = pad_inch_y * inch2coord_y if ha == 'left': pos_x = ax.get_xlim()[0] + pad_coord_x elif ha == 'right': pos_x = ax.get_xlim()[1] - pad_coord_x else: raise Exception("Unsupported value for 'ha'") if va in ['top','up','upper']: pos_y = ax.get_ylim()[1] - pad_coord_y elif va in ['bottom','down','lower']: pos_y = ax.get_ylim()[0] + pad_coord_y else: raise Exception("Unsupported value for 'va'") return pos_x, pos_y
['def', 'get_text_position', '(', 'fig', ',', 'ax', ',', 'ha', '=', "'left'", ',', 'va', '=', "'top'", ',', 'pad_scale', '=', '1.0', ')', ':', '## Check and preprocess input arguments', 'try', ':', 'pad_scale', '=', 'float', '(', 'pad_scale', ')', 'except', ':', 'raise', 'TypeError', '(', '"\'pad_scale should be of type \'float\'"', ')', 'for', 'arg', 'in', '[', 'va', ',', 'ha', ']', ':', 'assert', 'type', '(', 'arg', ')', 'is', 'str', 'arg', '=', 'arg', '.', 'lower', '(', ')', '# Make it lowercase to prevent case problem.', '## Get axis size in inches', 'ax_height', ',', 'ax_width', '=', 'get_ax_size_in_inch', '(', 'fig', ',', 'ax', ')', '## Construct inversion factor from inch to plot coordinate', 'length_x', '=', 'ax', '.', 'get_xlim', '(', ')', '[', '1', ']', '-', 'ax', '.', 'get_xlim', '(', ')', '[', '0', ']', 'length_y', '=', 'ax', '.', 'get_ylim', '(', ')', '[', '1', ']', '-', 'ax', '.', 'get_ylim', '(', ')', '[', '0', ']', 'inch2coord_x', '=', 'length_x', '/', 'ax_width', 'inch2coord_y', '=', 'length_y', '/', 'ax_height', '## Set padding size relative to the text size', '#pad_inch = text_bbox_inch.height * pad_scale', '#pad_inch = fontsize_points * point2inch * pad_scale', 'ax_length_geom_average', '=', '(', 'ax_height', '*', 'ax_width', ')', '**', '0.5', 'pad_inch', '=', 'ax_length_geom_average', '*', '0.03', '*', 'pad_scale', 'pad_inch_x', ',', 'pad_inch_y', '=', 'pad_inch', ',', 'pad_inch', 'pad_coord_x', '=', 'pad_inch_x', '*', 'inch2coord_x', 'pad_coord_y', '=', 'pad_inch_y', '*', 'inch2coord_y', 'if', 'ha', '==', "'left'", ':', 'pos_x', '=', 'ax', '.', 'get_xlim', '(', ')', '[', '0', ']', '+', 'pad_coord_x', 'elif', 'ha', '==', "'right'", ':', 'pos_x', '=', 'ax', '.', 'get_xlim', '(', ')', '[', '1', ']', '-', 'pad_coord_x', 'else', ':', 'raise', 'Exception', '(', '"Unsupported value for \'ha\'"', ')', 'if', 'va', 'in', '[', "'top'", ',', "'up'", ',', "'upper'", ']', ':', 'pos_y', '=', 'ax', '.', 'get_ylim', '(', ')', '[', '1', ']', '-', 'pad_coord_y', 'elif', 'va', 'in', '[', "'bottom'", ',', "'down'", ',', "'lower'", ']', ':', 'pos_y', '=', 'ax', '.', 'get_ylim', '(', ')', '[', '0', ']', '+', 'pad_coord_y', 'else', ':', 'raise', 'Exception', '(', '"Unsupported value for \'va\'"', ')', 'return', 'pos_x', ',', 'pos_y']
Return text position inside of the given axis
['Return', 'text', 'position', 'inside', 'of', 'the', 'given', 'axis']
train
https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/layout.py#L81-L118
2,558
kgori/treeCl
treeCl/parutils.py
processpool_map
def processpool_map(task, args, message, concurrency, batchsize=1, nargs=None): """ See http://stackoverflow.com/a/16071616 """ njobs = get_njobs(nargs, args) show_progress = bool(message) batches = grouper(batchsize, tupleise(args)) def batched_task(*batch): return [task(*job) for job in batch] if show_progress: message += ' (PP:{}w:{}b)'.format(concurrency, batchsize) pbar = setup_progressbar(message, njobs, simple_progress=True) pbar.start() q_in = multiprocessing.Queue() # Should I limit either queue size? Limiting in-queue q_out = multiprocessing.Queue() # increases time taken to send jobs, makes pbar less useful proc = [multiprocessing.Process(target=fun, args=(batched_task, q_in, q_out)) for _ in range(concurrency)] for p in proc: p.daemon = True p.start() sent = [q_in.put((i, x)) for (i, x) in enumerate(batches)] [q_in.put((None, None)) for _ in range(concurrency)] res = [] completed_count = 0 for _ in range(len(sent)): result = get_from_queue(q_out) res.append(result) completed_count += len(result[1]) if show_progress: pbar.update(completed_count) [p.join() for p in proc] if show_progress: pbar.finish() return flatten_list([x for (i, x) in sorted(res)])
python
def processpool_map(task, args, message, concurrency, batchsize=1, nargs=None): """ See http://stackoverflow.com/a/16071616 """ njobs = get_njobs(nargs, args) show_progress = bool(message) batches = grouper(batchsize, tupleise(args)) def batched_task(*batch): return [task(*job) for job in batch] if show_progress: message += ' (PP:{}w:{}b)'.format(concurrency, batchsize) pbar = setup_progressbar(message, njobs, simple_progress=True) pbar.start() q_in = multiprocessing.Queue() # Should I limit either queue size? Limiting in-queue q_out = multiprocessing.Queue() # increases time taken to send jobs, makes pbar less useful proc = [multiprocessing.Process(target=fun, args=(batched_task, q_in, q_out)) for _ in range(concurrency)] for p in proc: p.daemon = True p.start() sent = [q_in.put((i, x)) for (i, x) in enumerate(batches)] [q_in.put((None, None)) for _ in range(concurrency)] res = [] completed_count = 0 for _ in range(len(sent)): result = get_from_queue(q_out) res.append(result) completed_count += len(result[1]) if show_progress: pbar.update(completed_count) [p.join() for p in proc] if show_progress: pbar.finish() return flatten_list([x for (i, x) in sorted(res)])
['def', 'processpool_map', '(', 'task', ',', 'args', ',', 'message', ',', 'concurrency', ',', 'batchsize', '=', '1', ',', 'nargs', '=', 'None', ')', ':', 'njobs', '=', 'get_njobs', '(', 'nargs', ',', 'args', ')', 'show_progress', '=', 'bool', '(', 'message', ')', 'batches', '=', 'grouper', '(', 'batchsize', ',', 'tupleise', '(', 'args', ')', ')', 'def', 'batched_task', '(', '*', 'batch', ')', ':', 'return', '[', 'task', '(', '*', 'job', ')', 'for', 'job', 'in', 'batch', ']', 'if', 'show_progress', ':', 'message', '+=', "' (PP:{}w:{}b)'", '.', 'format', '(', 'concurrency', ',', 'batchsize', ')', 'pbar', '=', 'setup_progressbar', '(', 'message', ',', 'njobs', ',', 'simple_progress', '=', 'True', ')', 'pbar', '.', 'start', '(', ')', 'q_in', '=', 'multiprocessing', '.', 'Queue', '(', ')', '# Should I limit either queue size? Limiting in-queue', 'q_out', '=', 'multiprocessing', '.', 'Queue', '(', ')', '# increases time taken to send jobs, makes pbar less useful', 'proc', '=', '[', 'multiprocessing', '.', 'Process', '(', 'target', '=', 'fun', ',', 'args', '=', '(', 'batched_task', ',', 'q_in', ',', 'q_out', ')', ')', 'for', '_', 'in', 'range', '(', 'concurrency', ')', ']', 'for', 'p', 'in', 'proc', ':', 'p', '.', 'daemon', '=', 'True', 'p', '.', 'start', '(', ')', 'sent', '=', '[', 'q_in', '.', 'put', '(', '(', 'i', ',', 'x', ')', ')', 'for', '(', 'i', ',', 'x', ')', 'in', 'enumerate', '(', 'batches', ')', ']', '[', 'q_in', '.', 'put', '(', '(', 'None', ',', 'None', ')', ')', 'for', '_', 'in', 'range', '(', 'concurrency', ')', ']', 'res', '=', '[', ']', 'completed_count', '=', '0', 'for', '_', 'in', 'range', '(', 'len', '(', 'sent', ')', ')', ':', 'result', '=', 'get_from_queue', '(', 'q_out', ')', 'res', '.', 'append', '(', 'result', ')', 'completed_count', '+=', 'len', '(', 'result', '[', '1', ']', ')', 'if', 'show_progress', ':', 'pbar', '.', 'update', '(', 'completed_count', ')', '[', 'p', '.', 'join', '(', ')', 'for', 'p', 'in', 'proc', ']', 'if', 'show_progress', ':', 'pbar', '.', 'finish', '(', ')', 'return', 'flatten_list', '(', '[', 'x', 'for', '(', 'i', ',', 'x', ')', 'in', 'sorted', '(', 'res', ')', ']', ')']
See http://stackoverflow.com/a/16071616
['See', 'http', ':', '//', 'stackoverflow', '.', 'com', '/', 'a', '/', '16071616']
train
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/parutils.py#L177-L214
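The core of processpool_map is the classic two-queue worker pool: enumerate the jobs so results can be re-sorted, push one (None, None) sentinel per worker to shut them down, and drain exactly len(sent) results. The following stripped-down sketch shows just that pattern; the square task, the sizes and the concurrency value are made up, and the progress bar and batching from the record are omitted.

    import multiprocessing

    def square(x):
        return x * x

    def worker(q_in, q_out):
        # Pull (index, payload) pairs until the (None, None) sentinel arrives.
        while True:
            i, x = q_in.get()
            if i is None:
                break
            q_out.put((i, square(x)))

    if __name__ == '__main__':
        concurrency = 2
        q_in, q_out = multiprocessing.Queue(), multiprocessing.Queue()
        procs = [multiprocessing.Process(target=worker, args=(q_in, q_out))
                 for _ in range(concurrency)]
        for p in procs:
            p.daemon = True
            p.start()

        sent = [q_in.put((i, x)) for i, x in enumerate(range(8))]
        for _ in range(concurrency):
            q_in.put((None, None))          # one sentinel per worker

        results = [q_out.get() for _ in range(len(sent))]
        for p in procs:
            p.join()
        print([val for _i, val in sorted(results)])  # squares of 0..7, in order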
2,559
log2timeline/dfwinreg
dfwinreg/virtual.py
VirtualWinRegistryKey.GetSubkeyByPath
def GetSubkeyByPath(self, key_path): """Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found. """ if not self._registry_key and self._registry: self._GetKeyFromRegistry() subkey = self for path_segment in key_paths.SplitKeyPath(key_path): subkey = subkey.GetSubkeyByName(path_segment) if not subkey: break return subkey
python
def GetSubkeyByPath(self, key_path): """Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found. """ if not self._registry_key and self._registry: self._GetKeyFromRegistry() subkey = self for path_segment in key_paths.SplitKeyPath(key_path): subkey = subkey.GetSubkeyByName(path_segment) if not subkey: break return subkey
['def', 'GetSubkeyByPath', '(', 'self', ',', 'key_path', ')', ':', 'if', 'not', 'self', '.', '_registry_key', 'and', 'self', '.', '_registry', ':', 'self', '.', '_GetKeyFromRegistry', '(', ')', 'subkey', '=', 'self', 'for', 'path_segment', 'in', 'key_paths', '.', 'SplitKeyPath', '(', 'key_path', ')', ':', 'subkey', '=', 'subkey', '.', 'GetSubkeyByName', '(', 'path_segment', ')', 'if', 'not', 'subkey', ':', 'break', 'return', 'subkey']
Retrieves a subkey by path. Args: key_path (str): path of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
['Retrieves', 'a', 'subkey', 'by', 'path', '.']
train
https://github.com/log2timeline/dfwinreg/blob/9d488bb1db562197dbfb48de9613d6b29dea056e/dfwinreg/virtual.py#L198-L216
2,560
benmoran56/esper
esper.py
World.remove_component
def remove_component(self, entity: int, component_type: Any) -> int: """Remove a Component instance from an Entity, by type. A Component instance can be removed by providing it's type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. Raises a KeyError if either the given entity or Component type does not exist in the database. :param entity: The Entity to remove the Component from. :param component_type: The type of the Component to remove. """ self._components[component_type].discard(entity) if not self._components[component_type]: del self._components[component_type] del self._entities[entity][component_type] if not self._entities[entity]: del self._entities[entity] self.clear_cache() return entity
python
def remove_component(self, entity: int, component_type: Any) -> int: """Remove a Component instance from an Entity, by type. A Component instance can be removed by providing it's type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. Raises a KeyError if either the given entity or Component type does not exist in the database. :param entity: The Entity to remove the Component from. :param component_type: The type of the Component to remove. """ self._components[component_type].discard(entity) if not self._components[component_type]: del self._components[component_type] del self._entities[entity][component_type] if not self._entities[entity]: del self._entities[entity] self.clear_cache() return entity
['def', 'remove_component', '(', 'self', ',', 'entity', ':', 'int', ',', 'component_type', ':', 'Any', ')', '->', 'int', ':', 'self', '.', '_components', '[', 'component_type', ']', '.', 'discard', '(', 'entity', ')', 'if', 'not', 'self', '.', '_components', '[', 'component_type', ']', ':', 'del', 'self', '.', '_components', '[', 'component_type', ']', 'del', 'self', '.', '_entities', '[', 'entity', ']', '[', 'component_type', ']', 'if', 'not', 'self', '.', '_entities', '[', 'entity', ']', ':', 'del', 'self', '.', '_entities', '[', 'entity', ']', 'self', '.', 'clear_cache', '(', ')', 'return', 'entity']
Remove a Component instance from an Entity, by type. A Component instance can be removed by providing it's type. For example: world.delete_component(enemy_a, Velocity) will remove the Velocity instance from the Entity enemy_a. Raises a KeyError if either the given entity or Component type does not exist in the database. :param entity: The Entity to remove the Component from. :param component_type: The type of the Component to remove.
['Remove', 'a', 'Component', 'instance', 'from', 'an', 'Entity', 'by', 'type', '.']
train
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L199-L222
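remove_component keeps two indexes consistent: components-by-type (a set of entity ids) and entities-by-id (a dict mapping component type to instance), pruning empty buckets as it goes. A simplified sketch of that bookkeeping with plain dicts; the component classes and entity ids are invented and this is not the real esper World API.

    class Velocity: pass
    class Position: pass

    components = {Velocity: {1, 2}, Position: {1}}
    entities = {1: {Velocity: Velocity(), Position: Position()},
                2: {Velocity: Velocity()}}

    def remove_component(entity, component_type):
        components[component_type].discard(entity)
        if not components[component_type]:
            del components[component_type]          # drop an empty type bucket
        del entities[entity][component_type]
        if not entities[entity]:
            del entities[entity]                    # drop an entity with nothing left
        return entity

    remove_component(2, Velocity)
    print(Velocity in components, 2 in entities)    # True False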
2,561
borntyping/python-riemann-client
riemann_client/client.py
Client.send_query
def send_query(self, query): """Sends a query to the Riemann server :returns: The response message from Riemann """ message = riemann_client.riemann_pb2.Msg() message.query.string = query return self.transport.send(message)
python
def send_query(self, query): """Sends a query to the Riemann server :returns: The response message from Riemann """ message = riemann_client.riemann_pb2.Msg() message.query.string = query return self.transport.send(message)
['def', 'send_query', '(', 'self', ',', 'query', ')', ':', 'message', '=', 'riemann_client', '.', 'riemann_pb2', '.', 'Msg', '(', ')', 'message', '.', 'query', '.', 'string', '=', 'query', 'return', 'self', '.', 'transport', '.', 'send', '(', 'message', ')']
Sends a query to the Riemann server :returns: The response message from Riemann
['Sends', 'a', 'query', 'to', 'the', 'Riemann', 'server']
train
https://github.com/borntyping/python-riemann-client/blob/3e181d90bdf685afd21c1ec5ee20e6840b011ea5/riemann_client/client.py#L158-L165
2,562
openwisp/django-x509
django_x509/base/models.py
AbstractCa.get_revoked_certs
def get_revoked_certs(self): """ Returns revoked certificates of this CA (does not include expired certificates) """ now = timezone.now() return self.cert_set.filter(revoked=True, validity_start__lte=now, validity_end__gte=now)
python
def get_revoked_certs(self): """ Returns revoked certificates of this CA (does not include expired certificates) """ now = timezone.now() return self.cert_set.filter(revoked=True, validity_start__lte=now, validity_end__gte=now)
['def', 'get_revoked_certs', '(', 'self', ')', ':', 'now', '=', 'timezone', '.', 'now', '(', ')', 'return', 'self', '.', 'cert_set', '.', 'filter', '(', 'revoked', '=', 'True', ',', 'validity_start__lte', '=', 'now', ',', 'validity_end__gte', '=', 'now', ')']
Returns revoked certificates of this CA (does not include expired certificates)
['Returns', 'revoked', 'certificates', 'of', 'this', 'CA', '(', 'does', 'not', 'include', 'expired', 'certificates', ')']
train
https://github.com/openwisp/django-x509/blob/7f6cc937d6b13a10ce6511e0bb2a9a1345e45a2c/django_x509/base/models.py#L439-L447
2,563
praekelt/django-simple-autocomplete
simple_autocomplete/views.py
get_json
def get_json(request, token): """Return matching results as JSON""" result = [] searchtext = request.GET['q'] if len(searchtext) >= 3: pickled = _simple_autocomplete_queryset_cache.get(token, None) if pickled is not None: app_label, model_name, query = pickle.loads(pickled) model = apps.get_model(app_label, model_name) queryset = QuerySet(model=model, query=query) fieldname = get_search_fieldname(model) di = {'%s__istartswith' % fieldname: searchtext} app_label_model = '%s.%s' % (app_label, model_name) max_items = get_setting(app_label_model, 'max_items', 10) items = queryset.filter(**di).order_by(fieldname)[:max_items] # Check for duplicate strings counts = {} for item in items: if hasattr(item, "__unicode__"): key = item.__unicode__() else: key = str(item) #key = unicode(item) counts.setdefault(key, 0) counts[key] += 1 # Assemble result set for item in items: #key = value = unicode(item) if hasattr(item, "__unicode__"): key = value = item.__unicode__() else: key = value = str(item) value = getattr(item, fieldname) if counts[key] > 1: func = get_setting( app_label_model, 'duplicate_format_function', lambda obj, model, content_type: content_type.name ) content_type = ContentType.objects.get_for_model(model) value = '%s (%s)' % (value, func(item, model, content_type)) result.append((item.id, value)) else: result = 'CACHE_MISS' return HttpResponse(json.dumps(result))
python
def get_json(request, token): """Return matching results as JSON""" result = [] searchtext = request.GET['q'] if len(searchtext) >= 3: pickled = _simple_autocomplete_queryset_cache.get(token, None) if pickled is not None: app_label, model_name, query = pickle.loads(pickled) model = apps.get_model(app_label, model_name) queryset = QuerySet(model=model, query=query) fieldname = get_search_fieldname(model) di = {'%s__istartswith' % fieldname: searchtext} app_label_model = '%s.%s' % (app_label, model_name) max_items = get_setting(app_label_model, 'max_items', 10) items = queryset.filter(**di).order_by(fieldname)[:max_items] # Check for duplicate strings counts = {} for item in items: if hasattr(item, "__unicode__"): key = item.__unicode__() else: key = str(item) #key = unicode(item) counts.setdefault(key, 0) counts[key] += 1 # Assemble result set for item in items: #key = value = unicode(item) if hasattr(item, "__unicode__"): key = value = item.__unicode__() else: key = value = str(item) value = getattr(item, fieldname) if counts[key] > 1: func = get_setting( app_label_model, 'duplicate_format_function', lambda obj, model, content_type: content_type.name ) content_type = ContentType.objects.get_for_model(model) value = '%s (%s)' % (value, func(item, model, content_type)) result.append((item.id, value)) else: result = 'CACHE_MISS' return HttpResponse(json.dumps(result))
['def', 'get_json', '(', 'request', ',', 'token', ')', ':', 'result', '=', '[', ']', 'searchtext', '=', 'request', '.', 'GET', '[', "'q'", ']', 'if', 'len', '(', 'searchtext', ')', '>=', '3', ':', 'pickled', '=', '_simple_autocomplete_queryset_cache', '.', 'get', '(', 'token', ',', 'None', ')', 'if', 'pickled', 'is', 'not', 'None', ':', 'app_label', ',', 'model_name', ',', 'query', '=', 'pickle', '.', 'loads', '(', 'pickled', ')', 'model', '=', 'apps', '.', 'get_model', '(', 'app_label', ',', 'model_name', ')', 'queryset', '=', 'QuerySet', '(', 'model', '=', 'model', ',', 'query', '=', 'query', ')', 'fieldname', '=', 'get_search_fieldname', '(', 'model', ')', 'di', '=', '{', "'%s__istartswith'", '%', 'fieldname', ':', 'searchtext', '}', 'app_label_model', '=', "'%s.%s'", '%', '(', 'app_label', ',', 'model_name', ')', 'max_items', '=', 'get_setting', '(', 'app_label_model', ',', "'max_items'", ',', '10', ')', 'items', '=', 'queryset', '.', 'filter', '(', '*', '*', 'di', ')', '.', 'order_by', '(', 'fieldname', ')', '[', ':', 'max_items', ']', '# Check for duplicate strings', 'counts', '=', '{', '}', 'for', 'item', 'in', 'items', ':', 'if', 'hasattr', '(', 'item', ',', '"__unicode__"', ')', ':', 'key', '=', 'item', '.', '__unicode__', '(', ')', 'else', ':', 'key', '=', 'str', '(', 'item', ')', '#key = unicode(item)', 'counts', '.', 'setdefault', '(', 'key', ',', '0', ')', 'counts', '[', 'key', ']', '+=', '1', '# Assemble result set', 'for', 'item', 'in', 'items', ':', '#key = value = unicode(item)', 'if', 'hasattr', '(', 'item', ',', '"__unicode__"', ')', ':', 'key', '=', 'value', '=', 'item', '.', '__unicode__', '(', ')', 'else', ':', 'key', '=', 'value', '=', 'str', '(', 'item', ')', 'value', '=', 'getattr', '(', 'item', ',', 'fieldname', ')', 'if', 'counts', '[', 'key', ']', '>', '1', ':', 'func', '=', 'get_setting', '(', 'app_label_model', ',', "'duplicate_format_function'", ',', 'lambda', 'obj', ',', 'model', ',', 'content_type', ':', 'content_type', '.', 'name', ')', 'content_type', '=', 'ContentType', '.', 'objects', '.', 'get_for_model', '(', 'model', ')', 'value', '=', "'%s (%s)'", '%', '(', 'value', ',', 'func', '(', 'item', ',', 'model', ',', 'content_type', ')', ')', 'result', '.', 'append', '(', '(', 'item', '.', 'id', ',', 'value', ')', ')', 'else', ':', 'result', '=', "'CACHE_MISS'", 'return', 'HttpResponse', '(', 'json', '.', 'dumps', '(', 'result', ')', ')']
Return matching results as JSON
['Return', 'matching', 'results', 'as', 'JSON']
train
https://github.com/praekelt/django-simple-autocomplete/blob/925b639a6a7fac2350dda9656845d8bd9aa2e748/simple_autocomplete/views.py#L14-L62
2,564
rehandalal/flask-funnel
flask_funnel/extensions.py
coffee
def coffee(input, output, **kw): """Process CoffeeScript files""" subprocess.call([current_app.config.get('COFFEE_BIN'), '-c', '-o', output, input])
python
def coffee(input, output, **kw): """Process CoffeeScript files""" subprocess.call([current_app.config.get('COFFEE_BIN'), '-c', '-o', output, input])
['def', 'coffee', '(', 'input', ',', 'output', ',', '*', '*', 'kw', ')', ':', 'subprocess', '.', 'call', '(', '[', 'current_app', '.', 'config', '.', 'get', '(', "'COFFEE_BIN'", ')', ',', "'-c'", ',', "'-o'", ',', 'output', ',', 'input', ']', ')']
Process CoffeeScript files
['Process', 'CoffeeScript', 'files']
train
https://github.com/rehandalal/flask-funnel/blob/b635cf52d1c9133c748aab7465edd7caef48e433/flask_funnel/extensions.py#L30-L33
2,565
garyp/sifter
sifter/grammar/grammar.py
p_commands_list
def p_commands_list(p): """commands : commands command""" p[0] = p[1] # section 3.2: REQUIRE command must come before any other commands if p[2].RULE_IDENTIFIER == 'REQUIRE': if any(command.RULE_IDENTIFIER != 'REQUIRE' for command in p[0].commands): print("REQUIRE command on line %d must come before any " "other non-REQUIRE commands" % p.lineno(2)) raise SyntaxError # section 3.1: ELSIF and ELSE must follow IF or another ELSIF elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'): if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'): print("ELSIF/ELSE command on line %d must follow an IF/ELSIF " "command" % p.lineno(2)) raise SyntaxError p[0].commands.append(p[2])
python
def p_commands_list(p): """commands : commands command""" p[0] = p[1] # section 3.2: REQUIRE command must come before any other commands if p[2].RULE_IDENTIFIER == 'REQUIRE': if any(command.RULE_IDENTIFIER != 'REQUIRE' for command in p[0].commands): print("REQUIRE command on line %d must come before any " "other non-REQUIRE commands" % p.lineno(2)) raise SyntaxError # section 3.1: ELSIF and ELSE must follow IF or another ELSIF elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'): if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'): print("ELSIF/ELSE command on line %d must follow an IF/ELSIF " "command" % p.lineno(2)) raise SyntaxError p[0].commands.append(p[2])
['def', 'p_commands_list', '(', 'p', ')', ':', 'p', '[', '0', ']', '=', 'p', '[', '1', ']', '# section 3.2: REQUIRE command must come before any other commands', 'if', 'p', '[', '2', ']', '.', 'RULE_IDENTIFIER', '==', "'REQUIRE'", ':', 'if', 'any', '(', 'command', '.', 'RULE_IDENTIFIER', '!=', "'REQUIRE'", 'for', 'command', 'in', 'p', '[', '0', ']', '.', 'commands', ')', ':', 'print', '(', '"REQUIRE command on line %d must come before any "', '"other non-REQUIRE commands"', '%', 'p', '.', 'lineno', '(', '2', ')', ')', 'raise', 'SyntaxError', '# section 3.1: ELSIF and ELSE must follow IF or another ELSIF', 'elif', 'p', '[', '2', ']', '.', 'RULE_IDENTIFIER', 'in', '(', "'ELSIF'", ',', "'ELSE'", ')', ':', 'if', 'p', '[', '0', ']', '.', 'commands', '[', '-', '1', ']', '.', 'RULE_IDENTIFIER', 'not', 'in', '(', "'IF'", ',', "'ELSIF'", ')', ':', 'print', '(', '"ELSIF/ELSE command on line %d must follow an IF/ELSIF "', '"command"', '%', 'p', '.', 'lineno', '(', '2', ')', ')', 'raise', 'SyntaxError', 'p', '[', '0', ']', '.', 'commands', '.', 'append', '(', 'p', '[', '2', ']', ')']
commands : commands command
['commands', ':', 'commands', 'command']
train
https://github.com/garyp/sifter/blob/9c472af76853c1196387141e017114d282637474/sifter/grammar/grammar.py#L17-L36
2,566
nickpandolfi/Cyther
cyther/pathway.py
normalize
def normalize(path_name, override=None): """ Prepares a path name to be worked with. Path name must not be empty. This function will return the 'normpath'ed path and the identity of the path. This function takes an optional overriding argument for the identity. ONLY PROVIDE OVERRIDE IF: 1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME 2) YOU ARE MAKING A FILE WITH NO EXTENSION """ identity = identify(path_name, override=override) new_path_name = os.path.normpath(os.path.expanduser(path_name)) return new_path_name, identity
python
def normalize(path_name, override=None): """ Prepares a path name to be worked with. Path name must not be empty. This function will return the 'normpath'ed path and the identity of the path. This function takes an optional overriding argument for the identity. ONLY PROVIDE OVERRIDE IF: 1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME 2) YOU ARE MAKING A FILE WITH NO EXTENSION """ identity = identify(path_name, override=override) new_path_name = os.path.normpath(os.path.expanduser(path_name)) return new_path_name, identity
['def', 'normalize', '(', 'path_name', ',', 'override', '=', 'None', ')', ':', 'identity', '=', 'identify', '(', 'path_name', ',', 'override', '=', 'override', ')', 'new_path_name', '=', 'os', '.', 'path', '.', 'normpath', '(', 'os', '.', 'path', '.', 'expanduser', '(', 'path_name', ')', ')', 'return', 'new_path_name', ',', 'identity']
Prepares a path name to be worked with. Path name must not be empty. This function will return the 'normpath'ed path and the identity of the path. This function takes an optional overriding argument for the identity. ONLY PROVIDE OVERRIDE IF: 1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME 2) YOU ARE MAKING A FILE WITH NO EXTENSION
['Prepares', 'a', 'path', 'name', 'to', 'be', 'worked', 'with', '.', 'Path', 'name', 'must', 'not', 'be', 'empty', '.', 'This', 'function', 'will', 'return', 'the', 'normpath', 'ed', 'path', 'and', 'the', 'identity', 'of', 'the', 'path', '.', 'This', 'function', 'takes', 'an', 'optional', 'overriding', 'argument', 'for', 'the', 'identity', '.']
train
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L36-L50
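The normalization half of the record is just expanduser followed by normpath. A quick demonstration of what that pair does to a messy path (the example path is made up, and the expanded home directory obviously varies by machine):

    import os

    messy = '~/projects//demo/./src/../build'
    clean = os.path.normpath(os.path.expanduser(messy))
    print(clean)  # e.g. /home/user/projects/demo/build on a POSIX system

    # normpath alone collapses '//', '.' and '..' but leaves '~' untouched.
    print(os.path.normpath('a//b/./c/../d'))  # a/b/d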
2,567
ynop/audiomate
audiomate/annotations/label_list.py
LabelList.add
def add(self, label): """ Add a label to the end of the list. Args: label (Label): The label to add. """ label.label_list = self self.label_tree.addi(label.start, label.end, label)
python
def add(self, label): """ Add a label to the end of the list. Args: label (Label): The label to add. """ label.label_list = self self.label_tree.addi(label.start, label.end, label)
['def', 'add', '(', 'self', ',', 'label', ')', ':', 'label', '.', 'label_list', '=', 'self', 'self', '.', 'label_tree', '.', 'addi', '(', 'label', '.', 'start', ',', 'label', '.', 'end', ',', 'label', ')']
Add a label to the end of the list. Args: label (Label): The label to add.
['Add', 'a', 'label', 'to', 'the', 'end', 'of', 'the', 'list', '.']
train
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/annotations/label_list.py#L90-L98
2,568
ARMmbed/icetea
icetea_lib/tools/GenericProcess.py
GenericProcess.stop_process
def stop_process(self): """ Stop the process. :raises: EnvironmentError if stopping fails due to unknown environment TestStepError if process stops with non-default returncode and return code is not ignored. """ if self.read_thread is not None: self.logger.debug("stop_process::readThread.stop()-in") self.read_thread.stop() self.logger.debug("stop_process::readThread.stop()-out") returncode = None if self.proc: self.logger.debug("os.killpg(%d)", self.proc.pid) for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL): timeout = 5 try: try: self.logger.debug("Trying signal %s", sig) os.killpg(self.proc.pid, sig) except AttributeError: self.logger.debug("os.killpg::AttributeError") # Failed most likely because in windows, # so use taskkill to kill whole process tree of proc if platform.system() == "Windows": subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)]) else: self.logger.debug("os.killpg::unknown env") raise EnvironmentError("Unknown platform, " "don't know how to terminate process") while self.proc.poll() is None and timeout > 0: time.sleep(1) timeout -= 1 returncode = self.proc.poll() if returncode is not None: break except OSError as error: self.logger.info("os.killpg::OSError: %s", error) self.proc = None if returncode is not None: self.logger.debug("Process stopped with returncode %s" % returncode) if returncode != self.default_retcode and not self.__ignore_return_code: raise TestStepError("Process stopped with returncode %d" % returncode) self.logger.debug("stop_process-out")
python
def stop_process(self): """ Stop the process. :raises: EnvironmentError if stopping fails due to unknown environment TestStepError if process stops with non-default returncode and return code is not ignored. """ if self.read_thread is not None: self.logger.debug("stop_process::readThread.stop()-in") self.read_thread.stop() self.logger.debug("stop_process::readThread.stop()-out") returncode = None if self.proc: self.logger.debug("os.killpg(%d)", self.proc.pid) for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL): timeout = 5 try: try: self.logger.debug("Trying signal %s", sig) os.killpg(self.proc.pid, sig) except AttributeError: self.logger.debug("os.killpg::AttributeError") # Failed most likely because in windows, # so use taskkill to kill whole process tree of proc if platform.system() == "Windows": subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)]) else: self.logger.debug("os.killpg::unknown env") raise EnvironmentError("Unknown platform, " "don't know how to terminate process") while self.proc.poll() is None and timeout > 0: time.sleep(1) timeout -= 1 returncode = self.proc.poll() if returncode is not None: break except OSError as error: self.logger.info("os.killpg::OSError: %s", error) self.proc = None if returncode is not None: self.logger.debug("Process stopped with returncode %s" % returncode) if returncode != self.default_retcode and not self.__ignore_return_code: raise TestStepError("Process stopped with returncode %d" % returncode) self.logger.debug("stop_process-out")
['def', 'stop_process', '(', 'self', ')', ':', 'if', 'self', '.', 'read_thread', 'is', 'not', 'None', ':', 'self', '.', 'logger', '.', 'debug', '(', '"stop_process::readThread.stop()-in"', ')', 'self', '.', 'read_thread', '.', 'stop', '(', ')', 'self', '.', 'logger', '.', 'debug', '(', '"stop_process::readThread.stop()-out"', ')', 'returncode', '=', 'None', 'if', 'self', '.', 'proc', ':', 'self', '.', 'logger', '.', 'debug', '(', '"os.killpg(%d)"', ',', 'self', '.', 'proc', '.', 'pid', ')', 'for', 'sig', 'in', '(', 'signal', '.', 'SIGINT', ',', 'signal', '.', 'SIGTERM', ',', 'signal', '.', 'SIGKILL', ')', ':', 'timeout', '=', '5', 'try', ':', 'try', ':', 'self', '.', 'logger', '.', 'debug', '(', '"Trying signal %s"', ',', 'sig', ')', 'os', '.', 'killpg', '(', 'self', '.', 'proc', '.', 'pid', ',', 'sig', ')', 'except', 'AttributeError', ':', 'self', '.', 'logger', '.', 'debug', '(', '"os.killpg::AttributeError"', ')', '# Failed most likely because in windows,', '# so use taskkill to kill whole process tree of proc', 'if', 'platform', '.', 'system', '(', ')', '==', '"Windows"', ':', 'subprocess', '.', 'call', '(', '[', "'taskkill'", ',', "'/F'", ',', "'/T'", ',', "'/PID'", ',', 'str', '(', 'self', '.', 'proc', '.', 'pid', ')', ']', ')', 'else', ':', 'self', '.', 'logger', '.', 'debug', '(', '"os.killpg::unknown env"', ')', 'raise', 'EnvironmentError', '(', '"Unknown platform, "', '"don\'t know how to terminate process"', ')', 'while', 'self', '.', 'proc', '.', 'poll', '(', ')', 'is', 'None', 'and', 'timeout', '>', '0', ':', 'time', '.', 'sleep', '(', '1', ')', 'timeout', '-=', '1', 'returncode', '=', 'self', '.', 'proc', '.', 'poll', '(', ')', 'if', 'returncode', 'is', 'not', 'None', ':', 'break', 'except', 'OSError', 'as', 'error', ':', 'self', '.', 'logger', '.', 'info', '(', '"os.killpg::OSError: %s"', ',', 'error', ')', 'self', '.', 'proc', '=', 'None', 'if', 'returncode', 'is', 'not', 'None', ':', 'self', '.', 'logger', '.', 'debug', '(', '"Process stopped with returncode %s"', '%', 'returncode', ')', 'if', 'returncode', '!=', 'self', '.', 'default_retcode', 'and', 'not', 'self', '.', '__ignore_return_code', ':', 'raise', 'TestStepError', '(', '"Process stopped with returncode %d"', '%', 'returncode', ')', 'self', '.', 'logger', '.', 'debug', '(', '"stop_process-out"', ')']
Stop the process. :raises: EnvironmentError if stopping fails due to unknown environment TestStepError if process stops with non-default returncode and return code is not ignored.
['Stop', 'the', 'process', '.']
train
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/GenericProcess.py#L503-L548
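stop_process escalates through SIGINT, SIGTERM and SIGKILL, polling for up to five seconds after each signal. The following much-reduced, POSIX-flavoured sketch keeps only that escalation loop, using Popen.send_signal and wait timeouts instead of os.killpg; the child command is an arbitrary sleeper invented for the demo, and Windows supports a narrower signal set.

    import signal
    import subprocess
    import sys

    # An arbitrary long-running child used only for the demo.
    proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])

    returncode = None
    for sig in (signal.SIGINT, signal.SIGTERM):
        proc.send_signal(sig)
        try:
            returncode = proc.wait(timeout=5)   # bounded wait instead of a poll loop
            break
        except subprocess.TimeoutExpired:
            continue
    if returncode is None:
        proc.kill()                             # last resort, like SIGKILL above
        returncode = proc.wait()
    print('child exited with', returncode)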
2,569
spacetelescope/pysynphot
pysynphot/spectrum.py
SpectralElement.check_sig
def check_sig(self, other): """Check overlap insignificance with another spectrum. Also see :ref:`pysynphot-command-checko`. .. note:: Only use when :meth:`check_overlap` returns "partial". Parameters ---------- other : `SourceSpectrum` or `SpectralElement` The other spectrum. Returns ------- ans : bool `True` means the *lack* of overlap is *insignificant* (i.e., okay to proceed). """ swave = self.wave[N.where(self.throughput != 0)] s1, s2 = swave.min(), swave.max() owave = other.wave o1, o2 = owave.min(), owave.max() lorange = sorted([s1, o1]) hirange = sorted([s2, o2]) # Get the full throughput total = self.integrate() # Now get the other two pieces # We cannot yet do # low = self[slice(*lowrange)].integrate() wave = self.wave idxs = [N.searchsorted(wave, lorange, 'left'), N.searchsorted(wave, hirange, 'left')] excluded = 0.0 for idx in idxs: try: excluded += self.integrate(wave=wave[slice(*idx)]) except IndexError: pass # If the range is zero, do nothing if excluded/total < 0.01: return True else: return False
python
def check_sig(self, other): """Check overlap insignificance with another spectrum. Also see :ref:`pysynphot-command-checko`. .. note:: Only use when :meth:`check_overlap` returns "partial". Parameters ---------- other : `SourceSpectrum` or `SpectralElement` The other spectrum. Returns ------- ans : bool `True` means the *lack* of overlap is *insignificant* (i.e., okay to proceed). """ swave = self.wave[N.where(self.throughput != 0)] s1, s2 = swave.min(), swave.max() owave = other.wave o1, o2 = owave.min(), owave.max() lorange = sorted([s1, o1]) hirange = sorted([s2, o2]) # Get the full throughput total = self.integrate() # Now get the other two pieces # We cannot yet do # low = self[slice(*lowrange)].integrate() wave = self.wave idxs = [N.searchsorted(wave, lorange, 'left'), N.searchsorted(wave, hirange, 'left')] excluded = 0.0 for idx in idxs: try: excluded += self.integrate(wave=wave[slice(*idx)]) except IndexError: pass # If the range is zero, do nothing if excluded/total < 0.01: return True else: return False
['def', 'check_sig', '(', 'self', ',', 'other', ')', ':', 'swave', '=', 'self', '.', 'wave', '[', 'N', '.', 'where', '(', 'self', '.', 'throughput', '!=', '0', ')', ']', 's1', ',', 's2', '=', 'swave', '.', 'min', '(', ')', ',', 'swave', '.', 'max', '(', ')', 'owave', '=', 'other', '.', 'wave', 'o1', ',', 'o2', '=', 'owave', '.', 'min', '(', ')', ',', 'owave', '.', 'max', '(', ')', 'lorange', '=', 'sorted', '(', '[', 's1', ',', 'o1', ']', ')', 'hirange', '=', 'sorted', '(', '[', 's2', ',', 'o2', ']', ')', '# Get the full throughput', 'total', '=', 'self', '.', 'integrate', '(', ')', '# Now get the other two pieces', '# We cannot yet do', '# low = self[slice(*lowrange)].integrate()', 'wave', '=', 'self', '.', 'wave', 'idxs', '=', '[', 'N', '.', 'searchsorted', '(', 'wave', ',', 'lorange', ',', "'left'", ')', ',', 'N', '.', 'searchsorted', '(', 'wave', ',', 'hirange', ',', "'left'", ')', ']', 'excluded', '=', '0.0', 'for', 'idx', 'in', 'idxs', ':', 'try', ':', 'excluded', '+=', 'self', '.', 'integrate', '(', 'wave', '=', 'wave', '[', 'slice', '(', '*', 'idx', ')', ']', ')', 'except', 'IndexError', ':', 'pass', '# If the range is zero, do nothing', 'if', 'excluded', '/', 'total', '<', '0.01', ':', 'return', 'True', 'else', ':', 'return', 'False']
Check overlap insignificance with another spectrum. Also see :ref:`pysynphot-command-checko`. .. note:: Only use when :meth:`check_overlap` returns "partial". Parameters ---------- other : `SourceSpectrum` or `SpectralElement` The other spectrum. Returns ------- ans : bool `True` means the *lack* of overlap is *insignificant* (i.e., okay to proceed).
['Check', 'overlap', 'insignificance', 'with', 'another', 'spectrum', '.', 'Also', 'see', ':', 'ref', ':', 'pysynphot', '-', 'command', '-', 'checko', '.']
train
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/spectrum.py#L1998-L2047
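check_sig decides that a partial overlap is harmless when the throughput excluded by the mismatched wavelength ranges integrates to less than 1% of the total. A toy numeric version of that test with numpy; the Gaussian bandpass shape, the wavelength grid and the 1050-1950 A coverage of the "other" spectrum are all invented for the demo.

    import numpy as np

    wave = np.linspace(1000.0, 2000.0, 1001)                     # Angstroms, made up
    dw = wave[1] - wave[0]
    throughput = np.exp(-0.5 * ((wave - 1500.0) / 80.0) ** 2)    # toy Gaussian bandpass

    total = throughput.sum() * dw                                # crude integral

    # Pretend the other spectrum only covers 1050..1950 A, so the wings fall outside.
    outside = (wave < 1050.0) | (wave > 1950.0)
    excluded = (throughput[outside].sum()) * dw

    print(excluded / total)          # a tiny fraction for this narrow bandpass
    print(excluded / total < 0.01)   # True -> the lack of overlap is insignificant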
2,570
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_action_preconditions_checking
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor: '''Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`. ''' with self.graph.as_default(): with tf.name_scope('action_preconditions_checking'): preconds = self.compile_action_preconditions(state, action) all_preconds = tf.stack([p.tensor for p in preconds], axis=1) checking = tf.reduce_all(all_preconds, axis=1) return checking
python
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor: '''Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`. ''' with self.graph.as_default(): with tf.name_scope('action_preconditions_checking'): preconds = self.compile_action_preconditions(state, action) all_preconds = tf.stack([p.tensor for p in preconds], axis=1) checking = tf.reduce_all(all_preconds, axis=1) return checking
['def', 'compile_action_preconditions_checking', '(', 'self', ',', 'state', ':', 'Sequence', '[', 'tf', '.', 'Tensor', ']', ',', 'action', ':', 'Sequence', '[', 'tf', '.', 'Tensor', ']', ')', '->', 'tf', '.', 'Tensor', ':', 'with', 'self', '.', 'graph', '.', 'as_default', '(', ')', ':', 'with', 'tf', '.', 'name_scope', '(', "'action_preconditions_checking'", ')', ':', 'preconds', '=', 'self', '.', 'compile_action_preconditions', '(', 'state', ',', 'action', ')', 'all_preconds', '=', 'tf', '.', 'stack', '(', '[', 'p', '.', 'tensor', 'for', 'p', 'in', 'preconds', ']', ',', 'axis', '=', '1', ')', 'checking', '=', 'tf', '.', 'reduce_all', '(', 'all_preconds', ',', 'axis', '=', '1', ')', 'return', 'checking']
Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`.
['Combines', 'the', 'action', 'preconditions', 'into', 'an', 'applicability', 'checking', 'op', '.']
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L321-L338
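The checking op above is just: stack one boolean tensor per precondition along a new axis and AND-reduce across it, leaving one applicability flag per batch entry. The same reduction in plain numpy, which keeps the illustration free of a TensorFlow dependency; the shapes and truth values are invented.

    import numpy as np

    # Three preconditions evaluated for a batch of four states (True = satisfied).
    preconds = [
        np.array([True, True, False, True]),
        np.array([True, True, True, True]),
        np.array([True, False, False, True]),
    ]

    all_preconds = np.stack(preconds, axis=1)    # shape (batch, n_preconds)
    checking = np.all(all_preconds, axis=1)      # one applicability flag per state
    print(checking)                              # [ True False False  True]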
2,571
cs50/check50
check50/__main__.py
install_translations
def install_translations(config): """Add check translations according to ``config`` as a fallback to existing translations""" if not config: return from . import _translation checks_translation = gettext.translation(domain=config["domain"], localedir=internal.check_dir / config["localedir"], fallback=True) _translation.add_fallback(checks_translation)
python
def install_translations(config): """Add check translations according to ``config`` as a fallback to existing translations""" if not config: return from . import _translation checks_translation = gettext.translation(domain=config["domain"], localedir=internal.check_dir / config["localedir"], fallback=True) _translation.add_fallback(checks_translation)
['def', 'install_translations', '(', 'config', ')', ':', 'if', 'not', 'config', ':', 'return', 'from', '.', 'import', '_translation', 'checks_translation', '=', 'gettext', '.', 'translation', '(', 'domain', '=', 'config', '[', '"domain"', ']', ',', 'localedir', '=', 'internal', '.', 'check_dir', '/', 'config', '[', '"localedir"', ']', ',', 'fallback', '=', 'True', ')', '_translation', '.', 'add_fallback', '(', 'checks_translation', ')']
Add check translations according to ``config`` as a fallback to existing translations
['Add', 'check', 'translations', 'according', 'to', 'config', 'as', 'a', 'fallback', 'to', 'existing', 'translations']
train
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/__main__.py#L134-L144
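A hedged sketch of how install_translations above is driven by its config; the domain and localedir values are hypothetical examples of what a check repository might declare:

config = {"domain": "checks", "localedir": "locale"}
install_translations(config)   # registers the checks' gettext catalog as a fallback translation
install_translations(None)     # a falsy config is simply a no-op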
2,572
cvxopt/chompack
src/python/symbolic.py
__leaf
def __leaf(i, j, first, maxfirst, prevleaf, ancestor): """ Determine if j is leaf of i'th row subtree. """ jleaf = 0 if i<=j or first[j] <= maxfirst[i]: return -1, jleaf maxfirst[i] = first[j] jprev = prevleaf[i] prevleaf[i] = j if jprev == -1: jleaf = 1 else: jleaf = 2 if jleaf == 1: return i, jleaf q = jprev while q != ancestor[q]: q = ancestor[q] s = jprev while s != q: sparent = ancestor[s] ancestor[s] = q s = sparent return q, jleaf
python
def __leaf(i, j, first, maxfirst, prevleaf, ancestor): """ Determine if j is leaf of i'th row subtree. """ jleaf = 0 if i<=j or first[j] <= maxfirst[i]: return -1, jleaf maxfirst[i] = first[j] jprev = prevleaf[i] prevleaf[i] = j if jprev == -1: jleaf = 1 else: jleaf = 2 if jleaf == 1: return i, jleaf q = jprev while q != ancestor[q]: q = ancestor[q] s = jprev while s != q: sparent = ancestor[s] ancestor[s] = q s = sparent return q, jleaf
['def', '__leaf', '(', 'i', ',', 'j', ',', 'first', ',', 'maxfirst', ',', 'prevleaf', ',', 'ancestor', ')', ':', 'jleaf', '=', '0', 'if', 'i', '<=', 'j', 'or', 'first', '[', 'j', ']', '<=', 'maxfirst', '[', 'i', ']', ':', 'return', '-', '1', ',', 'jleaf', 'maxfirst', '[', 'i', ']', '=', 'first', '[', 'j', ']', 'jprev', '=', 'prevleaf', '[', 'i', ']', 'prevleaf', '[', 'i', ']', '=', 'j', 'if', 'jprev', '==', '-', '1', ':', 'jleaf', '=', '1', 'else', ':', 'jleaf', '=', '2', 'if', 'jleaf', '==', '1', ':', 'return', 'i', ',', 'jleaf', 'q', '=', 'jprev', 'while', 'q', '!=', 'ancestor', '[', 'q', ']', ':', 'q', '=', 'ancestor', '[', 'q', ']', 's', '=', 'jprev', 'while', 's', '!=', 'q', ':', 'sparent', '=', 'ancestor', '[', 's', ']', 'ancestor', '[', 's', ']', '=', 'q', 's', '=', 'sparent', 'return', 'q', ',', 'jleaf']
Determine if j is leaf of i'th row subtree.
['Determine', 'if', 'j', 'is', 'leaf', 'of', 'i', 'th', 'row', 'subtree', '.']
train
https://github.com/cvxopt/chompack/blob/e07106b58b8055c34f6201e8c954482f86987833/src/python/symbolic.py#L74-L93
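A short note on the call pattern and return convention of __leaf above (the work arrays first, maxfirst, prevleaf and ancestor are integer arrays maintained by the caller):

q, jleaf = __leaf(i, j, first, maxfirst, prevleaf, ancestor)
# jleaf == 0: j is not a leaf of the i'th row subtree (q is -1)
# jleaf == 1: j is the first leaf found for row i (q == i)
# jleaf == 2: j is a subsequent leaf; q is the least common ancestor of j and the previous leaf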
2,573
CZ-NIC/yangson
yangson/instance.py
InstanceNode._node_set
def _node_set(self) -> List["InstanceNode"]: """XPath - return the list of all receiver's nodes.""" return list(self) if isinstance(self.value, ArrayValue) else [self]
python
def _node_set(self) -> List["InstanceNode"]: """XPath - return the list of all receiver's nodes.""" return list(self) if isinstance(self.value, ArrayValue) else [self]
['def', '_node_set', '(', 'self', ')', '->', 'List', '[', '"InstanceNode"', ']', ':', 'return', 'list', '(', 'self', ')', 'if', 'isinstance', '(', 'self', '.', 'value', ',', 'ArrayValue', ')', 'else', '[', 'self', ']']
XPath - return the list of all receiver's nodes.
['XPath', '-', 'return', 'the', 'list', 'of', 'all', 'receiver', 's', 'nodes', '.']
train
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/instance.py#L418-L420
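A brief sketch of the behaviour documented above; here inst stands for any InstanceNode:

nodes = inst._node_set()
# array-valued node  -> list(inst), one entry per array member
# any other node     -> [inst]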
2,574
PmagPy/PmagPy
pmagpy/ipmag.py
equi
def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0): """ This function enables A95 error ellipses to be drawn in cartopy around paleomagnetic poles in conjunction with shoot (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/). """ if not has_cartopy: print('-W- cartopy must be installed to run ipmag.equi') return glon1 = centerlon glat1 = centerlat X = [] Y = [] for azimuth in range(0, 360): glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius) X.append(glon2) Y.append(glat2) X.append(X[0]) Y.append(Y[0]) plt.plot(X[::-1], Y[::-1], color=color, transform=ccrs.Geodetic(), alpha=alpha)
python
def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0): """ This function enables A95 error ellipses to be drawn in cartopy around paleomagnetic poles in conjunction with shoot (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/). """ if not has_cartopy: print('-W- cartopy must be installed to run ipmag.equi') return glon1 = centerlon glat1 = centerlat X = [] Y = [] for azimuth in range(0, 360): glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius) X.append(glon2) Y.append(glat2) X.append(X[0]) Y.append(Y[0]) plt.plot(X[::-1], Y[::-1], color=color, transform=ccrs.Geodetic(), alpha=alpha)
['def', 'equi', '(', 'map_axis', ',', 'centerlon', ',', 'centerlat', ',', 'radius', ',', 'color', ',', 'alpha', '=', '1.0', ')', ':', 'if', 'not', 'has_cartopy', ':', 'print', '(', "'-W- cartopy must be installed to run ipmag.equi'", ')', 'return', 'glon1', '=', 'centerlon', 'glat1', '=', 'centerlat', 'X', '=', '[', ']', 'Y', '=', '[', ']', 'for', 'azimuth', 'in', 'range', '(', '0', ',', '360', ')', ':', 'glon2', ',', 'glat2', ',', 'baz', '=', 'shoot', '(', 'glon1', ',', 'glat1', ',', 'azimuth', ',', 'radius', ')', 'X', '.', 'append', '(', 'glon2', ')', 'Y', '.', 'append', '(', 'glat2', ')', 'X', '.', 'append', '(', 'X', '[', '0', ']', ')', 'Y', '.', 'append', '(', 'Y', '[', '0', ']', ')', 'plt', '.', 'plot', '(', 'X', '[', ':', ':', '-', '1', ']', ',', 'Y', '[', ':', ':', '-', '1', ']', ',', 'color', '=', 'color', ',', 'transform', '=', 'ccrs', '.', 'Geodetic', '(', ')', ',', 'alpha', '=', 'alpha', ')']
This function enables A95 error ellipses to be drawn in cartopy around paleomagnetic poles in conjunction with shoot (modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
['This', 'function', 'enables', 'A95', 'error', 'ellipses', 'to', 'be', 'drawn', 'in', 'cartopy', 'around', 'paleomagnetic', 'poles', 'in', 'conjunction', 'with', 'shoot', '(', 'modified', 'from', ':', 'http', ':', '//', 'www', '.', 'geophysique', '.', 'be', '/', '2011', '/', '02', '/', '20', '/', 'matplotlib', '-', 'basemap', '-', 'tutorial', '-', '09', '-', 'drawing', '-', 'circles', '/', ')', '.']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2542-L2563
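A hedged usage sketch for equi above. It assumes cartopy and matplotlib are installed; the pole position and radius are made-up values, and the radius is taken to be in kilometres (the unit consumed by shoot):

import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from pmagpy import ipmag

ax = plt.axes(projection=ccrs.Orthographic(central_longitude=0., central_latitude=90.))
ax.coastlines()
ipmag.equi(ax, centerlon=30., centerlat=75., radius=1000., color='r', alpha=0.5)  # hypothetical pole and radius
plt.show()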
2,575
quiltdata/quilt
compiler/quilt/tools/store.py
PackageStore.create_package_node
def create_package_node(self, team, user, package, dry_run=False): """ Creates a new package and initializes its contents. See `install_package`. """ contents = RootNode(dict()) if dry_run: return contents self.check_name(team, user, package) assert contents is not None self.create_dirs() # Delete any existing data. path = self.package_path(team, user, package) try: os.remove(path) except OSError: pass return contents
python
def create_package_node(self, team, user, package, dry_run=False): """ Creates a new package and initializes its contents. See `install_package`. """ contents = RootNode(dict()) if dry_run: return contents self.check_name(team, user, package) assert contents is not None self.create_dirs() # Delete any existing data. path = self.package_path(team, user, package) try: os.remove(path) except OSError: pass return contents
['def', 'create_package_node', '(', 'self', ',', 'team', ',', 'user', ',', 'package', ',', 'dry_run', '=', 'False', ')', ':', 'contents', '=', 'RootNode', '(', 'dict', '(', ')', ')', 'if', 'dry_run', ':', 'return', 'contents', 'self', '.', 'check_name', '(', 'team', ',', 'user', ',', 'package', ')', 'assert', 'contents', 'is', 'not', 'None', 'self', '.', 'create_dirs', '(', ')', '# Delete any existing data.', 'path', '=', 'self', '.', 'package_path', '(', 'team', ',', 'user', ',', 'package', ')', 'try', ':', 'os', '.', 'remove', '(', 'path', ')', 'except', 'OSError', ':', 'pass', 'return', 'contents']
Creates a new package and initializes its contents. See `install_package`.
['Creates', 'a', 'new', 'package', 'and', 'initializes', 'its', 'contents', '.', 'See', 'install_package', '.']
train
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/store.py#L243-L262
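A hedged sketch of create_package_node above; how a PackageStore is constructed is omitted here and treated as hypothetical:

store = PackageStore()  # hypothetical construction; real code may pass a store location
contents = store.create_package_node(team=None, user='alice', package='demo', dry_run=True)
# dry_run=True returns an empty RootNode before any name checking or disk access;
# without it, any existing data at package_path(team, user, package) is removed first.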
2,576
DataONEorg/d1_python
lib_common/src/d1_common/cert/subject_info.py
_trim_tree
def _trim_tree(state): """Trim empty leaf nodes from the tree. - To simplify the tree conversion, empty nodes are added before it is known if they will contain items that connect back to the authenticated subject. If there are no connections, the nodes remain empty, which causes them to be removed here. - Removing a leaf node may cause the parent to become a new empty leaf node, so the function is repeated until there are no more empty leaf nodes. """ for n in list(state.tree.leaf_node_gen): if n.type_str == TYPE_NODE_TAG: n.parent.child_list.remove(n) return _trim_tree(state)
python
def _trim_tree(state): """Trim empty leaf nodes from the tree. - To simplify the tree conversion, empty nodes are added before it is known if they will contain items that connect back to the authenticated subject. If there are no connections, the nodes remain empty, which causes them to be removed here. - Removing a leaf node may cause the parent to become a new empty leaf node, so the function is repeated until there are no more empty leaf nodes. """ for n in list(state.tree.leaf_node_gen): if n.type_str == TYPE_NODE_TAG: n.parent.child_list.remove(n) return _trim_tree(state)
['def', '_trim_tree', '(', 'state', ')', ':', 'for', 'n', 'in', 'list', '(', 'state', '.', 'tree', '.', 'leaf_node_gen', ')', ':', 'if', 'n', '.', 'type_str', '==', 'TYPE_NODE_TAG', ':', 'n', '.', 'parent', '.', 'child_list', '.', 'remove', '(', 'n', ')', 'return', '_trim_tree', '(', 'state', ')']
Trim empty leaf nodes from the tree. - To simplify the tree conversion, empty nodes are added before it is known if they will contain items that connect back to the authenticated subject. If there are no connections, the nodes remain empty, which causes them to be removed here. - Removing a leaf node may cause the parent to become a new empty leaf node, so the function is repeated until there are no more empty leaf nodes.
['Trim', 'empty', 'leaf', 'nodes', 'from', 'the', 'tree', '.']
train
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L378-L392
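A standalone sketch of the same trimming idea on a generic tree; every name below (children, parent, is_placeholder) is hypothetical and not the d1_common API:

def iter_leaves(node):
    # Yield the current leaf nodes of the tree rooted at node.
    if not node.children:
        yield node
    for child in node.children:
        yield from iter_leaves(child)

def trim_empty_leaves(root):
    # Keep removing placeholder leaves until none are left, since removing a
    # leaf can turn its parent into a new empty leaf (mirrors the recursion above).
    changed = True
    while changed:
        changed = False
        for leaf in list(iter_leaves(root)):
            if leaf.is_placeholder and leaf.parent is not None:
                leaf.parent.children.remove(leaf)
                changed = True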
2,577
NuGrid/NuGridPy
nugridpy/data_plot.py
DataPlot.iso_abund
def iso_abund(self, cycle, stable=False, amass_range=None, mass_range=None, ylim=[0,0], ref=-1, show=True, log_logic=True, decayed=False, color_plot=True, grid=False, point_set=1, include_title=False, data_provided=False,thedata=None, verbose=True, mov=False,drawfig=None,drawax=None,show_names=True, label=None,colour=None,elemaburtn=False,mypoint=None,plot_type=['-','--','-.',':','-']): ''' plot the abundance of all the chemical species Parameters ---------- cycle : string, integer or list The cycle of interest. If it is a list of cycles, this method will do a plot for each cycle and save them to a file. stable : boolean, optional A boolean of whether to filter out the unstables. The defaults is False. amass_range : list, optional A 1x2 array containing the lower and upper atomic mass range. If None plot entire available atomic mass range. The default is None. mass_range : list, optional A 1x2 array containing the lower and upper mass range. If this is an instance of abu_vector this will only plot isotopes that have an atominc mass within this range. This will throw an error if this range does not make sense ie [45,2]. If None, it will plot over the entire range. The defaults is None. ylim : list, optional A 1x2 array containing the lower and upper Y limits. If it is [0,0], then ylim will be determined automatically. The default is [0,0]. ref : integer or list, optional reference cycle. If it is not -1, this method will plot the abundences of cycle devided by the cycle of the same instance given in the ref variable. If ref is a list it will be interpreted to have two elements: ref=['dir/of/ref/run',cycle] which uses a refernece cycle from another run. If any abundence in the reference cycle is zero, it will replace it with 1e-99. The default is -1. show : boolean, optional Boolean of if the plot should be displayed. The default is True. log_logic : boolean, optional Plot abundances in log scale or linear. The default is True. decayed : boolean, optional If True plot decayed distributions, else plot life distribution. The default is False. color_plot : boolean, optional Color dots and lines [True/False]. The default is True. grid : boolean, optional print grid. The default is False. point_set : integer, optional Set to 0, 1 or 2 to select one of three point sets, useful for multiple abundances or ratios in one plot. The defalult is 1. include_title : boolean, optional Include a title with the plot. The default is False. drawfig, drawax, mov : optional, not necessary for user to set these variables The figure and axes containers to be drawn on, and whether or not a movie is being made (only True when se.movie is called, which sets mov to True automatically elemaburtn : boolean, private If true, iso_abund() returns after writing self.***_iso_to_plot for use with other plotting routines.f mypoint : string, optional fix the marker style of all the points in this plot to one type, given as a string. If None, multiple point styles are used as per point_set. 
The default is None ''' plotType=self._classTest() if str(cycle.__class__)=="<type 'list'>": self.iso_abundMulti(cycle, stable,amass_range,mass_range,ylim,ref, decayed,include_title,color_plot=color_plot,grid=False,point_set=point_set) return if mass_range!=None and mass_range[0]>mass_range[1]: print('Please input a proper mass range') print('Returning None') return None if amass_range!=None and amass_range[0]>amass_range[1]: print('Please input a proper Atomic mass range') print('Returning None') return None if plotType=='se': if decayed: print('Decay option not yet implemented for mppnp - but it is easy do! Consider investing the time!') return None # get things as arrays if not data_provided: cycle=self.se.findCycle(cycle) a_iso_to_plot = array(self.se.A) abunds = self.get(cycle,'iso_massf') isotope_to_plot = array(self.se.isotopes) z_iso_to_plot = array(self.se.Z) isomers_to_plot = array(self.se.isomeric_states) if ref >-1: ref=self.se.findCycle(ref) abundsRef=self.se.get(ref,'iso_massf') masses = self.se.get(cycle,'mass') else: cycle=cycle # why so serious? a_iso_to_plot = array(self.se.A) abunds = thedata[0] isotope_to_plot = array(self.se.isotopes) z_iso_to_plot = array(self.se.Z) isomers_to_plot = array(self.se.isomeric_states) if ref >-1: raise IOError("No. It's not ready yet.") #ref=self.se.findCycle(ref) #abundsRef=self.se.get(ref,'iso_massf') masses = thedata[1] if mass_range == None: if verbose: print('Using default mass range') mass_range = [min(masses),max(masses)] masses.sort() mass_range.sort() if amass_range == None: amass_range=[int(min(a_iso_to_plot)),int(max(a_iso_to_plot))] # remove neutrons - this could move in the non- se/PPN specific part below if 0 in z_iso_to_plot: ind_neut = where(z_iso_to_plot==0)[0][0] a_iso_to_plot = delete(a_iso_to_plot,ind_neut) z_iso_to_plot = delete(z_iso_to_plot,ind_neut) isomers_to_plot = delete(isomers_to_plot,ind_neut) isotope_to_plot = delete(isotope_to_plot,ind_neut) abunds = delete(abunds,ind_neut,1) if ref >-1: abundsRef = delete(abundsRef,ind_neut,1) # extract amass_range acon=(a_iso_to_plot>=amass_range[0]) & (a_iso_to_plot<=amass_range[1]) isomers_to_plot = isomers_to_plot[acon] isotope_to_plot = isotope_to_plot[acon] z_iso_to_plot = z_iso_to_plot[acon] abunds = abunds.T[acon].T if ref >-1: abundsRef = abundsRef.T[acon].T a_iso_to_plot = a_iso_to_plot[acon] el_iso_to_plot = array([x.split('-')[0] for x in isotope_to_plot.tolist()]) # apply mass range if mass_range == None: if verbose: print('Using default mass range') mass_range = [min(masses),max(masses)] mass_range.sort() aabs = [] if ref >-1: cyc = [cycle,ref] abus = [abunds,abundsRef] else: cyc = [cycle] abus = [abunds] for cc,aa in zip(cyc,abus): if not data_provided: masses = self.se.get(cc,'mass') else: masses=masses # why so serious? masses.sort() dmass = masses[1:] - masses[:-1] # I should check the grid definition dmass = append(dmass,0.) 
mcon = (masses>=mass_range[0]) & (masses<=mass_range[1]) dmass = dmass[mcon] aa = aa[mcon] # average over mass range: aa = (aa.T*dmass).T.sum(0) aa = old_div(aa, (mass_range[1] - mass_range[0])) # abunds has now length of isotope_to_plot aabs.append(aa) if ref >-1: abunds = old_div(aabs[0],(aabs[1]+1.e-99)) else: abunds = aabs[0] self.a_iso_to_plot=a_iso_to_plot self.isotope_to_plot=isotope_to_plot self.z_iso_to_plot=z_iso_to_plot self.el_iso_to_plot=el_iso_to_plot self.abunds=abunds self.isomers_to_plot=isomers_to_plot if elemaburtn: return # self.isotopes = self.se.isotopes elif plotType=='PPN': print("This method adds the following variables to the instance:") print("a_iso_to_plot mass number of plotted range of species") print("isotope_to_plot corresponding list of isotopes") print("z_iso_to_plot corresponding charge numbers") print("el_iso_to_plot corresponding element names") print("abunds corresponding abundances") print("isom isomers and their abundance") self.get(cycle,decayed=decayed) if ref is not -1: if type(ref) is list: # reference cycle from other run import ppn pp=ppn.abu_vector(ref[0]) abunds_pp=pp.get(ref[1],decayed=decayed) self.abunds=old_div(self.abunds,pp.abunds) else: abunds=self.abunds self.get(ref,decayed=decayed) self.abunds=old_div(abunds,(self.abunds+1.e-99)) if amass_range == None: amass_range=[min(self.a_iso_to_plot),max(self.a_iso_to_plot)] aa=ma.masked_outside(self.a_iso_to_plot,amass_range[0],amass_range[1]) isotope_to_plot=ma.array(self.isotope_to_plot,mask=aa.mask).compressed() z_iso_to_plot=ma.array(self.z_iso_to_plot,mask=aa.mask).compressed() el_iso_to_plot=ma.array(self.el_iso_to_plot,mask=aa.mask).compressed() abunds=ma.array(self.abunds,mask=aa.mask).compressed() a_iso_to_plot=aa.compressed() isomers_to_plot=[] for i in range(len(self.isom)): if int(self.isom[i][0].split('-')[1])>100: isomers_to_plot.append(self.isom[i]) self.a_iso_to_plot=a_iso_to_plot self.isotope_to_plot=isotope_to_plot self.z_iso_to_plot=z_iso_to_plot self.el_iso_to_plot=el_iso_to_plot self.abunds=abunds self.isomers_to_plot=isomers_to_plot else: print('This method, iso_abund, is not supported by this class') print('Returning None') return None if verbose: print('Using the following conditions:') if plotType=='se': print('\tmass_range:', mass_range[0], mass_range[1]) print('\tAtomic mass_range:', amass_range[0], amass_range[1]) print('\tcycle: ',cycle) print('\tplot only stable:',stable) print('\tplot decayed: ',decayed) if stable: # remove unstables: # For the element that belongs to the isotope at index 5 in isotope_to_plot # (C-12) the following gives the mass numbers of stable elements: # self.stable_el[self.stable_names.index(el_iso_to_plot[5])][1:] ind_delete=[] for i in range(len(isotope_to_plot)): if a_iso_to_plot[i] not in self.stable_el[self.stable_names.index(el_iso_to_plot[i])][1:]: ind_delete.append(i) a_iso_to_plot = delete(a_iso_to_plot, ind_delete) z_iso_to_plot = delete(z_iso_to_plot, ind_delete) isomers_to_plot = delete(isomers_to_plot,ind_delete) isotope_to_plot = delete(isotope_to_plot,ind_delete) el_iso_to_plot = delete(el_iso_to_plot, ind_delete) abunds = delete(abunds, ind_delete) # el_list=[] # list of elements in el_iso_to_plot # # for el in self.elements_names: # if el in el_iso_to_plot: # el_list.append(el) # SJONES implicit loop: el_list = [el for el in self.elements_names if el in el_iso_to_plot] abund_plot = [] # extract for each element an abundance and associated mass_num = [] # mass number array, sorted by mass number for el in el_list: numbers = 
a_iso_to_plot[(el_iso_to_plot==el)] abund_plot.append(abunds[(el_iso_to_plot==el)][argsort(numbers)]) mass_num.append(sort(numbers)) # now plot: #plot_type = ['-','--','-.',':','-'] ##now implemented as an arg print(plot_type) while len(plot_type)<=4: plot_type.append('') print(plot_type) pl_index = 0 if mypoint is None: points = [['o','^','p','h','*'],['x','+','D','>','s'],['H','v','<','*','3']] else: points = [ [mypoint]*5 , [mypoint]*5 , [mypoint]*5] if color_plot: colors = ['g','r','c','m','k'] elif colour is not None: colors = [colour]*5 else: colors = ['k','k','k','k','k'] ylim1 = 1.e99 ylim2 = -1.e99 # initialise movie-related things: if mov: artists=[] ax=drawax fig=drawfig elif drawax is not None: ax=drawax else: ax=pl.axes() if drawfig is not None: fig=drawfig for j in range(len(abund_plot)): #Loop through the elements of interest # for l in xrange(len(abund_plot[j])): # if abund_plot[j][l] == 0: # abund_plot[j][l] = 1e-99 abund_plot[j] = np.maximum(abund_plot[j],1.e-99) # SJONES instead of looping # a_dum=zeros(len(abund_plot[j])) # this I (FH) have to do because for some if log_logic == False: # reason log10(abu_abund[j]) does not work a_dum = abund_plot[j] # although abu_abund[j] is a numpy array?!? else: # for ii in range(len(abund_plot[j])): # a_dum[ii]=log10(abund_plot[j][ii]) a_dum=np.log10(abund_plot[j]) # SJONES this seems to work fine for me if type(colors[0]) is str: this_label=str(colors[pl_index]+points[point_set][pl_index]+\ plot_type[pl_index]) else: this_label=None if mov: artist1,=ax.plot(mass_num[j],a_dum,this_label,markersize=6, markeredgecolor='None') else: if this_label is not None: if label is not None and j==0: pl.plot(mass_num[j],a_dum,this_label,markersize=6, label=label,markeredgecolor='None') pl.legend(loc='best').draw_frame(False) else: pl.plot(mass_num[j],a_dum,this_label,markersize=6, markeredgecolor='None') else: if label is not None and j==0: pl.plot(mass_num[j],a_dum, color=colors[pl_index], marker=points[point_set][pl_index], linestyle=plot_type[pl_index], markersize=6,label=label, markeredgecolor='None') pl.legend(loc='best').draw_frame(False) else: pl.plot(mass_num[j],a_dum, color=colors[pl_index], marker=points[point_set][pl_index], linestyle=plot_type[pl_index], markersize=6,markeredgecolor='None') abu_max = max(a_dum) max_index=where(a_dum==abu_max)[0][0] coordinates=[mass_num[j][max_index],abu_max] if mov: artist2=ax.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True) else: if show_names: # pl.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True) pl.text(coordinates[0],np.log10(2.2*10.**coordinates[1]), el_list[j],clip_on=True, horizontalalignment='center') pl_index+=1 if pl_index > 4: pl_index = 0 ylim1=min(ylim1,min(a_dum)) ylim2=max(ylim2,max(a_dum)) if mov: artists.extend([artist1,artist2]) # now trimming the ylims if log_logic: dylim=0.05*(ylim2-ylim1) ylim1 = ylim1 -dylim ylim2 = ylim2 +dylim if ref is not -1: ylim2 = min(ylim2,4) ylim1 = max(ylim1,-4) else: ylim2 = min(ylim2,0.2) ylim1 = max(ylim1,-13) else: ylim1 = ylim1 *0.8 ylim2 = ylim2 *1.1 if include_title: if plotType=='se': if ref == -1: title = str('Range %4.2f' %mass_range[0]) + str('-%4.2f' %mass_range[1]) +\ str(' for cycle %d' %int(cycle)) else: title = str('Range %4.2f' %mass_range[0]) + \ str('-%4.2f' %mass_range[1]) + str(' for cycle %d' %int(cycle))+\ str(' relative to cycle %d' %int(ref)) else: if ref == -1: title = str('Cycle %d' %int(cycle)) else: title = str('Cycle %d' %int(cycle))+\ str(' relative to cycle %d' %int(ref)) 
print("including title: ...") if mov: artist1,=ax.title(title) artists.append(artist1) else: pl.title(title) if ylim[0] == 0 and ylim[1] == 0: pl.ylim(ylim1,ylim2) else: pl.ylim(ylim[0],ylim[1]) pl.xlim([amass_range[0]-.5,amass_range[1]+.5]) pl.xlabel('mass number (A)',fontsize=14) if ref is not -1: if log_logic: pl.ylabel(r'log abundance ratio',fontsize=14) else: pl.ylabel(r'abundance ratio',fontsize=14) else: if log_logic: pl.ylabel(r'log mass fraction ',fontsize=14) else: pl.ylabel(r'mass fraction',fontsize=14) if amass_range != None: minimum_mass = amass_range[0] maximum_mass = amass_range[1] elif mass_range != None: minimum_mass = mass_range[0] maximum_mass = mass_range[1] else: minimum_mass = 0 maximum_mass = 200 if log_logic == False: if mov: artist1,=ax.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-') artists.append(artist1) else: pl.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-') else: if mov: artist1,=ax.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-') artists.append(artist1) else: pl.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-') labelsx=[] if (maximum_mass-minimum_mass) > 100: delta_labelsx = 10 else: delta_labelsx = 5 iii = amass_range[0]%delta_labelsx if iii == 0: labelsx.append(str(amass_range[0])) else: labelsx.append(' ') iii = iii+1 kkk = 0 for label1 in range(amass_range[1]-amass_range[0]): if iii == 5: kkk = kkk+1 labelsx.append(str((iii*kkk)+amass_range[0]-(amass_range[0]%5))) iii = 0 iii = iii+1 else: labelsx.append(' ') iii = iii+1 if delta_labelsx == 5: xticks = arange(amass_range[0],amass_range[1],1) pl.xticks(xticks,labelsx) else: pl.xticks() # SJONES moved the pl.grid and pl.show to the very end if grid: pl.grid() if show: pl.show() ##!!FOR!!###### print 'LEN LABELS= ', len(labelsx) ##DEBUGGING#### ####!!!######## for bbb in range (len(labelsx)): ############### print labelsx[bbb] if mov: return artists
python
def iso_abund(self, cycle, stable=False, amass_range=None, mass_range=None, ylim=[0,0], ref=-1, show=True, log_logic=True, decayed=False, color_plot=True, grid=False, point_set=1, include_title=False, data_provided=False,thedata=None, verbose=True, mov=False,drawfig=None,drawax=None,show_names=True, label=None,colour=None,elemaburtn=False,mypoint=None,plot_type=['-','--','-.',':','-']): ''' plot the abundance of all the chemical species Parameters ---------- cycle : string, integer or list The cycle of interest. If it is a list of cycles, this method will do a plot for each cycle and save them to a file. stable : boolean, optional A boolean of whether to filter out the unstables. The defaults is False. amass_range : list, optional A 1x2 array containing the lower and upper atomic mass range. If None plot entire available atomic mass range. The default is None. mass_range : list, optional A 1x2 array containing the lower and upper mass range. If this is an instance of abu_vector this will only plot isotopes that have an atominc mass within this range. This will throw an error if this range does not make sense ie [45,2]. If None, it will plot over the entire range. The defaults is None. ylim : list, optional A 1x2 array containing the lower and upper Y limits. If it is [0,0], then ylim will be determined automatically. The default is [0,0]. ref : integer or list, optional reference cycle. If it is not -1, this method will plot the abundences of cycle devided by the cycle of the same instance given in the ref variable. If ref is a list it will be interpreted to have two elements: ref=['dir/of/ref/run',cycle] which uses a refernece cycle from another run. If any abundence in the reference cycle is zero, it will replace it with 1e-99. The default is -1. show : boolean, optional Boolean of if the plot should be displayed. The default is True. log_logic : boolean, optional Plot abundances in log scale or linear. The default is True. decayed : boolean, optional If True plot decayed distributions, else plot life distribution. The default is False. color_plot : boolean, optional Color dots and lines [True/False]. The default is True. grid : boolean, optional print grid. The default is False. point_set : integer, optional Set to 0, 1 or 2 to select one of three point sets, useful for multiple abundances or ratios in one plot. The defalult is 1. include_title : boolean, optional Include a title with the plot. The default is False. drawfig, drawax, mov : optional, not necessary for user to set these variables The figure and axes containers to be drawn on, and whether or not a movie is being made (only True when se.movie is called, which sets mov to True automatically elemaburtn : boolean, private If true, iso_abund() returns after writing self.***_iso_to_plot for use with other plotting routines.f mypoint : string, optional fix the marker style of all the points in this plot to one type, given as a string. If None, multiple point styles are used as per point_set. 
The default is None ''' plotType=self._classTest() if str(cycle.__class__)=="<type 'list'>": self.iso_abundMulti(cycle, stable,amass_range,mass_range,ylim,ref, decayed,include_title,color_plot=color_plot,grid=False,point_set=point_set) return if mass_range!=None and mass_range[0]>mass_range[1]: print('Please input a proper mass range') print('Returning None') return None if amass_range!=None and amass_range[0]>amass_range[1]: print('Please input a proper Atomic mass range') print('Returning None') return None if plotType=='se': if decayed: print('Decay option not yet implemented for mppnp - but it is easy do! Consider investing the time!') return None # get things as arrays if not data_provided: cycle=self.se.findCycle(cycle) a_iso_to_plot = array(self.se.A) abunds = self.get(cycle,'iso_massf') isotope_to_plot = array(self.se.isotopes) z_iso_to_plot = array(self.se.Z) isomers_to_plot = array(self.se.isomeric_states) if ref >-1: ref=self.se.findCycle(ref) abundsRef=self.se.get(ref,'iso_massf') masses = self.se.get(cycle,'mass') else: cycle=cycle # why so serious? a_iso_to_plot = array(self.se.A) abunds = thedata[0] isotope_to_plot = array(self.se.isotopes) z_iso_to_plot = array(self.se.Z) isomers_to_plot = array(self.se.isomeric_states) if ref >-1: raise IOError("No. It's not ready yet.") #ref=self.se.findCycle(ref) #abundsRef=self.se.get(ref,'iso_massf') masses = thedata[1] if mass_range == None: if verbose: print('Using default mass range') mass_range = [min(masses),max(masses)] masses.sort() mass_range.sort() if amass_range == None: amass_range=[int(min(a_iso_to_plot)),int(max(a_iso_to_plot))] # remove neutrons - this could move in the non- se/PPN specific part below if 0 in z_iso_to_plot: ind_neut = where(z_iso_to_plot==0)[0][0] a_iso_to_plot = delete(a_iso_to_plot,ind_neut) z_iso_to_plot = delete(z_iso_to_plot,ind_neut) isomers_to_plot = delete(isomers_to_plot,ind_neut) isotope_to_plot = delete(isotope_to_plot,ind_neut) abunds = delete(abunds,ind_neut,1) if ref >-1: abundsRef = delete(abundsRef,ind_neut,1) # extract amass_range acon=(a_iso_to_plot>=amass_range[0]) & (a_iso_to_plot<=amass_range[1]) isomers_to_plot = isomers_to_plot[acon] isotope_to_plot = isotope_to_plot[acon] z_iso_to_plot = z_iso_to_plot[acon] abunds = abunds.T[acon].T if ref >-1: abundsRef = abundsRef.T[acon].T a_iso_to_plot = a_iso_to_plot[acon] el_iso_to_plot = array([x.split('-')[0] for x in isotope_to_plot.tolist()]) # apply mass range if mass_range == None: if verbose: print('Using default mass range') mass_range = [min(masses),max(masses)] mass_range.sort() aabs = [] if ref >-1: cyc = [cycle,ref] abus = [abunds,abundsRef] else: cyc = [cycle] abus = [abunds] for cc,aa in zip(cyc,abus): if not data_provided: masses = self.se.get(cc,'mass') else: masses=masses # why so serious? masses.sort() dmass = masses[1:] - masses[:-1] # I should check the grid definition dmass = append(dmass,0.) 
mcon = (masses>=mass_range[0]) & (masses<=mass_range[1]) dmass = dmass[mcon] aa = aa[mcon] # average over mass range: aa = (aa.T*dmass).T.sum(0) aa = old_div(aa, (mass_range[1] - mass_range[0])) # abunds has now length of isotope_to_plot aabs.append(aa) if ref >-1: abunds = old_div(aabs[0],(aabs[1]+1.e-99)) else: abunds = aabs[0] self.a_iso_to_plot=a_iso_to_plot self.isotope_to_plot=isotope_to_plot self.z_iso_to_plot=z_iso_to_plot self.el_iso_to_plot=el_iso_to_plot self.abunds=abunds self.isomers_to_plot=isomers_to_plot if elemaburtn: return # self.isotopes = self.se.isotopes elif plotType=='PPN': print("This method adds the following variables to the instance:") print("a_iso_to_plot mass number of plotted range of species") print("isotope_to_plot corresponding list of isotopes") print("z_iso_to_plot corresponding charge numbers") print("el_iso_to_plot corresponding element names") print("abunds corresponding abundances") print("isom isomers and their abundance") self.get(cycle,decayed=decayed) if ref is not -1: if type(ref) is list: # reference cycle from other run import ppn pp=ppn.abu_vector(ref[0]) abunds_pp=pp.get(ref[1],decayed=decayed) self.abunds=old_div(self.abunds,pp.abunds) else: abunds=self.abunds self.get(ref,decayed=decayed) self.abunds=old_div(abunds,(self.abunds+1.e-99)) if amass_range == None: amass_range=[min(self.a_iso_to_plot),max(self.a_iso_to_plot)] aa=ma.masked_outside(self.a_iso_to_plot,amass_range[0],amass_range[1]) isotope_to_plot=ma.array(self.isotope_to_plot,mask=aa.mask).compressed() z_iso_to_plot=ma.array(self.z_iso_to_plot,mask=aa.mask).compressed() el_iso_to_plot=ma.array(self.el_iso_to_plot,mask=aa.mask).compressed() abunds=ma.array(self.abunds,mask=aa.mask).compressed() a_iso_to_plot=aa.compressed() isomers_to_plot=[] for i in range(len(self.isom)): if int(self.isom[i][0].split('-')[1])>100: isomers_to_plot.append(self.isom[i]) self.a_iso_to_plot=a_iso_to_plot self.isotope_to_plot=isotope_to_plot self.z_iso_to_plot=z_iso_to_plot self.el_iso_to_plot=el_iso_to_plot self.abunds=abunds self.isomers_to_plot=isomers_to_plot else: print('This method, iso_abund, is not supported by this class') print('Returning None') return None if verbose: print('Using the following conditions:') if plotType=='se': print('\tmass_range:', mass_range[0], mass_range[1]) print('\tAtomic mass_range:', amass_range[0], amass_range[1]) print('\tcycle: ',cycle) print('\tplot only stable:',stable) print('\tplot decayed: ',decayed) if stable: # remove unstables: # For the element that belongs to the isotope at index 5 in isotope_to_plot # (C-12) the following gives the mass numbers of stable elements: # self.stable_el[self.stable_names.index(el_iso_to_plot[5])][1:] ind_delete=[] for i in range(len(isotope_to_plot)): if a_iso_to_plot[i] not in self.stable_el[self.stable_names.index(el_iso_to_plot[i])][1:]: ind_delete.append(i) a_iso_to_plot = delete(a_iso_to_plot, ind_delete) z_iso_to_plot = delete(z_iso_to_plot, ind_delete) isomers_to_plot = delete(isomers_to_plot,ind_delete) isotope_to_plot = delete(isotope_to_plot,ind_delete) el_iso_to_plot = delete(el_iso_to_plot, ind_delete) abunds = delete(abunds, ind_delete) # el_list=[] # list of elements in el_iso_to_plot # # for el in self.elements_names: # if el in el_iso_to_plot: # el_list.append(el) # SJONES implicit loop: el_list = [el for el in self.elements_names if el in el_iso_to_plot] abund_plot = [] # extract for each element an abundance and associated mass_num = [] # mass number array, sorted by mass number for el in el_list: numbers = 
a_iso_to_plot[(el_iso_to_plot==el)] abund_plot.append(abunds[(el_iso_to_plot==el)][argsort(numbers)]) mass_num.append(sort(numbers)) # now plot: #plot_type = ['-','--','-.',':','-'] ##now implemented as an arg print(plot_type) while len(plot_type)<=4: plot_type.append('') print(plot_type) pl_index = 0 if mypoint is None: points = [['o','^','p','h','*'],['x','+','D','>','s'],['H','v','<','*','3']] else: points = [ [mypoint]*5 , [mypoint]*5 , [mypoint]*5] if color_plot: colors = ['g','r','c','m','k'] elif colour is not None: colors = [colour]*5 else: colors = ['k','k','k','k','k'] ylim1 = 1.e99 ylim2 = -1.e99 # initialise movie-related things: if mov: artists=[] ax=drawax fig=drawfig elif drawax is not None: ax=drawax else: ax=pl.axes() if drawfig is not None: fig=drawfig for j in range(len(abund_plot)): #Loop through the elements of interest # for l in xrange(len(abund_plot[j])): # if abund_plot[j][l] == 0: # abund_plot[j][l] = 1e-99 abund_plot[j] = np.maximum(abund_plot[j],1.e-99) # SJONES instead of looping # a_dum=zeros(len(abund_plot[j])) # this I (FH) have to do because for some if log_logic == False: # reason log10(abu_abund[j]) does not work a_dum = abund_plot[j] # although abu_abund[j] is a numpy array?!? else: # for ii in range(len(abund_plot[j])): # a_dum[ii]=log10(abund_plot[j][ii]) a_dum=np.log10(abund_plot[j]) # SJONES this seems to work fine for me if type(colors[0]) is str: this_label=str(colors[pl_index]+points[point_set][pl_index]+\ plot_type[pl_index]) else: this_label=None if mov: artist1,=ax.plot(mass_num[j],a_dum,this_label,markersize=6, markeredgecolor='None') else: if this_label is not None: if label is not None and j==0: pl.plot(mass_num[j],a_dum,this_label,markersize=6, label=label,markeredgecolor='None') pl.legend(loc='best').draw_frame(False) else: pl.plot(mass_num[j],a_dum,this_label,markersize=6, markeredgecolor='None') else: if label is not None and j==0: pl.plot(mass_num[j],a_dum, color=colors[pl_index], marker=points[point_set][pl_index], linestyle=plot_type[pl_index], markersize=6,label=label, markeredgecolor='None') pl.legend(loc='best').draw_frame(False) else: pl.plot(mass_num[j],a_dum, color=colors[pl_index], marker=points[point_set][pl_index], linestyle=plot_type[pl_index], markersize=6,markeredgecolor='None') abu_max = max(a_dum) max_index=where(a_dum==abu_max)[0][0] coordinates=[mass_num[j][max_index],abu_max] if mov: artist2=ax.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True) else: if show_names: # pl.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True) pl.text(coordinates[0],np.log10(2.2*10.**coordinates[1]), el_list[j],clip_on=True, horizontalalignment='center') pl_index+=1 if pl_index > 4: pl_index = 0 ylim1=min(ylim1,min(a_dum)) ylim2=max(ylim2,max(a_dum)) if mov: artists.extend([artist1,artist2]) # now trimming the ylims if log_logic: dylim=0.05*(ylim2-ylim1) ylim1 = ylim1 -dylim ylim2 = ylim2 +dylim if ref is not -1: ylim2 = min(ylim2,4) ylim1 = max(ylim1,-4) else: ylim2 = min(ylim2,0.2) ylim1 = max(ylim1,-13) else: ylim1 = ylim1 *0.8 ylim2 = ylim2 *1.1 if include_title: if plotType=='se': if ref == -1: title = str('Range %4.2f' %mass_range[0]) + str('-%4.2f' %mass_range[1]) +\ str(' for cycle %d' %int(cycle)) else: title = str('Range %4.2f' %mass_range[0]) + \ str('-%4.2f' %mass_range[1]) + str(' for cycle %d' %int(cycle))+\ str(' relative to cycle %d' %int(ref)) else: if ref == -1: title = str('Cycle %d' %int(cycle)) else: title = str('Cycle %d' %int(cycle))+\ str(' relative to cycle %d' %int(ref)) 
print("including title: ...") if mov: artist1,=ax.title(title) artists.append(artist1) else: pl.title(title) if ylim[0] == 0 and ylim[1] == 0: pl.ylim(ylim1,ylim2) else: pl.ylim(ylim[0],ylim[1]) pl.xlim([amass_range[0]-.5,amass_range[1]+.5]) pl.xlabel('mass number (A)',fontsize=14) if ref is not -1: if log_logic: pl.ylabel(r'log abundance ratio',fontsize=14) else: pl.ylabel(r'abundance ratio',fontsize=14) else: if log_logic: pl.ylabel(r'log mass fraction ',fontsize=14) else: pl.ylabel(r'mass fraction',fontsize=14) if amass_range != None: minimum_mass = amass_range[0] maximum_mass = amass_range[1] elif mass_range != None: minimum_mass = mass_range[0] maximum_mass = mass_range[1] else: minimum_mass = 0 maximum_mass = 200 if log_logic == False: if mov: artist1,=ax.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-') artists.append(artist1) else: pl.plot([amass_range[0]-.5,amass_range[1]+.5],[1,1],'k-') else: if mov: artist1,=ax.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-') artists.append(artist1) else: pl.plot([amass_range[0]-.5,amass_range[1]+.5],[0,0],'k-') labelsx=[] if (maximum_mass-minimum_mass) > 100: delta_labelsx = 10 else: delta_labelsx = 5 iii = amass_range[0]%delta_labelsx if iii == 0: labelsx.append(str(amass_range[0])) else: labelsx.append(' ') iii = iii+1 kkk = 0 for label1 in range(amass_range[1]-amass_range[0]): if iii == 5: kkk = kkk+1 labelsx.append(str((iii*kkk)+amass_range[0]-(amass_range[0]%5))) iii = 0 iii = iii+1 else: labelsx.append(' ') iii = iii+1 if delta_labelsx == 5: xticks = arange(amass_range[0],amass_range[1],1) pl.xticks(xticks,labelsx) else: pl.xticks() # SJONES moved the pl.grid and pl.show to the very end if grid: pl.grid() if show: pl.show() ##!!FOR!!###### print 'LEN LABELS= ', len(labelsx) ##DEBUGGING#### ####!!!######## for bbb in range (len(labelsx)): ############### print labelsx[bbb] if mov: return artists
['def', 'iso_abund', '(', 'self', ',', 'cycle', ',', 'stable', '=', 'False', ',', 'amass_range', '=', 'None', ',', 'mass_range', '=', 'None', ',', 'ylim', '=', '[', '0', ',', '0', ']', ',', 'ref', '=', '-', '1', ',', 'show', '=', 'True', ',', 'log_logic', '=', 'True', ',', 'decayed', '=', 'False', ',', 'color_plot', '=', 'True', ',', 'grid', '=', 'False', ',', 'point_set', '=', '1', ',', 'include_title', '=', 'False', ',', 'data_provided', '=', 'False', ',', 'thedata', '=', 'None', ',', 'verbose', '=', 'True', ',', 'mov', '=', 'False', ',', 'drawfig', '=', 'None', ',', 'drawax', '=', 'None', ',', 'show_names', '=', 'True', ',', 'label', '=', 'None', ',', 'colour', '=', 'None', ',', 'elemaburtn', '=', 'False', ',', 'mypoint', '=', 'None', ',', 'plot_type', '=', '[', "'-'", ',', "'--'", ',', "'-.'", ',', "':'", ',', "'-'", ']', ')', ':', 'plotType', '=', 'self', '.', '_classTest', '(', ')', 'if', 'str', '(', 'cycle', '.', '__class__', ')', '==', '"<type \'list\'>"', ':', 'self', '.', 'iso_abundMulti', '(', 'cycle', ',', 'stable', ',', 'amass_range', ',', 'mass_range', ',', 'ylim', ',', 'ref', ',', 'decayed', ',', 'include_title', ',', 'color_plot', '=', 'color_plot', ',', 'grid', '=', 'False', ',', 'point_set', '=', 'point_set', ')', 'return', 'if', 'mass_range', '!=', 'None', 'and', 'mass_range', '[', '0', ']', '>', 'mass_range', '[', '1', ']', ':', 'print', '(', "'Please input a proper mass range'", ')', 'print', '(', "'Returning None'", ')', 'return', 'None', 'if', 'amass_range', '!=', 'None', 'and', 'amass_range', '[', '0', ']', '>', 'amass_range', '[', '1', ']', ':', 'print', '(', "'Please input a proper Atomic mass range'", ')', 'print', '(', "'Returning None'", ')', 'return', 'None', 'if', 'plotType', '==', "'se'", ':', 'if', 'decayed', ':', 'print', '(', "'Decay option not yet implemented for mppnp - but it is easy do! Consider investing the time!'", ')', 'return', 'None', '# get things as arrays', 'if', 'not', 'data_provided', ':', 'cycle', '=', 'self', '.', 'se', '.', 'findCycle', '(', 'cycle', ')', 'a_iso_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'A', ')', 'abunds', '=', 'self', '.', 'get', '(', 'cycle', ',', "'iso_massf'", ')', 'isotope_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'isotopes', ')', 'z_iso_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'Z', ')', 'isomers_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'isomeric_states', ')', 'if', 'ref', '>', '-', '1', ':', 'ref', '=', 'self', '.', 'se', '.', 'findCycle', '(', 'ref', ')', 'abundsRef', '=', 'self', '.', 'se', '.', 'get', '(', 'ref', ',', "'iso_massf'", ')', 'masses', '=', 'self', '.', 'se', '.', 'get', '(', 'cycle', ',', "'mass'", ')', 'else', ':', 'cycle', '=', 'cycle', '# why so serious?', 'a_iso_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'A', ')', 'abunds', '=', 'thedata', '[', '0', ']', 'isotope_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'isotopes', ')', 'z_iso_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'Z', ')', 'isomers_to_plot', '=', 'array', '(', 'self', '.', 'se', '.', 'isomeric_states', ')', 'if', 'ref', '>', '-', '1', ':', 'raise', 'IOError', '(', '"No. 
It\'s not ready yet."', ')', '#ref=self.se.findCycle(ref)', "#abundsRef=self.se.get(ref,'iso_massf')", 'masses', '=', 'thedata', '[', '1', ']', 'if', 'mass_range', '==', 'None', ':', 'if', 'verbose', ':', 'print', '(', "'Using default mass range'", ')', 'mass_range', '=', '[', 'min', '(', 'masses', ')', ',', 'max', '(', 'masses', ')', ']', 'masses', '.', 'sort', '(', ')', 'mass_range', '.', 'sort', '(', ')', 'if', 'amass_range', '==', 'None', ':', 'amass_range', '=', '[', 'int', '(', 'min', '(', 'a_iso_to_plot', ')', ')', ',', 'int', '(', 'max', '(', 'a_iso_to_plot', ')', ')', ']', '# remove neutrons - this could move in the non- se/PPN specific part below', 'if', '0', 'in', 'z_iso_to_plot', ':', 'ind_neut', '=', 'where', '(', 'z_iso_to_plot', '==', '0', ')', '[', '0', ']', '[', '0', ']', 'a_iso_to_plot', '=', 'delete', '(', 'a_iso_to_plot', ',', 'ind_neut', ')', 'z_iso_to_plot', '=', 'delete', '(', 'z_iso_to_plot', ',', 'ind_neut', ')', 'isomers_to_plot', '=', 'delete', '(', 'isomers_to_plot', ',', 'ind_neut', ')', 'isotope_to_plot', '=', 'delete', '(', 'isotope_to_plot', ',', 'ind_neut', ')', 'abunds', '=', 'delete', '(', 'abunds', ',', 'ind_neut', ',', '1', ')', 'if', 'ref', '>', '-', '1', ':', 'abundsRef', '=', 'delete', '(', 'abundsRef', ',', 'ind_neut', ',', '1', ')', '# extract amass_range', 'acon', '=', '(', 'a_iso_to_plot', '>=', 'amass_range', '[', '0', ']', ')', '&', '(', 'a_iso_to_plot', '<=', 'amass_range', '[', '1', ']', ')', 'isomers_to_plot', '=', 'isomers_to_plot', '[', 'acon', ']', 'isotope_to_plot', '=', 'isotope_to_plot', '[', 'acon', ']', 'z_iso_to_plot', '=', 'z_iso_to_plot', '[', 'acon', ']', 'abunds', '=', 'abunds', '.', 'T', '[', 'acon', ']', '.', 'T', 'if', 'ref', '>', '-', '1', ':', 'abundsRef', '=', 'abundsRef', '.', 'T', '[', 'acon', ']', '.', 'T', 'a_iso_to_plot', '=', 'a_iso_to_plot', '[', 'acon', ']', 'el_iso_to_plot', '=', 'array', '(', '[', 'x', '.', 'split', '(', "'-'", ')', '[', '0', ']', 'for', 'x', 'in', 'isotope_to_plot', '.', 'tolist', '(', ')', ']', ')', '# apply mass range', 'if', 'mass_range', '==', 'None', ':', 'if', 'verbose', ':', 'print', '(', "'Using default mass range'", ')', 'mass_range', '=', '[', 'min', '(', 'masses', ')', ',', 'max', '(', 'masses', ')', ']', 'mass_range', '.', 'sort', '(', ')', 'aabs', '=', '[', ']', 'if', 'ref', '>', '-', '1', ':', 'cyc', '=', '[', 'cycle', ',', 'ref', ']', 'abus', '=', '[', 'abunds', ',', 'abundsRef', ']', 'else', ':', 'cyc', '=', '[', 'cycle', ']', 'abus', '=', '[', 'abunds', ']', 'for', 'cc', ',', 'aa', 'in', 'zip', '(', 'cyc', ',', 'abus', ')', ':', 'if', 'not', 'data_provided', ':', 'masses', '=', 'self', '.', 'se', '.', 'get', '(', 'cc', ',', "'mass'", ')', 'else', ':', 'masses', '=', 'masses', '# why so serious?', 'masses', '.', 'sort', '(', ')', 'dmass', '=', 'masses', '[', '1', ':', ']', '-', 'masses', '[', ':', '-', '1', ']', '# I should check the grid definition', 'dmass', '=', 'append', '(', 'dmass', ',', '0.', ')', 'mcon', '=', '(', 'masses', '>=', 'mass_range', '[', '0', ']', ')', '&', '(', 'masses', '<=', 'mass_range', '[', '1', ']', ')', 'dmass', '=', 'dmass', '[', 'mcon', ']', 'aa', '=', 'aa', '[', 'mcon', ']', '# average over mass range:', 'aa', '=', '(', 'aa', '.', 'T', '*', 'dmass', ')', '.', 'T', '.', 'sum', '(', '0', ')', 'aa', '=', 'old_div', '(', 'aa', ',', '(', 'mass_range', '[', '1', ']', '-', 'mass_range', '[', '0', ']', ')', ')', '# abunds has now length of isotope_to_plot', 'aabs', '.', 'append', '(', 'aa', ')', 'if', 'ref', '>', '-', '1', ':', 'abunds', '=', 'old_div', '(', 
'aabs', '[', '0', ']', ',', '(', 'aabs', '[', '1', ']', '+', '1.e-99', ')', ')', 'else', ':', 'abunds', '=', 'aabs', '[', '0', ']', 'self', '.', 'a_iso_to_plot', '=', 'a_iso_to_plot', 'self', '.', 'isotope_to_plot', '=', 'isotope_to_plot', 'self', '.', 'z_iso_to_plot', '=', 'z_iso_to_plot', 'self', '.', 'el_iso_to_plot', '=', 'el_iso_to_plot', 'self', '.', 'abunds', '=', 'abunds', 'self', '.', 'isomers_to_plot', '=', 'isomers_to_plot', 'if', 'elemaburtn', ':', 'return', '# self.isotopes = self.se.isotopes', 'elif', 'plotType', '==', "'PPN'", ':', 'print', '(', '"This method adds the following variables to the instance:"', ')', 'print', '(', '"a_iso_to_plot mass number of plotted range of species"', ')', 'print', '(', '"isotope_to_plot corresponding list of isotopes"', ')', 'print', '(', '"z_iso_to_plot corresponding charge numbers"', ')', 'print', '(', '"el_iso_to_plot corresponding element names"', ')', 'print', '(', '"abunds corresponding abundances"', ')', 'print', '(', '"isom isomers and their abundance"', ')', 'self', '.', 'get', '(', 'cycle', ',', 'decayed', '=', 'decayed', ')', 'if', 'ref', 'is', 'not', '-', '1', ':', 'if', 'type', '(', 'ref', ')', 'is', 'list', ':', '# reference cycle from other run', 'import', 'ppn', 'pp', '=', 'ppn', '.', 'abu_vector', '(', 'ref', '[', '0', ']', ')', 'abunds_pp', '=', 'pp', '.', 'get', '(', 'ref', '[', '1', ']', ',', 'decayed', '=', 'decayed', ')', 'self', '.', 'abunds', '=', 'old_div', '(', 'self', '.', 'abunds', ',', 'pp', '.', 'abunds', ')', 'else', ':', 'abunds', '=', 'self', '.', 'abunds', 'self', '.', 'get', '(', 'ref', ',', 'decayed', '=', 'decayed', ')', 'self', '.', 'abunds', '=', 'old_div', '(', 'abunds', ',', '(', 'self', '.', 'abunds', '+', '1.e-99', ')', ')', 'if', 'amass_range', '==', 'None', ':', 'amass_range', '=', '[', 'min', '(', 'self', '.', 'a_iso_to_plot', ')', ',', 'max', '(', 'self', '.', 'a_iso_to_plot', ')', ']', 'aa', '=', 'ma', '.', 'masked_outside', '(', 'self', '.', 'a_iso_to_plot', ',', 'amass_range', '[', '0', ']', ',', 'amass_range', '[', '1', ']', ')', 'isotope_to_plot', '=', 'ma', '.', 'array', '(', 'self', '.', 'isotope_to_plot', ',', 'mask', '=', 'aa', '.', 'mask', ')', '.', 'compressed', '(', ')', 'z_iso_to_plot', '=', 'ma', '.', 'array', '(', 'self', '.', 'z_iso_to_plot', ',', 'mask', '=', 'aa', '.', 'mask', ')', '.', 'compressed', '(', ')', 'el_iso_to_plot', '=', 'ma', '.', 'array', '(', 'self', '.', 'el_iso_to_plot', ',', 'mask', '=', 'aa', '.', 'mask', ')', '.', 'compressed', '(', ')', 'abunds', '=', 'ma', '.', 'array', '(', 'self', '.', 'abunds', ',', 'mask', '=', 'aa', '.', 'mask', ')', '.', 'compressed', '(', ')', 'a_iso_to_plot', '=', 'aa', '.', 'compressed', '(', ')', 'isomers_to_plot', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'isom', ')', ')', ':', 'if', 'int', '(', 'self', '.', 'isom', '[', 'i', ']', '[', '0', ']', '.', 'split', '(', "'-'", ')', '[', '1', ']', ')', '>', '100', ':', 'isomers_to_plot', '.', 'append', '(', 'self', '.', 'isom', '[', 'i', ']', ')', 'self', '.', 'a_iso_to_plot', '=', 'a_iso_to_plot', 'self', '.', 'isotope_to_plot', '=', 'isotope_to_plot', 'self', '.', 'z_iso_to_plot', '=', 'z_iso_to_plot', 'self', '.', 'el_iso_to_plot', '=', 'el_iso_to_plot', 'self', '.', 'abunds', '=', 'abunds', 'self', '.', 'isomers_to_plot', '=', 'isomers_to_plot', 'else', ':', 'print', '(', "'This method, iso_abund, is not supported by this class'", ')', 'print', '(', "'Returning None'", ')', 'return', 'None', 'if', 'verbose', ':', 'print', '(', "'Using the following 
conditions:'", ')', 'if', 'plotType', '==', "'se'", ':', 'print', '(', "'\\tmass_range:'", ',', 'mass_range', '[', '0', ']', ',', 'mass_range', '[', '1', ']', ')', 'print', '(', "'\\tAtomic mass_range:'", ',', 'amass_range', '[', '0', ']', ',', 'amass_range', '[', '1', ']', ')', 'print', '(', "'\\tcycle: '", ',', 'cycle', ')', 'print', '(', "'\\tplot only stable:'", ',', 'stable', ')', 'print', '(', "'\\tplot decayed: '", ',', 'decayed', ')', 'if', 'stable', ':', '# remove unstables:', '# For the element that belongs to the isotope at index 5 in isotope_to_plot', '# (C-12) the following gives the mass numbers of stable elements:', '# self.stable_el[self.stable_names.index(el_iso_to_plot[5])][1:]', 'ind_delete', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'isotope_to_plot', ')', ')', ':', 'if', 'a_iso_to_plot', '[', 'i', ']', 'not', 'in', 'self', '.', 'stable_el', '[', 'self', '.', 'stable_names', '.', 'index', '(', 'el_iso_to_plot', '[', 'i', ']', ')', ']', '[', '1', ':', ']', ':', 'ind_delete', '.', 'append', '(', 'i', ')', 'a_iso_to_plot', '=', 'delete', '(', 'a_iso_to_plot', ',', 'ind_delete', ')', 'z_iso_to_plot', '=', 'delete', '(', 'z_iso_to_plot', ',', 'ind_delete', ')', 'isomers_to_plot', '=', 'delete', '(', 'isomers_to_plot', ',', 'ind_delete', ')', 'isotope_to_plot', '=', 'delete', '(', 'isotope_to_plot', ',', 'ind_delete', ')', 'el_iso_to_plot', '=', 'delete', '(', 'el_iso_to_plot', ',', 'ind_delete', ')', 'abunds', '=', 'delete', '(', 'abunds', ',', 'ind_delete', ')', '# el_list=[] # list of elements in el_iso_to_plot', '#', '# for el in self.elements_names:', '# if el in el_iso_to_plot:', '# el_list.append(el)', '# SJONES implicit loop:', 'el_list', '=', '[', 'el', 'for', 'el', 'in', 'self', '.', 'elements_names', 'if', 'el', 'in', 'el_iso_to_plot', ']', 'abund_plot', '=', '[', ']', '# extract for each element an abundance and associated', 'mass_num', '=', '[', ']', '# mass number array, sorted by mass number', 'for', 'el', 'in', 'el_list', ':', 'numbers', '=', 'a_iso_to_plot', '[', '(', 'el_iso_to_plot', '==', 'el', ')', ']', 'abund_plot', '.', 'append', '(', 'abunds', '[', '(', 'el_iso_to_plot', '==', 'el', ')', ']', '[', 'argsort', '(', 'numbers', ')', ']', ')', 'mass_num', '.', 'append', '(', 'sort', '(', 'numbers', ')', ')', '# now plot:', "#plot_type = ['-','--','-.',':','-'] ##now implemented as an arg", 'print', '(', 'plot_type', ')', 'while', 'len', '(', 'plot_type', ')', '<=', '4', ':', 'plot_type', '.', 'append', '(', "''", ')', 'print', '(', 'plot_type', ')', 'pl_index', '=', '0', 'if', 'mypoint', 'is', 'None', ':', 'points', '=', '[', '[', "'o'", ',', "'^'", ',', "'p'", ',', "'h'", ',', "'*'", ']', ',', '[', "'x'", ',', "'+'", ',', "'D'", ',', "'>'", ',', "'s'", ']', ',', '[', "'H'", ',', "'v'", ',', "'<'", ',', "'*'", ',', "'3'", ']', ']', 'else', ':', 'points', '=', '[', '[', 'mypoint', ']', '*', '5', ',', '[', 'mypoint', ']', '*', '5', ',', '[', 'mypoint', ']', '*', '5', ']', 'if', 'color_plot', ':', 'colors', '=', '[', "'g'", ',', "'r'", ',', "'c'", ',', "'m'", ',', "'k'", ']', 'elif', 'colour', 'is', 'not', 'None', ':', 'colors', '=', '[', 'colour', ']', '*', '5', 'else', ':', 'colors', '=', '[', "'k'", ',', "'k'", ',', "'k'", ',', "'k'", ',', "'k'", ']', 'ylim1', '=', '1.e99', 'ylim2', '=', '-', '1.e99', '# initialise movie-related things:', 'if', 'mov', ':', 'artists', '=', '[', ']', 'ax', '=', 'drawax', 'fig', '=', 'drawfig', 'elif', 'drawax', 'is', 'not', 'None', ':', 'ax', '=', 'drawax', 'else', ':', 'ax', '=', 'pl', '.', 'axes', '(', ')', 
'if', 'drawfig', 'is', 'not', 'None', ':', 'fig', '=', 'drawfig', 'for', 'j', 'in', 'range', '(', 'len', '(', 'abund_plot', ')', ')', ':', '#Loop through the elements of interest', '# for l in xrange(len(abund_plot[j])):', '# if abund_plot[j][l] == 0:', '# abund_plot[j][l] = 1e-99', 'abund_plot', '[', 'j', ']', '=', 'np', '.', 'maximum', '(', 'abund_plot', '[', 'j', ']', ',', '1.e-99', ')', '# SJONES instead of looping', '# a_dum=zeros(len(abund_plot[j])) # this I (FH) have to do because for some', 'if', 'log_logic', '==', 'False', ':', '# reason log10(abu_abund[j]) does not work', 'a_dum', '=', 'abund_plot', '[', 'j', ']', '# although abu_abund[j] is a numpy array?!?', 'else', ':', '# for ii in range(len(abund_plot[j])):', '# a_dum[ii]=log10(abund_plot[j][ii])', 'a_dum', '=', 'np', '.', 'log10', '(', 'abund_plot', '[', 'j', ']', ')', '# SJONES this seems to work fine for me', 'if', 'type', '(', 'colors', '[', '0', ']', ')', 'is', 'str', ':', 'this_label', '=', 'str', '(', 'colors', '[', 'pl_index', ']', '+', 'points', '[', 'point_set', ']', '[', 'pl_index', ']', '+', 'plot_type', '[', 'pl_index', ']', ')', 'else', ':', 'this_label', '=', 'None', 'if', 'mov', ':', 'artist1', ',', '=', 'ax', '.', 'plot', '(', 'mass_num', '[', 'j', ']', ',', 'a_dum', ',', 'this_label', ',', 'markersize', '=', '6', ',', 'markeredgecolor', '=', "'None'", ')', 'else', ':', 'if', 'this_label', 'is', 'not', 'None', ':', 'if', 'label', 'is', 'not', 'None', 'and', 'j', '==', '0', ':', 'pl', '.', 'plot', '(', 'mass_num', '[', 'j', ']', ',', 'a_dum', ',', 'this_label', ',', 'markersize', '=', '6', ',', 'label', '=', 'label', ',', 'markeredgecolor', '=', "'None'", ')', 'pl', '.', 'legend', '(', 'loc', '=', "'best'", ')', '.', 'draw_frame', '(', 'False', ')', 'else', ':', 'pl', '.', 'plot', '(', 'mass_num', '[', 'j', ']', ',', 'a_dum', ',', 'this_label', ',', 'markersize', '=', '6', ',', 'markeredgecolor', '=', "'None'", ')', 'else', ':', 'if', 'label', 'is', 'not', 'None', 'and', 'j', '==', '0', ':', 'pl', '.', 'plot', '(', 'mass_num', '[', 'j', ']', ',', 'a_dum', ',', 'color', '=', 'colors', '[', 'pl_index', ']', ',', 'marker', '=', 'points', '[', 'point_set', ']', '[', 'pl_index', ']', ',', 'linestyle', '=', 'plot_type', '[', 'pl_index', ']', ',', 'markersize', '=', '6', ',', 'label', '=', 'label', ',', 'markeredgecolor', '=', "'None'", ')', 'pl', '.', 'legend', '(', 'loc', '=', "'best'", ')', '.', 'draw_frame', '(', 'False', ')', 'else', ':', 'pl', '.', 'plot', '(', 'mass_num', '[', 'j', ']', ',', 'a_dum', ',', 'color', '=', 'colors', '[', 'pl_index', ']', ',', 'marker', '=', 'points', '[', 'point_set', ']', '[', 'pl_index', ']', ',', 'linestyle', '=', 'plot_type', '[', 'pl_index', ']', ',', 'markersize', '=', '6', ',', 'markeredgecolor', '=', "'None'", ')', 'abu_max', '=', 'max', '(', 'a_dum', ')', 'max_index', '=', 'where', '(', 'a_dum', '==', 'abu_max', ')', '[', '0', ']', '[', '0', ']', 'coordinates', '=', '[', 'mass_num', '[', 'j', ']', '[', 'max_index', ']', ',', 'abu_max', ']', 'if', 'mov', ':', 'artist2', '=', 'ax', '.', 'text', '(', 'coordinates', '[', '0', ']', '+', '0.1', ',', '1.05', '*', 'coordinates', '[', '1', ']', ',', 'el_list', '[', 'j', ']', ',', 'clip_on', '=', 'True', ')', 'else', ':', 'if', 'show_names', ':', '# pl.text(coordinates[0]+0.1,1.05*coordinates[1],el_list[j],clip_on=True)', 'pl', '.', 'text', '(', 'coordinates', '[', '0', ']', ',', 'np', '.', 'log10', '(', '2.2', '*', '10.', '**', 'coordinates', '[', '1', ']', ')', ',', 'el_list', '[', 'j', ']', ',', 'clip_on', '=', 'True', ',', 
'horizontalalignment', '=', "'center'", ')', 'pl_index', '+=', '1', 'if', 'pl_index', '>', '4', ':', 'pl_index', '=', '0', 'ylim1', '=', 'min', '(', 'ylim1', ',', 'min', '(', 'a_dum', ')', ')', 'ylim2', '=', 'max', '(', 'ylim2', ',', 'max', '(', 'a_dum', ')', ')', 'if', 'mov', ':', 'artists', '.', 'extend', '(', '[', 'artist1', ',', 'artist2', ']', ')', '# now trimming the ylims', 'if', 'log_logic', ':', 'dylim', '=', '0.05', '*', '(', 'ylim2', '-', 'ylim1', ')', 'ylim1', '=', 'ylim1', '-', 'dylim', 'ylim2', '=', 'ylim2', '+', 'dylim', 'if', 'ref', 'is', 'not', '-', '1', ':', 'ylim2', '=', 'min', '(', 'ylim2', ',', '4', ')', 'ylim1', '=', 'max', '(', 'ylim1', ',', '-', '4', ')', 'else', ':', 'ylim2', '=', 'min', '(', 'ylim2', ',', '0.2', ')', 'ylim1', '=', 'max', '(', 'ylim1', ',', '-', '13', ')', 'else', ':', 'ylim1', '=', 'ylim1', '*', '0.8', 'ylim2', '=', 'ylim2', '*', '1.1', 'if', 'include_title', ':', 'if', 'plotType', '==', "'se'", ':', 'if', 'ref', '==', '-', '1', ':', 'title', '=', 'str', '(', "'Range %4.2f'", '%', 'mass_range', '[', '0', ']', ')', '+', 'str', '(', "'-%4.2f'", '%', 'mass_range', '[', '1', ']', ')', '+', 'str', '(', "' for cycle %d'", '%', 'int', '(', 'cycle', ')', ')', 'else', ':', 'title', '=', 'str', '(', "'Range %4.2f'", '%', 'mass_range', '[', '0', ']', ')', '+', 'str', '(', "'-%4.2f'", '%', 'mass_range', '[', '1', ']', ')', '+', 'str', '(', "' for cycle %d'", '%', 'int', '(', 'cycle', ')', ')', '+', 'str', '(', "' relative to cycle %d'", '%', 'int', '(', 'ref', ')', ')', 'else', ':', 'if', 'ref', '==', '-', '1', ':', 'title', '=', 'str', '(', "'Cycle %d'", '%', 'int', '(', 'cycle', ')', ')', 'else', ':', 'title', '=', 'str', '(', "'Cycle %d'", '%', 'int', '(', 'cycle', ')', ')', '+', 'str', '(', "' relative to cycle %d'", '%', 'int', '(', 'ref', ')', ')', 'print', '(', '"including title: ..."', ')', 'if', 'mov', ':', 'artist1', ',', '=', 'ax', '.', 'title', '(', 'title', ')', 'artists', '.', 'append', '(', 'artist1', ')', 'else', ':', 'pl', '.', 'title', '(', 'title', ')', 'if', 'ylim', '[', '0', ']', '==', '0', 'and', 'ylim', '[', '1', ']', '==', '0', ':', 'pl', '.', 'ylim', '(', 'ylim1', ',', 'ylim2', ')', 'else', ':', 'pl', '.', 'ylim', '(', 'ylim', '[', '0', ']', ',', 'ylim', '[', '1', ']', ')', 'pl', '.', 'xlim', '(', '[', 'amass_range', '[', '0', ']', '-', '.5', ',', 'amass_range', '[', '1', ']', '+', '.5', ']', ')', 'pl', '.', 'xlabel', '(', "'mass number (A)'", ',', 'fontsize', '=', '14', ')', 'if', 'ref', 'is', 'not', '-', '1', ':', 'if', 'log_logic', ':', 'pl', '.', 'ylabel', '(', "r'log abundance ratio'", ',', 'fontsize', '=', '14', ')', 'else', ':', 'pl', '.', 'ylabel', '(', "r'abundance ratio'", ',', 'fontsize', '=', '14', ')', 'else', ':', 'if', 'log_logic', ':', 'pl', '.', 'ylabel', '(', "r'log mass fraction '", ',', 'fontsize', '=', '14', ')', 'else', ':', 'pl', '.', 'ylabel', '(', "r'mass fraction'", ',', 'fontsize', '=', '14', ')', 'if', 'amass_range', '!=', 'None', ':', 'minimum_mass', '=', 'amass_range', '[', '0', ']', 'maximum_mass', '=', 'amass_range', '[', '1', ']', 'elif', 'mass_range', '!=', 'None', ':', 'minimum_mass', '=', 'mass_range', '[', '0', ']', 'maximum_mass', '=', 'mass_range', '[', '1', ']', 'else', ':', 'minimum_mass', '=', '0', 'maximum_mass', '=', '200', 'if', 'log_logic', '==', 'False', ':', 'if', 'mov', ':', 'artist1', ',', '=', 'ax', '.', 'plot', '(', '[', 'amass_range', '[', '0', ']', '-', '.5', ',', 'amass_range', '[', '1', ']', '+', '.5', ']', ',', '[', '1', ',', '1', ']', ',', "'k-'", ')', 'artists', '.', 'append', 
'(', 'artist1', ')', 'else', ':', 'pl', '.', 'plot', '(', '[', 'amass_range', '[', '0', ']', '-', '.5', ',', 'amass_range', '[', '1', ']', '+', '.5', ']', ',', '[', '1', ',', '1', ']', ',', "'k-'", ')', 'else', ':', 'if', 'mov', ':', 'artist1', ',', '=', 'ax', '.', 'plot', '(', '[', 'amass_range', '[', '0', ']', '-', '.5', ',', 'amass_range', '[', '1', ']', '+', '.5', ']', ',', '[', '0', ',', '0', ']', ',', "'k-'", ')', 'artists', '.', 'append', '(', 'artist1', ')', 'else', ':', 'pl', '.', 'plot', '(', '[', 'amass_range', '[', '0', ']', '-', '.5', ',', 'amass_range', '[', '1', ']', '+', '.5', ']', ',', '[', '0', ',', '0', ']', ',', "'k-'", ')', 'labelsx', '=', '[', ']', 'if', '(', 'maximum_mass', '-', 'minimum_mass', ')', '>', '100', ':', 'delta_labelsx', '=', '10', 'else', ':', 'delta_labelsx', '=', '5', 'iii', '=', 'amass_range', '[', '0', ']', '%', 'delta_labelsx', 'if', 'iii', '==', '0', ':', 'labelsx', '.', 'append', '(', 'str', '(', 'amass_range', '[', '0', ']', ')', ')', 'else', ':', 'labelsx', '.', 'append', '(', "' '", ')', 'iii', '=', 'iii', '+', '1', 'kkk', '=', '0', 'for', 'label1', 'in', 'range', '(', 'amass_range', '[', '1', ']', '-', 'amass_range', '[', '0', ']', ')', ':', 'if', 'iii', '==', '5', ':', 'kkk', '=', 'kkk', '+', '1', 'labelsx', '.', 'append', '(', 'str', '(', '(', 'iii', '*', 'kkk', ')', '+', 'amass_range', '[', '0', ']', '-', '(', 'amass_range', '[', '0', ']', '%', '5', ')', ')', ')', 'iii', '=', '0', 'iii', '=', 'iii', '+', '1', 'else', ':', 'labelsx', '.', 'append', '(', "' '", ')', 'iii', '=', 'iii', '+', '1', 'if', 'delta_labelsx', '==', '5', ':', 'xticks', '=', 'arange', '(', 'amass_range', '[', '0', ']', ',', 'amass_range', '[', '1', ']', ',', '1', ')', 'pl', '.', 'xticks', '(', 'xticks', ',', 'labelsx', ')', 'else', ':', 'pl', '.', 'xticks', '(', ')', '# SJONES moved the pl.grid and pl.show to the very end', 'if', 'grid', ':', 'pl', '.', 'grid', '(', ')', 'if', 'show', ':', 'pl', '.', 'show', '(', ')', "##!!FOR!!###### print 'LEN LABELS= ', len(labelsx)", '##DEBUGGING####', '####!!!######## for bbb in range (len(labelsx)):', '############### print labelsx[bbb]', 'if', 'mov', ':', 'return', 'artists']
plot the abundance of all the chemical species Parameters ---------- cycle : string, integer or list The cycle of interest. If it is a list of cycles, this method will do a plot for each cycle and save them to a file. stable : boolean, optional A boolean of whether to filter out the unstable isotopes. The default is False. amass_range : list, optional A 1x2 array containing the lower and upper atomic mass range. If None plot entire available atomic mass range. The default is None. mass_range : list, optional A 1x2 array containing the lower and upper mass range. If this is an instance of abu_vector this will only plot isotopes that have an atomic mass within this range. This will throw an error if this range does not make sense ie [45,2]. If None, it will plot over the entire range. The default is None. ylim : list, optional A 1x2 array containing the lower and upper Y limits. If it is [0,0], then ylim will be determined automatically. The default is [0,0]. ref : integer or list, optional reference cycle. If it is not -1, this method will plot the abundances of cycle divided by the cycle of the same instance given in the ref variable. If ref is a list it will be interpreted to have two elements: ref=['dir/of/ref/run',cycle], which uses a reference cycle from another run. If any abundance in the reference cycle is zero, it will be replaced with 1e-99. The default is -1. show : boolean, optional Boolean of whether the plot should be displayed. The default is True. log_logic : boolean, optional Plot abundances in log scale or linear. The default is True. decayed : boolean, optional If True plot decayed distributions, else plot live distributions. The default is False. color_plot : boolean, optional Color dots and lines [True/False]. The default is True. grid : boolean, optional Print grid. The default is False. point_set : integer, optional Set to 0, 1 or 2 to select one of three point sets, useful for multiple abundances or ratios in one plot. The default is 1. include_title : boolean, optional Include a title with the plot. The default is False. drawfig, drawax, mov : optional, not necessary for user to set these variables The figure and axes containers to be drawn on, and whether or not a movie is being made (only True when se.movie is called, which sets mov to True automatically). elemaburtn : boolean, private If true, iso_abund() returns after writing self.***_iso_to_plot for use with other plotting routines. mypoint : string, optional fix the marker style of all the points in this plot to one type, given as a string. If None, multiple point styles are used as per point_set. The default is None
['plot', 'the', 'abundance', 'of', 'all', 'the', 'chemical', 'species']
train
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/data_plot.py#L3309-L3811
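The routine recorded above groups isotopes by element, sorts each element's isotopes by mass number, clips abundances at 1e-99, and plots (optionally log10) abundance against mass number. A minimal self-contained sketch of that grouping-and-plotting idea; the arrays below are invented illustrative data, not NuGrid output, and the snippet is not the data_plot routine itself:

import numpy as np
import matplotlib.pyplot as plt

# Illustrative inputs only: element name, mass number and mass fraction per isotope.
el_iso_to_plot = np.array(['C', 'C', 'O', 'O', 'O'])
a_iso_to_plot = np.array([13, 12, 18, 16, 17])
abunds = np.array([2.0e-3, 1.0e-1, 5.0e-6, 6.0e-1, 2.0e-7])

for el in ['C', 'O']:
    mask = (el_iso_to_plot == el)
    numbers = a_iso_to_plot[mask]
    order = np.argsort(numbers)
    # Clip at 1e-99 before taking log10, as the routine above does.
    y = np.log10(np.maximum(abunds[mask][order], 1.0e-99))
    plt.plot(numbers[order], y, marker='o', markersize=6, label=el)

plt.xlabel('mass number (A)')
plt.ylabel('log mass fraction')
plt.legend(loc='best')
plt.show()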
2,578
jason-weirather/py-seq-tools
seqtools/errors.py
ErrorProfileFactory.combine_context_errors
def combine_context_errors(self): """Each alignment contributes some information to the error report. These reports for each alignment need to be gone through and combined into one report. :returns: Dictionary containing the error counts on context base :rtype: dict() """ r = {} if self._target_context_errors: r = self._target_context_errors for k in [x.get_context_target_errors() for x in self._alignment_errors]: for b in k: if b not in r: r[b] = {} for c in k[b]: if c not in r[b]: r[b][c] = {} for a in k[b][c]: if a not in r[b][c]: r[b][c][a] = {} r[b][c][a]['total'] = 0 r[b][c][a]['types'] = {} r[b][c][a]['total'] += k[b][c][a]['total'] for type in k[b][c][a]['types']: if type not in r[b][c][a]['types']: r[b][c][a]['types'][type] = 0 r[b][c][a]['types'][type] += k[b][c][a]['types'][type] self._target_context_errors = r r = {} if self._query_context_errors: r = self._query_context_errors for k in [x.get_context_query_errors() for x in self._alignment_errors]: for b in k: if b not in r: r[b] = {} for c in k[b]: if c not in r[b]: r[b][c] = {} for a in k[b][c]: if a not in r[b][c]: r[b][c][a] = {} r[b][c][a]['total'] = 0 r[b][c][a]['types'] = {} r[b][c][a]['total'] += k[b][c][a]['total'] for type in k[b][c][a]['types']: if type not in r[b][c][a]['types']: r[b][c][a]['types'][type] = 0 r[b][c][a]['types'][type] += k[b][c][a]['types'][type] self._query_context_errors = r
python
def combine_context_errors(self): """Each alignment contributes some information to the error report. These reports for each alignment need to be gone through and combined into one report. :returns: Dictionary containing the error counts on context base :rtype: dict() """ r = {} if self._target_context_errors: r = self._target_context_errors for k in [x.get_context_target_errors() for x in self._alignment_errors]: for b in k: if b not in r: r[b] = {} for c in k[b]: if c not in r[b]: r[b][c] = {} for a in k[b][c]: if a not in r[b][c]: r[b][c][a] = {} r[b][c][a]['total'] = 0 r[b][c][a]['types'] = {} r[b][c][a]['total'] += k[b][c][a]['total'] for type in k[b][c][a]['types']: if type not in r[b][c][a]['types']: r[b][c][a]['types'][type] = 0 r[b][c][a]['types'][type] += k[b][c][a]['types'][type] self._target_context_errors = r r = {} if self._query_context_errors: r = self._query_context_errors for k in [x.get_context_query_errors() for x in self._alignment_errors]: for b in k: if b not in r: r[b] = {} for c in k[b]: if c not in r[b]: r[b][c] = {} for a in k[b][c]: if a not in r[b][c]: r[b][c][a] = {} r[b][c][a]['total'] = 0 r[b][c][a]['types'] = {} r[b][c][a]['total'] += k[b][c][a]['total'] for type in k[b][c][a]['types']: if type not in r[b][c][a]['types']: r[b][c][a]['types'][type] = 0 r[b][c][a]['types'][type] += k[b][c][a]['types'][type] self._query_context_errors = r
['def', 'combine_context_errors', '(', 'self', ')', ':', 'r', '=', '{', '}', 'if', 'self', '.', '_target_context_errors', ':', 'r', '=', 'self', '.', '_target_context_errors', 'for', 'k', 'in', '[', 'x', '.', 'get_context_target_errors', '(', ')', 'for', 'x', 'in', 'self', '.', '_alignment_errors', ']', ':', 'for', 'b', 'in', 'k', ':', 'if', 'b', 'not', 'in', 'r', ':', 'r', '[', 'b', ']', '=', '{', '}', 'for', 'c', 'in', 'k', '[', 'b', ']', ':', 'if', 'c', 'not', 'in', 'r', '[', 'b', ']', ':', 'r', '[', 'b', ']', '[', 'c', ']', '=', '{', '}', 'for', 'a', 'in', 'k', '[', 'b', ']', '[', 'c', ']', ':', 'if', 'a', 'not', 'in', 'r', '[', 'b', ']', '[', 'c', ']', ':', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '=', '{', '}', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'total'", ']', '=', '0', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '=', '{', '}', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'total'", ']', '+=', 'k', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'total'", ']', 'for', 'type', 'in', 'k', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', ':', 'if', 'type', 'not', 'in', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', ':', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '[', 'type', ']', '=', '0', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '[', 'type', ']', '+=', 'k', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '[', 'type', ']', 'self', '.', '_target_context_errors', '=', 'r', 'r', '=', '{', '}', 'if', 'self', '.', '_query_context_errors', ':', 'r', '=', 'self', '.', '_query_context_errors', 'for', 'k', 'in', '[', 'x', '.', 'get_context_query_errors', '(', ')', 'for', 'x', 'in', 'self', '.', '_alignment_errors', ']', ':', 'for', 'b', 'in', 'k', ':', 'if', 'b', 'not', 'in', 'r', ':', 'r', '[', 'b', ']', '=', '{', '}', 'for', 'c', 'in', 'k', '[', 'b', ']', ':', 'if', 'c', 'not', 'in', 'r', '[', 'b', ']', ':', 'r', '[', 'b', ']', '[', 'c', ']', '=', '{', '}', 'for', 'a', 'in', 'k', '[', 'b', ']', '[', 'c', ']', ':', 'if', 'a', 'not', 'in', 'r', '[', 'b', ']', '[', 'c', ']', ':', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '=', '{', '}', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'total'", ']', '=', '0', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '=', '{', '}', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'total'", ']', '+=', 'k', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'total'", ']', 'for', 'type', 'in', 'k', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', ':', 'if', 'type', 'not', 'in', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', ':', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '[', 'type', ']', '=', '0', 'r', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '[', 'type', ']', '+=', 'k', '[', 'b', ']', '[', 'c', ']', '[', 'a', ']', '[', "'types'", ']', '[', 'type', ']', 'self', '.', '_query_context_errors', '=', 'r']
Each alignment contributes some information to the error report. These reports for each alignment need to be gone through and combined into one report. :returns: Dictionary containing the error counts on context base :rtype: dict()
['Each', 'alignment', 'contributes', 'some', 'information', 'to', 'the', 'error', 'report', '.', 'These', 'reports', 'for', 'each', 'alignment', 'need', 'to', 'be', 'gone', 'through', 'and', 'combined', 'into', 'one', 'report', '.']
train
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/errors.py#L199-L240
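The merge above accumulates nested per-context error counts into one report. A self-contained sketch of the same accumulation pattern on two small hand-made context dictionaries; the keys and counts are invented for illustration:

# Each report maps base -> context -> allele -> {'total': int, 'types': {type: int}}.
def merge_context_counts(reports):
    r = {}
    for k in reports:
        for b in k:
            r.setdefault(b, {})
            for c in k[b]:
                r[b].setdefault(c, {})
                for a in k[b][c]:
                    slot = r[b][c].setdefault(a, {'total': 0, 'types': {}})
                    slot['total'] += k[b][c][a]['total']
                    for t, n in k[b][c][a]['types'].items():
                        slot['types'][t] = slot['types'].get(t, 0) + n
    return r

r1 = {'A': {'CAG': {'G': {'total': 2, 'types': {'mismatch': 2}}}}}
r2 = {'A': {'CAG': {'G': {'total': 1, 'types': {'deletion': 1}}}}}
print(merge_context_counts([r1, r2]))
# {'A': {'CAG': {'G': {'total': 3, 'types': {'mismatch': 2, 'deletion': 1}}}}}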
2,579
fastai/fastai
old/fastai/plots.py
ImageModelResults.most_by_uncertain
def most_by_uncertain(self, y): """ Extracts the predicted classes which correspond to the selected class (y) and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2 classes, 0.33 for 3 classes) for the selected class. Arguments: y (int): the selected class Returns: idxs (numpy.ndarray): An array of indexes (numpy.ndarray) """ return self.most_uncertain_by_mask((self.ds.y == y), y)
python
def most_by_uncertain(self, y): """ Extracts the predicted classes which correspond to the selected class (y) and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2 classes, 0.33 for 3 classes) for the selected class. Arguments: y (int): the selected class Returns: idxs (numpy.ndarray): An array of indexes (numpy.ndarray) """ return self.most_uncertain_by_mask((self.ds.y == y), y)
['def', 'most_by_uncertain', '(', 'self', ',', 'y', ')', ':', 'return', 'self', '.', 'most_uncertain_by_mask', '(', '(', 'self', '.', 'ds', '.', 'y', '==', 'y', ')', ',', 'y', ')']
Extracts the predicted classes which correspond to the selected class (y) and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2 classes, 0.33 for 3 classes) for the selected class. Arguments: y (int): the selected class Returns: idxs (numpy.ndarray): An array of indexes (numpy.ndarray)
['Extracts', 'the', 'predicted', 'classes', 'which', 'correspond', 'to', 'the', 'selected', 'class', '(', 'y', ')', 'and', 'have', 'probabilities', 'nearest', 'to', '1', '/', 'number_of_classes', '(', 'eg', '.', '0', '.', '5', 'for', '2', 'classes', '0', '.', '33', 'for', '3', 'classes', ')', 'for', 'the', 'selected', 'class', '.']
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/plots.py#L173-L182
2,580
rigetti/pyquil
pyquil/api/_qam.py
QAM.reset
def reset(self): """ Reset the Quantum Abstract Machine to its initial state, which is particularly useful when it has gotten into an unwanted state. This can happen, for example, if the QAM is interrupted in the middle of a run. """ self._variables_shim = {} self._executable = None self._bitstrings = None self.status = 'connected'
python
def reset(self): """ Reset the Quantum Abstract Machine to its initial state, which is particularly useful when it has gotten into an unwanted state. This can happen, for example, if the QAM is interrupted in the middle of a run. """ self._variables_shim = {} self._executable = None self._bitstrings = None self.status = 'connected'
['def', 'reset', '(', 'self', ')', ':', 'self', '.', '_variables_shim', '=', '{', '}', 'self', '.', '_executable', '=', 'None', 'self', '.', '_bitstrings', '=', 'None', 'self', '.', 'status', '=', "'connected'"]
Reset the Quantum Abstract Machine to its initial state, which is particularly useful when it has gotten into an unwanted state. This can happen, for example, if the QAM is interrupted in the middle of a run.
['Reset', 'the', 'Quantum', 'Abstract', 'Machine', 'to', 'its', 'initial', 'state', 'which', 'is', 'particularly', 'useful', 'when', 'it', 'has', 'gotten', 'into', 'an', 'unwanted', 'state', '.', 'This', 'can', 'happen', 'for', 'example', 'if', 'the', 'QAM', 'is', 'interrupted', 'in', 'the', 'middle', 'of', 'a', 'run', '.']
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_qam.py#L137-L147
2,581
PGower/PyCanvas
pycanvas/apis/courses.py
CoursesAPI.reset_course
def reset_course(self, course_id): """ Reset a course. Deletes the current course, and creates a new equivalent course with no content, but all sections and users moved over. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id self.logger.debug("POST /api/v1/courses/{course_id}/reset_content with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/reset_content".format(**path), data=data, params=params, single_item=True)
python
def reset_course(self, course_id): """ Reset a course. Deletes the current course, and creates a new equivalent course with no content, but all sections and users moved over. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id self.logger.debug("POST /api/v1/courses/{course_id}/reset_content with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/reset_content".format(**path), data=data, params=params, single_item=True)
['def', 'reset_course', '(', 'self', ',', 'course_id', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - course_id\r', '"""ID"""', 'path', '[', '"course_id"', ']', '=', 'course_id', 'self', '.', 'logger', '.', 'debug', '(', '"POST /api/v1/courses/{course_id}/reset_content with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"POST"', ',', '"/api/v1/courses/{course_id}/reset_content"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'single_item', '=', 'True', ')']
Reset a course. Deletes the current course, and creates a new equivalent course with no content, but all sections and users moved over.
['Reset', 'a', 'course', '.', 'Deletes', 'the', 'current', 'course', 'and', 'creates', 'a', 'new', 'equivalent', 'course', 'with', 'no', 'content', 'but', 'all', 'sections', 'and', 'users', 'moved', 'over', '.']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/courses.py#L1182-L1198
2,582
Kronuz/pyScss
scss/cssdefs.py
determine_encoding
def determine_encoding(buf): """Return the appropriate encoding for the given CSS source, according to the CSS charset rules. `buf` may be either a string or bytes. """ # The ultimate default is utf8; bravo, W3C bom_encoding = 'UTF-8' if not buf: # What return bom_encoding if isinstance(buf, six.text_type): # We got a file that, for whatever reason, produces already-decoded # text. Check for the BOM (which is useless now) and believe # whatever's in the @charset. if buf[0] == '\ufeff': buf = buf[0:] # This is pretty similar to the code below, but without any encoding # double-checking. charset_start = '@charset "' charset_end = '";' if buf.startswith(charset_start): start = len(charset_start) end = buf.index(charset_end, start) return buf[start:end] else: return bom_encoding # BOMs if buf[:3] == b'\xef\xbb\xbf': bom_encoding = 'UTF-8' buf = buf[3:] if buf[:4] == b'\x00\x00\xfe\xff': bom_encoding = 'UTF-32BE' buf = buf[4:] elif buf[:4] == b'\xff\xfe\x00\x00': bom_encoding = 'UTF-32LE' buf = buf[4:] if buf[:4] == b'\x00\x00\xff\xfe': raise UnicodeError("UTF-32-2143 is not supported") elif buf[:4] == b'\xfe\xff\x00\x00': raise UnicodeError("UTF-32-2143 is not supported") elif buf[:2] == b'\xfe\xff': bom_encoding = 'UTF-16BE' buf = buf[2:] elif buf[:2] == b'\xff\xfe': bom_encoding = 'UTF-16LE' buf = buf[2:] # The spec requires exactly this syntax; no escapes or extra spaces or # other shenanigans, thank goodness. charset_start = '@charset "'.encode(bom_encoding) charset_end = '";'.encode(bom_encoding) if buf.startswith(charset_start): start = len(charset_start) end = buf.index(charset_end, start) encoded_encoding = buf[start:end] encoding = encoded_encoding.decode(bom_encoding) # Ensure that decoding with the specified encoding actually produces # the same @charset rule encoded_charset = buf[:end + len(charset_end)] if (encoded_charset.decode(encoding) != encoded_charset.decode(bom_encoding)): raise UnicodeError( "@charset {0} is incompatible with detected encoding {1}" .format(bom_encoding, encoding)) else: # With no @charset, believe the BOM encoding = bom_encoding return encoding
python
def determine_encoding(buf): """Return the appropriate encoding for the given CSS source, according to the CSS charset rules. `buf` may be either a string or bytes. """ # The ultimate default is utf8; bravo, W3C bom_encoding = 'UTF-8' if not buf: # What return bom_encoding if isinstance(buf, six.text_type): # We got a file that, for whatever reason, produces already-decoded # text. Check for the BOM (which is useless now) and believe # whatever's in the @charset. if buf[0] == '\ufeff': buf = buf[0:] # This is pretty similar to the code below, but without any encoding # double-checking. charset_start = '@charset "' charset_end = '";' if buf.startswith(charset_start): start = len(charset_start) end = buf.index(charset_end, start) return buf[start:end] else: return bom_encoding # BOMs if buf[:3] == b'\xef\xbb\xbf': bom_encoding = 'UTF-8' buf = buf[3:] if buf[:4] == b'\x00\x00\xfe\xff': bom_encoding = 'UTF-32BE' buf = buf[4:] elif buf[:4] == b'\xff\xfe\x00\x00': bom_encoding = 'UTF-32LE' buf = buf[4:] if buf[:4] == b'\x00\x00\xff\xfe': raise UnicodeError("UTF-32-2143 is not supported") elif buf[:4] == b'\xfe\xff\x00\x00': raise UnicodeError("UTF-32-2143 is not supported") elif buf[:2] == b'\xfe\xff': bom_encoding = 'UTF-16BE' buf = buf[2:] elif buf[:2] == b'\xff\xfe': bom_encoding = 'UTF-16LE' buf = buf[2:] # The spec requires exactly this syntax; no escapes or extra spaces or # other shenanigans, thank goodness. charset_start = '@charset "'.encode(bom_encoding) charset_end = '";'.encode(bom_encoding) if buf.startswith(charset_start): start = len(charset_start) end = buf.index(charset_end, start) encoded_encoding = buf[start:end] encoding = encoded_encoding.decode(bom_encoding) # Ensure that decoding with the specified encoding actually produces # the same @charset rule encoded_charset = buf[:end + len(charset_end)] if (encoded_charset.decode(encoding) != encoded_charset.decode(bom_encoding)): raise UnicodeError( "@charset {0} is incompatible with detected encoding {1}" .format(bom_encoding, encoding)) else: # With no @charset, believe the BOM encoding = bom_encoding return encoding
['def', 'determine_encoding', '(', 'buf', ')', ':', '# The ultimate default is utf8; bravo, W3C', 'bom_encoding', '=', "'UTF-8'", 'if', 'not', 'buf', ':', '# What', 'return', 'bom_encoding', 'if', 'isinstance', '(', 'buf', ',', 'six', '.', 'text_type', ')', ':', '# We got a file that, for whatever reason, produces already-decoded', '# text. Check for the BOM (which is useless now) and believe', "# whatever's in the @charset.", 'if', 'buf', '[', '0', ']', '==', "'\\ufeff'", ':', 'buf', '=', 'buf', '[', '0', ':', ']', '# This is pretty similar to the code below, but without any encoding', '# double-checking.', 'charset_start', '=', '\'@charset "\'', 'charset_end', '=', '\'";\'', 'if', 'buf', '.', 'startswith', '(', 'charset_start', ')', ':', 'start', '=', 'len', '(', 'charset_start', ')', 'end', '=', 'buf', '.', 'index', '(', 'charset_end', ',', 'start', ')', 'return', 'buf', '[', 'start', ':', 'end', ']', 'else', ':', 'return', 'bom_encoding', '# BOMs', 'if', 'buf', '[', ':', '3', ']', '==', "b'\\xef\\xbb\\xbf'", ':', 'bom_encoding', '=', "'UTF-8'", 'buf', '=', 'buf', '[', '3', ':', ']', 'if', 'buf', '[', ':', '4', ']', '==', "b'\\x00\\x00\\xfe\\xff'", ':', 'bom_encoding', '=', "'UTF-32BE'", 'buf', '=', 'buf', '[', '4', ':', ']', 'elif', 'buf', '[', ':', '4', ']', '==', "b'\\xff\\xfe\\x00\\x00'", ':', 'bom_encoding', '=', "'UTF-32LE'", 'buf', '=', 'buf', '[', '4', ':', ']', 'if', 'buf', '[', ':', '4', ']', '==', "b'\\x00\\x00\\xff\\xfe'", ':', 'raise', 'UnicodeError', '(', '"UTF-32-2143 is not supported"', ')', 'elif', 'buf', '[', ':', '4', ']', '==', "b'\\xfe\\xff\\x00\\x00'", ':', 'raise', 'UnicodeError', '(', '"UTF-32-2143 is not supported"', ')', 'elif', 'buf', '[', ':', '2', ']', '==', "b'\\xfe\\xff'", ':', 'bom_encoding', '=', "'UTF-16BE'", 'buf', '=', 'buf', '[', '2', ':', ']', 'elif', 'buf', '[', ':', '2', ']', '==', "b'\\xff\\xfe'", ':', 'bom_encoding', '=', "'UTF-16LE'", 'buf', '=', 'buf', '[', '2', ':', ']', '# The spec requires exactly this syntax; no escapes or extra spaces or', '# other shenanigans, thank goodness.', 'charset_start', '=', '\'@charset "\'', '.', 'encode', '(', 'bom_encoding', ')', 'charset_end', '=', '\'";\'', '.', 'encode', '(', 'bom_encoding', ')', 'if', 'buf', '.', 'startswith', '(', 'charset_start', ')', ':', 'start', '=', 'len', '(', 'charset_start', ')', 'end', '=', 'buf', '.', 'index', '(', 'charset_end', ',', 'start', ')', 'encoded_encoding', '=', 'buf', '[', 'start', ':', 'end', ']', 'encoding', '=', 'encoded_encoding', '.', 'decode', '(', 'bom_encoding', ')', '# Ensure that decoding with the specified encoding actually produces', '# the same @charset rule', 'encoded_charset', '=', 'buf', '[', ':', 'end', '+', 'len', '(', 'charset_end', ')', ']', 'if', '(', 'encoded_charset', '.', 'decode', '(', 'encoding', ')', '!=', 'encoded_charset', '.', 'decode', '(', 'bom_encoding', ')', ')', ':', 'raise', 'UnicodeError', '(', '"@charset {0} is incompatible with detected encoding {1}"', '.', 'format', '(', 'bom_encoding', ',', 'encoding', ')', ')', 'else', ':', '# With no @charset, believe the BOM', 'encoding', '=', 'bom_encoding', 'return', 'encoding']
Return the appropriate encoding for the given CSS source, according to the CSS charset rules. `buf` may be either a string or bytes.
['Return', 'the', 'appropriate', 'encoding', 'for', 'the', 'given', 'CSS', 'source', 'according', 'to', 'the', 'CSS', 'charset', 'rules', '.']
train
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/cssdefs.py#L358-L432
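A condensed, self-contained restatement of the detection rules implemented above, sniff a BOM first and then trust a byte-exact @charset rule, applied to a couple of example byte strings. The simplified helper below is an illustration of the rule order, not the pyScss function itself:

def sniff_css_encoding(buf):
    # BOM first; default to UTF-8, as the spec (and the function above) does.
    boms = [(b'\xef\xbb\xbf', 'UTF-8'),
            (b'\xff\xfe\x00\x00', 'UTF-32LE'), (b'\x00\x00\xfe\xff', 'UTF-32BE'),
            (b'\xff\xfe', 'UTF-16LE'), (b'\xfe\xff', 'UTF-16BE')]
    encoding = 'UTF-8'
    for bom, name in boms:
        if buf.startswith(bom):
            encoding, buf = name, buf[len(bom):]
            break
    # The @charset rule must appear byte-exact in the BOM-implied encoding.
    start = '@charset "'.encode(encoding)
    end = '";'.encode(encoding)
    if buf.startswith(start):
        return buf[len(start):buf.index(end, len(start))].decode(encoding)
    return encoding

print(sniff_css_encoding(b'@charset "iso-8859-1";\nbody { color: red }'))  # iso-8859-1
print(sniff_css_encoding(b'\xef\xbb\xbfbody { color: red }'))              # UTF-8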
2,583
archman/beamline
beamline/ui/myappframe.py
MyAppFrame.update_stat
def update_stat(self, mode='open', infostr='', stat=''): """ write operation stats to log :param mode: 'open', 'saveas', 'listtree' :param infostr: string to put into info_st :param stat: 'OK' or 'ERR' """ self._update_stat[mode](mode, infostr, stat)
python
def update_stat(self, mode='open', infostr='', stat=''): """ write operation stats to log :param mode: 'open', 'saveas', 'listtree' :param infostr: string to put into info_st :param stat: 'OK' or 'ERR' """ self._update_stat[mode](mode, infostr, stat)
['def', 'update_stat', '(', 'self', ',', 'mode', '=', "'open'", ',', 'infostr', '=', "''", ',', 'stat', '=', "''", ')', ':', 'self', '.', '_update_stat', '[', 'mode', ']', '(', 'mode', ',', 'infostr', ',', 'stat', ')']
write operation stats to log :param mode: 'open', 'saveas', 'listtree' :param infostr: string to put into info_st :param stat: 'OK' or 'ERR'
['write', 'operation', 'stats', 'to', 'log', ':', 'param', 'mode', ':', 'open', 'saveas', 'listtree', ':', 'param', 'infostr', ':', 'string', 'to', 'put', 'into', 'info_st', ':', 'param', 'stat', ':', 'OK', 'or', 'ERR']
train
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/ui/myappframe.py#L621-L627
2,584
saltstack/salt
salt/client/ssh/__init__.py
SSH._key_deploy_run
def _key_deploy_run(self, host, target, re_run=True): ''' The ssh-copy-id routine ''' argv = [ 'ssh.set_auth_key', target.get('user', 'root'), self.get_pubkey(), ] single = Single( self.opts, argv, host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, **target) if salt.utils.path.which('ssh-copy-id'): # we have ssh-copy-id, use it! stdout, stderr, retcode = single.shell.copy_id() else: stdout, stderr, retcode = single.run() if re_run: target.pop('passwd') single = Single( self.opts, self.opts['argv'], host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, **target) stdout, stderr, retcode = single.cmd_block() try: data = salt.utils.json.find_json(stdout) return {host: data.get('local', data)} except Exception: if stderr: return {host: stderr} return {host: 'Bad Return'} if salt.defaults.exitcodes.EX_OK != retcode: return {host: stderr} return {host: stdout}
python
def _key_deploy_run(self, host, target, re_run=True): ''' The ssh-copy-id routine ''' argv = [ 'ssh.set_auth_key', target.get('user', 'root'), self.get_pubkey(), ] single = Single( self.opts, argv, host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, **target) if salt.utils.path.which('ssh-copy-id'): # we have ssh-copy-id, use it! stdout, stderr, retcode = single.shell.copy_id() else: stdout, stderr, retcode = single.run() if re_run: target.pop('passwd') single = Single( self.opts, self.opts['argv'], host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, **target) stdout, stderr, retcode = single.cmd_block() try: data = salt.utils.json.find_json(stdout) return {host: data.get('local', data)} except Exception: if stderr: return {host: stderr} return {host: 'Bad Return'} if salt.defaults.exitcodes.EX_OK != retcode: return {host: stderr} return {host: stdout}
['def', '_key_deploy_run', '(', 'self', ',', 'host', ',', 'target', ',', 're_run', '=', 'True', ')', ':', 'argv', '=', '[', "'ssh.set_auth_key'", ',', 'target', '.', 'get', '(', "'user'", ',', "'root'", ')', ',', 'self', '.', 'get_pubkey', '(', ')', ',', ']', 'single', '=', 'Single', '(', 'self', '.', 'opts', ',', 'argv', ',', 'host', ',', 'mods', '=', 'self', '.', 'mods', ',', 'fsclient', '=', 'self', '.', 'fsclient', ',', 'thin', '=', 'self', '.', 'thin', ',', '*', '*', 'target', ')', 'if', 'salt', '.', 'utils', '.', 'path', '.', 'which', '(', "'ssh-copy-id'", ')', ':', '# we have ssh-copy-id, use it!', 'stdout', ',', 'stderr', ',', 'retcode', '=', 'single', '.', 'shell', '.', 'copy_id', '(', ')', 'else', ':', 'stdout', ',', 'stderr', ',', 'retcode', '=', 'single', '.', 'run', '(', ')', 'if', 're_run', ':', 'target', '.', 'pop', '(', "'passwd'", ')', 'single', '=', 'Single', '(', 'self', '.', 'opts', ',', 'self', '.', 'opts', '[', "'argv'", ']', ',', 'host', ',', 'mods', '=', 'self', '.', 'mods', ',', 'fsclient', '=', 'self', '.', 'fsclient', ',', 'thin', '=', 'self', '.', 'thin', ',', '*', '*', 'target', ')', 'stdout', ',', 'stderr', ',', 'retcode', '=', 'single', '.', 'cmd_block', '(', ')', 'try', ':', 'data', '=', 'salt', '.', 'utils', '.', 'json', '.', 'find_json', '(', 'stdout', ')', 'return', '{', 'host', ':', 'data', '.', 'get', '(', "'local'", ',', 'data', ')', '}', 'except', 'Exception', ':', 'if', 'stderr', ':', 'return', '{', 'host', ':', 'stderr', '}', 'return', '{', 'host', ':', "'Bad Return'", '}', 'if', 'salt', '.', 'defaults', '.', 'exitcodes', '.', 'EX_OK', '!=', 'retcode', ':', 'return', '{', 'host', ':', 'stderr', '}', 'return', '{', 'host', ':', 'stdout', '}']
The ssh-copy-id routine
['The', 'ssh', '-', 'copy', '-', 'id', 'routine']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/__init__.py#L463-L506
2,585
oanda/v20-python
src/v20/order.py
UnitsAvailableDetails.from_dict
def from_dict(data, ctx): """ Instantiate a new UnitsAvailableDetails from a dict (generally from loading a JSON response). The data used to instantiate the UnitsAvailableDetails is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('long') is not None: data['long'] = ctx.convert_decimal_number( data.get('long') ) if data.get('short') is not None: data['short'] = ctx.convert_decimal_number( data.get('short') ) return UnitsAvailableDetails(**data)
python
def from_dict(data, ctx): """ Instantiate a new UnitsAvailableDetails from a dict (generally from loading a JSON response). The data used to instantiate the UnitsAvailableDetails is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('long') is not None: data['long'] = ctx.convert_decimal_number( data.get('long') ) if data.get('short') is not None: data['short'] = ctx.convert_decimal_number( data.get('short') ) return UnitsAvailableDetails(**data)
['def', 'from_dict', '(', 'data', ',', 'ctx', ')', ':', 'data', '=', 'data', '.', 'copy', '(', ')', 'if', 'data', '.', 'get', '(', "'long'", ')', 'is', 'not', 'None', ':', 'data', '[', "'long'", ']', '=', 'ctx', '.', 'convert_decimal_number', '(', 'data', '.', 'get', '(', "'long'", ')', ')', 'if', 'data', '.', 'get', '(', "'short'", ')', 'is', 'not', 'None', ':', 'data', '[', "'short'", ']', '=', 'ctx', '.', 'convert_decimal_number', '(', 'data', '.', 'get', '(', "'short'", ')', ')', 'return', 'UnitsAvailableDetails', '(', '*', '*', 'data', ')']
Instantiate a new UnitsAvailableDetails from a dict (generally from loading a JSON response). The data used to instantiate the UnitsAvailableDetails is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
['Instantiate', 'a', 'new', 'UnitsAvailableDetails', 'from', 'a', 'dict', '(', 'generally', 'from', 'loading', 'a', 'JSON', 'response', ')', '.', 'The', 'data', 'used', 'to', 'instantiate', 'the', 'UnitsAvailableDetails', 'is', 'a', 'shallow', 'copy', 'of', 'the', 'dict', 'passed', 'in', 'with', 'any', 'complex', 'child', 'types', 'instantiated', 'appropriately', '.']
train
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/order.py#L3324-L3344
2,586
raff/dynash
dynash2/dynash2.py
DynamoDBShell2.do_login
def do_login(self, line): "login aws-acces-key aws-secret" if line: args = self.getargs(line) self.connect(args[0], args[1]) else: self.connect() self.do_tables('')
python
def do_login(self, line): "login aws-acces-key aws-secret" if line: args = self.getargs(line) self.connect(args[0], args[1]) else: self.connect() self.do_tables('')
['def', 'do_login', '(', 'self', ',', 'line', ')', ':', 'if', 'line', ':', 'args', '=', 'self', '.', 'getargs', '(', 'line', ')', 'self', '.', 'connect', '(', 'args', '[', '0', ']', ',', 'args', '[', '1', ']', ')', 'else', ':', 'self', '.', 'connect', '(', ')', 'self', '.', 'do_tables', '(', "''", ')']
login aws-acces-key aws-secret
['login', 'aws', '-', 'acces', '-', 'key', 'aws', '-', 'secret']
train
https://github.com/raff/dynash/blob/a2b4fab67dd85ceaa9c1bb7604ebc1768a7fc28e/dynash2/dynash2.py#L360-L368
2,587
pyinvoke/invocations
invocations/packaging/release.py
_release_line
def _release_line(c): """ Examine current repo state to determine what type of release to prep. :returns: A two-tuple of ``(branch-name, line-type)`` where: - ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``, ``gobbledygook`` (or, usually, ``HEAD`` if not on a branch). - ``line-type`` is a symbolic member of `.Release` representing what "type" of release the line appears to be for: - ``Release.BUGFIX`` if on a bugfix/stable release line, e.g. ``1.1``. - ``Release.FEATURE`` if on a feature-release branch (typically ``master``). - ``Release.UNDEFINED`` if neither of those appears to apply (usually means on some unmerged feature/dev branch). """ # TODO: I don't _think_ this technically overlaps with Releases (because # that only ever deals with changelog contents, and therefore full release # version numbers) but in case it does, move it there sometime. # TODO: this and similar calls in this module may want to be given an # explicit pointer-to-git-repo option (i.e. if run from outside project # context). # TODO: major releases? or are they big enough events we don't need to # bother with the script? Also just hard to gauge - when is master the next # 1.x feature vs 2.0? branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() type_ = Release.UNDEFINED if BUGFIX_RE.match(branch): type_ = Release.BUGFIX if FEATURE_RE.match(branch): type_ = Release.FEATURE return branch, type_
python
def _release_line(c): """ Examine current repo state to determine what type of release to prep. :returns: A two-tuple of ``(branch-name, line-type)`` where: - ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``, ``gobbledygook`` (or, usually, ``HEAD`` if not on a branch). - ``line-type`` is a symbolic member of `.Release` representing what "type" of release the line appears to be for: - ``Release.BUGFIX`` if on a bugfix/stable release line, e.g. ``1.1``. - ``Release.FEATURE`` if on a feature-release branch (typically ``master``). - ``Release.UNDEFINED`` if neither of those appears to apply (usually means on some unmerged feature/dev branch). """ # TODO: I don't _think_ this technically overlaps with Releases (because # that only ever deals with changelog contents, and therefore full release # version numbers) but in case it does, move it there sometime. # TODO: this and similar calls in this module may want to be given an # explicit pointer-to-git-repo option (i.e. if run from outside project # context). # TODO: major releases? or are they big enough events we don't need to # bother with the script? Also just hard to gauge - when is master the next # 1.x feature vs 2.0? branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip() type_ = Release.UNDEFINED if BUGFIX_RE.match(branch): type_ = Release.BUGFIX if FEATURE_RE.match(branch): type_ = Release.FEATURE return branch, type_
['def', '_release_line', '(', 'c', ')', ':', "# TODO: I don't _think_ this technically overlaps with Releases (because", '# that only ever deals with changelog contents, and therefore full release', '# version numbers) but in case it does, move it there sometime.', '# TODO: this and similar calls in this module may want to be given an', '# explicit pointer-to-git-repo option (i.e. if run from outside project', '# context).', "# TODO: major releases? or are they big enough events we don't need to", '# bother with the script? Also just hard to gauge - when is master the next', '# 1.x feature vs 2.0?', 'branch', '=', 'c', '.', 'run', '(', '"git rev-parse --abbrev-ref HEAD"', ',', 'hide', '=', 'True', ')', '.', 'stdout', '.', 'strip', '(', ')', 'type_', '=', 'Release', '.', 'UNDEFINED', 'if', 'BUGFIX_RE', '.', 'match', '(', 'branch', ')', ':', 'type_', '=', 'Release', '.', 'BUGFIX', 'if', 'FEATURE_RE', '.', 'match', '(', 'branch', ')', ':', 'type_', '=', 'Release', '.', 'FEATURE', 'return', 'branch', ',', 'type_']
Examine current repo state to determine what type of release to prep. :returns: A two-tuple of ``(branch-name, line-type)`` where: - ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``, ``gobbledygook`` (or, usually, ``HEAD`` if not on a branch). - ``line-type`` is a symbolic member of `.Release` representing what "type" of release the line appears to be for: - ``Release.BUGFIX`` if on a bugfix/stable release line, e.g. ``1.1``. - ``Release.FEATURE`` if on a feature-release branch (typically ``master``). - ``Release.UNDEFINED`` if neither of those appears to apply (usually means on some unmerged feature/dev branch).
['Examine', 'current', 'repo', 'state', 'to', 'determine', 'what', 'type', 'of', 'release', 'to', 'prep', '.']
train
https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/packaging/release.py#L330-L364
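The classification above relies on two module-level patterns, BUGFIX_RE and FEATURE_RE, that are defined elsewhere in release.py and are not shown in this record. The stand-in patterns below are assumptions chosen only to make the sketch self-contained (an N.N-style branch reads as bugfix, master as feature); the real patterns may differ:

import re

# Assumed stand-ins for BUGFIX_RE / FEATURE_RE; the real definitions live in
# invocations/packaging/release.py and may differ.
BUGFIX_RE = re.compile(r'^\d+\.\d+$')
FEATURE_RE = re.compile(r'^master$')

def classify(branch):
    if BUGFIX_RE.match(branch):
        return 'BUGFIX'
    if FEATURE_RE.match(branch):
        return 'FEATURE'
    return 'UNDEFINED'

for name in ('1.1', 'master', 'gobbledygook'):
    print(name, '->', classify(name))
# 1.1 -> BUGFIX, master -> FEATURE, gobbledygook -> UNDEFINED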
2,588
quodlibet/mutagen
mutagen/apev2.py
APEv2.__parse_tag
def __parse_tag(self, tag, count): """Raises IOError and APEBadItemError""" fileobj = cBytesIO(tag) for i in xrange(count): tag_data = fileobj.read(8) # someone writes wrong item counts if not tag_data: break if len(tag_data) != 8: raise error size = cdata.uint32_le(tag_data[:4]) flags = cdata.uint32_le(tag_data[4:8]) # Bits 1 and 2 bits are flags, 0-3 # Bit 0 is read/write flag, ignored kind = (flags & 6) >> 1 if kind == 3: raise APEBadItemError("value type must be 0, 1, or 2") key = value = fileobj.read(1) if not key: raise APEBadItemError while key[-1:] != b'\x00' and value: value = fileobj.read(1) if not value: raise APEBadItemError key += value if key[-1:] == b"\x00": key = key[:-1] if PY3: try: key = key.decode("ascii") except UnicodeError as err: reraise(APEBadItemError, err, sys.exc_info()[2]) value = fileobj.read(size) if len(value) != size: raise APEBadItemError value = _get_value_type(kind)._new(value) self[key] = value
python
def __parse_tag(self, tag, count): """Raises IOError and APEBadItemError""" fileobj = cBytesIO(tag) for i in xrange(count): tag_data = fileobj.read(8) # someone writes wrong item counts if not tag_data: break if len(tag_data) != 8: raise error size = cdata.uint32_le(tag_data[:4]) flags = cdata.uint32_le(tag_data[4:8]) # Bits 1 and 2 bits are flags, 0-3 # Bit 0 is read/write flag, ignored kind = (flags & 6) >> 1 if kind == 3: raise APEBadItemError("value type must be 0, 1, or 2") key = value = fileobj.read(1) if not key: raise APEBadItemError while key[-1:] != b'\x00' and value: value = fileobj.read(1) if not value: raise APEBadItemError key += value if key[-1:] == b"\x00": key = key[:-1] if PY3: try: key = key.decode("ascii") except UnicodeError as err: reraise(APEBadItemError, err, sys.exc_info()[2]) value = fileobj.read(size) if len(value) != size: raise APEBadItemError value = _get_value_type(kind)._new(value) self[key] = value
['def', '__parse_tag', '(', 'self', ',', 'tag', ',', 'count', ')', ':', 'fileobj', '=', 'cBytesIO', '(', 'tag', ')', 'for', 'i', 'in', 'xrange', '(', 'count', ')', ':', 'tag_data', '=', 'fileobj', '.', 'read', '(', '8', ')', '# someone writes wrong item counts', 'if', 'not', 'tag_data', ':', 'break', 'if', 'len', '(', 'tag_data', ')', '!=', '8', ':', 'raise', 'error', 'size', '=', 'cdata', '.', 'uint32_le', '(', 'tag_data', '[', ':', '4', ']', ')', 'flags', '=', 'cdata', '.', 'uint32_le', '(', 'tag_data', '[', '4', ':', '8', ']', ')', '# Bits 1 and 2 bits are flags, 0-3', '# Bit 0 is read/write flag, ignored', 'kind', '=', '(', 'flags', '&', '6', ')', '>>', '1', 'if', 'kind', '==', '3', ':', 'raise', 'APEBadItemError', '(', '"value type must be 0, 1, or 2"', ')', 'key', '=', 'value', '=', 'fileobj', '.', 'read', '(', '1', ')', 'if', 'not', 'key', ':', 'raise', 'APEBadItemError', 'while', 'key', '[', '-', '1', ':', ']', '!=', "b'\\x00'", 'and', 'value', ':', 'value', '=', 'fileobj', '.', 'read', '(', '1', ')', 'if', 'not', 'value', ':', 'raise', 'APEBadItemError', 'key', '+=', 'value', 'if', 'key', '[', '-', '1', ':', ']', '==', 'b"\\x00"', ':', 'key', '=', 'key', '[', ':', '-', '1', ']', 'if', 'PY3', ':', 'try', ':', 'key', '=', 'key', '.', 'decode', '(', '"ascii"', ')', 'except', 'UnicodeError', 'as', 'err', ':', 'reraise', '(', 'APEBadItemError', ',', 'err', ',', 'sys', '.', 'exc_info', '(', ')', '[', '2', ']', ')', 'value', '=', 'fileobj', '.', 'read', '(', 'size', ')', 'if', 'len', '(', 'value', ')', '!=', 'size', ':', 'raise', 'APEBadItemError', 'value', '=', '_get_value_type', '(', 'kind', ')', '.', '_new', '(', 'value', ')', 'self', '[', 'key', ']', '=', 'value']
Raises IOError and APEBadItemError
['Raises', 'IOError', 'and', 'APEBadItemError']
train
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/apev2.py#L306-L349
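Each APEv2 item parsed above starts with an 8-byte little-endian header (value size, then flags whose bits 1-2 encode the value kind), followed by a NUL-terminated key and the raw value bytes. A self-contained sketch that decodes one hand-crafted item the same way, using struct in place of mutagen's cdata helper; the item content is invented for illustration:

import struct
from io import BytesIO

# One hand-crafted APEv2 item: value b'Artist Name' (11 bytes), flags 0 (text),
# key 'Artist' terminated by a NUL byte.
item = struct.pack('<II', 11, 0) + b'Artist\x00' + b'Artist Name'

fileobj = BytesIO(item)
size, flags = struct.unpack('<II', fileobj.read(8))
kind = (flags & 6) >> 1          # 0 = text, 1 = binary, 2 = external locator

key = b''
while True:                      # read the key up to (and dropping) the NUL terminator
    ch = fileobj.read(1)
    if not ch or ch == b'\x00':
        break
    key += ch

value = fileobj.read(size)
print(key.decode('ascii'), kind, value)   # Artist 0 b'Artist Name'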
2,589
saltstack/salt
salt/returners/highstate_return.py
_produce_output
def _produce_output(report, failed, setup): ''' Produce output from the report dictionary generated by _generate_report ''' report_format = setup.get('report_format', 'yaml') log.debug('highstate output format: %s', report_format) if report_format == 'json': report_text = salt.utils.json.dumps(report) elif report_format == 'yaml': string_file = StringIO() salt.utils.yaml.safe_dump(report, string_file, default_flow_style=False) string_file.seek(0) report_text = string_file.read() else: string_file = StringIO() _generate_html(report, string_file) string_file.seek(0) report_text = string_file.read() report_delivery = setup.get('report_delivery', 'file') log.debug('highstate report_delivery: %s', report_delivery) if report_delivery == 'file': output_file = _sprinkle(setup.get('file_output', '/tmp/test.rpt')) with salt.utils.files.fopen(output_file, 'w') as out: out.write(salt.utils.stringutils.to_str(report_text)) else: msg = MIMEText(report_text, report_format) sender = setup.get('smtp_sender', '') recipients = setup.get('smtp_recipients', '') host = setup.get('smtp_server', '') port = int(setup.get('smtp_port', 25)) tls = setup.get('smtp_tls') username = setup.get('smtp_username') password = setup.get('smtp_password') if failed: subject = setup.get('smtp_failure_subject', 'Installation failure') else: subject = setup.get('smtp_success_subject', 'Installation success') subject = _sprinkle(subject) msg['Subject'] = subject msg['From'] = sender msg['To'] = recipients log.debug('highstate smtp port: %d', port) smtp = smtplib.SMTP(host=host, port=port) if tls is True: smtp.starttls() log.debug('highstate smtp tls enabled') if username and password: smtp.login(username, password) log.debug('highstate smtp authenticated') smtp.sendmail( sender, [x.strip() for x in recipients.split(',')], msg.as_string()) log.debug('highstate message sent.') smtp.quit()
python
def _produce_output(report, failed, setup): ''' Produce output from the report dictionary generated by _generate_report ''' report_format = setup.get('report_format', 'yaml') log.debug('highstate output format: %s', report_format) if report_format == 'json': report_text = salt.utils.json.dumps(report) elif report_format == 'yaml': string_file = StringIO() salt.utils.yaml.safe_dump(report, string_file, default_flow_style=False) string_file.seek(0) report_text = string_file.read() else: string_file = StringIO() _generate_html(report, string_file) string_file.seek(0) report_text = string_file.read() report_delivery = setup.get('report_delivery', 'file') log.debug('highstate report_delivery: %s', report_delivery) if report_delivery == 'file': output_file = _sprinkle(setup.get('file_output', '/tmp/test.rpt')) with salt.utils.files.fopen(output_file, 'w') as out: out.write(salt.utils.stringutils.to_str(report_text)) else: msg = MIMEText(report_text, report_format) sender = setup.get('smtp_sender', '') recipients = setup.get('smtp_recipients', '') host = setup.get('smtp_server', '') port = int(setup.get('smtp_port', 25)) tls = setup.get('smtp_tls') username = setup.get('smtp_username') password = setup.get('smtp_password') if failed: subject = setup.get('smtp_failure_subject', 'Installation failure') else: subject = setup.get('smtp_success_subject', 'Installation success') subject = _sprinkle(subject) msg['Subject'] = subject msg['From'] = sender msg['To'] = recipients log.debug('highstate smtp port: %d', port) smtp = smtplib.SMTP(host=host, port=port) if tls is True: smtp.starttls() log.debug('highstate smtp tls enabled') if username and password: smtp.login(username, password) log.debug('highstate smtp authenticated') smtp.sendmail( sender, [x.strip() for x in recipients.split(',')], msg.as_string()) log.debug('highstate message sent.') smtp.quit()
['def', '_produce_output', '(', 'report', ',', 'failed', ',', 'setup', ')', ':', 'report_format', '=', 'setup', '.', 'get', '(', "'report_format'", ',', "'yaml'", ')', 'log', '.', 'debug', '(', "'highstate output format: %s'", ',', 'report_format', ')', 'if', 'report_format', '==', "'json'", ':', 'report_text', '=', 'salt', '.', 'utils', '.', 'json', '.', 'dumps', '(', 'report', ')', 'elif', 'report_format', '==', "'yaml'", ':', 'string_file', '=', 'StringIO', '(', ')', 'salt', '.', 'utils', '.', 'yaml', '.', 'safe_dump', '(', 'report', ',', 'string_file', ',', 'default_flow_style', '=', 'False', ')', 'string_file', '.', 'seek', '(', '0', ')', 'report_text', '=', 'string_file', '.', 'read', '(', ')', 'else', ':', 'string_file', '=', 'StringIO', '(', ')', '_generate_html', '(', 'report', ',', 'string_file', ')', 'string_file', '.', 'seek', '(', '0', ')', 'report_text', '=', 'string_file', '.', 'read', '(', ')', 'report_delivery', '=', 'setup', '.', 'get', '(', "'report_delivery'", ',', "'file'", ')', 'log', '.', 'debug', '(', "'highstate report_delivery: %s'", ',', 'report_delivery', ')', 'if', 'report_delivery', '==', "'file'", ':', 'output_file', '=', '_sprinkle', '(', 'setup', '.', 'get', '(', "'file_output'", ',', "'/tmp/test.rpt'", ')', ')', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'output_file', ',', "'w'", ')', 'as', 'out', ':', 'out', '.', 'write', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_str', '(', 'report_text', ')', ')', 'else', ':', 'msg', '=', 'MIMEText', '(', 'report_text', ',', 'report_format', ')', 'sender', '=', 'setup', '.', 'get', '(', "'smtp_sender'", ',', "''", ')', 'recipients', '=', 'setup', '.', 'get', '(', "'smtp_recipients'", ',', "''", ')', 'host', '=', 'setup', '.', 'get', '(', "'smtp_server'", ',', "''", ')', 'port', '=', 'int', '(', 'setup', '.', 'get', '(', "'smtp_port'", ',', '25', ')', ')', 'tls', '=', 'setup', '.', 'get', '(', "'smtp_tls'", ')', 'username', '=', 'setup', '.', 'get', '(', "'smtp_username'", ')', 'password', '=', 'setup', '.', 'get', '(', "'smtp_password'", ')', 'if', 'failed', ':', 'subject', '=', 'setup', '.', 'get', '(', "'smtp_failure_subject'", ',', "'Installation failure'", ')', 'else', ':', 'subject', '=', 'setup', '.', 'get', '(', "'smtp_success_subject'", ',', "'Installation success'", ')', 'subject', '=', '_sprinkle', '(', 'subject', ')', 'msg', '[', "'Subject'", ']', '=', 'subject', 'msg', '[', "'From'", ']', '=', 'sender', 'msg', '[', "'To'", ']', '=', 'recipients', 'log', '.', 'debug', '(', "'highstate smtp port: %d'", ',', 'port', ')', 'smtp', '=', 'smtplib', '.', 'SMTP', '(', 'host', '=', 'host', ',', 'port', '=', 'port', ')', 'if', 'tls', 'is', 'True', ':', 'smtp', '.', 'starttls', '(', ')', 'log', '.', 'debug', '(', "'highstate smtp tls enabled'", ')', 'if', 'username', 'and', 'password', ':', 'smtp', '.', 'login', '(', 'username', ',', 'password', ')', 'log', '.', 'debug', '(', "'highstate smtp authenticated'", ')', 'smtp', '.', 'sendmail', '(', 'sender', ',', '[', 'x', '.', 'strip', '(', ')', 'for', 'x', 'in', 'recipients', '.', 'split', '(', "','", ')', ']', ',', 'msg', '.', 'as_string', '(', ')', ')', 'log', '.', 'debug', '(', "'highstate message sent.'", ')', 'smtp', '.', 'quit', '(', ')']
Produce output from the report dictionary generated by _generate_report
['Produce', 'output', 'from', 'the', 'report', 'dictionary', 'generated', 'by', '_generate_report']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/highstate_return.py#L425-L493
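The returner above serializes the report as JSON, YAML, or HTML and then either writes a file or mails it, driven entirely by the setup mapping. A self-contained sketch of just the format branch, serializing a toy report to JSON and YAML in the same way; PyYAML is assumed to be installed and the report content is invented:

import json
from io import StringIO
import yaml  # PyYAML, assumed available

report = {'stats': {'failed': 0, 'total': 3}, 'changes': []}  # illustrative only

def render(report, report_format='yaml'):
    if report_format == 'json':
        return json.dumps(report)
    # YAML branch mirrors the StringIO + safe_dump pattern used above.
    string_file = StringIO()
    yaml.safe_dump(report, string_file, default_flow_style=False)
    string_file.seek(0)
    return string_file.read()

print(render(report, 'json'))
print(render(report, 'yaml'))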
2,590
wright-group/WrightTools
WrightTools/artists/_helpers.py
plot_margins
def plot_margins(*, fig=None, inches=1., centers=True, edges=True): """Add lines onto a figure indicating the margins, centers, and edges. Useful for ensuring your figure design scripts work as intended, and for laying out figures. Parameters ---------- fig : matplotlib.figure.Figure object (optional) The figure to plot onto. If None, gets current figure. Default is None. inches : float (optional) The size of the figure margin, in inches. Default is 1. centers : bool (optional) Toggle for plotting lines indicating the figure center. Default is True. edges : bool (optional) Toggle for plotting lines indicating the figure edges. Default is True. """ if fig is None: fig = plt.gcf() size = fig.get_size_inches() # [H, V] trans_vert = inches / size[0] left = matplotlib.lines.Line2D( [trans_vert, trans_vert], [0, 1], transform=fig.transFigure, figure=fig ) right = matplotlib.lines.Line2D( [1 - trans_vert, 1 - trans_vert], [0, 1], transform=fig.transFigure, figure=fig ) trans_horz = inches / size[1] bottom = matplotlib.lines.Line2D( [0, 1], [trans_horz, trans_horz], transform=fig.transFigure, figure=fig ) top = matplotlib.lines.Line2D( [0, 1], [1 - trans_horz, 1 - trans_horz], transform=fig.transFigure, figure=fig ) fig.lines.extend([left, right, bottom, top]) if centers: vert = matplotlib.lines.Line2D( [0.5, 0.5], [0, 1], transform=fig.transFigure, figure=fig, c="r" ) horiz = matplotlib.lines.Line2D( [0, 1], [0.5, 0.5], transform=fig.transFigure, figure=fig, c="r" ) fig.lines.extend([vert, horiz]) if edges: left = matplotlib.lines.Line2D( [0, 0], [0, 1], transform=fig.transFigure, figure=fig, c="k" ) right = matplotlib.lines.Line2D( [1, 1], [0, 1], transform=fig.transFigure, figure=fig, c="k" ) bottom = matplotlib.lines.Line2D( [0, 1], [0, 0], transform=fig.transFigure, figure=fig, c="k" ) top = matplotlib.lines.Line2D([0, 1], [1, 1], transform=fig.transFigure, figure=fig, c="k") fig.lines.extend([left, right, bottom, top])
python
def plot_margins(*, fig=None, inches=1., centers=True, edges=True): """Add lines onto a figure indicating the margins, centers, and edges. Useful for ensuring your figure design scripts work as intended, and for laying out figures. Parameters ---------- fig : matplotlib.figure.Figure object (optional) The figure to plot onto. If None, gets current figure. Default is None. inches : float (optional) The size of the figure margin, in inches. Default is 1. centers : bool (optional) Toggle for plotting lines indicating the figure center. Default is True. edges : bool (optional) Toggle for plotting lines indicating the figure edges. Default is True. """ if fig is None: fig = plt.gcf() size = fig.get_size_inches() # [H, V] trans_vert = inches / size[0] left = matplotlib.lines.Line2D( [trans_vert, trans_vert], [0, 1], transform=fig.transFigure, figure=fig ) right = matplotlib.lines.Line2D( [1 - trans_vert, 1 - trans_vert], [0, 1], transform=fig.transFigure, figure=fig ) trans_horz = inches / size[1] bottom = matplotlib.lines.Line2D( [0, 1], [trans_horz, trans_horz], transform=fig.transFigure, figure=fig ) top = matplotlib.lines.Line2D( [0, 1], [1 - trans_horz, 1 - trans_horz], transform=fig.transFigure, figure=fig ) fig.lines.extend([left, right, bottom, top]) if centers: vert = matplotlib.lines.Line2D( [0.5, 0.5], [0, 1], transform=fig.transFigure, figure=fig, c="r" ) horiz = matplotlib.lines.Line2D( [0, 1], [0.5, 0.5], transform=fig.transFigure, figure=fig, c="r" ) fig.lines.extend([vert, horiz]) if edges: left = matplotlib.lines.Line2D( [0, 0], [0, 1], transform=fig.transFigure, figure=fig, c="k" ) right = matplotlib.lines.Line2D( [1, 1], [0, 1], transform=fig.transFigure, figure=fig, c="k" ) bottom = matplotlib.lines.Line2D( [0, 1], [0, 0], transform=fig.transFigure, figure=fig, c="k" ) top = matplotlib.lines.Line2D([0, 1], [1, 1], transform=fig.transFigure, figure=fig, c="k") fig.lines.extend([left, right, bottom, top])
['def', 'plot_margins', '(', '*', ',', 'fig', '=', 'None', ',', 'inches', '=', '1.', ',', 'centers', '=', 'True', ',', 'edges', '=', 'True', ')', ':', 'if', 'fig', 'is', 'None', ':', 'fig', '=', 'plt', '.', 'gcf', '(', ')', 'size', '=', 'fig', '.', 'get_size_inches', '(', ')', '# [H, V]', 'trans_vert', '=', 'inches', '/', 'size', '[', '0', ']', 'left', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', 'trans_vert', ',', 'trans_vert', ']', ',', '[', '0', ',', '1', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ')', 'right', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '1', '-', 'trans_vert', ',', '1', '-', 'trans_vert', ']', ',', '[', '0', ',', '1', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ')', 'trans_horz', '=', 'inches', '/', 'size', '[', '1', ']', 'bottom', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0', ',', '1', ']', ',', '[', 'trans_horz', ',', 'trans_horz', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ')', 'top', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0', ',', '1', ']', ',', '[', '1', '-', 'trans_horz', ',', '1', '-', 'trans_horz', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ')', 'fig', '.', 'lines', '.', 'extend', '(', '[', 'left', ',', 'right', ',', 'bottom', ',', 'top', ']', ')', 'if', 'centers', ':', 'vert', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0.5', ',', '0.5', ']', ',', '[', '0', ',', '1', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ',', 'c', '=', '"r"', ')', 'horiz', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0', ',', '1', ']', ',', '[', '0.5', ',', '0.5', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ',', 'c', '=', '"r"', ')', 'fig', '.', 'lines', '.', 'extend', '(', '[', 'vert', ',', 'horiz', ']', ')', 'if', 'edges', ':', 'left', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0', ',', '0', ']', ',', '[', '0', ',', '1', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ',', 'c', '=', '"k"', ')', 'right', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '1', ',', '1', ']', ',', '[', '0', ',', '1', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ',', 'c', '=', '"k"', ')', 'bottom', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0', ',', '1', ']', ',', '[', '0', ',', '0', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ',', 'c', '=', '"k"', ')', 'top', '=', 'matplotlib', '.', 'lines', '.', 'Line2D', '(', '[', '0', ',', '1', ']', ',', '[', '1', ',', '1', ']', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'figure', '=', 'fig', ',', 'c', '=', '"k"', ')', 'fig', '.', 'lines', '.', 'extend', '(', '[', 'left', ',', 'right', ',', 'bottom', ',', 'top', ']', ')']
Add lines onto a figure indicating the margins, centers, and edges. Useful for ensuring your figure design scripts work as intended, and for laying out figures. Parameters ---------- fig : matplotlib.figure.Figure object (optional) The figure to plot onto. If None, gets current figure. Default is None. inches : float (optional) The size of the figure margin, in inches. Default is 1. centers : bool (optional) Toggle for plotting lines indicating the figure center. Default is True. edges : bool (optional) Toggle for plotting lines indicating the figure edges. Default is True.
['Add', 'lines', 'onto', 'a', 'figure', 'indicating', 'the', 'margins', 'centers', 'and', 'edges', '.']
train
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/artists/_helpers.py#L695-L750
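A minimal usage sketch for the plot_margins helper in the record above. The import path follows the file path shown in this record (WrightTools/artists/_helpers.py); the figure size and the 1-inch margin are arbitrary choices, not WrightTools defaults.

import matplotlib.pyplot as plt
from WrightTools.artists._helpers import plot_margins  # path taken from this record; it may also be re-exported from WrightTools.artists

# Letter-sized canvas with margin, center, and edge guides drawn on top.
fig = plt.figure(figsize=(8.5, 11))
plot_margins(fig=fig, inches=1.0, centers=True, edges=True)
fig.savefig("layout_check.png")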
2,591
nir0s/serv
serv/init/base.py
Base.generate_file_from_template
def generate_file_from_template(self, template, destination): """Generate a file from a Jinja2 `template` and writes it to `destination` using `params`. `overwrite` allows to overwrite existing files. It is passed to the `generate` method. This is used by the different init implementations to generate init scripts/configs and deploy them to the relevant directories. Templates are looked up under init/templates/`template`. If the `destination` directory doesn't exist, it will alert the user and exit. We don't want to be creating any system related directories out of the blue. The exception to the rule is with nssm. While it may seem a bit weird, not all relevant directories exist out of the box. For instance, `/etc/sysconfig` doesn't necessarily exist even if systemd is used by default. """ # We cast the object to a string before passing it on as py3.x # will fail on Jinja2 if there are ints/bytes (not strings) in the # template which will not allow `env.from_string(template)` to # take place. templates = str(pkgutil.get_data(__name__, os.path.join( 'templates', template))) pretty_params = json.dumps(self.params, indent=4, sort_keys=True) self.logger.debug( 'Rendering %s with params: %s...', template, pretty_params) generated = jinja2.Environment().from_string( templates).render(self.params) self.logger.debug('Writing generated file to %s...', destination) self._should_overwrite(destination) with open(destination, 'w') as f: f.write(generated) self.files.append(destination)
python
def generate_file_from_template(self, template, destination): """Generate a file from a Jinja2 `template` and writes it to `destination` using `params`. `overwrite` allows to overwrite existing files. It is passed to the `generate` method. This is used by the different init implementations to generate init scripts/configs and deploy them to the relevant directories. Templates are looked up under init/templates/`template`. If the `destination` directory doesn't exist, it will alert the user and exit. We don't want to be creating any system related directories out of the blue. The exception to the rule is with nssm. While it may seem a bit weird, not all relevant directories exist out of the box. For instance, `/etc/sysconfig` doesn't necessarily exist even if systemd is used by default. """ # We cast the object to a string before passing it on as py3.x # will fail on Jinja2 if there are ints/bytes (not strings) in the # template which will not allow `env.from_string(template)` to # take place. templates = str(pkgutil.get_data(__name__, os.path.join( 'templates', template))) pretty_params = json.dumps(self.params, indent=4, sort_keys=True) self.logger.debug( 'Rendering %s with params: %s...', template, pretty_params) generated = jinja2.Environment().from_string( templates).render(self.params) self.logger.debug('Writing generated file to %s...', destination) self._should_overwrite(destination) with open(destination, 'w') as f: f.write(generated) self.files.append(destination)
['def', 'generate_file_from_template', '(', 'self', ',', 'template', ',', 'destination', ')', ':', '# We cast the object to a string before passing it on as py3.x', '# will fail on Jinja2 if there are ints/bytes (not strings) in the', '# template which will not allow `env.from_string(template)` to', '# take place.', 'templates', '=', 'str', '(', 'pkgutil', '.', 'get_data', '(', '__name__', ',', 'os', '.', 'path', '.', 'join', '(', "'templates'", ',', 'template', ')', ')', ')', 'pretty_params', '=', 'json', '.', 'dumps', '(', 'self', '.', 'params', ',', 'indent', '=', '4', ',', 'sort_keys', '=', 'True', ')', 'self', '.', 'logger', '.', 'debug', '(', "'Rendering %s with params: %s...'", ',', 'template', ',', 'pretty_params', ')', 'generated', '=', 'jinja2', '.', 'Environment', '(', ')', '.', 'from_string', '(', 'templates', ')', '.', 'render', '(', 'self', '.', 'params', ')', 'self', '.', 'logger', '.', 'debug', '(', "'Writing generated file to %s...'", ',', 'destination', ')', 'self', '.', '_should_overwrite', '(', 'destination', ')', 'with', 'open', '(', 'destination', ',', "'w'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'generated', ')', 'self', '.', 'files', '.', 'append', '(', 'destination', ')']
Generate a file from a Jinja2 `template` and write it to `destination` using `params`. `overwrite` allows overwriting existing files. It is passed to the `generate` method. This is used by the different init implementations to generate init scripts/configs and deploy them to the relevant directories. Templates are looked up under init/templates/`template`. If the `destination` directory doesn't exist, it will alert the user and exit. We don't want to be creating any system-related directories out of the blue. The exception to the rule is with nssm. While it may seem a bit weird, not all relevant directories exist out of the box. For instance, `/etc/sysconfig` doesn't necessarily exist even if systemd is used by default.
['Generate', 'a', 'file', 'from', 'a', 'Jinja2', 'template', 'and', 'write', 'it', 'to', 'destination', 'using', 'params', '.']
train
https://github.com/nir0s/serv/blob/7af724ed49c0eb766c37c4b5287b043a8cf99e9c/serv/init/base.py#L163-L198
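generate_file_from_template above depends on Base internals (self.params, self.logger, self.files, self._should_overwrite), so a standalone sketch of the same pkgutil-plus-Jinja2 pattern may be easier to follow. The package name, template name, and params below are made up for illustration; decoding the bytes explicitly sidesteps the str(bytes) quirk the source comment works around.

import os
import pkgutil
import jinja2

def render_packaged_template(package, template, params, destination):
    # Load the template that ships inside the package and decode it to text.
    raw = pkgutil.get_data(package, os.path.join('templates', template))
    generated = jinja2.Environment().from_string(raw.decode('utf-8')).render(params)
    with open(destination, 'w') as f:
        f.write(generated)

# Hypothetical call; 'my_service' and the params are illustrative only.
# render_packaged_template('serv.init', 'my_service', {'name': 'nginx'}, '/tmp/my_service.conf')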
2,592
rstoneback/pysat
pysat/_constellation.py
Constellation.difference
def difference(self, instrument1, instrument2, bounds, data_labels, cost_function): """ Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label are inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project. """ """ Draft Pseudocode ---------------- Check integrity of inputs. Let STD_LABELS be the constant tuple: ("time", "lat", "long", "alt") Note: modify so that user can override labels for time, lat, long, data for each satelite. // We only care about the data currently loaded into each object. Let start be the later of the datetime of the first piece of data loaded into s1, the first piece of data loaded into s2, and the user supplied start bound. Let end be the later of the datetime of the first piece of data loaded into s1, the first piece of data loaded into s2, and the user supplied end bound. If start is after end, raise an error. // Let data be the 2D array of deques holding each piece // of data, sorted into bins by lat/long/alt. Let s1_data (resp s2_data) be data from s1.data, s2.data filtered by user-provided lat/long/alt bounds, time bounds calculated. Let data be a dictionary of lists with the keys [ dl1 for dl1, dl2 in data_labels ] + STD_LABELS + [ lb+"2" for lb in STD_LABELS ] For each piece of data s1_point in s1_data: # Hopefully np.where is very good, because this # runs O(n) times. # We could try reusing selections, maybe, if needed. # This would probably involve binning. Let s2_near be the data from s2.data within certain bounds on lat/long/alt/time using 8 statements to numpy.where. We can probably get those defaults from the user or handy constants / config? # We could try a different algorithm for closest pairs # of points. Let distance be the numpy array representing the distance between s1_point and each point in s2_near. # S: Difference for others: change this line. For each of those, calculate the spatial difference from the s1 using lat/long/alt. If s2_near is empty; break loop. Let s2_nearest be the point in s2_near corresponding to the lowest distance. Append to data: a point, indexed by the time from s1_point, containing the following data: # note Let n be the length of data["time"]. For each key in data: Assert len(data[key]) == n End for. # Create data row to pass to pandas. Let row be an empty dict. For dl1, dl2 in data_labels: Append s1_point[dl1] - s2_nearest[dl2] to data[dl1]. 
For key in STD_LABELS: Append s1_point[translate[key]] to data[key] key = key+"2" Append s2_nearest[translate[key]] to data[key] Let data_df be a pandas dataframe created from the data in data. return { 'data': data_df, 'start':start, 'end':end } """ labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist'] data = {label: [] for label in labels} # Apply bounds inst1 = instrument1.data inst2 = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] low = b[2] high = b[3] data1 = inst1[label1] ind1 = np.where((data1 >= low) & (data1 < high)) inst1 = inst1.iloc[ind1] data2 = inst2[label2] ind2 = np.where((data2 >= low) & (data2 < high)) inst2 = inst2.iloc[ind2] for i, s1_point in inst1.iterrows(): # Gets points in instrument2 within the given bounds s2_near = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] s1_val = s1_point[label1] max_dist = b[4] minbound = s1_val - max_dist maxbound = s1_val + max_dist data2 = s2_near[label2] indices = np.where((data2 >= minbound) & (data2 < maxbound)) s2_near = s2_near.iloc[indices] # Finds nearest point to s1_point in s2_near s2_nearest = None min_dist = float('NaN') for j, s2_point in s2_near.iterrows(): dist = cost_function(s1_point, s2_point) if dist < min_dist or min_dist != min_dist: min_dist = dist s2_nearest = s2_point data['dist'].append(min_dist) # Append difference to data dict for dl1, dl2 in data_labels: if s2_nearest is not None: data[dl1].append(s1_point[dl1] - s2_nearest[dl2]) else: data[dl1].append(float('NaN')) # Append the rest of the row for b in bounds: label1 = b[0] label2 = b[1] data['1_'+label1].append(s1_point[label1]) if s2_nearest is not None: data['2_'+label2].append(s2_nearest[label2]) else: data['2_'+label2].append(float('NaN')) data_df = pds.DataFrame(data=data) return data_df
python
def difference(self, instrument1, instrument2, bounds, data_labels, cost_function): """ Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label are inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project. """ """ Draft Pseudocode ---------------- Check integrity of inputs. Let STD_LABELS be the constant tuple: ("time", "lat", "long", "alt") Note: modify so that user can override labels for time, lat, long, data for each satelite. // We only care about the data currently loaded into each object. Let start be the later of the datetime of the first piece of data loaded into s1, the first piece of data loaded into s2, and the user supplied start bound. Let end be the later of the datetime of the first piece of data loaded into s1, the first piece of data loaded into s2, and the user supplied end bound. If start is after end, raise an error. // Let data be the 2D array of deques holding each piece // of data, sorted into bins by lat/long/alt. Let s1_data (resp s2_data) be data from s1.data, s2.data filtered by user-provided lat/long/alt bounds, time bounds calculated. Let data be a dictionary of lists with the keys [ dl1 for dl1, dl2 in data_labels ] + STD_LABELS + [ lb+"2" for lb in STD_LABELS ] For each piece of data s1_point in s1_data: # Hopefully np.where is very good, because this # runs O(n) times. # We could try reusing selections, maybe, if needed. # This would probably involve binning. Let s2_near be the data from s2.data within certain bounds on lat/long/alt/time using 8 statements to numpy.where. We can probably get those defaults from the user or handy constants / config? # We could try a different algorithm for closest pairs # of points. Let distance be the numpy array representing the distance between s1_point and each point in s2_near. # S: Difference for others: change this line. For each of those, calculate the spatial difference from the s1 using lat/long/alt. If s2_near is empty; break loop. Let s2_nearest be the point in s2_near corresponding to the lowest distance. Append to data: a point, indexed by the time from s1_point, containing the following data: # note Let n be the length of data["time"]. For each key in data: Assert len(data[key]) == n End for. # Create data row to pass to pandas. Let row be an empty dict. For dl1, dl2 in data_labels: Append s1_point[dl1] - s2_nearest[dl2] to data[dl1]. 
For key in STD_LABELS: Append s1_point[translate[key]] to data[key] key = key+"2" Append s2_nearest[translate[key]] to data[key] Let data_df be a pandas dataframe created from the data in data. return { 'data': data_df, 'start':start, 'end':end } """ labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist'] data = {label: [] for label in labels} # Apply bounds inst1 = instrument1.data inst2 = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] low = b[2] high = b[3] data1 = inst1[label1] ind1 = np.where((data1 >= low) & (data1 < high)) inst1 = inst1.iloc[ind1] data2 = inst2[label2] ind2 = np.where((data2 >= low) & (data2 < high)) inst2 = inst2.iloc[ind2] for i, s1_point in inst1.iterrows(): # Gets points in instrument2 within the given bounds s2_near = instrument2.data for b in bounds: label1 = b[0] label2 = b[1] s1_val = s1_point[label1] max_dist = b[4] minbound = s1_val - max_dist maxbound = s1_val + max_dist data2 = s2_near[label2] indices = np.where((data2 >= minbound) & (data2 < maxbound)) s2_near = s2_near.iloc[indices] # Finds nearest point to s1_point in s2_near s2_nearest = None min_dist = float('NaN') for j, s2_point in s2_near.iterrows(): dist = cost_function(s1_point, s2_point) if dist < min_dist or min_dist != min_dist: min_dist = dist s2_nearest = s2_point data['dist'].append(min_dist) # Append difference to data dict for dl1, dl2 in data_labels: if s2_nearest is not None: data[dl1].append(s1_point[dl1] - s2_nearest[dl2]) else: data[dl1].append(float('NaN')) # Append the rest of the row for b in bounds: label1 = b[0] label2 = b[1] data['1_'+label1].append(s1_point[label1]) if s2_nearest is not None: data['2_'+label2].append(s2_nearest[label2]) else: data['2_'+label2].append(float('NaN')) data_df = pds.DataFrame(data=data) return data_df
['def', 'difference', '(', 'self', ',', 'instrument1', ',', 'instrument2', ',', 'bounds', ',', 'data_labels', ',', 'cost_function', ')', ':', '"""\n Draft Pseudocode\n ----------------\n Check integrity of inputs.\n\n Let STD_LABELS be the constant tuple:\n ("time", "lat", "long", "alt")\n\n Note: modify so that user can override labels for time,\n lat, long, data for each satelite.\n\n // We only care about the data currently loaded\n into each object.\n\n Let start be the later of the datetime of the\n first piece of data loaded into s1, the first\n piece of data loaded into s2, and the user\n supplied start bound.\n\n Let end be the later of the datetime of the first\n piece of data loaded into s1, the first piece\n of data loaded into s2, and the user supplied\n end bound.\n\n If start is after end, raise an error.\n\n // Let data be the 2D array of deques holding each piece\n // of data, sorted into bins by lat/long/alt.\n\n Let s1_data (resp s2_data) be data from s1.data, s2.data\n filtered by user-provided lat/long/alt bounds, time bounds\n calculated.\n\n Let data be a dictionary of lists with the keys\n [ dl1 for dl1, dl2 in data_labels ] +\n STD_LABELS +\n [ lb+"2" for lb in STD_LABELS ]\n\n For each piece of data s1_point in s1_data:\n\n # Hopefully np.where is very good, because this\n # runs O(n) times.\n # We could try reusing selections, maybe, if needed.\n # This would probably involve binning.\n Let s2_near be the data from s2.data within certain\n bounds on lat/long/alt/time using 8 statements to\n numpy.where. We can probably get those defaults from\n the user or handy constants / config?\n\n # We could try a different algorithm for closest pairs\n # of points.\n\n Let distance be the numpy array representing the\n distance between s1_point and each point in s2_near.\n\n # S: Difference for others: change this line.\n For each of those, calculate the spatial difference\n from the s1 using lat/long/alt. 
If s2_near is\n empty; break loop.\n\n Let s2_nearest be the point in s2_near corresponding\n to the lowest distance.\n\n Append to data: a point, indexed by the time from\n s1_point, containing the following data:\n\n # note\n Let n be the length of data["time"].\n For each key in data:\n Assert len(data[key]) == n\n End for.\n\n # Create data row to pass to pandas.\n Let row be an empty dict.\n For dl1, dl2 in data_labels:\n Append s1_point[dl1] - s2_nearest[dl2] to data[dl1].\n\n For key in STD_LABELS:\n Append s1_point[translate[key]] to data[key]\n key = key+"2"\n Append s2_nearest[translate[key]] to data[key]\n\n Let data_df be a pandas dataframe created from the data\n in data.\n\n return { \'data\': data_df, \'start\':start, \'end\':end }\n """', 'labels', '=', '[', 'dl1', 'for', 'dl1', ',', 'dl2', 'in', 'data_labels', ']', '+', '[', "'1_'", '+', 'b', '[', '0', ']', 'for', 'b', 'in', 'bounds', ']', '+', '[', "'2_'", '+', 'b', '[', '1', ']', 'for', 'b', 'in', 'bounds', ']', '+', '[', "'dist'", ']', 'data', '=', '{', 'label', ':', '[', ']', 'for', 'label', 'in', 'labels', '}', '# Apply bounds', 'inst1', '=', 'instrument1', '.', 'data', 'inst2', '=', 'instrument2', '.', 'data', 'for', 'b', 'in', 'bounds', ':', 'label1', '=', 'b', '[', '0', ']', 'label2', '=', 'b', '[', '1', ']', 'low', '=', 'b', '[', '2', ']', 'high', '=', 'b', '[', '3', ']', 'data1', '=', 'inst1', '[', 'label1', ']', 'ind1', '=', 'np', '.', 'where', '(', '(', 'data1', '>=', 'low', ')', '&', '(', 'data1', '<', 'high', ')', ')', 'inst1', '=', 'inst1', '.', 'iloc', '[', 'ind1', ']', 'data2', '=', 'inst2', '[', 'label2', ']', 'ind2', '=', 'np', '.', 'where', '(', '(', 'data2', '>=', 'low', ')', '&', '(', 'data2', '<', 'high', ')', ')', 'inst2', '=', 'inst2', '.', 'iloc', '[', 'ind2', ']', 'for', 'i', ',', 's1_point', 'in', 'inst1', '.', 'iterrows', '(', ')', ':', '# Gets points in instrument2 within the given bounds', 's2_near', '=', 'instrument2', '.', 'data', 'for', 'b', 'in', 'bounds', ':', 'label1', '=', 'b', '[', '0', ']', 'label2', '=', 'b', '[', '1', ']', 's1_val', '=', 's1_point', '[', 'label1', ']', 'max_dist', '=', 'b', '[', '4', ']', 'minbound', '=', 's1_val', '-', 'max_dist', 'maxbound', '=', 's1_val', '+', 'max_dist', 'data2', '=', 's2_near', '[', 'label2', ']', 'indices', '=', 'np', '.', 'where', '(', '(', 'data2', '>=', 'minbound', ')', '&', '(', 'data2', '<', 'maxbound', ')', ')', 's2_near', '=', 's2_near', '.', 'iloc', '[', 'indices', ']', '# Finds nearest point to s1_point in s2_near', 's2_nearest', '=', 'None', 'min_dist', '=', 'float', '(', "'NaN'", ')', 'for', 'j', ',', 's2_point', 'in', 's2_near', '.', 'iterrows', '(', ')', ':', 'dist', '=', 'cost_function', '(', 's1_point', ',', 's2_point', ')', 'if', 'dist', '<', 'min_dist', 'or', 'min_dist', '!=', 'min_dist', ':', 'min_dist', '=', 'dist', 's2_nearest', '=', 's2_point', 'data', '[', "'dist'", ']', '.', 'append', '(', 'min_dist', ')', '# Append difference to data dict', 'for', 'dl1', ',', 'dl2', 'in', 'data_labels', ':', 'if', 's2_nearest', 'is', 'not', 'None', ':', 'data', '[', 'dl1', ']', '.', 'append', '(', 's1_point', '[', 'dl1', ']', '-', 's2_nearest', '[', 'dl2', ']', ')', 'else', ':', 'data', '[', 'dl1', ']', '.', 'append', '(', 'float', '(', "'NaN'", ')', ')', '# Append the rest of the row', 'for', 'b', 'in', 'bounds', ':', 'label1', '=', 'b', '[', '0', ']', 'label2', '=', 'b', '[', '1', ']', 'data', '[', "'1_'", '+', 'label1', ']', '.', 'append', '(', 's1_point', '[', 'label1', ']', ')', 'if', 's2_nearest', 'is', 'not', 'None', ':', 
'data', '[', "'2_'", '+', 'label2', ']', '.', 'append', '(', 's2_nearest', '[', 'label2', ']', ')', 'else', ':', 'data', '[', "'2_'", '+', 'label2', ']', '.', 'append', '(', 'float', '(', "'NaN'", ')', ')', 'data_df', '=', 'pds', '.', 'DataFrame', '(', 'data', '=', 'data', ')', 'return', 'data_df']
Calculates the difference in signals from multiple instruments within the given bounds. Parameters ---------- instrument1 : Instrument Information must already be loaded into the instrument. instrument2 : Instrument Information must already be loaded into the instrument. bounds : list of tuples in the form (inst1_label, inst2_label, min, max, max_difference) inst1_label and inst2_label are labels for the data in instrument1 and instrument2 min and max are bounds on the data considered max_difference is the maximum difference between two points for the difference to be calculated data_labels : list of tuples of data labels The first key is used to access data in s1 and the second data in s2. cost_function : function function that operates on two rows of the instrument data. used to determine the distance between two points for finding closest points Returns ------- data_df: pandas DataFrame Each row has a point from instrument1, with the keys preceded by '1_', and a point within bounds on that point from instrument2 with the keys preceded by '2_', and the difference between the instruments' data for all the labels in data_labels Created as part of a Spring 2018 UTDesign project.
['Calculates', 'the', 'difference', 'in', 'signals', 'from', 'multiple', 'instruments', 'within', 'the', 'given', 'bounds', '.']
train
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/_constellation.py#L254-L451
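A sketch of what the cost_function argument to difference might look like: any callable that takes one row from each instrument and returns a distance. The column names (latitude, longitude, alt), the bounds, and the data labels below are assumptions for illustration, not pysat requirements.

def spatial_cost(s1_row, s2_row):
    # Squared distance over assumed position columns of the two DataFrame rows.
    return ((s1_row['latitude'] - s2_row['latitude']) ** 2 +
            (s1_row['longitude'] - s2_row['longitude']) ** 2 +
            (s1_row['alt'] - s2_row['alt']) ** 2)

# Hypothetical call, with const a Constellation and inst1/inst2 loaded Instruments;
# each bound is (inst1_label, inst2_label, min, max, max_difference).
# diff_df = const.difference(
#     inst1, inst2,
#     bounds=[('latitude', 'latitude', -50.0, 50.0, 1.0),
#             ('longitude', 'longitude', 0.0, 360.0, 1.0)],
#     data_labels=[('ion_temp', 'ion_temp')],
#     cost_function=spatial_cost)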
2,593
django-haystack/pysolr
pysolr.py
SolrCoreAdmin.create
def create(self, name, instance_dir=None, config='solrconfig.xml', schema='schema.xml'): """http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08""" params = { 'action': 'CREATE', 'name': name, 'config': config, 'schema': schema, } if instance_dir is None: params.update(instanceDir=name) else: params.update(instanceDir=instance_dir) return self._get_url(self.url, params=params)
python
def create(self, name, instance_dir=None, config='solrconfig.xml', schema='schema.xml'): """http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08""" params = { 'action': 'CREATE', 'name': name, 'config': config, 'schema': schema, } if instance_dir is None: params.update(instanceDir=name) else: params.update(instanceDir=instance_dir) return self._get_url(self.url, params=params)
['def', 'create', '(', 'self', ',', 'name', ',', 'instance_dir', '=', 'None', ',', 'config', '=', "'solrconfig.xml'", ',', 'schema', '=', "'schema.xml'", ')', ':', 'params', '=', '{', "'action'", ':', "'CREATE'", ',', "'name'", ':', 'name', ',', "'config'", ':', 'config', ',', "'schema'", ':', 'schema', ',', '}', 'if', 'instance_dir', 'is', 'None', ':', 'params', '.', 'update', '(', 'instanceDir', '=', 'name', ')', 'else', ':', 'params', '.', 'update', '(', 'instanceDir', '=', 'instance_dir', ')', 'return', 'self', '.', '_get_url', '(', 'self', '.', 'url', ',', 'params', '=', 'params', ')']
http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08
['http', ':', '//', 'wiki', '.', 'apache', '.', 'org', '/', 'solr', '/', 'CoreAdmin#head', '-', '7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08']
train
https://github.com/django-haystack/pysolr/blob/ee28b39324fa21a99842d297e313c1759d8adbd2/pysolr.py#L1145-L1159
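A short usage sketch for SolrCoreAdmin.create above, assuming a local Solr instance whose core-admin handler lives at the usual /solr/admin/cores URL; the core and directory names are placeholders.

from pysolr import SolrCoreAdmin

admin = SolrCoreAdmin('http://localhost:8983/solr/admin/cores')

# With instance_dir omitted, instanceDir defaults to the core name.
# admin.create('example_core')

# Explicit instance directory and schema file.
# admin.create('example_core', instance_dir='example_instance', schema='managed-schema')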
2,594
saltstack/salt
salt/states/github.py
team_absent
def team_absent(name, profile="github", **kwargs): ''' Ensure a team is absent. Example: .. code-block:: yaml ensure team test is present in github: github.team_absent: - name: 'test' The following parameters are required: name This is the name of the team in the organization. .. versionadded:: 2016.11.0 ''' ret = { 'name': name, 'changes': {}, 'result': None, 'comment': '' } target = __salt__['github.get_team'](name, profile=profile, **kwargs) if not target: ret['comment'] = 'Team {0} does not exist'.format(name) ret['result'] = True return ret else: if __opts__['test']: ret['comment'] = "Team {0} will be deleted".format(name) ret['result'] = None return ret result = __salt__['github.remove_team'](name, profile=profile, **kwargs) if result: ret['comment'] = 'Deleted team {0}'.format(name) ret['changes'].setdefault('old', 'Team {0} exists'.format(name)) ret['changes'].setdefault('new', 'Team {0} deleted'.format(name)) ret['result'] = True else: ret['comment'] = 'Failed to delete {0}'.format(name) ret['result'] = False return ret
python
def team_absent(name, profile="github", **kwargs): ''' Ensure a team is absent. Example: .. code-block:: yaml ensure team test is present in github: github.team_absent: - name: 'test' The following parameters are required: name This is the name of the team in the organization. .. versionadded:: 2016.11.0 ''' ret = { 'name': name, 'changes': {}, 'result': None, 'comment': '' } target = __salt__['github.get_team'](name, profile=profile, **kwargs) if not target: ret['comment'] = 'Team {0} does not exist'.format(name) ret['result'] = True return ret else: if __opts__['test']: ret['comment'] = "Team {0} will be deleted".format(name) ret['result'] = None return ret result = __salt__['github.remove_team'](name, profile=profile, **kwargs) if result: ret['comment'] = 'Deleted team {0}'.format(name) ret['changes'].setdefault('old', 'Team {0} exists'.format(name)) ret['changes'].setdefault('new', 'Team {0} deleted'.format(name)) ret['result'] = True else: ret['comment'] = 'Failed to delete {0}'.format(name) ret['result'] = False return ret
['def', 'team_absent', '(', 'name', ',', 'profile', '=', '"github"', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'None', ',', "'comment'", ':', "''", '}', 'target', '=', '__salt__', '[', "'github.get_team'", ']', '(', 'name', ',', 'profile', '=', 'profile', ',', '*', '*', 'kwargs', ')', 'if', 'not', 'target', ':', 'ret', '[', "'comment'", ']', '=', "'Team {0} does not exist'", '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'True', 'return', 'ret', 'else', ':', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'comment'", ']', '=', '"Team {0} will be deleted"', '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'None', 'return', 'ret', 'result', '=', '__salt__', '[', "'github.remove_team'", ']', '(', 'name', ',', 'profile', '=', 'profile', ',', '*', '*', 'kwargs', ')', 'if', 'result', ':', 'ret', '[', "'comment'", ']', '=', "'Deleted team {0}'", '.', 'format', '(', 'name', ')', 'ret', '[', "'changes'", ']', '.', 'setdefault', '(', "'old'", ',', "'Team {0} exists'", '.', 'format', '(', 'name', ')', ')', 'ret', '[', "'changes'", ']', '.', 'setdefault', '(', "'new'", ',', "'Team {0} deleted'", '.', 'format', '(', 'name', ')', ')', 'ret', '[', "'result'", ']', '=', 'True', 'else', ':', 'ret', '[', "'comment'", ']', '=', "'Failed to delete {0}'", '.', 'format', '(', 'name', ')', 'ret', '[', "'result'", ']', '=', 'False', 'return', 'ret']
Ensure a team is absent. Example: .. code-block:: yaml ensure team test is absent in github: github.team_absent: - name: 'test' The following parameters are required: name This is the name of the team in the organization. .. versionadded:: 2016.11.0
['Ensure', 'a', 'team', 'is', 'absent', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/github.py#L424-L473
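team_absent above reads the loader-injected __salt__ and __opts__ globals, so it is normally driven from an SLS file like the one in its docstring. For a quick interactive check those globals can be stubbed by hand; this is a rough test-style sketch assuming Salt is installed, not the normal invocation path.

import salt.states.github as github_state

# Stub the loader-injected globals: the team does not exist, and this is not a test run.
github_state.__salt__ = {'github.get_team': lambda name, profile='github', **kwargs: None}
github_state.__opts__ = {'test': False}

ret = github_state.team_absent('test')
assert ret['result'] is True
assert ret['comment'] == 'Team test does not exist'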
2,595
google/transitfeed
transitfeed/problems.py
ExceptionWithContext.GetOrderKey
def GetOrderKey(self): """Return a list that can be used to sort problems into a consistent order. Returns: A list of values. """ context_attributes = ['_type'] context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS) context_attributes.extend(self._GetExtraOrderAttributes()) tokens = [] for context_attribute in context_attributes: tokens.append(getattr(self, context_attribute, None)) return tokens
python
def GetOrderKey(self): """Return a list that can be used to sort problems into a consistent order. Returns: A list of values. """ context_attributes = ['_type'] context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS) context_attributes.extend(self._GetExtraOrderAttributes()) tokens = [] for context_attribute in context_attributes: tokens.append(getattr(self, context_attribute, None)) return tokens
['def', 'GetOrderKey', '(', 'self', ')', ':', 'context_attributes', '=', '[', "'_type'", ']', 'context_attributes', '.', 'extend', '(', 'ExceptionWithContext', '.', 'CONTEXT_PARTS', ')', 'context_attributes', '.', 'extend', '(', 'self', '.', '_GetExtraOrderAttributes', '(', ')', ')', 'tokens', '=', '[', ']', 'for', 'context_attribute', 'in', 'context_attributes', ':', 'tokens', '.', 'append', '(', 'getattr', '(', 'self', ',', 'context_attribute', ',', 'None', ')', ')', 'return', 'tokens']
Return a list that can be used to sort problems into a consistent order. Returns: A list of values.
['Return', 'a', 'list', 'that', 'can', 'be', 'used', 'to', 'sort', 'problems', 'into', 'a', 'consistent', 'order', '.']
train
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L506-L519
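GetOrderKey above is meant to give validation problems a stable sort order. A small sketch of that use, assuming problems is a list of ExceptionWithContext instances collected during feed validation; since missing attributes come back as None, the key below maps every value to a string so the comparison stays well-defined on Python 3.

def _problem_sort_key(problem):
    # GetOrderKey returns a list that may mix None, strings, and numbers.
    return tuple('' if value is None else str(value) for value in problem.GetOrderKey())

# problems.sort(key=_problem_sort_key)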
2,596
uw-it-aca/uw-restclients
restclients/bookstore.py
Bookstore.get_verba_link_for_schedule
def get_verba_link_for_schedule(self, schedule): """ Returns a link to verba. The link varies by campus and schedule. Multiple calls to this with the same schedule may result in different urls. """ dao = Book_DAO() url = self.get_verba_url(schedule) response = dao.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) data = json.loads(response.data) for key in data: if re.match(r'^[A-Z]{2}[0-9]{5}$', key): return "%s%s&quarter=%s" % (BOOK_PREFIX, key, schedule.term.quarter)
python
def get_verba_link_for_schedule(self, schedule): """ Returns a link to verba. The link varies by campus and schedule. Multiple calls to this with the same schedule may result in different urls. """ dao = Book_DAO() url = self.get_verba_url(schedule) response = dao.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) data = json.loads(response.data) for key in data: if re.match(r'^[A-Z]{2}[0-9]{5}$', key): return "%s%s&quarter=%s" % (BOOK_PREFIX, key, schedule.term.quarter)
['def', 'get_verba_link_for_schedule', '(', 'self', ',', 'schedule', ')', ':', 'dao', '=', 'Book_DAO', '(', ')', 'url', '=', 'self', '.', 'get_verba_url', '(', 'schedule', ')', 'response', '=', 'dao', '.', 'getURL', '(', 'url', ',', '{', '"Accept"', ':', '"application/json"', '}', ')', 'if', 'response', '.', 'status', '!=', '200', ':', 'raise', 'DataFailureException', '(', 'url', ',', 'response', '.', 'status', ',', 'response', '.', 'data', ')', 'data', '=', 'json', '.', 'loads', '(', 'response', '.', 'data', ')', 'for', 'key', 'in', 'data', ':', 'if', 're', '.', 'match', '(', "r'^[A-Z]{2}[0-9]{5}$'", ',', 'key', ')', ':', 'return', '"%s%s&quarter=%s"', '%', '(', 'BOOK_PREFIX', ',', 'key', ',', 'schedule', '.', 'term', '.', 'quarter', ')']
Returns a link to verba. The link varies by campus and schedule. Multiple calls to this with the same schedule may result in different urls.
['Returns', 'a', 'link', 'to', 'verba', '.', 'The', 'link', 'varies', 'by', 'campus', 'and', 'schedule', '.', 'Multiple', 'calls', 'to', 'this', 'with', 'the', 'same', 'schedule', 'may', 'result', 'in', 'different', 'urls', '.']
train
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/bookstore.py#L76-L96
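The key step in get_verba_link_for_schedule above is the regex that picks the store-style key out of the JSON response. A standalone check of that pattern; the sample keys are invented, not real response data.

import re

PATTERN = r'^[A-Z]{2}[0-9]{5}$'

for key in ('UW12345', 'meta', 'uw12345', 'UW1234'):
    print(key, bool(re.match(PATTERN, key)))
# Only 'UW12345' matches: two capital letters followed by exactly five digits.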
2,597
dask/dask-ml
dask_ml/cluster/spectral.py
_slice_mostly_sorted
def _slice_mostly_sorted(array, keep, rest, ind=None): """Slice dask array `array` that is almost entirely sorted already. We perform approximately `2 * len(keep)` slices on `array`. This is OK, since `keep` is small. Individually, each of these slices is entirely sorted. Parameters ---------- array : dask.array.Array keep : ndarray[Int] This must be sorted. rest : ndarray[Bool] ind : ndarray[Int], optional Returns ------- sliced : dask.array.Array """ if ind is None: ind = np.arange(len(array)) idx = np.argsort(np.concatenate([keep, ind[rest]])) slices = [] if keep[0] > 0: # avoid creating empty slices slices.append(slice(None, keep[0])) slices.append([keep[0]]) windows = zip(keep[:-1], keep[1:]) for l, r in windows: if r > l + 1: # avoid creating empty slices slices.append(slice(l + 1, r)) slices.append([r]) if keep[-1] < len(array) - 1: # avoid creating empty slices slices.append(slice(keep[-1] + 1, None)) result = da.concatenate([array[idx[slice_]] for slice_ in slices]) return result
python
def _slice_mostly_sorted(array, keep, rest, ind=None): """Slice dask array `array` that is almost entirely sorted already. We perform approximately `2 * len(keep)` slices on `array`. This is OK, since `keep` is small. Individually, each of these slices is entirely sorted. Parameters ---------- array : dask.array.Array keep : ndarray[Int] This must be sorted. rest : ndarray[Bool] ind : ndarray[Int], optional Returns ------- sliced : dask.array.Array """ if ind is None: ind = np.arange(len(array)) idx = np.argsort(np.concatenate([keep, ind[rest]])) slices = [] if keep[0] > 0: # avoid creating empty slices slices.append(slice(None, keep[0])) slices.append([keep[0]]) windows = zip(keep[:-1], keep[1:]) for l, r in windows: if r > l + 1: # avoid creating empty slices slices.append(slice(l + 1, r)) slices.append([r]) if keep[-1] < len(array) - 1: # avoid creating empty slices slices.append(slice(keep[-1] + 1, None)) result = da.concatenate([array[idx[slice_]] for slice_ in slices]) return result
['def', '_slice_mostly_sorted', '(', 'array', ',', 'keep', ',', 'rest', ',', 'ind', '=', 'None', ')', ':', 'if', 'ind', 'is', 'None', ':', 'ind', '=', 'np', '.', 'arange', '(', 'len', '(', 'array', ')', ')', 'idx', '=', 'np', '.', 'argsort', '(', 'np', '.', 'concatenate', '(', '[', 'keep', ',', 'ind', '[', 'rest', ']', ']', ')', ')', 'slices', '=', '[', ']', 'if', 'keep', '[', '0', ']', '>', '0', ':', '# avoid creating empty slices', 'slices', '.', 'append', '(', 'slice', '(', 'None', ',', 'keep', '[', '0', ']', ')', ')', 'slices', '.', 'append', '(', '[', 'keep', '[', '0', ']', ']', ')', 'windows', '=', 'zip', '(', 'keep', '[', ':', '-', '1', ']', ',', 'keep', '[', '1', ':', ']', ')', 'for', 'l', ',', 'r', 'in', 'windows', ':', 'if', 'r', '>', 'l', '+', '1', ':', '# avoid creating empty slices', 'slices', '.', 'append', '(', 'slice', '(', 'l', '+', '1', ',', 'r', ')', ')', 'slices', '.', 'append', '(', '[', 'r', ']', ')', 'if', 'keep', '[', '-', '1', ']', '<', 'len', '(', 'array', ')', '-', '1', ':', '# avoid creating empty slices', 'slices', '.', 'append', '(', 'slice', '(', 'keep', '[', '-', '1', ']', '+', '1', ',', 'None', ')', ')', 'result', '=', 'da', '.', 'concatenate', '(', '[', 'array', '[', 'idx', '[', 'slice_', ']', ']', 'for', 'slice_', 'in', 'slices', ']', ')', 'return', 'result']
Slice dask array `array` that is almost entirely sorted already. We perform approximately `2 * len(keep)` slices on `array`. This is OK, since `keep` is small. Individually, each of these slices is entirely sorted. Parameters ---------- array : dask.array.Array keep : ndarray[Int] This must be sorted. rest : ndarray[Bool] ind : ndarray[Int], optional Returns ------- sliced : dask.array.Array
['Slice', 'dask', 'array', 'array', 'that', 'is', 'almost', 'entirely', 'sorted', 'already', '.']
train
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/spectral.py#L339-L376
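A worked example of _slice_mostly_sorted above: the input array is arranged the way the surrounding spectral-clustering code leaves it (the keep rows pulled to the front, the rest after), and the helper restores the original order with only a handful of slices. The import path follows the URL in this record, but the underscore marks it as a private helper, so treat this as an internal-detail demo.

import numpy as np
import dask.array as da
from dask_ml.cluster.spectral import _slice_mostly_sorted  # private helper

original = np.arange(10)
keep = np.array([2, 5, 7])      # sorted indices that were moved to the front
rest = np.ones(10, dtype=bool)
rest[keep] = False              # boolean mask selecting everything else

# Rearranged input: kept rows first, then the remainder in order.
rearranged = da.from_array(np.concatenate([original[keep], original[rest]]), chunks=5)

restored = _slice_mostly_sorted(rearranged, keep, rest)
print(restored.compute())       # [0 1 2 3 4 5 6 7 8 9]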
2,598
smarie/python-parsyfiles
parsyfiles/parsing_registries.py
AbstractParserCache.get_capabilities_by_type
def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]: """ For all types that are supported, lists all extensions that can be parsed into such a type. For each extension, provides the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: """ check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # List all types that can be parsed for typ in self.get_all_supported_types(): res[typ] = self.get_capabilities_for_type(typ, strict_type_matching) return res
python
def get_capabilities_by_type(self, strict_type_matching: bool = False) -> Dict[Type, Dict[str, Dict[str, Parser]]]: """ For all types that are supported, lists all extensions that can be parsed into such a type. For each extension, provides the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: """ check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # List all types that can be parsed for typ in self.get_all_supported_types(): res[typ] = self.get_capabilities_for_type(typ, strict_type_matching) return res
['def', 'get_capabilities_by_type', '(', 'self', ',', 'strict_type_matching', ':', 'bool', '=', 'False', ')', '->', 'Dict', '[', 'Type', ',', 'Dict', '[', 'str', ',', 'Dict', '[', 'str', ',', 'Parser', ']', ']', ']', ':', 'check_var', '(', 'strict_type_matching', ',', 'var_types', '=', 'bool', ',', 'var_name', '=', "'strict_matching'", ')', 'res', '=', 'dict', '(', ')', '# List all types that can be parsed', 'for', 'typ', 'in', 'self', '.', 'get_all_supported_types', '(', ')', ':', 'res', '[', 'typ', ']', '=', 'self', '.', 'get_capabilities_for_type', '(', 'typ', ',', 'strict_type_matching', ')', 'return', 'res']
For all types that are supported, lists all extensions that can be parsed into such a type. For each extension, provides the list of parsers supported. The order is "most pertinent first". This method is for monitoring and debugging, so we prefer not to rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return:
['For', 'all', 'types', 'that', 'are', 'supported', 'lists', 'all', 'extensions', 'that', 'can', 'be', 'parsed', 'into', 'such', 'a', 'type', '.', 'For', 'each', 'extension', 'provides', 'the', 'list', 'of', 'parsers', 'supported', '.', 'The', 'order', 'is', 'most', 'pertinent', 'first']
train
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L279-L300
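A usage sketch for get_capabilities_by_type above. It only needs an object implementing AbstractParserCache; the registry argument below stands in for whatever parser registry a parsyfiles root parser exposes, so the variable name is illustrative rather than a documented attribute.

def print_capabilities(registry):
    # registry: any object implementing AbstractParserCache.
    capabilities = registry.get_capabilities_by_type(strict_type_matching=False)
    for typ, by_ext in capabilities.items():
        print(typ)
        for ext, parsers in by_ext.items():
            print('    {0} -> {1}'.format(ext, parsers))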
2,599
erdc/RAPIDpy
RAPIDpy/gis/taudem.py
TauDEM.rasterToPolygon
def rasterToPolygon(raster_file, polygon_file): """ Converts watershed raster to polygon and then dissolves it. It dissolves features based on the LINKNO attribute. """ log("Process: Raster to Polygon ...") time_start = datetime.utcnow() temp_polygon_file = \ "{0}_temp.shp".format( os.path.splitext(os.path.basename(polygon_file))[0]) GDALGrid(raster_file).to_polygon(out_shapefile=temp_polygon_file, fieldname="LINKNO", self_mask=True) log("Time to convert to polygon: {0}" .format(datetime.utcnow()-time_start)) log("Dissolving ...") time_start_dissolve = datetime.utcnow() ogr_polygin_shapefile = ogr.Open(temp_polygon_file) ogr_polygon_shapefile_lyr = ogr_polygin_shapefile.GetLayer() number_of_features = ogr_polygon_shapefile_lyr.GetFeatureCount() polygon_rivid_list = np.zeros(number_of_features, dtype=np.int32) for feature_idx, catchment_feature in \ enumerate(ogr_polygon_shapefile_lyr): polygon_rivid_list[feature_idx] = \ catchment_feature.GetField('LINKNO') shp_drv = ogr.GetDriverByName('ESRI Shapefile') # Remove output shapefile if it already exists if os.path.exists(polygon_file): shp_drv.DeleteDataSource(polygon_file) dissolve_shapefile = shp_drv.CreateDataSource(polygon_file) dissolve_layer = \ dissolve_shapefile.CreateLayer( '', ogr_polygon_shapefile_lyr.GetSpatialRef(), ogr.wkbPolygon) dissolve_layer.CreateField(ogr.FieldDefn('LINKNO', ogr.OFTInteger)) dissolve_layer_defn = dissolve_layer.GetLayerDefn() for unique_rivid in np.unique(polygon_rivid_list): # get indices where it is in the polygon feature_indices = np.where(polygon_rivid_list == unique_rivid)[0] new_feat = ogr.Feature(dissolve_layer_defn) new_feat.SetField('LINKNO', int(unique_rivid)) if len(feature_indices) == 1: # write feature to file feature = \ ogr_polygon_shapefile_lyr.GetFeature(feature_indices[0]) new_feat.SetGeometry(feature.GetGeometryRef()) else: # dissolve dissolve_poly_list = [] for feature_index in feature_indices: feature = \ ogr_polygon_shapefile_lyr.GetFeature(feature_index) feat_geom = feature.GetGeometryRef() dissolve_poly_list.append( shapely_loads(feat_geom.ExportToWkb())) dissolve_polygon = cascaded_union(dissolve_poly_list) new_feat.SetGeometry( ogr.CreateGeometryFromWkb(dissolve_polygon.wkb)) dissolve_layer.CreateFeature(new_feat) # clean up shp_drv.DeleteDataSource(temp_polygon_file) log("Time to dissolve: {0}".format(datetime.utcnow() - time_start_dissolve)) log("Total time to convert: {0}".format(datetime.utcnow() - time_start))
python
def rasterToPolygon(raster_file, polygon_file): """ Converts watershed raster to polygon and then dissolves it. It dissolves features based on the LINKNO attribute. """ log("Process: Raster to Polygon ...") time_start = datetime.utcnow() temp_polygon_file = \ "{0}_temp.shp".format( os.path.splitext(os.path.basename(polygon_file))[0]) GDALGrid(raster_file).to_polygon(out_shapefile=temp_polygon_file, fieldname="LINKNO", self_mask=True) log("Time to convert to polygon: {0}" .format(datetime.utcnow()-time_start)) log("Dissolving ...") time_start_dissolve = datetime.utcnow() ogr_polygin_shapefile = ogr.Open(temp_polygon_file) ogr_polygon_shapefile_lyr = ogr_polygin_shapefile.GetLayer() number_of_features = ogr_polygon_shapefile_lyr.GetFeatureCount() polygon_rivid_list = np.zeros(number_of_features, dtype=np.int32) for feature_idx, catchment_feature in \ enumerate(ogr_polygon_shapefile_lyr): polygon_rivid_list[feature_idx] = \ catchment_feature.GetField('LINKNO') shp_drv = ogr.GetDriverByName('ESRI Shapefile') # Remove output shapefile if it already exists if os.path.exists(polygon_file): shp_drv.DeleteDataSource(polygon_file) dissolve_shapefile = shp_drv.CreateDataSource(polygon_file) dissolve_layer = \ dissolve_shapefile.CreateLayer( '', ogr_polygon_shapefile_lyr.GetSpatialRef(), ogr.wkbPolygon) dissolve_layer.CreateField(ogr.FieldDefn('LINKNO', ogr.OFTInteger)) dissolve_layer_defn = dissolve_layer.GetLayerDefn() for unique_rivid in np.unique(polygon_rivid_list): # get indices where it is in the polygon feature_indices = np.where(polygon_rivid_list == unique_rivid)[0] new_feat = ogr.Feature(dissolve_layer_defn) new_feat.SetField('LINKNO', int(unique_rivid)) if len(feature_indices) == 1: # write feature to file feature = \ ogr_polygon_shapefile_lyr.GetFeature(feature_indices[0]) new_feat.SetGeometry(feature.GetGeometryRef()) else: # dissolve dissolve_poly_list = [] for feature_index in feature_indices: feature = \ ogr_polygon_shapefile_lyr.GetFeature(feature_index) feat_geom = feature.GetGeometryRef() dissolve_poly_list.append( shapely_loads(feat_geom.ExportToWkb())) dissolve_polygon = cascaded_union(dissolve_poly_list) new_feat.SetGeometry( ogr.CreateGeometryFromWkb(dissolve_polygon.wkb)) dissolve_layer.CreateFeature(new_feat) # clean up shp_drv.DeleteDataSource(temp_polygon_file) log("Time to dissolve: {0}".format(datetime.utcnow() - time_start_dissolve)) log("Total time to convert: {0}".format(datetime.utcnow() - time_start))
['def', 'rasterToPolygon', '(', 'raster_file', ',', 'polygon_file', ')', ':', 'log', '(', '"Process: Raster to Polygon ..."', ')', 'time_start', '=', 'datetime', '.', 'utcnow', '(', ')', 'temp_polygon_file', '=', '"{0}_temp.shp"', '.', 'format', '(', 'os', '.', 'path', '.', 'splitext', '(', 'os', '.', 'path', '.', 'basename', '(', 'polygon_file', ')', ')', '[', '0', ']', ')', 'GDALGrid', '(', 'raster_file', ')', '.', 'to_polygon', '(', 'out_shapefile', '=', 'temp_polygon_file', ',', 'fieldname', '=', '"LINKNO"', ',', 'self_mask', '=', 'True', ')', 'log', '(', '"Time to convert to polygon: {0}"', '.', 'format', '(', 'datetime', '.', 'utcnow', '(', ')', '-', 'time_start', ')', ')', 'log', '(', '"Dissolving ..."', ')', 'time_start_dissolve', '=', 'datetime', '.', 'utcnow', '(', ')', 'ogr_polygin_shapefile', '=', 'ogr', '.', 'Open', '(', 'temp_polygon_file', ')', 'ogr_polygon_shapefile_lyr', '=', 'ogr_polygin_shapefile', '.', 'GetLayer', '(', ')', 'number_of_features', '=', 'ogr_polygon_shapefile_lyr', '.', 'GetFeatureCount', '(', ')', 'polygon_rivid_list', '=', 'np', '.', 'zeros', '(', 'number_of_features', ',', 'dtype', '=', 'np', '.', 'int32', ')', 'for', 'feature_idx', ',', 'catchment_feature', 'in', 'enumerate', '(', 'ogr_polygon_shapefile_lyr', ')', ':', 'polygon_rivid_list', '[', 'feature_idx', ']', '=', 'catchment_feature', '.', 'GetField', '(', "'LINKNO'", ')', 'shp_drv', '=', 'ogr', '.', 'GetDriverByName', '(', "'ESRI Shapefile'", ')', '# Remove output shapefile if it already exists', 'if', 'os', '.', 'path', '.', 'exists', '(', 'polygon_file', ')', ':', 'shp_drv', '.', 'DeleteDataSource', '(', 'polygon_file', ')', 'dissolve_shapefile', '=', 'shp_drv', '.', 'CreateDataSource', '(', 'polygon_file', ')', 'dissolve_layer', '=', 'dissolve_shapefile', '.', 'CreateLayer', '(', "''", ',', 'ogr_polygon_shapefile_lyr', '.', 'GetSpatialRef', '(', ')', ',', 'ogr', '.', 'wkbPolygon', ')', 'dissolve_layer', '.', 'CreateField', '(', 'ogr', '.', 'FieldDefn', '(', "'LINKNO'", ',', 'ogr', '.', 'OFTInteger', ')', ')', 'dissolve_layer_defn', '=', 'dissolve_layer', '.', 'GetLayerDefn', '(', ')', 'for', 'unique_rivid', 'in', 'np', '.', 'unique', '(', 'polygon_rivid_list', ')', ':', '# get indices where it is in the polygon', 'feature_indices', '=', 'np', '.', 'where', '(', 'polygon_rivid_list', '==', 'unique_rivid', ')', '[', '0', ']', 'new_feat', '=', 'ogr', '.', 'Feature', '(', 'dissolve_layer_defn', ')', 'new_feat', '.', 'SetField', '(', "'LINKNO'", ',', 'int', '(', 'unique_rivid', ')', ')', 'if', 'len', '(', 'feature_indices', ')', '==', '1', ':', '# write feature to file', 'feature', '=', 'ogr_polygon_shapefile_lyr', '.', 'GetFeature', '(', 'feature_indices', '[', '0', ']', ')', 'new_feat', '.', 'SetGeometry', '(', 'feature', '.', 'GetGeometryRef', '(', ')', ')', 'else', ':', '# dissolve', 'dissolve_poly_list', '=', '[', ']', 'for', 'feature_index', 'in', 'feature_indices', ':', 'feature', '=', 'ogr_polygon_shapefile_lyr', '.', 'GetFeature', '(', 'feature_index', ')', 'feat_geom', '=', 'feature', '.', 'GetGeometryRef', '(', ')', 'dissolve_poly_list', '.', 'append', '(', 'shapely_loads', '(', 'feat_geom', '.', 'ExportToWkb', '(', ')', ')', ')', 'dissolve_polygon', '=', 'cascaded_union', '(', 'dissolve_poly_list', ')', 'new_feat', '.', 'SetGeometry', '(', 'ogr', '.', 'CreateGeometryFromWkb', '(', 'dissolve_polygon', '.', 'wkb', ')', ')', 'dissolve_layer', '.', 'CreateFeature', '(', 'new_feat', ')', '# clean up', 'shp_drv', '.', 'DeleteDataSource', '(', 'temp_polygon_file', ')', 'log', '(', '"Time to 
dissolve: {0}"', '.', 'format', '(', 'datetime', '.', 'utcnow', '(', ')', '-', 'time_start_dissolve', ')', ')', 'log', '(', '"Total time to convert: {0}"', '.', 'format', '(', 'datetime', '.', 'utcnow', '(', ')', '-', 'time_start', ')', ')']
Converts watershed raster to polygon and then dissolves it. It dissolves features based on the LINKNO attribute.
['Converts', 'watershed', 'raster', 'to', 'polygon', 'and', 'then', 'dissolves', 'it', '.', 'It', 'dissolves', 'features', 'based', 'on', 'the', 'LINKNO', 'attribute', '.']
train
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/gis/taudem.py#L453-L525
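A usage sketch for rasterToPolygon above. It is written without self, so it can be called straight off the TauDEM class; the file names are placeholders, and GDAL/OGR plus shapely must be importable for the module to load.

from RAPIDpy.gis.taudem import TauDEM

# Convert a watershed grid to a dissolved catchment shapefile keyed by LINKNO.
# Both paths are placeholders for real files.
# TauDEM.rasterToPolygon('gage_watershed.tif', 'catchment.shp')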