Dataset columns (one record per function):
    repository_name            string, 7 to 55 chars
    func_path_in_repository    string, 4 to 223 chars
    func_name                  string, 1 to 134 chars
    whole_func_string          string, 75 to 104k chars
    language                   string, 1 distinct value
    func_code_string           string, 75 to 104k chars
    func_code_tokens           sequence, 19 to 28.4k items
    func_documentation_string  string, 1 to 46.9k chars
    func_documentation_tokens  sequence, 1 to 1.97k items
    split_name                 string, 1 distinct value
    func_code_url              string, 87 to 315 chars

repository_name: MonashBI/arcana
func_path_in_repository: arcana/processor/base.py
func_name: Processor._to_process
whole_func_string:

def _to_process(self, pipeline, required_outputs, prqs_to_process_array,
                to_skip_array, filter_array, subject_inds, visit_inds,
                force):
    """
    Check whether the outputs of the pipeline are present in all sessions
    in the project repository and were generated with matching provenance.
    Return a 2D boolean array (subjects: rows, visits: cols) with the
    sessions to process marked True.

    Parameters
    ----------
    pipeline : Pipeline
        The pipeline to determine the sessions to process
    required_outputs : set[str]
        The names of the pipeline outputs that are required. If None all
        are deemed to be required
    prqs_to_process_array : 2-D numpy.array[bool]
        A two-dimensional boolean array, where rows and columns correspond
        to subjects and visits in the repository tree. True values
        represent subject/visit ID pairs that will be (re)processed in
        prerequisite pipelines and therefore need to be included in the
        returned array.
    to_skip_array : 2-D numpy.array[bool]
        Similar to prqs_to_process_array, except it denotes the
        subject/visits that are to be skipped due to missing inputs
    filter_array : 2-D numpy.array[bool]
        A two-dimensional boolean array, where rows and columns correspond
        to subjects and visits in the repository tree. True values
        represent subject/visit ID pairs to include in the current round
        of processing. Note that if the 'force' flag is not set, sessions
        won't be reprocessed unless the parameters and pipeline version
        saved in the provenance don't match those of the given pipeline.
    subject_inds : dict[str, int]
        Mapping from subject ID to index in filter|to_process arrays
    visit_inds : dict[str, int]
        Mapping from visit ID to index in filter|to_process arrays
    force : bool
        Whether to force reprocessing of all (filtered) sessions or not.
        Note that if 'force' is true we can't just return the filter
        array as it might be dilated by summary outputs (i.e. of
        frequency 'per_visit', 'per_subject' or 'per_study'). So we still
        loop through all outputs and treat them like they don't exist

    Returns
    -------
    to_process_array : 2-D numpy.array[bool]
        A two-dimensional boolean array, where rows correspond to
        subjects and columns correspond to visits in the repository. True
        values represent subject/visit ID pairs to run the pipeline for
    """
    # Reference the study tree in a local variable for convenience
    tree = self.study.tree
    # Check to see if the pipeline has any low-frequency outputs, because
    # if not then each session can be processed independently. Otherwise,
    # the "session matrix" (as defined by subject_ids and visit_ids
    # passed to the Study class) needs to be complete, i.e. a session
    # exists (with the full complement of required inputs) for each
    # subject/visit ID pair.
    summary_outputs = [
        o.name for o in pipeline.outputs if o.frequency != 'per_session']
    # Set of frequencies present in pipeline outputs
    output_freqs = pipeline.output_frequencies
    if summary_outputs:
        if list(tree.incomplete_subjects):
            raise ArcanaUsageError(
                "Can't process '{}' pipeline as it has low frequency "
                "outputs (i.e. outputs that aren't of 'per_session' "
                "frequency) ({}) and subjects ({}) that are missing one "
                "or more visits ({}). Please restrict the subject/visit "
                "IDs in the study __init__ to continue the analysis"
                .format(
                    self.name,
                    ', '.join(summary_outputs),
                    ', '.join(s.id for s in tree.incomplete_subjects),
                    ', '.join(v.id for v in tree.incomplete_visits)))

    def array_inds(x):
        """
        Get the array index corresponding to an item or node. If it
        doesn't have a subject and/or visit ID (i.e. frequency !=
        'per_session') then the first index in the row/column is
        selected, as it will be dilated to the whole row/column later
        """
        return (subject_inds.get(x.subject_id, 0),
                visit_inds.get(x.visit_id, 0))

    # Initialise array to represent which sessions need to be reprocessed
    to_process_array = np.zeros((len(subject_inds), len(visit_inds)),
                                dtype=bool)
    # An array to mark outputs that have been altered outside of Arcana
    # and are therefore protected from over-writing
    to_protect_array = np.copy(to_process_array)
    # Mark the sessions that we should check to see if the configuration
    # saved in the provenance record matches that of the current pipeline
    to_check_array = np.copy(to_process_array)
    # As well as the sessions that need to be protected or skipped, keep
    # track of the items in those sessions that are protected or missing
    # for more informative error messages
    to_protect = defaultdict(list)
    to_skip = defaultdict(list)
    # Check data tree for missing inputs
    for input in pipeline.inputs:  # @ReservedAssignment
        # NB: Study inputs that don't have skip_missing set and have
        # missing data should raise an error before this point
        if input.skip_missing:
            for item in input.collection:
                if not item.exists:
                    to_skip_array[array_inds(item)] = True
                    to_skip[array_inds(item)].append(item)
    # Dilate the array over all iterators that are joined by the pipeline
    to_skip_array = self._dialate_array(to_skip_array, pipeline.joins)
    # Check data tree for missing required outputs
    for output in pipeline.outputs:
        # Check to see if output is required by downstream processing
        required = (required_outputs is None or
                    output.name in required_outputs)
        for item in output.collection:
            if item.exists:
                # Check whether the checksums recorded when the
                # derivative was generated by a previous run match those
                # of the current fileset. If not, we assume it has been
                # manually altered and therefore should not be overridden
                if item.checksums != item.recorded_checksums:
                    logger.warning(
                        "Checksums for {} do not match those recorded in "
                        "provenance. Assuming it has been manually "
                        "corrected outside of Arcana and will therefore "
                        "not overwrite. Please delete manually if this "
                        "is not intended".format(repr(item)))
                    to_protect_array[array_inds(item)] = True
                    to_protect[array_inds(item)].append(item)
                elif required:
                    if force:
                        to_process_array[array_inds(item)] = True
                    else:
                        to_check_array[array_inds(item)] = True
            elif required:
                to_process_array[array_inds(item)] = True
    # Filter sessions to process by those requested
    to_process_array *= filter_array
    to_check_array *= (filter_array * np.invert(to_process_array))
    if to_check_array.any() and self.prov_check:
        # Get the list of sessions, subjects, visits and tree objects to
        # check their provenance against that of the pipeline
        to_check = []
        if 'per_session' in output_freqs:
            to_check.extend(
                s for s in tree.sessions
                if to_check_array[subject_inds[s.subject_id],
                                  visit_inds[s.visit_id]])
        if 'per_subject' in output_freqs:
            # We can just test the first col of outputs_exist as rows
            # should be either all True or all False
            to_check.extend(s for s in tree.subjects
                            if to_check_array[subject_inds[s.id], 0])
        if 'per_visit' in output_freqs:
            # We can just test the first row of outputs_exist as cols
            # should be either all True or all False
            to_check.extend(v for v in tree.visits
                            if to_check_array[0, visit_inds[v.id]])
        if 'per_study' in output_freqs:
            to_check.append(tree)
        for node in to_check:
            # Generate the expected record from the current
            # pipeline/repository state
            requires_reprocess = False
            try:
                # Retrieve the record stored in the tree node
                record = node.record(pipeline.name, pipeline.study.name)
                expected_record = pipeline.expected_record(node)
                # Compare the record with the expected one
                mismatches = record.mismatches(expected_record,
                                               self.prov_check,
                                               self.prov_ignore)
                if mismatches:
                    msg = ("mismatch in provenance:\n{}\n Add mismatching "
                           "paths (delimited by '/') to 'prov_ignore' "
                           "argument of Processor to ignore"
                           .format(pformat(mismatches)))
                    requires_reprocess = True
            except ArcanaNameError:
                msg = "missing provenance record"
                requires_reprocess = False
                to_protect_array[array_inds(node)] = True
            except ArcanaDataNotDerivedYetError as e:
                msg = ("missing input '{}' and therefore cannot check "
                       "provenance".format(e.name))
                requires_reprocess = True
            if requires_reprocess:
                if self.reprocess:
                    to_process_array[array_inds(node)] = True
                    logger.info(
                        "Reprocessing {} with '{}' due to {}"
                        .format(node, pipeline.name, msg))
                else:
                    raise ArcanaReprocessException(
                        "Cannot use derivatives for '{}' pipeline stored "
                        "in {} due to {}, set 'reprocess' "
                        "flag of Processor to overwrite".format(
                            pipeline.name, node, msg))

    def inds_to_ids(inds):
        subject_id = next(k for k, v in subject_inds.items()
                          if v == inds[0])
        visit_id = next(k for k, v in visit_inds.items() if v == inds[1])
        return (subject_id, visit_id)

    # Dilate the to-process array
    to_process_array = self._dialate_array(to_process_array,
                                           pipeline.joins)
    intersection = to_process_array * to_skip_array
    if intersection.any():
        missing_inputs_msg = ''
        missing_prq_inputs_msg = ''
        for sess_inds in zip(*np.nonzero(intersection)):
            subject_id, visit_id = inds_to_ids(sess_inds)
            if sess_inds in to_skip:
                missing_inputs_msg += (
                    "\n(subject={}, visit={}): [{}]".format(
                        subject_id, visit_id,
                        ', '.join(i.name for i in to_skip[sess_inds])))
            else:
                missing_prq_inputs_msg += (
                    "\n(subject={}, visit={})".format(subject_id,
                                                      visit_id))
        warning_msg = ("Skipping the following nodes for '{}' pipeline "
                       "due to ".format(pipeline))
        if missing_inputs_msg:
            warning_msg += "missing inputs:{}".format(missing_inputs_msg)
        if missing_prq_inputs_msg:
            if missing_inputs_msg:
                warning_msg += "\nand the following due to "
            warning_msg += ("missing inputs to prerequisite pipelines:{}"
                            .format(missing_prq_inputs_msg))
        logger.warning(warning_msg)
    # Remove nodes that are to be skipped due to missing inputs
    to_process_array *= np.invert(to_skip_array)
    # Check for conflicts between nodes to process and nodes to protect
    conflicting = to_process_array * to_protect_array
    if conflicting.any():
        error_msg = ''
        for sess_inds in zip(*np.nonzero(conflicting)):
            subject_id, visit_id = inds_to_ids(sess_inds)
            if required_outputs is None:
                conflict_outputs = pipeline.outputs
            else:
                conflict_outputs = [pipeline.study.bound_spec(r)
                                    for r in required_outputs]
            items = [
                o.collection.item(subject_id=subject_id,
                                  visit_id=visit_id)
                for o in conflict_outputs]
            missing = [i for i in items
                       if i not in to_protect[sess_inds]]
            error_msg += (
                "\n({}, {}): protected=[{}], missing=[{}]".format(
                    subject_id, visit_id,
                    ', '.join(i.name for i in to_protect[sess_inds]),
                    ', '.join(i.name for i in missing)))
        raise ArcanaProtectedOutputConflictError(
            "Cannot process {} as there are nodes with both protected "
            "outputs (ones modified externally to Arcana) and missing "
            "required outputs. Either delete protected outputs or "
            "provide missing required outputs to continue:{}".format(
                pipeline, error_msg))
    # Add in any prerequisites to process that aren't explicitly
    # protected
    to_process_array |= (prqs_to_process_array * filter_array *
                         np.invert(to_protect_array))
    to_process_array = self._dialate_array(to_process_array,
                                           pipeline.joins)
    return to_process_array, to_protect_array, to_skip_array
language: python
split_name: train
func_code_url: https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/base.py#L660-L933
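
The final mask combination in _to_process is plain boolean-array algebra.
The toy replay below (2 subjects x 2 visits, made-up values) shows just
those two steps; the variable names mirror the function's locals, but
nothing else here is Arcana's API.

import numpy as np

filter_array = np.array([[True, True], [True, False]])
to_process_array = np.array([[True, False], [False, False]])
to_protect_array = np.array([[False, True], [False, False]])
prqs_to_process_array = np.array([[False, True], [True, True]])

# Keep only the sessions requested in this round of processing
to_process_array &= filter_array
# Pull in prerequisite sessions, except where outputs are protected
to_process_array |= (prqs_to_process_array & filter_array &
                     ~to_protect_array)
print(to_process_array)
# [[ True False]
#  [ True False]]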

repository_name: MonashBI/arcana
func_path_in_repository: arcana/processor/base.py
func_name: Processor._dialate_array
whole_func_string:

def _dialate_array(self, array, iterators):
    """
    'Dilates' a to_process/to_protect array to include all subjects
    and/or visits if the pipeline contains any joins over the
    corresponding iterators.

    Parameters
    ----------
    array : np.array[M, N]
        The array to potentially dilate
    iterators : set[str]
        The iterators that the array should be dilated for

    Returns
    -------
    dialated : np.array[M, N]
        The dilated array
    """
    if not iterators:
        return array
    dialated = np.copy(array)
    if self.study.SUBJECT_ID in iterators:
        # If we join over subjects we should include all subjects for
        # every visit we want to process
        dialated[:, dialated.any(axis=0)] = True
    if self.study.VISIT_ID in iterators:
        # If we join over visits we should include all visits for every
        # subject we want to process
        dialated[dialated.any(axis=1), :] = True
    return dialated
language: python
split_name: train
func_code_url: https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/base.py#L935-L964
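
The dilation rules are easiest to see on a small array. A standalone
sketch, with plain boolean flags standing in for the study's SUBJECT_ID
and VISIT_ID iterator checks:

import numpy as np

def dilate(array, join_subjects=False, join_visits=False):
    dilated = np.copy(array)
    if join_subjects:
        # Joining over subjects marks every subject (row) of any visit
        # (column) that already has a session marked
        dilated[:, dilated.any(axis=0)] = True
    if join_visits:
        # Joining over visits marks every visit (column) of any subject
        # (row) that already has a session marked
        dilated[dilated.any(axis=1), :] = True
    return dilated

marked = np.array([[True, False],
                   [False, False]])
print(dilate(marked, join_subjects=True))
# [[ True False]
#  [ True False]]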

repository_name: MonashBI/arcana
func_path_in_repository: arcana/processor/slurm.py
func_name: ArcanaSlurmGraphPlugin._get_args
whole_func_string:

def _get_args(self, node, keywords):
    """
    Intercept calls to get template and return our own node-specific
    template
    """
    args = super(ArcanaSlurmGraphPlugin, self)._get_args(node, keywords)
    # Substitute the template arg with the node-specific one
    new_args = []
    for name, arg in zip(keywords, args):
        if name == 'template':
            new_args.append(self._processor.slurm_template(node))
        else:
            new_args.append(arg)
    return tuple(new_args)
language: python
split_name: train
func_code_url: https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/slurm.py#L16-L30
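
The substitution itself is a lockstep walk over (keyword, value) pairs.
A minimal sketch with illustrative values (these are not real nipype
plugin arguments):

def substitute(keywords, args, name, replacement):
    # Swap in the replacement wherever the keyword matches
    return tuple(replacement if kw == name else arg
                 for kw, arg in zip(keywords, args))

print(substitute(('template', 'qsub_args'),
                 ('#SBATCH generic', '-n 1'),
                 'template', '#SBATCH node-specific'))
# ('#SBATCH node-specific', '-n 1')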

repository_name: MonashBI/arcana
func_path_in_repository: arcana/processor/slurm.py
func_name: SlurmProc.wall_time_str
whole_func_string:

def wall_time_str(self, wall_time):
    """
    Returns the wall time in the format required for the sbatch script
    """
    days = int(wall_time // 1440)
    hours = int((wall_time - days * 1440) // 60)
    minutes = int(math.floor(wall_time - days * 1440 - hours * 60))
    seconds = int((wall_time - math.floor(wall_time)) * 60)
    return "{}-{:0>2}:{:0>2}:{:0>2}".format(days, hours, minutes, seconds)
language: python
split_name: train
func_code_url: https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/slurm.py#L139-L147
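
A quick standalone check of the arithmetic, assuming wall_time is given
in minutes and the fractional part is converted to seconds:

import math

def wall_time_str(wall_time):
    days = int(wall_time // 1440)
    hours = int((wall_time - days * 1440) // 60)
    minutes = int(math.floor(wall_time - days * 1440 - hours * 60))
    seconds = int((wall_time - math.floor(wall_time)) * 60)
    return "{}-{:0>2}:{:0>2}:{:0>2}".format(days, hours, minutes, seconds)

print(wall_time_str(1530.5))  # 1-01:30:30 (1 day, 1 h, 30 min, 30 s)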

repository_name: gwww/elkm1
func_path_in_repository: elkm1_lib/panel.py
func_name: Panel.sync
whole_func_string:

def sync(self):
    """Retrieve panel information from ElkM1"""
    self._elk.add_handler('VN', self._vn_handler)
    self._elk.add_handler('XK', self._xk_handler)
    self._elk.add_handler('RP', self._rp_handler)
    self._elk.add_handler('IE', self._elk.call_sync_handlers)
    self._elk.add_handler('SS', self._ss_handler)
    self._elk.send(vn_encode())
    self._elk.send(lw_encode())
    self._elk.send(ss_encode())
language: python
split_name: train
func_code_url: https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/panel.py#L19-L28
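
sync() follows a register-then-query pattern: callbacks are keyed by a
two-letter Elk message type, and the matching requests are sent once the
handlers are in place. A toy registry sketch (this Elk class is a
stand-in for illustration, not the library's implementation):

class Elk:
    def __init__(self):
        self._handlers = {}

    def add_handler(self, msg_type, handler):
        self._handlers[msg_type] = handler

    def dispatch(self, msg_type, payload):
        # Stand-in for the library decoding an incoming message
        self._handlers[msg_type](payload)

elk = Elk()
elk.add_handler('VN', lambda payload: print('firmware version:', payload))
elk.dispatch('VN', '005 2 9')  # prints: firmware version: 005 2 9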

repository_name: gwww/elkm1
func_path_in_repository: elkm1_lib/thermostats.py
func_name: Thermostat.set
whole_func_string:

def set(self, element_to_set, value):
    """(Helper) Set thermostat"""
    self._elk.send(ts_encode(self.index, value, element_to_set))
language: python
split_name: train
func_code_url: https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/thermostats.py#L19-L21

repository_name: MonashBI/arcana
func_path_in_repository: arcana/data/base.py
func_name: BaseData.renamed
whole_func_string:

def renamed(self, name):
    """
    Duplicate the datum and rename it
    """
    duplicate = copy(self)
    duplicate._name = name
    return duplicate
language: python
split_name: train
func_code_url: https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/base.py#L72-L78
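
The idiom is a shallow copy plus a name override, so the duplicate shares
all other state with the original. A minimal standalone version (Datum is
a stand-in class, not Arcana's):

from copy import copy

class Datum:
    def __init__(self, name):
        self._name = name

    def renamed(self, name):
        duplicate = copy(self)
        duplicate._name = name
        return duplicate

a = Datum('t1')
b = a.renamed('t1_preproc')
print(a._name, b._name)  # t1 t1_preproc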

repository_name: MonashBI/arcana
func_path_in_repository: arcana/environment/requirement/base.py
func_name: Version.compare
whole_func_string:

def compare(self, other):
    """
    Compares the version with another

    Parameters
    ----------
    other : Version
        The version to compare to
    """
    if self._req != other._req:
        raise ArcanaUsageError(
            "Can't compare versions of different requirements {} and {}"
            .format(self._req, other._req))
    # Compare main sequence
    if self._seq < other._seq:
        return -1
    elif self._seq > other._seq:
        return 1
    # If the main sequence is equal, check the prerelease. If a
    # prerelease is None then it is a full release, which is greater
    # than a prerelease, so we just assign it 'z' (which is greater than
    # 'a', 'b' and 'rc')
    s = self._prerelease if self._prerelease is not None else ('z',)
    o = other._prerelease if other._prerelease is not None else ('z',)
    if s < o:
        return -1
    if s > o:
        return 1
    # If both main sequence and prereleases are equal, compare the post
    # release
    s = self._post if self._post is not None else 0
    o = other._post if other._post is not None else 0
    if s < o:
        return -1
    if s > o:
        return 1
    # If both main sequence and prereleases are equal, compare the
    # development release
    s = self._dev if self._dev is not None else 0
    o = other._dev if other._dev is not None else 0
    if s < o:
        return -1
    if s > o:
        return 1
    assert self == other
    return 0
language: python
split_name: train
func_code_url: https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/requirement/base.py#L108-L151
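
The ('z',) substitution works because Python orders tuples of strings
lexicographically, so a full release sorts after every recognised
prerelease stage:

stages = [('a', 1), ('b', 2), ('rc', 1), ('z',)]
print(sorted(stages) == stages)  # True: alpha < beta < rc < full release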

repository_name: MonashBI/arcana
func_path_in_repository: arcana/environment/requirement/base.py
func_name: Version.parse
whole_func_string:

def parse(self, version):
    """
    Splits a typical version string (e.g. <MAJOR>.<MINOR>.<MICRO>) into
    a tuple that can be sorted properly. Ignores all leading and
    trailing characters by using a regex search (instead of match) so as
    to pick the version string out of a block of text.

    Parameters
    ----------
    version : str | int | float | tuple(int)
        The string containing the version numbers, or alternatively an
        integer, float (number after decimal is interpreted as minor
        ver), or tuple|list containing the version sequence.

    Returns
    -------
    sequence : tuple(int | str)
        A tuple containing the main sequence of the version, e.g.
        <MAJOR>.<MINOR>.<MICRO>
    prerelease : 2-tuple(str, int) | None
        A 2-tuple containing the type of prerelease ('a' - alpha, 'b' -
        beta, or 'rc' - release-candidate) and the number of the
        prerelease
    post : int | None
        The number of the post version
    dev : int | None
        The number of the development version
    """
    # Check to see if version is not a string but rather another type
    # that can be interpreted as a version
    if isinstance(version, int):
        return (version,), None, None, None
    elif isinstance(version, (tuple, list)):
        return tuple(int(i) for i in version), None, None, None
    elif isinstance(version, float):
        major = math.floor(version)
        minor = version - major
        return (major, minor), None, None, None
    match = self.regex.search(version)
    if match is None:
        raise ArcanaVersionNotDetectableError(
            "Could not parse version string {} as {}. Regex ({}) did not "
            "match any sub-string".format(version, type(self).__name__,
                                          self.regex.pattern))
    sequence = []
    prerelease = None
    dev = None
    post = None
    for part in match.group(1).split(self.delimeter):
        if part.startswith('dev'):
            dev = int(part[len('dev'):])
        elif part.startswith('post'):
            post = int(part[len('post'):])
        else:
            # Split on non-numeric parts of the version string so that
            # we can detect the prerelease
            sub_parts = re.split(r'([^\d]+)', part)
            if sub_parts[0]:
                try:
                    seq_part = int(sub_parts[0])
                except ValueError:
                    seq_part = sub_parts[0]
                sequence.append(seq_part)
            if len(sub_parts) > 1:
                stage = sub_parts[1]
                try:
                    pr_ver = int(sub_parts[2])
                except ValueError:
                    raise ArcanaVersionNotDetectableError(
                        "Could not parse version string {} as {}. "
                        "Did not recognise pre-release version {}"
                        .format(version, type(self).__name__,
                                sub_parts[2]))
                stage = stage.strip('-_').lower()
                if not stage:
                    # No prerelease info, assume a dev version
                    assert dev is None
                    dev = pr_ver
                    continue
                if 'alpha'.startswith(stage):
                    stage = 'a'
                elif 'beta'.startswith(stage):
                    stage = 'b'
                elif stage == 'rc' or stage == 'release-candidate':
                    stage = 'rc'
                else:
                    raise ArcanaVersionNotDetectableError(
                        "Could not parse version string {} as {}. "
                        "Did not recognise pre-release stage {}"
                        .format(version, type(self).__name__, stage))
                prerelease = (stage, pr_ver)
    return tuple(sequence), prerelease, post, dev
language: python
def parse(self, version): """ Splits a typical version string (e.g. <MAJOR>.<MINOR>.<MICRO>) into a tuple that can be sorted properly. Ignores all leading and trailing characters by using a regex search (instead of match) so as to pick the version string out of a block of text. Parameters ---------- version : str | int | float | tuple(int) The string containing the version numbers, or alternatively an integer, float (number after decimal is interpreted as minor ver), or tuple|list containing the version sequence. Returns ------- sequence : tuple(int | str) A tuple containing the main sequence of the version, e.g. <MAJOR>.<MINOR>.<MICRO> prerelease : 2-tuple(str, int) | None A 2-tuple containing the type of prerelease ('a' - alpha, 'b' - beta, or 'rc' - release-canditate) and the number of the prerelease post : int |None The number of the post version dev : int | None The number of the development version """ # Check to see if version is not a string but rather another type # that can be interpreted as a version if isinstance(version, int): return (version,), None, None, None elif isinstance(version, (tuple, list)): return tuple(int(i) for i in version), None, None, None elif isinstance(version, float): major = math.floor(version) minor = version - major return (major, minor), None, None, None match = self.regex.search(version) if match is None: raise ArcanaVersionNotDetectableError( "Could not parse version string {} as {}. Regex ({}) did not " "match any sub-string".format(version, type(self).__name__, self.regex.pattern)) sequence = [] prerelease = None dev = None post = None for part in match.group(1).split(self.delimeter): if part.startswith('dev'): dev = int(part[len('dev'):]) elif part.startswith('post'): post = int(part[len('post'):]) else: # Split on non-numeric parts of the version string so that we # can detect prerelease sub_parts = re.split('([^\d]+)', part) if sub_parts[0]: try: seq_part = int(sub_parts[0]) except ValueError: seq_part = sub_parts[0] sequence.append(seq_part) if len(sub_parts) > 1: stage = sub_parts[1] try: pr_ver = int(sub_parts[2]) except ValueError: raise ArcanaVersionNotDetectableError( "Could not parse version string {} as {}. " "Did not recognise pre-release version {}" .format(version, type(self).__name__, sub_parts[2])) stage = stage.strip('-_').lower() if not stage: # No prerelease info, assume a dev version assert dev is None dev = pr_ver continue if 'alpha'.startswith(stage): stage = 'a' elif 'beta'.startswith(stage): stage = 'b' elif stage == 'rc' or stage == 'release-canditate': stage = 'rc' else: raise ArcanaVersionNotDetectableError( "Could not parse version string {} as {}. " "Did not recognise pre-release stage {}" .format(version, type(self).__name__, stage)) prerelease = (stage, pr_ver) return tuple(sequence), prerelease, post, dev
[ "def", "parse", "(", "self", ",", "version", ")", ":", "# Check to see if version is not a string but rather another type", "# that can be interpreted as a version", "if", "isinstance", "(", "version", ",", "int", ")", ":", "return", "(", "version", ",", ")", ",", "None", ",", "None", ",", "None", "elif", "isinstance", "(", "version", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "tuple", "(", "int", "(", "i", ")", "for", "i", "in", "version", ")", ",", "None", ",", "None", ",", "None", "elif", "isinstance", "(", "version", ",", "float", ")", ":", "major", "=", "math", ".", "floor", "(", "version", ")", "minor", "=", "version", "-", "major", "return", "(", "major", ",", "minor", ")", ",", "None", ",", "None", ",", "None", "match", "=", "self", ".", "regex", ".", "search", "(", "version", ")", "if", "match", "is", "None", ":", "raise", "ArcanaVersionNotDetectableError", "(", "\"Could not parse version string {} as {}. Regex ({}) did not \"", "\"match any sub-string\"", ".", "format", "(", "version", ",", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "regex", ".", "pattern", ")", ")", "sequence", "=", "[", "]", "prerelease", "=", "None", "dev", "=", "None", "post", "=", "None", "for", "part", "in", "match", ".", "group", "(", "1", ")", ".", "split", "(", "self", ".", "delimeter", ")", ":", "if", "part", ".", "startswith", "(", "'dev'", ")", ":", "dev", "=", "int", "(", "part", "[", "len", "(", "'dev'", ")", ":", "]", ")", "elif", "part", ".", "startswith", "(", "'post'", ")", ":", "post", "=", "int", "(", "part", "[", "len", "(", "'post'", ")", ":", "]", ")", "else", ":", "# Split on non-numeric parts of the version string so that we", "# can detect prerelease", "sub_parts", "=", "re", ".", "split", "(", "'([^\\d]+)'", ",", "part", ")", "if", "sub_parts", "[", "0", "]", ":", "try", ":", "seq_part", "=", "int", "(", "sub_parts", "[", "0", "]", ")", "except", "ValueError", ":", "seq_part", "=", "sub_parts", "[", "0", "]", "sequence", ".", "append", "(", "seq_part", ")", "if", "len", "(", "sub_parts", ")", ">", "1", ":", "stage", "=", "sub_parts", "[", "1", "]", "try", ":", "pr_ver", "=", "int", "(", "sub_parts", "[", "2", "]", ")", "except", "ValueError", ":", "raise", "ArcanaVersionNotDetectableError", "(", "\"Could not parse version string {} as {}. \"", "\"Did not recognise pre-release version {}\"", ".", "format", "(", "version", ",", "type", "(", "self", ")", ".", "__name__", ",", "sub_parts", "[", "2", "]", ")", ")", "stage", "=", "stage", ".", "strip", "(", "'-_'", ")", ".", "lower", "(", ")", "if", "not", "stage", ":", "# No prerelease info, assume a dev version", "assert", "dev", "is", "None", "dev", "=", "pr_ver", "continue", "if", "'alpha'", ".", "startswith", "(", "stage", ")", ":", "stage", "=", "'a'", "elif", "'beta'", ".", "startswith", "(", "stage", ")", ":", "stage", "=", "'b'", "elif", "stage", "==", "'rc'", "or", "stage", "==", "'release-canditate'", ":", "stage", "=", "'rc'", "else", ":", "raise", "ArcanaVersionNotDetectableError", "(", "\"Could not parse version string {} as {}. \"", "\"Did not recognise pre-release stage {}\"", ".", "format", "(", "version", ",", "type", "(", "self", ")", ".", "__name__", ",", "stage", ")", ")", "prerelease", "=", "(", "stage", ",", "pr_ver", ")", "return", "tuple", "(", "sequence", ")", ",", "prerelease", ",", "post", ",", "dev" ]
Splits a typical version string (e.g. <MAJOR>.<MINOR>.<MICRO>) into a tuple that can be sorted properly. Ignores all leading and trailing characters by using a regex search (instead of match) so as to pick the version string out of a block of text. Parameters ---------- version : str | int | float | tuple(int) The string containing the version numbers, or alternatively an integer, float (number after decimal is interpreted as minor ver), or tuple|list containing the version sequence. Returns ------- sequence : tuple(int | str) A tuple containing the main sequence of the version, e.g. <MAJOR>.<MINOR>.<MICRO> prerelease : 2-tuple(str, int) | None A 2-tuple containing the type of prerelease ('a' - alpha, 'b' - beta, or 'rc' - release-candidate) and the number of the prerelease post : int | None The number of the post version dev : int | None The number of the development version
[ "Splits", "a", "typical", "version", "string", "(", "e", ".", "g", ".", "<MAJOR", ">", ".", "<MINOR", ">", ".", "<MICRO", ">", ")", "into", "a", "tuple", "that", "can", "be", "sorted", "properly", ".", "Ignores", "all", "leading", "and", "trailing", "characters", "by", "using", "a", "regex", "search", "(", "instead", "of", "match", ")", "so", "as", "to", "pick", "the", "version", "string", "out", "of", "a", "block", "of", "text", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/requirement/base.py#L175-L265
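Editor's note: the heart of the prerelease detection in the record above is the capturing re.split over non-numeric runs. A minimal standalone sketch of just that step, using plain re with an illustrative version string (no arcana classes, no instance regex/delimiter):

import re

# A capturing group keeps the non-numeric separator in the result list,
# so '3rc4' splits into sequence part, stage and prerelease number.
for part in '1.2.3rc4'.split('.'):
    sub_parts = re.split(r'([^\d]+)', part)
    print(part, '->', sub_parts)
# '3rc4' -> ['3', 'rc', '4'], i.e. sequence part 3, stage 'rc', prerelease 4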
MonashBI/arcana
arcana/environment/requirement/base.py
Version.within
def within(self, version): """ A single version can also be interpreted as an open range (i.e. no maximum version) """ if not isinstance(version, Version): version = type(self._min_ver)(self._req, version) return version >= self
python
def within(self, version): """ A single version can also be interpreted as an open range (i.e. no maximum version) """ if not isinstance(version, Version): version = type(self._min_ver)(self._req, version) return version >= self
[ "def", "within", "(", "self", ",", "version", ")", ":", "if", "not", "isinstance", "(", "version", ",", "Version", ")", ":", "version", "=", "type", "(", "self", ".", "_min_ver", ")", "(", "self", ".", "_req", ",", "version", ")", "return", "version", ">=", "self" ]
A single version can also be interpreted as an open range (i.e. no maximum version)
[ "A", "single", "version", "can", "also", "be", "interpreted", "as", "an", "open", "range", "(", "i", ".", "e", ".", "no", "maximum", "version", ")" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/requirement/base.py#L267-L274
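Editor's note: within() treats a single version as an open range, which reduces to a simple >= comparison once versions sort as tuples. A minimal sketch with plain tuples standing in for Version objects (the function name and inputs are illustrative):

def within_open_range(min_ver, candidate):
    # A single version acts as a range with a minimum but no maximum
    return candidate >= min_ver

assert within_open_range((5, 0, 8), (5, 0, 10))   # at or above the minimum
assert not within_open_range((5, 0, 8), (4, 3))   # below the minimum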
MonashBI/arcana
arcana/environment/requirement/base.py
BaseRequirement.v
def v(self, version, max_version=None, **kwargs): """ Returns either a single requirement version or a requirement version range depending on whether two arguments are supplied or one Parameters ---------- version : str | Version Either a version of the requirement, or the first version in a range of acceptable versions """ if not isinstance(version, Version): version = self.version_cls(self, version, **kwargs) # Return a version range instead of version if max_version is not None: if not isinstance(max_version, Version): max_version = self.version_cls(self, max_version, **kwargs) version = VersionRange(version, max_version) return version
python
def v(self, version, max_version=None, **kwargs): """ Returns either a single requirement version or a requirement version range depending on whether two arguments are supplied or one Parameters ---------- version : str | Version Either a version of the requirement, or the first version in a range of acceptable versions """ if not isinstance(version, Version): version = self.version_cls(self, version, **kwargs) # Return a version range instead of version if max_version is not None: if not isinstance(max_version, Version): max_version = self.version_cls(self, max_version, **kwargs) version = VersionRange(version, max_version) return version
[ "def", "v", "(", "self", ",", "version", ",", "max_version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "version", ",", "Version", ")", ":", "version", "=", "self", ".", "version_cls", "(", "self", ",", "version", ",", "*", "*", "kwargs", ")", "# Return a version range instead of version", "if", "max_version", "is", "not", "None", ":", "if", "not", "isinstance", "(", "max_version", ",", "Version", ")", ":", "max_version", "=", "self", ".", "version_cls", "(", "self", ",", "max_version", ",", "*", "*", "kwargs", ")", "version", "=", "VersionRange", "(", "version", ",", "max_version", ")", "return", "version" ]
Returns either a single requirement version or a requirement version range depending on whether two arguments are supplied or one Parameters ---------- version : str | Version Either a version of the requirement, or the first version in a range of acceptable versions
[ "Returns", "either", "a", "single", "requirement", "version", "or", "a", "requirement", "version", "range", "depending", "on", "whether", "two", "arguments", "are", "supplied", "or", "one" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/requirement/base.py#L393-L411
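Editor's note: v() returns a single version when called with one argument and a range when called with two. The one-or-two-argument pattern can be sketched standalone with namedtuples as hypothetical stand-ins for arcana's Version and VersionRange classes:

from collections import namedtuple

Ver = namedtuple('Ver', ['seq'])                   # stand-in for Version
VerRange = namedtuple('VerRange', ['min', 'max'])  # stand-in for VersionRange

def v(version, max_version=None):
    version = Ver(version)
    if max_version is not None:
        # Two arguments: an inclusive range of acceptable versions
        return VerRange(version, Ver(max_version))
    return version  # one argument: a single acceptable version

print(v((1, 0)))          # Ver(seq=(1, 0))
print(v((1, 0), (2, 0)))  # VerRange(min=Ver(seq=(1, 0)), max=Ver(seq=(2, 0)))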
vedvyas/doxytag2zealdb
doxytag2zealdb/zealdb.py
ZealDB.open
def open(self): '''Open a connection to the database. If a connection appears to be open already, transactions are committed and it is closed before proceeding. After establishing the connection, the searchIndex table is prepared (and dropped if it already exists). ''' if self.conn is not None: self.close() self.conn = sqlite3.connect(self.filename) self.cursor = self.conn.cursor() c = self.cursor c.execute('SELECT name FROM sqlite_master WHERE type="table"') if (u'searchIndex',) in c: c.execute('DROP TABLE searchIndex') if self.verbose: print('Dropped existing table', file=sys.stderr) c.executescript( ''' CREATE TABLE searchIndex (id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT); CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path); ''' )
python
def open(self): '''Open a connection to the database. If a connection appears to be open already, transactions are committed and it is closed before proceeding. After establishing the connection, the searchIndex table is prepared (and dropped if it already exists). ''' if self.conn is not None: self.close() self.conn = sqlite3.connect(self.filename) self.cursor = self.conn.cursor() c = self.cursor c.execute('SELECT name FROM sqlite_master WHERE type="table"') if (u'searchIndex',) in c: c.execute('DROP TABLE searchIndex') if self.verbose: print('Dropped existing table', file=sys.stderr) c.executescript( ''' CREATE TABLE searchIndex (id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT); CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path); ''' )
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "conn", "is", "not", "None", ":", "self", ".", "close", "(", ")", "self", ".", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "filename", ")", "self", ".", "cursor", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", "=", "self", ".", "cursor", "c", ".", "execute", "(", "'SELECT name FROM sqlite_master WHERE type=\"table\"'", ")", "if", "(", "u'searchIndex'", ",", ")", "in", "c", ":", "c", ".", "execute", "(", "'DROP TABLE searchIndex'", ")", "if", "self", ".", "verbose", ":", "print", "(", "'Dropped existing table'", ",", "file", "=", "sys", ".", "stderr", ")", "c", ".", "executescript", "(", "'''\n CREATE TABLE searchIndex\n (id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);\n\n CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);\n '''", ")" ]
Open a connection to the database. If a connection appears to be open already, transactions are committed and it is closed before proceeding. After establishing the connection, the searchIndex table is prepared (and dropped if it already exists).
[ "Open", "a", "connection", "to", "the", "database", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/zealdb.py#L77-L105
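Editor's note: the drop-then-recreate step in ZealDB.open can be exercised against an in-memory SQLite database; this sketch reuses the same statements as the record above, with only the ':memory:' filename as an assumption:

import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('SELECT name FROM sqlite_master WHERE type="table"')
if (u'searchIndex',) in c:  # iterating the cursor yields row tuples
    c.execute('DROP TABLE searchIndex')
c.executescript('''
    CREATE TABLE searchIndex
    (id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);

    CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);
''')
conn.close()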
vedvyas/doxytag2zealdb
doxytag2zealdb/zealdb.py
ZealDB.insert
def insert(self, name, entry_type, filename): '''Insert an entry into the Zeal database. Args: name: A string representing the name of the entry. entry_type: A string representing the entry type. filename: A string representing the filename of the documentation for the entry. Raises: RuntimeError: a database connection was not established before calling insert() ''' if self.cursor is None: raise RuntimeError( 'Open DB connection before attempting to call insert!') db_entry = (name, entry_type, filename) if self.verbose: print('Inserting %s "%s" -> %s' % db_entry, file=sys.stderr) self.cursor.execute( '''INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?, ?, ?)''', db_entry)
python
def insert(self, name, entry_type, filename): '''Insert an entry into the Zeal database. Args: name: A string representing the name of the entry. entry_type: A string representing the entry type. filename: A string representing the filename of the documentation for the entry. Raises: RuntimeError: a database connection was not established before calling insert() ''' if self.cursor is None: raise RuntimeError( 'Open DB connection before attempting to call insert!') db_entry = (name, entry_type, filename) if self.verbose: print('Inserting %s "%s" -> %s' % db_entry, file=sys.stderr) self.cursor.execute( '''INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?, ?, ?)''', db_entry)
[ "def", "insert", "(", "self", ",", "name", ",", "entry_type", ",", "filename", ")", ":", "if", "self", ".", "cursor", "is", "None", ":", "raise", "RuntimeError", "(", "'Open DB connection before attempting to call insert!'", ")", "db_entry", "=", "(", "name", ",", "entry_type", ",", "filename", ")", "if", "self", ".", "verbose", ":", "print", "(", "'Inserting %s \"%s\" -> %s'", "%", "db_entry", ",", "file", "=", "sys", ".", "stderr", ")", "self", ".", "cursor", ".", "execute", "(", "'''INSERT OR IGNORE INTO searchIndex(name, type, path)\n VALUES (?, ?, ?)'''", ",", "db_entry", ")" ]
Insert an entry into the Zeal database. Args: name: A string representing the name of the entry. entry_type: A string representing the entry type. filename: A string representing the filename of the documentation for the entry. Raises: RuntimeError: a database connection was not established before calling insert()
[ "Insert", "an", "entry", "into", "the", "Zeal", "database", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/zealdb.py#L131-L155
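Editor's note: INSERT OR IGNORE combined with the unique 'anchor' index is what makes repeated insertions of the same (name, type, path) triple harmless. A runnable sketch continuing the in-memory setup (the example entry values are illustrative):

import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.executescript('''
    CREATE TABLE searchIndex
    (id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);
    CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);
''')
entry = ('MyClass', 'Class', 'classMyClass.html')
for _ in range(2):  # the second insert is silently ignored by the unique index
    c.execute('INSERT OR IGNORE INTO searchIndex(name, type, path) '
              'VALUES (?, ?, ?)', entry)
print(c.execute('SELECT COUNT(*) FROM searchIndex').fetchone())  # (1,)
conn.close()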
erikvw/django-collect-offline
django_collect_offline/transaction/serialize.py
serialize
def serialize(objects=None): """A simple wrapper of Django's serializer with defaults for JSON and natural keys. Note: use_natural_primary_keys is False as once a pk is set, it should not be changed throughout the distributed data. """ return serializers.serialize( "json", objects, ensure_ascii=True, use_natural_foreign_keys=True, use_natural_primary_keys=False, )
python
def serialize(objects=None): """A simple wrapper of Django's serializer with defaults for JSON and natural keys. Note: use_natural_primary_keys is False as once a pk is set, it should not be changed throughout the distributed data. """ return serializers.serialize( "json", objects, ensure_ascii=True, use_natural_foreign_keys=True, use_natural_primary_keys=False, )
[ "def", "serialize", "(", "objects", "=", "None", ")", ":", "return", "serializers", ".", "serialize", "(", "\"json\"", ",", "objects", ",", "ensure_ascii", "=", "True", ",", "use_natural_foreign_keys", "=", "True", ",", "use_natural_primary_keys", "=", "False", ",", ")" ]
A simple wrapper of Django's serializer with defaults for JSON and natural keys. Note: use_natural_primary_keys is False as once a pk is set, it should not be changed throughout the distributed data.
[ "A", "simple", "wrapper", "of", "Django", "s", "serializer", "with", "defaults", "for", "JSON", "and", "natural", "keys", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/serialize.py#L4-L19
MonashBI/arcana
arcana/study/base.py
Study.data
def data(self, name, subject_ids=None, visit_ids=None, session_ids=None, **kwargs): """ Returns the Fileset(s) or Field(s) associated with the provided spec name(s), generating derived filesets as required. Multiple names in a list can be provided, to allow their workflows to be combined into a single workflow. Parameters ---------- name : str | List[str] The name of the FilesetSpec|FieldSpec to retrieve the filesets for subject_id : str | None The subject ID of the data to return. If provided (including None values) the data will be returned as a single item instead of a collection visit_id : str | None The visit ID of the data to return. If provided (including None values) the data will be returned as a single item instead of a collection subject_ids : list[str] The subject IDs to include in the returned collection visit_ids : list[str] The visit IDs to include in the returned collection session_ids : list[str] The session IDs (i.e. 2-tuples of the form (<subject-id>, <visit-id>)) to include in the returned collection Returns ------- data : BaseItem | BaseCollection | list[BaseItem | BaseCollection] If 'subject_id' or 'visit_id' is provided then the data returned is a single Fileset or Field. Otherwise a collection of Filesets or Fields is returned. If multiple spec names are provided then a list of items or collections corresponding to each spec name is returned. """ if isinstance(name, basestring): single_name = True names = [name] else: names = name single_name = False single_item = 'subject_id' in kwargs or 'visit_id' in kwargs filter_items = (subject_ids, visit_ids, session_ids) != (None, None, None) specs = [self.spec(n) for n in names] if single_item: if filter_items: raise ArcanaUsageError( "Cannot provide 'subject_id' and/or 'visit_id' in " "combination with 'subject_ids', 'visit_ids' or " "'session_ids'") subject_id = kwargs.pop('subject_id', None) visit_id = kwargs.pop('visit_id', None) iterators = set(chain(self.FREQUENCIES[s.frequency] for s in specs)) if subject_id is not None and visit_id is not None: session_ids = [(subject_id, visit_id)] elif subject_id is not None: if self.VISIT_ID in iterators: raise ArcanaUsageError( "Non-None values for visit IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) subject_ids = [subject_id] elif visit_id is not None: if self.SUBJECT_ID in iterators: raise ArcanaUsageError( "Non-None values for subject IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) visit_ids = [visit_id] elif iterators: raise ArcanaUsageError( "Non-None values for subject and/or visit IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) # Work out which pipelines need to be run pipeline_getters = defaultdict(set) for spec in specs: if spec.derived or spec.derivable: # Filter out Study inputs # Add name of spec to set of required outputs pipeline_getters[spec.pipeline_getter].add(spec.name) # Run required pipelines if pipeline_getters: kwargs = copy(kwargs) kwargs.update({'subject_ids': subject_ids, 'visit_ids': visit_ids, 'session_ids': session_ids}) pipelines, required_outputs = zip(*( (self.pipeline(k), v) for k, v in pipeline_getters.items())) kwargs['required_outputs'] = required_outputs self.processor.run(*pipelines, **kwargs) # Find and return Item/Collection corresponding to requested spec # names all_data = [] for name in names: spec = self.bound_spec(name) data = spec.collection if single_item: data = data.item(subject_id=subject_id, 
visit_id=visit_id) elif filter_items and spec.frequency != 'per_study': if subject_ids is None: subject_ids = [] if visit_ids is None: visit_ids = [] if session_ids is None: session_ids = [] if spec.frequency == 'per_session': data = [d for d in data if (d.subject_id in subject_ids or d.visit_id in visit_ids or d.session_id in session_ids)] elif spec.frequency == 'per_subject': data = [d for d in data if (d.subject_id in subject_ids or d.subject_id in [s[0] for s in session_ids])] elif spec.frequency == 'per_visit': data = [d for d in data if (d.visit_id in visit_ids or d.visit_id in [s[1] for s in session_ids])] if not data: raise ArcanaUsageError( "No matching data found (subject_ids={}, visit_ids={} " ", session_ids={})" .format(subject_ids, visit_ids, session_ids)) data = spec.CollectionClass(spec.name, data) if single_name: return data else: all_data.append(data) return all_data
python
def data(self, name, subject_ids=None, visit_ids=None, session_ids=None, **kwargs): """ Returns the Fileset(s) or Field(s) associated with the provided spec name(s), generating derived filesets as required. Multiple names in a list can be provided, to allow their workflows to be combined into a single workflow. Parameters ---------- name : str | List[str] The name of the FilesetSpec|FieldSpec to retrieve the filesets for subject_id : str | None The subject ID of the data to return. If provided (including None values) the data will be returned as a single item instead of a collection visit_id : str | None The visit ID of the data to return. If provided (including None values) the data will be returned as a single item instead of a collection subject_ids : list[str] The subject IDs to include in the returned collection visit_ids : list[str] The visit IDs to include in the returned collection session_ids : list[str] The session IDs (i.e. 2-tuples of the form (<subject-id>, <visit-id>)) to include in the returned collection Returns ------- data : BaseItem | BaseCollection | list[BaseItem | BaseCollection] If 'subject_id' or 'visit_id' is provided then the data returned is a single Fileset or Field. Otherwise a collection of Filesets or Fields is returned. If multiple spec names are provided then a list of items or collections corresponding to each spec name is returned. """ if isinstance(name, basestring): single_name = True names = [name] else: names = name single_name = False single_item = 'subject_id' in kwargs or 'visit_id' in kwargs filter_items = (subject_ids, visit_ids, session_ids) != (None, None, None) specs = [self.spec(n) for n in names] if single_item: if filter_items: raise ArcanaUsageError( "Cannot provide 'subject_id' and/or 'visit_id' in " "combination with 'subject_ids', 'visit_ids' or " "'session_ids'") subject_id = kwargs.pop('subject_id', None) visit_id = kwargs.pop('visit_id', None) iterators = set(chain(self.FREQUENCIES[s.frequency] for s in specs)) if subject_id is not None and visit_id is not None: session_ids = [(subject_id, visit_id)] elif subject_id is not None: if self.VISIT_ID in iterators: raise ArcanaUsageError( "Non-None values for visit IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) subject_ids = [subject_id] elif visit_id is not None: if self.SUBJECT_ID in iterators: raise ArcanaUsageError( "Non-None values for subject IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) visit_ids = [visit_id] elif iterators: raise ArcanaUsageError( "Non-None values for subject and/or visit IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) # Work out which pipelines need to be run pipeline_getters = defaultdict(set) for spec in specs: if spec.derived or spec.derivable: # Filter out Study inputs # Add name of spec to set of required outputs pipeline_getters[spec.pipeline_getter].add(spec.name) # Run required pipelines if pipeline_getters: kwargs = copy(kwargs) kwargs.update({'subject_ids': subject_ids, 'visit_ids': visit_ids, 'session_ids': session_ids}) pipelines, required_outputs = zip(*( (self.pipeline(k), v) for k, v in pipeline_getters.items())) kwargs['required_outputs'] = required_outputs self.processor.run(*pipelines, **kwargs) # Find and return Item/Collection corresponding to requested spec # names all_data = [] for name in names: spec = self.bound_spec(name) data = spec.collection if single_item: data = data.item(subject_id=subject_id, 
visit_id=visit_id) elif filter_items and spec.frequency != 'per_study': if subject_ids is None: subject_ids = [] if visit_ids is None: visit_ids = [] if session_ids is None: session_ids = [] if spec.frequency == 'per_session': data = [d for d in data if (d.subject_id in subject_ids or d.visit_id in visit_ids or d.session_id in session_ids)] elif spec.frequency == 'per_subject': data = [d for d in data if (d.subject_id in subject_ids or d.subject_id in [s[0] for s in session_ids])] elif spec.frequency == 'per_visit': data = [d for d in data if (d.visit_id in visit_ids or d.visit_id in [s[1] for s in session_ids])] if not data: raise ArcanaUsageError( "No matching data found (subject_ids={}, visit_ids={} " ", session_ids={})" .format(subject_ids, visit_ids, session_ids)) data = spec.CollectionClass(spec.name, data) if single_name: return data else: all_data.append(data) return all_data
[ "def", "data", "(", "self", ",", "name", ",", "subject_ids", "=", "None", ",", "visit_ids", "=", "None", ",", "session_ids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "name", ",", "basestring", ")", ":", "single_name", "=", "True", "names", "=", "[", "name", "]", "else", ":", "names", "=", "name", "single_name", "=", "False", "single_item", "=", "'subject_id'", "in", "kwargs", "or", "'visit_id'", "in", "kwargs", "filter_items", "=", "(", "subject_ids", ",", "visit_ids", ",", "session_ids", ")", "!=", "(", "None", ",", "None", ",", "None", ")", "specs", "=", "[", "self", ".", "spec", "(", "n", ")", "for", "n", "in", "names", "]", "if", "single_item", ":", "if", "filter_items", ":", "raise", "ArcanaUsageError", "(", "\"Cannot provide 'subject_id' and/or 'visit_id' in \"", "\"combination with 'subject_ids', 'visit_ids' or \"", "\"'session_ids'\"", ")", "subject_id", "=", "kwargs", ".", "pop", "(", "'subject_id'", ",", "None", ")", "visit_id", "=", "kwargs", ".", "pop", "(", "'visit_id'", ",", "None", ")", "iterators", "=", "set", "(", "chain", "(", "self", ".", "FREQUENCIES", "[", "s", ".", "frequency", "]", "for", "s", "in", "specs", ")", ")", "if", "subject_id", "is", "not", "None", "and", "visit_id", "is", "not", "None", ":", "session_ids", "=", "[", "(", "subject_id", ",", "visit_id", ")", "]", "elif", "subject_id", "is", "not", "None", ":", "if", "self", ".", "VISIT_ID", "in", "iterators", ":", "raise", "ArcanaUsageError", "(", "\"Non-None values for visit IDs need to be \"", "\"provided to select a single item for each of '{}'\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "names", ")", ")", ")", "subject_ids", "=", "[", "subject_id", "]", "elif", "visit_id", "is", "not", "None", ":", "if", "self", ".", "SUBJECT_ID", "in", "iterators", ":", "raise", "ArcanaUsageError", "(", "\"Non-None values for subject IDs need to be \"", "\"provided to select a single item for each of '{}'\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "names", ")", ")", ")", "visit_ids", "=", "[", "visit_id", "]", "elif", "iterators", ":", "raise", "ArcanaUsageError", "(", "\"Non-None values for subject and/or visit IDs need to be \"", "\"provided to select a single item for each of '{}'\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "names", ")", ")", ")", "# Work out which pipelines need to be run", "pipeline_getters", "=", "defaultdict", "(", "set", ")", "for", "spec", "in", "specs", ":", "if", "spec", ".", "derived", "or", "spec", ".", "derivable", ":", "# Filter out Study inputs", "# Add name of spec to set of required outputs", "pipeline_getters", "[", "spec", ".", "pipeline_getter", "]", ".", "add", "(", "spec", ".", "name", ")", "# Run required pipelines", "if", "pipeline_getters", ":", "kwargs", "=", "copy", "(", "kwargs", ")", "kwargs", ".", "update", "(", "{", "'subject_ids'", ":", "subject_ids", ",", "'visit_ids'", ":", "visit_ids", ",", "'session_ids'", ":", "session_ids", "}", ")", "pipelines", ",", "required_outputs", "=", "zip", "(", "*", "(", "(", "self", ".", "pipeline", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "pipeline_getters", ".", "items", "(", ")", ")", ")", "kwargs", "[", "'required_outputs'", "]", "=", "required_outputs", "self", ".", "processor", ".", "run", "(", "*", "pipelines", ",", "*", "*", "kwargs", ")", "# Find and return Item/Collection corresponding to requested spec", "# names", "all_data", "=", "[", "]", "for", "name", "in", "names", ":", "spec", "=", "self", ".", "bound_spec", "(", "name", ")", "data", "=", 
"spec", ".", "collection", "if", "single_item", ":", "data", "=", "data", ".", "item", "(", "subject_id", "=", "subject_id", ",", "visit_id", "=", "visit_id", ")", "elif", "filter_items", "and", "spec", ".", "frequency", "!=", "'per_study'", ":", "if", "subject_ids", "is", "None", ":", "subject_ids", "=", "[", "]", "if", "visit_ids", "is", "None", ":", "visit_ids", "=", "[", "]", "if", "session_ids", "is", "None", ":", "session_ids", "=", "[", "]", "if", "spec", ".", "frequency", "==", "'per_session'", ":", "data", "=", "[", "d", "for", "d", "in", "data", "if", "(", "d", ".", "subject_id", "in", "subject_ids", "or", "d", ".", "visit_id", "in", "visit_ids", "or", "d", ".", "session_id", "in", "session_ids", ")", "]", "elif", "spec", ".", "frequency", "==", "'per_subject'", ":", "data", "=", "[", "d", "for", "d", "in", "data", "if", "(", "d", ".", "subject_id", "in", "subject_ids", "or", "d", ".", "subject_id", "in", "[", "s", "[", "0", "]", "for", "s", "in", "session_ids", "]", ")", "]", "elif", "spec", ".", "frequency", "==", "'per_visit'", ":", "data", "=", "[", "d", "for", "d", "in", "data", "if", "(", "d", ".", "visit_id", "in", "visit_ids", "or", "d", ".", "visit_id", "in", "[", "s", "[", "1", "]", "for", "s", "in", "session_ids", "]", ")", "]", "if", "not", "data", ":", "raise", "ArcanaUsageError", "(", "\"No matching data found (subject_ids={}, visit_ids={} \"", "\", session_ids={})\"", ".", "format", "(", "subject_ids", ",", "visit_ids", ",", "session_ids", ")", ")", "data", "=", "spec", ".", "CollectionClass", "(", "spec", ".", "name", ",", "data", ")", "if", "single_name", ":", "return", "data", "else", ":", "all_data", ".", "append", "(", "data", ")", "return", "all_data" ]
Returns the Fileset(s) or Field(s) associated with the provided spec name(s), generating derived filesets as required. Multiple names in a list can be provided, to allow their workflows to be combined into a single workflow. Parameters ---------- name : str | List[str] The name of the FilesetSpec|FieldSpec to retrieve the filesets for subject_id : str | None The subject ID of the data to return. If provided (including None values) the data will be returned as a single item instead of a collection visit_id : str | None The visit ID of the data to return. If provided (including None values) the data will be returned as a single item instead of a collection subject_ids : list[str] The subject IDs to include in the returned collection visit_ids : list[str] The visit IDs to include in the returned collection session_ids : list[str] The session IDs (i.e. 2-tuples of the form (<subject-id>, <visit-id>)) to include in the returned collection Returns ------- data : BaseItem | BaseCollection | list[BaseItem | BaseCollection] If 'subject_id' or 'visit_id' is provided then the data returned is a single Fileset or Field. Otherwise a collection of Filesets or Fields is returned. If multiple spec names are provided then a list of items or collections corresponding to each spec name is returned.
[ "Returns", "the", "Fileset", "(", "s", ")", "or", "Field", "(", "s", ")", "associated", "with", "the", "provided", "spec", "name", "(", "s", ")", "generating", "derived", "filesets", "as", "required", ".", "Multiple", "names", "in", "a", "list", "can", "be", "provided", "to", "allow", "their", "workflows", "to", "be", "combined", "into", "a", "single", "workflow", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L244-L376
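Editor's note: the per-frequency filtering near the end of data() is plain membership testing over subject, visit and session IDs. A standalone sketch of the 'per_session' branch, with namedtuples standing in for data items and the session ID modelled as the (subject, visit) pair the docstring describes (all names here are illustrative):

from collections import namedtuple

Item = namedtuple('Item', ['subject_id', 'visit_id'])
data = [Item('S1', 'V1'), Item('S1', 'V2'), Item('S2', 'V1')]

subject_ids, visit_ids = [], []
session_ids = [('S1', 'V2')]

# 'per_session' branch: keep items matching any of the three ID filters
kept = [d for d in data
        if (d.subject_id in subject_ids or
            d.visit_id in visit_ids or
            (d.subject_id, d.visit_id) in session_ids)]
print(kept)  # [Item(subject_id='S1', visit_id='V2')]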
MonashBI/arcana
arcana/study/base.py
Study.branch
def branch(self, name, values=None): # @UnusedVariable @IgnorePep8 """ Checks whether the given switch matches the value provided Parameters ---------- name : str The name of the parameter to retrieve values : str | None The value(s) of the switch to match if a non-boolean switch """ if isinstance(values, basestring): values = [values] spec = self.parameter_spec(name) if not isinstance(spec, SwitchSpec): raise ArcanaUsageError( "{} is standard parameter not a switch".format(spec)) switch = self._get_parameter(name) if spec.is_boolean: if values is not None: raise ArcanaDesignError( "Should not provide values ({}) to boolean switch " "'{}' in {}".format( values, name, self._param_error_location)) in_branch = switch.value else: if values is None: raise ArcanaDesignError( "Value(s) need(s) to be provided non-boolean switch" " '{}' in {}".format( name, self._param_error_location)) # Register parameter as being used by the pipeline unrecognised_values = set(values) - set(spec.choices) if unrecognised_values: raise ArcanaDesignError( "Provided value(s) ('{}') for switch '{}' in {} " "is not a valid option ('{}')".format( "', '".join(unrecognised_values), name, self._param_error_location, "', '".join(spec.choices))) in_branch = switch.value in values return in_branch
python
def branch(self, name, values=None): # @UnusedVariable @IgnorePep8 """ Checks whether the given switch matches the value provided Parameters ---------- name : str The name of the parameter to retrieve values : str | None The value(s) of the switch to match if a non-boolean switch """ if isinstance(values, basestring): values = [values] spec = self.parameter_spec(name) if not isinstance(spec, SwitchSpec): raise ArcanaUsageError( "{} is standard parameter not a switch".format(spec)) switch = self._get_parameter(name) if spec.is_boolean: if values is not None: raise ArcanaDesignError( "Should not provide values ({}) to boolean switch " "'{}' in {}".format( values, name, self._param_error_location)) in_branch = switch.value else: if values is None: raise ArcanaDesignError( "Value(s) need(s) to be provided non-boolean switch" " '{}' in {}".format( name, self._param_error_location)) # Register parameter as being used by the pipeline unrecognised_values = set(values) - set(spec.choices) if unrecognised_values: raise ArcanaDesignError( "Provided value(s) ('{}') for switch '{}' in {} " "is not a valid option ('{}')".format( "', '".join(unrecognised_values), name, self._param_error_location, "', '".join(spec.choices))) in_branch = switch.value in values return in_branch
[ "def", "branch", "(", "self", ",", "name", ",", "values", "=", "None", ")", ":", "# @UnusedVariable @IgnorePep8", "if", "isinstance", "(", "values", ",", "basestring", ")", ":", "values", "=", "[", "values", "]", "spec", "=", "self", ".", "parameter_spec", "(", "name", ")", "if", "not", "isinstance", "(", "spec", ",", "SwitchSpec", ")", ":", "raise", "ArcanaUsageError", "(", "\"{} is standard parameter not a switch\"", ".", "format", "(", "spec", ")", ")", "switch", "=", "self", ".", "_get_parameter", "(", "name", ")", "if", "spec", ".", "is_boolean", ":", "if", "values", "is", "not", "None", ":", "raise", "ArcanaDesignError", "(", "\"Should not provide values ({}) to boolean switch \"", "\"'{}' in {}\"", ".", "format", "(", "values", ",", "name", ",", "self", ".", "_param_error_location", ")", ")", "in_branch", "=", "switch", ".", "value", "else", ":", "if", "values", "is", "None", ":", "raise", "ArcanaDesignError", "(", "\"Value(s) need(s) to be provided non-boolean switch\"", "\" '{}' in {}\"", ".", "format", "(", "name", ",", "self", ".", "_param_error_location", ")", ")", "# Register parameter as being used by the pipeline", "unrecognised_values", "=", "set", "(", "values", ")", "-", "set", "(", "spec", ".", "choices", ")", "if", "unrecognised_values", ":", "raise", "ArcanaDesignError", "(", "\"Provided value(s) ('{}') for switch '{}' in {} \"", "\"is not a valid option ('{}')\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "unrecognised_values", ")", ",", "name", ",", "self", ".", "_param_error_location", ",", "\"', '\"", ".", "join", "(", "spec", ".", "choices", ")", ")", ")", "in_branch", "=", "switch", ".", "value", "in", "values", "return", "in_branch" ]
Checks whether the given switch matches the value provided Parameters ---------- name : str The name of the parameter to retrieve values : str | None The value(s) of the switch to match if a non-boolean switch
[ "Checks", "whether", "the", "given", "switch", "matches", "the", "value", "provided" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L593-L634
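Editor's note: the non-boolean path of branch() boils down to validating the requested values against the switch's choices, then a membership test. A minimal sketch with plain data in place of SwitchSpec (function and argument names are illustrative):

def in_branch(switch_value, values, choices):
    # Reject requested values that aren't valid options for this switch
    unrecognised = set(values) - set(choices)
    if unrecognised:
        raise ValueError("Not valid option(s): {}".format(sorted(unrecognised)))
    return switch_value in values

print(in_branch('fsl', ['fsl', 'ants'], choices=['fsl', 'ants', 'spm']))  # True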
MonashBI/arcana
arcana/study/base.py
Study.unhandled_branch
def unhandled_branch(self, name): """ Convenience method for raising an exception if a pipeline doesn't handle a particular switch value Parameters ---------- name : str Name of the switch whose value hasn't been handled """ raise ArcanaDesignError( "'{}' value of '{}' switch in {} is not handled" .format(self._get_parameter(name), name, self._param_error_location))
python
def unhandled_branch(self, name): """ Convenience method for raising an exception if a pipeline doesn't handle a particular switch value Parameters ---------- name : str Name of the switch whose value hasn't been handled """ raise ArcanaDesignError( "'{}' value of '{}' switch in {} is not handled" .format(self._get_parameter(name), name, self._param_error_location))
[ "def", "unhandled_branch", "(", "self", ",", "name", ")", ":", "raise", "ArcanaDesignError", "(", "\"'{}' value of '{}' switch in {} is not handled\"", ".", "format", "(", "self", ".", "_get_parameter", "(", "name", ")", ",", "name", ",", "self", ".", "_param_error_location", ")", ")" ]
Convenience method for raising an exception if a pipeline doesn't handle a particular switch value Parameters ---------- name : str Name of the switch whose value hasn't been handled
[ "Convenient", "method", "for", "raising", "exception", "if", "a", "pipeline", "doesn", "t", "handle", "a", "particular", "switch", "value" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L636-L651
MonashBI/arcana
arcana/study/base.py
Study.save_workflow_graph_for
def save_workflow_graph_for(self, spec_name, fname, full=False, style='flat', **kwargs): """ Saves a graph of the workflow to generate the requested spec_name Parameters ---------- spec_name : str Name of the spec to generate the graph for fname : str The filename for the saved graph style : str The style of the graph, can be one of 'orig', 'flat', 'exec', 'hierarchical' """ pipeline = self.spec(spec_name).pipeline if full: workflow = pe.Workflow(name='{}_gen'.format(spec_name), base_dir=self.processor.work_dir) self.processor._connect_pipeline( pipeline, workflow, **kwargs) else: workflow = pipeline._workflow fname = op.expanduser(fname) if not fname.endswith('.png'): fname += '.png' dotfilename = fname[:-4] + '.dot' workflow.write_graph(graph2use=style, dotfilename=dotfilename)
python
def save_workflow_graph_for(self, spec_name, fname, full=False, style='flat', **kwargs): """ Saves a graph of the workflow to generate the requested spec_name Parameters ---------- spec_name : str Name of the spec to generate the graph for fname : str The filename for the saved graph style : str The style of the graph, can be one of 'orig', 'flat', 'exec', 'hierarchical' """ pipeline = self.spec(spec_name).pipeline if full: workflow = pe.Workflow(name='{}_gen'.format(spec_name), base_dir=self.processor.work_dir) self.processor._connect_pipeline( pipeline, workflow, **kwargs) else: workflow = pipeline._workflow fname = op.expanduser(fname) if not fname.endswith('.png'): fname += '.png' dotfilename = fname[:-4] + '.dot' workflow.write_graph(graph2use=style, dotfilename=dotfilename)
[ "def", "save_workflow_graph_for", "(", "self", ",", "spec_name", ",", "fname", ",", "full", "=", "False", ",", "style", "=", "'flat'", ",", "*", "*", "kwargs", ")", ":", "pipeline", "=", "self", ".", "spec", "(", "spec_name", ")", ".", "pipeline", "if", "full", ":", "workflow", "=", "pe", ".", "Workflow", "(", "name", "=", "'{}_gen'", ".", "format", "(", "spec_name", ")", ",", "base_dir", "=", "self", ".", "processor", ".", "work_dir", ")", "self", ".", "processor", ".", "_connect_pipeline", "(", "pipeline", ",", "workflow", ",", "*", "*", "kwargs", ")", "else", ":", "workflow", "=", "pipeline", ".", "_workflow", "fname", "=", "op", ".", "expanduser", "(", "fname", ")", "if", "not", "fname", ".", "endswith", "(", "'.png'", ")", ":", "fname", "+=", "'.png'", "dotfilename", "=", "fname", "[", ":", "-", "4", "]", "+", "'.dot'", "workflow", ".", "write_graph", "(", "graph2use", "=", "style", ",", "dotfilename", "=", "dotfilename", ")" ]
Saves a graph of the workflow to generate the requested spec_name Parameters ---------- spec_name : str Name of the spec to generate the graph for fname : str The filename for the saved graph style : str The style of the graph, can be one of 'orig', 'flat', 'exec', 'hierarchical'
[ "Saves", "a", "graph", "of", "the", "workflow", "to", "generate", "the", "requested", "spec_name" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L669-L697
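Editor's note: the filename handling at the end of save_workflow_graph_for (expand '~', force a .png suffix, derive the .dot name) is easy to see in isolation. A runnable sketch of just that step, with an illustrative path:

import os.path as op

fname = op.expanduser('~/graphs/pipeline')
if not fname.endswith('.png'):
    fname += '.png'
dotfilename = fname[:-4] + '.dot'  # swap the .png suffix for .dot
print(fname, dotfilename)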
MonashBI/arcana
arcana/study/base.py
Study.spec
def spec(self, name): """ Returns either the input corresponding to a fileset or field spec or a spec or parameter that has either been passed to the study as an input or can be derived. Parameters ---------- name : Str | BaseData | Parameter A parameter, fileset or field or name of one """ # If the provided "name" is actually a data item or parameter then # replace it with its name. if isinstance(name, (BaseData, Parameter)): name = name.name # If name is a parameter then return the parameter spec if name in self._param_specs: return self._param_specs[name] else: return self.bound_spec(name)
python
def spec(self, name): """ Returns either the input corresponding to a fileset or field spec or a spec or parameter that has either been passed to the study as an input or can be derived. Parameters ---------- name : Str | BaseData | Parameter A parameter, fileset or field or name of one """ # If the provided "name" is actually a data item or parameter then # replace it with its name. if isinstance(name, (BaseData, Parameter)): name = name.name # If name is a parameter then return the parameter spec if name in self._param_specs: return self._param_specs[name] else: return self.bound_spec(name)
[ "def", "spec", "(", "self", ",", "name", ")", ":", "# If the provided \"name\" is actually a data item or parameter then", "# replace it with its name.", "if", "isinstance", "(", "name", ",", "(", "BaseData", ",", "Parameter", ")", ")", ":", "name", "=", "name", ".", "name", "# If name is a parameter than return the parameter spec", "if", "name", "in", "self", ".", "_param_specs", ":", "return", "self", ".", "_param_specs", "[", "name", "]", "else", ":", "return", "self", ".", "bound_spec", "(", "name", ")" ]
Returns either the input corresponding to a fileset or field spec or a spec or parameter that has either been passed to the study as an input or can be derived. Parameters ---------- name : Str | BaseData | Parameter A parameter, fileset or field or name of one
[ "Returns", "either", "the", "input", "corresponding", "to", "a", "fileset", "or", "field", "field", "spec", "or", "a", "spec", "or", "parameter", "that", "has", "either", "been", "passed", "to", "the", "study", "as", "an", "input", "or", "can", "be", "derived", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L699-L718
MonashBI/arcana
arcana/study/base.py
Study.bound_spec
def bound_spec(self, name): """ Returns an input selector or derived spec bound to the study, i.e. where the repository tree is checked for existing outputs Parameters ---------- name : Str A name of a fileset or field """ # If the provided "name" is actually a data item or parameter then # replace it with its name. if isinstance(name, BaseData): name = name.name # Get the spec from the class spec = self.data_spec(name) try: bound = self._inputs[name] except KeyError: if not spec.derived and spec.default is None: raise ArcanaMissingDataException( "Acquired (i.e. non-generated) fileset '{}' " "was not supplied when the study '{}' was " "initiated".format(name, self.name)) else: try: bound = self._bound_specs[name] except KeyError: bound = self._bound_specs[name] = spec.bind(self) return bound
python
def bound_spec(self, name): """ Returns an input selector or derived spec bound to the study, i.e. where the repository tree is checked for existing outputs Parameters ---------- name : Str A name of a fileset or field """ # If the provided "name" is actually a data item or parameter then # replace it with its name. if isinstance(name, BaseData): name = name.name # Get the spec from the class spec = self.data_spec(name) try: bound = self._inputs[name] except KeyError: if not spec.derived and spec.default is None: raise ArcanaMissingDataException( "Acquired (i.e. non-generated) fileset '{}' " "was not supplied when the study '{}' was " "initiated".format(name, self.name)) else: try: bound = self._bound_specs[name] except KeyError: bound = self._bound_specs[name] = spec.bind(self) return bound
[ "def", "bound_spec", "(", "self", ",", "name", ")", ":", "# If the provided \"name\" is actually a data item or parameter then", "# replace it with its name.", "if", "isinstance", "(", "name", ",", "BaseData", ")", ":", "name", "=", "name", ".", "name", "# Get the spec from the class", "spec", "=", "self", ".", "data_spec", "(", "name", ")", "try", ":", "bound", "=", "self", ".", "_inputs", "[", "name", "]", "except", "KeyError", ":", "if", "not", "spec", ".", "derived", "and", "spec", ".", "default", "is", "None", ":", "raise", "ArcanaMissingDataException", "(", "\"Acquired (i.e. non-generated) fileset '{}' \"", "\"was not supplied when the study '{}' was \"", "\"initiated\"", ".", "format", "(", "name", ",", "self", ".", "name", ")", ")", "else", ":", "try", ":", "bound", "=", "self", ".", "_bound_specs", "[", "name", "]", "except", "KeyError", ":", "bound", "=", "self", ".", "_bound_specs", "[", "name", "]", "=", "spec", ".", "bind", "(", "self", ")", "return", "bound" ]
Returns an input selector or derived spec bound to the study, i.e. where the repository tree is checked for existing outputs Parameters ---------- name : Str A name of a fileset or field
[ "Returns", "an", "input", "selector", "or", "derived", "spec", "bound", "to", "the", "study", "i", ".", "e", ".", "where", "the", "repository", "tree", "is", "checked", "for", "existing", "outputs" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L720-L749
MonashBI/arcana
arcana/study/base.py
Study.data_spec
def data_spec(cls, name): """ Return the fileset_spec, i.e. the template of the fileset expected to be supplied or generated corresponding to the fileset_spec name. Parameters ---------- name : Str Name of the fileset_spec to return """ # If the provided "name" is actually a data item or parameter then # replace it with its name. if isinstance(name, BaseData): name = name.name try: return cls._data_specs[name] except KeyError: raise ArcanaNameError( name, "No fileset spec named '{}' in {}, available:\n{}" .format(name, cls.__name__, "\n".join(list(cls._data_specs.keys()))))
python
def data_spec(cls, name): """ Return the fileset_spec, i.e. the template of the fileset expected to be supplied or generated corresponding to the fileset_spec name. Parameters ---------- name : Str Name of the fileset_spec to return """ # If the provided "name" is actually a data item or parameter then # replace it with its name. if isinstance(name, BaseData): name = name.name try: return cls._data_specs[name] except KeyError: raise ArcanaNameError( name, "No fileset spec named '{}' in {}, available:\n{}" .format(name, cls.__name__, "\n".join(list(cls._data_specs.keys()))))
[ "def", "data_spec", "(", "cls", ",", "name", ")", ":", "# If the provided \"name\" is actually a data item or parameter then", "# replace it with its name.", "if", "isinstance", "(", "name", ",", "BaseData", ")", ":", "name", "=", "name", ".", "name", "try", ":", "return", "cls", ".", "_data_specs", "[", "name", "]", "except", "KeyError", ":", "raise", "ArcanaNameError", "(", "name", ",", "\"No fileset spec named '{}' in {}, available:\\n{}\"", ".", "format", "(", "name", ",", "cls", ".", "__name__", ",", "\"\\n\"", ".", "join", "(", "list", "(", "cls", ".", "_data_specs", ".", "keys", "(", ")", ")", ")", ")", ")" ]
Return the fileset_spec, i.e. the template of the fileset expected to be supplied or generated corresponding to the fileset_spec name. Parameters ---------- name : Str Name of the fileset_spec to return
[ "Return", "the", "fileset_spec", "i", ".", "e", ".", "the", "template", "of", "the", "fileset", "expected", "to", "be", "supplied", "or", "generated", "corresponding", "to", "the", "fileset_spec", "name", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L752-L773
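Editor's note: spec(), bound_spec() and data_spec() share one lookup pattern: normalise an object to its name, then turn a KeyError into an error that lists the available names. A standalone sketch of that pattern, simplified to getattr-based normalisation and a plain KeyError in place of arcana's isinstance checks and ArcanaNameError (the example spec dict is illustrative):

_data_specs = {'t1': '<spec t1>', 't2': '<spec t2>'}

def data_spec(name):
    # Accept either a spec-like object with a .name attribute or a plain string
    name = getattr(name, 'name', name)
    try:
        return _data_specs[name]
    except KeyError:
        raise KeyError(
            "No fileset spec named '{}', available:\n{}".format(
                name, "\n".join(_data_specs)))

print(data_spec('t1'))  # <spec t1>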
MonashBI/arcana
arcana/study/base.py
Study.cache_inputs
def cache_inputs(self): """ Runs the Study's repository source node for each of the inputs of the study, thereby caching any data required from remote repositories. Useful when launching many parallel jobs that will all try to concurrently access the remote repository, and probably lead to timeout errors. """ workflow = pe.Workflow(name='cache_download', base_dir=self.processor.work_dir) subjects = pe.Node(IdentityInterface(['subject_id']), name='subjects', environment=self.environment) sessions = pe.Node(IdentityInterface(['subject_id', 'visit_id']), name='sessions', environment=self.environment) subjects.iterables = ('subject_id', tuple(self.subject_ids)) sessions.iterables = ('visit_id', tuple(self.visit_ids)) source = pe.Node(RepositorySource( self.bound_spec(i).collection for i in self.inputs), name='source') workflow.connect(subjects, 'subject_id', sessions, 'subject_id') workflow.connect(sessions, 'subject_id', source, 'subject_id') workflow.connect(sessions, 'visit_id', source, 'visit_id') workflow.run()
python
def cache_inputs(self): """ Runs the Study's repository source node for each of the inputs of the study, thereby caching any data required from remote repositories. Useful when launching many parallel jobs that will all try to concurrently access the remote repository, and probably lead to timeout errors. """ workflow = pe.Workflow(name='cache_download', base_dir=self.processor.work_dir) subjects = pe.Node(IdentityInterface(['subject_id']), name='subjects', environment=self.environment) sessions = pe.Node(IdentityInterface(['subject_id', 'visit_id']), name='sessions', environment=self.environment) subjects.iterables = ('subject_id', tuple(self.subject_ids)) sessions.iterables = ('visit_id', tuple(self.visit_ids)) source = pe.Node(RepositorySource( self.bound_spec(i).collection for i in self.inputs), name='source') workflow.connect(subjects, 'subject_id', sessions, 'subject_id') workflow.connect(sessions, 'subject_id', source, 'subject_id') workflow.connect(sessions, 'visit_id', source, 'visit_id') workflow.run()
[ "def", "cache_inputs", "(", "self", ")", ":", "workflow", "=", "pe", ".", "Workflow", "(", "name", "=", "'cache_download'", ",", "base_dir", "=", "self", ".", "processor", ".", "work_dir", ")", "subjects", "=", "pe", ".", "Node", "(", "IdentityInterface", "(", "[", "'subject_id'", "]", ")", ",", "name", "=", "'subjects'", ",", "environment", "=", "self", ".", "environment", ")", "sessions", "=", "pe", ".", "Node", "(", "IdentityInterface", "(", "[", "'subject_id'", ",", "'visit_id'", "]", ")", ",", "name", "=", "'sessions'", ",", "environment", "=", "self", ".", "environment", ")", "subjects", ".", "iterables", "=", "(", "'subject_id'", ",", "tuple", "(", "self", ".", "subject_ids", ")", ")", "sessions", ".", "iterables", "=", "(", "'visit_id'", ",", "tuple", "(", "self", ".", "visit_ids", ")", ")", "source", "=", "pe", ".", "Node", "(", "RepositorySource", "(", "self", ".", "bound_spec", "(", "i", ")", ".", "collection", "for", "i", "in", "self", ".", "inputs", ")", ",", "name", "=", "'source'", ")", "workflow", ".", "connect", "(", "subjects", ",", "'subject_id'", ",", "sessions", ",", "'subject_id'", ")", "workflow", ".", "connect", "(", "sessions", ",", "'subject_id'", ",", "source", ",", "'subject_id'", ")", "workflow", ".", "connect", "(", "sessions", ",", "'visit_id'", ",", "source", ",", "'visit_id'", ")", "workflow", ".", "run", "(", ")" ]
Runs the Study's repository source node for each of the inputs of the study, thereby caching any data required from remote repositories. Useful when launching many parallel jobs that will all try to concurrently access the remote repository, and probably lead to timeout errors.
[ "Runs", "the", "Study", "s", "repository", "source", "node", "for", "each", "of", "the", "inputs", "of", "the", "study", "thereby", "caching", "any", "data", "required", "from", "remote", "repositorys", ".", "Useful", "when", "launching", "many", "parallel", "jobs", "that", "will", "all", "try", "to", "concurrently", "access", "the", "remote", "repository", "and", "probably", "lead", "to", "timeout", "errors", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L837-L858
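Editor's note: the two chained iterable nodes in cache_inputs make nipype expand the source node over the full subject-by-visit grid. The equivalent cross-product, sketched with itertools instead of a workflow (the IDs are illustrative):

from itertools import product

subject_ids = ('S1', 'S2')
visit_ids = ('V1', 'V2')

# One source invocation per (subject, visit) pair, as the iterables produce
for subject_id, visit_id in product(subject_ids, visit_ids):
    print('would download session', subject_id, visit_id)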
MonashBI/arcana
arcana/study/base.py
Study.provided
def provided(self, spec_name, default_okay=True): """ Checks to see whether the corresponding data spec was provided an explicit input, as opposed to derivatives or missing optional inputs Parameters ---------- spec_name : str Name of a data spec """ try: spec = self.bound_spec(spec_name) except ArcanaMissingDataException: return False if isinstance(spec, BaseInputSpec): return spec.default is not None and default_okay else: return True
python
def provided(self, spec_name, default_okay=True): """ Checks to see whether the corresponding data spec was provided an explicit input, as opposed to derivatives or missing optional inputs Parameters ---------- spec_name : str Name of a data spec """ try: spec = self.bound_spec(spec_name) except ArcanaMissingDataException: return False if isinstance(spec, BaseInputSpec): return spec.default is not None and default_okay else: return True
[ "def", "provided", "(", "self", ",", "spec_name", ",", "default_okay", "=", "True", ")", ":", "try", ":", "spec", "=", "self", ".", "bound_spec", "(", "spec_name", ")", "except", "ArcanaMissingDataException", ":", "return", "False", "if", "isinstance", "(", "spec", ",", "BaseInputSpec", ")", ":", "return", "spec", ".", "default", "is", "not", "None", "and", "default_okay", "else", ":", "return", "True" ]
Checks to see whether the corresponding data spec was provided an explicit input, as opposed to derivatives or missing optional inputs Parameters ---------- spec_name : str Name of a data spec
[ "Checks", "to", "see", "whether", "the", "corresponding", "data", "spec", "was", "provided", "an", "explicit", "input", "as", "opposed", "to", "derivatives", "or", "missing", "optional", "inputs" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L869-L886
MonashBI/arcana
arcana/study/base.py
Study.freq_from_iterators
def freq_from_iterators(cls, iterators): """ Returns the frequency corresponding to the given iterators """ return { frozenset(it): f for f, it in cls.FREQUENCIES.items()}[frozenset(iterators)]
python
def freq_from_iterators(cls, iterators): """ Returns the frequency corresponding to the given iterators """ return { frozenset(it): f for f, it in cls.FREQUENCIES.items()}[frozenset(iterators)]
[ "def", "freq_from_iterators", "(", "cls", ",", "iterators", ")", ":", "return", "{", "set", "(", "it", ")", ":", "f", "for", "f", ",", "it", "in", "cls", ".", "FREQUENCIES", ".", "items", "(", ")", "}", "[", "set", "(", "iterators", ")", "]" ]
Returns the frequency corresponding to the given iterators
[ "Returns", "the", "frequency", "corresponding", "to", "the", "given", "iterators" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L889-L894
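Editor's note: inverting FREQUENCIES by its iterator sets requires hashable keys, which is why the lookup above builds frozensets rather than sets (plain sets raise TypeError as dict keys). A runnable sketch with an illustrative FREQUENCIES mapping:

FREQUENCIES = {
    'per_session': ('subject_id', 'visit_id'),
    'per_subject': ('subject_id',),
    'per_visit': ('visit_id',),
    'per_study': (),
}

def freq_from_iterators(iterators):
    # frozenset keys make iterator order irrelevant (and hashable)
    return {frozenset(it): f
            for f, it in FREQUENCIES.items()}[frozenset(iterators)]

print(freq_from_iterators(['visit_id', 'subject_id']))  # per_session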
MonashBI/arcana
arcana/study/base.py
Study.prov
def prov(self): """ Extracts provenance information from the study for storage alongside generated derivatives. Typically for reference purposes only as only the pipeline workflow, inputs and outputs are checked by default when determining which sessions require reprocessing. Returns ------- prov : dict[str, *] A dictionary containing the provenance information to record for the study """ # Get list of repositories where inputs to the study are stored input_repos = list(set((i.repository for i in self.inputs))) inputs = {} for input in self.inputs: # @ReservedAssignment inputs[input.name] = { 'repository_index': input_repos.index(input.repository)} if input.frequency == 'per_study': inputs[input.name]['names'] = next(input.collection).name elif input.frequency == 'per_subject': inputs[input.name]['names'] = {i.subject_id: i.name for i in input.collection} elif input.frequency == 'per_visit': inputs[input.name]['names'] = {i.visit_id: i.name for i in input.collection} elif input.frequency == 'per_session': names = defaultdict(dict) for item in input.collection: names[item.subject_id][item.visit_id] = item.name # Convert from defaultdict to dict inputs[input.name]['names'] = dict(names.items()) return { 'name': self.name, 'type': get_class_info(type(self)), 'parameters': {p.name: p.value for p in self.parameters}, 'inputs': inputs, 'environment': self.environment.prov, 'repositories': [r.prov for r in input_repos], 'processor': self.processor.prov, 'subject_ids': self.subject_ids, 'visit_ids': self.visit_ids}
python
Extracts provenance information from the study for storage alongside generated derivatives. Typically for reference purposes only, as only the pipeline workflow, inputs and outputs are checked by default when determining which sessions require reprocessing.

Returns
-------
prov : dict[str, *]
    A dictionary containing the provenance information to record for the study
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L897-L939
gwww/elkm1
elkm1_lib/areas.py
Area.arm
def arm(self, level, code):
    """(Helper) Arm system at specified level (away, vacation, etc)"""
    self._elk.send(al_encode(level, self._index, code))
python
(Helper) Arm system at specified level (away, vacation, etc)
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/areas.py#L20-L22
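A hedged usage sketch. The Elk bootstrap follows the library's documented pattern, but the panel URL, arm level and user code below are illustrative assumptions, not details taken from this record:

from elkm1_lib import Elk

elk = Elk({'url': 'elk://192.168.1.100'})   # hypothetical panel address
elk.connect()
area = elk.areas[0]     # Area elements are indexed from 0
area.arm(1, 1234)       # assumed: level 1 = armed away, 1234 = user code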
gwww/elkm1
elkm1_lib/areas.py
Area.display_message
def display_message(self, clear, beep, timeout, line1, line2):
    """Display a message on all of the keypads in this area."""
    self._elk.send(
        dm_encode(self._index, clear, beep, timeout, line1, line2)
    )
python
Display a message on all of the keypads in this area.
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/areas.py#L28-L32
gwww/elkm1
elkm1_lib/areas.py
Areas.sync
def sync(self):
    """Retrieve areas from ElkM1"""
    self.elk.send(as_encode())
    self.get_descriptions(TextDescriptions.AREA.value)
python
Retrieve areas from ElkM1
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/areas.py#L43-L46
gwww/elkm1
elkm1_lib/lights.py
Light.level
def level(self, level, time=0):
    """(Helper) Set light to specified level"""
    if level <= 0:
        self._elk.send(pf_encode(self._index))
    elif level >= 98:
        self._elk.send(pn_encode(self._index))
    else:
        self._elk.send(pc_encode(self._index, 9, level, time))
python
(Helper) Set light to specified level
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/lights.py#L14-L21
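level() folds the requested value onto three different protocol messages; a sketch continuing the hypothetical connection above (treating the time argument as a fade/ramp parameter is an assumption):

light = elk.lights[3]
light.level(0)         # <= 0  -> pf_encode: light off
light.level(100)       # >= 98 -> pn_encode: light fully on
light.level(50, 10)    # otherwise pc_encode(index, 9, level, time): dim to 50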
gwww/elkm1
elkm1_lib/lights.py
Lights.sync
def sync(self):
    """Retrieve lights from ElkM1"""
    for i in range(4):
        self.elk.send(ps_encode(i))
    self.get_descriptions(TextDescriptions.LIGHT.value)
python
Retrieve lights from ElkM1
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/lights.py#L35-L39
MonashBI/arcana
arcana/utils/base.py
dir_modtime
def dir_modtime(dpath):
    """
    Returns the latest modification time of all files/subdirectories in a
    directory
    """
    return max(os.path.getmtime(d) for d, _, _ in os.walk(dpath))
python
Returns the latest modification time of all files/subdirectories in a directory
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/utils/base.py#L34-L39
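A self-contained check of the behaviour (the function is redefined locally so the snippet runs without arcana installed). Note that it stats the directories yielded by os.walk, not the individual files:

import os
import tempfile
import time

def dir_modtime(dpath):
    return max(os.path.getmtime(d) for d, _, _ in os.walk(dpath))

with tempfile.TemporaryDirectory() as tmp:
    os.makedirs(os.path.join(tmp, 'a', 'b'))
    before = dir_modtime(tmp)
    time.sleep(0.01)
    os.makedirs(os.path.join(tmp, 'c'))   # updates tmp's own mtime
    assert dir_modtime(tmp) >= before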
MonashBI/arcana
arcana/utils/base.py
split_extension
def split_extension(path):
    """
    An extension splitter that checks for compound extensions such as
    'file.nii.gz'

    Parameters
    ----------
    path : str
        A filename to split into base and extension

    Returns
    -------
    base : str
        The base part of the string, i.e. 'file' of 'file.nii.gz'
    ext : str
        The extension part of the string, i.e. 'nii.gz' of 'file.nii.gz'
    """
    for double_ext in double_exts:
        if path.endswith(double_ext):
            return path[:-len(double_ext)], double_ext
    dirname = os.path.dirname(path)
    filename = os.path.basename(path)
    parts = filename.split('.')
    if len(parts) == 1:
        base = filename
        ext = None
    else:
        ext = '.' + parts[-1]
        base = '.'.join(parts[:-1])
    return os.path.join(dirname, base), ext
python
An extension splitter that checks for compound extensions such as 'file.nii.gz'

Parameters
----------
path : str
    A filename to split into base and extension

Returns
-------
base : str
    The base part of the string, i.e. 'file' of 'file.nii.gz'
ext : str
    The extension part of the string, i.e. 'nii.gz' of 'file.nii.gz'
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/utils/base.py#L45-L74
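double_exts is a module-level constant not shown in this record, so the sketch below substitutes an illustrative value to show the three return shapes:

import os

double_exts = ('.nii.gz', '.tar.gz')   # illustrative stand-in for the module constant

def split_extension(path):
    for double_ext in double_exts:
        if path.endswith(double_ext):
            return path[:-len(double_ext)], double_ext
    dirname, filename = os.path.dirname(path), os.path.basename(path)
    parts = filename.split('.')
    if len(parts) == 1:
        return os.path.join(dirname, filename), None
    return os.path.join(dirname, '.'.join(parts[:-1])), '.' + parts[-1]

print(split_extension('/data/scan.nii.gz'))   # ('/data/scan', '.nii.gz')
print(split_extension('/data/scan.txt'))      # ('/data/scan', '.txt')
print(split_extension('/data/scan'))          # ('/data/scan', None)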
MonashBI/arcana
arcana/utils/base.py
parse_single_value
def parse_single_value(value):
    """
    Tries to convert to int, then float, and then gives up and assumes
    the value is of type string. Useful when accepting values that may be
    string representations of numerical values
    """
    if isinstance(value, basestring):
        try:
            if value.startswith('"') and value.endswith('"'):
                value = str(value[1:-1])
            elif '.' in value:
                value = float(value)
            else:
                value = int(value)
        except ValueError:
            value = str(value)
    elif not isinstance(value, (int, float)):
        raise ArcanaUsageError(
            "Unrecognised type for single value {}".format(value))
    return value
python
Tries to convert to int, then float, and then gives up and assumes the value is of type string. Useful when accepting values that may be string representations of numerical values
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/utils/base.py#L103-L122
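A Python 3 sketch of the conversion rules, with str standing in for basestring and ValueError for ArcanaUsageError:

def parse_single_value(value):
    if isinstance(value, str):
        try:
            if value.startswith('"') and value.endswith('"'):
                value = value[1:-1]          # explicitly quoted -> stays a string
            elif '.' in value:
                value = float(value)
            else:
                value = int(value)
        except ValueError:
            value = str(value)
    elif not isinstance(value, (int, float)):
        raise ValueError("Unrecognised type for single value {}".format(value))
    return value

print(parse_single_value('42'))      # 42 (int)
print(parse_single_value('4.2'))     # 4.2 (float)
print(parse_single_value('"4.2"'))   # '4.2' (str, quoting suppresses conversion)
print(parse_single_value('abc'))     # 'abc'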
MonashBI/arcana
arcana/utils/base.py
iscontainer
def iscontainer(*items):
    """
    Checks whether all the provided items are containers (i.e. of class
    list, dict, tuple, etc.)
    """
    return all(isinstance(i, Iterable) and not isinstance(i, basestring)
               for i in items)
python
Checks whether all the provided items are containers (i.e. of class list, dict, tuple, etc.)
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/utils/base.py#L161-L167
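Strings are Iterable but deliberately excluded; a Python 3 sketch (collections.abc.Iterable and str stand in for the module's Iterable/basestring imports):

from collections.abc import Iterable

def iscontainer(*items):
    return all(isinstance(i, Iterable) and not isinstance(i, str)
               for i in items)

print(iscontainer([1, 2], {'a': 1}))   # True
print(iscontainer([1, 2], 'abc'))      # False -- strings are not containers here
print(iscontainer(42))                 # False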
MonashBI/arcana
arcana/utils/base.py
find_mismatch
def find_mismatch(first, second, indent=''):
    """
    Finds where two objects differ, iterating down into nested containers
    (i.e. dicts, lists and tuples). They can be nested containers of any
    combination of primary dtypes: str, int, float, dict and list.

    Parameters
    ----------
    first : dict | list | tuple | str | int | float
        The first object to compare
    second : dict | list | tuple | str | int | float
        The other object to compare with the first
    indent : str
        The amount by which newlines in the output string should be
        indented. Provide the actual indent, i.e. a string of spaces.

    Returns
    -------
    mismatch : str
        Human-readable output highlighting where the two containers differ.
    """
    # Basic case where we are dealing with non-containers
    if not (isinstance(first, type(second)) or
            isinstance(second, type(first))):
        mismatch = (' types: self={} v other={}'
                    .format(type(first).__name__, type(second).__name__))
    elif not iscontainer(first, second):
        mismatch = ': self={} v other={}'.format(first, second)
    else:
        sub_indent = indent + ' '
        mismatch = ''
        if isinstance(first, dict):
            if sorted(first.keys()) != sorted(second.keys()):
                mismatch += (' keys: self={} v other={}'
                             .format(sorted(first.keys()),
                                     sorted(second.keys())))
            else:
                mismatch += ":"
                for k in first:
                    if first[k] != second[k]:
                        mismatch += ("\n{indent}'{}' values{}"
                                     .format(k,
                                             find_mismatch(
                                                 first[k], second[k],
                                                 indent=sub_indent),
                                             indent=sub_indent))
        else:
            mismatch += ":"
            for i, (f, s) in enumerate(zip_longest(first, second)):
                if f != s:
                    mismatch += ("\n{indent}{} index{}"
                                 .format(i,
                                         find_mismatch(f, s,
                                                       indent=sub_indent),
                                         indent=sub_indent))
    return mismatch
python
Finds where two objects differ, iterating down into nested containers (i.e. dicts, lists and tuples). They can be nested containers of any combination of primary dtypes: str, int, float, dict and list.

Parameters
----------
first : dict | list | tuple | str | int | float
    The first object to compare
second : dict | list | tuple | str | int | float
    The other object to compare with the first
indent : str
    The amount by which newlines in the output string should be indented. Provide the actual indent, i.e. a string of spaces.

Returns
-------
mismatch : str
    Human-readable output highlighting where the two containers differ.
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/utils/base.py#L170-L225
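Given the function above (with iscontainer and zip_longest in scope), the report for two nested structures looks roughly like this:

from itertools import zip_longest   # izip_longest on Python 2

first = {'a': 1, 'b': [1, 2, 3]}
second = {'a': 1, 'b': [1, 2, 4]}
print('mismatch' + find_mismatch(first, second))
# mismatch:
#  'b' values:
#   2 index: self=3 v other=4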
erikvw/django-collect-offline
django_collect_offline/transaction/deserialize.py
deserialize
def deserialize(json_text=None):
    """Returns a generator of deserialized objects.

    Wraps django deserialize with defaults for JSON and natural keys.

    See https://docs.djangoproject.com/en/2.1/topics/serialization/
    """
    return serializers.deserialize(
        "json",
        json_text,
        ensure_ascii=True,
        use_natural_foreign_keys=True,
        use_natural_primary_keys=False,
    )
python
Returns a generator of deserialized objects. Wraps django deserialize with defaults for JSON and natural keys. See https://docs.djangoproject.com/en/2.1/topics/serialization/
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/deserialize.py#L4-L19
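A round-trip sketch pairing it with Django's serializer; the Author model and a configured Django project are assumptions for illustration:

from django.core import serializers

# Serialize with natural foreign keys to mirror the deserialize defaults
json_text = serializers.serialize("json", Author.objects.all(),
                                  use_natural_foreign_keys=True)

for deserialized in deserialize(json_text=json_text):
    deserialized.save()   # each DeserializedObject wraps the restored instance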
erikvw/django-collect-offline
django_collect_offline/signals.py
create_auth_token
def create_auth_token(sender, instance, raw, created, **kwargs):
    """Create token when a user is created (from rest_framework).
    """
    if not raw:
        if created:
            sender.objects.create(user=instance)
python
Create token when a user is created (from rest_framework).
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/signals.py#L10-L15
erikvw/django-collect-offline
django_collect_offline/signals.py
serialize_m2m_on_save
def serialize_m2m_on_save(sender, action, instance, using, **kwargs):
    """ Part of the serialize transaction process that ensures m2m
    are serialized correctly.

    Skip those not registered.
    """
    if action == "post_add":
        try:
            wrapped_instance = site_offline_models.get_wrapped_instance(instance)
        except ModelNotRegistered:
            pass
        else:
            wrapped_instance.to_outgoing_transaction(using, created=True)
python
Part of the serialize transaction process that ensures m2m are serialized correctly. Skip those not registered.
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/signals.py#L19-L31
erikvw/django-collect-offline
django_collect_offline/signals.py
serialize_on_save
def serialize_on_save(sender, instance, raw, created, using, **kwargs):
    """ Serialize the model instance as an OutgoingTransaction.

    Skip those not registered.
    """
    if not raw:
        if "historical" not in instance._meta.label_lower:
            try:
                wrapped_instance = site_offline_models.get_wrapped_instance(instance)
            except ModelNotRegistered:
                pass
            else:
                wrapped_instance.to_outgoing_transaction(using, created=created)
python
Serialize the model instance as an OutgoingTransaction. Skip those not registered.
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/signals.py#L35-L47
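These receivers follow Django's post_save signature, so wiring one up is the standard signal connection; the dispatch_uid value here is illustrative:

from django.db.models.signals import post_save

post_save.connect(serialize_on_save, dispatch_uid="serialize_on_save")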
erikvw/django-collect-offline
django_collect_offline/signals.py
serialize_history_on_post_create
def serialize_history_on_post_create(history_instance, using, **kwargs):
    """ Serialize the history instance as an OutgoingTransaction.

    Skip those not registered.
    """
    try:
        wrapped_instance = site_offline_models.get_wrapped_instance(history_instance)
    except ModelNotRegistered:
        pass
    else:
        wrapped_instance.to_outgoing_transaction(using, created=True)
python
Serialize the history instance as an OutgoingTransaction. Skip those not registered.
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/signals.py#L55-L65
erikvw/django-collect-offline
django_collect_offline/signals.py
serialize_on_post_delete
def serialize_on_post_delete(sender, instance, using, **kwargs):
    """Creates a serialized OutgoingTransaction when a model
    instance is deleted.

    Skip those not registered.
    """
    try:
        wrapped_instance = site_offline_models.get_wrapped_instance(instance)
    except ModelNotRegistered:
        pass
    else:
        wrapped_instance.to_outgoing_transaction(
            using, created=False, deleted=True)
python
Creates a serialized OutgoingTransaction when a model instance is deleted. Skip those not registered.
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/signals.py#L69-L80
gwww/elkm1
elkm1_lib/settings.py
Setting.set
def set(self, value):
    """(Helper) Set custom value."""
    self._elk.send(cw_encode(self._index, value, self.value_format))
python
(Helper) Set custom value.
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/settings.py#L14-L16
gwww/elkm1
elkm1_lib/settings.py
Settings.sync
def sync(self):
    """Retrieve custom values from ElkM1"""
    self.elk.send(cp_encode())
    self.get_descriptions(TextDescriptions.SETTING.value)
python
Retrieve custom values from ElkM1
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/settings.py#L25-L28
MonashBI/arcana
arcana/data/item.py
Fileset.paths
def paths(self):
    """Iterates through all files in the set"""
    if self.format is None:
        raise ArcanaFileFormatError(
            "Cannot get paths of fileset ({}) that hasn't had its format "
            "set".format(self))
    if self.format.directory:
        return chain(*((op.join(root, f) for f in files)
                       for root, _, files in os.walk(self.path)))
    else:
        return chain([self.path], self.aux_files.values())
python
Iterates through all files in the set
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/item.py#L371-L381
MonashBI/arcana
arcana/data/item.py
Fileset.detect_format
def detect_format(self, candidates):
    """
    Detects the format of the fileset from a list of possible
    candidates. If multiple candidates match the potential files, e.g.
    NiFTI-X (see dcm2niix) and NiFTI, then the first matching candidate
    is selected.

    If a 'format_name' was specified when the fileset was created then
    that is used to select between the candidates. Otherwise the file
    extensions of the primary path and potential auxiliary files, or the
    extensions of the files within the directory (for directory formats),
    are matched against those specified for the file formats.

    Parameters
    ----------
    candidates : list[FileFormat]
        A list of file formats to select from.
    """
    if self._format is not None:
        raise ArcanaFileFormatError(
            "Format has already been set for {}".format(self))
    matches = [c for c in candidates if c.matches(self)]
    if not matches:
        raise ArcanaFileFormatError(
            "None of the candidate file formats ({}) match {}"
            .format(', '.join(str(c) for c in candidates), self))
    return matches[0]
python
Detects the format of the fileset from a list of possible candidates. If multiple candidates match the potential files, e.g. NiFTI-X (see dcm2niix) and NiFTI, then the first matching candidate is selected.

If a 'format_name' was specified when the fileset was created then that is used to select between the candidates. Otherwise the file extensions of the primary path and potential auxiliary files, or the extensions of the files within the directory (for directory formats), are matched against those specified for the file formats.

Parameters
----------
candidates : list[FileFormat]
    A list of file formats to select from.
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/item.py#L493-L519
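A stand-alone sketch of the 'first match wins' selection, with stand-in classes (FileFormat.matches is assumed to do the extension checking described in the docstring):

class FakeFormat:
    def __init__(self, name, ext):
        self.name, self.ext = name, ext

    def matches(self, fileset):
        return fileset.path.endswith(self.ext)

class FakeFileset:
    def __init__(self, path):
        self.path = path

# Order candidates from most to least specific, since the first match wins
candidates = [FakeFormat('nifti_gz', '.nii.gz'), FakeFormat('nifti', '.nii')]
matches = [c for c in candidates if c.matches(FakeFileset('scan.nii.gz'))]
print(matches[0].name)   # nifti_gz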
MonashBI/arcana
arcana/data/item.py
Fileset.contents_equal
def contents_equal(self, other, **kwargs):
    """
    Test the equality of the fileset contents with another fileset. If
    the fileset's format implements a 'contents_equal' method then that
    is used to determine the equality, otherwise a straight comparison
    of the checksums is used.

    Parameters
    ----------
    other : Fileset
        The other fileset to compare to
    """
    if hasattr(self.format, 'contents_equal'):
        equal = self.format.contents_equal(self, other, **kwargs)
    else:
        equal = (self.checksums == other.checksums)
    return equal
python
Test the equality of the fileset contents with another fileset. If the fileset's format implements a 'contents_equal' method then that is used to determine the equality, otherwise a straight comparison of the checksums is used.

Parameters
----------
other : Fileset
    The other fileset to compare to
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/item.py#L542-L558
MonashBI/arcana
arcana/data/spec.py
BaseInputSpec.bind
def bind(self, study, **kwargs):  # @UnusedVariable
    """
    Returns a copy of the AcquiredSpec bound to the given study

    Parameters
    ----------
    study : Study
        A study to bind the fileset spec to (should happen in the study
        __init__)
    """
    if self.default is None:
        # NB: the original message contained a third, unfilled '{}'
        # placeholder, which would have raised an IndexError when the
        # error was actually triggered
        raise ArcanaError(
            "Attempted to bind '{}' to {} but only acquired specs with "
            "a default value should be bound to studies".format(
                self.name, study))
    if self._study is not None:
        # This avoids rebinding specs to sub-studies that have already
        # been bound to the multi-study
        bound = self
    else:
        bound = copy(self)
        bound._study = study
        bound._default = bound.default.bind(study)
    return bound
python
Returns a copy of the AcquiredSpec bound to the given study

Parameters
----------
study : Study
    A study to bind the fileset spec to (should happen in the study __init__)
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/spec.py#L73-L96
MonashBI/arcana
arcana/data/spec.py
BaseSpec.bind
def bind(self, study, **kwargs):  # @UnusedVariable
    """
    Returns a copy of the Spec bound to the given study

    Parameters
    ----------
    study : Study
        A study to bind the fileset spec to (should happen in the study
        __init__)
    """
    if self._study is not None:
        # Avoid rebinding specs in sub-studies that have already
        # been bound to MultiStudy
        bound = self
    else:
        bound = copy(self)
        bound._study = study
        if not hasattr(study, self.pipeline_getter):
            raise ArcanaError(
                "{} does not have a method named '{}' required to "
                "derive {}".format(study, self.pipeline_getter, self))
        bound._bind_tree(study.tree)
    return bound
python
Returns a copy of the Spec bound to the given study

Parameters
----------
study : Study
    A study to bind the fileset spec to (should happen in the study __init__)
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/spec.py#L179-L202
MonashBI/arcana
arcana/data/spec.py
BaseSpec.nodes
def nodes(self, tree):
    """
    Returns the relevant nodes for the spec's frequency
    """
    # Run the match against the tree
    if self.frequency == 'per_session':
        nodes = []
        for subject in tree.subjects:
            for sess in subject.sessions:
                nodes.append(sess)
    elif self.frequency == 'per_subject':
        nodes = tree.subjects
    elif self.frequency == 'per_visit':
        nodes = tree.visits
    elif self.frequency == 'per_study':
        nodes = [tree]
    else:
        assert False, "Unrecognised frequency '{}'".format(
            self.frequency)
    return nodes
python
Returns the relevant nodes for the spec's frequency
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/spec.py#L212-L231
MonashBI/arcana
arcana/data/spec.py
BaseSpec.derivable
def derivable(self):
    """
    Whether the spec (only valid for derived specs) can be derived
    given the inputs and switches provided to the study
    """
    try:
        # Just need to iterate all study inputs and catch relevant
        # exceptions
        list(self.pipeline.study_inputs)
    except (ArcanaOutputNotProducedException,
            ArcanaMissingDataException):
        return False
    return True
python
Whether the spec (only valid for derived specs) can be derived given the inputs and switches provided to the study
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/spec.py#L234-L246
RedHatQE/python-stitches
stitches/structure.py
Structure.reconnect_all
def reconnect_all(self):
    """
    Re-establish connection to all instances
    """
    for role in self.Instances.keys():
        for connection in self.Instances[role]:
            connection.reconnect()
python
Re-establish connection to all instances
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/structure.py#L30-L36
RedHatQE/python-stitches
stitches/structure.py
Structure.add_instance
def add_instance(self, role, instance, username='root',
                 key_filename=None, output_shell=False):
    """
    Add instance to the setup

    @param role: instance's role
    @type role: str
    @param instance: host parameters we would like to establish
                     connection to
    @type instance: dict
    @param username: user name for creating ssh connection
    @type username: str
    @param key_filename: file name with ssh private key
    @type key_filename: str
    @param output_shell: write output from this connection to standard
                         output
    @type output_shell: bool
    """
    if role not in self.Instances:
        self.Instances[role] = []
    self.logger.debug('Adding ' + role + ' with private_hostname ' +
                      instance['private_hostname'] +
                      ', public_hostname ' + instance['public_hostname'])
    self.Instances[role].append(
        Connection(instance, username, key_filename,
                   output_shell=output_shell))
python
Add instance to the setup

@param role: instance's role
@type role: str
@param instance: host parameters we would like to establish connection to
@type instance: dict
@param username: user name for creating ssh connection
@type username: str
@param key_filename: file name with ssh private key
@type key_filename: str
@param output_shell: write output from this connection to standard output
@type output_shell: bool
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/structure.py#L38-L72
RedHatQE/python-stitches
stitches/structure.py
Structure.setup_from_yamlfile
def setup_from_yamlfile(self, yamlfile, output_shell=False):
    """
    Setup from yaml config

    @param yamlfile: path to yaml config file
    @type yamlfile: str
    @param output_shell: write output from this connection to standard
                         output
    @type output_shell: bool
    """
    self.logger.debug('Loading config from ' + yamlfile)
    with open(yamlfile, 'r') as yamlfd:
        # NB: yaml.load without an explicit Loader is deprecated in
        # PyYAML >= 5.1 and unsafe on untrusted input
        yamlconfig = yaml.load(yamlfd)
    for instance in yamlconfig['Instances']:
        self.add_instance(instance['role'].upper(), instance,
                          output_shell=output_shell)
    if 'Config' in yamlconfig.keys():
        self.logger.debug('Config found: ' + str(yamlconfig['Config']))
        self.config = yamlconfig['Config'].copy()
python
Setup from yaml config

@param yamlfile: path to yaml config file
@type yamlfile: str
@param output_shell: write output from this connection to standard output
@type output_shell: bool
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/structure.py#L74-L94
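Based on the keys the loader reads ('Instances' with role/private_hostname/public_hostname entries and an optional 'Config' block), a config it would accept looks like the following; all values are illustrative:

yaml_text = """
Instances:
  - role: server
    private_hostname: 10.0.0.5
    public_hostname: server.example.com
  - role: client
    private_hostname: 10.0.0.6
    public_hostname: client.example.com
Config:
  setup_name: smoke-test
"""
# structure.setup_from_yamlfile('stitches.yaml') would then populate
# structure.Instances['SERVER'] and structure.Instances['CLIENT']
# (the loader upper-cases the role names)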
brentp/toolshed
toolshed/files.py
process_iter
def process_iter(proc, cmd=""): """ helper function to iterate over a process stdout and report error messages when done """ try: for l in proc.stdout: yield l finally: if proc.poll() is None: # there was an exception return else: proc.wait() if proc.returncode not in (0, None, signal.SIGPIPE, signal.SIGPIPE + 128): sys.stderr.write("cmd was:%s\n" % cmd) sys.stderr.write("return code was:%s\n" % proc.returncode) raise ProcessException(cmd)
python
def process_iter(proc, cmd=""): """ helper function to iterate over a process stdout and report error messages when done """ try: for l in proc.stdout: yield l finally: if proc.poll() is None: # there was an exception return else: proc.wait() if proc.returncode not in (0, None, signal.SIGPIPE, signal.SIGPIPE + 128): sys.stderr.write("cmd was:%s\n" % cmd) sys.stderr.write("return code was:%s\n" % proc.returncode) raise ProcessException(cmd)
[ "def", "process_iter", "(", "proc", ",", "cmd", "=", "\"\"", ")", ":", "try", ":", "for", "l", "in", "proc", ".", "stdout", ":", "yield", "l", "finally", ":", "if", "proc", ".", "poll", "(", ")", "is", "None", ":", "# there was an exception", "return", "else", ":", "proc", ".", "wait", "(", ")", "if", "proc", ".", "returncode", "not", "in", "(", "0", ",", "None", ",", "signal", ".", "SIGPIPE", ",", "signal", ".", "SIGPIPE", "+", "128", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"cmd was:%s\\n\"", "%", "cmd", ")", "sys", ".", "stderr", ".", "write", "(", "\"return code was:%s\\n\"", "%", "proc", ".", "returncode", ")", "raise", "ProcessException", "(", "cmd", ")" ]
helper function to iterate over a process stdout and report error messages when done
[ "helper", "function", "to", "iterate", "over", "a", "process", "stdout", "and", "report", "error", "messages", "when", "done" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/files.py#L44-L61
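Usage note: a minimal sketch of pairing process_iter with subprocess.Popen (the command string and file are hypothetical, not from the source; process_iter itself is the function above from toolshed/files.py).

import subprocess

cmd = "gzip -dc example.txt.gz"  # hypothetical command
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                        universal_newlines=True)
for line in process_iter(proc, cmd=cmd):
    # lines stream through as the process produces them; a non-zero
    # exit status raises ProcessException naming the offending command
    print(line.rstrip())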
brentp/toolshed
toolshed/files.py
header
def header(fname, sep="\t"): """ just grab the header from a given file """ fh = iter(nopen(fname)) h = tokens(next(fh), sep) h[0] = h[0].lstrip("#") return h
python
def header(fname, sep="\t"): """ just grab the header from a given file """ fh = iter(nopen(fname)) h = tokens(next(fh), sep) h[0] = h[0].lstrip("#") return h
[ "def", "header", "(", "fname", ",", "sep", "=", "\"\\t\"", ")", ":", "fh", "=", "iter", "(", "nopen", "(", "fname", ")", ")", "h", "=", "tokens", "(", "next", "(", "fh", ")", ",", "sep", ")", "h", "[", "0", "]", "=", "h", "[", "0", "]", ".", "lstrip", "(", "\"#\"", ")", "return", "h" ]
just grab the header from a given file
[ "just", "grab", "the", "header", "from", "a", "given", "file" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/files.py#L164-L171
brentp/toolshed
toolshed/files.py
reader
def reader(fname, header=True, sep="\t", skip_while=None, quotechar='"'): r""" for each row in the file `fname` generate dicts If `header` is True or lists if `header` is False. The dict keys are drawn from the first line. If `header` is a list of names, those will be used as the dict keys. If `header` is 'ordered', the function will yield ordered dicts. If `header` is a callable, it will be called for each row. If `header` is collections.namedtuple, a namedtuple object will be yielded for each row using the first row as the names. skip_while is a function that returns False when it is ready to start consuming. this could be something like: skip_while = lambda toks: toks[0].startswith('#') >>> import sys >>> if sys.version_info[0] < 3: ... from StringIO import StringIO ... else: ... from io import StringIO >>> get_str = lambda : StringIO("a\tb\tname\n1\t2\tfred\n11\t22\tjane") >>> expected = [{'a': '1', 'b': '2', 'name': 'fred'}, ... {'a': '11', 'b': '22', 'name': 'jane'}] >>> list(reader(get_str())) == expected True >>> expected = [['a', 'b', 'name'], ... ['1', '2', 'fred'], ['11', '22', 'jane']] >>> list(reader(get_str(), header=False)) == expected True """ if isinstance(fname, int_types): fname = sys.argv[fname] if not isinstance(fname, basestring) and \ isinstance(fname, types.GeneratorType): line_gen = fname elif sep is None: def _line_gen(f, sep): if sep is None: for line in nopen(f): yield line.rstrip("\r\n").split() line_gen = _line_gen(fname, sep) elif isinstance(fname, basestring) and fname.endswith((".xls")): line_gen = xls_reader(fname) else: # simple separator, e.g. comma or "\t, "\s", etc. if len(sep) == 1 or (len(sep) == 2 and sep[0] == '\\'): dialect = csv.excel dialect.delimiter = sep dialect.quotechar = quotechar if not quotechar: dialect.quoting = csv.QUOTE_NONE line_gen = csv.reader(nopen(fname), dialect=dialect) else: # sep is a regex. import re sep = re.compile(sep) def _re_line_gen(f, sep): for line in nopen(f): yield sep.split(line.rstrip("\r\n")) line_gen = _re_line_gen(fname, sep) if skip_while: from itertools import chain l = next(line_gen) while skip_while(l): l = next(line_gen) line_gen = chain.from_iterable(([l], line_gen)) if header is namedtuple: header = next(line_gen) header[0] = header[0].lstrip("#") nt = namedtuple('namedtuple', header) for toks in line_gen: yield nt._make(toks) return # they sent in a class or function that accepts the toks. elif callable(header): for toks in line_gen: yield header(toks) return a_dict = dict # if header is 'ordered', then use an ordered dictionary. if header == "ordered": try: from collections import OrderedDict as a_dict except ImportError: from ordereddict import OrderedDict as a_dict header = True if header is True: header = next(line_gen) header[0] = header[0].lstrip("#") if header: for toks in line_gen: yield a_dict(izip(header, toks)) else: for toks in line_gen: yield toks
python
def reader(fname, header=True, sep="\t", skip_while=None, quotechar='"'): r""" for each row in the file `fname` generate dicts If `header` is True or lists if `header` is False. The dict keys are drawn from the first line. If `header` is a list of names, those will be used as the dict keys. If `header` is 'ordered', the function will yield ordered dicts. If `header` is a callable, it will be called for each row. If `header` is collections.namedtuple, a namedtuple object will be yielded for each row using the first row as the names. skip_while is a function that returns False when it is ready to start consuming. this could be something like: skip_while = lambda toks: toks[0].startswith('#') >>> import sys >>> if sys.version_info[0] < 3: ... from StringIO import StringIO ... else: ... from io import StringIO >>> get_str = lambda : StringIO("a\tb\tname\n1\t2\tfred\n11\t22\tjane") >>> expected = [{'a': '1', 'b': '2', 'name': 'fred'}, ... {'a': '11', 'b': '22', 'name': 'jane'}] >>> list(reader(get_str())) == expected True >>> expected = [['a', 'b', 'name'], ... ['1', '2', 'fred'], ['11', '22', 'jane']] >>> list(reader(get_str(), header=False)) == expected True """ if isinstance(fname, int_types): fname = sys.argv[fname] if not isinstance(fname, basestring) and \ isinstance(fname, types.GeneratorType): line_gen = fname elif sep is None: def _line_gen(f, sep): if sep is None: for line in nopen(f): yield line.rstrip("\r\n").split() line_gen = _line_gen(fname, sep) elif isinstance(fname, basestring) and fname.endswith((".xls")): line_gen = xls_reader(fname) else: # simple separator, e.g. comma or "\t, "\s", etc. if len(sep) == 1 or (len(sep) == 2 and sep[0] == '\\'): dialect = csv.excel dialect.delimiter = sep dialect.quotechar = quotechar if not quotechar: dialect.quoting = csv.QUOTE_NONE line_gen = csv.reader(nopen(fname), dialect=dialect) else: # sep is a regex. import re sep = re.compile(sep) def _re_line_gen(f, sep): for line in nopen(f): yield sep.split(line.rstrip("\r\n")) line_gen = _re_line_gen(fname, sep) if skip_while: from itertools import chain l = next(line_gen) while skip_while(l): l = next(line_gen) line_gen = chain.from_iterable(([l], line_gen)) if header is namedtuple: header = next(line_gen) header[0] = header[0].lstrip("#") nt = namedtuple('namedtuple', header) for toks in line_gen: yield nt._make(toks) return # they sent in a class or function that accepts the toks. elif callable(header): for toks in line_gen: yield header(toks) return a_dict = dict # if header is 'ordered', then use an ordered dictionary. if header == "ordered": try: from collections import OrderedDict as a_dict except ImportError: from ordereddict import OrderedDict as a_dict header = True if header is True: header = next(line_gen) header[0] = header[0].lstrip("#") if header: for toks in line_gen: yield a_dict(izip(header, toks)) else: for toks in line_gen: yield toks
[ "def", "reader", "(", "fname", ",", "header", "=", "True", ",", "sep", "=", "\"\\t\"", ",", "skip_while", "=", "None", ",", "quotechar", "=", "'\"'", ")", ":", "if", "isinstance", "(", "fname", ",", "int_types", ")", ":", "fname", "=", "sys", ".", "argv", "[", "fname", "]", "if", "not", "isinstance", "(", "fname", ",", "basestring", ")", "and", "isinstance", "(", "fname", ",", "types", ".", "GeneratorType", ")", ":", "line_gen", "=", "fname", "elif", "sep", "is", "None", ":", "def", "_line_gen", "(", "f", ",", "sep", ")", ":", "if", "sep", "is", "None", ":", "for", "line", "in", "nopen", "(", "f", ")", ":", "yield", "line", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", ")", "line_gen", "=", "_line_gen", "(", "fname", ",", "sep", ")", "elif", "isinstance", "(", "fname", ",", "basestring", ")", "and", "fname", ".", "endswith", "(", "(", "\".xls\"", ")", ")", ":", "line_gen", "=", "xls_reader", "(", "fname", ")", "else", ":", "# simple separator, e.g. comma or \"\\t, \"\\s\", etc.", "if", "len", "(", "sep", ")", "==", "1", "or", "(", "len", "(", "sep", ")", "==", "2", "and", "sep", "[", "0", "]", "==", "'\\\\'", ")", ":", "dialect", "=", "csv", ".", "excel", "dialect", ".", "delimiter", "=", "sep", "dialect", ".", "quotechar", "=", "quotechar", "if", "not", "quotechar", ":", "dialect", ".", "quoting", "=", "csv", ".", "QUOTE_NONE", "line_gen", "=", "csv", ".", "reader", "(", "nopen", "(", "fname", ")", ",", "dialect", "=", "dialect", ")", "else", ":", "# sep is a regex.", "import", "re", "sep", "=", "re", ".", "compile", "(", "sep", ")", "def", "_re_line_gen", "(", "f", ",", "sep", ")", ":", "for", "line", "in", "nopen", "(", "f", ")", ":", "yield", "sep", ".", "split", "(", "line", ".", "rstrip", "(", "\"\\r\\n\"", ")", ")", "line_gen", "=", "_re_line_gen", "(", "fname", ",", "sep", ")", "if", "skip_while", ":", "from", "itertools", "import", "chain", "l", "=", "next", "(", "line_gen", ")", "while", "skip_while", "(", "l", ")", ":", "l", "=", "next", "(", "line_gen", ")", "line_gen", "=", "chain", ".", "from_iterable", "(", "(", "[", "l", "]", ",", "line_gen", ")", ")", "if", "header", "is", "namedtuple", ":", "header", "=", "next", "(", "line_gen", ")", "header", "[", "0", "]", "=", "header", "[", "0", "]", ".", "lstrip", "(", "\"#\"", ")", "nt", "=", "namedtuple", "(", "'namedtuple'", ",", "header", ")", "for", "toks", "in", "line_gen", ":", "yield", "nt", ".", "_make", "(", "toks", ")", "raise", "StopIteration", "# they sent in a class or function that accepts the toks.", "elif", "callable", "(", "header", ")", ":", "for", "toks", "in", "line_gen", ":", "yield", "header", "(", "toks", ")", "raise", "StopIteration", "a_dict", "=", "dict", "# if header is 'ordered', then use an ordered dictionary.", "if", "header", "==", "\"ordered\"", ":", "try", ":", "from", "collections", "import", "OrderedDict", "as", "a_dict", "except", "ImportError", ":", "from", "ordereddict", "import", "OrderedDict", "as", "a_dict", "header", "=", "True", "if", "header", "is", "True", ":", "header", "=", "next", "(", "line_gen", ")", "header", "[", "0", "]", "=", "header", "[", "0", "]", ".", "lstrip", "(", "\"#\"", ")", "if", "header", ":", "for", "toks", "in", "line_gen", ":", "yield", "a_dict", "(", "izip", "(", "header", ",", "toks", ")", ")", "else", ":", "for", "toks", "in", "line_gen", ":", "yield", "toks" ]
r""" for each row in the file `fname` generate dicts If `header` is True or lists if `header` is False. The dict keys are drawn from the first line. If `header` is a list of names, those will be used as the dict keys. If `header` is 'ordered', the function will yield ordered dicts. If `header` is a callable, it will be called for each row. If `header` is collections.namedtuple, a namedtuple object will be yielded for each row using the first row as the names. skip_while is a function that returns False when it is ready to start consuming. this could be something like: skip_while = lambda toks: toks[0].startswith('#') >>> import sys >>> if sys.version_info[0] < 3: ... from StringIO import StringIO ... else: ... from io import StringIO >>> get_str = lambda : StringIO("a\tb\tname\n1\t2\tfred\n11\t22\tjane") >>> expected = [{'a': '1', 'b': '2', 'name': 'fred'}, ... {'a': '11', 'b': '22', 'name': 'jane'}] >>> list(reader(get_str())) == expected True >>> expected = [['a', 'b', 'name'], ... ['1', '2', 'fred'], ['11', '22', 'jane']] >>> list(reader(get_str(), header=False)) == expected True
[ "r", "for", "each", "row", "in", "the", "file", "fname", "generate", "dicts", "If", "header", "is", "True", "or", "lists", "if", "header", "is", "False", ".", "The", "dict", "keys", "are", "drawn", "from", "the", "first", "line", ".", "If", "header", "is", "a", "list", "of", "names", "those", "will", "be", "used", "as", "the", "dict", "keys", ".", "If", "header", "is", "ordered", "the", "function", "will", "yield", "ordered", "dicts", ".", "If", "header", "is", "a", "callable", "it", "will", "be", "called", "for", "each", "row", ".", "If", "header", "is", "collections", ".", "namedtuple", "a", "namedtuple", "object", "will", "be", "yielded", "for", "each", "row", "using", "the", "first", "row", "as", "the", "names", ".", "skip_while", "is", "a", "function", "that", "returns", "False", "when", "it", "is", "ready", "to", "start", "consuming", ".", "this", "could", "be", "something", "like", ":" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/files.py#L180-L282
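Usage note: beyond the doctests above, the header modes of reader can be sketched as follows (the file name is hypothetical).

from collections import namedtuple

for row in reader("data.tsv"):                  # header=True: dicts keyed by the first line
    print(row["name"])

for row in reader("data.tsv", header=namedtuple):
    print(row.name)                             # attribute access via a generated namedtuple

for row in reader("data.tsv", header=False):
    print(row)                                  # plain token lists, header row included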
brentp/toolshed
toolshed/files.py
is_newer_b
def is_newer_b(a, bfiles): """ check that all b files have been modified more recently than a """ if isinstance(bfiles, basestring): bfiles = [bfiles] if not op.exists(a): return False if not all(op.exists(b) for b in bfiles): return False atime = os.stat(a).st_mtime # modification time for b in bfiles: # a has been modified since if atime > os.stat(b).st_mtime: return False return True
python
def is_newer_b(a, bfiles): """ check that all b files have been modified more recently than a """ if isinstance(bfiles, basestring): bfiles = [bfiles] if not op.exists(a): return False if not all(op.exists(b) for b in bfiles): return False atime = os.stat(a).st_mtime # modification time for b in bfiles: # a has been modified since if atime > os.stat(b).st_mtime: return False return True
[ "def", "is_newer_b", "(", "a", ",", "bfiles", ")", ":", "if", "isinstance", "(", "bfiles", ",", "basestring", ")", ":", "bfiles", "=", "[", "bfiles", "]", "if", "not", "op", ".", "exists", "(", "a", ")", ":", "return", "False", "if", "not", "all", "(", "op", ".", "exists", "(", "b", ")", "for", "b", "in", "bfiles", ")", ":", "return", "False", "atime", "=", "os", ".", "stat", "(", "a", ")", ".", "st_mtime", "# modification time", "for", "b", "in", "bfiles", ":", "# a has been modified since", "if", "atime", ">", "os", ".", "stat", "(", "b", ")", ".", "st_mtime", ":", "return", "False", "return", "True" ]
check that all b files have been modified more recently than a
[ "check", "that", "all", "b", "files", "have", "been", "modified", "more", "recently", "than", "a" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/files.py#L284-L299
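Usage note: is_newer_b supports make-style freshness checks; a small sketch with hypothetical file names. It returns True only when the source and every output exist and no output predates the source.

if is_newer_b("input.bed", ["sorted.bed", "sorted.bed.idx"]):
    print("outputs are up to date; skipping")
else:
    print("regenerating outputs")  # the rebuild step would go here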
RedHatQE/python-stitches
stitches/expect.py
Expect.expect_list
def expect_list(connection, regexp_list, timeout=10): ''' Expect a list of expressions @param connection: Connection to the host @type connection: L{Connection} @param regexp_list: regular expressions and associated return values @type regexp_list: list of (regexp, return value) @param timeout: timeout for performing expect operation @type timeout: int @return: proper return value from regexp_list @rtype: return value @raises ExpectFailed ''' result = "" count = 0 while count < timeout: try: recv_part = connection.channel.recv(32768).decode() logging.getLogger('stitches.expect').debug("RCV: " + recv_part) if connection.output_shell: sys.stdout.write(recv_part) result += recv_part except socket.timeout: # socket.timeout here means 'no more data' pass for (regexp, retvalue) in regexp_list: # search for the first matching regexp and return desired value if re.match(regexp, result): return retvalue time.sleep(1) count += 1 raise ExpectFailed(result)
python
def expect_list(connection, regexp_list, timeout=10): ''' Expect a list of expressions @param connection: Connection to the host @type connection: L{Connection} @param regexp_list: regular expressions and associated return values @type regexp_list: list of (regexp, return value) @param timeout: timeout for performing expect operation @type timeout: int @return: proper return value from regexp_list @rtype: return value @raises ExpectFailed ''' result = "" count = 0 while count < timeout: try: recv_part = connection.channel.recv(32768).decode() logging.getLogger('stitches.expect').debug("RCV: " + recv_part) if connection.output_shell: sys.stdout.write(recv_part) result += recv_part except socket.timeout: # socket.timeout here means 'no more data' pass for (regexp, retvalue) in regexp_list: # search for the first matching regexp and return desired value if re.match(regexp, result): return retvalue time.sleep(1) count += 1 raise ExpectFailed(result)
[ "def", "expect_list", "(", "connection", ",", "regexp_list", ",", "timeout", "=", "10", ")", ":", "result", "=", "\"\"", "count", "=", "0", "while", "count", "<", "timeout", ":", "try", ":", "recv_part", "=", "connection", ".", "channel", ".", "recv", "(", "32768", ")", ".", "decode", "(", ")", "logging", ".", "getLogger", "(", "'stitches.expect'", ")", ".", "debug", "(", "\"RCV: \"", "+", "recv_part", ")", "if", "connection", ".", "output_shell", ":", "sys", ".", "stdout", ".", "write", "(", "recv_part", ")", "result", "+=", "recv_part", "except", "socket", ".", "timeout", ":", "# socket.timeout here means 'no more data'", "pass", "for", "(", "regexp", ",", "retvalue", ")", "in", "regexp_list", ":", "# search for the first matching regexp and return desired value", "if", "re", ".", "match", "(", "regexp", ",", "result", ")", ":", "return", "retvalue", "time", ".", "sleep", "(", "1", ")", "count", "+=", "1", "raise", "ExpectFailed", "(", "result", ")" ]
Expect a list of expressions @param connection: Connection to the host @type connection: L{Connection} @param regexp_list: regular expressions and associated return values @type regexp_list: list of (regexp, return value) @param timeout: timeout for performing expect operation @type timeout: int @return: proper return value from regexp_list @rtype: return value @raises ExpectFailed
[ "Expect", "a", "list", "of", "expressions" ]
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L26-L63
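Usage note: a hedged sketch of expect_list classifying whichever prompt shows up first on an open stitches Connection (the connection object and the patterns are assumptions for illustration).

import re

state = Expect.expect_list(
    con,  # an established stitches Connection (assumed)
    [(re.compile(".*[Pp]assword:.*", re.DOTALL), "needs-password"),
     (re.compile(".*\\$ ?$", re.DOTALL), "shell-ready")],
    timeout=15)
print(state)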
RedHatQE/python-stitches
stitches/expect.py
Expect.expect
def expect(connection, strexp, timeout=10): ''' Expect one expression @param connection: Connection to the host @type connection: L{Connection} @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' return Expect.expect_list(connection, [(re.compile(".*" + strexp + ".*", re.DOTALL), True)], timeout)
python
def expect(connection, strexp, timeout=10): ''' Expect one expression @param connection: Connection to the host @type connection: L{Connection} @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' return Expect.expect_list(connection, [(re.compile(".*" + strexp + ".*", re.DOTALL), True)], timeout)
[ "def", "expect", "(", "connection", ",", "strexp", ",", "timeout", "=", "10", ")", ":", "return", "Expect", ".", "expect_list", "(", "connection", ",", "[", "(", "re", ".", "compile", "(", "\".*\"", "+", "strexp", "+", "\".*\"", ",", "re", ".", "DOTALL", ")", ",", "True", ")", "]", ",", "timeout", ")" ]
Expect one expression @param connection: Connection to the host @type connection: L{Connection} @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed
[ "Expect", "one", "expression" ]
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L66-L87
RedHatQE/python-stitches
stitches/expect.py
Expect.match
def match(connection, regexp, grouplist=[1], timeout=10): ''' Match against an expression @param connection: Connection to the host @type connection: L{Connection} @param regexp: compiled regular expression @type regexp: L{SRE_Pattern} @param grouplist: list of groups to return @type grouplist: list of int @param timeout: timeout for performing expect operation @type timeout: int @return: matched string @rtype: str @raises ExpectFailed ''' logging.getLogger('stitches.expect').debug("MATCHING: " + regexp.pattern) result = "" count = 0 while count < timeout: try: recv_part = connection.channel.recv(32768).decode() logging.getLogger('stitches.expect').debug("RCV: " + recv_part) if connection.output_shell: sys.stdout.write(recv_part) result += recv_part except socket.timeout: # socket.timeout here means 'no more data' pass match = regexp.match(result) if match: ret_list = [] for group in grouplist: logging.getLogger('stitches.expect').debug("matched: " + match.group(group)) ret_list.append(match.group(group)) return ret_list time.sleep(1) count += 1 raise ExpectFailed(result)
python
def match(connection, regexp, grouplist=[1], timeout=10): ''' Match against an expression @param connection: Connection to the host @type connection: L{Connection} @param regexp: compiled regular expression @type regexp: L{SRE_Pattern} @param grouplist: list of groups to return @type grouplist: list of int @param timeout: timeout for performing expect operation @type timeout: int @return: matched string @rtype: str @raises ExpectFailed ''' logging.getLogger('stitches.expect').debug("MATCHING: " + regexp.pattern) result = "" count = 0 while count < timeout: try: recv_part = connection.channel.recv(32768).decode() logging.getLogger('stitches.expect').debug("RCV: " + recv_part) if connection.output_shell: sys.stdout.write(recv_part) result += recv_part except socket.timeout: # socket.timeout here means 'no more data' pass match = regexp.match(result) if match: ret_list = [] for group in grouplist: logging.getLogger('stitches.expect').debug("matched: " + match.group(group)) ret_list.append(match.group(group)) return ret_list time.sleep(1) count += 1 raise ExpectFailed(result)
[ "def", "match", "(", "connection", ",", "regexp", ",", "grouplist", "=", "[", "1", "]", ",", "timeout", "=", "10", ")", ":", "logging", ".", "getLogger", "(", "'stitches.expect'", ")", ".", "debug", "(", "\"MATCHING: \"", "+", "regexp", ".", "pattern", ")", "result", "=", "\"\"", "count", "=", "0", "while", "count", "<", "timeout", ":", "try", ":", "recv_part", "=", "connection", ".", "channel", ".", "recv", "(", "32768", ")", ".", "decode", "(", ")", "logging", ".", "getLogger", "(", "'stitches.expect'", ")", ".", "debug", "(", "\"RCV: \"", "+", "recv_part", ")", "if", "connection", ".", "output_shell", ":", "sys", ".", "stdout", ".", "write", "(", "recv_part", ")", "result", "+=", "recv_part", "except", "socket", ".", "timeout", ":", "# socket.timeout here means 'no more data'", "pass", "match", "=", "regexp", ".", "match", "(", "result", ")", "if", "match", ":", "ret_list", "=", "[", "]", "for", "group", "in", "grouplist", ":", "logging", ".", "getLogger", "(", "'stitches.expect'", ")", ".", "debug", "(", "\"matched: \"", "+", "match", ".", "group", "(", "group", ")", ")", "ret_list", ".", "append", "(", "match", ".", "group", "(", "group", ")", ")", "return", "ret_list", "time", ".", "sleep", "(", "1", ")", "count", "+=", "1", "raise", "ExpectFailed", "(", "result", ")" ]
Match against an expression @param connection: Connection to the host @type connection: L{Connection} @param regexp: compiled regular expression @type regexp: L{SRE_Pattern} @param grouplist: list of groups to return @type grouplist: list of int @param timeout: timeout for performing expect operation @type timeout: int @return: matched string @rtype: str @raises ExpectFailed
[ "Match", "against", "an", "expression" ]
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L90-L134
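Usage note: because match() applies the compiled pattern to the whole accumulated buffer, patterns conventionally start with .* and use re.DOTALL. A sketch (connection con is assumed; Expect.enter is defined in the same module).

import re

Expect.enter(con, "uname -r")
(release,) = Expect.match(
    con, re.compile(".*uname -r\r\n(.*?)\r\n.*", re.DOTALL), grouplist=[1])
print(release)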
RedHatQE/python-stitches
stitches/expect.py
Expect.ping_pong
def ping_pong(connection, command, strexp, timeout=10): ''' Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' Expect.enter(connection, command) return Expect.expect(connection, strexp, timeout)
python
def ping_pong(connection, command, strexp, timeout=10): ''' Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' Expect.enter(connection, command) return Expect.expect(connection, strexp, timeout)
[ "def", "ping_pong", "(", "connection", ",", "command", ",", "strexp", ",", "timeout", "=", "10", ")", ":", "Expect", ".", "enter", "(", "connection", ",", "command", ")", "return", "Expect", ".", "expect", "(", "connection", ",", "strexp", ",", "timeout", ")" ]
Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed
[ "Enter", "a", "command", "and", "wait", "for", "something", "to", "happen", "(", "enter", "+", "expect", "combined", ")" ]
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L153-L176
RedHatQE/python-stitches
stitches/expect.py
Expect.expect_retval
def expect_retval(connection, command, expected_status=0, timeout=10): ''' Run command and expect specified return value @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param expected_status: expected return value @type expected_status: int @param timeout: timeout for performing expect operation @type timeout: int @return: return value @rtype: int @raises ExpectFailed ''' retval = connection.recv_exit_status(command, timeout) if retval is None: raise ExpectFailed("Got timeout (%i seconds) while executing '%s'" % (timeout, command)) elif retval != expected_status: raise ExpectFailed("Got %s exit status (%s expected)\ncmd: %s\nstdout: %s\nstderr: %s" % (retval, expected_status, connection.last_command, connection.last_stdout, connection.last_stderr)) if connection.output_shell: sys.stdout.write("Run '%s', got %i return value\n" % (command, retval)) return retval
python
def expect_retval(connection, command, expected_status=0, timeout=10): ''' Run command and expect specified return value @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param expected_status: expected return value @type expected_status: int @param timeout: timeout for performing expect operation @type timeout: int @return: return value @rtype: int @raises ExpectFailed ''' retval = connection.recv_exit_status(command, timeout) if retval is None: raise ExpectFailed("Got timeout (%i seconds) while executing '%s'" % (timeout, command)) elif retval != expected_status: raise ExpectFailed("Got %s exit status (%s expected)\ncmd: %s\nstdout: %s\nstderr: %s" % (retval, expected_status, connection.last_command, connection.last_stdout, connection.last_stderr)) if connection.output_shell: sys.stdout.write("Run '%s', got %i return value\n" % (command, retval)) return retval
[ "def", "expect_retval", "(", "connection", ",", "command", ",", "expected_status", "=", "0", ",", "timeout", "=", "10", ")", ":", "retval", "=", "connection", ".", "recv_exit_status", "(", "command", ",", "timeout", ")", "if", "retval", "is", "None", ":", "raise", "ExpectFailed", "(", "\"Got timeout (%i seconds) while executing '%s'\"", "%", "(", "timeout", ",", "command", ")", ")", "elif", "retval", "!=", "expected_status", ":", "raise", "ExpectFailed", "(", "\"Got %s exit status (%s expected)\\ncmd: %s\\nstdout: %s\\nstderr: %s\"", "%", "(", "retval", ",", "expected_status", ",", "connection", ".", "last_command", ",", "connection", ".", "last_stdout", ",", "connection", ".", "last_stderr", ")", ")", "if", "connection", ".", "output_shell", ":", "sys", ".", "stdout", ".", "write", "(", "\"Run '%s', got %i return value\\n\"", "%", "(", "command", ",", "retval", ")", ")", "return", "retval" ]
Run command and expect specified return value @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param expected_status: expected return value @type expected_status: int @param timeout: timeout for performing expect operation @type timeout: int @return: return value @rtype: int @raises ExpectFailed
[ "Run", "command", "and", "expect", "specified", "return", "valud" ]
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L179-L211
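Usage note: expect_retval is the simplest way to assert a remote command's exit status (connection con assumed).

# Passes when the command exits 0; raises ExpectFailed otherwise.
Expect.expect_retval(con, "test -f /etc/hosts")

# A non-zero status can be asserted explicitly.
Expect.expect_retval(con, "test -f /no/such/file", expected_status=1)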
gwww/elkm1
elkm1_lib/keypads.py
Keypads.sync
def sync(self): """Retrieve keypads from ElkM1""" self.elk.send(ka_encode()) self.get_descriptions(TextDescriptions.KEYPAD.value)
python
def sync(self): """Retrieve keypads from ElkM1""" self.elk.send(ka_encode()) self.get_descriptions(TextDescriptions.KEYPAD.value)
[ "def", "sync", "(", "self", ")", ":", "self", ".", "elk", ".", "send", "(", "ka_encode", "(", ")", ")", "self", ".", "get_descriptions", "(", "TextDescriptions", ".", "KEYPAD", ".", "value", ")" ]
Retrieve keypads from ElkM1
[ "Retrieve", "areas", "from", "ElkM1" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/keypads.py#L32-L35
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytag.py
TagProcessor.find
def find(self, soup): '''Yield tags matching the tag criterion from a soup. There is no need to override this if you are satisfied with finding tags that match match_criterion. Args: soup: A BeautifulSoup to search through. Yields: BeautifulSoup Tags that match the criterion. ''' for tag in soup.recursiveChildGenerator(): if self.match_criterion(tag): yield tag
python
def find(self, soup): '''Yield tags matching the tag criterion from a soup. There is no need to override this if you are satisfied with finding tags that match match_criterion. Args: soup: A BeautifulSoup to search through. Yields: BeautifulSoup Tags that match the criterion. ''' for tag in soup.recursiveChildGenerator(): if self.match_criterion(tag): yield tag
[ "def", "find", "(", "self", ",", "soup", ")", ":", "for", "tag", "in", "soup", ".", "recursiveChildGenerator", "(", ")", ":", "if", "self", ".", "match_criterion", "(", "tag", ")", ":", "yield", "tag" ]
Yield tags matching the tag criterion from a soup. There is no need to override this if you are satisfied with finding tags that match match_criterion. Args: soup: A BeautifulSoup to search through. Yields: BeautifulSoup Tags that match the criterion.
[ "Yield", "tags", "matching", "the", "tag", "criterion", "from", "a", "soup", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytag.py#L68-L82
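Usage note: a sketch of running a processor over a parsed Doxygen tag file. The file name is hypothetical, the "xml" parser assumes lxml is installed, and functionTagProcessor (defined later in this file) is assumed to be constructible with defaults.

from bs4 import BeautifulSoup

with open("project.tag") as fh:
    soup = BeautifulSoup(fh, "xml")

proc = functionTagProcessor()
for tag in proc.find(soup):
    print(proc.get_name(tag), "->", proc.get_filename(tag))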
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytag.py
TagProcessor.get_name
def get_name(self, tag): '''Extract and return a representative "name" from a tag. Override as necessary. get_name's output can be controlled through keyword arguments that are provided when initializing a TagProcessor. For instance, a member of a class or namespace can have its parent scope included in the name by passing include_parent_scopes=True to __init__(). Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as an entry name in a Zeal database. ''' name = tag.findChild('name').contents[0] if self.include_parent_scopes: # Include parent scope in returned name parent_tag = tag.findParent() if parent_tag.get('kind') in ['class', 'struct', 'namespace']: name = parent_tag.findChild('name').contents[0] + '::' + name return name
python
def get_name(self, tag): '''Extract and return a representative "name" from a tag. Override as necessary. get_name's output can be controlled through keyword arguments that are provided when initializing a TagProcessor. For instance, a member of a class or namespace can have its parent scope included in the name by passing include_parent_scopes=True to __init__(). Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as an entry name in a Zeal database. ''' name = tag.findChild('name').contents[0] if self.include_parent_scopes: # Include parent scope in returned name parent_tag = tag.findParent() if parent_tag.get('kind') in ['class', 'struct', 'namespace']: name = parent_tag.findChild('name').contents[0] + '::' + name return name
[ "def", "get_name", "(", "self", ",", "tag", ")", ":", "name", "=", "tag", ".", "findChild", "(", "'name'", ")", ".", "contents", "[", "0", "]", "if", "self", ".", "include_parent_scopes", ":", "# Include parent scope in returned name", "parent_tag", "=", "tag", ".", "findParent", "(", ")", "if", "parent_tag", ".", "get", "(", "'kind'", ")", "in", "[", "'class'", ",", "'struct'", ",", "'namespace'", "]", ":", "name", "=", "parent_tag", ".", "findChild", "(", "'name'", ")", ".", "contents", "[", "0", "]", "+", "'::'", "+", "name", "return", "name" ]
Extract and return a representative "name" from a tag. Override as necessary. get_name's output can be controlled through keyword arguments that are provided when initializing a TagProcessor. For instance, a member of a class or namespace can have its parent scope included in the name by passing include_parent_scopes=True to __init__(). Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as an entry name in a Zeal database.
[ "Extract", "and", "return", "a", "representative", "name", "from", "a", "tag", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytag.py#L84-L108
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytag.py
TagProcessor.get_filename
def get_filename(self, tag): '''Extract and return a documentation filename from a tag. Override as necessary, though this default implementation probably covers all the cases of interest. Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as the documentation filename for an entry in a Zeal database. ''' if tag.find('filename', recursive=False) is not None: return tag.filename.contents[0] elif tag.find('anchorfile', recursive=False) is not None: return tag.anchorfile.contents[0] + '#' + tag.anchor.contents[0]
python
def get_filename(self, tag): '''Extract and return a documentation filename from a tag. Override as necessary, though this default implementation probably covers all the cases of interest. Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as the documentation filename for an entry in a Zeal database. ''' if tag.find('filename', recursive=False) is not None: return tag.filename.contents[0] elif tag.find('anchorfile', recursive=False) is not None: return tag.anchorfile.contents[0] + '#' + tag.anchor.contents[0]
[ "def", "get_filename", "(", "self", ",", "tag", ")", ":", "if", "tag", ".", "find", "(", "'filename'", ",", "recursive", "=", "False", ")", "is", "not", "None", ":", "return", "tag", ".", "filename", ".", "contents", "[", "0", "]", "elif", "tag", ".", "find", "(", "'anchorfile'", ",", "recursive", "=", "False", ")", "is", "not", "None", ":", "return", "tag", ".", "anchorfile", ".", "contents", "[", "0", "]", "+", "'#'", "+", "tag", ".", "anchor", ".", "contents", "[", "0", "]" ]
Extract and return a documentation filename from a tag. Override as necessary, though this default implementation probably covers all the cases of interest. Args: tag: A BeautifulSoup Tag that satisfies match_criterion. Returns: A string that would be appropriate to use as the documentation filename for an entry in a Zeal database.
[ "Extract", "and", "return", "a", "documentation", "filename", "from", "a", "tag", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytag.py#L124-L140
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytag.py
TagProcessorWithEntryTypeAndFindByNamePlusKind.match_criterion
def match_criterion(self, tag): '''Override. Determine if a tag has the desired name and kind attribute value. Args: tag: A BeautifulSoup Tag. Returns: True if tag has the desired name and kind, otherwise False. ''' return tag.name == self.reference_tag_name and \ tag.attrs.get('kind', '') == self.reference_tag_kind
python
def match_criterion(self, tag): '''Override. Determine if a tag has the desired name and kind attribute value. Args: tag: A BeautifulSoup Tag. Returns: True if tag has the desired name and kind, otherwise False. ''' return tag.name == self.reference_tag_name and \ tag.attrs.get('kind', '') == self.reference_tag_kind
[ "def", "match_criterion", "(", "self", ",", "tag", ")", ":", "return", "tag", ".", "name", "==", "self", ".", "reference_tag_name", "and", "tag", ".", "attrs", ".", "get", "(", "'kind'", ",", "''", ")", "==", "self", ".", "reference_tag_kind" ]
Override. Determine if a tag has the desired name and kind attribute value. Args: tag: A BeautifulSoup Tag. Returns: True if tag has the desired name and kind, otherwise False.
[ "Override", ".", "Determine", "if", "a", "tag", "has", "the", "desired", "name", "and", "kind", "attribute", "value", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytag.py#L169-L180
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytag.py
functionTagProcessor.get_name
def get_name(self, tag): '''Override. Extract a representative "name" from a function tag. get_name's output can be controlled through keyword arguments that are provided when initializing a functionTagProcessor. For instance, function arguments and return types can be included by passing include_function_signatures=True to __init__(). Args: tag: A BeautifulSoup Tag for a function. Returns: A string that would be appropriate to use as an entry name for a function in a Zeal database. ''' name = super(functionTagProcessor, self).get_name(tag) if self.include_function_signatures: # Include complete function signature in returned name func_args = tag.findChild('arglist') if func_args and len(func_args.contents): name += func_args.contents[0] ret_type = tag.findChild('type') if ret_type and len(ret_type.contents): name += ' -> ' + ret_type.contents[0] return name
python
def get_name(self, tag): '''Override. Extract a representative "name" from a function tag. get_name's output can be controlled through keyword arguments that are provided when initializing a functionTagProcessor. For instance, function arguments and return types can be included by passing include_function_signatures=True to __init__(). Args: tag: A BeautifulSoup Tag for a function. Returns: A string that would be appropriate to use as an entry name for a function in a Zeal database. ''' name = super(functionTagProcessor, self).get_name(tag) if self.include_function_signatures: # Include complete function signature in returned name func_args = tag.findChild('arglist') if func_args and len(func_args.contents): name += func_args.contents[0] ret_type = tag.findChild('type') if ret_type and len(ret_type.contents): name += ' -> ' + ret_type.contents[0] return name
[ "def", "get_name", "(", "self", ",", "tag", ")", ":", "name", "=", "super", "(", "functionTagProcessor", ",", "self", ")", ".", "get_name", "(", "tag", ")", "if", "self", ".", "include_function_signatures", ":", "# Include complete function signature in returned name", "func_args", "=", "tag", ".", "findChild", "(", "'arglist'", ")", "if", "func_args", "and", "len", "(", "func_args", ".", "contents", ")", ":", "name", "+=", "func_args", ".", "contents", "[", "0", "]", "ret_type", "=", "tag", ".", "findChild", "(", "'type'", ")", "if", "ret_type", "and", "len", "(", "ret_type", ".", "contents", ")", ":", "name", "+=", "' -> '", "+", "ret_type", ".", "contents", "[", "0", "]", "return", "name" ]
Override. Extract a representative "name" from a function tag. get_name's output can be controlled through keyword arguments that are provided when initializing a functionTagProcessor. For instance, function arguments and return types can be included by passing include_function_signatures=True to __init__(). Args: tag: A BeautifulSoup Tag for a function. Returns: A string that would be appropriate to use as an entry name for a function in a Zeal database.
[ "Override", ".", "Extract", "a", "representative", "name", "from", "a", "function", "tag", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytag.py#L298-L325
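Illustrative only: with the two options documented above enabled, a tag for a method int add(int a, int b) on class Math would come out roughly like this (the exact tag input is not shown here).

proc = functionTagProcessor(include_parent_scopes=True,
                            include_function_signatures=True)
# proc.get_name(tag) would yield something like:
#   'Math::add(int a, int b) -> int'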
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytag.py
functionTagProcessor.get_entry_type
def get_entry_type(self, tag): '''Override that returns u'Method' for class/struct methods. Override as necessary. Args: tag: A BeautifulSoup Tag for a function. Returns: If this is a class/struct method, returns u'Method', otherwise returns the value from the inherited implementation of get_entry_type (which should be u'Function'). ''' if tag.findParent().get('kind') in ['class', 'struct']: return u'Method' return super(functionTagProcessor, self).get_entry_type(tag)
python
def get_entry_type(self, tag): '''Override that returns u'Method' for class/struct methods. Override as necessary. Args: tag: A BeautifulSoup Tag for a function. Returns: If this is a class/struct method, returns u'Method', otherwise returns the value from the inherited implementation of get_entry_type (which should be u'Function'). ''' if tag.findParent().get('kind') in ['class', 'struct']: return u'Method' return super(functionTagProcessor, self).get_entry_type(tag)
[ "def", "get_entry_type", "(", "self", ",", "tag", ")", ":", "if", "tag", ".", "findParent", "(", ")", ".", "get", "(", "'kind'", ")", "in", "[", "'class'", ",", "'struct'", "]", ":", "return", "u'Method'", "return", "super", "(", "functionTagProcessor", ",", "self", ")", ".", "get_entry_type", "(", "tag", ")" ]
Override that returns u'Method' for class/struct methods. Override as necessary. Args: tag: A BeautifulSoup Tag for a function. Returns: If this is a class/struct method, returns u'Method', otherwise returns the value from the inherited implementation of get_entry_type (which should be u'Function').
[ "Override", "that", "returns", "u", "Method", "for", "class", "/", "struct", "methods", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytag.py#L327-L344
brentp/toolshed
toolshed/pool.py
pool
def pool(n=None, dummy=False): """ create a multiprocessing pool that responds to interrupts. """ if dummy: from multiprocessing.dummy import Pool else: from multiprocessing import Pool if n is None: import multiprocessing n = multiprocessing.cpu_count() - 1 return Pool(n)
python
def pool(n=None, dummy=False): """ create a multiprocessing pool that responds to interrupts. """ if dummy: from multiprocessing.dummy import Pool else: from multiprocessing import Pool if n is None: import multiprocessing n = multiprocessing.cpu_count() - 1 return Pool(n)
[ "def", "pool", "(", "n", "=", "None", ",", "dummy", "=", "False", ")", ":", "if", "dummy", ":", "from", "multiprocessing", ".", "dummy", "import", "Pool", "else", ":", "from", "multiprocessing", "import", "Pool", "if", "n", "is", "None", ":", "import", "multiprocessing", "n", "=", "multiprocessing", ".", "cpu_count", "(", ")", "-", "1", "return", "Pool", "(", "n", ")" ]
create a multiprocessing pool that responds to interrupts.
[ "create", "a", "multiprocessing", "pool", "that", "responds", "to", "interrupts", "." ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/pool.py#L70-L83
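Usage note: a short sketch; dummy=True swaps in multiprocessing.dummy (threads), which is convenient for I/O-bound work or for debugging without fork semantics.

p = pool(4)                     # 4 worker processes
print(p.map(abs, [-3, -2, 1]))  # [3, 2, 1]
p.close()
p.join()

t = pool(4, dummy=True)         # thread-backed pool with the same API
t.close()
t.join()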
brentp/toolshed
toolshed/pool.py
pmap
def pmap(f, iterable, n=None, dummy=False, p=None): """ parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use """ # make it easier to debug. if n == 1: for r in it.starmap(f, iterable): yield r return if p is None: po = pool(n, dummy) else: po = p assert hasattr(po, 'imap') f = _func_star(f) try: for r in po.imap(f, iterable): yield r # explicitly clean up created pool finally: if p is None: try: po.close() po.join() except: pass
python
def pmap(f, iterable, n=None, dummy=False, p=None): """ parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use """ # make it easier to debug. if n == 1: for r in it.starmap(f, iterable): yield r return if p is None: po = pool(n, dummy) else: po = p assert hasattr(po, 'imap') f = _func_star(f) try: for r in po.imap(f, iterable): yield r # explicitly clean up created pool finally: if p is None: try: po.close() po.join() except: pass
[ "def", "pmap", "(", "f", ",", "iterable", ",", "n", "=", "None", ",", "dummy", "=", "False", ",", "p", "=", "None", ")", ":", "# make it easier to debug.", "if", "n", "==", "1", ":", "for", "r", "in", "it", ".", "starmap", "(", "f", ",", "iterable", ")", ":", "yield", "r", "raise", "StopIteration", "if", "p", "is", "None", ":", "po", "=", "pool", "(", "n", ",", "dummy", ")", "else", ":", "po", "=", "p", "assert", "hasattr", "(", "po", ",", "'imap'", ")", "f", "=", "_func_star", "(", "f", ")", "try", ":", "for", "r", "in", "po", ".", "imap", "(", "f", ",", "iterable", ")", ":", "yield", "r", "# explicitly clean up created pool", "finally", ":", "if", "p", "is", "None", ":", "try", ":", "po", ".", "close", "(", ")", "po", ".", "join", "(", ")", "except", ":", "pass" ]
parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use
[ "parallel", "map", "of", "a", "function", "to", "an", "iterable", "if", "each", "item", "in", "iterable", "is", "itself", "an", "iterable", "then", "automatically", "call", "f", "(", "*", "item", ")", "instead", "of", "f", "(", "item", ")" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/pool.py#L99-L138
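Usage note: a sketch of the auto-splat behaviour described in the docstring; each tuple is unpacked into the function.

import operator

# Each (a, b) pair is unpacked into operator.add(a, b).
print(list(pmap(operator.add, [(1, 2), (3, 4), (5, 6)], n=2)))  # [3, 7, 11]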
gwww/elkm1
elkm1_lib/elements.py
Element._call_callbacks
def _call_callbacks(self): """Callbacks when attribute of element changes""" for callback in self._callbacks: callback(self, self._changeset) self._changeset = {}
python
def _call_callbacks(self): """Callbacks when attribute of element changes""" for callback in self._callbacks: callback(self, self._changeset) self._changeset = {}
[ "def", "_call_callbacks", "(", "self", ")", ":", "for", "callback", "in", "self", ".", "_callbacks", ":", "callback", "(", "self", ",", "self", ".", "_changeset", ")", "self", ".", "_changeset", "=", "{", "}" ]
Callbacks when attribute of element changes
[ "Callbacks", "when", "attribute", "of", "element", "changes" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L31-L35
gwww/elkm1
elkm1_lib/elements.py
Element.setattr
def setattr(self, attr, new_value, close_the_changeset=True): """If attribute value has changed then set it and call the callbacks""" existing_value = getattr(self, attr, None) if existing_value != new_value: setattr(self, attr, new_value) self._changeset[attr] = new_value if close_the_changeset and self._changeset: self._call_callbacks()
python
def setattr(self, attr, new_value, close_the_changeset=True): """If attribute value has changed then set it and call the callbacks""" existing_value = getattr(self, attr, None) if existing_value != new_value: setattr(self, attr, new_value) self._changeset[attr] = new_value if close_the_changeset and self._changeset: self._call_callbacks()
[ "def", "setattr", "(", "self", ",", "attr", ",", "new_value", ",", "close_the_changeset", "=", "True", ")", ":", "existing_value", "=", "getattr", "(", "self", ",", "attr", ",", "None", ")", "if", "existing_value", "!=", "new_value", ":", "setattr", "(", "self", ",", "attr", ",", "new_value", ")", "self", ".", "_changeset", "[", "attr", "]", "=", "new_value", "if", "close_the_changeset", "and", "self", ".", "_changeset", ":", "self", ".", "_call_callbacks", "(", ")" ]
If attribute value has changed then set it and call the callbacks
[ "If", "attribute", "value", "has", "changed", "then", "set", "it", "and", "call", "the", "callbacks" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L37-L45
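Usage note: a sketch of changeset batching (elem is some Element instance, and appending to _callbacks directly stands in for whatever registration helper the library provides; both are assumptions). Passing close_the_changeset=False defers the callbacks so several updates are reported together.

def on_change(element, changeset):
    print(changeset)  # e.g. {'name': 'Porch', 'triggered': True}

elem._callbacks.append(on_change)
elem.setattr('name', 'Porch', close_the_changeset=False)
elem.setattr('triggered', True)  # closes the changeset; on_change fires once with both changes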
gwww/elkm1
elkm1_lib/elements.py
Element.default_name
def default_name(self, separator='-'): """Return a default name based on class and index of element""" return self.__class__.__name__ + '{}{:03d}'.format( separator, self._index+1)
python
def default_name(self, separator='-'): """Return a default name based on class and index of element""" return self.__class__.__name__ + '{}{:03d}'.format( separator, self._index+1)
[ "def", "default_name", "(", "self", ",", "separator", "=", "'-'", ")", ":", "return", "self", ".", "__class__", ".", "__name__", "+", "'{}{:03d}'", ".", "format", "(", "separator", ",", "self", ".", "_index", "+", "1", ")" ]
Return a default name based on class and index of element
[ "Return", "a", "default", "name", "for", "based", "on", "class", "and", "index", "of", "element" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L47-L50
gwww/elkm1
elkm1_lib/elements.py
Element.as_dict
def as_dict(self): """Package up the public attributes as a dict.""" attrs = vars(self) return {key: attrs[key] for key in attrs if not key.startswith('_')}
python
def as_dict(self): """Package up the public attributes as a dict.""" attrs = vars(self) return {key: attrs[key] for key in attrs if not key.startswith('_')}
[ "def", "as_dict", "(", "self", ")", ":", "attrs", "=", "vars", "(", "self", ")", "return", "{", "key", ":", "attrs", "[", "key", "]", "for", "key", "in", "attrs", "if", "not", "key", ".", "startswith", "(", "'_'", ")", "}" ]
Package up the public attributes as a dict.
[ "Package", "up", "the", "public", "attributes", "as", "a", "dict", "." ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L62-L65
gwww/elkm1
elkm1_lib/elements.py
Elements.get_descriptions
def get_descriptions(self, description_type): """ Gets the descriptions for specified type. When complete the callback is called with a list of descriptions """ (desc_type, max_units) = description_type results = [None] * max_units self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc) self.elk.send(sd_encode(desc_type=desc_type, unit=0))
python
def get_descriptions(self, description_type): """ Gets the descriptions for specified type. When complete the callback is called with a list of descriptions """ (desc_type, max_units) = description_type results = [None] * max_units self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc) self.elk.send(sd_encode(desc_type=desc_type, unit=0))
[ "def", "get_descriptions", "(", "self", ",", "description_type", ")", ":", "(", "desc_type", ",", "max_units", ")", "=", "description_type", "results", "=", "[", "None", "]", "*", "max_units", "self", ".", "elk", ".", "_descriptions_in_progress", "[", "desc_type", "]", "=", "(", "max_units", ",", "results", ",", "self", ".", "_got_desc", ")", "self", ".", "elk", ".", "send", "(", "sd_encode", "(", "desc_type", "=", "desc_type", ",", "unit", "=", "0", ")", ")" ]
Gets the descriptions for specified type. When complete the callback is called with a list of descriptions
[ "Gets", "the", "descriptions", "for", "specified", "type", ".", "When", "complete", "the", "callback", "is", "called", "with", "a", "list", "of", "descriptions" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L90-L100
dahlia/wikidata
wikidata/commonsmedia.py
File.page_url
def page_url(self) -> str: """(:class:`str`) The canonical url of the page.""" url = self.attributes['canonicalurl'] assert isinstance(url, str) return url
python
def page_url(self) -> str: """(:class:`str`) The canonical url of the page.""" url = self.attributes['canonicalurl'] assert isinstance(url, str) return url
[ "def", "page_url", "(", "self", ")", "->", "str", ":", "url", "=", "self", ".", "attributes", "[", "'canonicalurl'", "]", "assert", "isinstance", "(", "url", ",", "str", ")", "return", "url" ]
(:class:`str`) The canonical url of the page.
[ "(", ":", "class", ":", "str", ")", "The", "canonical", "url", "of", "the", "page", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/commonsmedia.py#L29-L33
dahlia/wikidata
wikidata/commonsmedia.py
File.image_url
def image_url(self) -> Optional[str]: r"""(:class:`~typing.Optional`\ [:class:`str`]) The image url. It may be :const:`None` if it's not an image. """ images = self.attributes.get('imageinfo', []) if images and isinstance(images, collections.abc.Sequence): return images[0]['url'] return None
python
def image_url(self) -> Optional[str]: r"""(:class:`~typing.Optional`\ [:class:`str`]) The image url. It may be :const:`None` if it's not an image. """ images = self.attributes.get('imageinfo', []) if images and isinstance(images, collections.abc.Sequence): return images[0]['url'] return None
[ "def", "image_url", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "images", "=", "self", ".", "attributes", ".", "get", "(", "'imageinfo'", ",", "[", "]", ")", "if", "images", "and", "isinstance", "(", "images", ",", "collections", ".", "abc", ".", "Sequence", ")", ":", "return", "images", "[", "0", "]", "[", "'url'", "]", "return", "None" ]
r"""(:class:`~typing.Optional`\ [:class:`str`]) The image url. It may be :const:`None` if it's not an image.
[ "r", "(", ":", "class", ":", "~typing", ".", "Optional", "\\", "[", ":", "class", ":", "str", "]", ")", "The", "image", "url", ".", "It", "may", "be", ":", "const", ":", "None", "if", "it", "s", "not", "an", "image", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/commonsmedia.py#L36-L44
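Usage note: a sketch against the wikidata client (the entity and property IDs follow the package README); a commons-media claim resolves to one of these File objects.

from wikidata.client import Client

client = Client()
entity = client.get('Q20145', load=True)
image_prop = client.get('P18')  # the "image" property
image = entity[image_prop]      # a commonsmedia.File
print(image.image_url)
print(image.image_resolution)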
dahlia/wikidata
wikidata/commonsmedia.py
File.image_resolution
def image_resolution(self) -> Optional[Tuple[int, int]]: r"""(:class:`~typing.Optional`\ [:class:`~typing.Tuple`\ [:class:`int`, :class:`int`]]) The (width, height) pair of the image. It may be :const:`None` if it's not an image. """ images = self.attributes.get('imageinfo', []) if images and isinstance(images, collections.abc.Sequence): img = images[0] return img['width'], img['height'] return None
python
def image_resolution(self) -> Optional[Tuple[int, int]]: r"""(:class:`~typing.Optional`\ [:class:`~typing.Tuple`\ [:class:`int`, :class:`int`]]) The (width, height) pair of the image. It may be :const:`None` if it's not an image. """ images = self.attributes.get('imageinfo', []) if images and isinstance(images, collections.abc.Sequence): img = images[0] return img['width'], img['height'] return None
[ "def", "image_resolution", "(", "self", ")", "->", "Optional", "[", "Tuple", "[", "int", ",", "int", "]", "]", ":", "images", "=", "self", ".", "attributes", ".", "get", "(", "'imageinfo'", ",", "[", "]", ")", "if", "images", "and", "isinstance", "(", "images", ",", "collections", ".", "abc", ".", "Sequence", ")", ":", "img", "=", "images", "[", "0", "]", "return", "img", "[", "'width'", "]", ",", "img", "[", "'height'", "]", "return", "None" ]
r"""(:class:`~typing.Optional`\ [:class:`~typing.Tuple`\ [:class:`int`, :class:`int`]]) The (width, height) pair of the image. It may be :const:`None` if it's not an image.
[ "r", "(", ":", "class", ":", "~typing", ".", "Optional", "\\", "[", ":", "class", ":", "~typing", ".", "Tuple", "\\", "[", ":", "class", ":", "int", ":", "class", ":", "int", "]]", ")", "The", "(", "width", "height", ")", "pair", "of", "the", "image", ".", "It", "may", "be", ":", "const", ":", "None", "if", "it", "s", "not", "an", "image", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/commonsmedia.py#L58-L68
dahlia/wikidata
wikidata/commonsmedia.py
File.image_size
def image_size(self) -> Optional[int]:
    r"""(:class:`~typing.Optional`\ [:class:`int`]) The size of the image
    in bytes.  It may be :const:`None` if it's not an image.

    """
    images = self.attributes.get('imageinfo', [])
    if images and isinstance(images, collections.abc.Sequence):
        return images[0]['size']
    return None
python
def image_size(self) -> Optional[int]:
    r"""(:class:`~typing.Optional`\ [:class:`int`]) The size of the image
    in bytes.  It may be :const:`None` if it's not an image.

    """
    images = self.attributes.get('imageinfo', [])
    if images and isinstance(images, collections.abc.Sequence):
        return images[0]['size']
    return None
[ "def", "image_size", "(", "self", ")", "->", "Optional", "[", "int", "]", ":", "images", "=", "self", ".", "attributes", ".", "get", "(", "'imageinfo'", ",", "[", "]", ")", "if", "images", "and", "isinstance", "(", "images", ",", "collections", ".", "abc", ".", "Sequence", ")", ":", "return", "images", "[", "0", "]", "[", "'size'", "]", "return", "None" ]
r"""(:class:`~typing.Optional`\ [:class:`int`]) The size of the image in bytes. It may be :const:`None` if it's not an image.
[ "r", "(", ":", "class", ":", "~typing", ".", "Optional", "\\", "[", ":", "class", ":", "int", "]", ")", "The", "size", "of", "the", "image", "in", "bytes", ".", "It", "may", "be", ":", "const", ":", "None", "if", "it", "s", "not", "an", "image", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/commonsmedia.py#L71-L79
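The three ``File`` image properties above are typically reached through a claim lookup. A minimal usage sketch, assuming network access to the live Wikidata API; Q42 and P18 are example IDs:

from wikidata.client import Client

client = Client()                      # talks to www.wikidata.org by default
entity = client.get('Q42', load=True)  # Douglas Adams, as an example item
image = entity[client.get('P18')]      # P18 is the "image" property

print(image.image_url)         # the file URL, or None if it's not an image
print(image.image_resolution)  # (width, height) pair, or None
print(image.image_size)        # size in bytes, or None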
dahlia/wikidata
wikidata/multilingual.py
normalize_locale_code
def normalize_locale_code(locale: Union[Locale, str]) -> str:
    """Determine the normalized locale code string.

    >>> normalize_locale_code('ko-kr')
    'ko_KR'
    >>> normalize_locale_code('zh_TW')
    'zh_Hant_TW'
    >>> normalize_locale_code(Locale.parse('en_US'))
    'en_US'

    """
    if not isinstance(locale, Locale):
        locale = Locale.parse(locale.replace('-', '_'))
    return str(locale)
python
def normalize_locale_code(locale: Union[Locale, str]) -> str:
    """Determine the normalized locale code string.

    >>> normalize_locale_code('ko-kr')
    'ko_KR'
    >>> normalize_locale_code('zh_TW')
    'zh_Hant_TW'
    >>> normalize_locale_code(Locale.parse('en_US'))
    'en_US'

    """
    if not isinstance(locale, Locale):
        locale = Locale.parse(locale.replace('-', '_'))
    return str(locale)
[ "def", "normalize_locale_code", "(", "locale", ":", "Union", "[", "Locale", ",", "str", "]", ")", "->", "str", ":", "if", "not", "isinstance", "(", "locale", ",", "Locale", ")", ":", "locale", "=", "Locale", ".", "parse", "(", "locale", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "return", "str", "(", "locale", ")" ]
Determine the normalized locale code string. >>> normalize_locale_code('ko-kr') 'ko_KR' >>> normalize_locale_code('zh_TW') 'zh_Hant_TW' >>> normalize_locale_code(Locale.parse('en_US')) 'en_US'
[ "Determine", "the", "normalized", "locale", "code", "string", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/multilingual.py#L83-L96
dahlia/wikidata
wikidata/client.py
Client.get
def get(self, entity_id: EntityId, load: bool = False) -> Entity:
    """Get a Wikidata entity by its :class:`~.entity.EntityId`.

    :param entity_id: The :attr:`~.entity.Entity.id` of the
                      :class:`~.entity.Entity` to find.
    :type entity_id: :class:`~.entity.EntityId`
    :param load: Eager loading on :const:`True`.
                 Lazy loading (:const:`False`) by default.
    :type load: :class:`bool`
    :return: The found entity.
    :rtype: :class:`~.entity.Entity`

    .. versionadded:: 0.3.0
       The ``load`` option.

    """
    try:
        entity = self.identity_map[entity_id]
    except KeyError:
        entity = Entity(entity_id, self)
        self.identity_map[entity_id] = entity
    if load:
        entity.load()
    return entity
python
def get(self, entity_id: EntityId, load: bool = False) -> Entity:
    """Get a Wikidata entity by its :class:`~.entity.EntityId`.

    :param entity_id: The :attr:`~.entity.Entity.id` of the
                      :class:`~.entity.Entity` to find.
    :type entity_id: :class:`~.entity.EntityId`
    :param load: Eager loading on :const:`True`.
                 Lazy loading (:const:`False`) by default.
    :type load: :class:`bool`
    :return: The found entity.
    :rtype: :class:`~.entity.Entity`

    .. versionadded:: 0.3.0
       The ``load`` option.

    """
    try:
        entity = self.identity_map[entity_id]
    except KeyError:
        entity = Entity(entity_id, self)
        self.identity_map[entity_id] = entity
    if load:
        entity.load()
    return entity
[ "def", "get", "(", "self", ",", "entity_id", ":", "EntityId", ",", "load", ":", "bool", "=", "False", ")", "->", "Entity", ":", "try", ":", "entity", "=", "self", ".", "identity_map", "[", "entity_id", "]", "except", "KeyError", ":", "entity", "=", "Entity", "(", "entity_id", ",", "self", ")", "self", ".", "identity_map", "[", "entity_id", "]", "=", "entity", "if", "load", ":", "entity", ".", "load", "(", ")", "return", "entity" ]
Get a Wikidata entity by its :class:`~.entity.EntityId`. :param entity_id: The :attr:`~.entity.Entity.id` of the :class:`~.entity.Entity` to find. :type entity_id: :class:`~.entity.EntityId` :param load: Eager loading on :const:`True`. Lazy loading (:const:`False`) by default. :type load: :class:`bool` :return: The found entity. :rtype: :class:`~.entity.Entity` .. versionadded:: 0.3.0 The ``load`` option.
[ "Get", "a", "Wikidata", "entity", "by", "its", ":", "class", ":", "~", ".", "entity", ".", "EntityId", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/client.py#L117-L140
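A short sketch of the lazy/eager distinction and of the identity map that backs it; Q20145 is just an example ID, and only the eager call touches the network:

from wikidata.client import Client

client = Client()
lazy = client.get('Q20145')              # no HTTP request made yet
eager = client.get('Q20145', load=True)  # attributes fetched immediately
assert lazy is eager                     # identity map: one object per entity ID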
dahlia/wikidata
wikidata/client.py
Client.guess_entity_type
def guess_entity_type(self, entity_id: EntityId) -> Optional[EntityType]:
    r"""Guess :class:`~.entity.EntityType` from the given
    :class:`~.entity.EntityId`.  It could return :const:`None` when it
    fails to guess.

    .. note::

       It always fails to guess when :attr:`entity_type_guess`
       is configured to :const:`False`.

    :return: The guessed :class:`~.entity.EntityType`, or :const:`None`
             if it fails to guess.
    :rtype: :class:`~typing.Optional`\ [:class:`~.entity.EntityType`]

    .. versionadded:: 0.2.0

    """
    if not self.entity_type_guess:
        return None
    if entity_id[0] == 'Q':
        return EntityType.item
    elif entity_id[0] == 'P':
        return EntityType.property
    return None
python
def guess_entity_type(self, entity_id: EntityId) -> Optional[EntityType]:
    r"""Guess :class:`~.entity.EntityType` from the given
    :class:`~.entity.EntityId`.  It could return :const:`None` when it
    fails to guess.

    .. note::

       It always fails to guess when :attr:`entity_type_guess`
       is configured to :const:`False`.

    :return: The guessed :class:`~.entity.EntityType`, or :const:`None`
             if it fails to guess.
    :rtype: :class:`~typing.Optional`\ [:class:`~.entity.EntityType`]

    .. versionadded:: 0.2.0

    """
    if not self.entity_type_guess:
        return None
    if entity_id[0] == 'Q':
        return EntityType.item
    elif entity_id[0] == 'P':
        return EntityType.property
    return None
[ "def", "guess_entity_type", "(", "self", ",", "entity_id", ":", "EntityId", ")", "->", "Optional", "[", "EntityType", "]", ":", "if", "not", "self", ".", "entity_type_guess", ":", "return", "None", "if", "entity_id", "[", "0", "]", "==", "'Q'", ":", "return", "EntityType", ".", "item", "elif", "entity_id", "[", "0", "]", "==", "'P'", ":", "return", "EntityType", ".", "property", "return", "None" ]
r"""Guess :class:`~.entity.EntityType` from the given :class:`~.entity.EntityId`. It could return :const:`None` when it fails to guess. .. note:: It always fails to guess when :attr:`entity_type_guess` is configued to :const:`False`. :return: The guessed :class:`~.entity.EntityId`, or :const:`None` if it fails to guess. :rtype: :class:`~typing.Optional`\ [:class:`~.entity.EntityType`] .. versionadded:: 0.2.0
[ "r", "Guess", ":", "class", ":", "~", ".", "entity", ".", "EntityType", "from", "the", "given", ":", "class", ":", "~", ".", "entity", ".", "EntityId", ".", "It", "could", "return", ":", "const", ":", "None", "when", "it", "fails", "to", "guess", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/client.py#L142-L165
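Since guessing only inspects the first character of the ID, it needs no network access. A small sketch, assuming the default entity_type_guess=True:

from wikidata.client import Client

client = Client()
client.guess_entity_type('Q42')   # EntityType.item
client.guess_entity_type('P18')   # EntityType.property
client.guess_entity_type('X99')   # None -- unrecognized prefix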
dahlia/wikidata
wikidata/client.py
Client.decode_datavalue
def decode_datavalue(self,
                     datatype: str,
                     datavalue: Mapping[str, object]) -> object:
    """Decode the given ``datavalue`` using the configured
    :attr:`datavalue_decoder`.

    .. versionadded:: 0.3.0

    """
    decode = cast(Callable[[Client, str, Mapping[str, object]], object],
                  self.datavalue_decoder)
    return decode(self, datatype, datavalue)
python
def decode_datavalue(self,
                     datatype: str,
                     datavalue: Mapping[str, object]) -> object:
    """Decode the given ``datavalue`` using the configured
    :attr:`datavalue_decoder`.

    .. versionadded:: 0.3.0

    """
    decode = cast(Callable[[Client, str, Mapping[str, object]], object],
                  self.datavalue_decoder)
    return decode(self, datatype, datavalue)
[ "def", "decode_datavalue", "(", "self", ",", "datatype", ":", "str", ",", "datavalue", ":", "Mapping", "[", "str", ",", "object", "]", ")", "->", "object", ":", "decode", "=", "cast", "(", "Callable", "[", "[", "Client", ",", "str", ",", "Mapping", "[", "str", ",", "object", "]", "]", ",", "object", "]", ",", "self", ".", "datavalue_decoder", ")", "return", "decode", "(", "self", ",", "datatype", ",", "datavalue", ")" ]
Decode the given ``datavalue`` using the configured :attr:`datavalue_decoder`. .. versionadded:: 0.3.0
[ "Decode", "the", "given", "datavalue", "using", "the", "configured", ":", "attr", ":", "datavalue_decoder", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/client.py#L167-L178
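A sketch of decoding one raw snak with the default decoder; the datavalue mapping below is hypothetical but follows the Wikibase JSON shape for string-typed properties:

from wikidata.client import Client

client = Client()
datavalue = {'type': 'string', 'value': 'Hello, world'}
client.decode_datavalue('string', datavalue)  # -> 'Hello, world'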
dahlia/wikidata
wikidata/cache.py
CachePolicy.set
def set(self, key: CacheKey, value: Optional[CacheValue]) -> None:
    r"""Create or update a cache.

    :param key: A key string to create or update.
    :type key: :const:`CacheKey`
    :param value: A value to cache.  :const:`None` to remove cache.
    :type value: :class:`~typing.Optional`\ [:const:`CacheValue`]

    """
    raise NotImplementedError(
        'Concrete subclasses of {0.__module__}.{0.__qualname__} have to '
        'override .set() method'.format(CachePolicy)
    )
python
def set(self, key: CacheKey, value: Optional[CacheValue]) -> None:
    r"""Create or update a cache.

    :param key: A key string to create or update.
    :type key: :const:`CacheKey`
    :param value: A value to cache.  :const:`None` to remove cache.
    :type value: :class:`~typing.Optional`\ [:const:`CacheValue`]

    """
    raise NotImplementedError(
        'Concrete subclasses of {0.__module__}.{0.__qualname__} have to '
        'override .set() method'.format(CachePolicy)
    )
[ "def", "set", "(", "self", ",", "key", ":", "CacheKey", ",", "value", ":", "Optional", "[", "CacheValue", "]", ")", "->", "None", ":", "raise", "NotImplementedError", "(", "'Concreate subclasses of {0.__module__}.{0.__qualname__} have to '", "'override .set() method'", ".", "format", "(", "CachePolicy", ")", ")" ]
r"""Create or update a cache. :param key: A key string to create or update. :type key: :const:`CacheKey` :param value: A value to cache. :const:`None` to remove cache. :type value: :class:`~typing.Optional`\ [:const:`CacheValue`]
[ "r", "Create", "or", "update", "a", "cache", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/cache.py#L43-L55
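A toy in-memory subclass illustrating the contract that set (and its counterpart get) must honor; this class is illustrative only, not part of the library:

from wikidata.cache import CachePolicy

class DictCachePolicy(CachePolicy):
    """Keeps cached values in a plain dict."""

    def __init__(self):
        self.storage = {}

    def get(self, key):
        return self.storage.get(key)

    def set(self, key, value):
        if value is None:
            self.storage.pop(key, None)  # None means: remove the cache entry
        else:
            self.storage[key] = value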
dahlia/wikidata
wikidata/entity.py
Entity.getlist
def getlist(self, key: 'Entity') -> Sequence[object]:
    r"""Return all values associated to the given ``key`` property
    in sequence.

    :param key: The property entity.
    :type key: :class:`Entity`
    :return: A sequence of all values associated to the given ``key``
             property.  It can be empty if nothing is associated to
             the property.
    :rtype: :class:`~typing.Sequence`\ [:class:`object`]

    """
    if not (isinstance(key, type(self)) and
            key.type is EntityType.property):
        return []
    claims_map = self.attributes.get('claims') or {}
    assert isinstance(claims_map, collections.abc.Mapping)
    claims = claims_map.get(key.id, [])
    claims.sort(key=lambda claim: claim['rank'],  # FIXME
                reverse=True)
    logger = logging.getLogger(__name__ + '.Entity.getitem')
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('claim data: %s', __import__('pprint').pformat(claims))
    decode = self.client.decode_datavalue
    return [decode(snak['datatype'], snak['datavalue'])
            for snak in (claim['mainsnak'] for claim in claims)]
python
def getlist(self, key: 'Entity') -> Sequence[object]:
    r"""Return all values associated to the given ``key`` property
    in sequence.

    :param key: The property entity.
    :type key: :class:`Entity`
    :return: A sequence of all values associated to the given ``key``
             property.  It can be empty if nothing is associated to
             the property.
    :rtype: :class:`~typing.Sequence`\ [:class:`object`]

    """
    if not (isinstance(key, type(self)) and
            key.type is EntityType.property):
        return []
    claims_map = self.attributes.get('claims') or {}
    assert isinstance(claims_map, collections.abc.Mapping)
    claims = claims_map.get(key.id, [])
    claims.sort(key=lambda claim: claim['rank'],  # FIXME
                reverse=True)
    logger = logging.getLogger(__name__ + '.Entity.getitem')
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('claim data: %s', __import__('pprint').pformat(claims))
    decode = self.client.decode_datavalue
    return [decode(snak['datatype'], snak['datavalue'])
            for snak in (claim['mainsnak'] for claim in claims)]
[ "def", "getlist", "(", "self", ",", "key", ":", "'Entity'", ")", "->", "Sequence", "[", "object", "]", ":", "if", "not", "(", "isinstance", "(", "key", ",", "type", "(", "self", ")", ")", "and", "key", ".", "type", "is", "EntityType", ".", "property", ")", ":", "return", "[", "]", "claims_map", "=", "self", ".", "attributes", ".", "get", "(", "'claims'", ")", "or", "{", "}", "assert", "isinstance", "(", "claims_map", ",", "collections", ".", "abc", ".", "Mapping", ")", "claims", "=", "claims_map", ".", "get", "(", "key", ".", "id", ",", "[", "]", ")", "claims", ".", "sort", "(", "key", "=", "lambda", "claim", ":", "claim", "[", "'rank'", "]", ",", "# FIXME", "reverse", "=", "True", ")", "logger", "=", "logging", ".", "getLogger", "(", "__name__", "+", "'.Entity.getitem'", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "logger", ".", "debug", "(", "'claim data: %s'", ",", "__import__", "(", "'pprint'", ")", ".", "pformat", "(", "claims", ")", ")", "decode", "=", "self", ".", "client", ".", "decode_datavalue", "return", "[", "decode", "(", "snak", "[", "'datatype'", "]", ",", "snak", "[", "'datavalue'", "]", ")", "for", "snak", "in", "(", "claim", "[", "'mainsnak'", "]", "for", "claim", "in", "claims", ")", "]" ]
r"""Return all values associated to the given ``key`` property in sequence. :param key: The property entity. :type key: :class:`Entity` :return: A sequence of all values associated to the given ``key`` property. It can be empty if nothing is associated to the property. :rtype: :class:`~typing.Sequence`\ [:class:`object`]
[ "r", "Return", "all", "values", "associated", "to", "the", "given", "key", "property", "in", "sequence", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/entity.py#L165-L191
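A usage sketch: getlist is the method to reach for when a property can carry several claims; P106 ("occupation") is a real multi-valued example, though any property entity works:

from wikidata.client import Client

client = Client()
entity = client.get('Q42', load=True)
occupation = client.get('P106')    # "occupation" property
entity.getlist(occupation)         # every claimed value, best rank first
entity.getlist(client.get('Q42'))  # non-property key -> empty list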
dahlia/wikidata
wikidata/entity.py
Entity.type
def type(self) -> EntityType:
    """(:class:`EntityType`) The type of entity, :attr:`~EntityType.item`
    or :attr:`~EntityType.property`.

    .. versionadded:: 0.2.0

    """
    if self.data is None:
        guessed_type = self.client.guess_entity_type(self.id)
        if guessed_type is not None:
            return guessed_type
        # If guessing failed, follow the straightforward way.
    return EntityType(self.attributes['type'])
python
def type(self) -> EntityType:
    """(:class:`EntityType`) The type of entity, :attr:`~EntityType.item`
    or :attr:`~EntityType.property`.

    .. versionadded:: 0.2.0

    """
    if self.data is None:
        guessed_type = self.client.guess_entity_type(self.id)
        if guessed_type is not None:
            return guessed_type
        # If guessing failed, follow the straightforward way.
    return EntityType(self.attributes['type'])
[ "def", "type", "(", "self", ")", "->", "EntityType", ":", "if", "self", ".", "data", "is", "None", ":", "guessed_type", "=", "self", ".", "client", ".", "guess_entity_type", "(", "self", ".", "id", ")", "if", "guessed_type", "is", "not", "None", ":", "return", "guessed_type", "# If guessing was failed follow the straightforward way.", "return", "EntityType", "(", "self", ".", "attributes", "[", "'type'", "]", ")" ]
(:class:`EntityType`) The type of entity, :attr:`~EntityType.item` or :attr:`~EntityType.property`. .. versionadded:: 0.2.0
[ "(", ":", "class", ":", "EntityType", ")", "The", "type", "of", "entity", ":", "attr", ":", "~EntityType", ".", "item", "or", ":", "attr", ":", "~EntityType", ".", "property", "." ]
train
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/entity.py#L216-L228
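Because of the prefix guessing above, reading .type on a lazy entity usually costs no request. A quick sketch:

from wikidata.client import Client

client = Client()
client.get('Q42').type   # EntityType.item -- guessed from the 'Q' prefix
client.get('P18').type   # EntityType.property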
raddevon/flask-permissions
flask_permissions/models.py
make_user_role_table
def make_user_role_table(table_name='user', id_column_name='id'):
    """
    Create the user-role association table so that it correctly references
    your own UserMixin subclass.
    """
    return db.Table('fp_user_role',
                    db.Column('user_id', db.Integer,
                              db.ForeignKey('{}.{}'.format(
                                  table_name, id_column_name))),
                    db.Column('role_id', db.Integer,
                              db.ForeignKey('fp_role.id')),
                    extend_existing=True)
python
def make_user_role_table(table_name='user', id_column_name='id'):
    """
    Create the user-role association table so that it correctly references
    your own UserMixin subclass.
    """
    return db.Table('fp_user_role',
                    db.Column('user_id', db.Integer,
                              db.ForeignKey('{}.{}'.format(
                                  table_name, id_column_name))),
                    db.Column('role_id', db.Integer,
                              db.ForeignKey('fp_role.id')),
                    extend_existing=True)
[ "def", "make_user_role_table", "(", "table_name", "=", "'user'", ",", "id_column_name", "=", "'id'", ")", ":", "return", "db", ".", "Table", "(", "'fp_user_role'", ",", "db", ".", "Column", "(", "'user_id'", ",", "db", ".", "Integer", ",", "db", ".", "ForeignKey", "(", "'{}.{}'", ".", "format", "(", "table_name", ",", "id_column_name", ")", ")", ")", ",", "db", ".", "Column", "(", "'role_id'", ",", "db", ".", "Integer", ",", "db", ".", "ForeignKey", "(", "'fp_role.id'", ")", ")", ",", "extend_existing", "=", "True", ")" ]
Create the user-role association table so that it correctly references your own UserMixin subclass.
[ "Create", "the", "user", "-", "role", "association", "table", "so", "that", "it", "correctly", "references", "your", "own", "UserMixin", "subclass", "." ]
train
https://github.com/raddevon/flask-permissions/blob/a2f64c8e26b6b4807019794a68bad21b12ceeb71/flask_permissions/models.py#L23-L36
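A hedged sketch: if your UserMixin subclass maps to a table named 'account' with an integer 'id' primary key (both assumptions here), the association table could be built like this:

user_role_table = make_user_role_table(table_name='account',
                                       id_column_name='id')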
raddevon/flask-permissions
flask_permissions/decorators.py
user_has
def user_has(ability, get_user=import_user):
    """
    Takes an ability (a string name of either a role or an ability) and
    returns the function if the user has that ability
    """
    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            from .models import Ability
            desired_ability = Ability.query.filter_by(name=ability).first()
            user_abilities = []
            current_user = get_user()
            for role in current_user._roles:
                user_abilities += role.abilities
            if desired_ability in user_abilities:
                return func(*args, **kwargs)
            else:
                raise Forbidden("You do not have access")
        return inner
    return wrapper
python
def user_has(ability, get_user=import_user):
    """
    Takes an ability (a string name of either a role or an ability) and
    returns the function if the user has that ability
    """
    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            from .models import Ability
            desired_ability = Ability.query.filter_by(name=ability).first()
            user_abilities = []
            current_user = get_user()
            for role in current_user._roles:
                user_abilities += role.abilities
            if desired_ability in user_abilities:
                return func(*args, **kwargs)
            else:
                raise Forbidden("You do not have access")
        return inner
    return wrapper
[ "def", "user_has", "(", "ability", ",", "get_user", "=", "import_user", ")", ":", "def", "wrapper", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "models", "import", "Ability", "desired_ability", "=", "Ability", ".", "query", ".", "filter_by", "(", "name", "=", "ability", ")", ".", "first", "(", ")", "user_abilities", "=", "[", "]", "current_user", "=", "get_user", "(", ")", "for", "role", "in", "current_user", ".", "_roles", ":", "user_abilities", "+=", "role", ".", "abilities", "if", "desired_ability", "in", "user_abilities", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "Forbidden", "(", "\"You do not have access\"", ")", "return", "inner", "return", "wrapper" ]
Takes an ability (a string name of either a role or an ability) and returns the function if the user has that ability
[ "Takes", "an", "ability", "(", "a", "string", "name", "of", "either", "a", "role", "or", "an", "ability", ")", "and", "returns", "the", "function", "if", "the", "user", "has", "that", "ability" ]
train
https://github.com/raddevon/flask-permissions/blob/a2f64c8e26b6b4807019794a68bad21b12ceeb71/flask_permissions/decorators.py#L14-L33
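A plausible Flask usage, assuming a Flask app object and an Ability row named 'create_users' already exist in the database:

@app.route('/admin/users', methods=['POST'])
@user_has('create_users')
def create_user():
    return 'Only users whose roles grant create_users get here.'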
raddevon/flask-permissions
flask_permissions/decorators.py
user_is
def user_is(role, get_user=import_user):
    """
    Takes a role (a string name of either a role or an ability) and returns
    the function if the user has that role
    """
    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            from .models import Role
            current_user = get_user()
            if role in current_user.roles:
                return func(*args, **kwargs)
            raise Forbidden("You do not have access")
        return inner
    return wrapper
python
def user_is(role, get_user=import_user):
    """
    Takes a role (a string name of either a role or an ability) and returns
    the function if the user has that role
    """
    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            from .models import Role
            current_user = get_user()
            if role in current_user.roles:
                return func(*args, **kwargs)
            raise Forbidden("You do not have access")
        return inner
    return wrapper
[ "def", "user_is", "(", "role", ",", "get_user", "=", "import_user", ")", ":", "def", "wrapper", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "models", "import", "Role", "current_user", "=", "get_user", "(", ")", "if", "role", "in", "current_user", ".", "roles", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "raise", "Forbidden", "(", "\"You do not have access\"", ")", "return", "inner", "return", "wrapper" ]
Takes a role (a string name of either a role or an ability) and returns the function if the user has that role
[ "Takes", "an", "role", "(", "a", "string", "name", "of", "either", "a", "role", "or", "an", "ability", ")", "and", "returns", "the", "function", "if", "the", "user", "has", "that", "role" ]
train
https://github.com/raddevon/flask-permissions/blob/a2f64c8e26b6b4807019794a68bad21b12ceeb71/flask_permissions/decorators.py#L36-L49
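The role-based counterpart, under the same assumptions (a Flask app and a role named 'admin'):

@app.route('/admin')
@user_is('admin')
def admin_panel():
    return 'Only users with the admin role get here.'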
sunlightlabs/django-mediasync
mediasync/views.py
combo_serve
def combo_serve(request, path, client):
    """
    Handles generating a 'combo' file for the given path. This is similar
    to what happens when we upload to S3. Processors are applied, and we
    get the value that we would if we were serving from S3. This is a good
    way to make sure combo files work as intended before rolling out to
    production.
    """
    joinfile = path
    sourcefiles = msettings['JOINED'][path]
    # Generate the combo file as a string.
    combo_data, dirname = combine_files(joinfile, sourcefiles, client)

    if path.endswith('.css'):
        mime_type = 'text/css'
    elif joinfile.endswith('.js'):
        mime_type = 'application/javascript'

    return HttpResponse(combo_data, mimetype=mime_type)
python
def combo_serve(request, path, client):
    """
    Handles generating a 'combo' file for the given path. This is similar
    to what happens when we upload to S3. Processors are applied, and we
    get the value that we would if we were serving from S3. This is a good
    way to make sure combo files work as intended before rolling out to
    production.
    """
    joinfile = path
    sourcefiles = msettings['JOINED'][path]
    # Generate the combo file as a string.
    combo_data, dirname = combine_files(joinfile, sourcefiles, client)

    if path.endswith('.css'):
        mime_type = 'text/css'
    elif joinfile.endswith('.js'):
        mime_type = 'application/javascript'

    return HttpResponse(combo_data, mimetype=mime_type)
[ "def", "combo_serve", "(", "request", ",", "path", ",", "client", ")", ":", "joinfile", "=", "path", "sourcefiles", "=", "msettings", "[", "'JOINED'", "]", "[", "path", "]", "# Generate the combo file as a string.", "combo_data", ",", "dirname", "=", "combine_files", "(", "joinfile", ",", "sourcefiles", ",", "client", ")", "if", "path", ".", "endswith", "(", "'.css'", ")", ":", "mime_type", "=", "'text/css'", "elif", "joinfile", ".", "endswith", "(", "'.js'", ")", ":", "mime_type", "=", "'application/javascript'", "return", "HttpResponse", "(", "combo_data", ",", "mimetype", "=", "mime_type", ")" ]
Handles generating a 'combo' file for the given path. This is similar to what happens when we upload to S3. Processors are applied, and we get the value that we would if we were serving from S3. This is a good way to make sure combo files work as intended before rolling out to production.
[ "Handles", "generating", "a", "combo", "file", "for", "the", "given", "path", ".", "This", "is", "similar", "to", "what", "happens", "when", "we", "upload", "to", "S3", ".", "Processors", "are", "applied", "and", "we", "get", "the", "value", "that", "we", "would", "if", "we", "were", "serving", "from", "S3", ".", "This", "is", "a", "good", "way", "to", "make", "sure", "combo", "files", "work", "as", "intended", "before", "rolling", "out", "to", "production", "." ]
train
https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/views.py#L14-L32
sunlightlabs/django-mediasync
mediasync/views.py
_form_key_str
def _form_key_str(path):
    """
    Given a URL path, massage it into a key we can perform a lookup on
    the MEDIASYNC['JOINED'] dict with. This mostly involves taking into
    account the CSS_PATH and JS_PATH settings, if they have been set.
    """
    if path.endswith('.css'):
        media_path_prefix = msettings['CSS_PATH']
    elif path.endswith('.js'):
        media_path_prefix = msettings['JS_PATH']
    else:
        # This isn't a CSS/JS file, no combo for you.
        return None

    if media_path_prefix:
        # CSS/JS path prefix has been set. Factor that into the key lookup.
        if not media_path_prefix.endswith('/'):
            # We need to add this slash so we can lop it off the 'path'
            # variable, to match the value in the JOINED dict.
            media_path_prefix += '/'

        if path.startswith(media_path_prefix):
            # Given path starts with the CSS/JS media prefix. Lop this off
            # so we can perform a lookup in the JOINED dict.
            return path[len(media_path_prefix):]
        else:
            # Path is in a root dir, send along as-is.
            return path

    # No CSS/JS path prefix set. Keep it raw.
    return path
python
def _form_key_str(path):
    """
    Given a URL path, massage it into a key we can perform a lookup on
    the MEDIASYNC['JOINED'] dict with. This mostly involves taking into
    account the CSS_PATH and JS_PATH settings, if they have been set.
    """
    if path.endswith('.css'):
        media_path_prefix = msettings['CSS_PATH']
    elif path.endswith('.js'):
        media_path_prefix = msettings['JS_PATH']
    else:
        # This isn't a CSS/JS file, no combo for you.
        return None

    if media_path_prefix:
        # CSS/JS path prefix has been set. Factor that into the key lookup.
        if not media_path_prefix.endswith('/'):
            # We need to add this slash so we can lop it off the 'path'
            # variable, to match the value in the JOINED dict.
            media_path_prefix += '/'

        if path.startswith(media_path_prefix):
            # Given path starts with the CSS/JS media prefix. Lop this off
            # so we can perform a lookup in the JOINED dict.
            return path[len(media_path_prefix):]
        else:
            # Path is in a root dir, send along as-is.
            return path

    # No CSS/JS path prefix set. Keep it raw.
    return path
[ "def", "_form_key_str", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "'.css'", ")", ":", "media_path_prefix", "=", "msettings", "[", "'CSS_PATH'", "]", "elif", "path", ".", "endswith", "(", "'.js'", ")", ":", "media_path_prefix", "=", "msettings", "[", "'JS_PATH'", "]", "else", ":", "# This isn't a CSS/JS file, no combo for you.", "return", "None", "if", "media_path_prefix", ":", "# CS/JSS path prefix has been set. Factor that into the key lookup.", "if", "not", "media_path_prefix", ".", "endswith", "(", "'/'", ")", ":", "# We need to add this slash so we can lop it off the 'path'", "# variable, to match the value in the JOINED dict.", "media_path_prefix", "+=", "'/'", "if", "path", ".", "startswith", "(", "media_path_prefix", ")", ":", "# Given path starts with the CSS/JS media prefix. Lop this off", "# so we can perform a lookup in the JOINED dict.", "return", "path", "[", "len", "(", "media_path_prefix", ")", ":", "]", "else", ":", "# Path is in a root dir, send along as-is.", "return", "path", "# No CSS/JS path prefix set. Keep it raw.", "return", "path" ]
Given a URL path, massage it into a key we can perform a lookup on the MEDIASYNC['JOINED'] dict with. This mostly involves taking into account the CSS_PATH and JS_PATH settings, if they have been set.
[ "Given", "a", "URL", "path", "massage", "it", "into", "a", "key", "we", "can", "perform", "a", "lookup", "on", "the", "MEDIASYNC", "[", "JOINED", "]", "dict", "with", ".", "This", "mostly", "involves", "figuring", "into", "account", "the", "CSS_PATH", "and", "JS_PATH", "settings", "if", "they", "have", "been", "set", "." ]
train
https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/views.py#L34-L66
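To illustrate the prefix handling, suppose MEDIASYNC['CSS_PATH'] is 'styles' and JS_PATH is unset (hypothetical settings):

from mediasync.views import _form_key_str

_form_key_str('styles/combined.css')  # -> 'combined.css' (prefix stripped)
_form_key_str('other/site.css')       # -> 'other/site.css' (prefix not matched)
_form_key_str('app.js')               # -> 'app.js' (no JS_PATH, kept raw)
_form_key_str('logo.png')             # -> None (not a CSS/JS file)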
sunlightlabs/django-mediasync
mediasync/views.py
_find_combo_match
def _find_combo_match(path):
    """
    Calculate the key to check the MEDIASYNC['JOINED'] dict for, perform
    the lookup, and return the matching key string if a match is found.
    If no match is found, return None instead.
    """
    key_str = _form_key_str(path)

    if not key_str:
        # _form_key_str() says this isn't even a CSS/JS file.
        return None

    if not msettings['JOINED'].has_key(key_str):
        # No combo file match found. Must be a single file.
        return None
    else:
        # Combo match found, return the JOINED key.
        return key_str
python
def _find_combo_match(path):
    """
    Calculate the key to check the MEDIASYNC['JOINED'] dict for, perform
    the lookup, and return the matching key string if a match is found.
    If no match is found, return None instead.
    """
    key_str = _form_key_str(path)

    if not key_str:
        # _form_key_str() says this isn't even a CSS/JS file.
        return None

    if not msettings['JOINED'].has_key(key_str):
        # No combo file match found. Must be a single file.
        return None
    else:
        # Combo match found, return the JOINED key.
        return key_str
[ "def", "_find_combo_match", "(", "path", ")", ":", "key_str", "=", "_form_key_str", "(", "path", ")", "if", "not", "key_str", ":", "# _form_key_str() says this isn't even a CSS/JS file.", "return", "None", "if", "not", "msettings", "[", "'JOINED'", "]", ".", "has_key", "(", "key_str", ")", ":", "# No combo file match found. Must be an single file.", "return", "None", "else", ":", "# Combo match found, return the JOINED key.", "return", "key_str" ]
Calculate the key to check the MEDIASYNC['JOINED'] dict for, perform the lookup, and return the matching key string if a match is found. If no match is found, return None instead.
[ "Calculate", "the", "key", "to", "check", "the", "MEDIASYNC", "[", "JOINED", "]", "dict", "for", "perform", "the", "lookup", "and", "return", "the", "matching", "key", "string", "if", "a", "match", "is", "found", ".", "If", "no", "match", "is", "found", "return", "None", "instead", "." ]
train
https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/views.py#L68-L84
sunlightlabs/django-mediasync
mediasync/views.py
static_serve
def static_serve(request, path, client):
    """
    Given a request for a media asset, this view does the necessary
    wrangling to get the correct thing delivered to the user. This can
    also emulate the combo behavior seen when SERVE_REMOTE == False and
    EMULATE_COMBO == True.
    """
    if msettings['SERVE_REMOTE']:
        # We're serving from S3, redirect there.
        url = client.remote_media_url().strip('/') + '/%(path)s'
        return redirect(url, permanent=True)

    if not msettings['SERVE_REMOTE'] and msettings['EMULATE_COMBO']:
        # Combo emulation is on and we're serving media locally. Try to
        # see if the given path matches a combo file defined in the
        # JOINED dict in the MEDIASYNC settings dict.
        combo_match = _find_combo_match(path)
        if combo_match:
            # We found a combo file match. Combine it and serve the result.
            return combo_serve(request, combo_match, client)

    # No combo file, but we're serving locally. Use the standard
    # (inefficient) Django static serve view.
    resp = serve(request, path, document_root=client.media_root,
                 show_indexes=True)
    try:
        resp.content = client.process(resp.content, resp['Content-Type'], path)
    except KeyError:
        # HTTPNotModifiedResponse lacks the "Content-Type" key.
        pass

    return resp
python
def static_serve(request, path, client):
    """
    Given a request for a media asset, this view does the necessary
    wrangling to get the correct thing delivered to the user. This can
    also emulate the combo behavior seen when SERVE_REMOTE == False and
    EMULATE_COMBO == True.
    """
    if msettings['SERVE_REMOTE']:
        # We're serving from S3, redirect there.
        url = client.remote_media_url().strip('/') + '/%(path)s'
        return redirect(url, permanent=True)

    if not msettings['SERVE_REMOTE'] and msettings['EMULATE_COMBO']:
        # Combo emulation is on and we're serving media locally. Try to
        # see if the given path matches a combo file defined in the
        # JOINED dict in the MEDIASYNC settings dict.
        combo_match = _find_combo_match(path)
        if combo_match:
            # We found a combo file match. Combine it and serve the result.
            return combo_serve(request, combo_match, client)

    # No combo file, but we're serving locally. Use the standard
    # (inefficient) Django static serve view.
    resp = serve(request, path, document_root=client.media_root,
                 show_indexes=True)
    try:
        resp.content = client.process(resp.content, resp['Content-Type'], path)
    except KeyError:
        # HTTPNotModifiedResponse lacks the "Content-Type" key.
        pass

    return resp
[ "def", "static_serve", "(", "request", ",", "path", ",", "client", ")", ":", "if", "msettings", "[", "'SERVE_REMOTE'", "]", ":", "# We're serving from S3, redirect there.", "url", "=", "client", ".", "remote_media_url", "(", ")", ".", "strip", "(", "'/'", ")", "+", "'/%(path)s'", "return", "redirect", "(", "url", ",", "permanent", "=", "True", ")", "if", "not", "msettings", "[", "'SERVE_REMOTE'", "]", "and", "msettings", "[", "'EMULATE_COMBO'", "]", ":", "# Combo emulation is on and we're serving media locally. Try to see if", "# the given path matches a combo file defined in the JOINED dict in", "# the MEDIASYNC settings dict.", "combo_match", "=", "_find_combo_match", "(", "path", ")", "if", "combo_match", ":", "# We found a combo file match. Combine it and serve the result.", "return", "combo_serve", "(", "request", ",", "combo_match", ",", "client", ")", "# No combo file, but we're serving locally. Use the standard (inefficient)", "# Django static serve view.", "resp", "=", "serve", "(", "request", ",", "path", ",", "document_root", "=", "client", ".", "media_root", ",", "show_indexes", "=", "True", ")", "try", ":", "resp", ".", "content", "=", "client", ".", "process", "(", "resp", ".", "content", ",", "resp", "[", "'Content-Type'", "]", ",", "path", ")", "except", "KeyError", ":", "# HTTPNotModifiedResponse lacks the \"Content-Type\" key.", "pass", "return", "resp" ]
Given a request for a media asset, this view does the necessary wrangling to get the correct thing delivered to the user. This can also emulate the combo behavior seen when SERVE_REMOTE == False and EMULATE_COMBO == True.
[ "Given", "a", "request", "for", "a", "media", "asset", "this", "view", "does", "the", "necessary", "wrangling", "to", "get", "the", "correct", "thing", "delivered", "to", "the", "user", ".", "This", "can", "also", "emulate", "the", "combo", "behavior", "seen", "when", "SERVE_REMOTE", "==", "False", "and", "EMULATE_COMBO", "==", "True", "." ]
train
https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/views.py#L86-L116
sprockets/sprockets.http
sprockets/http/runner.py
Runner.start_server
def start_server(self, port_number, number_of_procs=0):
    """
    Create an HTTP server and start it.

    :param int port_number: the port number to bind the server to
    :param int number_of_procs: number of processes to pass to
        Tornado's ``httpserver.HTTPServer.start``.

    If the application's ``debug`` setting is ``True``, then we are
    going to run in a single-process mode; otherwise, we'll let
    tornado decide how many sub-processes to spawn.

    The following additional configuration parameters can be set on the
    ``httpserver.HTTPServer`` instance by setting them in the application
    settings: ``xheaders``, ``max_body_size``, ``max_buffer_size``.
    """
    signal.signal(signal.SIGTERM, self._on_signal)
    signal.signal(signal.SIGINT, self._on_signal)
    xheaders = self.application.settings.get('xheaders', False)
    max_body_size = self.application.settings.get('max_body_size', None)
    max_buffer_size = self.application.settings.get('max_buffer_size', None)

    self.server = httpserver.HTTPServer(
        self.application.tornado_application,
        xheaders=xheaders,
        max_body_size=max_body_size,
        max_buffer_size=max_buffer_size)
    if self.application.settings.get('debug', False):
        self.logger.info('starting 1 process on port %d', port_number)
        self.server.listen(port_number)
    else:
        self.logger.info('starting processes on port %d', port_number)
        self.server.bind(port_number, reuse_port=True)
        self.server.start(number_of_procs)
python
def start_server(self, port_number, number_of_procs=0):
    """
    Create an HTTP server and start it.

    :param int port_number: the port number to bind the server to
    :param int number_of_procs: number of processes to pass to
        Tornado's ``httpserver.HTTPServer.start``.

    If the application's ``debug`` setting is ``True``, then we are
    going to run in a single-process mode; otherwise, we'll let
    tornado decide how many sub-processes to spawn.

    The following additional configuration parameters can be set on the
    ``httpserver.HTTPServer`` instance by setting them in the application
    settings: ``xheaders``, ``max_body_size``, ``max_buffer_size``.
    """
    signal.signal(signal.SIGTERM, self._on_signal)
    signal.signal(signal.SIGINT, self._on_signal)
    xheaders = self.application.settings.get('xheaders', False)
    max_body_size = self.application.settings.get('max_body_size', None)
    max_buffer_size = self.application.settings.get('max_buffer_size', None)

    self.server = httpserver.HTTPServer(
        self.application.tornado_application,
        xheaders=xheaders,
        max_body_size=max_body_size,
        max_buffer_size=max_buffer_size)
    if self.application.settings.get('debug', False):
        self.logger.info('starting 1 process on port %d', port_number)
        self.server.listen(port_number)
    else:
        self.logger.info('starting processes on port %d', port_number)
        self.server.bind(port_number, reuse_port=True)
        self.server.start(number_of_procs)
[ "def", "start_server", "(", "self", ",", "port_number", ",", "number_of_procs", "=", "0", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "self", ".", "_on_signal", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "self", ".", "_on_signal", ")", "xheaders", "=", "self", ".", "application", ".", "settings", ".", "get", "(", "'xheaders'", ",", "False", ")", "max_body_size", "=", "self", ".", "application", ".", "settings", ".", "get", "(", "'max_body_size'", ",", "None", ")", "max_buffer_size", "=", "self", ".", "application", ".", "settings", ".", "get", "(", "'max_buffer_size'", ",", "None", ")", "self", ".", "server", "=", "httpserver", ".", "HTTPServer", "(", "self", ".", "application", ".", "tornado_application", ",", "xheaders", "=", "xheaders", ",", "max_body_size", "=", "max_body_size", ",", "max_buffer_size", "=", "max_buffer_size", ")", "if", "self", ".", "application", ".", "settings", ".", "get", "(", "'debug'", ",", "False", ")", ":", "self", ".", "logger", ".", "info", "(", "'starting 1 process on port %d'", ",", "port_number", ")", "self", ".", "server", ".", "listen", "(", "port_number", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "'starting processes on port %d'", ",", "port_number", ")", "self", ".", "server", ".", "bind", "(", "port_number", ",", "reuse_port", "=", "True", ")", "self", ".", "server", ".", "start", "(", "number_of_procs", ")" ]
Create an HTTP server and start it. :param int port_number: the port number to bind the server to :param int number_of_procs: number of processes to pass to Tornado's ``httpserver.HTTPServer.start``. If the application's ``debug`` setting is ``True``, then we are going to run in a single-process mode; otherwise, we'll let tornado decide how many sub-processes to spawn. The following additional configuration parameters can be set on the ``httpserver.HTTPServer`` instance by setting them in the application settings: ``xheaders``, ``max_body_size``, ``max_buffer_size``.
[ "Create", "a", "HTTP", "server", "and", "start", "it", "." ]
train
https://github.com/sprockets/sprockets.http/blob/8baa4cdc1fa35a162ee226fd6cc4170a0ca0ecd3/sprockets/http/runner.py#L59-L94
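A hedged sketch of how the documented settings feed into the server; the app object stands in for a sprockets application wrapper, which is an assumption here:

from sprockets.http.runner import Runner

app.settings['xheaders'] = True                   # honor X-Forwarded-* headers
app.settings['max_body_size'] = 16 * 1024 * 1024  # cap request bodies at 16 MiB
app.settings['debug'] = False                     # multi-process mode

runner = Runner(app)
runner.start_server(8000)  # binds the port and forks tornado-chosen workers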
sprockets/sprockets.http
sprockets/http/runner.py
Runner.run
def run(self, port_number, number_of_procs=0):
    """
    Create the server and run the IOLoop.

    :param int port_number: the port number to bind the server to
    :param int number_of_procs: number of processes to pass to
        Tornado's ``httpserver.HTTPServer.start``.

    If the application's ``debug`` setting is ``True``, then we are
    going to run in a single-process mode; otherwise, we'll let
    tornado decide how many sub-processes based on the value of the
    ``number_of_procs`` argument.

    In any case, the application's *before_run* callbacks are invoked.
    If a callback raises an exception, then the application is
    terminated by calling :func:`sys.exit`.

    If any ``on_start`` callbacks are registered, they will be added to
    the Tornado IOLoop for execution after the IOLoop is started.

    The following additional configuration parameters can be set on the
    ``httpserver.HTTPServer`` instance by setting them in the application
    settings: ``xheaders``, ``max_body_size``, ``max_buffer_size``.
    """
    self.start_server(port_number, number_of_procs)
    iol = ioloop.IOLoop.instance()
    try:
        self.application.start(iol)
    except Exception:
        self.logger.exception('application terminated during start, '
                              'exiting')
        sys.exit(70)
    iol.start()
python
def run(self, port_number, number_of_procs=0):
    """
    Create the server and run the IOLoop.

    :param int port_number: the port number to bind the server to
    :param int number_of_procs: number of processes to pass to
        Tornado's ``httpserver.HTTPServer.start``.

    If the application's ``debug`` setting is ``True``, then we are
    going to run in a single-process mode; otherwise, we'll let
    tornado decide how many sub-processes based on the value of the
    ``number_of_procs`` argument.

    In any case, the application's *before_run* callbacks are invoked.
    If a callback raises an exception, then the application is
    terminated by calling :func:`sys.exit`.

    If any ``on_start`` callbacks are registered, they will be added to
    the Tornado IOLoop for execution after the IOLoop is started.

    The following additional configuration parameters can be set on the
    ``httpserver.HTTPServer`` instance by setting them in the application
    settings: ``xheaders``, ``max_body_size``, ``max_buffer_size``.
    """
    self.start_server(port_number, number_of_procs)
    iol = ioloop.IOLoop.instance()
    try:
        self.application.start(iol)
    except Exception:
        self.logger.exception('application terminated during start, '
                              'exiting')
        sys.exit(70)
    iol.start()
[ "def", "run", "(", "self", ",", "port_number", ",", "number_of_procs", "=", "0", ")", ":", "self", ".", "start_server", "(", "port_number", ",", "number_of_procs", ")", "iol", "=", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", "try", ":", "self", ".", "application", ".", "start", "(", "iol", ")", "except", "Exception", ":", "self", ".", "logger", ".", "exception", "(", "'application terminated during start, '", "'exiting'", ")", "sys", ".", "exit", "(", "70", ")", "iol", ".", "start", "(", ")" ]
Create the server and run the IOLoop. :param int port_number: the port number to bind the server to :param int number_of_procs: number of processes to pass to Tornado's ``httpserver.HTTPServer.start``. If the application's ``debug`` setting is ``True``, then we are going to run in a single-process mode; otherwise, we'll let tornado decide how many sub-processes based on the value of the ``number_of_procs`` argument. In any case, the application's *before_run* callbacks are invoked. If a callback raises an exception, then the application is terminated by calling :func:`sys.exit`. If any ``on_start`` callbacks are registered, they will be added to the Tornado IOLoop for execution after the IOLoop is started. The following additional configuration parameters can be set on the ``httpserver.HTTPServer`` instance by setting them in the application settings: ``xheaders``, ``max_body_size``, ``max_buffer_size``.
[ "Create", "the", "server", "and", "run", "the", "IOLoop", "." ]
train
https://github.com/sprockets/sprockets.http/blob/8baa4cdc1fa35a162ee226fd6cc4170a0ca0ecd3/sprockets/http/runner.py#L100-L133
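run wraps start_server and then drives the IOLoop; continuing the sketch above:

runner.run(8000)  # runs the before_run callbacks, then blocks in the IOLoop
# a failing before_run callback is logged and the process exits with status 70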
matllubos/django-is-core
is_core/utils/decorators.py
short_description
def short_description(description):
    """
    Sets the 'short_description' attribute (this attribute is used in
    exports to generate the header name).
    """
    def decorator(func):
        if isinstance(func, property):
            func = func.fget
        func.short_description = description
        return func
    return decorator
python
def short_description(description):
    """
    Sets the 'short_description' attribute (this attribute is used in
    exports to generate the header name).
    """
    def decorator(func):
        if isinstance(func, property):
            func = func.fget
        func.short_description = description
        return func
    return decorator
[ "def", "short_description", "(", "description", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "isinstance", "(", "func", ",", "property", ")", ":", "func", "=", "func", ".", "fget", "func", ".", "short_description", "=", "description", "return", "func", "return", "decorator" ]
Sets the 'short_description' attribute (this attribute is used in exports to generate the header name).
[ "Sets", "short_description", "attribute", "(", "this", "attribute", "is", "in", "exports", "to", "generate", "header", "name", ")", "." ]
train
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/utils/decorators.py#L1-L10
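A minimal sketch of annotating an export column; the resource class and its fields are hypothetical:

from is_core.utils.decorators import short_description

class UserResource(object):

    @short_description('Full name')
    def full_name(self, obj):
        # the decorator attaches .short_description = 'Full name' to this
        # function, which exports read to label the column
        return '{} {}'.format(obj.first_name, obj.last_name)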
confirm/ansibleci
ansibleci/config.py
Config.add_module
def add_module(self, module):
    '''
    Adds configuration parameters from a Python module.
    '''
    for key, value in module.__dict__.iteritems():
        if key[0:2] != '__':
            self.__setattr__(attr=key, value=value)
python
def add_module(self, module):
    '''
    Adds configuration parameters from a Python module.
    '''
    for key, value in module.__dict__.iteritems():
        if key[0:2] != '__':
            self.__setattr__(attr=key, value=value)
[ "def", "add_module", "(", "self", ",", "module", ")", ":", "for", "key", ",", "value", "in", "module", ".", "__dict__", ".", "iteritems", "(", ")", ":", "if", "key", "[", "0", ":", "2", "]", "!=", "'__'", ":", "self", ".", "__setattr__", "(", "attr", "=", "key", ",", "value", "=", "value", ")" ]
Adds configuration parameters from a Python module.
[ "Adds", "configuration", "parameters", "from", "a", "Python", "module", "." ]
train
https://github.com/confirm/ansibleci/blob/6a53ae8c4a4653624977e146092422857f661b8f/ansibleci/config.py#L61-L67
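A sketch of loading defaults from a settings module; mysettings is a hypothetical module, and the iteritems() call pins this code to Python 2:

from ansibleci.config import Config

import mysettings  # hypothetical module defining e.g. TIMEOUT = 30

config = Config()
config.add_module(mysettings)  # copies every non-dunder attribute
config.TIMEOUT                 # -> 30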
matllubos/django-is-core
is_core/generic_views/__init__.py
redirect_to_login
def redirect_to_login(next, redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    resolved_url = reverse('IS:login')

    login_url_parts = list(urlparse(resolved_url))
    if redirect_field_name:
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

    raise HTTPRedirectResponseException(urlunparse(login_url_parts))
python
def redirect_to_login(next, redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    resolved_url = reverse('IS:login')

    login_url_parts = list(urlparse(resolved_url))
    if redirect_field_name:
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

    raise HTTPRedirectResponseException(urlunparse(login_url_parts))
[ "def", "redirect_to_login", "(", "next", ",", "redirect_field_name", "=", "REDIRECT_FIELD_NAME", ")", ":", "resolved_url", "=", "reverse", "(", "'IS:login'", ")", "login_url_parts", "=", "list", "(", "urlparse", "(", "resolved_url", ")", ")", "if", "redirect_field_name", ":", "querystring", "=", "QueryDict", "(", "login_url_parts", "[", "4", "]", ",", "mutable", "=", "True", ")", "querystring", "[", "redirect_field_name", "]", "=", "next", "login_url_parts", "[", "4", "]", "=", "querystring", ".", "urlencode", "(", "safe", "=", "'/'", ")", "raise", "HTTPRedirectResponseException", "(", "urlunparse", "(", "login_url_parts", ")", ")" ]
Redirects the user to the login page, passing the given 'next' page
[ "Redirects", "the", "user", "to", "the", "login", "page", "passing", "the", "given", "next", "page" ]
train
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/generic_views/__init__.py#L23-L35
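A sketch of the resulting redirect from inside a view; the request object is assumed:

redirect_to_login(request.get_full_path())
# raises HTTPRedirectResponseException with a location such as
# /login/?next=/original/path (whatever the 'IS:login' URL reverses to)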
matllubos/django-is-core
is_core/generic_views/__init__.py
PermissionsViewMixin._check_permission
def _check_permission(self, name, obj=None):
    """
    If the customer is not authorized, they should not learn that the
    object exists. Therefore 403 is returned if the object was not found,
    or the user is redirected to the login page.
    If the customer is authorized and the object was not found, 404 is
    returned.
    If the object was found and the user is not authorized, 403 is
    returned or the user is redirected to the login page.
    If the object was found and the user is authorized, 403 or 200 is
    returned according to the result of the _has_permission method.
    """
    def redirect_or_exception(ex):
        if not self.request.user or not self.request.user.is_authenticated:
            if self.auto_login_redirect:
                redirect_to_login(self.request.get_full_path())
            else:
                raise HTTPUnauthorizedResponseException
        else:
            raise ex

    try:
        if not self._has_permission(name, obj):
            redirect_or_exception(HTTPForbiddenResponseException)
    except Http404 as ex:
        redirect_or_exception(ex)
python
def _check_permission(self, name, obj=None):
    """
    If the customer is not authorized, they should not learn that the
    object exists. Therefore 403 is returned if the object was not found,
    or the user is redirected to the login page.
    If the customer is authorized and the object was not found, 404 is
    returned.
    If the object was found and the user is not authorized, 403 is
    returned or the user is redirected to the login page.
    If the object was found and the user is authorized, 403 or 200 is
    returned according to the result of the _has_permission method.
    """
    def redirect_or_exception(ex):
        if not self.request.user or not self.request.user.is_authenticated:
            if self.auto_login_redirect:
                redirect_to_login(self.request.get_full_path())
            else:
                raise HTTPUnauthorizedResponseException
        else:
            raise ex

    try:
        if not self._has_permission(name, obj):
            redirect_or_exception(HTTPForbiddenResponseException)
    except Http404 as ex:
        redirect_or_exception(ex)
[ "def", "_check_permission", "(", "self", ",", "name", ",", "obj", "=", "None", ")", ":", "def", "redirect_or_exception", "(", "ex", ")", ":", "if", "not", "self", ".", "request", ".", "user", "or", "not", "self", ".", "request", ".", "user", ".", "is_authenticated", ":", "if", "self", ".", "auto_login_redirect", ":", "redirect_to_login", "(", "self", ".", "request", ".", "get_full_path", "(", ")", ")", "else", ":", "raise", "HTTPUnauthorizedResponseException", "else", ":", "raise", "ex", "try", ":", "if", "not", "self", ".", "_has_permission", "(", "name", ",", "obj", ")", ":", "redirect_or_exception", "(", "HTTPForbiddenResponseException", ")", "except", "Http404", "as", "ex", ":", "redirect_or_exception", "(", "ex", ")" ]
If the customer is not authorized, they should not learn that the object exists. Therefore 403 is returned if the object was not found, or the user is redirected to the login page. If the customer is authorized and the object was not found, 404 is returned. If the object was found and the user is not authorized, 403 is returned or the user is redirected to the login page. If the object was found and the user is authorized, 403 or 200 is returned according to the result of the _has_permission method.
[ "If", "customer", "is", "not", "authorized", "he", "should", "not", "get", "information", "that", "object", "is", "exists", ".", "Therefore", "403", "is", "returned", "if", "object", "was", "not", "found", "or", "is", "redirected", "to", "the", "login", "page", ".", "If", "custmer", "is", "authorized", "and", "object", "was", "not", "found", "is", "returned", "404", ".", "If", "object", "was", "found", "and", "user", "is", "not", "authorized", "is", "returned", "403", "or", "redirect", "to", "login", "page", ".", "If", "object", "was", "found", "and", "user", "is", "authorized", "is", "returned", "403", "or", "200", "according", "of", "result", "of", "_has_permission", "method", "." ]
train
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/generic_views/__init__.py#L67-L88