Dataset columns (each record below lists these fields in order):

    repo               stringlengths   7-55
    path               stringlengths   4-127
    func_name          stringlengths   1-88
    original_string    stringlengths   75-19.8k
    language           stringclasses   1 value
    code               stringlengths   75-19.8k
    code_tokens        list
    docstring          stringlengths   3-17.3k
    docstring_tokens   list
    sha                stringlengths   40-40
    url                stringlengths   87-242
    partition          stringclasses   1 value
NoviceLive/intellicoder
intellicoder/converters.py
Converter.from_section
def from_section(cls, stream, section_name='.pic'): """Construct a Converter object from the specified section of the specified binary stream.""" binary = Executable(stream) section_data = binary.get_section_data(section_name) return cls(section_data, binary.system)
python
def from_section(cls, stream, section_name='.pic'): """Construct a Converter object from the specified section of the specified binary stream.""" binary = Executable(stream) section_data = binary.get_section_data(section_name) return cls(section_data, binary.system)
[ "def", "from_section", "(", "cls", ",", "stream", ",", "section_name", "=", "'.pic'", ")", ":", "binary", "=", "Executable", "(", "stream", ")", "section_data", "=", "binary", ".", "get_section_data", "(", "section_name", ")", "return", "cls", "(", "section_data", ",", "binary", ".", "system", ")" ]
Construct a Converter object from the specified section of the specified binary stream.
[ "Construct", "a", "Converter", "object", "from", "the", "specified", "section", "of", "the", "specified", "binary", "stream", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L64-L69
train
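A hedged usage sketch for the record above. The import path follows the repository layout shown (intellicoder/converters.py); the input file name is made up, and it is an assumption that from_section accepts an open binary file object, as its "binary stream" docstring suggests.

from intellicoder.converters import Converter

# 'payload.exe' is a placeholder for any PE/ELF binary that carries a '.pic' section.
with open('payload.exe', 'rb') as stream:
    converter = Converter.from_section(stream, section_name='.pic')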
NoviceLive/intellicoder
intellicoder/converters.py
Converter.to_esc
def to_esc(self): """Convert to escape string.""" chunks = chunked(self.stream, 2) return ''.join(r'\x' + ''.join(pair) for pair in chunks)
python
def to_esc(self): """Convert to escape string.""" chunks = chunked(self.stream, 2) return ''.join(r'\x' + ''.join(pair) for pair in chunks)
[ "def", "to_esc", "(", "self", ")", ":", "chunks", "=", "chunked", "(", "self", ".", "stream", ",", "2", ")", "return", "''", ".", "join", "(", "r'\\x'", "+", "''", ".", "join", "(", "pair", ")", "for", "pair", "in", "chunks", ")" ]
Convert to escape string.
[ "Convert", "to", "escape", "string", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L90-L93
train
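A self-contained sketch of the transformation to_esc performs, with a plain slicing generator standing in for the chunked() helper the class uses (that substitution is an assumption):

def to_esc(stream):
    # Pair up consecutive characters and prefix each pair with \x.
    chunks = (stream[i:i + 2] for i in range(0, len(stream), 2))
    return ''.join(r'\x' + pair for pair in chunks)

print(to_esc('41424344'))  # \x41\x42\x43\x44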
sirfoga/pyhal
hal/maths/utils.py
get_percentage_relative_to
def get_percentage_relative_to(val, other): """Finds percentage between 2 numbers :param val: number :param other: number to compare to :return: percentage of delta between first and second """ val = float(val) other = float(other) ratio = val / other - 1 return ratio * 100.0
python
def get_percentage_relative_to(val, other): """Finds percentage between 2 numbers :param val: number :param other: number to compare to :return: percentage of delta between first and second """ val = float(val) other = float(other) ratio = val / other - 1 return ratio * 100.0
[ "def", "get_percentage_relative_to", "(", "val", ",", "other", ")", ":", "val", "=", "float", "(", "val", ")", "other", "=", "float", "(", "other", ")", "ratio", "=", "val", "/", "other", "-", "1", "return", "ratio", "*", "100.0" ]
Finds percentage between 2 numbers :param val: number :param other: number to compare to :return: percentage of delta between first and second
[ "Finds", "percentage", "between", "2", "numbers" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/maths/utils.py#L6-L17
train
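A quick usage check of the formula above, (val / other - 1) * 100; the import path simply follows the record's hal/maths/utils.py layout:

from hal.maths.utils import get_percentage_relative_to

print(get_percentage_relative_to(120, 100))  # 20.0   (120 is 20% above 100)
print(get_percentage_relative_to(80, 100))   # -20.0  (80 is 20% below 100)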
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/addons/scenerelease/scenerelease.py
OptionWidget.setup_ui
def setup_ui(self, ): """Create all ui elements and layouts :returns: None :rtype: None :raises: None """ self.main_vbox = QtGui.QVBoxLayout(self) self.import_all_references_cb = QtGui.QCheckBox("Import references") self.main_vbox.addWidget(self.import_all_references_cb)
python
def setup_ui(self, ): """Create all ui elements and layouts :returns: None :rtype: None :raises: None """ self.main_vbox = QtGui.QVBoxLayout(self) self.import_all_references_cb = QtGui.QCheckBox("Import references") self.main_vbox.addWidget(self.import_all_references_cb)
[ "def", "setup_ui", "(", "self", ",", ")", ":", "self", ".", "main_vbox", "=", "QtGui", ".", "QVBoxLayout", "(", "self", ")", "self", ".", "import_all_references_cb", "=", "QtGui", ".", "QCheckBox", "(", "\"Import references\"", ")", "self", ".", "main_vbox", ".", "addWidget", "(", "self", ".", "import_all_references_cb", ")" ]
Create all ui elements and layouts :returns: None :rtype: None :raises: None
[ "Create", "all", "ui", "elements", "and", "layouts" ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/scenerelease/scenerelease.py#L33-L42
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/addons/scenerelease/scenerelease.py
SceneReleaseActions.get_cleanups
def get_cleanups(self, ): """Get the cleanup actions for a releaes depending on the selected options :returns: the cleanup actions :rtype: :class:`jukeboxcore.action.ActionCollection` :raises: None """ cleanups = [] open_unit = ActionUnit(name="Open", description="Open the maya scene.", actionfunc=open_scene) cleanups.append(open_unit) if self._option_widget.import_references(): import_unit = ActionUnit(name="Import references", description="Import all references in the scene.", actionfunc=import_all_references, depsuccess=[open_unit]) cleanups.append(import_unit) update_scenenode_unit = ActionUnit(name="Update Scene Node", description="Change the id from the jbscene node from work to releasefile.", actionfunc=update_scenenode, depsuccess=[open_unit]) cleanups.append(update_scenenode_unit) save_unit = ActionUnit(name="Save", description="Save the scene.", actionfunc=save_scene, depsuccess=[update_scenenode_unit]) cleanups.append(save_unit) return ActionCollection(cleanups)
python
def get_cleanups(self, ): """Get the cleanup actions for a releaes depending on the selected options :returns: the cleanup actions :rtype: :class:`jukeboxcore.action.ActionCollection` :raises: None """ cleanups = [] open_unit = ActionUnit(name="Open", description="Open the maya scene.", actionfunc=open_scene) cleanups.append(open_unit) if self._option_widget.import_references(): import_unit = ActionUnit(name="Import references", description="Import all references in the scene.", actionfunc=import_all_references, depsuccess=[open_unit]) cleanups.append(import_unit) update_scenenode_unit = ActionUnit(name="Update Scene Node", description="Change the id from the jbscene node from work to releasefile.", actionfunc=update_scenenode, depsuccess=[open_unit]) cleanups.append(update_scenenode_unit) save_unit = ActionUnit(name="Save", description="Save the scene.", actionfunc=save_scene, depsuccess=[update_scenenode_unit]) cleanups.append(save_unit) return ActionCollection(cleanups)
[ "def", "get_cleanups", "(", "self", ",", ")", ":", "cleanups", "=", "[", "]", "open_unit", "=", "ActionUnit", "(", "name", "=", "\"Open\"", ",", "description", "=", "\"Open the maya scene.\"", ",", "actionfunc", "=", "open_scene", ")", "cleanups", ".", "append", "(", "open_unit", ")", "if", "self", ".", "_option_widget", ".", "import_references", "(", ")", ":", "import_unit", "=", "ActionUnit", "(", "name", "=", "\"Import references\"", ",", "description", "=", "\"Import all references in the scene.\"", ",", "actionfunc", "=", "import_all_references", ",", "depsuccess", "=", "[", "open_unit", "]", ")", "cleanups", ".", "append", "(", "import_unit", ")", "update_scenenode_unit", "=", "ActionUnit", "(", "name", "=", "\"Update Scene Node\"", ",", "description", "=", "\"Change the id from the jbscene node from work to releasefile.\"", ",", "actionfunc", "=", "update_scenenode", ",", "depsuccess", "=", "[", "open_unit", "]", ")", "cleanups", ".", "append", "(", "update_scenenode_unit", ")", "save_unit", "=", "ActionUnit", "(", "name", "=", "\"Save\"", ",", "description", "=", "\"Save the scene.\"", ",", "actionfunc", "=", "save_scene", ",", "depsuccess", "=", "[", "update_scenenode_unit", "]", ")", "cleanups", ".", "append", "(", "save_unit", ")", "return", "ActionCollection", "(", "cleanups", ")" ]
Get the cleanup actions for a release depending on the selected options :returns: the cleanup actions :rtype: :class:`jukeboxcore.action.ActionCollection` :raises: None
[ "Get", "the", "cleanup", "actions", "for", "a", "release", "depending", "on", "the", "selected", "options" ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/scenerelease/scenerelease.py#L77-L105
train
lowandrew/OLCTools
spadespipeline/legacy_vtyper.py
Vtyper.epcr_primer_file
def epcr_primer_file(self, formattedprimers): """ Create the ePCR-compatible primer file from the dictionaries of primer combinations """ logging.info('Creating re-PCR-compatible primer file') with open(formattedprimers, 'w') as formatted: # Iterate through all the targets for basename in sorted(self.forward_dict): # Use enumerate to number the iterations for each forward and reverse primer in the lists for forward_index, forward_primer in enumerate(self.forward_dict[basename]): for reverse_index, reverse_primer in enumerate(self.reverse_dict[basename]): # Set the name of the primer using the target name, and the indices of the primers # e.g. vtx1a_0_0 primer_name = '{bn}_{fi}_{ri}'.format(bn=basename, fi=forward_index, ri=reverse_index) # Create the string to write to the ePCR-compatible primer file # e.g. vtx1a_0_0 CCTTTCCAGGTACAACAGCGGTT GGAAACTCATCAGATGCCATTCTGG output_string = '{pn}\t{fp}\t{rp}\n'.format(pn=primer_name, fp=forward_primer, rp=reverse_primer) # Write the string to file formatted.write(output_string)
python
def epcr_primer_file(self, formattedprimers): """ Create the ePCR-compatible primer file from the dictionaries of primer combinations """ logging.info('Creating re-PCR-compatible primer file') with open(formattedprimers, 'w') as formatted: # Iterate through all the targets for basename in sorted(self.forward_dict): # Use enumerate to number the iterations for each forward and reverse primer in the lists for forward_index, forward_primer in enumerate(self.forward_dict[basename]): for reverse_index, reverse_primer in enumerate(self.reverse_dict[basename]): # Set the name of the primer using the target name, and the indices of the primers # e.g. vtx1a_0_0 primer_name = '{bn}_{fi}_{ri}'.format(bn=basename, fi=forward_index, ri=reverse_index) # Create the string to write to the ePCR-compatible primer file # e.g. vtx1a_0_0 CCTTTCCAGGTACAACAGCGGTT GGAAACTCATCAGATGCCATTCTGG output_string = '{pn}\t{fp}\t{rp}\n'.format(pn=primer_name, fp=forward_primer, rp=reverse_primer) # Write the string to file formatted.write(output_string)
[ "def", "epcr_primer_file", "(", "self", ",", "formattedprimers", ")", ":", "logging", ".", "info", "(", "'Creating re-PCR-compatible primer file'", ")", "with", "open", "(", "formattedprimers", ",", "'w'", ")", "as", "formatted", ":", "# Iterate through all the targets", "for", "basename", "in", "sorted", "(", "self", ".", "forward_dict", ")", ":", "# Use enumerate to number the iterations for each forward and reverse primer in the lists", "for", "forward_index", ",", "forward_primer", "in", "enumerate", "(", "self", ".", "forward_dict", "[", "basename", "]", ")", ":", "for", "reverse_index", ",", "reverse_primer", "in", "enumerate", "(", "self", ".", "reverse_dict", "[", "basename", "]", ")", ":", "# Set the name of the primer using the target name, and the indices of the primers", "# e.g. vtx1a_0_0", "primer_name", "=", "'{bn}_{fi}_{ri}'", ".", "format", "(", "bn", "=", "basename", ",", "fi", "=", "forward_index", ",", "ri", "=", "reverse_index", ")", "# Create the string to write to the ePCR-compatible primer file", "# e.g. vtx1a_0_0\tCCTTTCCAGGTACAACAGCGGTT\tGGAAACTCATCAGATGCCATTCTGG", "output_string", "=", "'{pn}\\t{fp}\\t{rp}\\n'", ".", "format", "(", "pn", "=", "primer_name", ",", "fp", "=", "forward_primer", ",", "rp", "=", "reverse_primer", ")", "# Write the string to file", "formatted", ".", "write", "(", "output_string", ")" ]
Create the ePCR-compatible primer file from the dictionaries of primer combinations
[ "Create", "the", "ePCR", "-", "compatible", "primer", "file", "from", "the", "dictionaries", "of", "primer", "combinations" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L63-L85
train
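A minimal standalone sketch of the naming and tab-separated line format the method writes, using the example primer pair from the record's own comments (the single-entry dictionaries are illustrative):

forward_dict = {'vtx1a': ['CCTTTCCAGGTACAACAGCGGTT']}
reverse_dict = {'vtx1a': ['GGAAACTCATCAGATGCCATTCTGG']}
for basename in sorted(forward_dict):
    for fi, fp in enumerate(forward_dict[basename]):
        for ri, rp in enumerate(reverse_dict[basename]):
            # The name is the target plus the indices of the forward and reverse primers.
            print('{bn}_{fi}_{ri}\t{fp}\t{rp}'.format(bn=basename, fi=fi, ri=ri, fp=fp, rp=rp))
# prints: vtx1a_0_0, the forward primer, and the reverse primer, separated by tabs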
lowandrew/OLCTools
spadespipeline/legacy_vtyper.py
Vtyper.epcr_threads
def epcr_threads(self, formattedprimers, ampliconsize=10000): """ Run ePCR in a multi-threaded fashion """ # Create the threads for the ePCR analysis for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': threads = Thread(target=self.epcr, args=()) threads.setDaemon(True) threads.start() logging.info('Running ePCR analyses') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': setattr(sample, self.analysistype, GenObject()) # Get the primers ready sample[self.analysistype].primers = formattedprimers # Make the output path sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype) make_path(sample[self.analysistype].reportdir) outfile = os.path.join(sample[self.analysistype].reportdir, sample.name) # Set the hashing and mapping commands sample.commands.famap = '{famap} -b {outfile}.famap {fasta}'\ .format(famap=os.path.join(self.homepath, 'ePCR', 'famap'), outfile=outfile, fasta=sample.general.bestassemblyfile) sample.commands.fahash = '{fahash} -b {outfile}.hash {outfile}.famap'\ .format(fahash=os.path.join(self.homepath, 'ePCR', 'fahash'), outfile=outfile) # re-PCR uses the subtyping primers list to search the contigs file using the following parameters # -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup) # -m 10000 (Set variability for STS size for lookup), this very large, as I don't necessarily know # the size of the amplicon # -n 1 (Set max allowed mismatches per primer pair for lookup) # -g 0 (Set max allowed indels per primer pair for lookup), # -G (Print alignments in comments) # -o {output file} sample.commands.epcr = \ '{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q ' \ '-o {outfile}.txt {primers}'\ .format(rePCR=os.path.join(self.homepath, 'ePCR', 're-PCR'), outfile=outfile, ampsize=ampliconsize, mismatches=self.mismatches, primers=sample[self.analysistype].primers) sample[self.analysistype].resultsfile = '{of}.txt'.format(of=outfile) # Add the sample object and the output file to the queue self.epcrqueue.put((sample, outfile)) # Join the threads self.epcrqueue.join()
python
def epcr_threads(self, formattedprimers, ampliconsize=10000): """ Run ePCR in a multi-threaded fashion """ # Create the threads for the ePCR analysis for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': threads = Thread(target=self.epcr, args=()) threads.setDaemon(True) threads.start() logging.info('Running ePCR analyses') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': setattr(sample, self.analysistype, GenObject()) # Get the primers ready sample[self.analysistype].primers = formattedprimers # Make the output path sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype) make_path(sample[self.analysistype].reportdir) outfile = os.path.join(sample[self.analysistype].reportdir, sample.name) # Set the hashing and mapping commands sample.commands.famap = '{famap} -b {outfile}.famap {fasta}'\ .format(famap=os.path.join(self.homepath, 'ePCR', 'famap'), outfile=outfile, fasta=sample.general.bestassemblyfile) sample.commands.fahash = '{fahash} -b {outfile}.hash {outfile}.famap'\ .format(fahash=os.path.join(self.homepath, 'ePCR', 'fahash'), outfile=outfile) # re-PCR uses the subtyping primers list to search the contigs file using the following parameters # -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup) # -m 10000 (Set variability for STS size for lookup), this very large, as I don't necessarily know # the size of the amplicon # -n 1 (Set max allowed mismatches per primer pair for lookup) # -g 0 (Set max allowed indels per primer pair for lookup), # -G (Print alignments in comments) # -o {output file} sample.commands.epcr = \ '{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q ' \ '-o {outfile}.txt {primers}'\ .format(rePCR=os.path.join(self.homepath, 'ePCR', 're-PCR'), outfile=outfile, ampsize=ampliconsize, mismatches=self.mismatches, primers=sample[self.analysistype].primers) sample[self.analysistype].resultsfile = '{of}.txt'.format(of=outfile) # Add the sample object and the output file to the queue self.epcrqueue.put((sample, outfile)) # Join the threads self.epcrqueue.join()
[ "def", "epcr_threads", "(", "self", ",", "formattedprimers", ",", "ampliconsize", "=", "10000", ")", ":", "# Create the threads for the ePCR analysis", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "threads", "=", "Thread", "(", "target", "=", "self", ".", "epcr", ",", "args", "=", "(", ")", ")", "threads", ".", "setDaemon", "(", "True", ")", "threads", ".", "start", "(", ")", "logging", ".", "info", "(", "'Running ePCR analyses'", ")", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "setattr", "(", "sample", ",", "self", ".", "analysistype", ",", "GenObject", "(", ")", ")", "# Get the primers ready", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", "=", "formattedprimers", "# Make the output path", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", "=", "os", ".", "path", ".", "join", "(", "sample", ".", "general", ".", "outputdirectory", ",", "self", ".", "analysistype", ")", "make_path", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "sample", ".", "name", ")", "# Set the hashing and mapping commands", "sample", ".", "commands", ".", "famap", "=", "'{famap} -b {outfile}.famap {fasta}'", ".", "format", "(", "famap", "=", "os", ".", "path", ".", "join", "(", "self", ".", "homepath", ",", "'ePCR'", ",", "'famap'", ")", ",", "outfile", "=", "outfile", ",", "fasta", "=", "sample", ".", "general", ".", "bestassemblyfile", ")", "sample", ".", "commands", ".", "fahash", "=", "'{fahash} -b {outfile}.hash {outfile}.famap'", ".", "format", "(", "fahash", "=", "os", ".", "path", ".", "join", "(", "self", ".", "homepath", ",", "'ePCR'", ",", "'fahash'", ")", ",", "outfile", "=", "outfile", ")", "# re-PCR uses the subtyping primers list to search the contigs file using the following parameters", "# -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup)", "# -m 10000 (Set variability for STS size for lookup), this very large, as I don't necessarily know", "# the size of the amplicon", "# -n 1 (Set max allowed mismatches per primer pair for lookup)", "# -g 0 (Set max allowed indels per primer pair for lookup),", "# -G (Print alignments in comments)", "# -o {output file}", "sample", ".", "commands", ".", "epcr", "=", "'{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q '", "'-o {outfile}.txt {primers}'", ".", "format", "(", "rePCR", "=", "os", ".", "path", ".", "join", "(", "self", ".", "homepath", ",", "'ePCR'", ",", "'re-PCR'", ")", ",", "outfile", "=", "outfile", ",", "ampsize", "=", "ampliconsize", ",", "mismatches", "=", "self", ".", "mismatches", ",", "primers", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "primers", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "resultsfile", "=", "'{of}.txt'", ".", "format", "(", "of", "=", "outfile", ")", "# Add the sample object and the output file to the queue", "self", ".", "epcrqueue", ".", "put", "(", "(", "sample", ",", "outfile", ")", ")", "# Join the threads", "self", ".", "epcrqueue", ".", "join", "(", ")" ]
Run ePCR in a multi-threaded fashion
[ "Run", "ePCR", "in", "a", "multi", "-", "threaded", "fashion" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L87-L136
train
lowandrew/OLCTools
spadespipeline/legacy_vtyper.py
Vtyper.epcr_parse
def epcr_parse(self): """ Parse the ePCR outputs """ logging.info('Parsing ePCR outputs') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # Create a set to store all the unique results toxin_set = set() if os.path.isfile(sample[self.analysistype].resultsfile): with open(sample[self.analysistype].resultsfile) as epcrresults: for result in epcrresults: # Only the lines without a # contain results if "#" not in result: # Split on \t data = result.split('\t') # The subtyping primer pair is the first entry on lines with results vttype = data[0].split('_')[0] # Add the verotoxin subtype to the set of detected subtypes toxin_set.add(vttype) # Create a string of the entries in the sorted list of toxins joined with ";" sample[self.analysistype].toxinprofile = ";".join(sorted(list(toxin_set))) if toxin_set else 'ND' else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA'
python
def epcr_parse(self): """ Parse the ePCR outputs """ logging.info('Parsing ePCR outputs') for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # Create a set to store all the unique results toxin_set = set() if os.path.isfile(sample[self.analysistype].resultsfile): with open(sample[self.analysistype].resultsfile) as epcrresults: for result in epcrresults: # Only the lines without a # contain results if "#" not in result: # Split on \t data = result.split('\t') # The subtyping primer pair is the first entry on lines with results vttype = data[0].split('_')[0] # Add the verotoxin subtype to the set of detected subtypes toxin_set.add(vttype) # Create a string of the entries in the sorted list of toxins joined with ";" sample[self.analysistype].toxinprofile = ";".join(sorted(list(toxin_set))) if toxin_set else 'ND' else: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].toxinprofile = 'NA'
[ "def", "epcr_parse", "(", "self", ")", ":", "logging", ".", "info", "(", "'Parsing ePCR outputs'", ")", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "# Create a set to store all the unique results", "toxin_set", "=", "set", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "resultsfile", ")", ":", "with", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "resultsfile", ")", "as", "epcrresults", ":", "for", "result", "in", "epcrresults", ":", "# Only the lines without a # contain results", "if", "\"#\"", "not", "in", "result", ":", "# Split on \\t", "data", "=", "result", ".", "split", "(", "'\\t'", ")", "# The subtyping primer pair is the first entry on lines with results", "vttype", "=", "data", "[", "0", "]", ".", "split", "(", "'_'", ")", "[", "0", "]", "# Add the verotoxin subtype to the set of detected subtypes", "toxin_set", ".", "add", "(", "vttype", ")", "# Create a string of the entries in the sorted list of toxins joined with \";\"", "sample", "[", "self", ".", "analysistype", "]", ".", "toxinprofile", "=", "\";\"", ".", "join", "(", "sorted", "(", "list", "(", "toxin_set", ")", ")", ")", "if", "toxin_set", "else", "'ND'", "else", ":", "setattr", "(", "sample", ",", "self", ".", "analysistype", ",", "GenObject", "(", ")", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "toxinprofile", "=", "'NA'" ]
Parse the ePCR outputs
[ "Parse", "the", "ePCR", "outputs" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L158-L182
train
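The toxin profile written per sample is simply the sorted, semicolon-joined set of detected subtypes; a tiny illustration with made-up subtypes:

toxin_set = {'vtx2a', 'vtx1a'}
print(';'.join(sorted(toxin_set)) if toxin_set else 'ND')  # vtx1a;vtx2a

empty = set()
print(';'.join(sorted(empty)) if empty else 'ND')  # ND when nothing was detected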
lowandrew/OLCTools
spadespipeline/legacy_vtyper.py
Vtyper.epcr_report
def epcr_report(self): """ Create a report of the ePCR-calculated toxin profiles """ logging.info('Creating {at} report'.format(at=self.analysistype)) with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report: data = 'Strain,ToxinProfile\n' for sample in self.metadata: data += '{sn},{tp}\n'.format(sn=sample.name, tp=sample[self.analysistype].toxinprofile) # Write the data to the report report.write(data)
python
def epcr_report(self): """ Create a report of the ePCR-calculated toxin profiles """ logging.info('Creating {at} report'.format(at=self.analysistype)) with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report: data = 'Strain,ToxinProfile\n' for sample in self.metadata: data += '{sn},{tp}\n'.format(sn=sample.name, tp=sample[self.analysistype].toxinprofile) # Write the data to the report report.write(data)
[ "def", "epcr_report", "(", "self", ")", ":", "logging", ".", "info", "(", "'Creating {at} report'", ".", "format", "(", "at", "=", "self", ".", "analysistype", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "'{at}.csv'", ".", "format", "(", "at", "=", "self", ".", "analysistype", ")", ")", ",", "'w'", ")", "as", "report", ":", "data", "=", "'Strain,ToxinProfile\\n'", "for", "sample", "in", "self", ".", "metadata", ":", "data", "+=", "'{sn},{tp}\\n'", ".", "format", "(", "sn", "=", "sample", ".", "name", ",", "tp", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "toxinprofile", ")", "# Write the data to the report", "report", ".", "write", "(", "data", ")" ]
Create a report of the ePCR-calculated toxin profiles
[ "Create", "a", "report", "of", "the", "ePCR", "-", "calculated", "toxin", "profiles" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L184-L195
train
lowandrew/OLCTools
spadespipeline/legacy_vtyper.py
Custom.parse_epcr
def parse_epcr(self): """ Parse the ePCR output file. Populate dictionary of resutls. For alleles, find the best result based on the number of mismatches before populating dictionary """ # Use the metadata object from the vtyper_object for sample in self.vtyper_object.metadata: # Initialise the dictionary sample[self.analysistype].result_dict = dict() # Read in the output file with open(sample[self.analysistype].resultsfile) as epcrresults: for result in epcrresults: # Only the lines without a # contain results if "#" not in result: # Split on \t # vtx2a_0_0 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924 - 228 576 2 0 349/100-350 # primer_set: vtx2a_0_0, contig: 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924, strand: -, # start: 228, stop: 576, number of forward mismatches: 2, number of reverse mismatches: 2 # amplicon_combo: 349/100-350 primer_set, contig, strand, start, stop, total_mismatches, indels, amplicon_combo = \ result.rstrip().split('\t') # Set the mismatches to be an int total_mismatches = int(total_mismatches) # Set the position of the amplicon on the contig. Ensure that the lower value is first genome_pos = '{min}-{max}'.format(min=min([int(start), int(stop)]), max=max([int(start), int(stop)])) # Extract the gene name from the modified name used when creating the primer file: LMhlyA_0_0 # becomes LMhlyA gene_re = re.search(r'([\w-]+)_(\d{1,3})_(\d{1,3})', primer_set) gene = gene_re.groups()[0] # Split the amplicon length from amplicon_combo: 349/100-350 -> 349 amplicon_length = amplicon_combo.split('/')[0] # Populate the dictionary if the 'total_mismatches' key doesn't exist, or if the current number # of mismatches is better than the previous 'best' number of mismatches try: if total_mismatches < sample[self.analysistype].result_dict[gene]['total_mismatches']: self.populate_results_dict(sample=sample, gene=gene, total_mismatches=total_mismatches, genome_pos=genome_pos, amplicon_length=amplicon_length, contig=contig, primer_set=primer_set) except KeyError: self.populate_results_dict(sample=sample, gene=gene, total_mismatches=total_mismatches, genome_pos=genome_pos, amplicon_length=amplicon_length, contig=contig, primer_set=primer_set)
python
def parse_epcr(self): """ Parse the ePCR output file. Populate dictionary of resutls. For alleles, find the best result based on the number of mismatches before populating dictionary """ # Use the metadata object from the vtyper_object for sample in self.vtyper_object.metadata: # Initialise the dictionary sample[self.analysistype].result_dict = dict() # Read in the output file with open(sample[self.analysistype].resultsfile) as epcrresults: for result in epcrresults: # Only the lines without a # contain results if "#" not in result: # Split on \t # vtx2a_0_0 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924 - 228 576 2 0 349/100-350 # primer_set: vtx2a_0_0, contig: 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924, strand: -, # start: 228, stop: 576, number of forward mismatches: 2, number of reverse mismatches: 2 # amplicon_combo: 349/100-350 primer_set, contig, strand, start, stop, total_mismatches, indels, amplicon_combo = \ result.rstrip().split('\t') # Set the mismatches to be an int total_mismatches = int(total_mismatches) # Set the position of the amplicon on the contig. Ensure that the lower value is first genome_pos = '{min}-{max}'.format(min=min([int(start), int(stop)]), max=max([int(start), int(stop)])) # Extract the gene name from the modified name used when creating the primer file: LMhlyA_0_0 # becomes LMhlyA gene_re = re.search(r'([\w-]+)_(\d{1,3})_(\d{1,3})', primer_set) gene = gene_re.groups()[0] # Split the amplicon length from amplicon_combo: 349/100-350 -> 349 amplicon_length = amplicon_combo.split('/')[0] # Populate the dictionary if the 'total_mismatches' key doesn't exist, or if the current number # of mismatches is better than the previous 'best' number of mismatches try: if total_mismatches < sample[self.analysistype].result_dict[gene]['total_mismatches']: self.populate_results_dict(sample=sample, gene=gene, total_mismatches=total_mismatches, genome_pos=genome_pos, amplicon_length=amplicon_length, contig=contig, primer_set=primer_set) except KeyError: self.populate_results_dict(sample=sample, gene=gene, total_mismatches=total_mismatches, genome_pos=genome_pos, amplicon_length=amplicon_length, contig=contig, primer_set=primer_set)
[ "def", "parse_epcr", "(", "self", ")", ":", "# Use the metadata object from the vtyper_object", "for", "sample", "in", "self", ".", "vtyper_object", ".", "metadata", ":", "# Initialise the dictionary", "sample", "[", "self", ".", "analysistype", "]", ".", "result_dict", "=", "dict", "(", ")", "# Read in the output file", "with", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "resultsfile", ")", "as", "epcrresults", ":", "for", "result", "in", "epcrresults", ":", "# Only the lines without a # contain results", "if", "\"#\"", "not", "in", "result", ":", "# Split on \\t", "# vtx2a_0_0 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924 - 228 576 2 0 349/100-350", "# primer_set: vtx2a_0_0, contig: 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924, strand: -,", "# start: 228, stop: 576, number of forward mismatches: 2, number of reverse mismatches: 2", "# amplicon_combo: 349/100-350", "primer_set", ",", "contig", ",", "strand", ",", "start", ",", "stop", ",", "total_mismatches", ",", "indels", ",", "amplicon_combo", "=", "result", ".", "rstrip", "(", ")", ".", "split", "(", "'\\t'", ")", "# Set the mismatches to be an int", "total_mismatches", "=", "int", "(", "total_mismatches", ")", "# Set the position of the amplicon on the contig. Ensure that the lower value is first", "genome_pos", "=", "'{min}-{max}'", ".", "format", "(", "min", "=", "min", "(", "[", "int", "(", "start", ")", ",", "int", "(", "stop", ")", "]", ")", ",", "max", "=", "max", "(", "[", "int", "(", "start", ")", ",", "int", "(", "stop", ")", "]", ")", ")", "# Extract the gene name from the modified name used when creating the primer file: LMhlyA_0_0", "# becomes LMhlyA", "gene_re", "=", "re", ".", "search", "(", "r'([\\w-]+)_(\\d{1,3})_(\\d{1,3})'", ",", "primer_set", ")", "gene", "=", "gene_re", ".", "groups", "(", ")", "[", "0", "]", "# Split the amplicon length from amplicon_combo: 349/100-350 -> 349", "amplicon_length", "=", "amplicon_combo", ".", "split", "(", "'/'", ")", "[", "0", "]", "# Populate the dictionary if the 'total_mismatches' key doesn't exist, or if the current number", "# of mismatches is better than the previous 'best' number of mismatches", "try", ":", "if", "total_mismatches", "<", "sample", "[", "self", ".", "analysistype", "]", ".", "result_dict", "[", "gene", "]", "[", "'total_mismatches'", "]", ":", "self", ".", "populate_results_dict", "(", "sample", "=", "sample", ",", "gene", "=", "gene", ",", "total_mismatches", "=", "total_mismatches", ",", "genome_pos", "=", "genome_pos", ",", "amplicon_length", "=", "amplicon_length", ",", "contig", "=", "contig", ",", "primer_set", "=", "primer_set", ")", "except", "KeyError", ":", "self", ".", "populate_results_dict", "(", "sample", "=", "sample", ",", "gene", "=", "gene", ",", "total_mismatches", "=", "total_mismatches", ",", "genome_pos", "=", "genome_pos", ",", "amplicon_length", "=", "amplicon_length", ",", "contig", "=", "contig", ",", "primer_set", "=", "primer_set", ")" ]
Parse the ePCR output file. Populate dictionary of results. For alleles, find the best result based on the number of mismatches before populating dictionary
[ "Parse", "the", "ePCR", "output", "file", ".", "Populate", "dictionary", "of", "results", ".", "For", "alleles", "find", "the", "best", "result", "based", "on", "the", "number", "of", "mismatches", "before", "populating", "dictionary" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L228-L278
train
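The gene name is recovered from the primer-set label that epcr_primer_file generated; a standalone check of that regular expression with the example label from the record:

import re

gene_re = re.search(r'([\w-]+)_(\d{1,3})_(\d{1,3})', 'vtx2a_0_0')
print(gene_re.groups()[0])  # vtx2a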
lowandrew/OLCTools
spadespipeline/legacy_vtyper.py
Custom.create_epr_report
def create_epr_report(self): """ Parse the results dictionaries, and create a final report """ # Open the report as a .csv file with open(os.path.join(self.reportpath, 'ePCR_report.csv'), 'w') as report: # Initialise a string to store the header results = 'Sample,Gene,GenomeLocation,AmpliconSize,Contig,TotalMismatches,PrimerSet\n' for sample in self.vtyper_object.metadata: # Check to see if there are strain-specific results if sample[self.analysistype].result_dict: for gene, result_dict in sample[self.analysistype].result_dict.items(): # Populate the string with the appropriate values extracted from the dictionary results += '{sn},{gene},{genomelocation},{ampliconsize},{contig},{nm},{ps}\n'\ .format(sn=sample.name, gene=gene, genomelocation=result_dict['genome_pos'], ampliconsize=result_dict['amplicon_length'], contig=result_dict['contig'], nm=result_dict['total_mismatches'], ps=result_dict['primer_set']) if self.export_amplicons: self.ampliconfile(sample=sample, contig=result_dict['contig'], amplicon_range=result_dict['genome_pos'].split('-'), primer_set=result_dict['primer_set']) else: results += '{sn}\n'.format(sn=sample.name) # Write the complete string to the report report.write(results)
python
def create_epr_report(self): """ Parse the results dictionaries, and create a final report """ # Open the report as a .csv file with open(os.path.join(self.reportpath, 'ePCR_report.csv'), 'w') as report: # Initialise a string to store the header results = 'Sample,Gene,GenomeLocation,AmpliconSize,Contig,TotalMismatches,PrimerSet\n' for sample in self.vtyper_object.metadata: # Check to see if there are strain-specific results if sample[self.analysistype].result_dict: for gene, result_dict in sample[self.analysistype].result_dict.items(): # Populate the string with the appropriate values extracted from the dictionary results += '{sn},{gene},{genomelocation},{ampliconsize},{contig},{nm},{ps}\n'\ .format(sn=sample.name, gene=gene, genomelocation=result_dict['genome_pos'], ampliconsize=result_dict['amplicon_length'], contig=result_dict['contig'], nm=result_dict['total_mismatches'], ps=result_dict['primer_set']) if self.export_amplicons: self.ampliconfile(sample=sample, contig=result_dict['contig'], amplicon_range=result_dict['genome_pos'].split('-'), primer_set=result_dict['primer_set']) else: results += '{sn}\n'.format(sn=sample.name) # Write the complete string to the report report.write(results)
[ "def", "create_epr_report", "(", "self", ")", ":", "# Open the report as a .csv file", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "'ePCR_report.csv'", ")", ",", "'w'", ")", "as", "report", ":", "# Initialise a string to store the header", "results", "=", "'Sample,Gene,GenomeLocation,AmpliconSize,Contig,TotalMismatches,PrimerSet\\n'", "for", "sample", "in", "self", ".", "vtyper_object", ".", "metadata", ":", "# Check to see if there are strain-specific results", "if", "sample", "[", "self", ".", "analysistype", "]", ".", "result_dict", ":", "for", "gene", ",", "result_dict", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "result_dict", ".", "items", "(", ")", ":", "# Populate the string with the appropriate values extracted from the dictionary", "results", "+=", "'{sn},{gene},{genomelocation},{ampliconsize},{contig},{nm},{ps}\\n'", ".", "format", "(", "sn", "=", "sample", ".", "name", ",", "gene", "=", "gene", ",", "genomelocation", "=", "result_dict", "[", "'genome_pos'", "]", ",", "ampliconsize", "=", "result_dict", "[", "'amplicon_length'", "]", ",", "contig", "=", "result_dict", "[", "'contig'", "]", ",", "nm", "=", "result_dict", "[", "'total_mismatches'", "]", ",", "ps", "=", "result_dict", "[", "'primer_set'", "]", ")", "if", "self", ".", "export_amplicons", ":", "self", ".", "ampliconfile", "(", "sample", "=", "sample", ",", "contig", "=", "result_dict", "[", "'contig'", "]", ",", "amplicon_range", "=", "result_dict", "[", "'genome_pos'", "]", ".", "split", "(", "'-'", ")", ",", "primer_set", "=", "result_dict", "[", "'primer_set'", "]", ")", "else", ":", "results", "+=", "'{sn}\\n'", ".", "format", "(", "sn", "=", "sample", ".", "name", ")", "# Write the complete string to the report", "report", ".", "write", "(", "results", ")" ]
Parse the results dictionaries, and create a final report
[ "Parse", "the", "results", "dictionaries", "and", "create", "a", "final", "report" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/legacy_vtyper.py#L299-L328
train
lowandrew/OLCTools
sipprCommon/create_sample_sheet.py
SampleSheet.samplesheet
def samplesheet(self): """ Create a custom sample sheet based on the original sample sheet for the run, but only including the samples that did not pass the quality threshold on the previous iteration """ if self.demultiplex: make_path(self.samplesheetpath) self.customsamplesheet = os.path.join(self.samplesheetpath, 'SampleSheet.csv') header = ['Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description'] with open(self.customsamplesheet, 'w') as samplesheet: lines = str() lines += '[Header]\n' lines += 'IEMFileVersion,{}\n'.format(self.header.IEMFileVersion) lines += 'Investigator Name,{}\n'.format(self.header.InvestigatorName) lines += 'Experiment Name,{}\n'.format(self.header.ExperimentName) lines += 'Date,{}\n'.format(self.header.Date) lines += 'Workflow,{}\n'.format(self.header.Workflow) lines += 'Application,{}\n'.format(self.header.Application) lines += 'Assay,{}\n'.format(self.header.Assay) lines += 'Description,{}\n'.format(self.header.Description) lines += 'Chemistry,{}\n'.format(self.header.Chemistry) lines += '\n' lines += '[Reads]\n' lines += str(self.forward) + '\n' lines += str(self.reverse) + '\n' lines += '\n' lines += '[Settings]\n' lines += 'ReverseComplement,{}\n'.format(self.header.ReverseComplement) lines += 'Adapter,{}\n'.format(self.header.Adapter) lines += '\n' lines += '[Data]\n' lines += ','.join(header) lines += '\n' # Correlate all the samples added to the list of incomplete samples with their metadata for incomplete in self.incomplete: for sample in self.rundata: if incomplete == sample['SampleID']: # Use each entry in the header list as a key for the rundata dictionary for data in header: # Modify the key to be consistent with how the dictionary was populated result = sample[data.replace('_', '')] # Description is the final entry in the list, and shouldn't have a , following the value if data != 'Description': lines += '{},'.format(result.replace('NA', '')) # This entry should have a newline instead of a , else: lines += '{}\n'.format(result.replace('NA', '')) # Write the string to the sample sheet samplesheet.write(lines)
python
def samplesheet(self): """ Create a custom sample sheet based on the original sample sheet for the run, but only including the samples that did not pass the quality threshold on the previous iteration """ if self.demultiplex: make_path(self.samplesheetpath) self.customsamplesheet = os.path.join(self.samplesheetpath, 'SampleSheet.csv') header = ['Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description'] with open(self.customsamplesheet, 'w') as samplesheet: lines = str() lines += '[Header]\n' lines += 'IEMFileVersion,{}\n'.format(self.header.IEMFileVersion) lines += 'Investigator Name,{}\n'.format(self.header.InvestigatorName) lines += 'Experiment Name,{}\n'.format(self.header.ExperimentName) lines += 'Date,{}\n'.format(self.header.Date) lines += 'Workflow,{}\n'.format(self.header.Workflow) lines += 'Application,{}\n'.format(self.header.Application) lines += 'Assay,{}\n'.format(self.header.Assay) lines += 'Description,{}\n'.format(self.header.Description) lines += 'Chemistry,{}\n'.format(self.header.Chemistry) lines += '\n' lines += '[Reads]\n' lines += str(self.forward) + '\n' lines += str(self.reverse) + '\n' lines += '\n' lines += '[Settings]\n' lines += 'ReverseComplement,{}\n'.format(self.header.ReverseComplement) lines += 'Adapter,{}\n'.format(self.header.Adapter) lines += '\n' lines += '[Data]\n' lines += ','.join(header) lines += '\n' # Correlate all the samples added to the list of incomplete samples with their metadata for incomplete in self.incomplete: for sample in self.rundata: if incomplete == sample['SampleID']: # Use each entry in the header list as a key for the rundata dictionary for data in header: # Modify the key to be consistent with how the dictionary was populated result = sample[data.replace('_', '')] # Description is the final entry in the list, and shouldn't have a , following the value if data != 'Description': lines += '{},'.format(result.replace('NA', '')) # This entry should have a newline instead of a , else: lines += '{}\n'.format(result.replace('NA', '')) # Write the string to the sample sheet samplesheet.write(lines)
[ "def", "samplesheet", "(", "self", ")", ":", "if", "self", ".", "demultiplex", ":", "make_path", "(", "self", ".", "samplesheetpath", ")", "self", ".", "customsamplesheet", "=", "os", ".", "path", ".", "join", "(", "self", ".", "samplesheetpath", ",", "'SampleSheet.csv'", ")", "header", "=", "[", "'Sample_ID'", ",", "'Sample_Name'", ",", "'Sample_Plate'", ",", "'Sample_Well'", ",", "'I7_Index_ID'", ",", "'index'", ",", "'I5_Index_ID'", ",", "'index2'", ",", "'Sample_Project'", ",", "'Description'", "]", "with", "open", "(", "self", ".", "customsamplesheet", ",", "'w'", ")", "as", "samplesheet", ":", "lines", "=", "str", "(", ")", "lines", "+=", "'[Header]\\n'", "lines", "+=", "'IEMFileVersion,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "IEMFileVersion", ")", "lines", "+=", "'Investigator Name,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "InvestigatorName", ")", "lines", "+=", "'Experiment Name,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "ExperimentName", ")", "lines", "+=", "'Date,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Date", ")", "lines", "+=", "'Workflow,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Workflow", ")", "lines", "+=", "'Application,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Application", ")", "lines", "+=", "'Assay,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Assay", ")", "lines", "+=", "'Description,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Description", ")", "lines", "+=", "'Chemistry,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Chemistry", ")", "lines", "+=", "'\\n'", "lines", "+=", "'[Reads]\\n'", "lines", "+=", "str", "(", "self", ".", "forward", ")", "+", "'\\n'", "lines", "+=", "str", "(", "self", ".", "reverse", ")", "+", "'\\n'", "lines", "+=", "'\\n'", "lines", "+=", "'[Settings]\\n'", "lines", "+=", "'ReverseComplement,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "ReverseComplement", ")", "lines", "+=", "'Adapter,{}\\n'", ".", "format", "(", "self", ".", "header", ".", "Adapter", ")", "lines", "+=", "'\\n'", "lines", "+=", "'[Data]\\n'", "lines", "+=", "','", ".", "join", "(", "header", ")", "lines", "+=", "'\\n'", "# Correlate all the samples added to the list of incomplete samples with their metadata", "for", "incomplete", "in", "self", ".", "incomplete", ":", "for", "sample", "in", "self", ".", "rundata", ":", "if", "incomplete", "==", "sample", "[", "'SampleID'", "]", ":", "# Use each entry in the header list as a key for the rundata dictionary", "for", "data", "in", "header", ":", "# Modify the key to be consistent with how the dictionary was populated", "result", "=", "sample", "[", "data", ".", "replace", "(", "'_'", ",", "''", ")", "]", "# Description is the final entry in the list, and shouldn't have a , following the value", "if", "data", "!=", "'Description'", ":", "lines", "+=", "'{},'", ".", "format", "(", "result", ".", "replace", "(", "'NA'", ",", "''", ")", ")", "# This entry should have a newline instead of a ,", "else", ":", "lines", "+=", "'{}\\n'", ".", "format", "(", "result", ".", "replace", "(", "'NA'", ",", "''", ")", ")", "# Write the string to the sample sheet", "samplesheet", ".", "write", "(", "lines", ")" ]
Create a custom sample sheet based on the original sample sheet for the run, but only including the samples that did not pass the quality threshold on the previous iteration
[ "Create", "a", "custom", "sample", "sheet", "based", "on", "the", "original", "sample", "sheet", "for", "the", "run", "but", "only", "including", "the", "samples", "that", "did", "not", "pass", "the", "quality", "threshold", "on", "the", "previous", "iteration" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/sipprCommon/create_sample_sheet.py#L10-L59
train
LeKono/pyhgnc
src/pyhgnc/manager/database.py
update
def update(connection=None, silent=False, hgnc_file_path=None, hcop_file_path=None, low_memory=False): """Update the database with current version of HGNC :param str connection: conncetion string :param bool silent: silent while import :param str hgnc_file_path: import from path HGNC :param str hcop_file_path: import from path HCOP (orthologs) :param bool low_memory: set to `True` if you have low memory :return: """ database = DbManager(connection) database.db_import(silent=silent, hgnc_file_path=hgnc_file_path, hcop_file_path=hcop_file_path, low_memory=low_memory) database.session.close()
python
def update(connection=None, silent=False, hgnc_file_path=None, hcop_file_path=None, low_memory=False): """Update the database with current version of HGNC :param str connection: conncetion string :param bool silent: silent while import :param str hgnc_file_path: import from path HGNC :param str hcop_file_path: import from path HCOP (orthologs) :param bool low_memory: set to `True` if you have low memory :return: """ database = DbManager(connection) database.db_import(silent=silent, hgnc_file_path=hgnc_file_path, hcop_file_path=hcop_file_path, low_memory=low_memory) database.session.close()
[ "def", "update", "(", "connection", "=", "None", ",", "silent", "=", "False", ",", "hgnc_file_path", "=", "None", ",", "hcop_file_path", "=", "None", ",", "low_memory", "=", "False", ")", ":", "database", "=", "DbManager", "(", "connection", ")", "database", ".", "db_import", "(", "silent", "=", "silent", ",", "hgnc_file_path", "=", "hgnc_file_path", ",", "hcop_file_path", "=", "hcop_file_path", ",", "low_memory", "=", "low_memory", ")", "database", ".", "session", ".", "close", "(", ")" ]
Update the database with current version of HGNC :param str connection: connection string :param bool silent: silent while import :param str hgnc_file_path: import from path HGNC :param str hcop_file_path: import from path HCOP (orthologs) :param bool low_memory: set to `True` if you have low memory :return:
[ "Update", "the", "database", "with", "current", "version", "of", "HGNC" ]
1cae20c40874bfb51581b7c5c1481707e942b5d0
https://github.com/LeKono/pyhgnc/blob/1cae20c40874bfb51581b7c5c1481707e942b5d0/src/pyhgnc/manager/database.py#L408-L420
train
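A hedged call sketch for update(); the import path drops the src/ prefix of the repository layout (an assumption), and the sqlite URL is one of the connection-string forms listed in set_connection's docstring below:

from pyhgnc.manager.database import update

# Run the HGNC/HCOP import against a local sqlite database.
update(connection='sqlite:////tmp/pyhgnc.db', silent=True, low_memory=True)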
LeKono/pyhgnc
src/pyhgnc/manager/database.py
set_connection
def set_connection(connection=defaults.sqlalchemy_connection_string_default): """Set the connection string for sqlalchemy and write it to the config file. .. code-block:: python import pyhgnc pyhgnc.set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}') .. hint:: valid connection strings - mysql+pymysql://user:passwd@localhost/database?charset=utf8 - postgresql://scott:tiger@localhost/mydatabase - mssql+pyodbc://user:passwd@database - oracle://user:[email protected]:1521/database - Linux: sqlite:////absolute/path/to/database.db - Windows: sqlite:///C:\path\to\database.db :param str connection: sqlalchemy connection string """ config_path = defaults.config_file_path config = RawConfigParser() if not os.path.exists(config_path): with open(config_path, 'w') as config_file: config['database'] = {'sqlalchemy_connection_string': connection} config.write(config_file) log.info('create configuration file {}'.format(config_path)) else: config.read(config_path) config.set('database', 'sqlalchemy_connection_string', connection) with open(config_path, 'w') as configfile: config.write(configfile)
python
def set_connection(connection=defaults.sqlalchemy_connection_string_default): """Set the connection string for sqlalchemy and write it to the config file. .. code-block:: python import pyhgnc pyhgnc.set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}') .. hint:: valid connection strings - mysql+pymysql://user:passwd@localhost/database?charset=utf8 - postgresql://scott:tiger@localhost/mydatabase - mssql+pyodbc://user:passwd@database - oracle://user:[email protected]:1521/database - Linux: sqlite:////absolute/path/to/database.db - Windows: sqlite:///C:\path\to\database.db :param str connection: sqlalchemy connection string """ config_path = defaults.config_file_path config = RawConfigParser() if not os.path.exists(config_path): with open(config_path, 'w') as config_file: config['database'] = {'sqlalchemy_connection_string': connection} config.write(config_file) log.info('create configuration file {}'.format(config_path)) else: config.read(config_path) config.set('database', 'sqlalchemy_connection_string', connection) with open(config_path, 'w') as configfile: config.write(configfile)
[ "def", "set_connection", "(", "connection", "=", "defaults", ".", "sqlalchemy_connection_string_default", ")", ":", "config_path", "=", "defaults", ".", "config_file_path", "config", "=", "RawConfigParser", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "with", "open", "(", "config_path", ",", "'w'", ")", "as", "config_file", ":", "config", "[", "'database'", "]", "=", "{", "'sqlalchemy_connection_string'", ":", "connection", "}", "config", ".", "write", "(", "config_file", ")", "log", ".", "info", "(", "'create configuration file {}'", ".", "format", "(", "config_path", ")", ")", "else", ":", "config", ".", "read", "(", "config_path", ")", "config", ".", "set", "(", "'database'", ",", "'sqlalchemy_connection_string'", ",", "connection", ")", "with", "open", "(", "config_path", ",", "'w'", ")", "as", "configfile", ":", "config", ".", "write", "(", "configfile", ")" ]
Set the connection string for sqlalchemy and write it to the config file. .. code-block:: python import pyhgnc pyhgnc.set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}') .. hint:: valid connection strings - mysql+pymysql://user:passwd@localhost/database?charset=utf8 - postgresql://scott:tiger@localhost/mydatabase - mssql+pyodbc://user:passwd@database - oracle://user:[email protected]:1521/database - Linux: sqlite:////absolute/path/to/database.db - Windows: sqlite:///C:\path\to\database.db :param str connection: sqlalchemy connection string
[ "Set", "the", "connection", "string", "for", "sqlalchemy", "and", "write", "it", "to", "the", "config", "file", "." ]
1cae20c40874bfb51581b7c5c1481707e942b5d0
https://github.com/LeKono/pyhgnc/blob/1cae20c40874bfb51581b7c5c1481707e942b5d0/src/pyhgnc/manager/database.py#L423-L464
train
LeKono/pyhgnc
src/pyhgnc/manager/database.py
set_mysql_connection
def set_mysql_connection(host='localhost', user='pyhgnc_user', passwd='pyhgnc_passwd', db='pyhgnc', charset='utf8'): """Method to set a MySQL connection :param str host: MySQL database host :param str user: MySQL database user :param str passwd: MySQL database password :param str db: MySQL database name :param str charset: MySQL database charater set :return: connection string :rtype: str """ connection_string = 'mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format( host=host, user=user, passwd=passwd, db=db, charset=charset ) set_connection(connection_string) return connection_string
python
def set_mysql_connection(host='localhost', user='pyhgnc_user', passwd='pyhgnc_passwd', db='pyhgnc', charset='utf8'): """Method to set a MySQL connection :param str host: MySQL database host :param str user: MySQL database user :param str passwd: MySQL database password :param str db: MySQL database name :param str charset: MySQL database charater set :return: connection string :rtype: str """ connection_string = 'mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format( host=host, user=user, passwd=passwd, db=db, charset=charset ) set_connection(connection_string) return connection_string
[ "def", "set_mysql_connection", "(", "host", "=", "'localhost'", ",", "user", "=", "'pyhgnc_user'", ",", "passwd", "=", "'pyhgnc_passwd'", ",", "db", "=", "'pyhgnc'", ",", "charset", "=", "'utf8'", ")", ":", "connection_string", "=", "'mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'", ".", "format", "(", "host", "=", "host", ",", "user", "=", "user", ",", "passwd", "=", "passwd", ",", "db", "=", "db", ",", "charset", "=", "charset", ")", "set_connection", "(", "connection_string", ")", "return", "connection_string" ]
Method to set a MySQL connection :param str host: MySQL database host :param str user: MySQL database user :param str passwd: MySQL database password :param str db: MySQL database name :param str charset: MySQL database character set :return: connection string :rtype: str
[ "Method", "to", "set", "a", "MySQL", "connection" ]
1cae20c40874bfb51581b7c5c1481707e942b5d0
https://github.com/LeKono/pyhgnc/blob/1cae20c40874bfb51581b7c5c1481707e942b5d0/src/pyhgnc/manager/database.py#L467-L489
train
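Calling the function also writes a config file via set_connection, so this sketch only reproduces the string it builds from the default arguments:

print('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format(
    host='localhost', user='pyhgnc_user', passwd='pyhgnc_passwd', db='pyhgnc', charset='utf8'))
# mysql+pymysql://pyhgnc_user:pyhgnc_passwd@localhost/pyhgnc?charset=utf8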
kevinconway/venvctrl
venvctrl/venv/relocate.py
RelocateMixin.relocate
def relocate(self, destination): """Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. Is only rewrites the metadata required to support a move. """ for activate in self.bin.activates: activate.vpath = destination for binfile in self.bin.files: if binfile.shebang and ( 'python' in binfile.shebang or 'pypy' in binfile.shebang ): binfile.shebang = '#!{0}'.format( os.path.join(destination, 'bin', 'python') )
python
def relocate(self, destination): """Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. Is only rewrites the metadata required to support a move. """ for activate in self.bin.activates: activate.vpath = destination for binfile in self.bin.files: if binfile.shebang and ( 'python' in binfile.shebang or 'pypy' in binfile.shebang ): binfile.shebang = '#!{0}'.format( os.path.join(destination, 'bin', 'python') )
[ "def", "relocate", "(", "self", ",", "destination", ")", ":", "for", "activate", "in", "self", ".", "bin", ".", "activates", ":", "activate", ".", "vpath", "=", "destination", "for", "binfile", "in", "self", ".", "bin", ".", "files", ":", "if", "binfile", ".", "shebang", "and", "(", "'python'", "in", "binfile", ".", "shebang", "or", "'pypy'", "in", "binfile", ".", "shebang", ")", ":", "binfile", ".", "shebang", "=", "'#!{0}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "destination", ",", "'bin'", ",", "'python'", ")", ")" ]
Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. It only rewrites the metadata required to support a move.
[ "Configure", "the", "virtual", "environment", "for", "another", "path", "." ]
36d4e0e4d5ebced6385a6ade1198f4769ff2df41
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/relocate.py#L16-L38
train
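To see what relocate() actually changes, here is a standalone sketch of the shebang rewrite it applies to python/pypy scripts in the environment's bin/ directory (no venvctrl import required; the destination path is made up):

    # Sketch of the shebang string relocate() writes into interpreter scripts
    import os
    destination = '/srv/app/venv'   # illustrative target path
    new_shebang = '#!{0}'.format(os.path.join(destination, 'bin', 'python'))
    print(new_shebang)              # -> #!/srv/app/venv/bin/python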
kevinconway/venvctrl
venvctrl/venv/relocate.py
RelocateMixin.move
def move(self, destination): """Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual to the given path. """ self.relocate(destination) shutil.move(self.path, destination) self._path = destination
python
def move(self, destination): """Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual to the given path. """ self.relocate(destination) shutil.move(self.path, destination) self._path = destination
[ "def", "move", "(", "self", ",", "destination", ")", ":", "self", ".", "relocate", "(", "destination", ")", "shutil", ".", "move", "(", "self", ".", "path", ",", "destination", ")", "self", ".", "_path", "=", "destination" ]
Reconfigure and move the virtual environment to another path. Args: destination (str): The target path of the virtual environment. Note: Unlike `relocate`, this method *will* move the virtual environment to the given path.
[ "Reconfigure", "and", "move", "the", "virtual", "environment", "to", "another", "path", "." ]
36d4e0e4d5ebced6385a6ade1198f4769ff2df41
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/relocate.py#L40-L52
train
lowandrew/OLCTools
accessoryFunctions/reportaggregator.py
Aggregate.aggregate
def aggregate(self): """ Aggregate all reports of the same type into a master report """ for report in self.reportset: printtime('Processing {}'.format(report.split('.')[0]), self.start) # Initialise the header for each report - MLST is different, as the header is different for each # MLST scheme. This provides a generic header instead header = '' if report != 'mlst.csv' else 'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n' # Initialise a string to hold the data for each report data = '' # Open the aggregated report with open(os.path.join(self.reportpath, report), 'w') as aggregate: for sample in self.runmetadata.samples: # Try to open the report for this run try: # with open(os.path.join(sample.general.reportpath, report), 'r') as runreport: # Only get the header from the first file if not header: header = runreport.readline() else: for row in runreport: # The final entry in a report does not have a newline character. Add \n as required if not row.endswith('\n'): row += '\n' # For certain reports, the header row is printed above each strain - ignore multiple # instances of the header if row.split(',')[0] != header.split(',')[0]: # Add the row to the string of data data += row except IOError: pass # Write the strings to the aggregate report file aggregate.write(header) aggregate.write(data)
python
def aggregate(self): """ Aggregate all reports of the same type into a master report """ for report in self.reportset: printtime('Processing {}'.format(report.split('.')[0]), self.start) # Initialise the header for each report - MLST is different, as the header is different for each # MLST scheme. This provides a generic header instead header = '' if report != 'mlst.csv' else 'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n' # Initialise a string to hold the data for each report data = '' # Open the aggregated report with open(os.path.join(self.reportpath, report), 'w') as aggregate: for sample in self.runmetadata.samples: # Try to open the report for this run try: # with open(os.path.join(sample.general.reportpath, report), 'r') as runreport: # Only get the header from the first file if not header: header = runreport.readline() else: for row in runreport: # The final entry in a report does not have a newline character. Add \n as required if not row.endswith('\n'): row += '\n' # For certain reports, the header row is printed above each strain - ignore multiple # instances of the header if row.split(',')[0] != header.split(',')[0]: # Add the row to the string of data data += row except IOError: pass # Write the strings to the aggregate report file aggregate.write(header) aggregate.write(data)
[ "def", "aggregate", "(", "self", ")", ":", "for", "report", "in", "self", ".", "reportset", ":", "printtime", "(", "'Processing {}'", ".", "format", "(", "report", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", ",", "self", ".", "start", ")", "# Initialise the header for each report - MLST is different, as the header is different for each", "# MLST scheme. This provides a generic header instead", "header", "=", "''", "if", "report", "!=", "'mlst.csv'", "else", "'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\\n'", "# Initialise a string to hold the data for each report", "data", "=", "''", "# Open the aggregated report", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "report", ")", ",", "'w'", ")", "as", "aggregate", ":", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "# Try to open the report for this run", "try", ":", "#", "with", "open", "(", "os", ".", "path", ".", "join", "(", "sample", ".", "general", ".", "reportpath", ",", "report", ")", ",", "'r'", ")", "as", "runreport", ":", "# Only get the header from the first file", "if", "not", "header", ":", "header", "=", "runreport", ".", "readline", "(", ")", "else", ":", "for", "row", "in", "runreport", ":", "# The final entry in a report does not have a newline character. Add \\n as required", "if", "not", "row", ".", "endswith", "(", "'\\n'", ")", ":", "row", "+=", "'\\n'", "# For certain reports, the header row is printed above each strain - ignore multiple", "# instances of the header", "if", "row", ".", "split", "(", "','", ")", "[", "0", "]", "!=", "header", ".", "split", "(", "','", ")", "[", "0", "]", ":", "# Add the row to the string of data", "data", "+=", "row", "except", "IOError", ":", "pass", "# Write the strings to the aggregate report file", "aggregate", ".", "write", "(", "header", ")", "aggregate", ".", "write", "(", "data", ")" ]
Aggregate all reports of the same type into a master report
[ "Aggregate", "all", "reports", "of", "the", "same", "type", "into", "a", "master", "report" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/accessoryFunctions/reportaggregator.py#L71-L106
train
etal/biocma
biocma/cma.py
_parse_blocks
def _parse_blocks(instream): """Parse an alignment block from the given file handle. Block looks like: [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}: (209)***********************************************... ... sequences, numbered 1-8 ... _0]. """ ilines = sugar.unblank(instream) for line in ilines: if line.startswith('['): # Start of block level, one, name, seqcount, params = _parse_block_header(line) qlen, qchars = _parse_block_postheader(next(ilines)) # Pass control to the sequence parser sequences = list(_parse_sequences(ilines, qlen)) # Validation if not len(sequences) == seqcount: logging.warn("Expected %d sequences in block %s, found %d", seqcount, name, len(sequences)) yield {'level': level, 'one': one, 'name': name, # 'seqcount': seqcount, 'params': params, 'query_length': qlen, 'query_chars': qchars, 'sequences': sequences, }
python
def _parse_blocks(instream): """Parse an alignment block from the given file handle. Block looks like: [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}: (209)***********************************************... ... sequences, numbered 1-8 ... _0]. """ ilines = sugar.unblank(instream) for line in ilines: if line.startswith('['): # Start of block level, one, name, seqcount, params = _parse_block_header(line) qlen, qchars = _parse_block_postheader(next(ilines)) # Pass control to the sequence parser sequences = list(_parse_sequences(ilines, qlen)) # Validation if not len(sequences) == seqcount: logging.warn("Expected %d sequences in block %s, found %d", seqcount, name, len(sequences)) yield {'level': level, 'one': one, 'name': name, # 'seqcount': seqcount, 'params': params, 'query_length': qlen, 'query_chars': qchars, 'sequences': sequences, }
[ "def", "_parse_blocks", "(", "instream", ")", ":", "ilines", "=", "sugar", ".", "unblank", "(", "instream", ")", "for", "line", "in", "ilines", ":", "if", "line", ".", "startswith", "(", "'['", ")", ":", "# Start of block", "level", ",", "one", ",", "name", ",", "seqcount", ",", "params", "=", "_parse_block_header", "(", "line", ")", "qlen", ",", "qchars", "=", "_parse_block_postheader", "(", "next", "(", "ilines", ")", ")", "# Pass control to the sequence parser", "sequences", "=", "list", "(", "_parse_sequences", "(", "ilines", ",", "qlen", ")", ")", "# Validation", "if", "not", "len", "(", "sequences", ")", "==", "seqcount", ":", "logging", ".", "warn", "(", "\"Expected %d sequences in block %s, found %d\"", ",", "seqcount", ",", "name", ",", "len", "(", "sequences", ")", ")", "yield", "{", "'level'", ":", "level", ",", "'one'", ":", "one", ",", "'name'", ":", "name", ",", "# 'seqcount': seqcount,", "'params'", ":", "params", ",", "'query_length'", ":", "qlen", ",", "'query_chars'", ":", "qchars", ",", "'sequences'", ":", "sequences", ",", "}" ]
Parse an alignment block from the given file handle. Block looks like: [0_(1)=fa2cma(8){go=10000,gx=2000,pn=1000.0,lf=0,rf=0}: (209)***********************************************... ... sequences, numbered 1-8 ... _0].
[ "Parse", "an", "alignment", "block", "from", "the", "given", "file", "handle", "." ]
eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L43-L75
train
etal/biocma
biocma/cma.py
_parse_sequences
def _parse_sequences(ilines, expect_qlen): """Parse the sequences in the current block. Sequence looks like: $3=227(209): >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75 {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}* """ while True: first = next(ilines) if first.startswith('_') and first.endswith('].'): # End of sequences & end of block break # ENH: handle wrapped lines? try: index, this_len, query_len = _parse_seq_preheader(first) except ValueError: logging.warn('Unparseable line (SKIPPING):\n%s', first) continue (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description ) = _parse_seq_header(next(ilines)) try: headseq, molseq, tailseq = _parse_seq_body(next(ilines)) except ValueError: logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id) continue # Validation if expect_qlen != query_len: logging.warn("Query length in %s given as %d; expected %d", rec_id, query_len, expect_qlen) if not headseq and not headlen: headlen = 0 if not tailseq and not taillen: taillen = 0 if headseq: if headlen is None: headlen = len(headseq) elif headlen != len(headseq): logging.warn("Conflicting head flank lengths in %s: %d, %d", rec_id, headlen, len(headseq)) if tailseq: if taillen is None: taillen = len(tailseq) elif taillen != len(tailseq): logging.warn("Conflicting tail flank lengths in %s: %d, %d", rec_id, taillen, len(tailseq)) yield {'index': index, 'id': rec_id, 'description': description, 'dbxrefs': dbxrefs, 'phylum': phylum, 'taxchar': taxchar, 'head_len': headlen, 'tail_len': taillen, 'head_seq': headseq, 'tail_seq': tailseq, 'length': this_len, 'seq': molseq, }
python
def _parse_sequences(ilines, expect_qlen): """Parse the sequences in the current block. Sequence looks like: $3=227(209): >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75 {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}* """ while True: first = next(ilines) if first.startswith('_') and first.endswith('].'): # End of sequences & end of block break # ENH: handle wrapped lines? try: index, this_len, query_len = _parse_seq_preheader(first) except ValueError: logging.warn('Unparseable line (SKIPPING):\n%s', first) continue (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description ) = _parse_seq_header(next(ilines)) try: headseq, molseq, tailseq = _parse_seq_body(next(ilines)) except ValueError: logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id) continue # Validation if expect_qlen != query_len: logging.warn("Query length in %s given as %d; expected %d", rec_id, query_len, expect_qlen) if not headseq and not headlen: headlen = 0 if not tailseq and not taillen: taillen = 0 if headseq: if headlen is None: headlen = len(headseq) elif headlen != len(headseq): logging.warn("Conflicting head flank lengths in %s: %d, %d", rec_id, headlen, len(headseq)) if tailseq: if taillen is None: taillen = len(tailseq) elif taillen != len(tailseq): logging.warn("Conflicting tail flank lengths in %s: %d, %d", rec_id, taillen, len(tailseq)) yield {'index': index, 'id': rec_id, 'description': description, 'dbxrefs': dbxrefs, 'phylum': phylum, 'taxchar': taxchar, 'head_len': headlen, 'tail_len': taillen, 'head_seq': headseq, 'tail_seq': tailseq, 'length': this_len, 'seq': molseq, }
[ "def", "_parse_sequences", "(", "ilines", ",", "expect_qlen", ")", ":", "while", "True", ":", "first", "=", "next", "(", "ilines", ")", "if", "first", ".", "startswith", "(", "'_'", ")", "and", "first", ".", "endswith", "(", "'].'", ")", ":", "# End of sequences & end of block", "break", "# ENH: handle wrapped lines?", "try", ":", "index", ",", "this_len", ",", "query_len", "=", "_parse_seq_preheader", "(", "first", ")", "except", "ValueError", ":", "logging", ".", "warn", "(", "'Unparseable line (SKIPPING):\\n%s'", ",", "first", ")", "continue", "(", "rec_id", ",", "dbxrefs", ",", "headlen", ",", "taillen", ",", "phylum", ",", "taxchar", ",", "description", ")", "=", "_parse_seq_header", "(", "next", "(", "ilines", ")", ")", "try", ":", "headseq", ",", "molseq", ",", "tailseq", "=", "_parse_seq_body", "(", "next", "(", "ilines", ")", ")", "except", "ValueError", ":", "logging", ".", "warn", "(", "'Unparseable sequence: %s -- SKIPPING'", ",", "rec_id", ")", "continue", "# Validation", "if", "expect_qlen", "!=", "query_len", ":", "logging", ".", "warn", "(", "\"Query length in %s given as %d; expected %d\"", ",", "rec_id", ",", "query_len", ",", "expect_qlen", ")", "if", "not", "headseq", "and", "not", "headlen", ":", "headlen", "=", "0", "if", "not", "tailseq", "and", "not", "taillen", ":", "taillen", "=", "0", "if", "headseq", ":", "if", "headlen", "is", "None", ":", "headlen", "=", "len", "(", "headseq", ")", "elif", "headlen", "!=", "len", "(", "headseq", ")", ":", "logging", ".", "warn", "(", "\"Conflicting head flank lengths in %s: %d, %d\"", ",", "rec_id", ",", "headlen", ",", "len", "(", "headseq", ")", ")", "if", "tailseq", ":", "if", "taillen", "is", "None", ":", "taillen", "=", "len", "(", "tailseq", ")", "elif", "taillen", "!=", "len", "(", "tailseq", ")", ":", "logging", ".", "warn", "(", "\"Conflicting tail flank lengths in %s: %d, %d\"", ",", "rec_id", ",", "taillen", ",", "len", "(", "tailseq", ")", ")", "yield", "{", "'index'", ":", "index", ",", "'id'", ":", "rec_id", ",", "'description'", ":", "description", ",", "'dbxrefs'", ":", "dbxrefs", ",", "'phylum'", ":", "phylum", ",", "'taxchar'", ":", "taxchar", ",", "'head_len'", ":", "headlen", ",", "'tail_len'", ":", "taillen", ",", "'head_seq'", ":", "headseq", ",", "'tail_seq'", ":", "tailseq", ",", "'length'", ":", "this_len", ",", "'seq'", ":", "molseq", ",", "}" ]
Parse the sequences in the current block. Sequence looks like: $3=227(209): >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75 {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
[ "Parse", "the", "sequences", "in", "the", "current", "block", "." ]
eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L78-L141
train
etal/biocma
biocma/cma.py
realign_seqs
def realign_seqs(block, gap_char='.', align_indels=False): """Add gaps to a block so all residues in a column are equivalent. Given a block, containing a list of "sequences" (dicts) each containing a "seq" (actual string sequence, where upper=match, lower=insert, dash=gap), insert gaps (- or .) into the sequences s.t. 1. columns line up properly, and 2. all resulting sequences have the same length The reason this needs to be done is that the query/consensus sequence is not assigned gaps to account for inserts in the other sequences. We need to add the gaps back to obtain a normal alignment. `return`: a list of realigned sequence strings. """ # ENH: align inserts using an external tool (if align_indels) all_chars = [list(sq['seq']) for sq in block['sequences']] # NB: If speed is an issue here, consider Numpy or Cython # main problem: list.insert is O(n) -- would OrderedDict help? nrows = len(all_chars) i = 0 while i < len(all_chars[0]): rows_need_gaps = [r for r in all_chars if not r[i].islower()] if len(rows_need_gaps) != nrows: for row in rows_need_gaps: row.insert(i, gap_char) i += 1 return [''.join(row) for row in all_chars]
python
def realign_seqs(block, gap_char='.', align_indels=False): """Add gaps to a block so all residues in a column are equivalent. Given a block, containing a list of "sequences" (dicts) each containing a "seq" (actual string sequence, where upper=match, lower=insert, dash=gap), insert gaps (- or .) into the sequences s.t. 1. columns line up properly, and 2. all resulting sequences have the same length The reason this needs to be done is that the query/consensus sequence is not assigned gaps to account for inserts in the other sequences. We need to add the gaps back to obtain a normal alignment. `return`: a list of realigned sequence strings. """ # ENH: align inserts using an external tool (if align_indels) all_chars = [list(sq['seq']) for sq in block['sequences']] # NB: If speed is an issue here, consider Numpy or Cython # main problem: list.insert is O(n) -- would OrderedDict help? nrows = len(all_chars) i = 0 while i < len(all_chars[0]): rows_need_gaps = [r for r in all_chars if not r[i].islower()] if len(rows_need_gaps) != nrows: for row in rows_need_gaps: row.insert(i, gap_char) i += 1 return [''.join(row) for row in all_chars]
[ "def", "realign_seqs", "(", "block", ",", "gap_char", "=", "'.'", ",", "align_indels", "=", "False", ")", ":", "# ENH: align inserts using an external tool (if align_indels)", "all_chars", "=", "[", "list", "(", "sq", "[", "'seq'", "]", ")", "for", "sq", "in", "block", "[", "'sequences'", "]", "]", "# NB: If speed is an issue here, consider Numpy or Cython", "# main problem: list.insert is O(n) -- would OrderedDict help?", "nrows", "=", "len", "(", "all_chars", ")", "i", "=", "0", "while", "i", "<", "len", "(", "all_chars", "[", "0", "]", ")", ":", "rows_need_gaps", "=", "[", "r", "for", "r", "in", "all_chars", "if", "not", "r", "[", "i", "]", ".", "islower", "(", ")", "]", "if", "len", "(", "rows_need_gaps", ")", "!=", "nrows", ":", "for", "row", "in", "rows_need_gaps", ":", "row", ".", "insert", "(", "i", ",", "gap_char", ")", "i", "+=", "1", "return", "[", "''", ".", "join", "(", "row", ")", "for", "row", "in", "all_chars", "]" ]
Add gaps to a block so all residues in a column are equivalent. Given a block, containing a list of "sequences" (dicts) each containing a "seq" (actual string sequence, where upper=match, lower=insert, dash=gap), insert gaps (- or .) into the sequences s.t. 1. columns line up properly, and 2. all resulting sequences have the same length. The reason this needs to be done is that the query/consensus sequence is not assigned gaps to account for inserts in the other sequences. We need to add the gaps back to obtain a normal alignment. `return`: a list of realigned sequence strings.
[ "Add", "gaps", "to", "a", "block", "so", "all", "residues", "in", "a", "column", "are", "equivalent", "." ]
eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L342-L371
train
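The gap-insertion loop is easiest to follow on a toy block. The sketch below copies the core of realign_seqs so it runs without the biocma package; the two sequences are invented (uppercase = match, lowercase = insert):

    # Toy worked example of the realign_seqs column-padding idea
    block = {'sequences': [{'seq': 'ABC'},       # three match columns, no insert
                           {'seq': 'AxyBC'}]}    # same columns plus a two-residue insert
    rows = [list(s['seq']) for s in block['sequences']]
    i = 0
    while i < len(rows[0]):
        need_gap = [r for r in rows if not r[i].islower()]
        if len(need_gap) != len(rows):     # some row has an insert in this column
            for r in need_gap:
                r.insert(i, '.')           # pad every other row with a gap
        i += 1
    print([''.join(r) for r in rows])      # ['A..BC', 'AxyBC'] -- equal lengths now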
etal/biocma
biocma/cma.py
collapse_to_consensus
def collapse_to_consensus(seqrecords, strict=False, do_iron=True): """Opposite of realign_seqs. Input sequences should all be the same length. The first record must be the consensus. """ level = 0 name = seqrecords[0].id # If this is a CMA alignment, extract additional info: if hasattr(seqrecords, '_records'): if hasattr(seqrecords, 'level'): level = seqrecords.level if hasattr(seqrecords, 'name'): name = seqrecords.name seqrecords = seqrecords._records consensus = seqrecords.pop(0) cons_length = len(consensus) for i, s in enumerate(seqrecords): if len(s) != cons_length: raise ValueError( "Sequence #%d has length %d, consensus is %d" % (i+2, len(s), cons_length)) if '.' in str(consensus.seq): # Strict -- error if there's a '-' if '-' in str(consensus.seq): if strict: raise ValueError("Consensus contains '-' gap characters") logging.warn("Consensus sequence contains both '.' and '-' gap " "characters -- is it really the consensus?") aligned_cols = [(c not in '.-') for c in str(consensus.seq)] else: aligned_cols = [c != '.' for c in str(consensus.seq)] else: # A little more ambiguous... aligned_cols = [c != '-' for c in str(consensus.seq)] consensus.seq = replace_asterisks(consensus.seq, 'consensus') # Start a block with the consensus sequence block = consensus2block(consensus, level=level, name=name) qlen = block['query_length'] # Collapse & add remaining sequences to the block for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords): # Collapse rec.seq down to aligned size new_mol_seq = [] is_beginning = True for aligned_col, char in zip(aligned_cols, replace_asterisks(rec.seq, index)): if aligned_col: is_beginning = False if char in '-.': # deletion new_mol_seq.append('-') else: # aligned character new_mol_seq.append(char.upper()) else: # it's an insert or nothing # (also, skip any left-side inserts) if char not in '-.' and not is_beginning: new_mol_seq.append(char.lower()) rec.seq = ''.join(new_mol_seq) if do_iron: rec.seq = iron(rec.seq) block['sequences'].append(seqrecord2sequence(rec, qlen, index)) return block
python
def collapse_to_consensus(seqrecords, strict=False, do_iron=True): """Opposite of realign_seqs. Input sequences should all be the same length. The first record must be the consensus. """ level = 0 name = seqrecords[0].id # If this is a CMA alignment, extract additional info: if hasattr(seqrecords, '_records'): if hasattr(seqrecords, 'level'): level = seqrecords.level if hasattr(seqrecords, 'name'): name = seqrecords.name seqrecords = seqrecords._records consensus = seqrecords.pop(0) cons_length = len(consensus) for i, s in enumerate(seqrecords): if len(s) != cons_length: raise ValueError( "Sequence #%d has length %d, consensus is %d" % (i+2, len(s), cons_length)) if '.' in str(consensus.seq): # Strict -- error if there's a '-' if '-' in str(consensus.seq): if strict: raise ValueError("Consensus contains '-' gap characters") logging.warn("Consensus sequence contains both '.' and '-' gap " "characters -- is it really the consensus?") aligned_cols = [(c not in '.-') for c in str(consensus.seq)] else: aligned_cols = [c != '.' for c in str(consensus.seq)] else: # A little more ambiguous... aligned_cols = [c != '-' for c in str(consensus.seq)] consensus.seq = replace_asterisks(consensus.seq, 'consensus') # Start a block with the consensus sequence block = consensus2block(consensus, level=level, name=name) qlen = block['query_length'] # Collapse & add remaining sequences to the block for index, rec in zip(xrange(2, len(seqrecords)+2), seqrecords): # Collapse rec.seq down to aligned size new_mol_seq = [] is_beginning = True for aligned_col, char in zip(aligned_cols, replace_asterisks(rec.seq, index)): if aligned_col: is_beginning = False if char in '-.': # deletion new_mol_seq.append('-') else: # aligned character new_mol_seq.append(char.upper()) else: # it's an insert or nothing # (also, skip any left-side inserts) if char not in '-.' and not is_beginning: new_mol_seq.append(char.lower()) rec.seq = ''.join(new_mol_seq) if do_iron: rec.seq = iron(rec.seq) block['sequences'].append(seqrecord2sequence(rec, qlen, index)) return block
[ "def", "collapse_to_consensus", "(", "seqrecords", ",", "strict", "=", "False", ",", "do_iron", "=", "True", ")", ":", "level", "=", "0", "name", "=", "seqrecords", "[", "0", "]", ".", "id", "# If this is a CMA alignment, extract additional info:", "if", "hasattr", "(", "seqrecords", ",", "'_records'", ")", ":", "if", "hasattr", "(", "seqrecords", ",", "'level'", ")", ":", "level", "=", "seqrecords", ".", "level", "if", "hasattr", "(", "seqrecords", ",", "'name'", ")", ":", "name", "=", "seqrecords", ".", "name", "seqrecords", "=", "seqrecords", ".", "_records", "consensus", "=", "seqrecords", ".", "pop", "(", "0", ")", "cons_length", "=", "len", "(", "consensus", ")", "for", "i", ",", "s", "in", "enumerate", "(", "seqrecords", ")", ":", "if", "len", "(", "s", ")", "!=", "cons_length", ":", "raise", "ValueError", "(", "\"Sequence #%d has length %d, consensus is %d\"", "%", "(", "i", "+", "2", ",", "len", "(", "s", ")", ",", "cons_length", ")", ")", "if", "'.'", "in", "str", "(", "consensus", ".", "seq", ")", ":", "# Strict -- error if there's a '-'", "if", "'-'", "in", "str", "(", "consensus", ".", "seq", ")", ":", "if", "strict", ":", "raise", "ValueError", "(", "\"Consensus contains '-' gap characters\"", ")", "logging", ".", "warn", "(", "\"Consensus sequence contains both '.' and '-' gap \"", "\"characters -- is it really the consensus?\"", ")", "aligned_cols", "=", "[", "(", "c", "not", "in", "'.-'", ")", "for", "c", "in", "str", "(", "consensus", ".", "seq", ")", "]", "else", ":", "aligned_cols", "=", "[", "c", "!=", "'.'", "for", "c", "in", "str", "(", "consensus", ".", "seq", ")", "]", "else", ":", "# A little more ambiguous...", "aligned_cols", "=", "[", "c", "!=", "'-'", "for", "c", "in", "str", "(", "consensus", ".", "seq", ")", "]", "consensus", ".", "seq", "=", "replace_asterisks", "(", "consensus", ".", "seq", ",", "'consensus'", ")", "# Start a block with the consensus sequence", "block", "=", "consensus2block", "(", "consensus", ",", "level", "=", "level", ",", "name", "=", "name", ")", "qlen", "=", "block", "[", "'query_length'", "]", "# Collapse & add remaining sequences to the block", "for", "index", ",", "rec", "in", "zip", "(", "xrange", "(", "2", ",", "len", "(", "seqrecords", ")", "+", "2", ")", ",", "seqrecords", ")", ":", "# Collapse rec.seq down to aligned size", "new_mol_seq", "=", "[", "]", "is_beginning", "=", "True", "for", "aligned_col", ",", "char", "in", "zip", "(", "aligned_cols", ",", "replace_asterisks", "(", "rec", ".", "seq", ",", "index", ")", ")", ":", "if", "aligned_col", ":", "is_beginning", "=", "False", "if", "char", "in", "'-.'", ":", "# deletion", "new_mol_seq", ".", "append", "(", "'-'", ")", "else", ":", "# aligned character", "new_mol_seq", ".", "append", "(", "char", ".", "upper", "(", ")", ")", "else", ":", "# it's an insert or nothing", "# (also, skip any left-side inserts)", "if", "char", "not", "in", "'-.'", "and", "not", "is_beginning", ":", "new_mol_seq", ".", "append", "(", "char", ".", "lower", "(", ")", ")", "rec", ".", "seq", "=", "''", ".", "join", "(", "new_mol_seq", ")", "if", "do_iron", ":", "rec", ".", "seq", "=", "iron", "(", "rec", ".", "seq", ")", "block", "[", "'sequences'", "]", ".", "append", "(", "seqrecord2sequence", "(", "rec", ",", "qlen", ",", "index", ")", ")", "return", "block" ]
Opposite of realign_seqs. Input sequences should all be the same length. The first record must be the consensus.
[ "Opposite", "of", "realign_seqs", "." ]
eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L426-L495
train
etal/biocma
biocma/cma.py
iron
def iron(sequence): """'Iron out' indel regions in the aligned sequence. Any inserts next to deletions are converted to matches (uppercase). Given a CMA string like: AAAAbc--de-f--gAAA Result: AAAABCDEFgAAA """ r_indel = re.compile(r'(-[a-y]|[a-y]-)') orig_sequence = sequence while r_indel.search(sequence): in_insert = False in_gap = False seen_gaps = 0 inserts = [] outchars = [] for char in sequence: if in_insert: if char.islower(): # Extend the insert inserts.append(char) elif char.isupper(): # Indel is over; 'iron' out & emit inserts, then gaps in_insert = False outchars.extend(inserts) inserts = [] outchars.append('-' * seen_gaps) seen_gaps = 0 outchars.append(char) else: # Convert a preceding indel char to a 'match' (uppercase) # If the indel and gap are both multiple chars, this will # capitalize the insert left-to-right, then leave any gap # remainer as-is. assert char == '-' if not inserts: in_insert = False in_gap = True seen_gaps += 1 else: outchars.append(inserts.pop(0).upper()) # NB: Only leave the insert region if we've finished # converting all the insert chars if not inserts: in_insert = False in_gap = True elif in_gap: if char.islower(): in_insert = True in_gap = False # If some inserts previously seen, emit them now # If no inserts have been seen yet, we'll iron this indel if inserts: outchars.extend(inserts) outchars.append('-' * seen_gaps) seen_gaps = 0 inserts = [char] elif char.isupper(): in_gap = False # End of the gap -- emit if inserts: outchars.extend(inserts) inserts = [] outchars.append('-' * seen_gaps) seen_gaps = 0 outchars.append(char) else: # Extend the gap assert char == '-' seen_gaps += 1 else: assert not inserts and not seen_gaps, ( "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s" % (inserts, seen_gaps, sequence, in_insert, in_gap)) # Coming from Match state if char.isupper(): # Extend the match outchars.append(char) elif char.islower(): inserts.append(char) in_insert = True else: assert char == '-' seen_gaps += 1 in_gap = True # Emit any trailing indel if inserts: outchars.extend(inserts) if seen_gaps: outchars.append('-' * seen_gaps) sequence = ''.join(outchars) # logging.info(sequence) assert (sequence.replace('-', '').upper() == orig_sequence.replace('-', '').upper()), \ '\nOrig: ' + orig_sequence + \ '\nIron: ' + sequence return sequence
python
def iron(sequence): """'Iron out' indel regions in the aligned sequence. Any inserts next to deletions are converted to matches (uppercase). Given a CMA string like: AAAAbc--de-f--gAAA Result: AAAABCDEFgAAA """ r_indel = re.compile(r'(-[a-y]|[a-y]-)') orig_sequence = sequence while r_indel.search(sequence): in_insert = False in_gap = False seen_gaps = 0 inserts = [] outchars = [] for char in sequence: if in_insert: if char.islower(): # Extend the insert inserts.append(char) elif char.isupper(): # Indel is over; 'iron' out & emit inserts, then gaps in_insert = False outchars.extend(inserts) inserts = [] outchars.append('-' * seen_gaps) seen_gaps = 0 outchars.append(char) else: # Convert a preceding indel char to a 'match' (uppercase) # If the indel and gap are both multiple chars, this will # capitalize the insert left-to-right, then leave any gap # remainer as-is. assert char == '-' if not inserts: in_insert = False in_gap = True seen_gaps += 1 else: outchars.append(inserts.pop(0).upper()) # NB: Only leave the insert region if we've finished # converting all the insert chars if not inserts: in_insert = False in_gap = True elif in_gap: if char.islower(): in_insert = True in_gap = False # If some inserts previously seen, emit them now # If no inserts have been seen yet, we'll iron this indel if inserts: outchars.extend(inserts) outchars.append('-' * seen_gaps) seen_gaps = 0 inserts = [char] elif char.isupper(): in_gap = False # End of the gap -- emit if inserts: outchars.extend(inserts) inserts = [] outchars.append('-' * seen_gaps) seen_gaps = 0 outchars.append(char) else: # Extend the gap assert char == '-' seen_gaps += 1 else: assert not inserts and not seen_gaps, ( "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s" % (inserts, seen_gaps, sequence, in_insert, in_gap)) # Coming from Match state if char.isupper(): # Extend the match outchars.append(char) elif char.islower(): inserts.append(char) in_insert = True else: assert char == '-' seen_gaps += 1 in_gap = True # Emit any trailing indel if inserts: outchars.extend(inserts) if seen_gaps: outchars.append('-' * seen_gaps) sequence = ''.join(outchars) # logging.info(sequence) assert (sequence.replace('-', '').upper() == orig_sequence.replace('-', '').upper()), \ '\nOrig: ' + orig_sequence + \ '\nIron: ' + sequence return sequence
[ "def", "iron", "(", "sequence", ")", ":", "r_indel", "=", "re", ".", "compile", "(", "r'(-[a-y]|[a-y]-)'", ")", "orig_sequence", "=", "sequence", "while", "r_indel", ".", "search", "(", "sequence", ")", ":", "in_insert", "=", "False", "in_gap", "=", "False", "seen_gaps", "=", "0", "inserts", "=", "[", "]", "outchars", "=", "[", "]", "for", "char", "in", "sequence", ":", "if", "in_insert", ":", "if", "char", ".", "islower", "(", ")", ":", "# Extend the insert", "inserts", ".", "append", "(", "char", ")", "elif", "char", ".", "isupper", "(", ")", ":", "# Indel is over; 'iron' out & emit inserts, then gaps", "in_insert", "=", "False", "outchars", ".", "extend", "(", "inserts", ")", "inserts", "=", "[", "]", "outchars", ".", "append", "(", "'-'", "*", "seen_gaps", ")", "seen_gaps", "=", "0", "outchars", ".", "append", "(", "char", ")", "else", ":", "# Convert a preceding indel char to a 'match' (uppercase)", "# If the indel and gap are both multiple chars, this will", "# capitalize the insert left-to-right, then leave any gap", "# remainer as-is.", "assert", "char", "==", "'-'", "if", "not", "inserts", ":", "in_insert", "=", "False", "in_gap", "=", "True", "seen_gaps", "+=", "1", "else", ":", "outchars", ".", "append", "(", "inserts", ".", "pop", "(", "0", ")", ".", "upper", "(", ")", ")", "# NB: Only leave the insert region if we've finished", "# converting all the insert chars", "if", "not", "inserts", ":", "in_insert", "=", "False", "in_gap", "=", "True", "elif", "in_gap", ":", "if", "char", ".", "islower", "(", ")", ":", "in_insert", "=", "True", "in_gap", "=", "False", "# If some inserts previously seen, emit them now", "# If no inserts have been seen yet, we'll iron this indel", "if", "inserts", ":", "outchars", ".", "extend", "(", "inserts", ")", "outchars", ".", "append", "(", "'-'", "*", "seen_gaps", ")", "seen_gaps", "=", "0", "inserts", "=", "[", "char", "]", "elif", "char", ".", "isupper", "(", ")", ":", "in_gap", "=", "False", "# End of the gap -- emit", "if", "inserts", ":", "outchars", ".", "extend", "(", "inserts", ")", "inserts", "=", "[", "]", "outchars", ".", "append", "(", "'-'", "*", "seen_gaps", ")", "seen_gaps", "=", "0", "outchars", ".", "append", "(", "char", ")", "else", ":", "# Extend the gap", "assert", "char", "==", "'-'", "seen_gaps", "+=", "1", "else", ":", "assert", "not", "inserts", "and", "not", "seen_gaps", ",", "(", "\"Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s\"", "%", "(", "inserts", ",", "seen_gaps", ",", "sequence", ",", "in_insert", ",", "in_gap", ")", ")", "# Coming from Match state", "if", "char", ".", "isupper", "(", ")", ":", "# Extend the match", "outchars", ".", "append", "(", "char", ")", "elif", "char", ".", "islower", "(", ")", ":", "inserts", ".", "append", "(", "char", ")", "in_insert", "=", "True", "else", ":", "assert", "char", "==", "'-'", "seen_gaps", "+=", "1", "in_gap", "=", "True", "# Emit any trailing indel", "if", "inserts", ":", "outchars", ".", "extend", "(", "inserts", ")", "if", "seen_gaps", ":", "outchars", ".", "append", "(", "'-'", "*", "seen_gaps", ")", "sequence", "=", "''", ".", "join", "(", "outchars", ")", "# logging.info(sequence)", "assert", "(", "sequence", ".", "replace", "(", "'-'", ",", "''", ")", ".", "upper", "(", ")", "==", "orig_sequence", ".", "replace", "(", "'-'", ",", "''", ")", ".", "upper", "(", ")", ")", ",", "'\\nOrig: '", "+", "orig_sequence", "+", "'\\nIron: '", "+", "sequence", "return", "sequence" ]
'Iron out' indel regions in the aligned sequence. Any inserts next to deletions are converted to matches (uppercase). Given a CMA string like: AAAAbc--de-f--gAAA Result: AAAABCDEFgAAA
[ "Iron", "out", "indel", "regions", "in", "the", "aligned", "sequence", "." ]
eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L498-L601
train
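A minimal usage check, assuming the biocma package is importable (iron is defined at module level in biocma/cma.py); the input and expected output are taken straight from the docstring:

    from biocma.cma import iron
    print(iron('AAAAbc--de-f--gAAA'))   # docstring says this irons out to: AAAABCDEFgAAA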
jreinhardt/handkerchief
handkerchief/handkerchief.py
get_github_content
def get_github_content(repo,path,auth=None): """ Retrieve text files from a github repo """ request = requests.get(file_url.format(repo=repo, path=path), auth=auth) if not request.ok: print("There is a problem with the request") print(file_url.format(repo=repo, path=path)) print(request.json()) exit(1) if not request.json()['encoding'] == 'base64': raise RuntimeError("Unknown Encoding encountered when fetching {} from repo {}: {}".format(path,repo,request.json()['encoding'])) return request.json()['content'].decode('base64').decode('utf8')
python
def get_github_content(repo,path,auth=None): """ Retrieve text files from a github repo """ request = requests.get(file_url.format(repo=repo, path=path), auth=auth) if not request.ok: print("There is a problem with the request") print(file_url.format(repo=repo, path=path)) print(request.json()) exit(1) if not request.json()['encoding'] == 'base64': raise RuntimeError("Unknown Encoding encountered when fetching {} from repo {}: {}".format(path,repo,request.json()['encoding'])) return request.json()['content'].decode('base64').decode('utf8')
[ "def", "get_github_content", "(", "repo", ",", "path", ",", "auth", "=", "None", ")", ":", "request", "=", "requests", ".", "get", "(", "file_url", ".", "format", "(", "repo", "=", "repo", ",", "path", "=", "path", ")", ",", "auth", "=", "auth", ")", "if", "not", "request", ".", "ok", ":", "print", "(", "\"There is a problem with the request\"", ")", "print", "(", "file_url", ".", "format", "(", "repo", "=", "repo", ",", "path", "=", "path", ")", ")", "print", "(", "request", ".", "json", "(", ")", ")", "exit", "(", "1", ")", "if", "not", "request", ".", "json", "(", ")", "[", "'encoding'", "]", "==", "'base64'", ":", "raise", "RuntimeError", "(", "\"Unknown Encoding encountered when fetching {} from repo {}: {}\"", ".", "format", "(", "path", ",", "repo", ",", "request", ".", "json", "(", ")", "[", "'encoding'", "]", ")", ")", "return", "request", ".", "json", "(", ")", "[", "'content'", "]", ".", "decode", "(", "'base64'", ")", ".", "decode", "(", "'utf8'", ")" ]
Retrieve text files from a github repo
[ "Retrieve", "text", "files", "from", "a", "github", "repo" ]
450291314ccbbf557b41a30ce9c523587758fe76
https://github.com/jreinhardt/handkerchief/blob/450291314ccbbf557b41a30ce9c523587758fe76/handkerchief/handkerchief.py#L68-L80
train
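One caveat: request.json()['content'].decode('base64') only works on Python 2 byte strings; under Python 3 the same decoding step would look roughly like the sketch below (standalone, with a made-up payload):

    # Python 3 sketch of the base64 decode step used in get_github_content
    import base64
    payload = 'aGVsbG8gd29ybGQ='                       # shape of the GitHub contents API field
    text = base64.b64decode(payload).decode('utf8')    # -> 'hello world'
    print(text)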
jreinhardt/handkerchief
handkerchief/handkerchief.py
collect_reponames
def collect_reponames(): """ Try to figure out a list of repos to consider by default from the contents of the working directory. """ reponames = [] #try to figure out the repo from git repo in current directory try: with open(os.devnull) as devnull: remote_data = subprocess.check_output(["git","remote","-v","show"],stderr=devnull) branches = {} for line in remote_data.decode('utf-8').split("\n"): if line.strip() == "": continue remote_match = re_mote.match(line) if not remote_match is None: branches[remote_match.group(1)] = remote_match.group(5) if len(branches) > 0: if "origin" in branches: reponames.append(branches["origin"]) else: reponames.append(branches.values()[0]) except OSError: pass except subprocess.CalledProcessError: pass #scan html files for further repos to consider for fname in glob.iglob("*.html"): fid = open(fname,"r","utf8") #check the second line for the repo marker fid.readline() line = fid.readline() match = re.match(repo_marker_re,line) if not match is None: reponames.append(match.group(1)) reponames = list(set(reponames)) return reponames
python
def collect_reponames(): """ Try to figure out a list of repos to consider by default from the contents of the working directory. """ reponames = [] #try to figure out the repo from git repo in current directory try: with open(os.devnull) as devnull: remote_data = subprocess.check_output(["git","remote","-v","show"],stderr=devnull) branches = {} for line in remote_data.decode('utf-8').split("\n"): if line.strip() == "": continue remote_match = re_mote.match(line) if not remote_match is None: branches[remote_match.group(1)] = remote_match.group(5) if len(branches) > 0: if "origin" in branches: reponames.append(branches["origin"]) else: reponames.append(branches.values()[0]) except OSError: pass except subprocess.CalledProcessError: pass #scan html files for further repos to consider for fname in glob.iglob("*.html"): fid = open(fname,"r","utf8") #check the second line for the repo marker fid.readline() line = fid.readline() match = re.match(repo_marker_re,line) if not match is None: reponames.append(match.group(1)) reponames = list(set(reponames)) return reponames
[ "def", "collect_reponames", "(", ")", ":", "reponames", "=", "[", "]", "#try to figure out the repo from git repo in current directory", "try", ":", "with", "open", "(", "os", ".", "devnull", ")", "as", "devnull", ":", "remote_data", "=", "subprocess", ".", "check_output", "(", "[", "\"git\"", ",", "\"remote\"", ",", "\"-v\"", ",", "\"show\"", "]", ",", "stderr", "=", "devnull", ")", "branches", "=", "{", "}", "for", "line", "in", "remote_data", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "\"\\n\"", ")", ":", "if", "line", ".", "strip", "(", ")", "==", "\"\"", ":", "continue", "remote_match", "=", "re_mote", ".", "match", "(", "line", ")", "if", "not", "remote_match", "is", "None", ":", "branches", "[", "remote_match", ".", "group", "(", "1", ")", "]", "=", "remote_match", ".", "group", "(", "5", ")", "if", "len", "(", "branches", ")", ">", "0", ":", "if", "\"origin\"", "in", "branches", ":", "reponames", ".", "append", "(", "branches", "[", "\"origin\"", "]", ")", "else", ":", "reponames", ".", "append", "(", "branches", ".", "values", "(", ")", "[", "0", "]", ")", "except", "OSError", ":", "pass", "except", "subprocess", ".", "CalledProcessError", ":", "pass", "#scan html files for further repos to consider", "for", "fname", "in", "glob", ".", "iglob", "(", "\"*.html\"", ")", ":", "fid", "=", "open", "(", "fname", ",", "\"r\"", ",", "\"utf8\"", ")", "#check the second line for the repo marker", "fid", ".", "readline", "(", ")", "line", "=", "fid", ".", "readline", "(", ")", "match", "=", "re", ".", "match", "(", "repo_marker_re", ",", "line", ")", "if", "not", "match", "is", "None", ":", "reponames", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "reponames", "=", "list", "(", "set", "(", "reponames", ")", ")", "return", "reponames" ]
Try to figure out a list of repos to consider by default from the contents of the working directory.
[ "Try", "to", "figure", "out", "a", "list", "of", "repos", "to", "consider", "by", "default", "from", "the", "contents", "of", "the", "working", "directory", "." ]
450291314ccbbf557b41a30ce9c523587758fe76
https://github.com/jreinhardt/handkerchief/blob/450291314ccbbf557b41a30ce9c523587758fe76/handkerchief/handkerchief.py#L248-L286
train
jreinhardt/handkerchief
handkerchief/handkerchief.py
collect_github_config
def collect_github_config(): """ Try load Github configuration such as usernames from the local or global git config """ github_config = {} for field in ["user", "token"]: try: github_config[field] = subprocess.check_output(["git", "config", "github.{}".format(field)]).decode('utf-8').strip() except (OSError, subprocess.CalledProcessError): pass return github_config
python
def collect_github_config(): """ Try load Github configuration such as usernames from the local or global git config """ github_config = {} for field in ["user", "token"]: try: github_config[field] = subprocess.check_output(["git", "config", "github.{}".format(field)]).decode('utf-8').strip() except (OSError, subprocess.CalledProcessError): pass return github_config
[ "def", "collect_github_config", "(", ")", ":", "github_config", "=", "{", "}", "for", "field", "in", "[", "\"user\"", ",", "\"token\"", "]", ":", "try", ":", "github_config", "[", "field", "]", "=", "subprocess", ".", "check_output", "(", "[", "\"git\"", ",", "\"config\"", ",", "\"github.{}\"", ".", "format", "(", "field", ")", "]", ")", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "except", "(", "OSError", ",", "subprocess", ".", "CalledProcessError", ")", ":", "pass", "return", "github_config" ]
Try to load Github configuration such as usernames from the local or global git config
[ "Try", "load", "Github", "configuration", "such", "as", "usernames", "from", "the", "local", "or", "global", "git", "config" ]
450291314ccbbf557b41a30ce9c523587758fe76
https://github.com/jreinhardt/handkerchief/blob/450291314ccbbf557b41a30ce9c523587758fe76/handkerchief/handkerchief.py#L289-L299
train
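For context, the keys this helper reads are plain git config entries; a standalone sketch of the same lookup for a single key (no handkerchief import; the key name matches the one used in the function):

    # Sketch of the per-key lookup performed by collect_github_config
    import subprocess
    try:
        user = subprocess.check_output(
            ['git', 'config', 'github.user']).decode('utf-8').strip()
        print('github.user =', user)
    except (OSError, subprocess.CalledProcessError):
        print('github.user is not set')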
portfors-lab/sparkle
sparkle/gui/calibration_widget.py
CalibrationWidget.setCurveModel
def setCurveModel(self, model): """Sets the stimulus model for the calibration curve test :param model: Stimulus model that has a tone curve configured :type model: :class:`StimulusModel <sparkle.stim.stimulus_model.StimulusModel>` """ self.stimModel = model self.ui.curveWidget.setModel(model)
python
def setCurveModel(self, model): """Sets the stimulus model for the calibration curve test :param model: Stimulus model that has a tone curve configured :type model: :class:`StimulusModel <sparkle.stim.stimulus_model.StimulusModel>` """ self.stimModel = model self.ui.curveWidget.setModel(model)
[ "def", "setCurveModel", "(", "self", ",", "model", ")", ":", "self", ".", "stimModel", "=", "model", "self", ".", "ui", ".", "curveWidget", ".", "setModel", "(", "model", ")" ]
Sets the stimulus model for the calibration curve test :param model: Stimulus model that has a tone curve configured :type model: :class:`StimulusModel <sparkle.stim.stimulus_model.StimulusModel>`
[ "Sets", "the", "stimulus", "model", "for", "the", "calibration", "curve", "test" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/calibration_widget.py#L24-L31
train
portfors-lab/sparkle
sparkle/gui/calibration_widget.py
CalibrationWidget.addOption
def addOption(self, stim): """Adds a stimulus to the list of stims to use for testing calibration :param stim: stimulus to add to drop-down list :type stim: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` """ # set the editor widgets for noise and sweep self.ui.calTypeCmbbx.insertItem(0,stim.name) editor = stim.showEditor() # should probably make this less coupled durInput = editor.durationInputWidget() self.durationWidgets.append(durInput) durInput.setEnabled(False) self.ui.caleditorStack.insertWidget(0, editor) self.ui.calTypeCmbbx.setCurrentIndex(0)
python
def addOption(self, stim): """Adds a stimulus to the list of stims to use for testing calibration :param stim: stimulus to add to drop-down list :type stim: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` """ # set the editor widgets for noise and sweep self.ui.calTypeCmbbx.insertItem(0,stim.name) editor = stim.showEditor() # should probably make this less coupled durInput = editor.durationInputWidget() self.durationWidgets.append(durInput) durInput.setEnabled(False) self.ui.caleditorStack.insertWidget(0, editor) self.ui.calTypeCmbbx.setCurrentIndex(0)
[ "def", "addOption", "(", "self", ",", "stim", ")", ":", "# set the editor widgets for noise and sweep", "self", ".", "ui", ".", "calTypeCmbbx", ".", "insertItem", "(", "0", ",", "stim", ".", "name", ")", "editor", "=", "stim", ".", "showEditor", "(", ")", "# should probably make this less coupled", "durInput", "=", "editor", ".", "durationInputWidget", "(", ")", "self", ".", "durationWidgets", ".", "append", "(", "durInput", ")", "durInput", ".", "setEnabled", "(", "False", ")", "self", ".", "ui", ".", "caleditorStack", ".", "insertWidget", "(", "0", ",", "editor", ")", "self", ".", "ui", ".", "calTypeCmbbx", ".", "setCurrentIndex", "(", "0", ")" ]
Adds a stimulus to the list of stims to use for testing calibration :param stim: stimulus to add to drop-down list :type stim: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
[ "Adds", "a", "stimulus", "to", "the", "list", "of", "stims", "to", "use", "for", "testing", "calibration" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/calibration_widget.py#L42-L56
train
portfors-lab/sparkle
sparkle/gui/calibration_widget.py
CalibrationWidget.saveToObject
def saveToObject(self): """Saves the current UI setting to the model""" for i in range(self.ui.caleditorStack.count()): try: self.ui.caleditorStack.widget(i).saveToObject() except AttributeError: logger = logging.getLogger('main') logger.debug('index {} does not have method saveToObject'.format(i))
python
def saveToObject(self): """Saves the current UI setting to the model""" for i in range(self.ui.caleditorStack.count()): try: self.ui.caleditorStack.widget(i).saveToObject() except AttributeError: logger = logging.getLogger('main') logger.debug('index {} does not have method saveToObject'.format(i))
[ "def", "saveToObject", "(", "self", ")", ":", "for", "i", "in", "range", "(", "self", ".", "ui", ".", "caleditorStack", ".", "count", "(", ")", ")", ":", "try", ":", "self", ".", "ui", ".", "caleditorStack", ".", "widget", "(", "i", ")", ".", "saveToObject", "(", ")", "except", "AttributeError", ":", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "debug", "(", "'index {} does not have method saveToObject'", ".", "format", "(", "i", ")", ")" ]
Saves the current UI setting to the model
[ "Saves", "the", "current", "UI", "setting", "to", "the", "model" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/calibration_widget.py#L58-L65
train
portfors-lab/sparkle
sparkle/gui/calibration_widget.py
CalibrationWidget.isToneCal
def isToneCal(self): """Whether the currently selected calibration stimulus type is the calibration curve :returns: boolean -- if the current combo box selection is calibration curve """ return self.ui.calTypeCmbbx.currentIndex() == self.ui.calTypeCmbbx.count() -1
python
def isToneCal(self): """Whether the currently selected calibration stimulus type is the calibration curve :returns: boolean -- if the current combo box selection is calibration curve """ return self.ui.calTypeCmbbx.currentIndex() == self.ui.calTypeCmbbx.count() -1
[ "def", "isToneCal", "(", "self", ")", ":", "return", "self", ".", "ui", ".", "calTypeCmbbx", ".", "currentIndex", "(", ")", "==", "self", ".", "ui", ".", "calTypeCmbbx", ".", "count", "(", ")", "-", "1" ]
Whether the currently selected calibration stimulus type is the calibration curve :returns: boolean -- if the current combo box selection is calibration curve
[ "Whether", "the", "currently", "selected", "calibration", "stimulus", "type", "is", "the", "calibration", "curve" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/calibration_widget.py#L81-L86
train
portfors-lab/sparkle
sparkle/acq/players.py
AbstractPlayerBase.reset_generation
def reset_generation(self, trigger): """Re-arms the analog output according to current settings :param trigger: name of the trigger terminal. ``None`` value means generation begins immediately on run :type trigger: str """ self.tone_lock.acquire() npts = self.stim.size try: self.aotask = AOTaskFinite(self.aochan, self.fs, npts, trigsrc=trigger) self.aotask.write(self.stim) if self.attenuator is not None: self.attenuator.SetAtten(self.atten) else: # print "ERROR: attenuation not set!" pass # raise self.ngenerated +=1 if self.stim_changed: new_gen = self.stim else: new_gen = None self.stim_changed = False except: print u'ERROR! TERMINATE!' self.tone_lock.release() raise self.tone_lock.release() return new_gen
python
def reset_generation(self, trigger): """Re-arms the analog output according to current settings :param trigger: name of the trigger terminal. ``None`` value means generation begins immediately on run :type trigger: str """ self.tone_lock.acquire() npts = self.stim.size try: self.aotask = AOTaskFinite(self.aochan, self.fs, npts, trigsrc=trigger) self.aotask.write(self.stim) if self.attenuator is not None: self.attenuator.SetAtten(self.atten) else: # print "ERROR: attenuation not set!" pass # raise self.ngenerated +=1 if self.stim_changed: new_gen = self.stim else: new_gen = None self.stim_changed = False except: print u'ERROR! TERMINATE!' self.tone_lock.release() raise self.tone_lock.release() return new_gen
[ "def", "reset_generation", "(", "self", ",", "trigger", ")", ":", "self", ".", "tone_lock", ".", "acquire", "(", ")", "npts", "=", "self", ".", "stim", ".", "size", "try", ":", "self", ".", "aotask", "=", "AOTaskFinite", "(", "self", ".", "aochan", ",", "self", ".", "fs", ",", "npts", ",", "trigsrc", "=", "trigger", ")", "self", ".", "aotask", ".", "write", "(", "self", ".", "stim", ")", "if", "self", ".", "attenuator", "is", "not", "None", ":", "self", ".", "attenuator", ".", "SetAtten", "(", "self", ".", "atten", ")", "else", ":", "# print \"ERROR: attenuation not set!\"", "pass", "# raise", "self", ".", "ngenerated", "+=", "1", "if", "self", ".", "stim_changed", ":", "new_gen", "=", "self", ".", "stim", "else", ":", "new_gen", "=", "None", "self", ".", "stim_changed", "=", "False", "except", ":", "print", "u'ERROR! TERMINATE!'", "self", ".", "tone_lock", ".", "release", "(", ")", "raise", "self", ".", "tone_lock", ".", "release", "(", ")", "return", "new_gen" ]
Re-arms the analog output according to current settings :param trigger: name of the trigger terminal. ``None`` value means generation begins immediately on run :type trigger: str
[ "Re", "-", "arms", "the", "analog", "output", "according", "to", "current", "settings" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L55-L87
train
portfors-lab/sparkle
sparkle/acq/players.py
AbstractPlayerBase.set_stim
def set_stim(self, signal, fs, attenuation=0): """Sets any vector as the next stimulus to be output. Does not call write to hardware""" self.tone_lock.acquire() self.stim = signal self.fs = fs self.atten = attenuation self.stim_changed = True self.tone_lock.release()
python
def set_stim(self, signal, fs, attenuation=0): """Sets any vector as the next stimulus to be output. Does not call write to hardware""" self.tone_lock.acquire() self.stim = signal self.fs = fs self.atten = attenuation self.stim_changed = True self.tone_lock.release()
[ "def", "set_stim", "(", "self", ",", "signal", ",", "fs", ",", "attenuation", "=", "0", ")", ":", "self", ".", "tone_lock", ".", "acquire", "(", ")", "self", ".", "stim", "=", "signal", "self", ".", "fs", "=", "fs", "self", ".", "atten", "=", "attenuation", "self", ".", "stim_changed", "=", "True", "self", ".", "tone_lock", ".", "release", "(", ")" ]
Sets any vector as the next stimulus to be output. Does not call write to hardware
[ "Sets", "any", "vector", "as", "the", "next", "stimulus", "to", "be", "output", ".", "Does", "not", "call", "write", "to", "hardware" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L89-L98
train
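The two player methods above are used as a pair: set_stim only stores the vector and flags it as changed, while reset_generation re-arms the analog output task and hands back the stimulus array only on its first presentation. A minimal sketch of that contract follows; the FinitePlayer constructor, the omitted channel/device configuration, and the tone parameters are illustrative assumptions, and real NI-DAQ hardware would be needed to actually run it.

import numpy as np
# Sketch only: the import path mirrors sparkle/acq/players.py, but the
# constructor arguments and the required output-channel setup are assumed.
from sparkle.acq.players import FinitePlayer

fs = 500000                                    # assumed generation rate (Hz)
t = np.arange(0, 0.01, 1.0 / fs)               # 10 ms stimulus window
tone = 0.5 * np.sin(2 * np.pi * 5000 * t)      # 5 kHz pure tone

player = FinitePlayer()                        # assumed no-argument constructor
player.set_stim(tone, fs, attenuation=0)       # store the vector; nothing is written to hardware yet
first = player.reset_generation(trigger=None)  # re-arm the output; a novel stimulus is returned
again = player.reset_generation(trigger=None)  # same stimulus re-armed again; returns None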
portfors-lab/sparkle
sparkle/acq/players.py
AbstractPlayerBase.connect_attenuator
def connect_attenuator(self, connect=True): """Establish a connection to the TDT PA5 attenuator""" if connect: try: pa5 = win32com.client.Dispatch("PA5.x") success = pa5.ConnectPA5('GB', 1) if success == 1: print 'Connection to PA5 attenuator established' pass else: print 'Connection to PA5 attenuator failed' errmsg = pa5.GetError() print u"Error: ", errmsg raise Exception(u"Attenuator connection failed") except: print "Error connecting to attenuator" pa5 = None self.attenuator = pa5 else: # if there is an attenuator, make sure it is set to 0 before disconnecting if self.attenuator: self.attenuator.setAtten(0) self.attenuator = None return self.attenuator
python
def connect_attenuator(self, connect=True): """Establish a connection to the TDT PA5 attenuator""" if connect: try: pa5 = win32com.client.Dispatch("PA5.x") success = pa5.ConnectPA5('GB', 1) if success == 1: print 'Connection to PA5 attenuator established' pass else: print 'Connection to PA5 attenuator failed' errmsg = pa5.GetError() print u"Error: ", errmsg raise Exception(u"Attenuator connection failed") except: print "Error connecting to attenuator" pa5 = None self.attenuator = pa5 else: # if there is an attenuator, make sure it is set to 0 before disconnecting if self.attenuator: self.attenuator.setAtten(0) self.attenuator = None return self.attenuator
[ "def", "connect_attenuator", "(", "self", ",", "connect", "=", "True", ")", ":", "if", "connect", ":", "try", ":", "pa5", "=", "win32com", ".", "client", ".", "Dispatch", "(", "\"PA5.x\"", ")", "success", "=", "pa5", ".", "ConnectPA5", "(", "'GB'", ",", "1", ")", "if", "success", "==", "1", ":", "print", "'Connection to PA5 attenuator established'", "pass", "else", ":", "print", "'Connection to PA5 attenuator failed'", "errmsg", "=", "pa5", ".", "GetError", "(", ")", "print", "u\"Error: \"", ",", "errmsg", "raise", "Exception", "(", "u\"Attenuator connection failed\"", ")", "except", ":", "print", "\"Error connecting to attenuator\"", "pa5", "=", "None", "self", ".", "attenuator", "=", "pa5", "else", ":", "# if there is an attenuator, make sure it is set to 0 before disconnecting", "if", "self", ".", "attenuator", ":", "self", ".", "attenuator", ".", "setAtten", "(", "0", ")", "self", ".", "attenuator", "=", "None", "return", "self", ".", "attenuator" ]
Establish a connection to the TDT PA5 attenuator
[ "Establish", "a", "connection", "to", "the", "TDT", "PA5", "attenuator" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L153-L177
train
portfors-lab/sparkle
sparkle/acq/players.py
AbstractPlayerBase.start_timer
def start_timer(self, reprate): """Start the digital output task that serves as the acquistion trigger""" print 'starting digital output at rate {} Hz'.format(reprate) self.trigger_task = DigitalOutTask(self.trigger_src, reprate) self.trigger_task.start()
python
def start_timer(self, reprate): """Start the digital output task that serves as the acquistion trigger""" print 'starting digital output at rate {} Hz'.format(reprate) self.trigger_task = DigitalOutTask(self.trigger_src, reprate) self.trigger_task.start()
[ "def", "start_timer", "(", "self", ",", "reprate", ")", ":", "print", "'starting digital output at rate {} Hz'", ".", "format", "(", "reprate", ")", "self", ".", "trigger_task", "=", "DigitalOutTask", "(", "self", ".", "trigger_src", ",", "reprate", ")", "self", ".", "trigger_task", ".", "start", "(", ")" ]
Start the digital output task that serves as the acquistion trigger
[ "Start", "the", "digital", "output", "task", "that", "serves", "as", "the", "acquistion", "trigger" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L183-L187
train
portfors-lab/sparkle
sparkle/acq/players.py
FinitePlayer.start
def start(self): """Writes output buffer and settings to device :returns: numpy.ndarray -- if the first presentation of a novel stimulus, or None if a repeat stimulus """ # this shouldn't actually be possible still... if self.aitask is not None: self.stop() raise Exception("FIX ME : NESTED START OPERATIONS ALLOWED") self.daq_lock.acquire() self.ngenerated = 0 self.nacquired = 0 return self.reset()
python
def start(self): """Writes output buffer and settings to device :returns: numpy.ndarray -- if the first presentation of a novel stimulus, or None if a repeat stimulus """ # this shouldn't actually be possible still... if self.aitask is not None: self.stop() raise Exception("FIX ME : NESTED START OPERATIONS ALLOWED") self.daq_lock.acquire() self.ngenerated = 0 self.nacquired = 0 return self.reset()
[ "def", "start", "(", "self", ")", ":", "# this shouldn't actually be possible still...", "if", "self", ".", "aitask", "is", "not", "None", ":", "self", ".", "stop", "(", ")", "raise", "Exception", "(", "\"FIX ME : NESTED START OPERATIONS ALLOWED\"", ")", "self", ".", "daq_lock", ".", "acquire", "(", ")", "self", ".", "ngenerated", "=", "0", "self", ".", "nacquired", "=", "0", "return", "self", ".", "reset", "(", ")" ]
Writes output buffer and settings to device :returns: numpy.ndarray -- if the first presentation of a novel stimulus, or None if a repeat stimulus
[ "Writes", "output", "buffer", "and", "settings", "to", "device" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L200-L216
train
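As the docstrings indicate, start() above only arms the device: it refuses to run while an input task is still live, takes the DAQ lock, zeroes the generation/acquisition counters and delegates to reset(), while stop() in the next record is what tears the tasks down again. A hedged lifecycle sketch, with the same hardware and setup caveats as the earlier player example:

from sparkle.acq.players import FinitePlayer  # import path taken from the record above
player = FinitePlayer()                       # assumed constructor; channel/device setup omitted
first_stim = player.start()                   # acquire the DAQ lock and arm output/input via reset()
# ... trigger the presentation and collect the response here ...
player.stop()                                 # halt AI/AO tasks; required before the next start()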
portfors-lab/sparkle
sparkle/acq/players.py
FinitePlayer.stop
def stop(self): """Halts the acquisition, this must be called before resetting acquisition""" try: self.aitask.stop() self.aotask.stop() pass except: print u"No task running" self.aitask = None self.aotask = None
python
def stop(self): """Halts the acquisition, this must be called before resetting acquisition""" try: self.aitask.stop() self.aotask.stop() pass except: print u"No task running" self.aitask = None self.aotask = None
[ "def", "stop", "(", "self", ")", ":", "try", ":", "self", ".", "aitask", ".", "stop", "(", ")", "self", ".", "aotask", ".", "stop", "(", ")", "pass", "except", ":", "print", "u\"No task running\"", "self", ".", "aitask", "=", "None", "self", ".", "aotask", "=", "None" ]
Halts the acquisition, this must be called before resetting acquisition
[ "Halts", "the", "acquisition", "this", "must", "be", "called", "before", "resetting", "acquisition" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L268-L277
train
portfors-lab/sparkle
sparkle/acq/players.py
ContinuousPlayer.start_continuous
def start_continuous(self, aichans, update_hz=10): """Begins a continuous analog generation, calling a provided function at a rate of 10Hz :param aichans: name of channel(s) to record (analog input) from :type aichans: list<str> :param update_hz: Rate (Hz) at which to read data from the device input buffer :type update_hz: int """ self.daq_lock.acquire() self.ngenerated = 0 # number of stimuli presented during chart run npts = int(self.aifs/update_hz) #update display at 10Hz rate nchans = len(aichans) self.aitask = AITask(aichans, self.aifs, npts*5*nchans) self.aitask.register_callback(self._read_continuous, npts) self.aitask.start()
python
def start_continuous(self, aichans, update_hz=10): """Begins a continuous analog generation, calling a provided function at a rate of 10Hz :param aichans: name of channel(s) to record (analog input) from :type aichans: list<str> :param update_hz: Rate (Hz) at which to read data from the device input buffer :type update_hz: int """ self.daq_lock.acquire() self.ngenerated = 0 # number of stimuli presented during chart run npts = int(self.aifs/update_hz) #update display at 10Hz rate nchans = len(aichans) self.aitask = AITask(aichans, self.aifs, npts*5*nchans) self.aitask.register_callback(self._read_continuous, npts) self.aitask.start()
[ "def", "start_continuous", "(", "self", ",", "aichans", ",", "update_hz", "=", "10", ")", ":", "self", ".", "daq_lock", ".", "acquire", "(", ")", "self", ".", "ngenerated", "=", "0", "# number of stimuli presented during chart run", "npts", "=", "int", "(", "self", ".", "aifs", "/", "update_hz", ")", "#update display at 10Hz rate", "nchans", "=", "len", "(", "aichans", ")", "self", ".", "aitask", "=", "AITask", "(", "aichans", ",", "self", ".", "aifs", ",", "npts", "*", "5", "*", "nchans", ")", "self", ".", "aitask", ".", "register_callback", "(", "self", ".", "_read_continuous", ",", "npts", ")", "self", ".", "aitask", ".", "start", "(", ")" ]
Begins a continuous analog generation, calling a provided function at a rate of 10Hz :param aichans: name of channel(s) to record (analog input) from :type aichans: list<str> :param update_hz: Rate (Hz) at which to read data from the device input buffer :type update_hz: int
[ "Begins", "a", "continuous", "analog", "generation", "calling", "a", "provided", "function", "at", "a", "rate", "of", "10Hz" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L286-L302
train
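start_continuous sizes each read from the requested update rate (aifs / update_hz points per channel) and registers the player's internal _read_continuous callback before starting the input task. A sketch of the call; the ContinuousPlayer constructor, the direct aifs assignment and the channel names are assumptions, and stop_all from the record further below ends the run.

from sparkle.acq.players import ContinuousPlayer  # import path taken from the record above
player = ContinuousPlayer()                       # assumed constructor
player.aifs = 100000                              # aifs is referenced by the code above; direct assignment is assumed
player.start_continuous(['PCI-6259/ai0', 'PCI-6259/ai1'], update_hz=10)  # illustrative channel names
# ... incoming data is handled by the player's registered _read_continuous callback ...
player.stop_all()                                 # halts both output and input tasks and releases the lock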
portfors-lab/sparkle
sparkle/acq/players.py
ContinuousPlayer.run
def run(self): """Executes the stimulus generation, and returns when completed""" self.aotask.StartTask() self.aotask.wait() # don't return until generation finished self.aotask.stop() self.aotask = None
python
def run(self): """Executes the stimulus generation, and returns when completed""" self.aotask.StartTask() self.aotask.wait() # don't return until generation finished self.aotask.stop() self.aotask = None
[ "def", "run", "(", "self", ")", ":", "self", ".", "aotask", ".", "StartTask", "(", ")", "self", ".", "aotask", ".", "wait", "(", ")", "# don't return until generation finished", "self", ".", "aotask", ".", "stop", "(", ")", "self", ".", "aotask", "=", "None" ]
Executes the stimulus generation, and returns when completed
[ "Executes", "the", "stimulus", "generation", "and", "returns", "when", "completed" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L315-L320
train
portfors-lab/sparkle
sparkle/acq/players.py
ContinuousPlayer.stop_all
def stop_all(self): """Halts both the analog output and input tasks""" if self.aotask is not None: self.aotask.stop() self.aitask.stop() self.daq_lock.release() self.aitask = None self.aotask = None
python
def stop_all(self): """Halts both the analog output and input tasks""" if self.aotask is not None: self.aotask.stop() self.aitask.stop() self.daq_lock.release() self.aitask = None self.aotask = None
[ "def", "stop_all", "(", "self", ")", ":", "if", "self", ".", "aotask", "is", "not", "None", ":", "self", ".", "aotask", ".", "stop", "(", ")", "self", ".", "aitask", ".", "stop", "(", ")", "self", ".", "daq_lock", ".", "release", "(", ")", "self", ".", "aitask", "=", "None", "self", ".", "aotask", "=", "None" ]
Halts both the analog output and input tasks
[ "Halts", "both", "the", "analog", "output", "and", "input", "tasks" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/acq/players.py#L344-L351
train
amigocloud/python-amigocloud
amigocloud/amigocloud.py
AmigoCloud.get
def get(self, url, params=None, raw=False, stream=False, **request_kwargs): """ GET request to AmigoCloud endpoint. """ full_url = self.build_url(url) params = params or {} # Add token (if it's not already there) if self._token: params.setdefault('token', self._token) response = requests.get(full_url, params=params, stream=stream, **request_kwargs) self.check_for_errors(response) # Raise exception if something failed if stream: return response if raw or not response.content: return response.content return json.loads(response.text)
python
def get(self, url, params=None, raw=False, stream=False, **request_kwargs): """ GET request to AmigoCloud endpoint. """ full_url = self.build_url(url) params = params or {} # Add token (if it's not already there) if self._token: params.setdefault('token', self._token) response = requests.get(full_url, params=params, stream=stream, **request_kwargs) self.check_for_errors(response) # Raise exception if something failed if stream: return response if raw or not response.content: return response.content return json.loads(response.text)
[ "def", "get", "(", "self", ",", "url", ",", "params", "=", "None", ",", "raw", "=", "False", ",", "stream", "=", "False", ",", "*", "*", "request_kwargs", ")", ":", "full_url", "=", "self", ".", "build_url", "(", "url", ")", "params", "=", "params", "or", "{", "}", "# Add token (if it's not already there)", "if", "self", ".", "_token", ":", "params", ".", "setdefault", "(", "'token'", ",", "self", ".", "_token", ")", "response", "=", "requests", ".", "get", "(", "full_url", ",", "params", "=", "params", ",", "stream", "=", "stream", ",", "*", "*", "request_kwargs", ")", "self", ".", "check_for_errors", "(", "response", ")", "# Raise exception if something failed", "if", "stream", ":", "return", "response", "if", "raw", "or", "not", "response", ".", "content", ":", "return", "response", ".", "content", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
GET request to AmigoCloud endpoint.
[ "GET", "request", "to", "AmigoCloud", "endpoint", "." ]
d31403e7299cc46e3a3e1392090ee033f3a02b6d
https://github.com/amigocloud/python-amigocloud/blob/d31403e7299cc46e3a3e1392090ee033f3a02b6d/amigocloud/amigocloud.py#L118-L138
train
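get() above builds the absolute URL, injects the stored token as a query parameter, routes the response through check_for_errors, and by default decodes the JSON body; raw=True and stream=True switch the return type. A minimal sketch; the AmigoCloud import location, the constructor keyword and the 'me' endpoint are assumptions rather than facts taken from this record.

from amigocloud import AmigoCloud         # assumed import; the class lives in amigocloud/amigocloud.py
client = AmigoCloud(token='YOUR_TOKEN')   # assumed constructor keyword
profile = client.get('me')                # parsed JSON by default
raw_bytes = client.get('me', raw=True)    # raw=True returns the undecoded body
response = client.get('me', stream=True)  # stream=True returns the requests response for chunked reads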
amigocloud/python-amigocloud
amigocloud/amigocloud.py
AmigoCloud.post
def post(self, url, data=None, files=None, headers=None, raw=False, send_as_json=True, content_type=None, **request_kwargs): """ POST request to AmigoCloud endpoint. """ return self._secure_request( url, 'post', data=data, files=files, headers=headers, raw=raw, send_as_json=send_as_json, content_type=content_type, **request_kwargs )
python
def post(self, url, data=None, files=None, headers=None, raw=False, send_as_json=True, content_type=None, **request_kwargs): """ POST request to AmigoCloud endpoint. """ return self._secure_request( url, 'post', data=data, files=files, headers=headers, raw=raw, send_as_json=send_as_json, content_type=content_type, **request_kwargs )
[ "def", "post", "(", "self", ",", "url", ",", "data", "=", "None", ",", "files", "=", "None", ",", "headers", "=", "None", ",", "raw", "=", "False", ",", "send_as_json", "=", "True", ",", "content_type", "=", "None", ",", "*", "*", "request_kwargs", ")", ":", "return", "self", ".", "_secure_request", "(", "url", ",", "'post'", ",", "data", "=", "data", ",", "files", "=", "files", ",", "headers", "=", "headers", ",", "raw", "=", "raw", ",", "send_as_json", "=", "send_as_json", ",", "content_type", "=", "content_type", ",", "*", "*", "request_kwargs", ")" ]
POST request to AmigoCloud endpoint.
[ "POST", "request", "to", "AmigoCloud", "endpoint", "." ]
d31403e7299cc46e3a3e1392090ee033f3a02b6d
https://github.com/amigocloud/python-amigocloud/blob/d31403e7299cc46e3a3e1392090ee033f3a02b6d/amigocloud/amigocloud.py#L175-L185
train
amigocloud/python-amigocloud
amigocloud/amigocloud.py
AmigoCloud.upload_gallery_photo
def upload_gallery_photo(self, gallery_id, source_amigo_id, file_obj, chunk_size=CHUNK_SIZE, force_chunked=False, metadata=None): """ Upload a photo to a dataset's gallery. """ simple_upload_url = 'related_tables/%s/upload' % gallery_id chunked_upload_url = 'related_tables/%s/chunked_upload' % gallery_id data = {'source_amigo_id': source_amigo_id} if isinstance(file_obj, basestring): data['filename'] = os.path.basename(file_obj) else: data['filename'] = os.path.basename(file_obj.name) if metadata: data.update(metadata) return self.upload_file(simple_upload_url, chunked_upload_url, file_obj, chunk_size=chunk_size, force_chunked=force_chunked, extra_data=data)
python
def upload_gallery_photo(self, gallery_id, source_amigo_id, file_obj, chunk_size=CHUNK_SIZE, force_chunked=False, metadata=None): """ Upload a photo to a dataset's gallery. """ simple_upload_url = 'related_tables/%s/upload' % gallery_id chunked_upload_url = 'related_tables/%s/chunked_upload' % gallery_id data = {'source_amigo_id': source_amigo_id} if isinstance(file_obj, basestring): data['filename'] = os.path.basename(file_obj) else: data['filename'] = os.path.basename(file_obj.name) if metadata: data.update(metadata) return self.upload_file(simple_upload_url, chunked_upload_url, file_obj, chunk_size=chunk_size, force_chunked=force_chunked, extra_data=data)
[ "def", "upload_gallery_photo", "(", "self", ",", "gallery_id", ",", "source_amigo_id", ",", "file_obj", ",", "chunk_size", "=", "CHUNK_SIZE", ",", "force_chunked", "=", "False", ",", "metadata", "=", "None", ")", ":", "simple_upload_url", "=", "'related_tables/%s/upload'", "%", "gallery_id", "chunked_upload_url", "=", "'related_tables/%s/chunked_upload'", "%", "gallery_id", "data", "=", "{", "'source_amigo_id'", ":", "source_amigo_id", "}", "if", "isinstance", "(", "file_obj", ",", "basestring", ")", ":", "data", "[", "'filename'", "]", "=", "os", ".", "path", ".", "basename", "(", "file_obj", ")", "else", ":", "data", "[", "'filename'", "]", "=", "os", ".", "path", ".", "basename", "(", "file_obj", ".", "name", ")", "if", "metadata", ":", "data", ".", "update", "(", "metadata", ")", "return", "self", ".", "upload_file", "(", "simple_upload_url", ",", "chunked_upload_url", ",", "file_obj", ",", "chunk_size", "=", "chunk_size", ",", "force_chunked", "=", "force_chunked", ",", "extra_data", "=", "data", ")" ]
Upload a photo to a dataset's gallery.
[ "Upload", "a", "photo", "to", "a", "dataset", "s", "gallery", "." ]
d31403e7299cc46e3a3e1392090ee033f3a02b6d
https://github.com/amigocloud/python-amigocloud/blob/d31403e7299cc46e3a3e1392090ee033f3a02b6d/amigocloud/amigocloud.py#L306-L326
train
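upload_gallery_photo accepts either a filesystem path or an already-open file object, derives the filename from it, folds any metadata dict into the upload data, and delegates to upload_file against the related_tables endpoints. A sketch with placeholder identifiers; the client construction is assumed as in the earlier example.

from amigocloud import AmigoCloud         # assumed import, as above
client = AmigoCloud(token='YOUR_TOKEN')   # assumed constructor keyword
client.upload_gallery_photo(
    gallery_id=1234,                      # placeholder gallery id
    source_amigo_id='abcdef0123456789',   # placeholder amigo_id of the source row
    file_obj='photos/site_visit.jpg',     # a path works; an open binary file object does too
    metadata={'caption': 'north fence'},  # optional extra fields merged into the upload data
)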
amigocloud/python-amigocloud
amigocloud/amigocloud.py
AmigoCloud.listen_user_events
def listen_user_events(self): """ Authenticate to start listening to user events. """ if not self._user_id: raise AmigoCloudError(self.error_msg['logged_in_websockets']) response = self.get('/me/start_websocket_session') websocket_session = response['websocket_session'] auth_data = {'userid': self._user_id, 'websocket_session': websocket_session} self.amigosocket.emit('authenticate', auth_data)
python
def listen_user_events(self): """ Authenticate to start listening to user events. """ if not self._user_id: raise AmigoCloudError(self.error_msg['logged_in_websockets']) response = self.get('/me/start_websocket_session') websocket_session = response['websocket_session'] auth_data = {'userid': self._user_id, 'websocket_session': websocket_session} self.amigosocket.emit('authenticate', auth_data)
[ "def", "listen_user_events", "(", "self", ")", ":", "if", "not", "self", ".", "_user_id", ":", "raise", "AmigoCloudError", "(", "self", ".", "error_msg", "[", "'logged_in_websockets'", "]", ")", "response", "=", "self", ".", "get", "(", "'/me/start_websocket_session'", ")", "websocket_session", "=", "response", "[", "'websocket_session'", "]", "auth_data", "=", "{", "'userid'", ":", "self", ".", "_user_id", ",", "'websocket_session'", ":", "websocket_session", "}", "self", ".", "amigosocket", ".", "emit", "(", "'authenticate'", ",", "auth_data", ")" ]
Authenticate to start listening to user events.
[ "Authenticate", "to", "start", "listening", "to", "user", "events", "." ]
d31403e7299cc46e3a3e1392090ee033f3a02b6d
https://github.com/amigocloud/python-amigocloud/blob/d31403e7299cc46e3a3e1392090ee033f3a02b6d/amigocloud/amigocloud.py#L328-L340
train
amigocloud/python-amigocloud
amigocloud/amigocloud.py
AmigoCloud.listen_dataset_events
def listen_dataset_events(self, owner_id, project_id, dataset_id): """ Authenticate to start using dataset events. """ if not self._user_id: raise AmigoCloudError(self.error_msg['logged_in_websockets']) url = '/users/%s/projects/%s/datasets/%s/start_websocket_session' response = self.get(url % (owner_id, project_id, dataset_id)) websocket_session = response['websocket_session'] auth_data = {'userid': self._user_id, 'datasetid': dataset_id, 'websocket_session': websocket_session} self.amigosocket.emit('authenticate', auth_data)
python
def listen_dataset_events(self, owner_id, project_id, dataset_id): """ Authenticate to start using dataset events. """ if not self._user_id: raise AmigoCloudError(self.error_msg['logged_in_websockets']) url = '/users/%s/projects/%s/datasets/%s/start_websocket_session' response = self.get(url % (owner_id, project_id, dataset_id)) websocket_session = response['websocket_session'] auth_data = {'userid': self._user_id, 'datasetid': dataset_id, 'websocket_session': websocket_session} self.amigosocket.emit('authenticate', auth_data)
[ "def", "listen_dataset_events", "(", "self", ",", "owner_id", ",", "project_id", ",", "dataset_id", ")", ":", "if", "not", "self", ".", "_user_id", ":", "raise", "AmigoCloudError", "(", "self", ".", "error_msg", "[", "'logged_in_websockets'", "]", ")", "url", "=", "'/users/%s/projects/%s/datasets/%s/start_websocket_session'", "response", "=", "self", ".", "get", "(", "url", "%", "(", "owner_id", ",", "project_id", ",", "dataset_id", ")", ")", "websocket_session", "=", "response", "[", "'websocket_session'", "]", "auth_data", "=", "{", "'userid'", ":", "self", ".", "_user_id", ",", "'datasetid'", ":", "dataset_id", ",", "'websocket_session'", ":", "websocket_session", "}", "self", ".", "amigosocket", ".", "emit", "(", "'authenticate'", ",", "auth_data", ")" ]
Authenticate to start using dataset events.
[ "Authenticate", "to", "start", "using", "dataset", "events", "." ]
d31403e7299cc46e3a3e1392090ee033f3a02b6d
https://github.com/amigocloud/python-amigocloud/blob/d31403e7299cc46e3a3e1392090ee033f3a02b6d/amigocloud/amigocloud.py#L342-L356
train
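Both websocket helpers above follow the same handshake: require a logged-in user, fetch a websocket_session over HTTP, then emit an 'authenticate' event on the amigosocket connection. A sketch with placeholder ids; how event callbacks are attached afterwards is not shown in these records and is left out.

from amigocloud import AmigoCloud        # assumed import, as above
client = AmigoCloud(token='YOUR_TOKEN')  # assumed constructor; a user id must be resolvable for the handshake
client.listen_user_events()              # authenticate for user-scoped events
client.listen_dataset_events(owner_id=1, project_id=2, dataset_id=3)  # placeholder ids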
loganasherjones/yapconf
yapconf/docs.py
build_markdown_table
def build_markdown_table(headers, rows, row_keys=None): """Build a lined up markdown table. Args: headers (dict): A key -> value pairing fo the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string. """ row_maxes = _find_row_maxes(headers, rows) row_keys = row_keys or [key for key, value in headers.items()] table = [ _build_row(headers, row_maxes, row_keys), _build_separator(row_maxes, row_keys) ] for row in rows: table.append(_build_row(row, row_maxes, row_keys)) return '\n'.join(table) + '\n'
python
def build_markdown_table(headers, rows, row_keys=None): """Build a lined up markdown table. Args: headers (dict): A key -> value pairing fo the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string. """ row_maxes = _find_row_maxes(headers, rows) row_keys = row_keys or [key for key, value in headers.items()] table = [ _build_row(headers, row_maxes, row_keys), _build_separator(row_maxes, row_keys) ] for row in rows: table.append(_build_row(row, row_maxes, row_keys)) return '\n'.join(table) + '\n'
[ "def", "build_markdown_table", "(", "headers", ",", "rows", ",", "row_keys", "=", "None", ")", ":", "row_maxes", "=", "_find_row_maxes", "(", "headers", ",", "rows", ")", "row_keys", "=", "row_keys", "or", "[", "key", "for", "key", ",", "value", "in", "headers", ".", "items", "(", ")", "]", "table", "=", "[", "_build_row", "(", "headers", ",", "row_maxes", ",", "row_keys", ")", ",", "_build_separator", "(", "row_maxes", ",", "row_keys", ")", "]", "for", "row", "in", "rows", ":", "table", ".", "append", "(", "_build_row", "(", "row", ",", "row_maxes", ",", "row_keys", ")", ")", "return", "'\\n'", ".", "join", "(", "table", ")", "+", "'\\n'" ]
Build a lined up markdown table. Args: headers (dict): A key -> value pairing of the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string.
[ "Build", "a", "lined", "up", "markdown", "table", "." ]
d2970e6e7e3334615d4d978d8b0ca33006d79d16
https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/docs.py#L123-L144
train
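build_markdown_table pads every column to its widest cell, emits a header row and a separator row, then one row per input dict, honouring row_keys for column order (falling back to the header dict's own order). A small usage sketch; the import path mirrors yapconf/docs.py, and a package-level re-export is not guaranteed.

from yapconf.docs import build_markdown_table  # path taken from the record above

headers = {'name': 'Name', 'default': 'Default'}
rows = [
    {'name': 'db_host', 'default': 'localhost'},
    {'name': 'db_port', 'default': '5432'},
]
# Prints a lined-up markdown table: header row, separator row, one row per dict.
print(build_markdown_table(headers, rows, row_keys=['name', 'default']))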
jason-weirather/pythologist
pythologist/interface/__init__.py
SegmentationImageOutput.write_to_path
def write_to_path(self,path,suffix='',format='png',overwrite=False): """ Output the data the dataframe's 'image' column to a directory structured by project->sample and named by frame Args: path (str): Where to write the directory of images suffix (str): for labeling the imaages you write format (str): default 'png' format to write the file overwrite (bool): default False. if true can overwrite files in the path Modifies: Creates path folder if necessary and writes images to path """ if os.path.exists(path) and overwrite is False: raise ValueError("Error: use ovewrite=True to overwrite images") if not os.path.exists(path): os.makedirs(path) for i,r in self.iterrows(): spath = os.path.join(path,r['project_name'],r['sample_name']) if not os.path.exists(spath): os.makedirs(spath) if suffix == '': fname = os.path.join(spath,r['frame_name']+'.'+format) else: fname = os.path.join(spath,r['frame_name']+'_'+suffix+'.'+format) imageio.imwrite(fname, r['image'],format=format)
python
def write_to_path(self,path,suffix='',format='png',overwrite=False): """ Output the data the dataframe's 'image' column to a directory structured by project->sample and named by frame Args: path (str): Where to write the directory of images suffix (str): for labeling the imaages you write format (str): default 'png' format to write the file overwrite (bool): default False. if true can overwrite files in the path Modifies: Creates path folder if necessary and writes images to path """ if os.path.exists(path) and overwrite is False: raise ValueError("Error: use ovewrite=True to overwrite images") if not os.path.exists(path): os.makedirs(path) for i,r in self.iterrows(): spath = os.path.join(path,r['project_name'],r['sample_name']) if not os.path.exists(spath): os.makedirs(spath) if suffix == '': fname = os.path.join(spath,r['frame_name']+'.'+format) else: fname = os.path.join(spath,r['frame_name']+'_'+suffix+'.'+format) imageio.imwrite(fname, r['image'],format=format)
[ "def", "write_to_path", "(", "self", ",", "path", ",", "suffix", "=", "''", ",", "format", "=", "'png'", ",", "overwrite", "=", "False", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "overwrite", "is", "False", ":", "raise", "ValueError", "(", "\"Error: use ovewrite=True to overwrite images\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "for", "i", ",", "r", "in", "self", ".", "iterrows", "(", ")", ":", "spath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "r", "[", "'project_name'", "]", ",", "r", "[", "'sample_name'", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "spath", ")", ":", "os", ".", "makedirs", "(", "spath", ")", "if", "suffix", "==", "''", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "spath", ",", "r", "[", "'frame_name'", "]", "+", "'.'", "+", "format", ")", "else", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "spath", ",", "r", "[", "'frame_name'", "]", "+", "'_'", "+", "suffix", "+", "'.'", "+", "format", ")", "imageio", ".", "imwrite", "(", "fname", ",", "r", "[", "'image'", "]", ",", "format", "=", "format", ")" ]
Output the data the dataframe's 'image' column to a directory structured by project->sample and named by frame Args: path (str): Where to write the directory of images suffix (str): for labeling the images you write format (str): default 'png' format to write the file overwrite (bool): default False. if true can overwrite files in the path Modifies: Creates path folder if necessary and writes images to path
[ "Output", "the", "data", "the", "dataframe", "s", "image", "column", "to", "a", "directory", "structured", "by", "project", "-", ">", "sample", "and", "named", "by", "frame" ]
6eb4082be9dffa9570e4ceaa06d97845eac4c006
https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/interface/__init__.py#L22-L43
train
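write_to_path walks the dataframe and writes each row's 'image' array to <path>/<project_name>/<sample_name>/<frame_name>[_suffix].<format>, creating directories as needed and refusing to reuse an existing path unless overwrite=True. A sketch assuming seg_images is a SegmentationImageOutput, for example the return value of build_segmentation_image in the next record.

# seg_images is assumed to be a SegmentationImageOutput (e.g. produced by
# SegmentationImages.build_segmentation_image in the following record).
seg_images.write_to_path('qc/overlays', suffix='pd1', format='png', overwrite=True)
# -> qc/overlays/<project_name>/<sample_name>/<frame_name>_pd1.png for every row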
jason-weirather/pythologist
pythologist/interface/__init__.py
SegmentationImages.build_segmentation_image
def build_segmentation_image(self,schema,background=(0,0,0,0)): """ Put together an image. Defined by a list of layers with RGBA colors Make the schema example | schema = [ | {'subset_logic':SL(phenotypes=['SOX10+']), | 'edge_color':(31, 31, 46,255), | 'watershed_steps':0, | 'fill_color':(51, 51, 77,255) | }, | {'subset_logic':SL(phenotypes=['CD8+'],scored_calls={'PD1':'+'}), | 'edge_color':(255,0,0,255), | 'watershed_steps':1, | 'fill_color':(0,0,0,255) | }, | {'subset_logic':SL(phenotypes=['CD8+'],scored_calls={'PD1':'-'}), | 'edge_color':(255,0,255,255), | 'watershed_steps':1, | 'fill_color':(0,0,255,255) | } | ] | imgs = imageaccess.build_segmentation_image(schema,background=(0,0,0,255)) Args: schema (list): a list of layers (see example above) background (tuple): a color RGBA 0-255 tuple for the. background color Returns: SegmentationImageOutput: an output suitable for writing images """ cummulative = self.copy() def _set_blank(img,blank): img[:][:] = blank return img cummulative['merged'] = cummulative.apply(lambda x: _set_blank(np.zeros(list(x['shape'])+[4]),background) ,1) for layer in schema: if self.verbose: sys.stderr.write("Calculating layer "+str(layer)+"\n") images = self.get_outline_images(subset_logic=layer['subset_logic'], edge_color=layer['edge_color'], watershed_steps=layer['watershed_steps'], fill_color=layer['fill_color']) cummulative = cummulative.rename(columns={'merged':'old'}) cummulative = cummulative.merge(images,on=list(self.columns)) cummulative['new'] = cummulative.apply(lambda x: _merge_images(x['merged'],x['old']),1) cummulative = cummulative.drop(columns=['old','merged']).rename(columns={'new':'merged'}) cummulative = cummulative.rename(columns={'merged':'image'}) return SegmentationImageOutput(cummulative)
python
def build_segmentation_image(self,schema,background=(0,0,0,0)): """ Put together an image. Defined by a list of layers with RGBA colors Make the schema example | schema = [ | {'subset_logic':SL(phenotypes=['SOX10+']), | 'edge_color':(31, 31, 46,255), | 'watershed_steps':0, | 'fill_color':(51, 51, 77,255) | }, | {'subset_logic':SL(phenotypes=['CD8+'],scored_calls={'PD1':'+'}), | 'edge_color':(255,0,0,255), | 'watershed_steps':1, | 'fill_color':(0,0,0,255) | }, | {'subset_logic':SL(phenotypes=['CD8+'],scored_calls={'PD1':'-'}), | 'edge_color':(255,0,255,255), | 'watershed_steps':1, | 'fill_color':(0,0,255,255) | } | ] | imgs = imageaccess.build_segmentation_image(schema,background=(0,0,0,255)) Args: schema (list): a list of layers (see example above) background (tuple): a color RGBA 0-255 tuple for the. background color Returns: SegmentationImageOutput: an output suitable for writing images """ cummulative = self.copy() def _set_blank(img,blank): img[:][:] = blank return img cummulative['merged'] = cummulative.apply(lambda x: _set_blank(np.zeros(list(x['shape'])+[4]),background) ,1) for layer in schema: if self.verbose: sys.stderr.write("Calculating layer "+str(layer)+"\n") images = self.get_outline_images(subset_logic=layer['subset_logic'], edge_color=layer['edge_color'], watershed_steps=layer['watershed_steps'], fill_color=layer['fill_color']) cummulative = cummulative.rename(columns={'merged':'old'}) cummulative = cummulative.merge(images,on=list(self.columns)) cummulative['new'] = cummulative.apply(lambda x: _merge_images(x['merged'],x['old']),1) cummulative = cummulative.drop(columns=['old','merged']).rename(columns={'new':'merged'}) cummulative = cummulative.rename(columns={'merged':'image'}) return SegmentationImageOutput(cummulative)
[ "def", "build_segmentation_image", "(", "self", ",", "schema", ",", "background", "=", "(", "0", ",", "0", ",", "0", ",", "0", ")", ")", ":", "cummulative", "=", "self", ".", "copy", "(", ")", "def", "_set_blank", "(", "img", ",", "blank", ")", ":", "img", "[", ":", "]", "[", ":", "]", "=", "blank", "return", "img", "cummulative", "[", "'merged'", "]", "=", "cummulative", ".", "apply", "(", "lambda", "x", ":", "_set_blank", "(", "np", ".", "zeros", "(", "list", "(", "x", "[", "'shape'", "]", ")", "+", "[", "4", "]", ")", ",", "background", ")", ",", "1", ")", "for", "layer", "in", "schema", ":", "if", "self", ".", "verbose", ":", "sys", ".", "stderr", ".", "write", "(", "\"Calculating layer \"", "+", "str", "(", "layer", ")", "+", "\"\\n\"", ")", "images", "=", "self", ".", "get_outline_images", "(", "subset_logic", "=", "layer", "[", "'subset_logic'", "]", ",", "edge_color", "=", "layer", "[", "'edge_color'", "]", ",", "watershed_steps", "=", "layer", "[", "'watershed_steps'", "]", ",", "fill_color", "=", "layer", "[", "'fill_color'", "]", ")", "cummulative", "=", "cummulative", ".", "rename", "(", "columns", "=", "{", "'merged'", ":", "'old'", "}", ")", "cummulative", "=", "cummulative", ".", "merge", "(", "images", ",", "on", "=", "list", "(", "self", ".", "columns", ")", ")", "cummulative", "[", "'new'", "]", "=", "cummulative", ".", "apply", "(", "lambda", "x", ":", "_merge_images", "(", "x", "[", "'merged'", "]", ",", "x", "[", "'old'", "]", ")", ",", "1", ")", "cummulative", "=", "cummulative", ".", "drop", "(", "columns", "=", "[", "'old'", ",", "'merged'", "]", ")", ".", "rename", "(", "columns", "=", "{", "'new'", ":", "'merged'", "}", ")", "cummulative", "=", "cummulative", ".", "rename", "(", "columns", "=", "{", "'merged'", ":", "'image'", "}", ")", "return", "SegmentationImageOutput", "(", "cummulative", ")" ]
Put together an image. Defined by a list of layers with RGBA colors Make the schema example | schema = [ | {'subset_logic':SL(phenotypes=['SOX10+']), | 'edge_color':(31, 31, 46,255), | 'watershed_steps':0, | 'fill_color':(51, 51, 77,255) | }, | {'subset_logic':SL(phenotypes=['CD8+'],scored_calls={'PD1':'+'}), | 'edge_color':(255,0,0,255), | 'watershed_steps':1, | 'fill_color':(0,0,0,255) | }, | {'subset_logic':SL(phenotypes=['CD8+'],scored_calls={'PD1':'-'}), | 'edge_color':(255,0,255,255), | 'watershed_steps':1, | 'fill_color':(0,0,255,255) | } | ] | imgs = imageaccess.build_segmentation_image(schema,background=(0,0,0,255)) Args: schema (list): a list of layers (see example above) background (tuple): a color RGBA 0-255 tuple for the. background color Returns: SegmentationImageOutput: an output suitable for writing images
[ "Put", "together", "an", "image", ".", "Defined", "by", "a", "list", "of", "layers", "with", "RGBA", "colors" ]
6eb4082be9dffa9570e4ceaa06d97845eac4c006
https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/interface/__init__.py#L147-L197
train
luhnmod10/python
luhnmod10/__init__.py
valid
def valid(number): """ Returns true if the number string is luhn valid, and false otherwise. The number string passed to the function must contain only numeric characters otherwise behavior is undefined. """ checksum = 0 number_len = len(number) offset = ord('0') i = number_len - 1 while i >= 0: n = ord(number[i]) - offset checksum += n i -= 2 i = number_len - 2 while i >= 0: n = ord(number[i]) - offset n *= 2 if n > 9: n -= 9 checksum += n i -= 2 return checksum%10 == 0
python
def valid(number): """ Returns true if the number string is luhn valid, and false otherwise. The number string passed to the function must contain only numeric characters otherwise behavior is undefined. """ checksum = 0 number_len = len(number) offset = ord('0') i = number_len - 1 while i >= 0: n = ord(number[i]) - offset checksum += n i -= 2 i = number_len - 2 while i >= 0: n = ord(number[i]) - offset n *= 2 if n > 9: n -= 9 checksum += n i -= 2 return checksum%10 == 0
[ "def", "valid", "(", "number", ")", ":", "checksum", "=", "0", "number_len", "=", "len", "(", "number", ")", "offset", "=", "ord", "(", "'0'", ")", "i", "=", "number_len", "-", "1", "while", "i", ">=", "0", ":", "n", "=", "ord", "(", "number", "[", "i", "]", ")", "-", "offset", "checksum", "+=", "n", "i", "-=", "2", "i", "=", "number_len", "-", "2", "while", "i", ">=", "0", ":", "n", "=", "ord", "(", "number", "[", "i", "]", ")", "-", "offset", "n", "*=", "2", "if", "n", ">", "9", ":", "n", "-=", "9", "checksum", "+=", "n", "i", "-=", "2", "return", "checksum", "%", "10", "==", "0" ]
Returns true if the number string is luhn valid, and false otherwise. The number string passed to the function must contain only numeric characters otherwise behavior is undefined.
[ "Returns", "true", "if", "the", "number", "string", "is", "luhn", "valid", "and", "false", "otherwise", ".", "The", "number", "string", "passed", "to", "the", "function", "must", "contain", "only", "numeric", "characters", "otherwise", "behavior", "is", "undefined", "." ]
7cd1e9e4029dd364a10435bd80ed48a2cc180491
https://github.com/luhnmod10/python/blob/7cd1e9e4029dd364a10435bd80ed48a2cc180491/luhnmod10/__init__.py#L1-L27
train
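valid() walks the digits right to left, adding the odd-position digits as-is and doubling the even-position ones (subtracting 9 when the doubled value exceeds 9), then accepts the string when the checksum is a multiple of 10. A quick self-check; the import path mirrors luhnmod10/__init__.py, and the input must contain digits only.

from luhnmod10 import valid

assert valid('79927398713')           # textbook Luhn example, checksum 70
assert valid('4111111111111111')      # common test card number, luhn-valid
assert not valid('4111111111111112')  # perturbed final digit breaks the checksum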
IS-ENES-Data/esgf-pid
esgfpid/connector.py
Connector.create_publication_assistant
def create_publication_assistant(self, **args): ''' Create an assistant for a dataset that allows to make PID requests for the dataset and all of its files. :param drs_id: Mandatory. The dataset id of the dataset to be published. :param version_number: Mandatory. The version number of the dataset to be published. :param is_replica: Mandatory. Flag to indicate whether the dataset is a replica. .. note:: If the replica flag is set to False, the publication may still be considered a replica by the consuming servlet, namely if the dataset was already published at a different host. For this, please refer to the consumer documentation. :return: A publication assistant which provides all necessary methods to publish a dataset and its files. ''' # Check args logdebug(LOGGER, 'Creating publication assistant..') mandatory_args = ['drs_id', 'version_number', 'is_replica'] esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args) # Check if service path is given if self.__thredds_service_path is None: msg = 'No thredds_service_path given (but it is mandatory for publication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for publication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Check if solr has access: if self.__coupler.is_solr_switched_off(): pass # solr access not mandatory anymore # Create publication assistant assistant = esgfpid.assistant.publish.DatasetPublicationAssistant( drs_id=args['drs_id'], version_number=args['version_number'], thredds_service_path=self.__thredds_service_path, data_node=self.__data_node, prefix=self.prefix, coupler=self.__coupler, is_replica=args['is_replica'], consumer_solr_url=self.__consumer_solr_url # may be None ) logdebug(LOGGER, 'Creating publication assistant.. done') return assistant
python
def create_publication_assistant(self, **args): ''' Create an assistant for a dataset that allows to make PID requests for the dataset and all of its files. :param drs_id: Mandatory. The dataset id of the dataset to be published. :param version_number: Mandatory. The version number of the dataset to be published. :param is_replica: Mandatory. Flag to indicate whether the dataset is a replica. .. note:: If the replica flag is set to False, the publication may still be considered a replica by the consuming servlet, namely if the dataset was already published at a different host. For this, please refer to the consumer documentation. :return: A publication assistant which provides all necessary methods to publish a dataset and its files. ''' # Check args logdebug(LOGGER, 'Creating publication assistant..') mandatory_args = ['drs_id', 'version_number', 'is_replica'] esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args) # Check if service path is given if self.__thredds_service_path is None: msg = 'No thredds_service_path given (but it is mandatory for publication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for publication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Check if solr has access: if self.__coupler.is_solr_switched_off(): pass # solr access not mandatory anymore # Create publication assistant assistant = esgfpid.assistant.publish.DatasetPublicationAssistant( drs_id=args['drs_id'], version_number=args['version_number'], thredds_service_path=self.__thredds_service_path, data_node=self.__data_node, prefix=self.prefix, coupler=self.__coupler, is_replica=args['is_replica'], consumer_solr_url=self.__consumer_solr_url # may be None ) logdebug(LOGGER, 'Creating publication assistant.. done') return assistant
[ "def", "create_publication_assistant", "(", "self", ",", "*", "*", "args", ")", ":", "# Check args", "logdebug", "(", "LOGGER", ",", "'Creating publication assistant..'", ")", "mandatory_args", "=", "[", "'drs_id'", ",", "'version_number'", ",", "'is_replica'", "]", "esgfpid", ".", "utils", ".", "check_presence_of_mandatory_args", "(", "args", ",", "mandatory_args", ")", "# Check if service path is given", "if", "self", ".", "__thredds_service_path", "is", "None", ":", "msg", "=", "'No thredds_service_path given (but it is mandatory for publication)'", "logwarn", "(", "LOGGER", ",", "msg", ")", "raise", "esgfpid", ".", "exceptions", ".", "ArgumentError", "(", "msg", ")", "# Check if data node is given", "if", "self", ".", "__data_node", "is", "None", ":", "msg", "=", "'No data_node given (but it is mandatory for publication)'", "logwarn", "(", "LOGGER", ",", "msg", ")", "raise", "esgfpid", ".", "exceptions", ".", "ArgumentError", "(", "msg", ")", "# Check if solr has access:", "if", "self", ".", "__coupler", ".", "is_solr_switched_off", "(", ")", ":", "pass", "# solr access not mandatory anymore", "# Create publication assistant", "assistant", "=", "esgfpid", ".", "assistant", ".", "publish", ".", "DatasetPublicationAssistant", "(", "drs_id", "=", "args", "[", "'drs_id'", "]", ",", "version_number", "=", "args", "[", "'version_number'", "]", ",", "thredds_service_path", "=", "self", ".", "__thredds_service_path", ",", "data_node", "=", "self", ".", "__data_node", ",", "prefix", "=", "self", ".", "prefix", ",", "coupler", "=", "self", ".", "__coupler", ",", "is_replica", "=", "args", "[", "'is_replica'", "]", ",", "consumer_solr_url", "=", "self", ".", "__consumer_solr_url", "# may be None", ")", "logdebug", "(", "LOGGER", ",", "'Creating publication assistant.. done'", ")", "return", "assistant" ]
Create an assistant for a dataset that allows to make PID requests for the dataset and all of its files. :param drs_id: Mandatory. The dataset id of the dataset to be published. :param version_number: Mandatory. The version number of the dataset to be published. :param is_replica: Mandatory. Flag to indicate whether the dataset is a replica. .. note:: If the replica flag is set to False, the publication may still be considered a replica by the consuming servlet, namely if the dataset was already published at a different host. For this, please refer to the consumer documentation. :return: A publication assistant which provides all necessary methods to publish a dataset and its files.
[ "Create", "an", "assistant", "for", "a", "dataset", "that", "allows", "to", "make", "PID", "requests", "for", "the", "dataset", "and", "all", "of", "its", "files", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L287-L341
train
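create_publication_assistant bails out unless the connector was initialized with both a thredds_service_path and a data_node, then returns a DatasetPublicationAssistant bound to the given drs_id, version_number and replica flag. A sketch of the call; the error messages above imply data_node and thredds_service_path are init-time settings, but the full Connector constructor signature is not shown here and the remaining keywords are omitted as assumptions.

import esgfpid

# Constructor shown only in outline: data_node and thredds_service_path are implied
# by the error messages above; prefix, messaging credentials, etc. are omitted here.
connector = esgfpid.Connector(
    data_node='esgf-data.example.org',
    thredds_service_path='/thredds/fileServer/',
    # ... other mandatory init arguments assumed ...
)
assistant = connector.create_publication_assistant(
    drs_id='cmip6.MIPEra.Institution.Model.expid.r1i1p1f1',  # placeholder dataset id
    version_number=20190101,                                 # placeholder version
    is_replica=False,                                        # original publication, not a replica
)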
IS-ENES-Data/esgf-pid
esgfpid/connector.py
Connector.unpublish_one_version
def unpublish_one_version(self, **args): ''' Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node. Either the handle or the pair of drs_id and version_number have to be provided, otherwise an exception will occur. The consumer will of course check the PID request message's timestamp with the timestamp of the last publication, so that republications in the mean time are not unpublished. The unpublication of the files is included in this method. :param handle: Optional. The handle of the dataset to be unpublished. :param drs_id: Optional. The dataset id of the dataset to be unpublished. :param version_number: Optional. The version number of the dataset to be unpublished. :raises: ArgumentError: If not enough arguments are passed to identify the dataset, or if no data node was specified during library init. ''' # Check args optional_args = ['handle', 'drs_id', 'version_number'] esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for unpublication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Unpublish assistant = esgfpid.assistant.unpublish.AssistantOneVersion( drs_id = args['drs_id'], data_node = self.__data_node, prefix=self.prefix, coupler=self.__coupler, message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string() ) assistant.unpublish_one_dataset_version( handle = args['handle'], version_number = args['version_number'] )
python
def unpublish_one_version(self, **args): ''' Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node. Either the handle or the pair of drs_id and version_number have to be provided, otherwise an exception will occur. The consumer will of course check the PID request message's timestamp with the timestamp of the last publication, so that republications in the mean time are not unpublished. The unpublication of the files is included in this method. :param handle: Optional. The handle of the dataset to be unpublished. :param drs_id: Optional. The dataset id of the dataset to be unpublished. :param version_number: Optional. The version number of the dataset to be unpublished. :raises: ArgumentError: If not enough arguments are passed to identify the dataset, or if no data node was specified during library init. ''' # Check args optional_args = ['handle', 'drs_id', 'version_number'] esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for unpublication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Unpublish assistant = esgfpid.assistant.unpublish.AssistantOneVersion( drs_id = args['drs_id'], data_node = self.__data_node, prefix=self.prefix, coupler=self.__coupler, message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string() ) assistant.unpublish_one_dataset_version( handle = args['handle'], version_number = args['version_number'] )
[ "def", "unpublish_one_version", "(", "self", ",", "*", "*", "args", ")", ":", "# Check args", "optional_args", "=", "[", "'handle'", ",", "'drs_id'", ",", "'version_number'", "]", "esgfpid", ".", "utils", ".", "add_missing_optional_args_with_value_none", "(", "args", ",", "optional_args", ")", "# Check if data node is given", "if", "self", ".", "__data_node", "is", "None", ":", "msg", "=", "'No data_node given (but it is mandatory for unpublication)'", "logwarn", "(", "LOGGER", ",", "msg", ")", "raise", "esgfpid", ".", "exceptions", ".", "ArgumentError", "(", "msg", ")", "# Unpublish", "assistant", "=", "esgfpid", ".", "assistant", ".", "unpublish", ".", "AssistantOneVersion", "(", "drs_id", "=", "args", "[", "'drs_id'", "]", ",", "data_node", "=", "self", ".", "__data_node", ",", "prefix", "=", "self", ".", "prefix", ",", "coupler", "=", "self", ".", "__coupler", ",", "message_timestamp", "=", "esgfpid", ".", "utils", ".", "get_now_utc_as_formatted_string", "(", ")", ")", "assistant", ".", "unpublish_one_dataset_version", "(", "handle", "=", "args", "[", "'handle'", "]", ",", "version_number", "=", "args", "[", "'version_number'", "]", ")" ]
Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node. Either the handle or the pair of drs_id and version_number have to be provided, otherwise an exception will occur. The consumer will of course check the PID request message's timestamp with the timestamp of the last publication, so that republications in the mean time are not unpublished. The unpublication of the files is included in this method. :param handle: Optional. The handle of the dataset to be unpublished. :param drs_id: Optional. The dataset id of the dataset to be unpublished. :param version_number: Optional. The version number of the dataset to be unpublished. :raises: ArgumentError: If not enough arguments are passed to identify the dataset, or if no data node was specified during library init.
[ "Sends", "a", "PID", "update", "request", "for", "the", "unpublication", "of", "one", "version", "of", "a", "dataset", "currently", "published", "at", "the", "given", "data", "node", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L351-L401
train
IS-ENES-Data/esgf-pid
esgfpid/connector.py
Connector.unpublish_all_versions
def unpublish_all_versions(self, **args): ''' Sends a PID update request for the unpublication of all versions of a dataset currently published at the given data node. If the library has solr access, it will try to find all the dataset versions and their handles from solr, and send individual messages for each version. Otherwise, one message is sent, and the queue consuming servlet has to identify the relevant versions, also making sure not to unpublish any versions that may have been republished in the meantime. :param drs_id: Dataset id of the dataset to be unpublished. :raises: ArgumentError: If the data node was not provided at library initialization. ''' # Check args mandatory_args = ['drs_id'] esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for publication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Check if solr has access: if self.__coupler.is_solr_switched_off(): msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.' logdebug(LOGGER, msg) #raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library') # Unpublish assistant = esgfpid.assistant.unpublish.AssistantAllVersions( drs_id = args['drs_id'], data_node = self.__data_node, prefix=self.prefix, coupler=self.__coupler, message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(), consumer_solr_url = self.__consumer_solr_url # may be None ) assistant.unpublish_all_dataset_versions()
python
def unpublish_all_versions(self, **args): ''' Sends a PID update request for the unpublication of all versions of a dataset currently published at the given data node. If the library has solr access, it will try to find all the dataset versions and their handles from solr, and send individual messages for each version. Otherwise, one message is sent, and the queue consuming servlet has to identify the relevant versions, also making sure not to unpublish any versions that may have been republished in the meantime. :param drs_id: Dataset id of the dataset to be unpublished. :raises: ArgumentError: If the data node was not provided at library initialization. ''' # Check args mandatory_args = ['drs_id'] esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for publication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Check if solr has access: if self.__coupler.is_solr_switched_off(): msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.' logdebug(LOGGER, msg) #raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library') # Unpublish assistant = esgfpid.assistant.unpublish.AssistantAllVersions( drs_id = args['drs_id'], data_node = self.__data_node, prefix=self.prefix, coupler=self.__coupler, message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(), consumer_solr_url = self.__consumer_solr_url # may be None ) assistant.unpublish_all_dataset_versions()
[ "def", "unpublish_all_versions", "(", "self", ",", "*", "*", "args", ")", ":", "# Check args", "mandatory_args", "=", "[", "'drs_id'", "]", "esgfpid", ".", "utils", ".", "check_presence_of_mandatory_args", "(", "args", ",", "mandatory_args", ")", "# Check if data node is given", "if", "self", ".", "__data_node", "is", "None", ":", "msg", "=", "'No data_node given (but it is mandatory for publication)'", "logwarn", "(", "LOGGER", ",", "msg", ")", "raise", "esgfpid", ".", "exceptions", ".", "ArgumentError", "(", "msg", ")", "# Check if solr has access:", "if", "self", ".", "__coupler", ".", "is_solr_switched_off", "(", ")", ":", "msg", "=", "'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'", "logdebug", "(", "LOGGER", ",", "msg", ")", "#raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library')", "# Unpublish", "assistant", "=", "esgfpid", ".", "assistant", ".", "unpublish", ".", "AssistantAllVersions", "(", "drs_id", "=", "args", "[", "'drs_id'", "]", ",", "data_node", "=", "self", ".", "__data_node", ",", "prefix", "=", "self", ".", "prefix", ",", "coupler", "=", "self", ".", "__coupler", ",", "message_timestamp", "=", "esgfpid", ".", "utils", ".", "get_now_utc_as_formatted_string", "(", ")", ",", "consumer_solr_url", "=", "self", ".", "__consumer_solr_url", "# may be None", ")", "assistant", ".", "unpublish_all_dataset_versions", "(", ")" ]
Sends a PID update request for the unpublication of all versions of a dataset currently published at the given data node. If the library has solr access, it will try to find all the dataset versions and their handles from solr, and send individual messages for each version. Otherwise, one message is sent, and the queue consuming servlet has to identify the relevant versions, also making sure not to unpublish any versions that may have been republished in the meantime. :param drs_id: Dataset id of the dataset to be unpublished. :raises: ArgumentError: If the data node was not provided at library initialization.
[ "Sends", "a", "PID", "update", "request", "for", "the", "unpublication", "of", "all", "versions", "of", "a", "dataset", "currently", "published", "at", "the", "given", "data", "node", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L403-L446
train
IS-ENES-Data/esgf-pid
esgfpid/connector.py
Connector.add_errata_ids
def add_errata_ids(self, **args): ''' Add errata ids to a dataset handle record. To call this method, you do not need to provide the PID of the dataset. Instead, the PID string is derived from the dataset id and the version number. :param errata_ids: Mandatory. A list of errata ids (strings) to be added to the handle record. :param drs_id: Mandatory. The dataset id of the dataset to whose handle record the errata ids are to be added. (This is needed because the handle is found by making a hash over dataset id and version number). :param version_number: Mandatory. The version number of the dataset to whose handle record the errata ids are to be added. (This is needed because the handle is found by making a hash over dataset id and version number). ''' # Check args: mandatory_args = ['drs_id', 'version_number', 'errata_ids'] esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args) esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args) # Perform metadata update assistant = esgfpid.assistant.errata.ErrataAssistant( coupler=self.__coupler, prefix=self.prefix ) assistant.add_errata_ids( drs_id=args['drs_id'], version_number=args['version_number'], errata_ids=args['errata_ids'] )
python
def add_errata_ids(self, **args): ''' Add errata ids to a dataset handle record. To call this method, you do not need to provide the PID of the dataset. Instead, the PID string is derived from the dataset id and the version number. :param errata_ids: Mandatory. A list of errata ids (strings) to be added to the handle record. :param drs_id: Mandatory. The dataset id of the dataset to whose handle record the errata ids are to be added. (This is needed because the handle is found by making a hash over dataset id and version number). :param version_number: Mandatory. The version number of the dataset to whose handle record the errata ids are to be added. (This is needed because the handle is found by making a hash over dataset id and version number). ''' # Check args: mandatory_args = ['drs_id', 'version_number', 'errata_ids'] esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args) esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args) # Perform metadata update assistant = esgfpid.assistant.errata.ErrataAssistant( coupler=self.__coupler, prefix=self.prefix ) assistant.add_errata_ids( drs_id=args['drs_id'], version_number=args['version_number'], errata_ids=args['errata_ids'] )
[ "def", "add_errata_ids", "(", "self", ",", "*", "*", "args", ")", ":", "# Check args:", "mandatory_args", "=", "[", "'drs_id'", ",", "'version_number'", ",", "'errata_ids'", "]", "esgfpid", ".", "utils", ".", "check_presence_of_mandatory_args", "(", "args", ",", "mandatory_args", ")", "esgfpid", ".", "utils", ".", "check_noneness_of_mandatory_args", "(", "args", ",", "mandatory_args", ")", "# Perform metadata update", "assistant", "=", "esgfpid", ".", "assistant", ".", "errata", ".", "ErrataAssistant", "(", "coupler", "=", "self", ".", "__coupler", ",", "prefix", "=", "self", ".", "prefix", ")", "assistant", ".", "add_errata_ids", "(", "drs_id", "=", "args", "[", "'drs_id'", "]", ",", "version_number", "=", "args", "[", "'version_number'", "]", ",", "errata_ids", "=", "args", "[", "'errata_ids'", "]", ")" ]
Add errata ids to a dataset handle record. To call this method, you do not need to provide the PID of the dataset. Instead, the PID string is derived from the dataset id and the version number. :param errata_ids: Mandatory. A list of errata ids (strings) to be added to the handle record. :param drs_id: Mandatory. The dataset id of the dataset to whose handle record the errata ids are to be added. (This is needed because the handle is found by making a hash over dataset id and version number). :param version_number: Mandatory. The version number of the dataset to whose handle record the errata ids are to be added. (This is needed because the handle is found by making a hash over dataset id and version number).
[ "Add", "errata", "ids", "to", "a", "dataset", "handle", "record", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L448-L484
train
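A usage sketch for add_errata_ids, again assuming an already-initialized connector; all three keyword arguments are mandatory, and the dataset id, version number and errata ids shown here are made up:

def flag_errata(connector):
    # The handle is derived from a hash over drs_id and version_number,
    # so no PID string needs to be passed explicitly.
    connector.add_errata_ids(
        drs_id='cmip6.example.dataset',                 # hypothetical dataset id
        version_number='20190101',                      # hypothetical version
        errata_ids=['errata-uuid-1', 'errata-uuid-2'],  # hypothetical errata ids
    )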
IS-ENES-Data/esgf-pid
esgfpid/connector.py
Connector.make_handle_from_drsid_and_versionnumber
def make_handle_from_drsid_and_versionnumber(self, **args): ''' Create a handle string for a specific dataset, based on its dataset id and version number, and the prefix passed to the library at initializing. :param drs_id: The dataset id of the dataset. :param version_number: The version number of the dataset (as a string or integer, this does not matter) :return: A handle string (e.g. "hdl:21.14100/abcxyzfoo") ''' args['prefix'] = self.prefix return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
python
def make_handle_from_drsid_and_versionnumber(self, **args): ''' Create a handle string for a specific dataset, based on its dataset id and version number, and the prefix passed to the library at initializing. :param drs_id: The dataset id of the dataset. :param version_number: The version number of the dataset (as a string or integer, this does not matter) :return: A handle string (e.g. "hdl:21.14100/abcxyzfoo") ''' args['prefix'] = self.prefix return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
[ "def", "make_handle_from_drsid_and_versionnumber", "(", "self", ",", "*", "*", "args", ")", ":", "args", "[", "'prefix'", "]", "=", "self", ".", "prefix", "return", "esgfpid", ".", "utils", ".", "make_handle_from_drsid_and_versionnumber", "(", "*", "*", "args", ")" ]
Create a handle string for a specific dataset, based on its dataset id and version number, and the prefix passed to the library at initialization. :param drs_id: The dataset id of the dataset. :param version_number: The version number of the dataset (as a string or integer, this does not matter) :return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
[ "Create", "a", "handle", "string", "for", "a", "specific", "dataset", "based", "on", "its", "dataset", "id", "and", "version", "number", "and", "the", "prefix", "passed", "to", "the", "library", "at", "initializing", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L605-L617
train
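A sketch of deriving a handle string with the method above; the dataset id and version number are placeholders, and the resulting handle depends on whatever prefix was passed to the library at initialization:

def show_handle(connector):
    handle = connector.make_handle_from_drsid_and_versionnumber(
        drs_id='cmip6.example.dataset',   # hypothetical dataset id
        version_number=20190101,          # int or str are both accepted
    )
    print(handle)   # e.g. 'hdl:21.14100/...' when the prefix is 21.14100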
portfors-lab/sparkle
sparkle/gui/abstract_drag_view.py
AbstractDragView.mousePressEvent
def mousePressEvent(self, event): """saves the drag position, so we know when a drag should be initiated""" super(AbstractDragView, self).mousePressEvent(event) self.dragStartPosition = event.pos()
python
def mousePressEvent(self, event): """saves the drag position, so we know when a drag should be initiated""" super(AbstractDragView, self).mousePressEvent(event) self.dragStartPosition = event.pos()
[ "def", "mousePressEvent", "(", "self", ",", "event", ")", ":", "super", "(", "AbstractDragView", ",", "self", ")", ".", "mousePressEvent", "(", "event", ")", "self", ".", "dragStartPosition", "=", "event", ".", "pos", "(", ")" ]
saves the drag position, so we know when a drag should be initiated
[ "saves", "the", "drag", "position", "so", "we", "know", "when", "a", "drag", "should", "be", "initiated" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/abstract_drag_view.py#L56-L59
train
portfors-lab/sparkle
sparkle/gui/abstract_drag_view.py
AbstractDragView.dragLeaveEvent
def dragLeaveEvent(self, event): """Clears drop cursor line""" super(AbstractDragView, self).dragLeaveEvent(event) self.dragline = None self.viewport().update() event.accept()
python
def dragLeaveEvent(self, event): """Clears drop cursor line""" super(AbstractDragView, self).dragLeaveEvent(event) self.dragline = None self.viewport().update() event.accept()
[ "def", "dragLeaveEvent", "(", "self", ",", "event", ")", ":", "super", "(", "AbstractDragView", ",", "self", ")", ".", "dragLeaveEvent", "(", "event", ")", "self", ".", "dragline", "=", "None", "self", ".", "viewport", "(", ")", ".", "update", "(", ")", "event", ".", "accept", "(", ")" ]
Clears drop cursor line
[ "Clears", "drop", "cursor", "line" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/abstract_drag_view.py#L135-L140
train
portfors-lab/sparkle
sparkle/gui/abstract_drag_view.py
AbstractDragView.childEvent
def childEvent(self, event): """Catches items dropped off edge of view, reinserts at original position :param event: contains event parameters for child object events :type event: :qtdoc:`QChildEvent` """ super(AbstractDragView, self).childEvent(event) if event.type() == QtCore.QEvent.ChildRemoved: # hack to catch drop offs if self.originalPos is not None: selected = self.limbo_component self.model().insertItem(self.originalPos, selected) self.originalPos = None self.dragStartPosition = None self.viewport().update()
python
def childEvent(self, event): """Catches items dropped off edge of view, reinserts at original position :param event: contains event parameters for child object events :type event: :qtdoc:`QChildEvent` """ super(AbstractDragView, self).childEvent(event) if event.type() == QtCore.QEvent.ChildRemoved: # hack to catch drop offs if self.originalPos is not None: selected = self.limbo_component self.model().insertItem(self.originalPos, selected) self.originalPos = None self.dragStartPosition = None self.viewport().update()
[ "def", "childEvent", "(", "self", ",", "event", ")", ":", "super", "(", "AbstractDragView", ",", "self", ")", ".", "childEvent", "(", "event", ")", "if", "event", ".", "type", "(", ")", "==", "QtCore", ".", "QEvent", ".", "ChildRemoved", ":", "# hack to catch drop offs ", "if", "self", ".", "originalPos", "is", "not", "None", ":", "selected", "=", "self", ".", "limbo_component", "self", ".", "model", "(", ")", ".", "insertItem", "(", "self", ".", "originalPos", ",", "selected", ")", "self", ".", "originalPos", "=", "None", "self", ".", "dragStartPosition", "=", "None", "self", ".", "viewport", "(", ")", ".", "update", "(", ")" ]
Catches items dropped off edge of view, reinserts at original position :param event: contains event parameters for child object events :type event: :qtdoc:`QChildEvent`
[ "Catches", "items", "dropped", "off", "edge", "of", "view", "reinserts", "at", "original", "position" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/abstract_drag_view.py#L170-L185
train
portfors-lab/sparkle
sparkle/gui/abstract_drag_view.py
AbstractDragView.mouseReleaseEvent
def mouseReleaseEvent(self, event): """Resets the drag start position""" super(AbstractDragView, self).mouseReleaseEvent(event) self.dragStartPosition = None
python
def mouseReleaseEvent(self, event): """Resets the drag start position""" super(AbstractDragView, self).mouseReleaseEvent(event) self.dragStartPosition = None
[ "def", "mouseReleaseEvent", "(", "self", ",", "event", ")", ":", "super", "(", "AbstractDragView", ",", "self", ")", ".", "mouseReleaseEvent", "(", "event", ")", "self", ".", "dragStartPosition", "=", "None" ]
Resets the drag start position
[ "Resets", "the", "drag", "start", "position" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/abstract_drag_view.py#L187-L190
train
dpa-newslab/livebridge
livebridge/storages/mongo.py
MongoStorage.setup
async def setup(self): """Setting up MongoDB collections, if they not exist.""" try: db = await self.db collections = await db.list_collection_names() created = False if self.table_name not in collections: # create table logger.info("Creating MongoDB collection [{}]".format(self.table_name)) await db.create_collection(self.table_name) await db[self.table_name].create_index([("target_id", DESCENDING), ("post_id", DESCENDING)]) created = True # create control collection if not already created. if self.control_table_name and self.control_table_name not in collections: # create table logger.info("Creating MongoDB control data collection [{}]".format(self.control_table_name)) await db.create_collection(self.control_table_name) created = True return created except Exception as exc: logger.error("[DB] Error when setting up MongoDB collections: {}".format(exc)) return False
python
async def setup(self): """Setting up MongoDB collections, if they not exist.""" try: db = await self.db collections = await db.list_collection_names() created = False if self.table_name not in collections: # create table logger.info("Creating MongoDB collection [{}]".format(self.table_name)) await db.create_collection(self.table_name) await db[self.table_name].create_index([("target_id", DESCENDING), ("post_id", DESCENDING)]) created = True # create control collection if not already created. if self.control_table_name and self.control_table_name not in collections: # create table logger.info("Creating MongoDB control data collection [{}]".format(self.control_table_name)) await db.create_collection(self.control_table_name) created = True return created except Exception as exc: logger.error("[DB] Error when setting up MongoDB collections: {}".format(exc)) return False
[ "async", "def", "setup", "(", "self", ")", ":", "try", ":", "db", "=", "await", "self", ".", "db", "collections", "=", "await", "db", ".", "list_collection_names", "(", ")", "created", "=", "False", "if", "self", ".", "table_name", "not", "in", "collections", ":", "# create table", "logger", ".", "info", "(", "\"Creating MongoDB collection [{}]\"", ".", "format", "(", "self", ".", "table_name", ")", ")", "await", "db", ".", "create_collection", "(", "self", ".", "table_name", ")", "await", "db", "[", "self", ".", "table_name", "]", ".", "create_index", "(", "[", "(", "\"target_id\"", ",", "DESCENDING", ")", ",", "(", "\"post_id\"", ",", "DESCENDING", ")", "]", ")", "created", "=", "True", "# create control collection if not already created.", "if", "self", ".", "control_table_name", "and", "self", ".", "control_table_name", "not", "in", "collections", ":", "# create table", "logger", ".", "info", "(", "\"Creating MongoDB control data collection [{}]\"", ".", "format", "(", "self", ".", "control_table_name", ")", ")", "await", "db", ".", "create_collection", "(", "self", ".", "control_table_name", ")", "created", "=", "True", "return", "created", "except", "Exception", "as", "exc", ":", "logger", ".", "error", "(", "\"[DB] Error when setting up MongoDB collections: {}\"", ".", "format", "(", "exc", ")", ")", "return", "False" ]
Setting up MongoDB collections, if they do not exist.
[ "Setting", "up", "MongoDB", "collections", "if", "they", "not", "exist", "." ]
d930e887faa2f882d15b574f0f1fe4a580d7c5fa
https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/storages/mongo.py#L57-L78
train
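Since setup() is a coroutine, it has to be awaited; a sketch follows, assuming `storage` is a configured MongoStorage instance (its constructor arguments are not part of this row):

import asyncio

async def ensure_collections(storage):
    # setup() returns True only when it actually created a collection;
    # False means the collections already existed or an error was logged.
    created = await storage.setup()
    print('created collections' if created else 'nothing to do')

# asyncio.run(ensure_collections(storage))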
portfors-lab/sparkle
sparkle/gui/stim/component_detail.py
ComponentsDetailWidget.setDoc
def setDoc(self, docs): """Sets the documentation to display :param docs: a list of the stimuli doc, which are dicts :type docs: list<dict> """ # sort stim by start time docs = sorted(docs, key=lambda k: k['start_s']) for doc in docs: stim_type = doc['stim_type'] if not stim_type in self.displayTable: continue if not stim_type in self.displayTable[stim_type]: continue display_attributes = self.displayTable.get(stim_type, self.defaultAttributes) self.lyt.addWidget(ComponentDetailFrame(doc, display_attributes))
python
def setDoc(self, docs): """Sets the documentation to display :param docs: a list of the stimuli doc, which are dicts :type docs: list<dict> """ # sort stim by start time docs = sorted(docs, key=lambda k: k['start_s']) for doc in docs: stim_type = doc['stim_type'] if not stim_type in self.displayTable: continue if not stim_type in self.displayTable[stim_type]: continue display_attributes = self.displayTable.get(stim_type, self.defaultAttributes) self.lyt.addWidget(ComponentDetailFrame(doc, display_attributes))
[ "def", "setDoc", "(", "self", ",", "docs", ")", ":", "# sort stim by start time", "docs", "=", "sorted", "(", "docs", ",", "key", "=", "lambda", "k", ":", "k", "[", "'start_s'", "]", ")", "for", "doc", "in", "docs", ":", "stim_type", "=", "doc", "[", "'stim_type'", "]", "if", "not", "stim_type", "in", "self", ".", "displayTable", ":", "continue", "if", "not", "stim_type", "in", "self", ".", "displayTable", "[", "stim_type", "]", ":", "continue", "display_attributes", "=", "self", ".", "displayTable", ".", "get", "(", "stim_type", ",", "self", ".", "defaultAttributes", ")", "self", ".", "lyt", ".", "addWidget", "(", "ComponentDetailFrame", "(", "doc", ",", "display_attributes", ")", ")" ]
Sets the documentation to display :param docs: a list of the stimuli doc, which are dicts :type docs: list<dict>
[ "Sets", "the", "documentation", "to", "display" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_detail.py#L38-L55
train
portfors-lab/sparkle
sparkle/gui/stim/component_detail.py
ComponentsDetailSelector.setComponents
def setComponents(self, components): """Clears and sets the components contained in this widget :param components: list of documentation for subclasses of AbStractStimulusComponents :type Components: list<dict> """ layout = self.layout() for comp in components: attrWidget = ComponentAttributerChecker(comp) layout.addWidget(attrWidget)
python
def setComponents(self, components): """Clears and sets the components contained in this widget :param components: list of documentation for subclasses of AbStractStimulusComponents :type Components: list<dict> """ layout = self.layout() for comp in components: attrWidget = ComponentAttributerChecker(comp) layout.addWidget(attrWidget)
[ "def", "setComponents", "(", "self", ",", "components", ")", ":", "layout", "=", "self", ".", "layout", "(", ")", "for", "comp", "in", "components", ":", "attrWidget", "=", "ComponentAttributerChecker", "(", "comp", ")", "layout", ".", "addWidget", "(", "attrWidget", ")" ]
Clears and sets the components contained in this widget :param components: list of documentation for subclasses of AbStractStimulusComponents :type Components: list<dict>
[ "Clears", "and", "sets", "the", "components", "contained", "in", "this", "widget" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_detail.py#L96-L105
train
portfors-lab/sparkle
sparkle/gui/stim/component_detail.py
ComponentsDetailSelector.setCheckedDetails
def setCheckedDetails(self, checked): """Sets which components are checked :param checked: dictionary of stimtype:list<attribute names> for which components and their attributes should be checked :type checked: dict """ layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() if w.stimType in checked: w.setChecked(checked[w.stimType])
python
def setCheckedDetails(self, checked): """Sets which components are checked :param checked: dictionary of stimtype:list<attribute names> for which components and their attributes should be checked :type checked: dict """ layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() if w.stimType in checked: w.setChecked(checked[w.stimType])
[ "def", "setCheckedDetails", "(", "self", ",", "checked", ")", ":", "layout", "=", "self", ".", "layout", "(", ")", "for", "i", "in", "range", "(", "layout", ".", "count", "(", ")", ")", ":", "w", "=", "layout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", "if", "w", ".", "stimType", "in", "checked", ":", "w", ".", "setChecked", "(", "checked", "[", "w", ".", "stimType", "]", ")" ]
Sets which components are checked :param checked: dictionary of stimtype:list<attribute names> for which components and their attributes should be checked :type checked: dict
[ "Sets", "which", "components", "are", "checked" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_detail.py#L107-L117
train
portfors-lab/sparkle
sparkle/gui/stim/component_detail.py
ComponentsDetailSelector.getCheckedDetails
def getCheckedDetails(self): """Gets the currently checked components and checked attributes :returns: dict -- of members with stimtype:list<attribute names> """ attrs = {} layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() attrs[w.stimType] = w.getChecked() return attrs
python
def getCheckedDetails(self): """Gets the currently checked components and checked attributes :returns: dict -- of members with stimtype:list<attribute names> """ attrs = {} layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() attrs[w.stimType] = w.getChecked() return attrs
[ "def", "getCheckedDetails", "(", "self", ")", ":", "attrs", "=", "{", "}", "layout", "=", "self", ".", "layout", "(", ")", "for", "i", "in", "range", "(", "layout", ".", "count", "(", ")", ")", ":", "w", "=", "layout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", "attrs", "[", "w", ".", "stimType", "]", "=", "w", ".", "getChecked", "(", ")", "return", "attrs" ]
Gets the currently checked components and checked attributes :returns: dict -- of members with stimtype:list<attribute names>
[ "Gets", "the", "currently", "checked", "components", "and", "checked", "attributes" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_detail.py#L119-L129
train
portfors-lab/sparkle
sparkle/gui/stim/component_detail.py
ComponentAttributerChecker.getChecked
def getChecked(self): """Gets the checked attributes :returns: list<str> -- checked attribute names """ attrs = [] layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() if w.isChecked(): attrs.append(str(w.text())) return attrs
python
def getChecked(self): """Gets the checked attributes :returns: list<str> -- checked attribute names """ attrs = [] layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() if w.isChecked(): attrs.append(str(w.text())) return attrs
[ "def", "getChecked", "(", "self", ")", ":", "attrs", "=", "[", "]", "layout", "=", "self", ".", "layout", "(", ")", "for", "i", "in", "range", "(", "layout", ".", "count", "(", ")", ")", ":", "w", "=", "layout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", "if", "w", ".", "isChecked", "(", ")", ":", "attrs", ".", "append", "(", "str", "(", "w", ".", "text", "(", ")", ")", ")", "return", "attrs" ]
Gets the checked attributes :returns: list<str> -- checked attribute names
[ "Gets", "the", "checked", "attributes" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_detail.py#L165-L176
train
portfors-lab/sparkle
sparkle/gui/qprotocol.py
QProtocolTabelModel.headerData
def headerData(self, section, orientation, role): """Get the Header for the columns in the table Required by view, see :qtdoc:`subclassing<qabstractitemmodel.subclassing>` :param section: column of header to return :type section: int """ if role == QtCore.Qt.DisplayRole: if orientation == QtCore.Qt.Horizontal: return self.headers[section]
python
def headerData(self, section, orientation, role): """Get the Header for the columns in the table Required by view, see :qtdoc:`subclassing<qabstractitemmodel.subclassing>` :param section: column of header to return :type section: int """ if role == QtCore.Qt.DisplayRole: if orientation == QtCore.Qt.Horizontal: return self.headers[section]
[ "def", "headerData", "(", "self", ",", "section", ",", "orientation", ",", "role", ")", ":", "if", "role", "==", "QtCore", ".", "Qt", ".", "DisplayRole", ":", "if", "orientation", "==", "QtCore", ".", "Qt", ".", "Horizontal", ":", "return", "self", ".", "headers", "[", "section", "]" ]
Get the Header for the columns in the table Required by view, see :qtdoc:`subclassing<qabstractitemmodel.subclassing>` :param section: column of header to return :type section: int
[ "Get", "the", "Header", "for", "the", "columns", "in", "the", "table" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/qprotocol.py#L23-L33
train
portfors-lab/sparkle
sparkle/gui/qprotocol.py
ProtocolView.cursor
def cursor(self, pos): """Returns a line at the nearest row split between tests. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>` """ row = self.indexAt(pos).row() if row == -1: row = self.model().rowCount() row_height = self.rowHeight(0) y = row_height*row x = self.width() return QtCore.QLine(0,y,x,y)
python
def cursor(self, pos): """Returns a line at the nearest row split between tests. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>` """ row = self.indexAt(pos).row() if row == -1: row = self.model().rowCount() row_height = self.rowHeight(0) y = row_height*row x = self.width() return QtCore.QLine(0,y,x,y)
[ "def", "cursor", "(", "self", ",", "pos", ")", ":", "row", "=", "self", ".", "indexAt", "(", "pos", ")", ".", "row", "(", ")", "if", "row", "==", "-", "1", ":", "row", "=", "self", ".", "model", "(", ")", ".", "rowCount", "(", ")", "row_height", "=", "self", ".", "rowHeight", "(", "0", ")", "y", "=", "row_height", "*", "row", "x", "=", "self", ".", "width", "(", ")", "return", "QtCore", ".", "QLine", "(", "0", ",", "y", ",", "x", ",", "y", ")" ]
Returns a line at the nearest row split between tests. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>`
[ "Returns", "a", "line", "at", "the", "nearest", "row", "split", "between", "tests", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/qprotocol.py#L211-L222
train
portfors-lab/sparkle
sparkle/gui/qprotocol.py
ProtocolView.mousePressEvent
def mousePressEvent(self, event): """Launches edit of cell if first column clicked, otherwise passes to super class""" index = self.indexAt(event.pos()) if index.isValid(): if index.column() == 0: self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event) else: super(ProtocolView, self).mousePressEvent(event)
python
def mousePressEvent(self, event): """Launches edit of cell if first column clicked, otherwise passes to super class""" index = self.indexAt(event.pos()) if index.isValid(): if index.column() == 0: self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event) else: super(ProtocolView, self).mousePressEvent(event)
[ "def", "mousePressEvent", "(", "self", ",", "event", ")", ":", "index", "=", "self", ".", "indexAt", "(", "event", ".", "pos", "(", ")", ")", "if", "index", ".", "isValid", "(", ")", ":", "if", "index", ".", "column", "(", ")", "==", "0", ":", "self", ".", "edit", "(", "index", ",", "QtGui", ".", "QAbstractItemView", ".", "DoubleClicked", ",", "event", ")", "else", ":", "super", "(", "ProtocolView", ",", "self", ")", ".", "mousePressEvent", "(", "event", ")" ]
Launches edit of cell if first column clicked, otherwise passes to super class
[ "Launches", "edit", "of", "cell", "if", "first", "column", "clicked", "otherwise", "passes", "to", "super", "class" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/qprotocol.py#L224-L231
train
lsst-sqre/sqre-codekit
codekit/cli/github_list_repos.py
run
def run(): """List repos and teams""" args = parse_args() codetools.setup_logging(args.debug) global g g = pygithub.login_github(token_path=args.token_path, token=args.token) if not args.hide: args.hide = [] org = g.get_organization(args.organization) try: repos = list(org.get_repos()) except github.RateLimitExceededException: raise except github.GithubException as e: msg = 'error getting repos' raise pygithub.CaughtOrganizationError(org, e, msg) from None for r in repos: try: teamnames = [t.name for t in r.get_teams() if t.name not in args.hide] except github.RateLimitExceededException: raise except github.GithubException as e: msg = 'error getting teams' raise pygithub.CaughtRepositoryError(r, e, msg) from None maxt = args.maxt if (args.maxt is not None and args.maxt >= 0) else len(teamnames) if args.debug: print("MAXT=", maxt) if args.mint <= len(teamnames) <= maxt: print(r.name.ljust(40) + args.delimiter.join(teamnames))
python
def run(): """List repos and teams""" args = parse_args() codetools.setup_logging(args.debug) global g g = pygithub.login_github(token_path=args.token_path, token=args.token) if not args.hide: args.hide = [] org = g.get_organization(args.organization) try: repos = list(org.get_repos()) except github.RateLimitExceededException: raise except github.GithubException as e: msg = 'error getting repos' raise pygithub.CaughtOrganizationError(org, e, msg) from None for r in repos: try: teamnames = [t.name for t in r.get_teams() if t.name not in args.hide] except github.RateLimitExceededException: raise except github.GithubException as e: msg = 'error getting teams' raise pygithub.CaughtRepositoryError(r, e, msg) from None maxt = args.maxt if (args.maxt is not None and args.maxt >= 0) else len(teamnames) if args.debug: print("MAXT=", maxt) if args.mint <= len(teamnames) <= maxt: print(r.name.ljust(40) + args.delimiter.join(teamnames))
[ "def", "run", "(", ")", ":", "args", "=", "parse_args", "(", ")", "codetools", ".", "setup_logging", "(", "args", ".", "debug", ")", "global", "g", "g", "=", "pygithub", ".", "login_github", "(", "token_path", "=", "args", ".", "token_path", ",", "token", "=", "args", ".", "token", ")", "if", "not", "args", ".", "hide", ":", "args", ".", "hide", "=", "[", "]", "org", "=", "g", ".", "get_organization", "(", "args", ".", "organization", ")", "try", ":", "repos", "=", "list", "(", "org", ".", "get_repos", "(", ")", ")", "except", "github", ".", "RateLimitExceededException", ":", "raise", "except", "github", ".", "GithubException", "as", "e", ":", "msg", "=", "'error getting repos'", "raise", "pygithub", ".", "CaughtOrganizationError", "(", "org", ",", "e", ",", "msg", ")", "from", "None", "for", "r", "in", "repos", ":", "try", ":", "teamnames", "=", "[", "t", ".", "name", "for", "t", "in", "r", ".", "get_teams", "(", ")", "if", "t", ".", "name", "not", "in", "args", ".", "hide", "]", "except", "github", ".", "RateLimitExceededException", ":", "raise", "except", "github", ".", "GithubException", "as", "e", ":", "msg", "=", "'error getting teams'", "raise", "pygithub", ".", "CaughtRepositoryError", "(", "r", ",", "e", ",", "msg", ")", "from", "None", "maxt", "=", "args", ".", "maxt", "if", "(", "args", ".", "maxt", "is", "not", "None", "and", "args", ".", "maxt", ">=", "0", ")", "else", "len", "(", "teamnames", ")", "if", "args", ".", "debug", ":", "print", "(", "\"MAXT=\"", ",", "maxt", ")", "if", "args", ".", "mint", "<=", "len", "(", "teamnames", ")", "<=", "maxt", ":", "print", "(", "r", ".", "name", ".", "ljust", "(", "40", ")", "+", "args", ".", "delimiter", ".", "join", "(", "teamnames", ")", ")" ]
List repos and teams
[ "List", "repos", "and", "teams" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_list_repos.py#L73-L111
train
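The run() entry point above boils down to filtering an organization's repositories by how many non-hidden teams they have; the same loop can be reproduced with plain PyGithub, where the token and organization name below are placeholders:

from github import Github

def list_repos_by_team_count(token, org_name, hide=(), mint=0, maxt=None):
    gh = Github(token)                        # token-based authentication
    org = gh.get_organization(org_name)
    for repo in org.get_repos():
        teams = [t.name for t in repo.get_teams() if t.name not in hide]
        upper = maxt if (maxt is not None and maxt >= 0) else len(teams)
        if mint <= len(teams) <= upper:
            print(repo.name.ljust(40) + ','.join(teams))

# list_repos_by_team_count('ghp_xxx', 'lsst-sqre')   # placeholder values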
sirfoga/pyhal
hal/charts/models.py
SimpleChart.create_bar_chart
def create_bar_chart(self, x_labels, y_values, y_label): """Creates bar char :param x_labels: Names for each variable :param y_values: Values of x labels :param y_label: Label of y axis :return: Bar chart """ self.setup(0.25) ax1 = self.get_ax() ax1.set_xticks(list(range(len(x_labels)))) ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90) plt.ylabel(y_label) x_pos = range(len(x_labels)) plt.bar(x_pos, y_values, align="center") return ax1
python
def create_bar_chart(self, x_labels, y_values, y_label): """Creates bar char :param x_labels: Names for each variable :param y_values: Values of x labels :param y_label: Label of y axis :return: Bar chart """ self.setup(0.25) ax1 = self.get_ax() ax1.set_xticks(list(range(len(x_labels)))) ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90) plt.ylabel(y_label) x_pos = range(len(x_labels)) plt.bar(x_pos, y_values, align="center") return ax1
[ "def", "create_bar_chart", "(", "self", ",", "x_labels", ",", "y_values", ",", "y_label", ")", ":", "self", ".", "setup", "(", "0.25", ")", "ax1", "=", "self", ".", "get_ax", "(", ")", "ax1", ".", "set_xticks", "(", "list", "(", "range", "(", "len", "(", "x_labels", ")", ")", ")", ")", "ax1", ".", "set_xticklabels", "(", "[", "x_labels", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "x_labels", ")", ")", "]", ",", "rotation", "=", "90", ")", "plt", ".", "ylabel", "(", "y_label", ")", "x_pos", "=", "range", "(", "len", "(", "x_labels", ")", ")", "plt", ".", "bar", "(", "x_pos", ",", "y_values", ",", "align", "=", "\"center\"", ")", "return", "ax1" ]
Creates bar chart :param x_labels: Names for each variable :param y_values: Values of x labels :param y_label: Label of y axis :return: Bar chart
[ "Creates", "bar", "char" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/charts/models.py#L48-L66
train
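The method above wraps a standard matplotlib bar chart; a self-contained equivalent without the SimpleChart wrapper, using made-up labels and values, looks roughly like this:

import matplotlib.pyplot as plt

x_labels = ['alpha', 'beta', 'gamma']       # made-up category names
y_values = [3, 7, 5]                        # made-up values

fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.25)            # leave room for rotated labels
ax.set_xticks(range(len(x_labels)))
ax.set_xticklabels(x_labels, rotation=90)
ax.set_ylabel('count')
ax.bar(range(len(x_labels)), y_values, align='center')
plt.show()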
sirfoga/pyhal
hal/charts/models.py
SimpleChart.create_multiple_bar_chart
def create_multiple_bar_chart(self, x_labels, mul_y_values, mul_y_labels, normalize=False): """Creates bar chart with multiple lines :param x_labels: Names for each variable :param mul_y_values: list of values of x labels :param mul_y_labels: list of labels for each y value :param normalize: True iff you want to normalize each y series :return: Bar chart """ self.setup(0.25) ax1 = self.get_ax() ax1.set_xticks(list(range(len(x_labels)))) ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90) y_counts = len(mul_y_values) colors = cm.rainbow(np.linspace(0, 1, y_counts)) # different colors max_bar_width = 0.6 bar_width = max_bar_width / y_counts # width of each bar x_shifts = np.linspace(0, max_bar_width, y_counts) - max_bar_width * 0.5 # center in 0 ax_series = [] for i in range(y_counts): x_pos = range(len(x_labels)) # x points x_pos = np.array(x_pos) + x_shifts[i] # shift for each y series if normalize: # normalize array y_values = normalize_array(mul_y_values[i]) else: y_values = mul_y_values[i] ax_series.append( ax1.bar( x_pos, y_values, width=bar_width, align="center", color=colors[i] ) ) ax1.legend(ax_series, mul_y_labels) return ax1
python
def create_multiple_bar_chart(self, x_labels, mul_y_values, mul_y_labels, normalize=False): """Creates bar chart with multiple lines :param x_labels: Names for each variable :param mul_y_values: list of values of x labels :param mul_y_labels: list of labels for each y value :param normalize: True iff you want to normalize each y series :return: Bar chart """ self.setup(0.25) ax1 = self.get_ax() ax1.set_xticks(list(range(len(x_labels)))) ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90) y_counts = len(mul_y_values) colors = cm.rainbow(np.linspace(0, 1, y_counts)) # different colors max_bar_width = 0.6 bar_width = max_bar_width / y_counts # width of each bar x_shifts = np.linspace(0, max_bar_width, y_counts) - max_bar_width * 0.5 # center in 0 ax_series = [] for i in range(y_counts): x_pos = range(len(x_labels)) # x points x_pos = np.array(x_pos) + x_shifts[i] # shift for each y series if normalize: # normalize array y_values = normalize_array(mul_y_values[i]) else: y_values = mul_y_values[i] ax_series.append( ax1.bar( x_pos, y_values, width=bar_width, align="center", color=colors[i] ) ) ax1.legend(ax_series, mul_y_labels) return ax1
[ "def", "create_multiple_bar_chart", "(", "self", ",", "x_labels", ",", "mul_y_values", ",", "mul_y_labels", ",", "normalize", "=", "False", ")", ":", "self", ".", "setup", "(", "0.25", ")", "ax1", "=", "self", ".", "get_ax", "(", ")", "ax1", ".", "set_xticks", "(", "list", "(", "range", "(", "len", "(", "x_labels", ")", ")", ")", ")", "ax1", ".", "set_xticklabels", "(", "[", "x_labels", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "x_labels", ")", ")", "]", ",", "rotation", "=", "90", ")", "y_counts", "=", "len", "(", "mul_y_values", ")", "colors", "=", "cm", ".", "rainbow", "(", "np", ".", "linspace", "(", "0", ",", "1", ",", "y_counts", ")", ")", "# different colors", "max_bar_width", "=", "0.6", "bar_width", "=", "max_bar_width", "/", "y_counts", "# width of each bar", "x_shifts", "=", "np", ".", "linspace", "(", "0", ",", "max_bar_width", ",", "y_counts", ")", "-", "max_bar_width", "*", "0.5", "# center in 0", "ax_series", "=", "[", "]", "for", "i", "in", "range", "(", "y_counts", ")", ":", "x_pos", "=", "range", "(", "len", "(", "x_labels", ")", ")", "# x points", "x_pos", "=", "np", ".", "array", "(", "x_pos", ")", "+", "x_shifts", "[", "i", "]", "# shift for each y series", "if", "normalize", ":", "# normalize array", "y_values", "=", "normalize_array", "(", "mul_y_values", "[", "i", "]", ")", "else", ":", "y_values", "=", "mul_y_values", "[", "i", "]", "ax_series", ".", "append", "(", "ax1", ".", "bar", "(", "x_pos", ",", "y_values", ",", "width", "=", "bar_width", ",", "align", "=", "\"center\"", ",", "color", "=", "colors", "[", "i", "]", ")", ")", "ax1", ".", "legend", "(", "ax_series", ",", "mul_y_labels", ")", "return", "ax1" ]
Creates bar chart with multiple lines :param x_labels: Names for each variable :param mul_y_values: list of values of x labels :param mul_y_labels: list of labels for each y value :param normalize: True iff you want to normalize each y series :return: Bar chart
[ "Creates", "bar", "chart", "with", "multiple", "lines" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/charts/models.py#L68-L111
train
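The grouped-bar variant shifts each series around the shared tick positions; a standalone sketch of the same idea (the series and labels are illustrative, and normalization is omitted):

import numpy as np
import matplotlib.pyplot as plt

x_labels = ['a', 'b', 'c']                  # made-up categories
series = [[1, 4, 2], [3, 2, 5]]             # two made-up y series
labels = ['first', 'second']

fig, ax = plt.subplots()
n = len(series)
bar_width = 0.6 / n                         # total group width of 0.6
shifts = np.linspace(0, 0.6, n) - 0.3       # center the group on each tick
for i, ys in enumerate(series):
    ax.bar(np.arange(len(x_labels)) + shifts[i], ys,
           width=bar_width, align='center', label=labels[i])
ax.set_xticks(range(len(x_labels)))
ax.set_xticklabels(x_labels, rotation=90)
ax.legend()
plt.show()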
TorkamaniLab/metapipe
metapipe/models/command.py
Command.file_parts
def file_parts(self): """ Returns a list of the file tokens in the list of parts. """ file_parts = [] for part in self.parts: try: for sub_part in part: if isinstance(sub_part, FileToken): file_parts.append(sub_part) except TypeError: if isinstance(part, FileToken): file_parts.append(part) return file_parts
python
def file_parts(self): """ Returns a list of the file tokens in the list of parts. """ file_parts = [] for part in self.parts: try: for sub_part in part: if isinstance(sub_part, FileToken): file_parts.append(sub_part) except TypeError: if isinstance(part, FileToken): file_parts.append(part) return file_parts
[ "def", "file_parts", "(", "self", ")", ":", "file_parts", "=", "[", "]", "for", "part", "in", "self", ".", "parts", ":", "try", ":", "for", "sub_part", "in", "part", ":", "if", "isinstance", "(", "sub_part", ",", "FileToken", ")", ":", "file_parts", ".", "append", "(", "sub_part", ")", "except", "TypeError", ":", "if", "isinstance", "(", "part", ",", "FileToken", ")", ":", "file_parts", ".", "append", "(", "part", ")", "return", "file_parts" ]
Returns a list of the file tokens in the list of parts.
[ "Returns", "a", "list", "of", "the", "file", "tokens", "in", "the", "list", "of", "parts", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command.py#L47-L58
train
TorkamaniLab/metapipe
metapipe/models/command.py
Command.update_dependent_files
def update_dependent_files(self, prev_commands=[]): """ Update the command's dependencies based on the evaluated input and output of previous commands. """ for command in prev_commands: for my_input in self.input_parts: for their_output in command.output_parts: if their_output == my_input: my_input.filename = their_output.eval()
python
def update_dependent_files(self, prev_commands=[]): """ Update the command's dependencies based on the evaluated input and output of previous commands. """ for command in prev_commands: for my_input in self.input_parts: for their_output in command.output_parts: if their_output == my_input: my_input.filename = their_output.eval()
[ "def", "update_dependent_files", "(", "self", ",", "prev_commands", "=", "[", "]", ")", ":", "for", "command", "in", "prev_commands", ":", "for", "my_input", "in", "self", ".", "input_parts", ":", "for", "their_output", "in", "command", ".", "output_parts", ":", "if", "their_output", "==", "my_input", ":", "my_input", ".", "filename", "=", "their_output", ".", "eval", "(", ")" ]
Update the command's dependencies based on the evaluated input and output of previous commands.
[ "Update", "the", "command", "s", "dependencies", "based", "on", "the", "evaluated", "input", "and", "output", "of", "previous", "commands", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command.py#L66-L74
train
TorkamaniLab/metapipe
metapipe/models/command.py
Command.eval
def eval(self): """ Evaluate the given job and return a complete shell script to be run by the job manager. """ eval = [] for part in self.parts: try: result = part.eval() except AttributeError: result = part if result[-1] != '\n': result += ' ' eval.append(result) return ''.join(eval).strip()
python
def eval(self): """ Evaluate the given job and return a complete shell script to be run by the job manager. """ eval = [] for part in self.parts: try: result = part.eval() except AttributeError: result = part if result[-1] != '\n': result += ' ' eval.append(result) return ''.join(eval).strip()
[ "def", "eval", "(", "self", ")", ":", "eval", "=", "[", "]", "for", "part", "in", "self", ".", "parts", ":", "try", ":", "result", "=", "part", ".", "eval", "(", ")", "except", "AttributeError", ":", "result", "=", "part", "if", "result", "[", "-", "1", "]", "!=", "'\\n'", ":", "result", "+=", "' '", "eval", ".", "append", "(", "result", ")", "return", "''", ".", "join", "(", "eval", ")", ".", "strip", "(", ")" ]
Evaluate the given job and return a complete shell script to be run by the job manager.
[ "Evaluate", "the", "given", "job", "and", "return", "a", "complete", "shell", "script", "to", "be", "run", "by", "the", "job", "manager", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command.py#L76-L89
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/mayaplugins/jbreftrack.py
JB_ReftrackNode.add_type
def add_type(cls, typ): """Register a type for jb_reftrack nodes. A type specifies how the reference should be handled. For example the type shader will connect shaders with the parent when it the shaders are loaded. Default types are :data:`JB_ReftrackNode.types`. .. Note:: You have to add types before you initialize the plugin! :param typ: a new type specifier, e.g. \"asset\" :type typ: str :returns: None :rtype: None :raises: :class:`TypeError` """ if not isinstance(typ, basestring): raise TypeError("The type should be a string. But is %s" % type(typ)) cls.types.append(typ)
python
def add_type(cls, typ): """Register a type for jb_reftrack nodes. A type specifies how the reference should be handled. For example the type shader will connect shaders with the parent when it the shaders are loaded. Default types are :data:`JB_ReftrackNode.types`. .. Note:: You have to add types before you initialize the plugin! :param typ: a new type specifier, e.g. \"asset\" :type typ: str :returns: None :rtype: None :raises: :class:`TypeError` """ if not isinstance(typ, basestring): raise TypeError("The type should be a string. But is %s" % type(typ)) cls.types.append(typ)
[ "def", "add_type", "(", "cls", ",", "typ", ")", ":", "if", "not", "isinstance", "(", "typ", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"The type should be a string. But is %s\"", "%", "type", "(", "typ", ")", ")", "cls", ".", "types", ".", "append", "(", "typ", ")" ]
Register a type for jb_reftrack nodes. A type specifies how the reference should be handled. For example the type shader will connect shaders with the parent when the shaders are loaded. Default types are :data:`JB_ReftrackNode.types`. .. Note:: You have to add types before you initialize the plugin! :param typ: a new type specifier, e.g. \"asset\" :type typ: str :returns: None :rtype: None :raises: :class:`TypeError`
[ "Register", "a", "type", "for", "jb_reftrack", "nodes", "." ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/mayaplugins/jbreftrack.py#L79-L96
train
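A short registration sketch for the classmethod above; the import path is taken from this row's path field and the type name 'asset' is the example given in the docstring (running this outside a Maya environment may fail on the module's own imports):

from jukeboxmaya.mayaplugins.jbreftrack import JB_ReftrackNode

# Types must be registered before the plugin is initialized and must be strings.
JB_ReftrackNode.add_type('asset')
print(JB_ReftrackNode.types)    # now includes 'asset'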
FriendCode/funky
funky/funky.py
transform
def transform(transform_func): """Apply a transformation to a functions return value""" def decorator(func): @wraps(func) def f(*args, **kwargs): return transform_func( func(*args, **kwargs) ) return f return decorator
python
def transform(transform_func): """Apply a transformation to a functions return value""" def decorator(func): @wraps(func) def f(*args, **kwargs): return transform_func( func(*args, **kwargs) ) return f return decorator
[ "def", "transform", "(", "transform_func", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "transform_func", "(", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "f", "return", "decorator" ]
Apply a transformation to a functions return value
[ "Apply", "a", "transformation", "to", "a", "functions", "return", "value" ]
25450a61d531c97615384fc5ef80e4eee97200ac
https://github.com/FriendCode/funky/blob/25450a61d531c97615384fc5ef80e4eee97200ac/funky/funky.py#L193-L202
train
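A quick usage sketch of the decorator above; the import path is assumed from the repo layout and the decorated function is invented for illustration:

from funky.funky import transform

@transform(str.upper)            # post-process the return value
def greet(name):
    return 'hello ' + name

print(greet('world'))            # -> 'HELLO WORLD'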
FriendCode/funky
funky/funky.py
subkey
def subkey(dct, keys): """Get an entry from a dict of dicts by the list of keys to 'follow' """ key = keys[0] if len(keys) == 1: return dct[key] return subkey(dct[key], keys[1:])
python
def subkey(dct, keys): """Get an entry from a dict of dicts by the list of keys to 'follow' """ key = keys[0] if len(keys) == 1: return dct[key] return subkey(dct[key], keys[1:])
[ "def", "subkey", "(", "dct", ",", "keys", ")", ":", "key", "=", "keys", "[", "0", "]", "if", "len", "(", "keys", ")", "==", "1", ":", "return", "dct", "[", "key", "]", "return", "subkey", "(", "dct", "[", "key", "]", ",", "keys", "[", "1", ":", "]", ")" ]
Get an entry from a dict of dicts by the list of keys to 'follow'
[ "Get", "an", "entry", "from", "a", "dict", "of", "dicts", "by", "the", "list", "of", "keys", "to", "follow" ]
25450a61d531c97615384fc5ef80e4eee97200ac
https://github.com/FriendCode/funky/blob/25450a61d531c97615384fc5ef80e4eee97200ac/funky/funky.py#L257-L263
train
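A usage sketch for subkey with a made-up nested dict; the import path is assumed from the repo layout:

from funky.funky import subkey

cfg = {'db': {'mongo': {'host': 'localhost'}}}    # made-up nested dict
print(subkey(cfg, ['db', 'mongo', 'host']))       # -> 'localhost'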
sirfoga/pyhal
hal/internet/services/google/gauthenticator.py
GoogleApiOAuth.get_driver
def get_driver(self, name, version): """Authenticates and creates new API driver to perform scope stuff :param name: Name of driver :param version: Version of driver :return: driver """ user_credentials = self.get_user_credentials() # get credentials return discovery.build( name, version, http=self.authenticate(user_credentials) )
python
def get_driver(self, name, version): """Authenticates and creates new API driver to perform scope stuff :param name: Name of driver :param version: Version of driver :return: driver """ user_credentials = self.get_user_credentials() # get credentials return discovery.build( name, version, http=self.authenticate(user_credentials) )
[ "def", "get_driver", "(", "self", ",", "name", ",", "version", ")", ":", "user_credentials", "=", "self", ".", "get_user_credentials", "(", ")", "# get credentials", "return", "discovery", ".", "build", "(", "name", ",", "version", ",", "http", "=", "self", ".", "authenticate", "(", "user_credentials", ")", ")" ]
Authenticates and creates new API driver to perform scope stuff :param name: Name of driver :param version: Version of driver :return: driver
[ "Authenticates", "and", "creates", "new", "API", "driver", "to", "perform", "scope", "stuff" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/services/google/gauthenticator.py#L69-L80
train
portfors-lab/sparkle
sparkle/run/chart_runner.py
ChartRunner.start_chart
def start_chart(self): """Begin on-going chart style acqusition""" self.current_dataset_name = self.chart_name self.datafile.init_data(self.current_dataset_name, mode='continuous') self.chart_name = increment_title(self.chart_name) # stimulus tracker channel hard-coded at least chan for now self.player.start_continuous([self.aichan, u"PCI-6259/ai31"])
python
def start_chart(self): """Begin on-going chart style acqusition""" self.current_dataset_name = self.chart_name self.datafile.init_data(self.current_dataset_name, mode='continuous') self.chart_name = increment_title(self.chart_name) # stimulus tracker channel hard-coded at least chan for now self.player.start_continuous([self.aichan, u"PCI-6259/ai31"])
[ "def", "start_chart", "(", "self", ")", ":", "self", ".", "current_dataset_name", "=", "self", ".", "chart_name", "self", ".", "datafile", ".", "init_data", "(", "self", ".", "current_dataset_name", ",", "mode", "=", "'continuous'", ")", "self", ".", "chart_name", "=", "increment_title", "(", "self", ".", "chart_name", ")", "# stimulus tracker channel hard-coded at least chan for now", "self", ".", "player", ".", "start_continuous", "(", "[", "self", ".", "aichan", ",", "u\"PCI-6259/ai31\"", "]", ")" ]
Begin on-going chart style acquisition
[ "Begin", "on", "-", "going", "chart", "style", "acqusition" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/chart_runner.py#L18-L25
train
portfors-lab/sparkle
sparkle/tools/qtdoc.py
qtdoc_role
def qtdoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Links to a Qt class's doc Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ base = 'http://qt-project.org/doc/qt-4.8/' match = re.search('([^<]+)(<[^<>]+>)?', text) if match is None: raise ValueError label = match.group(1) if match.lastindex == 2: # remove the carots from second group clsmeth = match.group(2)[1:-1] # assumes single . separating a class and a method or property name cls, meth = clsmeth.split('.') ref = base + cls + '.html#' + meth else: ref = base + label.lower() + '.html' node = nodes.reference(rawtext, label, refuri=ref, **options) return [node], []
python
def qtdoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Links to a Qt class's doc Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ base = 'http://qt-project.org/doc/qt-4.8/' match = re.search('([^<]+)(<[^<>]+>)?', text) if match is None: raise ValueError label = match.group(1) if match.lastindex == 2: # remove the carots from second group clsmeth = match.group(2)[1:-1] # assumes single . separating a class and a method or property name cls, meth = clsmeth.split('.') ref = base + cls + '.html#' + meth else: ref = base + label.lower() + '.html' node = nodes.reference(rawtext, label, refuri=ref, **options) return [node], []
[ "def", "qtdoc_role", "(", "name", ",", "rawtext", ",", "text", ",", "lineno", ",", "inliner", ",", "options", "=", "{", "}", ",", "content", "=", "[", "]", ")", ":", "base", "=", "'http://qt-project.org/doc/qt-4.8/'", "match", "=", "re", ".", "search", "(", "'([^<]+)(<[^<>]+>)?'", ",", "text", ")", "if", "match", "is", "None", ":", "raise", "ValueError", "label", "=", "match", ".", "group", "(", "1", ")", "if", "match", ".", "lastindex", "==", "2", ":", "# remove the carots from second group", "clsmeth", "=", "match", ".", "group", "(", "2", ")", "[", "1", ":", "-", "1", "]", "# assumes single . separating a class and a method or property name", "cls", ",", "meth", "=", "clsmeth", ".", "split", "(", "'.'", ")", "ref", "=", "base", "+", "cls", "+", "'.html#'", "+", "meth", "else", ":", "ref", "=", "base", "+", "label", ".", "lower", "(", ")", "+", "'.html'", "node", "=", "nodes", ".", "reference", "(", "rawtext", ",", "label", ",", "refuri", "=", "ref", ",", "*", "*", "options", ")", "return", "[", "node", "]", ",", "[", "]" ]
Links to a Qt class's doc Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization.
[ "Links", "to", "a", "Qt", "class", "s", "doc" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/qtdoc.py#L9-L41
train
NoviceLive/intellicoder
intellicoder/transformers.py
update_func_body
def update_func_body(original, updater=None): """Update all function body using the updating function.""" updated = '' regex = r'([_\w][_\w\d]*)\s*\(.*\)\s*\{' match = re.search(regex, original) while match: name = match.group(1) logging.debug(_('Found candidate: %s'), name) start = match.end() end = start + find_balance_index(original[start:]) body = original[start:end] if updater: body = updater(body, name) updated += original[:start] + '\n' + body + original[end] original = original[end + 1:] match = re.search(regex, original) return updated
python
def update_func_body(original, updater=None): """Update all function body using the updating function.""" updated = '' regex = r'([_\w][_\w\d]*)\s*\(.*\)\s*\{' match = re.search(regex, original) while match: name = match.group(1) logging.debug(_('Found candidate: %s'), name) start = match.end() end = start + find_balance_index(original[start:]) body = original[start:end] if updater: body = updater(body, name) updated += original[:start] + '\n' + body + original[end] original = original[end + 1:] match = re.search(regex, original) return updated
[ "def", "update_func_body", "(", "original", ",", "updater", "=", "None", ")", ":", "updated", "=", "''", "regex", "=", "r'([_\\w][_\\w\\d]*)\\s*\\(.*\\)\\s*\\{'", "match", "=", "re", ".", "search", "(", "regex", ",", "original", ")", "while", "match", ":", "name", "=", "match", ".", "group", "(", "1", ")", "logging", ".", "debug", "(", "_", "(", "'Found candidate: %s'", ")", ",", "name", ")", "start", "=", "match", ".", "end", "(", ")", "end", "=", "start", "+", "find_balance_index", "(", "original", "[", "start", ":", "]", ")", "body", "=", "original", "[", "start", ":", "end", "]", "if", "updater", ":", "body", "=", "updater", "(", "body", ",", "name", ")", "updated", "+=", "original", "[", ":", "start", "]", "+", "'\\n'", "+", "body", "+", "original", "[", "end", "]", "original", "=", "original", "[", "end", "+", "1", ":", "]", "match", "=", "re", ".", "search", "(", "regex", ",", "original", ")", "return", "updated" ]
Update all function bodies using the updating function.
[ "Update", "all", "function", "body", "using", "the", "updating", "function", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L272-L288
train
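A sketch of driving update_func_body with a trivial updater; the import path is taken from this row's path field and the C snippet is made up:

from intellicoder.transformers import update_func_body

def tag_body(body, name):
    # toy updater: prepend a marker naming the enclosing function
    return '/* body of {} */'.format(name) + body

src = 'int main() { return 0; }'
print(update_func_body(src, tag_body))
# 'main' is matched by the regex and its brace-delimited body is
# passed through tag_body before being re-assembled.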
NoviceLive/intellicoder
intellicoder/transformers.py
find_balance_index
def find_balance_index(source, start='{', end='}'): """Get the first balance index.""" state = 1 for index, char in enumerate(source): if char == start: state += 1 elif char == end: state -= 1 if state == 0: return index raise RuntimeError('This should not happen: Balance Not Found')
python
def find_balance_index(source, start='{', end='}'): """Get the first balance index.""" state = 1 for index, char in enumerate(source): if char == start: state += 1 elif char == end: state -= 1 if state == 0: return index raise RuntimeError('This should not happen: Balance Not Found')
[ "def", "find_balance_index", "(", "source", ",", "start", "=", "'{'", ",", "end", "=", "'}'", ")", ":", "state", "=", "1", "for", "index", ",", "char", "in", "enumerate", "(", "source", ")", ":", "if", "char", "==", "start", ":", "state", "+=", "1", "elif", "char", "==", "end", ":", "state", "-=", "1", "if", "state", "==", "0", ":", "return", "index", "raise", "RuntimeError", "(", "'This should not happen: Balance Not Found'", ")" ]
Get the first balance index.
[ "Get", "the", "first", "balance", "index", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L291-L301
train
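A small check of find_balance_index; note the scan assumes one brace is already open, so the returned index is where the nesting level drops back to zero:

from intellicoder.transformers import find_balance_index

inner = 'a { b } } trailing'        # text following an already-open '{'
print(find_balance_index(inner))    # -> 8, the index of the matching '}'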
NoviceLive/intellicoder
intellicoder/transformers.py
WindowsTransformer.transform_sources
def transform_sources(self, sources, with_string=False):
        """Get the defintions of needed strings and functions
        after replacement.
        """
        modules = {}
        updater = partial(
            self.replace_source, modules=modules, prefix='string_')
        for filename in sources:
            updated = update_func_body(sources[filename], updater)
            sources[filename] = EXTERN_AND_SEG + updated
        logging.debug('modules: %s', modules)
        return sources, self.build_funcs(modules)
python
def transform_sources(self, sources, with_string=False):
        """Get the defintions of needed strings and functions
        after replacement.
        """
        modules = {}
        updater = partial(
            self.replace_source, modules=modules, prefix='string_')
        for filename in sources:
            updated = update_func_body(sources[filename], updater)
            sources[filename] = EXTERN_AND_SEG + updated
        logging.debug('modules: %s', modules)
        return sources, self.build_funcs(modules)
[ "def", "transform_sources", "(", "self", ",", "sources", ",", "with_string", "=", "False", ")", ":", "modules", "=", "{", "}", "updater", "=", "partial", "(", "self", ".", "replace_source", ",", "modules", "=", "modules", ",", "prefix", "=", "'string_'", ")", "for", "filename", "in", "sources", ":", "updated", "=", "update_func_body", "(", "sources", "[", "filename", "]", ",", "updater", ")", "sources", "[", "filename", "]", "=", "EXTERN_AND_SEG", "+", "updated", "logging", ".", "debug", "(", "'modules: %s'", ",", "modules", ")", "return", "sources", ",", "self", ".", "build_funcs", "(", "modules", ")" ]
Get the defintions of needed strings and functions after replacement.
[ "Get", "the", "defintions", "of", "needed", "strings", "and", "functions", "after", "replacement", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L80-L91
train
NoviceLive/intellicoder
intellicoder/transformers.py
WindowsTransformer.replace_source
def replace_source(self, source, name, modules, prefix):
        """Scan C source code for string literals as well as
        function calls and do replacement using the specified
        replacing function.

        Note that the regular expression currently used for strings
        is naive or quick and dirty.
        """
        needs_windll = False

        def _func_replacer(match, modules, windll):
            matched = match.group(0)
            if matched in self.BLACKLIST:
                return matched
            module = self.database.query_func_module(matched)
            if module:
                try:
                    modules[module[0]] += [module[1]]
                except KeyError:
                    modules[module[0]] = [module[1]]
                if windll:
                    return '{}->{}.{}'.format(windll, *module)
                return '{}->{}'.format(*module)
            return matched
        replacer = partial(
            _func_replacer, modules=modules, windll='windll')
        replaced = re.sub(r'[_\w][_\w\d]*(?=\s*\()', replacer, source)
        if source != replaced:
            needs_windll = True

        str_table = {}

        def _string_replacer(match):
            matched = match.group()[1:-1]
            try:
                number = str_table[matched]
            except KeyError:
                number = len(str_table) + 1
                str_table.update({matched: number})
            return '{}{}'.format(prefix, number)
        replaced = re.sub(r'".+?"', _string_replacer, replaced)
        strings, relocs = self.build_strings(str_table, prefix)
        strings = ''.join(strings).strip()

        windll32 = reloc_var('windll', 'reloc_delta', True, 'windll_t')
        if needs_windll:
            relocs += [windll32]
        if strings:
            strings = '\n' + strings
            if not needs_windll:
                relocs += [windll32]
                needs_windll = True
        windll64 = ''
        if needs_windll:
            windll64 = '{0} *{1} = &_{1};\n'.format('windll_t', 'windll')
        relocs = reloc_both(''.join(relocs), windll64)
        if name in ['main']:
            replaced = '\ninit();' + replaced
        return strings + relocs + replaced
python
def replace_source(self, source, name, modules, prefix):
        """Scan C source code for string literals as well as
        function calls and do replacement using the specified
        replacing function.

        Note that the regular expression currently used for strings
        is naive or quick and dirty.
        """
        needs_windll = False

        def _func_replacer(match, modules, windll):
            matched = match.group(0)
            if matched in self.BLACKLIST:
                return matched
            module = self.database.query_func_module(matched)
            if module:
                try:
                    modules[module[0]] += [module[1]]
                except KeyError:
                    modules[module[0]] = [module[1]]
                if windll:
                    return '{}->{}.{}'.format(windll, *module)
                return '{}->{}'.format(*module)
            return matched
        replacer = partial(
            _func_replacer, modules=modules, windll='windll')
        replaced = re.sub(r'[_\w][_\w\d]*(?=\s*\()', replacer, source)
        if source != replaced:
            needs_windll = True

        str_table = {}

        def _string_replacer(match):
            matched = match.group()[1:-1]
            try:
                number = str_table[matched]
            except KeyError:
                number = len(str_table) + 1
                str_table.update({matched: number})
            return '{}{}'.format(prefix, number)
        replaced = re.sub(r'".+?"', _string_replacer, replaced)
        strings, relocs = self.build_strings(str_table, prefix)
        strings = ''.join(strings).strip()

        windll32 = reloc_var('windll', 'reloc_delta', True, 'windll_t')
        if needs_windll:
            relocs += [windll32]
        if strings:
            strings = '\n' + strings
            if not needs_windll:
                relocs += [windll32]
                needs_windll = True
        windll64 = ''
        if needs_windll:
            windll64 = '{0} *{1} = &_{1};\n'.format('windll_t', 'windll')
        relocs = reloc_both(''.join(relocs), windll64)
        if name in ['main']:
            replaced = '\ninit();' + replaced
        return strings + relocs + replaced
[ "def", "replace_source", "(", "self", ",", "source", ",", "name", ",", "modules", ",", "prefix", ")", ":", "needs_windll", "=", "False", "def", "_func_replacer", "(", "match", ",", "modules", ",", "windll", ")", ":", "matched", "=", "match", ".", "group", "(", "0", ")", "if", "matched", "in", "self", ".", "BLACKLIST", ":", "return", "matched", "module", "=", "self", ".", "database", ".", "query_func_module", "(", "matched", ")", "if", "module", ":", "try", ":", "modules", "[", "module", "[", "0", "]", "]", "+=", "[", "module", "[", "1", "]", "]", "except", "KeyError", ":", "modules", "[", "module", "[", "0", "]", "]", "=", "[", "module", "[", "1", "]", "]", "if", "windll", ":", "return", "'{}->{}.{}'", ".", "format", "(", "windll", ",", "*", "module", ")", "return", "'{}->{}'", ".", "format", "(", "*", "module", ")", "return", "matched", "replacer", "=", "partial", "(", "_func_replacer", ",", "modules", "=", "modules", ",", "windll", "=", "'windll'", ")", "replaced", "=", "re", ".", "sub", "(", "r'[_\\w][_\\w\\d]*(?=\\s*\\()'", ",", "replacer", ",", "source", ")", "if", "source", "!=", "replaced", ":", "needs_windll", "=", "True", "str_table", "=", "{", "}", "def", "_string_replacer", "(", "match", ")", ":", "matched", "=", "match", ".", "group", "(", ")", "[", "1", ":", "-", "1", "]", "try", ":", "number", "=", "str_table", "[", "matched", "]", "except", "KeyError", ":", "number", "=", "len", "(", "str_table", ")", "+", "1", "str_table", ".", "update", "(", "{", "matched", ":", "number", "}", ")", "return", "'{}{}'", ".", "format", "(", "prefix", ",", "number", ")", "replaced", "=", "re", ".", "sub", "(", "r'\".+?\"'", ",", "_string_replacer", ",", "replaced", ")", "strings", ",", "relocs", "=", "self", ".", "build_strings", "(", "str_table", ",", "prefix", ")", "strings", "=", "''", ".", "join", "(", "strings", ")", ".", "strip", "(", ")", "windll32", "=", "reloc_var", "(", "'windll'", ",", "'reloc_delta'", ",", "True", ",", "'windll_t'", ")", "if", "needs_windll", ":", "relocs", "+=", "[", "windll32", "]", "if", "strings", ":", "strings", "=", "'\\n'", "+", "strings", "if", "not", "needs_windll", ":", "relocs", "+=", "[", "windll32", "]", "needs_windll", "=", "True", "windll64", "=", "''", "if", "needs_windll", ":", "windll64", "=", "'{0} *{1} = &_{1};\\n'", ".", "format", "(", "'windll_t'", ",", "'windll'", ")", "relocs", "=", "reloc_both", "(", "''", ".", "join", "(", "relocs", ")", ",", "windll64", ")", "if", "name", "in", "[", "'main'", "]", ":", "replaced", "=", "'\\ninit();'", "+", "replaced", "return", "strings", "+", "relocs", "+", "replaced" ]
Scan C source code for string literals as well as function calls and do replacement using the specified replacing function. Note that the regular expression currently used for strings is naive or quick and dirty.
[ "Scan", "C", "source", "code", "for", "string", "literals", "as", "well", "as", "function", "calls", "and", "do", "replacement", "using", "the", "specified", "replacing", "function", ".", "Note", "that", "the", "regular", "expression", "currently", "used", "for", "strings", "is", "naive", "or", "quick", "and", "dirty", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L93-L155
train
NoviceLive/intellicoder
intellicoder/transformers.py
WindowsTransformer.build_funcs
def build_funcs(modules):
        """Build a used functions and modules list
        for later consumption.
        """
        kernel32 = ['kernel32_']
        try:
            kernel32 += remove_dups(modules['kernel32'])
        except KeyError:
            if len(modules) and 'LoadLibraryA' not in kernel32:
                kernel32.insert(1, 'LoadLibraryA')
        if len(modules) > 1 and 'LoadLibraryA' not in kernel32:
            kernel32.insert(1, 'LoadLibraryA')
        if 'GetProcAddress' not in kernel32:
            kernel32.insert(1, 'GetProcAddress')
        logging.debug('kernel32: %s', kernel32)
        for module, funcs in modules.items():
            logging.debug('%s: %s', module, funcs)
            if module != 'kernel32':
                kernel32.extend([module + '_'] + remove_dups(funcs))
        return kernel32
python
def build_funcs(modules):
        """Build a used functions and modules list
        for later consumption.
        """
        kernel32 = ['kernel32_']
        try:
            kernel32 += remove_dups(modules['kernel32'])
        except KeyError:
            if len(modules) and 'LoadLibraryA' not in kernel32:
                kernel32.insert(1, 'LoadLibraryA')
        if len(modules) > 1 and 'LoadLibraryA' not in kernel32:
            kernel32.insert(1, 'LoadLibraryA')
        if 'GetProcAddress' not in kernel32:
            kernel32.insert(1, 'GetProcAddress')
        logging.debug('kernel32: %s', kernel32)
        for module, funcs in modules.items():
            logging.debug('%s: %s', module, funcs)
            if module != 'kernel32':
                kernel32.extend([module + '_'] + remove_dups(funcs))
        return kernel32
[ "def", "build_funcs", "(", "modules", ")", ":", "kernel32", "=", "[", "'kernel32_'", "]", "try", ":", "kernel32", "+=", "remove_dups", "(", "modules", "[", "'kernel32'", "]", ")", "except", "KeyError", ":", "if", "len", "(", "modules", ")", "and", "'LoadLibraryA'", "not", "in", "kernel32", ":", "kernel32", ".", "insert", "(", "1", ",", "'LoadLibraryA'", ")", "if", "len", "(", "modules", ")", ">", "1", "and", "'LoadLibraryA'", "not", "in", "kernel32", ":", "kernel32", ".", "insert", "(", "1", ",", "'LoadLibraryA'", ")", "if", "'GetProcAddress'", "not", "in", "kernel32", ":", "kernel32", ".", "insert", "(", "1", ",", "'GetProcAddress'", ")", "logging", ".", "debug", "(", "'kernel32: %s'", ",", "kernel32", ")", "for", "module", ",", "funcs", "in", "modules", ".", "items", "(", ")", ":", "logging", ".", "debug", "(", "'%s: %s'", ",", "module", ",", "funcs", ")", "if", "module", "!=", "'kernel32'", ":", "kernel32", ".", "extend", "(", "[", "module", "+", "'_'", "]", "+", "remove_dups", "(", "funcs", ")", ")", "return", "kernel32" ]
Build a used functions and modules list for later consumption.
[ "Build", "a", "used", "functions", "and", "modules", "list", "for", "later", "consumption", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L158-L177
train
NoviceLive/intellicoder
intellicoder/transformers.py
WindowsTransformer.build_strings
def build_strings(strings, prefix):
        """Construct string definitions according to
        the previously maintained table.
        """
        strings = [
            (
                make_c_str(prefix + str(number), value),
                reloc_ptr(
                    prefix + str(number), 'reloc_delta', 'char *'
                )
            ) for value, number in sort_values(strings)
        ]
        return [i[0] for i in strings], [i[1] for i in strings]
python
def build_strings(strings, prefix):
        """Construct string definitions according to
        the previously maintained table.
        """
        strings = [
            (
                make_c_str(prefix + str(number), value),
                reloc_ptr(
                    prefix + str(number), 'reloc_delta', 'char *'
                )
            ) for value, number in sort_values(strings)
        ]
        return [i[0] for i in strings], [i[1] for i in strings]
[ "def", "build_strings", "(", "strings", ",", "prefix", ")", ":", "strings", "=", "[", "(", "make_c_str", "(", "prefix", "+", "str", "(", "number", ")", ",", "value", ")", ",", "reloc_ptr", "(", "prefix", "+", "str", "(", "number", ")", ",", "'reloc_delta'", ",", "'char *'", ")", ")", "for", "value", ",", "number", "in", "sort_values", "(", "strings", ")", "]", "return", "[", "i", "[", "0", "]", "for", "i", "in", "strings", "]", ",", "[", "i", "[", "1", "]", "for", "i", "in", "strings", "]" ]
Construct string definitions according to the previously maintained table.
[ "Construct", "string", "definitions", "according", "to", "the", "previously", "maintained", "table", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L180-L192
train
IS-ENES-Data/esgf-pid
esgfpid/assistant/publish.py
DatasetPublicationAssistant.add_file
def add_file(self, **args):
        '''
        Adds a file's information to the set of files
        to be published in this dataset.

        :param file_name: Mandatory. The file name (string). This
            information will simply be included in the PID record,
            but not used for anything.

        :param file_handle: Mandatory. The handle (PID) of this file
            (string). It is included in the file's netcdf header. It
            must bear the prefix that this library (or rather, the
            consuming servlet that will consume this library's
            requests), has write access to.

        :param file_size: Mandatory. The file size (as string or
            integer. Will be transformed to integer). This information
            will be included in the handle record and used for
            consistency checks during republications of files with
            the same handle.

        :param checksum: Mandatory. The file's checksum. This
            information will be included in the handle record and
            used for consistency checks during republications of
            files with the same handle.

        :param checksum_type: Mandatory. The checksum type/method
            (string), e.g. "MD5" or "SHA256". This information will
            be included in the handle record and used for consistency
            checks during republications of files with the same
            handle.

        :param publish_path: Mandatory. The THREDDS publish path as a
            string. This is part of the URL for accessing the file,
            which will be part of the handle record. It will not be
            accessed, neither by the library nor by the consumer. The
            URL consists of the dataset's "data_node", the dataset's
            "thredds_service_path", and this "publish_path". Redundant
            slashes are removed. If the URL does not start with
            "http", "http://" is added.

        :param file_version: Mandatory. Any string. File versions are
            not managed in the PID. This information will simply be
            included in the PID record, but not used for any
            reasoning.
        '''

        # Check if allowed:
        self.__check_if_adding_files_allowed_right_now()

        # Check if args ok:
        mandatory_args = ['file_name', 'file_handle', 'file_size',
                          'checksum', 'publish_path', 'checksum_type',
                          'file_version']
        utils.check_presence_of_mandatory_args(args, mandatory_args)
        self.__enforce_integer_file_size(args)
        self.__enforce_string_file_version(args)

        # Add file:
        self.__check_and_correct_handle_syntax(args)
        self.__add_file(**args)
python
def add_file(self, **args):
        '''
        Adds a file's information to the set of files
        to be published in this dataset.

        :param file_name: Mandatory. The file name (string). This
            information will simply be included in the PID record,
            but not used for anything.

        :param file_handle: Mandatory. The handle (PID) of this file
            (string). It is included in the file's netcdf header. It
            must bear the prefix that this library (or rather, the
            consuming servlet that will consume this library's
            requests), has write access to.

        :param file_size: Mandatory. The file size (as string or
            integer. Will be transformed to integer). This information
            will be included in the handle record and used for
            consistency checks during republications of files with
            the same handle.

        :param checksum: Mandatory. The file's checksum. This
            information will be included in the handle record and
            used for consistency checks during republications of
            files with the same handle.

        :param checksum_type: Mandatory. The checksum type/method
            (string), e.g. "MD5" or "SHA256". This information will
            be included in the handle record and used for consistency
            checks during republications of files with the same
            handle.

        :param publish_path: Mandatory. The THREDDS publish path as a
            string. This is part of the URL for accessing the file,
            which will be part of the handle record. It will not be
            accessed, neither by the library nor by the consumer. The
            URL consists of the dataset's "data_node", the dataset's
            "thredds_service_path", and this "publish_path". Redundant
            slashes are removed. If the URL does not start with
            "http", "http://" is added.

        :param file_version: Mandatory. Any string. File versions are
            not managed in the PID. This information will simply be
            included in the PID record, but not used for any
            reasoning.
        '''

        # Check if allowed:
        self.__check_if_adding_files_allowed_right_now()

        # Check if args ok:
        mandatory_args = ['file_name', 'file_handle', 'file_size',
                          'checksum', 'publish_path', 'checksum_type',
                          'file_version']
        utils.check_presence_of_mandatory_args(args, mandatory_args)
        self.__enforce_integer_file_size(args)
        self.__enforce_string_file_version(args)

        # Add file:
        self.__check_and_correct_handle_syntax(args)
        self.__add_file(**args)
[ "def", "add_file", "(", "self", ",", "*", "*", "args", ")", ":", "# Check if allowed:", "self", ".", "__check_if_adding_files_allowed_right_now", "(", ")", "# Check if args ok:", "mandatory_args", "=", "[", "'file_name'", ",", "'file_handle'", ",", "'file_size'", ",", "'checksum'", ",", "'publish_path'", ",", "'checksum_type'", ",", "'file_version'", "]", "utils", ".", "check_presence_of_mandatory_args", "(", "args", ",", "mandatory_args", ")", "self", ".", "__enforce_integer_file_size", "(", "args", ")", "self", ".", "__enforce_string_file_version", "(", "args", ")", "# Add file:", "self", ".", "__check_and_correct_handle_syntax", "(", "args", ")", "self", ".", "__add_file", "(", "*", "*", "args", ")" ]
Adds a file's information to the set of files to be published in this dataset. :param file_name: Mandatory. The file name (string). This information will simply be included in the PID record, but not used for anything. :param file_handle: Mandatory. The handle (PID) of this file (string). It is included in the file's netcdf header. It must bear the prefix that this library (or rather, the consuming servlet that will consume this library's requests), has write access to. :param file_size: Mandatory. The file size (as string or integer. Will be transformed to integer). This information will be included in the handle record and used for consistency checks during republications of files with the same handle. :param checksum: Mandatory. The file's checksum. This information will be included in the handle record and used for consistency checks during republications of files with the same handle. :param checksum_type: Mandatory. The checksum type/method (string), e.g. "MD5" or "SHA256". This information will be included in the handle record and used for consistency checks during republications of files with the same handle. :param publish_path: Mandatory. The THREDDS publish path as a string. This is part of the URL for accessing the file, which will be part of the handle record. It will not be accessed, neither by the library nor by the consumer. The URL consists of the dataset's "data_node", the dataset's "thredds_service_path", and this "publish_path". Redundant slashes are removed. If the URL does not start with "http", "http://" is added. :param file_version: Mandatory. Any string. File versions are not managed in the PID. This information will simply be included in the PID record, but not used for any reasoning.
[ "Adds", "a", "file", "s", "information", "to", "the", "set", "of", "files", "to", "be", "published", "in", "this", "dataset", "." ]
2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/assistant/publish.py#L119-L177
train
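A hedged usage sketch for the record above: assistant stands for a DatasetPublicationAssistant obtained elsewhere, and every field value below is an invented placeholder.
assistant.add_file(
    file_name='tas_day_MODEL_rcp85_r1i1p1_20060101-20101231.nc',
    file_handle='hdl:21.14100/00000000-1111-2222-3333-444444444444',
    file_size='1048576',  # accepted as string, stored as an integer
    checksum='deadbeefcafebabe',
    checksum_type='SHA256',
    publish_path='cmip5/output1/MODEL/rcp85/day/atmos/tas/v20190101/tas_day.nc',
    file_version='1',
)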
NoviceLive/intellicoder
intellicoder/utils.py
run_program
def run_program(program, *args):
    """Wrap subprocess.check_output to make life easier."""
    real_args = [program]
    real_args.extend(args)
    logging.debug(_('check_output arguments: %s'), real_args)
    check_output(real_args, universal_newlines=True)
python
def run_program(program, *args):
    """Wrap subprocess.check_output to make life easier."""
    real_args = [program]
    real_args.extend(args)
    logging.debug(_('check_output arguments: %s'), real_args)
    check_output(real_args, universal_newlines=True)
[ "def", "run_program", "(", "program", ",", "*", "args", ")", ":", "real_args", "=", "[", "program", "]", "real_args", ".", "extend", "(", "args", ")", "logging", ".", "debug", "(", "_", "(", "'check_output arguments: %s'", ")", ",", "real_args", ")", "check_output", "(", "real_args", ",", "universal_newlines", "=", "True", ")" ]
Wrap subprocess.check_output to make life easier.
[ "Wrap", "subprocess", ".", "check_output", "to", "make", "life", "easier", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L57-L62
train
NoviceLive/intellicoder
intellicoder/utils.py
get_parent_dir
def get_parent_dir(name):
    """Get the parent directory of a filename."""
    parent_dir = os.path.dirname(os.path.dirname(name))
    if parent_dir:
        return parent_dir
    return os.path.abspath('.')
python
def get_parent_dir(name):
    """Get the parent directory of a filename."""
    parent_dir = os.path.dirname(os.path.dirname(name))
    if parent_dir:
        return parent_dir
    return os.path.abspath('.')
[ "def", "get_parent_dir", "(", "name", ")", ":", "parent_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "name", ")", ")", "if", "parent_dir", ":", "return", "parent_dir", "return", "os", ".", "path", ".", "abspath", "(", "'.'", ")" ]
Get the parent directory of a filename.
[ "Get", "the", "parent", "directory", "of", "a", "filename", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L76-L81
train
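Behaviour sketch with made-up paths: the function returns the directory two levels up from the given file name, falling back to the current directory when there is none.
get_parent_dir('/opt/project/src/main.c')  # -> '/opt/project'
get_parent_dir('main.c')                   # -> os.path.abspath('.')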
NoviceLive/intellicoder
intellicoder/utils.py
split_ext
def split_ext(path, basename=True):
    """Wrap them to make life easier."""
    if basename:
        path = os.path.basename(path)
    return os.path.splitext(path)
python
def split_ext(path, basename=True):
    """Wrap them to make life easier."""
    if basename:
        path = os.path.basename(path)
    return os.path.splitext(path)
[ "def", "split_ext", "(", "path", ",", "basename", "=", "True", ")", ":", "if", "basename", ":", "path", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "return", "os", ".", "path", ".", "splitext", "(", "path", ")" ]
Wrap them to make life easier.
[ "Wrap", "them", "to", "make", "life", "easier", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L105-L109
train
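Illustrative calls with hypothetical paths:
split_ext('/tmp/build/shellcode.bin')                  # -> ('shellcode', '.bin')
split_ext('/tmp/build/shellcode.bin', basename=False)  # -> ('/tmp/build/shellcode', '.bin')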
NoviceLive/intellicoder
intellicoder/utils.py
ad_hoc_magic_from_file
def ad_hoc_magic_from_file(filename, **kwargs):
    """Ad-hoc emulation of magic.from_file from python-magic."""
    with open(filename, 'rb') as stream:
        head = stream.read(16)
        if head[:4] == b'\x7fELF':
            return b'application/x-executable'
        elif head[:2] == b'MZ':
            return b'application/x-dosexec'
        else:
            raise NotImplementedError()
python
def ad_hoc_magic_from_file(filename, **kwargs):
    """Ad-hoc emulation of magic.from_file from python-magic."""
    with open(filename, 'rb') as stream:
        head = stream.read(16)
        if head[:4] == b'\x7fELF':
            return b'application/x-executable'
        elif head[:2] == b'MZ':
            return b'application/x-dosexec'
        else:
            raise NotImplementedError()
[ "def", "ad_hoc_magic_from_file", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "stream", ":", "head", "=", "stream", ".", "read", "(", "16", ")", "if", "head", "[", ":", "4", "]", "==", "b'\\x7fELF'", ":", "return", "b'application/x-executable'", "elif", "head", "[", ":", "2", "]", "==", "b'MZ'", ":", "return", "b'application/x-dosexec'", "else", ":", "raise", "NotImplementedError", "(", ")" ]
Ad-hoc emulation of magic.from_file from python-magic.
[ "Ad", "-", "hoc", "emulation", "of", "magic", ".", "from_file", "from", "python", "-", "magic", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L113-L122
train
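A small dispatch sketch (the file name is hypothetical); note that the return values are bytes and anything other than an ELF or MZ header raises NotImplementedError.
kind = ad_hoc_magic_from_file('payload.bin')
if kind == b'application/x-dosexec':
    print('PE file')
elif kind == b'application/x-executable':
    print('ELF file')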
NoviceLive/intellicoder
intellicoder/utils.py
expand_path
def expand_path(*paths):
    """Expand the path with the directory of the executed file."""
    return os.path.join(
        os.path.dirname(os.path.realpath(sys.argv[0])), *paths)
python
def expand_path(*paths):
    """Expand the path with the directory of the executed file."""
    return os.path.join(
        os.path.dirname(os.path.realpath(sys.argv[0])), *paths)
[ "def", "expand_path", "(", "*", "paths", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", ",", "*", "paths", ")" ]
Expand the path with the directory of the executed file.
[ "Expand", "the", "path", "with", "the", "directory", "of", "the", "executed", "file", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L125-L128
train
NoviceLive/intellicoder
intellicoder/utils.py
translate_filenames
def translate_filenames(filenames):
    """Convert filenames from Linux to Windows."""
    if is_windows():
        return filenames
    for index, filename in enumerate(filenames):
        filenames[index] = vboxsf_to_windows(filename)
python
def translate_filenames(filenames):
    """Convert filenames from Linux to Windows."""
    if is_windows():
        return filenames
    for index, filename in enumerate(filenames):
        filenames[index] = vboxsf_to_windows(filename)
[ "def", "translate_filenames", "(", "filenames", ")", ":", "if", "is_windows", "(", ")", ":", "return", "filenames", "for", "index", ",", "filename", "in", "enumerate", "(", "filenames", ")", ":", "filenames", "[", "index", "]", "=", "vboxsf_to_windows", "(", "filename", ")" ]
Convert filenames from Linux to Windows.
[ "Convert", "filenames", "from", "Linux", "to", "Windows", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L166-L171
train
NoviceLive/intellicoder
intellicoder/utils.py
vboxsf_to_windows
def vboxsf_to_windows(filename, letter='f:'):
    """Convert the Linux path name to a Windows one."""
    home = os.path.expanduser('~')
    filename = os.path.abspath(filename).replace(home, letter)
    return filename.replace('/', '\\')
python
def vboxsf_to_windows(filename, letter='f:'):
    """Convert the Linux path name to a Windows one."""
    home = os.path.expanduser('~')
    filename = os.path.abspath(filename).replace(home, letter)
    return filename.replace('/', '\\')
[ "def", "vboxsf_to_windows", "(", "filename", ",", "letter", "=", "'f:'", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", ".", "replace", "(", "home", ",", "letter", ")", "return", "filename", ".", "replace", "(", "'/'", ",", "'\\\\'", ")" ]
Convert the Linux path name to a Windows one.
[ "Convert", "the", "Linux", "path", "name", "to", "a", "Windows", "one", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L174-L178
train
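Assuming a home directory of /home/user and the default drive letter, a call might translate as follows (illustrative only):
vboxsf_to_windows('/home/user/projects/loader.c')
# -> 'f:\\projects\\loader.c'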
NoviceLive/intellicoder
intellicoder/utils.py
read_files
def read_files(filenames, with_name=False):
    """Read many files."""
    text = [read_file(filename) for filename in filenames]
    if with_name:
        return dict(zip(filenames, text))
    return text
python
def read_files(filenames, with_name=False):
    """Read many files."""
    text = [read_file(filename) for filename in filenames]
    if with_name:
        return dict(zip(filenames, text))
    return text
[ "def", "read_files", "(", "filenames", ",", "with_name", "=", "False", ")", ":", "text", "=", "[", "read_file", "(", "filename", ")", "for", "filename", "in", "filenames", "]", "if", "with_name", ":", "return", "dict", "(", "zip", "(", "filenames", ",", "text", ")", ")", "return", "text" ]
Read many files.
[ "Read", "many", "files", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L181-L186
train
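Usage sketch (file names invented); with_name=True pairs each name with its content.
sources = read_files(['main.c', 'loader.c'], with_name=True)
# -> {'main.c': '...contents...', 'loader.c': '...contents...'}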
NoviceLive/intellicoder
intellicoder/utils.py
write_files
def write_files(text, where='.'):
    """Write many files."""
    for filename in text:
        target = os.path.join(where, filename)
        write_file(target, text[filename])
python
def write_files(text, where='.'):
    """Write many files."""
    for filename in text:
        target = os.path.join(where, filename)
        write_file(target, text[filename])
[ "def", "write_files", "(", "text", ",", "where", "=", "'.'", ")", ":", "for", "filename", "in", "text", ":", "target", "=", "os", ".", "path", ".", "join", "(", "where", ",", "filename", ")", "write_file", "(", "target", ",", "text", "[", "filename", "]", ")" ]
Write many files.
[ "Write", "many", "files", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L189-L193
train
NoviceLive/intellicoder
intellicoder/utils.py
write_file
def write_file(filename, text):
    """Write text to a file."""
    logging.debug(_('Writing file: %s'), filename)
    try:
        with open(filename, 'w') as writable:
            writable.write(text)
    except (PermissionError, NotADirectoryError):
        logging.error(_('Error writing file: %s'), filename)
        return False
    return True
python
def write_file(filename, text):
    """Write text to a file."""
    logging.debug(_('Writing file: %s'), filename)
    try:
        with open(filename, 'w') as writable:
            writable.write(text)
    except (PermissionError, NotADirectoryError):
        logging.error(_('Error writing file: %s'), filename)
        return False
    return True
[ "def", "write_file", "(", "filename", ",", "text", ")", ":", "logging", ".", "debug", "(", "_", "(", "'Writing file: %s'", ")", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "writable", ":", "writable", ".", "write", "(", "text", ")", "except", "(", "PermissionError", ",", "NotADirectoryError", ")", ":", "logging", ".", "error", "(", "_", "(", "'Error writing file: %s'", ")", ",", "filename", ")", "return", "False", "return", "True" ]
Write text to a file.
[ "Write", "text", "to", "a", "file", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L196-L205
train
NoviceLive/intellicoder
intellicoder/utils.py
stylify_files
def stylify_files(text):
    """Stylify many files."""
    for filename in text:
        text[filename] = stylify_code(text[filename])
    return text
python
def stylify_files(text):
    """Stylify many files."""
    for filename in text:
        text[filename] = stylify_code(text[filename])
    return text
[ "def", "stylify_files", "(", "text", ")", ":", "for", "filename", "in", "text", ":", "text", "[", "filename", "]", "=", "stylify_code", "(", "text", "[", "filename", "]", ")", "return", "text" ]
Stylify many files.
[ "Stylify", "many", "files", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L208-L212
train
NoviceLive/intellicoder
intellicoder/utils.py
stylify_code
def stylify_code(code):
    """Stylify the C source code using astyle."""
    try:
        output = check_output(
            ['astyle', '--max-code-length=69', '--indent=spaces=2'],
            universal_newlines=True, input=code
        )
    except (OSError, CalledProcessError, TypeError):
        logging.debug(_('failed to stylify code'))
        return code
    return output
python
def stylify_code(code):
    """Stylify the C source code using astyle."""
    try:
        output = check_output(
            ['astyle', '--max-code-length=69', '--indent=spaces=2'],
            universal_newlines=True, input=code
        )
    except (OSError, CalledProcessError, TypeError):
        logging.debug(_('failed to stylify code'))
        return code
    return output
[ "def", "stylify_code", "(", "code", ")", ":", "try", ":", "output", "=", "check_output", "(", "[", "'astyle'", ",", "'--max-code-length=69'", ",", "'--indent=spaces=2'", "]", ",", "universal_newlines", "=", "True", ",", "input", "=", "code", ")", "except", "(", "OSError", ",", "CalledProcessError", ",", "TypeError", ")", ":", "logging", ".", "debug", "(", "_", "(", "'failed to stylify code'", ")", ")", "return", "code", "return", "output" ]
Stylify the C source code using astyle.
[ "Stylify", "the", "C", "source", "code", "using", "astyle", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L215-L225
train
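Usage sketch (input invented): when the astyle binary is unavailable the original text is returned unchanged, so the call is safe to make unconditionally.
print(stylify_code('int main(){int x=1;return x;}'))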
NoviceLive/intellicoder
intellicoder/utils.py
sort_item
def sort_item(iterable, number, reverse=False):
    """Sort the itertable according to the given number item."""
    return sorted(iterable, key=itemgetter(number), reverse=reverse)
python
def sort_item(iterable, number, reverse=False):
    """Sort the itertable according to the given number item."""
    return sorted(iterable, key=itemgetter(number), reverse=reverse)
[ "def", "sort_item", "(", "iterable", ",", "number", ",", "reverse", "=", "False", ")", ":", "return", "sorted", "(", "iterable", ",", "key", "=", "itemgetter", "(", "number", ")", ",", "reverse", "=", "reverse", ")" ]
Sort the itertable according to the given number item.
[ "Sort", "the", "itertable", "according", "to", "the", "given", "number", "item", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L233-L235
train
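Illustration with a made-up list of (name, size) tuples:
rows = [('b.bin', 120), ('a.bin', 80)]
sort_item(rows, 1)                # -> [('a.bin', 80), ('b.bin', 120)]
sort_item(rows, 0, reverse=True)  # -> [('b.bin', 120), ('a.bin', 80)]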
NoviceLive/intellicoder
intellicoder/utils.py
remove_by
def remove_by(keys, original):
    """Remove items in a list according to another list."""
    for i in [
            original[index] for index, needed in enumerate(keys)
            if not needed
    ]:
        original.remove(i)
python
def remove_by(keys, original):
    """Remove items in a list according to another list."""
    for i in [
            original[index] for index, needed in enumerate(keys)
            if not needed
    ]:
        original.remove(i)
[ "def", "remove_by", "(", "keys", ",", "original", ")", ":", "for", "i", "in", "[", "original", "[", "index", "]", "for", "index", ",", "needed", "in", "enumerate", "(", "keys", ")", "if", "not", "needed", "]", ":", "original", ".", "remove", "(", "i", ")" ]
Remove items in a list according to another list.
[ "Remove", "items", "in", "a", "list", "according", "to", "another", "list", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L262-L268
train
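A sketch of the in-place filtering (values invented): keys is a parallel list of booleans, and every item whose key is falsy is removed from original.
names = ['a.c', 'b.o', 'c.c']
keep = [name.endswith('.c') for name in names]
remove_by(keep, names)
# names is now ['a.c', 'c.c']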
NoviceLive/intellicoder
intellicoder/utils.py
group_by
def group_by(iterable, key_func):
    """Wrap itertools.groupby to make life easier."""
    groups = (
        list(sub) for key, sub in groupby(iterable, key_func)
    )
    return zip(groups, groups)
python
def group_by(iterable, key_func):
    """Wrap itertools.groupby to make life easier."""
    groups = (
        list(sub) for key, sub in groupby(iterable, key_func)
    )
    return zip(groups, groups)
[ "def", "group_by", "(", "iterable", ",", "key_func", ")", ":", "groups", "=", "(", "list", "(", "sub", ")", "for", "key", ",", "sub", "in", "groupby", "(", "iterable", ",", "key_func", ")", ")", "return", "zip", "(", "groups", ",", "groups", ")" ]
Wrap itertools.groupby to make life easier.
[ "Wrap", "itertools", ".", "groupby", "to", "make", "life", "easier", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L271-L276
train
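A sketch of how the paired groups come out (input invented): because both arguments of zip share one generator, consecutive groups are paired, for example a header group with the lines that follow it.
lines = ['# exports', 'foo', 'bar', '# imports', 'baz']
list(group_by(lines, lambda line: line.startswith('#')))
# -> [(['# exports'], ['foo', 'bar']), (['# imports'], ['baz'])]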